├── .dockerignore
├── .gitattributes
├── .github
│ ├── CODE_OF_CONDUCT.md
│ ├── ISSUE_TEMPLATE
│ │ ├── bug-report.yml
│ │ ├── config.yml
│ │ ├── feature-request.yml
│ │ └── question.yml
│ ├── PULL_REQUEST_TEMPLATE.md
│ ├── README_cn.md
│ ├── SECURITY.md
│ ├── dependabot.yml
│ └── workflows
│   ├── ci-testing.yml
│   ├── codeql-analysis.yml
│   ├── docker.yml
│   ├── greetings.yml
│   └── stale.yml
├── .gitignore
├── .pre-commit-config.yaml
├── CONTRIBUTING.md
├── LICENSE
├── README.md
├── README_rkopt.md
├── benchmarks.py
├── classify
│ ├── predict.py
│ ├── train.py
│ ├── tutorial.ipynb
│ └── val.py
├── data
│ ├── Argoverse.yaml
│ ├── GlobalWheat2020.yaml
│ ├── ImageNet.yaml
│ ├── Objects365.yaml
│ ├── SKU-110K.yaml
│ ├── VOC.yaml
│ ├── VisDrone.yaml
│ ├── coco.yaml
│ ├── coco128-seg.yaml
│ ├── coco128.yaml
│ ├── hyps
│ │ ├── hyp.Objects365.yaml
│ │ ├── hyp.VOC.yaml
│ │ ├── hyp.scratch-high.yaml
│ │ ├── hyp.scratch-low.yaml
│ │ └── hyp.scratch-med.yaml
│ ├── images
│ │ ├── bus.jpg
│ │ └── zidane.jpg
│ ├── scripts
│ │ ├── download_weights.sh
│ │ ├── get_coco.sh
│ │ ├── get_coco128.sh
│ │ └── get_imagenet.sh
│ └── xView.yaml
├── detect.py
├── export.py
├── hubconf.py
├── models
│ ├── __init__.py
│ ├── common.py
│ ├── common_rk_plug_in.py
│ ├── experimental.py
│ ├── hub
│ │ ├── anchors.yaml
│ │ ├── yolov3-spp.yaml
│ │ ├── yolov3-tiny.yaml
│ │ ├── yolov3.yaml
│ │ ├── yolov5-bifpn.yaml
│ │ ├── yolov5-fpn.yaml
│ │ ├── yolov5-p2.yaml
│ │ ├── yolov5-p34.yaml
│ │ ├── yolov5-p6.yaml
│ │ ├── yolov5-p7.yaml
│ │ ├── yolov5-panet.yaml
│ │ ├── yolov5l6.yaml
│ │ ├── yolov5m6.yaml
│ │ ├── yolov5n6.yaml
│ │ ├── yolov5s-LeakyReLU.yaml
│ │ ├── yolov5s-ghost.yaml
│ │ ├── yolov5s-transformer.yaml
│ │ ├── yolov5s6.yaml
│ │ └── yolov5x6.yaml
│ ├── segment
│ │ ├── yolov5l-seg.yaml
│ │ ├── yolov5m-seg.yaml
│ │ ├── yolov5n-seg.yaml
│ │ ├── yolov5s-seg.yaml
│ │ └── yolov5x-seg.yaml
│ ├── tf.py
│ ├── yolo.py
│ ├── yolov5l.yaml
│ ├── yolov5m.yaml
│ ├── yolov5n.yaml
│ ├── yolov5s.yaml
│ └── yolov5x.yaml
├── requirements.txt
├── segment
│ ├── predict.py
│ ├── train.py
│ ├── tutorial.ipynb
│ └── val.py
├── setup.cfg
├── train.py
├── tutorial.ipynb
├── utils
│ ├── __init__.py
│ ├── activations.py
│ ├── augmentations.py
│ ├── autoanchor.py
│ ├── autobatch.py
│ ├── aws
│ │ ├── __init__.py
│ │ ├── mime.sh
│ │ ├── resume.py
│ │ └── userdata.sh
│ ├── callbacks.py
│ ├── dataloaders.py
│ ├── docker
│ │ ├── Dockerfile
│ │ ├── Dockerfile-arm64
│ │ └── Dockerfile-cpu
│ ├── downloads.py
│ ├── flask_rest_api
│ │ ├── README.md
│ │ ├── example_request.py
│ │ └── restapi.py
│ ├── general.py
│ ├── google_app_engine
│ │ ├── Dockerfile
│ │ ├── additional_requirements.txt
│ │ └── app.yaml
│ ├── loggers
│ │ ├── __init__.py
│ │ ├── clearml
│ │ │ ├── README.md
│ │ │ ├── __init__.py
│ │ │ ├── clearml_utils.py
│ │ │ └── hpo.py
│ │ ├── comet
│ │ │ ├── README.md
│ │ │ ├── __init__.py
│ │ │ ├── comet_utils.py
│ │ │ ├── hpo.py
│ │ │ └── optimizer_config.json
│ │ └── wandb
│ │   ├── README.md
│ │   ├── __init__.py
│ │   ├── log_dataset.py
│ │   ├── sweep.py
│ │   ├── sweep.yaml
│ │   └── wandb_utils.py
│ ├── loss.py
│ ├── metrics.py
│ ├── plots.py
│ ├── segment
│ │ ├── __init__.py
│ │ ├── augmentations.py
│ │ ├── dataloaders.py
│ │ ├── general.py
│ │ ├── loss.py
│ │ ├── metrics.py
│ │ └── plots.py
│ ├── torch_utils.py
│ └── triton.py
└── val.py
/.dockerignore:
--------------------------------------------------------------------------------
1 | # Repo-specific DockerIgnore -------------------------------------------------------------------------------------------
2 | .git
3 | .cache
4 | .idea
5 | runs
6 | output
7 | coco
8 | storage.googleapis.com
9 |
10 | data/samples/*
11 | **/results*.csv
12 | *.jpg
13 |
14 | # Neural Network weights -----------------------------------------------------------------------------------------------
15 | **/*.pt
16 | **/*.pth
17 | **/*.onnx
18 | **/*.engine
19 | **/*.mlmodel
20 | **/*.torchscript
21 | **/*.torchscript.pt
22 | **/*.tflite
23 | **/*.h5
24 | **/*.pb
25 | *_saved_model/
26 | *_web_model/
27 | *_openvino_model/
28 |
29 | # Below Copied From .gitignore -----------------------------------------------------------------------------------------
31 |
32 |
33 | # GitHub Python GitIgnore ----------------------------------------------------------------------------------------------
34 | # Byte-compiled / optimized / DLL files
35 | __pycache__/
36 | *.py[cod]
37 | *$py.class
38 |
39 | # C extensions
40 | *.so
41 |
42 | # Distribution / packaging
43 | .Python
44 | env/
45 | build/
46 | develop-eggs/
47 | dist/
48 | downloads/
49 | eggs/
50 | .eggs/
51 | lib/
52 | lib64/
53 | parts/
54 | sdist/
55 | var/
56 | wheels/
57 | *.egg-info/
58 | wandb/
59 | .installed.cfg
60 | *.egg
61 |
62 | # PyInstaller
63 | # Usually these files are written by a python script from a template
64 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
65 | *.manifest
66 | *.spec
67 |
68 | # Installer logs
69 | pip-log.txt
70 | pip-delete-this-directory.txt
71 |
72 | # Unit test / coverage reports
73 | htmlcov/
74 | .tox/
75 | .coverage
76 | .coverage.*
77 | .cache
78 | nosetests.xml
79 | coverage.xml
80 | *.cover
81 | .hypothesis/
82 |
83 | # Translations
84 | *.mo
85 | *.pot
86 |
87 | # Django stuff:
88 | *.log
89 | local_settings.py
90 |
91 | # Flask stuff:
92 | instance/
93 | .webassets-cache
94 |
95 | # Scrapy stuff:
96 | .scrapy
97 |
98 | # Sphinx documentation
99 | docs/_build/
100 |
101 | # PyBuilder
102 | target/
103 |
104 | # Jupyter Notebook
105 | .ipynb_checkpoints
106 |
107 | # pyenv
108 | .python-version
109 |
110 | # celery beat schedule file
111 | celerybeat-schedule
112 |
113 | # SageMath parsed files
114 | *.sage.py
115 |
116 | # dotenv
117 | .env
118 |
119 | # virtualenv
120 | .venv*
121 | venv*/
122 | ENV*/
123 |
124 | # Spyder project settings
125 | .spyderproject
126 | .spyproject
127 |
128 | # Rope project settings
129 | .ropeproject
130 |
131 | # mkdocs documentation
132 | /site
133 |
134 | # mypy
135 | .mypy_cache/
136 |
137 |
138 | # https://github.com/github/gitignore/blob/master/Global/macOS.gitignore -----------------------------------------------
139 |
140 | # General
141 | .DS_Store
142 | .AppleDouble
143 | .LSOverride
144 |
145 | # Icon must end with two \r
146 | Icon
147 | Icon?
148 |
149 | # Thumbnails
150 | ._*
151 |
152 | # Files that might appear in the root of a volume
153 | .DocumentRevisions-V100
154 | .fseventsd
155 | .Spotlight-V100
156 | .TemporaryItems
157 | .Trashes
158 | .VolumeIcon.icns
159 | .com.apple.timemachine.donotpresent
160 |
161 | # Directories potentially created on remote AFP share
162 | .AppleDB
163 | .AppleDesktop
164 | Network Trash Folder
165 | Temporary Items
166 | .apdisk
167 |
168 |
169 | # https://github.com/github/gitignore/blob/master/Global/JetBrains.gitignore
170 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
171 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
172 |
173 | # User-specific stuff:
174 | .idea/*
175 | .idea/**/workspace.xml
176 | .idea/**/tasks.xml
177 | .idea/dictionaries
178 | .html # Bokeh Plots
179 | .pg # TensorFlow Frozen Graphs
180 | .avi # videos
181 |
182 | # Sensitive or high-churn files:
183 | .idea/**/dataSources/
184 | .idea/**/dataSources.ids
185 | .idea/**/dataSources.local.xml
186 | .idea/**/sqlDataSources.xml
187 | .idea/**/dynamic.xml
188 | .idea/**/uiDesigner.xml
189 |
190 | # Gradle:
191 | .idea/**/gradle.xml
192 | .idea/**/libraries
193 |
194 | # CMake
195 | cmake-build-debug/
196 | cmake-build-release/
197 |
198 | # Mongo Explorer plugin:
199 | .idea/**/mongoSettings.xml
200 |
201 | ## File-based project format:
202 | *.iws
203 |
204 | ## Plugin-specific files:
205 |
206 | # IntelliJ
207 | out/
208 |
209 | # mpeltonen/sbt-idea plugin
210 | .idea_modules/
211 |
212 | # JIRA plugin
213 | atlassian-ide-plugin.xml
214 |
215 | # Cursive Clojure plugin
216 | .idea/replstate.xml
217 |
218 | # Crashlytics plugin (for Android Studio and IntelliJ)
219 | com_crashlytics_export_strings.xml
220 | crashlytics.properties
221 | crashlytics-build.properties
222 | fabric.properties
223 |
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | # this drops notebooks from GitHub language stats
2 | *.ipynb linguist-vendored
3 |
--------------------------------------------------------------------------------
/.github/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 Contributor Covenant Code of Conduct
2 |
3 | ## Our Pledge
4 |
5 | We as members, contributors, and leaders pledge to make participation in our
6 | community a harassment-free experience for everyone, regardless of age, body
7 | size, visible or invisible disability, ethnicity, sex characteristics, gender
8 | identity and expression, level of experience, education, socio-economic status,
9 | nationality, personal appearance, race, religion, or sexual identity
10 | and orientation.
11 |
12 | We pledge to act and interact in ways that contribute to an open, welcoming,
13 | diverse, inclusive, and healthy community.
14 |
15 | ## Our Standards
16 |
17 | Examples of behavior that contributes to a positive environment for our
18 | community include:
19 |
20 | - Demonstrating empathy and kindness toward other people
21 | - Being respectful of differing opinions, viewpoints, and experiences
22 | - Giving and gracefully accepting constructive feedback
23 | - Accepting responsibility and apologizing to those affected by our mistakes,
24 | and learning from the experience
25 | - Focusing on what is best not just for us as individuals, but for the
26 | overall community
27 |
28 | Examples of unacceptable behavior include:
29 |
30 | - The use of sexualized language or imagery, and sexual attention or
31 | advances of any kind
32 | - Trolling, insulting or derogatory comments, and personal or political attacks
33 | - Public or private harassment
34 | - Publishing others' private information, such as a physical or email
35 | address, without their explicit permission
36 | - Other conduct which could reasonably be considered inappropriate in a
37 | professional setting
38 |
39 | ## Enforcement Responsibilities
40 |
41 | Community leaders are responsible for clarifying and enforcing our standards of
42 | acceptable behavior and will take appropriate and fair corrective action in
43 | response to any behavior that they deem inappropriate, threatening, offensive,
44 | or harmful.
45 |
46 | Community leaders have the right and responsibility to remove, edit, or reject
47 | comments, commits, code, wiki edits, issues, and other contributions that are
48 | not aligned to this Code of Conduct, and will communicate reasons for moderation
49 | decisions when appropriate.
50 |
51 | ## Scope
52 |
53 | This Code of Conduct applies within all community spaces, and also applies when
54 | an individual is officially representing the community in public spaces.
55 | Examples of representing our community include using an official e-mail address,
56 | posting via an official social media account, or acting as an appointed
57 | representative at an online or offline event.
58 |
59 | ## Enforcement
60 |
61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be
62 | reported to the community leaders responsible for enforcement at
63 | hello@ultralytics.com.
64 | All complaints will be reviewed and investigated promptly and fairly.
65 |
66 | All community leaders are obligated to respect the privacy and security of the
67 | reporter of any incident.
68 |
69 | ## Enforcement Guidelines
70 |
71 | Community leaders will follow these Community Impact Guidelines in determining
72 | the consequences for any action they deem in violation of this Code of Conduct:
73 |
74 | ### 1. Correction
75 |
76 | **Community Impact**: Use of inappropriate language or other behavior deemed
77 | unprofessional or unwelcome in the community.
78 |
79 | **Consequence**: A private, written warning from community leaders, providing
80 | clarity around the nature of the violation and an explanation of why the
81 | behavior was inappropriate. A public apology may be requested.
82 |
83 | ### 2. Warning
84 |
85 | **Community Impact**: A violation through a single incident or series
86 | of actions.
87 |
88 | **Consequence**: A warning with consequences for continued behavior. No
89 | interaction with the people involved, including unsolicited interaction with
90 | those enforcing the Code of Conduct, for a specified period of time. This
91 | includes avoiding interactions in community spaces as well as external channels
92 | like social media. Violating these terms may lead to a temporary or
93 | permanent ban.
94 |
95 | ### 3. Temporary Ban
96 |
97 | **Community Impact**: A serious violation of community standards, including
98 | sustained inappropriate behavior.
99 |
100 | **Consequence**: A temporary ban from any sort of interaction or public
101 | communication with the community for a specified period of time. No public or
102 | private interaction with the people involved, including unsolicited interaction
103 | with those enforcing the Code of Conduct, is allowed during this period.
104 | Violating these terms may lead to a permanent ban.
105 |
106 | ### 4. Permanent Ban
107 |
108 | **Community Impact**: Demonstrating a pattern of violation of community
109 | standards, including sustained inappropriate behavior, harassment of an
110 | individual, or aggression toward or disparagement of classes of individuals.
111 |
112 | **Consequence**: A permanent ban from any sort of public interaction within
113 | the community.
114 |
115 | ## Attribution
116 |
117 | This Code of Conduct is adapted from the [Contributor Covenant][homepage],
118 | version 2.0, available at
119 | https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
120 |
121 | Community Impact Guidelines were inspired by [Mozilla's code of conduct
122 | enforcement ladder](https://github.com/mozilla/diversity).
123 |
124 | For answers to common questions about this code of conduct, see the FAQ at
125 | https://www.contributor-covenant.org/faq. Translations are available at
126 | https://www.contributor-covenant.org/translations.
127 |
128 | [homepage]: https://www.contributor-covenant.org
129 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug-report.yml:
--------------------------------------------------------------------------------
1 | name: 🐛 Bug Report
2 | # title: " "
3 | description: Problems with YOLOv5
4 | labels: [bug, triage]
5 | body:
6 | - type: markdown
7 | attributes:
8 | value: |
9 | Thank you for submitting a YOLOv5 🐛 Bug Report!
10 |
11 | - type: checkboxes
12 | attributes:
13 | label: Search before asking
14 | description: >
15 | Please search the [issues](https://github.com/ultralytics/yolov5/issues) to see if a similar bug report already exists.
16 | options:
17 | - label: >
18 | I have searched the YOLOv5 [issues](https://github.com/ultralytics/yolov5/issues) and found no similar bug report.
19 | required: true
20 |
21 | - type: dropdown
22 | attributes:
23 | label: YOLOv5 Component
24 | description: |
25 | Please select the part of YOLOv5 where you found the bug.
26 | multiple: true
27 | options:
28 | - "Training"
29 | - "Validation"
30 | - "Detection"
31 | - "Export"
32 | - "PyTorch Hub"
33 | - "Multi-GPU"
34 | - "Evolution"
35 | - "Integrations"
36 | - "Other"
37 | validations:
38 | required: false
39 |
40 | - type: textarea
41 | attributes:
42 | label: Bug
43 | description: Provide console output with error messages and/or screenshots of the bug.
44 | placeholder: |
45 | 💡 ProTip! Include as much information as possible (screenshots, logs, tracebacks etc.) to receive the most helpful response.
46 | validations:
47 | required: true
48 |
49 | - type: textarea
50 | attributes:
51 | label: Environment
52 | description: Please specify the software and hardware you used to produce the bug.
53 | placeholder: |
54 | - YOLO: YOLOv5 🚀 v6.0-67-g60e42e1 torch 1.9.0+cu111 CUDA:0 (A100-SXM4-40GB, 40536MiB)
55 | - OS: Ubuntu 20.04
56 | - Python: 3.9.0
57 | validations:
58 | required: false
59 |
60 | - type: textarea
61 | attributes:
62 | label: Minimal Reproducible Example
63 | description: >
64 | When asking a question, people will be better able to provide help if you provide code that they can easily understand and use to **reproduce** the problem.
65 | This is referred to by community members as creating a [minimal reproducible example](https://stackoverflow.com/help/minimal-reproducible-example).
66 | placeholder: |
67 | ```
68 | # Code to reproduce your issue here
69 | ```
70 | validations:
71 | required: false
72 |
73 | - type: textarea
74 | attributes:
75 | label: Additional
76 | description: Anything else you would like to share?
77 |
78 | - type: checkboxes
79 | attributes:
80 | label: Are you willing to submit a PR?
81 | description: >
82 | (Optional) We encourage you to submit a [Pull Request](https://github.com/ultralytics/yolov5/pulls) (PR) to help improve YOLOv5 for everyone, especially if you have a good understanding of how to implement a fix or feature.
83 | See the YOLOv5 [Contributing Guide](https://github.com/ultralytics/yolov5/blob/master/CONTRIBUTING.md) to get started.
84 | options:
85 | - label: Yes I'd like to help by submitting a PR!
86 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------
1 | blank_issues_enabled: true
2 | contact_links:
3 | - name: 💬 Forum
4 | url: https://community.ultralytics.com/
5 | about: Ask on Ultralytics Community Forum
6 | - name: Stack Overflow
7 | url: https://stackoverflow.com/search?q=YOLOv5
8 | about: Ask on Stack Overflow with 'YOLOv5' tag
9 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature-request.yml:
--------------------------------------------------------------------------------
1 | name: 🚀 Feature Request
2 | description: Suggest a YOLOv5 idea
3 | # title: " "
4 | labels: [enhancement]
5 | body:
6 | - type: markdown
7 | attributes:
8 | value: |
9 | Thank you for submitting a YOLOv5 🚀 Feature Request!
10 |
11 | - type: checkboxes
12 | attributes:
13 | label: Search before asking
14 | description: >
15 | Please search the [issues](https://github.com/ultralytics/yolov5/issues) to see if a similar feature request already exists.
16 | options:
17 | - label: >
18 | I have searched the YOLOv5 [issues](https://github.com/ultralytics/yolov5/issues) and found no similar feature requests.
19 | required: true
20 |
21 | - type: textarea
22 | attributes:
23 | label: Description
24 | description: A short description of your feature.
25 | placeholder: |
26 | What new feature would you like to see in YOLOv5?
27 | validations:
28 | required: true
29 |
30 | - type: textarea
31 | attributes:
32 | label: Use case
33 | description: |
34 | Describe the use case of your feature request. It will help us understand and prioritize the feature request.
35 | placeholder: |
36 | How would this feature be used, and who would use it?
37 |
38 | - type: textarea
39 | attributes:
40 | label: Additional
41 | description: Anything else you would like to share?
42 |
43 | - type: checkboxes
44 | attributes:
45 | label: Are you willing to submit a PR?
46 | description: >
47 | (Optional) We encourage you to submit a [Pull Request](https://github.com/ultralytics/yolov5/pulls) (PR) to help improve YOLOv5 for everyone, especially if you have a good understanding of how to implement a fix or feature.
48 | See the YOLOv5 [Contributing Guide](https://github.com/ultralytics/yolov5/blob/master/CONTRIBUTING.md) to get started.
49 | options:
50 | - label: Yes I'd like to help by submitting a PR!
51 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/question.yml:
--------------------------------------------------------------------------------
1 | name: ❓ Question
2 | description: Ask a YOLOv5 question
3 | # title: " "
4 | labels: [question]
5 | body:
6 | - type: markdown
7 | attributes:
8 | value: |
9 | Thank you for asking a YOLOv5 ❓ Question!
10 |
11 | - type: checkboxes
12 | attributes:
13 | label: Search before asking
14 | description: >
15 | Please search the [issues](https://github.com/ultralytics/yolov5/issues) and [discussions](https://github.com/ultralytics/yolov5/discussions) to see if a similar question already exists.
16 | options:
17 | - label: >
18 | I have searched the YOLOv5 [issues](https://github.com/ultralytics/yolov5/issues) and [discussions](https://github.com/ultralytics/yolov5/discussions) and found no similar questions.
19 | required: true
20 |
21 | - type: textarea
22 | attributes:
23 | label: Question
24 | description: What is your question?
25 | placeholder: |
26 | 💡 ProTip! Include as much information as possible (screenshots, logs, tracebacks etc.) to receive the most helpful response.
27 | validations:
28 | required: true
29 |
30 | - type: textarea
31 | attributes:
32 | label: Additional
33 | description: Anything else you would like to share?
34 |
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 |
10 |
--------------------------------------------------------------------------------
/.github/SECURITY.md:
--------------------------------------------------------------------------------
1 | # Security Policy
2 |
3 | We aim to make YOLOv5 🚀 as secure as possible! If you find potential vulnerabilities or have any concerns, please let us know so we can investigate and take corrective action if needed.
4 |
5 | ### Reporting a Vulnerability
6 |
7 | To report vulnerabilities please email us at hello@ultralytics.com or visit https://ultralytics.com/contact. Thank you!
8 |
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 | updates:
3 | - package-ecosystem: pip
4 | directory: "/"
5 | schedule:
6 | interval: weekly
7 | time: "04:00"
8 | open-pull-requests-limit: 10
9 | reviewers:
10 | - glenn-jocher
11 | labels:
12 | - dependencies
13 |
14 | - package-ecosystem: github-actions
15 | directory: "/"
16 | schedule:
17 | interval: weekly
18 | time: "04:00"
19 | open-pull-requests-limit: 5
20 | reviewers:
21 | - glenn-jocher
22 | labels:
23 | - dependencies
24 |
--------------------------------------------------------------------------------
/.github/workflows/codeql-analysis.yml:
--------------------------------------------------------------------------------
1 | # This action runs GitHub's industry-leading static analysis engine, CodeQL, against a repository's source code to find security vulnerabilities.
2 | # https://github.com/github/codeql-action
3 |
4 | name: "CodeQL"
5 |
6 | on:
7 | schedule:
8 | - cron: '0 0 1 * *' # Runs at 00:00 UTC on the 1st of every month
9 |
10 | jobs:
11 | analyze:
12 | name: Analyze
13 | runs-on: ubuntu-latest
14 |
15 | strategy:
16 | fail-fast: false
17 | matrix:
18 | language: ['python']
19 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
20 | # Learn more:
21 | # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
22 |
23 | steps:
24 | - name: Checkout repository
25 | uses: actions/checkout@v3
26 |
27 | # Initializes the CodeQL tools for scanning.
28 | - name: Initialize CodeQL
29 | uses: github/codeql-action/init@v2
30 | with:
31 | languages: ${{ matrix.language }}
32 | # If you wish to specify custom queries, you can do so here or in a config file.
33 | # By default, queries listed here will override any specified in a config file.
34 | # Prefix the list here with "+" to use these queries and those in the config file.
35 | # queries: ./path/to/local/query, your-org/your-repo/queries@main
36 |
37 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
38 | # If this step fails, then you should remove it and run the build manually (see below)
39 | - name: Autobuild
40 | uses: github/codeql-action/autobuild@v2
41 |
42 | # ℹ️ Command-line programs to run using the OS shell.
43 | # 📚 https://git.io/JvXDl
44 |
45 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
46 | # and modify them (or add more) to build your code if your project
47 | # uses a compiled language
48 |
49 | #- run: |
50 | # make bootstrap
51 | # make release
52 |
53 | - name: Perform CodeQL Analysis
54 | uses: github/codeql-action/analyze@v2
55 |
--------------------------------------------------------------------------------
/.github/workflows/docker.yml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # Builds ultralytics/yolov5:latest images on DockerHub https://hub.docker.com/r/ultralytics/yolov5
3 |
4 | name: Publish Docker Images
5 |
6 | on:
7 | push:
8 | branches: [ master ]
9 |
10 | jobs:
11 | docker:
12 | if: github.repository == 'ultralytics/yolov5'
13 | name: Push Docker image to Docker Hub
14 | runs-on: ubuntu-latest
15 | steps:
16 | - name: Checkout repo
17 | uses: actions/checkout@v3
18 |
19 | - name: Set up QEMU
20 | uses: docker/setup-qemu-action@v2
21 |
22 | - name: Set up Docker Buildx
23 | uses: docker/setup-buildx-action@v2
24 |
25 | - name: Login to Docker Hub
26 | uses: docker/login-action@v2
27 | with:
28 | username: ${{ secrets.DOCKERHUB_USERNAME }}
29 | password: ${{ secrets.DOCKERHUB_TOKEN }}
30 |
31 | - name: Build and push arm64 image
32 | uses: docker/build-push-action@v3
33 | continue-on-error: true
34 | with:
35 | context: .
36 | platforms: linux/arm64
37 | file: utils/docker/Dockerfile-arm64
38 | push: true
39 | tags: ultralytics/yolov5:latest-arm64
40 |
41 | - name: Build and push CPU image
42 | uses: docker/build-push-action@v3
43 | continue-on-error: true
44 | with:
45 | context: .
46 | file: utils/docker/Dockerfile-cpu
47 | push: true
48 | tags: ultralytics/yolov5:latest-cpu
49 |
50 | - name: Build and push GPU image
51 | uses: docker/build-push-action@v3
52 | continue-on-error: true
53 | with:
54 | context: .
55 | file: utils/docker/Dockerfile
56 | push: true
57 | tags: ultralytics/yolov5:latest
58 |
--------------------------------------------------------------------------------
/.github/workflows/greetings.yml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | name: Greetings
4 |
5 | on:
6 | pull_request_target:
7 | types: [opened]
8 | issues:
9 | types: [opened]
10 |
11 | jobs:
12 | greeting:
13 | runs-on: ubuntu-latest
14 | steps:
15 | - uses: actions/first-interaction@v1
16 | with:
17 | repo-token: ${{ secrets.GITHUB_TOKEN }}
18 | pr-message: |
19 | 👋 Hello @${{ github.actor }}, thank you for submitting a YOLOv5 🚀 PR! To allow your work to be integrated as seamlessly as possible, we advise you to:
20 |
21 | - ✅ Verify your PR is **up-to-date** with `ultralytics/yolov5` `master` branch. If your PR is behind you can update your code by clicking the 'Update branch' button or by running `git pull` and `git merge master` locally.
22 | - ✅ Verify all YOLOv5 Continuous Integration (CI) **checks are passing**.
23 | - ✅ Reduce changes to the absolute **minimum** required for your bug fix or feature addition. _"It is not daily increase but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ — Bruce Lee
24 |
25 | issue-message: |
26 | 👋 Hello @${{ github.actor }}, thank you for your interest in YOLOv5 🚀! Please visit our ⭐️ [Tutorials](https://github.com/ultralytics/yolov5/wiki#tutorials) to get started, where you can find quickstart guides for simple tasks like [Custom Data Training](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data) all the way to advanced concepts like [Hyperparameter Evolution](https://github.com/ultralytics/yolov5/issues/607).
27 |
28 | If this is a 🐛 Bug Report, please provide screenshots and **minimum viable code to reproduce your issue**; otherwise we cannot help you.
29 |
30 | If this is a custom training ❓ Question, please provide as much information as possible, including dataset images, training logs, screenshots, and a public link to online [W&B logging](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data#visualize) if available.
31 |
32 | For business inquiries or professional support requests please visit https://ultralytics.com or email support@ultralytics.com.
33 |
34 | ## Requirements
35 |
36 | [**Python>=3.7.0**](https://www.python.org/) with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) installed including [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). To get started:
37 | ```bash
38 | git clone https://github.com/ultralytics/yolov5 # clone
39 | cd yolov5
40 | pip install -r requirements.txt # install
41 | ```
42 |
43 | ## Environments
44 |
45 | YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):
46 |
47 | - **Notebooks** with free GPU:
48 | - **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)
49 | - **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)
50 | - **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart)
51 |
52 | ## Status
53 |
54 |
55 |
56 | If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on MacOS, Windows, and Ubuntu every 24 hours and on every commit.
57 |
58 |
--------------------------------------------------------------------------------
/.github/workflows/stale.yml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | name: Close stale issues
4 | on:
5 | schedule:
6 | - cron: '0 0 * * *' # Runs at 00:00 UTC every day
7 |
8 | jobs:
9 | stale:
10 | runs-on: ubuntu-latest
11 | steps:
12 | - uses: actions/stale@v6
13 | with:
14 | repo-token: ${{ secrets.GITHUB_TOKEN }}
15 | stale-issue-message: |
16 | 👋 Hello, this issue has been automatically marked as stale because it has not had recent activity. Please note it will be closed if no further activity occurs.
17 |
18 | Access additional [YOLOv5](https://ultralytics.com/yolov5) 🚀 resources:
19 | - **Wiki** – https://github.com/ultralytics/yolov5/wiki
20 | - **Tutorials** – https://github.com/ultralytics/yolov5#tutorials
21 | - **Docs** – https://docs.ultralytics.com
22 |
23 | Access additional [Ultralytics](https://ultralytics.com) ⚡ resources:
24 | - **Ultralytics HUB** – https://ultralytics.com/hub
25 | - **Vision API** – https://ultralytics.com/yolov5
26 | - **About Us** – https://ultralytics.com/about
27 | - **Join Our Team** – https://ultralytics.com/work
28 | - **Contact Us** – https://ultralytics.com/contact
29 |
30 | Feel free to inform us of any other **issues** you discover or **feature requests** that come to mind in the future. Pull Requests (PRs) are also always welcomed!
31 |
32 | Thank you for your contributions to YOLOv5 🚀 and Vision AI ⭐!
33 |
34 | stale-pr-message: 'This pull request has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions to YOLOv5 🚀 and Vision AI ⭐.'
35 | days-before-issue-stale: 30
36 | days-before-issue-close: 10
37 | days-before-pr-stale: 90
38 | days-before-pr-close: 30
39 | exempt-issue-labels: 'documentation,tutorial,TODO'
40 | operations-per-run: 300 # The maximum number of operations per run, used to control rate limiting.
41 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Repo-specific GitIgnore ----------------------------------------------------------------------------------------------
2 | *.jpg
3 | *.jpeg
4 | *.png
5 | *.bmp
6 | *.tif
7 | *.tiff
8 | *.heic
9 | *.JPG
10 | *.JPEG
11 | *.PNG
12 | *.BMP
13 | *.TIF
14 | *.TIFF
15 | *.HEIC
16 | *.mp4
17 | *.mov
18 | *.MOV
19 | *.avi
20 | *.data
21 | *.json
22 | *.cfg
23 | !setup.cfg
24 | !cfg/yolov3*.cfg
25 |
26 | storage.googleapis.com
27 | runs/*
28 | data/*
29 | data/images/*
30 | !data/*.yaml
31 | !data/hyps
32 | !data/scripts
33 | !data/images
34 | !data/images/zidane.jpg
35 | !data/images/bus.jpg
36 | !data/*.sh
37 |
38 | results*.csv
39 |
40 | # Datasets -------------------------------------------------------------------------------------------------------------
41 | coco/
42 | coco128/
43 | VOC/
44 |
45 | # MATLAB GitIgnore -----------------------------------------------------------------------------------------------------
46 | *.m~
47 | *.mat
48 | !targets*.mat
49 |
50 | # Neural Network weights -----------------------------------------------------------------------------------------------
51 | *.weights
52 | *.pt
53 | *.pb
54 | *.onnx
55 | *.engine
56 | *.mlmodel
57 | *.torchscript
58 | *.tflite
59 | *.h5
60 | *_saved_model/
61 | *_web_model/
62 | *_openvino_model/
63 | darknet53.conv.74
64 | yolov3-tiny.conv.15
65 |
66 | # GitHub Python GitIgnore ----------------------------------------------------------------------------------------------
67 | # Byte-compiled / optimized / DLL files
68 | __pycache__/
69 | *.py[cod]
70 | *$py.class
71 |
72 | # C extensions
73 | *.so
74 |
75 | # Distribution / packaging
76 | .Python
77 | env/
78 | build/
79 | develop-eggs/
80 | dist/
81 | downloads/
82 | eggs/
83 | .eggs/
84 | lib/
85 | lib64/
86 | parts/
87 | sdist/
88 | var/
89 | wheels/
90 | *.egg-info/
91 | /wandb/
92 | .installed.cfg
93 | *.egg
94 |
95 |
96 | # PyInstaller
97 | # Usually these files are written by a python script from a template
98 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
99 | *.manifest
100 | *.spec
101 |
102 | # Installer logs
103 | pip-log.txt
104 | pip-delete-this-directory.txt
105 |
106 | # Unit test / coverage reports
107 | htmlcov/
108 | .tox/
109 | .coverage
110 | .coverage.*
111 | .cache
112 | nosetests.xml
113 | coverage.xml
114 | *.cover
115 | .hypothesis/
116 |
117 | # Translations
118 | *.mo
119 | *.pot
120 |
121 | # Django stuff:
122 | *.log
123 | local_settings.py
124 |
125 | # Flask stuff:
126 | instance/
127 | .webassets-cache
128 |
129 | # Scrapy stuff:
130 | .scrapy
131 |
132 | # Sphinx documentation
133 | docs/_build/
134 |
135 | # PyBuilder
136 | target/
137 |
138 | # Jupyter Notebook
139 | .ipynb_checkpoints
140 |
141 | # pyenv
142 | .python-version
143 |
144 | # celery beat schedule file
145 | celerybeat-schedule
146 |
147 | # SageMath parsed files
148 | *.sage.py
149 |
150 | # dotenv
151 | .env
152 |
153 | # virtualenv
154 | .venv*
155 | venv*/
156 | ENV*/
157 |
158 | # Spyder project settings
159 | .spyderproject
160 | .spyproject
161 |
162 | # Rope project settings
163 | .ropeproject
164 |
165 | # mkdocs documentation
166 | /site
167 |
168 | # mypy
169 | .mypy_cache/
170 |
171 |
172 | # https://github.com/github/gitignore/blob/master/Global/macOS.gitignore -----------------------------------------------
173 |
174 | # General
175 | .DS_Store
176 | .AppleDouble
177 | .LSOverride
178 |
179 | # Icon must end with two \r
180 | Icon
181 | Icon?
182 |
183 | # Thumbnails
184 | ._*
185 |
186 | # Files that might appear in the root of a volume
187 | .DocumentRevisions-V100
188 | .fseventsd
189 | .Spotlight-V100
190 | .TemporaryItems
191 | .Trashes
192 | .VolumeIcon.icns
193 | .com.apple.timemachine.donotpresent
194 |
195 | # Directories potentially created on remote AFP share
196 | .AppleDB
197 | .AppleDesktop
198 | Network Trash Folder
199 | Temporary Items
200 | .apdisk
201 |
202 |
203 | # https://github.com/github/gitignore/blob/master/Global/JetBrains.gitignore
204 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
205 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
206 |
207 | # User-specific stuff:
208 | .idea/*
209 | .idea/**/workspace.xml
210 | .idea/**/tasks.xml
211 | .idea/dictionaries
212 | .html # Bokeh Plots
213 | .pg # TensorFlow Frozen Graphs
214 | .avi # videos
215 |
216 | # Sensitive or high-churn files:
217 | .idea/**/dataSources/
218 | .idea/**/dataSources.ids
219 | .idea/**/dataSources.local.xml
220 | .idea/**/sqlDataSources.xml
221 | .idea/**/dynamic.xml
222 | .idea/**/uiDesigner.xml
223 |
224 | # Gradle:
225 | .idea/**/gradle.xml
226 | .idea/**/libraries
227 |
228 | # CMake
229 | cmake-build-debug/
230 | cmake-build-release/
231 |
232 | # Mongo Explorer plugin:
233 | .idea/**/mongoSettings.xml
234 |
235 | ## File-based project format:
236 | *.iws
237 |
238 | ## Plugin-specific files:
239 |
240 | # IntelliJ
241 | out/
242 |
243 | # mpeltonen/sbt-idea plugin
244 | .idea_modules/
245 |
246 | # JIRA plugin
247 | atlassian-ide-plugin.xml
248 |
249 | # Cursive Clojure plugin
250 | .idea/replstate.xml
251 |
252 | # Crashlytics plugin (for Android Studio and IntelliJ)
253 | com_crashlytics_export_strings.xml
254 | crashlytics.properties
255 | crashlytics-build.properties
256 | fabric.properties
257 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | # Define hooks for code formatting
2 | # These hooks are applied to changed files at commit time once a user has installed and linked the commit hook
3 |
4 | default_language_version:
5 | python: python3.8
6 |
7 | # Define bot property if installed via https://github.com/marketplace/pre-commit-ci
8 | ci:
9 | autofix_prs: true
10 | autoupdate_commit_msg: '[pre-commit.ci] pre-commit suggestions'
11 | autoupdate_schedule: monthly
12 | # submodules: true
13 |
14 | repos:
15 | - repo: https://github.com/pre-commit/pre-commit-hooks
16 | rev: v4.3.0
17 | hooks:
18 | # - id: end-of-file-fixer
19 | - id: trailing-whitespace
20 | - id: check-case-conflict
21 | - id: check-yaml
22 | - id: check-toml
23 | - id: pretty-format-json
24 | - id: check-docstring-first
25 |
26 | - repo: https://github.com/asottile/pyupgrade
27 | rev: v3.2.0
28 | hooks:
29 | - id: pyupgrade
30 | name: Upgrade code
31 | args: [ --py37-plus ]
32 |
33 | - repo: https://github.com/PyCQA/isort
34 | rev: 5.10.1
35 | hooks:
36 | - id: isort
37 | name: Sort imports
38 |
39 | - repo: https://github.com/pre-commit/mirrors-yapf
40 | rev: v0.32.0
41 | hooks:
42 | - id: yapf
43 | name: YAPF formatting
44 |
45 | - repo: https://github.com/executablebooks/mdformat
46 | rev: 0.7.16
47 | hooks:
48 | - id: mdformat
49 | name: MD formatting
50 | additional_dependencies:
51 | - mdformat-gfm
52 | - mdformat-black
53 | exclude: "README.md|README_cn.md"
54 |
55 | - repo: https://github.com/asottile/yesqa
56 | rev: v1.4.0
57 | hooks:
58 | - id: yesqa
59 |
60 | - repo: https://github.com/PyCQA/flake8
61 | rev: 5.0.4
62 | hooks:
63 | - id: flake8
64 | name: PEP8
65 |
--------------------------------------------------------------------------------
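
The config above only takes effect once the hooks are installed into the local git checkout. A minimal sketch of that workflow in Python, assuming `pip install pre-commit` has been run and that `python -m pre_commit` behaves like the `pre-commit` console script:

```python
# Sketch: install the commit hook, then run all configured hooks once over the repo.
import subprocess
import sys

# Link the hook into .git/hooks so it fires on every `git commit`
subprocess.run([sys.executable, "-m", "pre_commit", "install"], check=True)

# Run every configured hook (trailing-whitespace, isort, yapf, flake8, ...) on all files
subprocess.run([sys.executable, "-m", "pre_commit", "run", "--all-files"], check=True)
```
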
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | ## Contributing to YOLOv5 🚀
2 |
3 | We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible, whether it's:
4 |
5 | - Reporting a bug
6 | - Discussing the current state of the code
7 | - Submitting a fix
8 | - Proposing a new feature
9 | - Becoming a maintainer
10 |
11 | YOLOv5 works so well due to our combined community effort, and for every small improvement you contribute you will be
12 | helping push the frontiers of what's possible in AI 😃!
13 |
14 | ## Submitting a Pull Request (PR) 🛠️
15 |
16 | Submitting a PR is easy! This example shows how to submit a PR for updating `requirements.txt` in 4 steps:
17 |
18 | ### 1. Select File to Update
19 |
20 | Select `requirements.txt` to update by clicking on it in GitHub.
21 |
22 | 
23 |
24 | ### 2. Click 'Edit this file'
25 |
26 | Button is in top-right corner.
27 |
28 | 
29 |
30 | ### 3. Make Changes
31 |
32 | Change `matplotlib` version from `3.2.2` to `3.3`.
33 |
34 | 
35 |
36 | ### 4. Preview Changes and Submit PR
37 |
38 | Click on the **Preview changes** tab to verify your updates. At the bottom of the screen select 'Create a **new branch**
39 | for this commit', assign your branch a descriptive name such as `fix/matplotlib_version` and click the green **Propose
40 | changes** button. All done, your PR is now submitted to YOLOv5 for review and approval 😃!
41 |
42 | 
43 |
44 | ### PR recommendations
45 |
46 | To allow your work to be integrated as seamlessly as possible, we advise you to:
47 |
48 | - ✅ Verify your PR is **up-to-date** with `ultralytics/yolov5` `master` branch. If your PR is behind you can update
49 | your code by clicking the 'Update branch' button or by running `git pull` and `git merge master` locally.
50 |
51 | 
52 |
53 | - ✅ Verify all YOLOv5 Continuous Integration (CI) **checks are passing**.
54 |
55 | 
56 |
57 | - ✅ Reduce changes to the absolute **minimum** required for your bug fix or feature addition. _"It is not daily increase
58 | but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ — Bruce Lee
59 |
60 | ## Submitting a Bug Report 🐛
61 |
62 | If you spot a problem with YOLOv5 please submit a Bug Report!
63 |
64 | For us to start investigating a possible problem we need to be able to reproduce it ourselves first. We've created a few
65 | short guidelines below to help users provide what we need in order to get started.
66 |
67 | When asking a question, people will be better able to provide help if you provide **code** that they can easily
68 | understand and use to **reproduce** the problem. This is referred to by community members as creating
69 | a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example). Your code that reproduces
70 | the problem should be:
71 |
72 | - ✅ **Minimal** – Use as little code as possible that still produces the same problem
73 | - ✅ **Complete** – Provide **all** parts someone else needs to reproduce your problem in the question itself
74 | - ✅ **Reproducible** – Test the code you're about to provide to make sure it reproduces the problem
75 |
76 | In addition to the above requirements, for [Ultralytics](https://ultralytics.com/) to provide assistance your code
77 | should be:
78 |
79 | - ✅ **Current** – Verify that your code is up-to-date with current
80 | GitHub [master](https://github.com/ultralytics/yolov5/tree/master), and if necessary `git pull` or `git clone` a new
81 | copy to ensure your problem has not already been resolved by previous commits.
82 | - ✅ **Unmodified** – Your problem must be reproducible without any modifications to the codebase in this
83 | repository. [Ultralytics](https://ultralytics.com/) does not provide support for custom code ⚠️.
84 |
85 | If you believe your problem meets all of the above criteria, please raise a new issue using the 🐛
86 | **Bug Report** [template](https://github.com/ultralytics/yolov5/issues/new/choose), providing
87 | a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example) to help us better
88 | understand and diagnose your problem.
89 |
90 | ## License
91 |
92 | By contributing, you agree that your contributions will be licensed under
93 | the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/).
94 |
--------------------------------------------------------------------------------
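
To make the "Minimal, Complete, Reproducible" guidance above concrete, a bug-report script might look like the sketch below. It uses the public `torch.hub` entry point; the model name and image URL are illustrative, not required by the guide:

```python
# A hypothetical minimal reproducible example (MRE) for a YOLOv5 bug report:
# self-contained, current with master, and unmodified from this repository.
import torch

# Load a pretrained model through the public torch.hub entry point
model = torch.hub.load('ultralytics/yolov5', 'yolov5s')

# Run inference on a sample image; include this script plus its full console
# output (and any traceback) in the Bug Report template
results = model('https://ultralytics.com/images/zidane.jpg')
results.print()
```
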
/README_rkopt.md:
--------------------------------------------------------------------------------
1 | # YOLOv5 - RKNN optimize
2 |
3 | ## Source
4 |
5 | Based on https://github.com/ultralytics/yolov5 (v7.0) at commit 915bbf294bb74c859f0b41f1c23bc395014ea679
6 |
7 |
8 |
9 | ## What's different
10 |
11 | The following optimizations were applied without changing inference results:
12 |
13 | - Optimized the Focus/SPPF blocks for better performance with identical outputs
14 | - Changed the output nodes and removed post-processing from the model (the in-model post-processing block is unfriendly to quantization)
15 |
16 |
17 |
18 | The following optimization changes inference results:
19 |
20 | - Use ReLU instead of SiLU as the activation layer (only applies when training a new model); see the sketch after this file
21 |
22 |
23 |
24 | ## How to use
25 |
26 | ```bash
27 | # for detection model
28 | python export.py --rknpu --weight yolov5s.pt
29 |
30 | # for segmentation model
31 | python export.py --rknpu --weight yolov5s-seg.pt
32 | ```
33 |
34 | - Replace 'yolov5s.pt' / 'yolov5s-seg.pt' with the path to your own model
35 | - A file named "RK_anchors.txt" will be generated; it is used in the post-processing stage.
36 | - **NOTICE: Please call export.py with --rknpu; do not change the default rknpu value in export.py.**
37 |
38 |
39 |
40 | ## Deploy demo
41 |
42 | Please refer to https://github.com/airockchip/rknn_model_zoo
43 |
44 |
--------------------------------------------------------------------------------
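
The ReLU-for-SiLU note in "What's different" above is a training-time choice made in the model config. Purely as an illustration (this is not the repo's own mechanism, and a model modified this way must be retrained to stay accurate), swapping activations on an existing PyTorch module tree can be sketched like this:

```python
# Illustrative sketch only: recursively replace SiLU activations with ReLU in a
# PyTorch module tree. Retraining is required afterwards; this is not how the
# repo itself applies the change.
import torch.nn as nn

def silu_to_relu(module: nn.Module) -> None:
    for name, child in module.named_children():
        if isinstance(child, nn.SiLU):
            setattr(module, name, nn.ReLU(inplace=True))  # swap activation in place
        else:
            silu_to_relu(child)  # recurse into submodules
```
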
/data/Argoverse.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ by Argo AI
3 | # Example usage: python train.py --data Argoverse.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── Argoverse ← downloads here (31.3 GB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/Argoverse # dataset root dir
12 | train: Argoverse-1.1/images/train/ # train images (relative to 'path') 39384 images
13 | val: Argoverse-1.1/images/val/ # val images (relative to 'path') 15062 images
14 | test: Argoverse-1.1/images/test/ # test images (optional) https://eval.ai/web/challenges/challenge-page/800/overview
15 |
16 | # Classes
17 | names:
18 | 0: person
19 | 1: bicycle
20 | 2: car
21 | 3: motorcycle
22 | 4: bus
23 | 5: truck
24 | 6: traffic_light
25 | 7: stop_sign
26 |
27 |
28 | # Download script/URL (optional) ---------------------------------------------------------------------------------------
29 | download: |
30 | import json
31 |
32 | from tqdm import tqdm
33 | from utils.general import download, Path
34 |
35 |
36 | def argoverse2yolo(set):
37 | labels = {}
38 | a = json.load(open(set, "rb"))
39 | for annot in tqdm(a['annotations'], desc=f"Converting {set} to YOLOv5 format..."):
40 | img_id = annot['image_id']
41 | img_name = a['images'][img_id]['name']
42 | img_label_name = f'{img_name[:-3]}txt'
43 |
44 | cls = annot['category_id'] # instance class id
45 | x_center, y_center, width, height = annot['bbox']
46 | x_center = (x_center + width / 2) / 1920.0 # offset and scale
47 | y_center = (y_center + height / 2) / 1200.0 # offset and scale
48 | width /= 1920.0 # scale
49 | height /= 1200.0 # scale
50 |
51 | img_dir = set.parents[2] / 'Argoverse-1.1' / 'labels' / a['seq_dirs'][a['images'][annot['image_id']]['sid']]
52 | if not img_dir.exists():
53 | img_dir.mkdir(parents=True, exist_ok=True)
54 |
55 | k = str(img_dir / img_label_name)
56 | if k not in labels:
57 | labels[k] = []
58 | labels[k].append(f"{cls} {x_center} {y_center} {width} {height}\n")
59 |
60 | for k in labels:
61 | with open(k, "w") as f:
62 | f.writelines(labels[k])
63 |
64 |
65 | # Download
66 | dir = Path(yaml['path']) # dataset root dir
67 | urls = ['https://argoverse-hd.s3.us-east-2.amazonaws.com/Argoverse-HD-Full.zip']
68 | download(urls, dir=dir, delete=False)
69 |
70 | # Convert
71 | annotations_dir = 'Argoverse-HD/annotations/'
72 | (dir / 'Argoverse-1.1' / 'tracking').rename(dir / 'Argoverse-1.1' / 'images') # rename 'tracking' to 'images'
73 | for d in "train.json", "val.json":
74 | argoverse2yolo(dir / annotations_dir / d) # convert Argoverse annotations to YOLO labels
75 |
--------------------------------------------------------------------------------
/data/GlobalWheat2020.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # Global Wheat 2020 dataset http://www.global-wheat.com/ by University of Saskatchewan
3 | # Example usage: python train.py --data GlobalWheat2020.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── GlobalWheat2020 ← downloads here (7.0 GB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/GlobalWheat2020 # dataset root dir
12 | train: # train images (relative to 'path') 3422 images
13 | - images/arvalis_1
14 | - images/arvalis_2
15 | - images/arvalis_3
16 | - images/ethz_1
17 | - images/rres_1
18 | - images/inrae_1
19 | - images/usask_1
20 | val: # val images (relative to 'path') 748 images (WARNING: train set contains ethz_1)
21 | - images/ethz_1
22 | test: # test images (optional) 1276 images
23 | - images/utokyo_1
24 | - images/utokyo_2
25 | - images/nau_1
26 | - images/uq_1
27 |
28 | # Classes
29 | names:
30 | 0: wheat_head
31 |
32 |
33 | # Download script/URL (optional) ---------------------------------------------------------------------------------------
34 | download: |
35 | from utils.general import download, Path
36 |
37 |
38 | # Download
39 | dir = Path(yaml['path']) # dataset root dir
40 | urls = ['https://zenodo.org/record/4298502/files/global-wheat-codalab-official.zip',
41 | 'https://github.com/ultralytics/yolov5/releases/download/v1.0/GlobalWheat2020_labels.zip']
42 | download(urls, dir=dir)
43 |
44 | # Make Directories
45 | for p in 'annotations', 'images', 'labels':
46 | (dir / p).mkdir(parents=True, exist_ok=True)
47 |
48 | # Move
49 | for p in 'arvalis_1', 'arvalis_2', 'arvalis_3', 'ethz_1', 'rres_1', 'inrae_1', 'usask_1', \
50 | 'utokyo_1', 'utokyo_2', 'nau_1', 'uq_1':
51 | (dir / p).rename(dir / 'images' / p) # move to /images
52 | f = (dir / p).with_suffix('.json') # json file
53 | if f.exists():
54 | f.rename((dir / 'annotations' / p).with_suffix('.json')) # move to /annotations
55 |
--------------------------------------------------------------------------------
/data/SKU-110K.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 by Trax Retail
3 | # Example usage: python train.py --data SKU-110K.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── SKU-110K ← downloads here (13.6 GB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/SKU-110K # dataset root dir
12 | train: train.txt # train images (relative to 'path') 8219 images
13 | val: val.txt # val images (relative to 'path') 588 images
14 | test: test.txt # test images (optional) 2936 images
15 |
16 | # Classes
17 | names:
18 | 0: object
19 |
20 |
21 | # Download script/URL (optional) ---------------------------------------------------------------------------------------
22 | download: |
23 | import shutil
24 | from tqdm import tqdm
25 | from utils.general import np, pd, Path, download, xyxy2xywh
26 |
27 |
28 | # Download
29 | dir = Path(yaml['path']) # dataset root dir
30 | parent = Path(dir.parent) # download dir
31 | urls = ['http://trax-geometry.s3.amazonaws.com/cvpr_challenge/SKU110K_fixed.tar.gz']
32 | download(urls, dir=parent, delete=False)
33 |
34 | # Rename directories
35 | if dir.exists():
36 | shutil.rmtree(dir)
37 | (parent / 'SKU110K_fixed').rename(dir) # rename dir
38 | (dir / 'labels').mkdir(parents=True, exist_ok=True) # create labels dir
39 |
40 | # Convert labels
41 | names = 'image', 'x1', 'y1', 'x2', 'y2', 'class', 'image_width', 'image_height' # column names
42 | for d in 'annotations_train.csv', 'annotations_val.csv', 'annotations_test.csv':
43 | x = pd.read_csv(dir / 'annotations' / d, names=names).values # annotations
44 | images, unique_images = x[:, 0], np.unique(x[:, 0])
45 | with open((dir / d).with_suffix('.txt').__str__().replace('annotations_', ''), 'w') as f:
46 | f.writelines(f'./images/{s}\n' for s in unique_images)
47 | for im in tqdm(unique_images, desc=f'Converting {dir / d}'):
48 | cls = 0 # single-class dataset
49 | with open((dir / 'labels' / im).with_suffix('.txt'), 'a') as f:
50 | for r in x[images == im]:
51 | w, h = r[6], r[7] # image width, height
52 | xywh = xyxy2xywh(np.array([[r[1] / w, r[2] / h, r[3] / w, r[4] / h]]))[0] # instance
53 | f.write(f"{cls} {xywh[0]:.5f} {xywh[1]:.5f} {xywh[2]:.5f} {xywh[3]:.5f}\n") # write label
54 |
--------------------------------------------------------------------------------
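
The SKU-110K script above pre-normalizes corner coordinates by image size and then calls `utils.general.xyxy2xywh`. Written out directly, the same corner-to-center conversion looks like this (a standalone, hypothetical helper, not the repo's function):

```python
# Corner coordinates (x1, y1, x2, y2) in pixels -> YOLO's normalized (xc, yc, w, h)
def xyxy_to_yolo(x1, y1, x2, y2, img_w, img_h):
    xc = (x1 + x2) / 2 / img_w  # box center x, normalized to [0, 1]
    yc = (y1 + y2) / 2 / img_h  # box center y
    w = (x2 - x1) / img_w       # box width
    h = (y2 - y1) / img_h       # box height
    return xc, yc, w, h

print(xyxy_to_yolo(100, 200, 300, 400, 1000, 800))  # -> (0.2, 0.375, 0.2, 0.25)
```
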
/data/VOC.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC by University of Oxford
3 | # Example usage: python train.py --data VOC.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── VOC ← downloads here (2.8 GB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/VOC
12 | train: # train images (relative to 'path') 16551 images
13 | - images/train2012
14 | - images/train2007
15 | - images/val2012
16 | - images/val2007
17 | val: # val images (relative to 'path') 4952 images
18 | - images/test2007
19 | test: # test images (optional)
20 | - images/test2007
21 |
22 | # Classes
23 | names:
24 | 0: aeroplane
25 | 1: bicycle
26 | 2: bird
27 | 3: boat
28 | 4: bottle
29 | 5: bus
30 | 6: car
31 | 7: cat
32 | 8: chair
33 | 9: cow
34 | 10: diningtable
35 | 11: dog
36 | 12: horse
37 | 13: motorbike
38 | 14: person
39 | 15: pottedplant
40 | 16: sheep
41 | 17: sofa
42 | 18: train
43 | 19: tvmonitor
44 |
45 |
46 | # Download script/URL (optional) ---------------------------------------------------------------------------------------
47 | download: |
48 | import xml.etree.ElementTree as ET
49 |
50 | from tqdm import tqdm
51 | from utils.general import download, Path
52 |
53 |
54 | def convert_label(path, lb_path, year, image_id):
55 | def convert_box(size, box):
56 | dw, dh = 1. / size[0], 1. / size[1]
57 | x, y, w, h = (box[0] + box[1]) / 2.0 - 1, (box[2] + box[3]) / 2.0 - 1, box[1] - box[0], box[3] - box[2]
58 | return x * dw, y * dh, w * dw, h * dh
59 |
60 | in_file = open(path / f'VOC{year}/Annotations/{image_id}.xml')
61 | out_file = open(lb_path, 'w')
62 | tree = ET.parse(in_file)
63 | root = tree.getroot()
64 | size = root.find('size')
65 | w = int(size.find('width').text)
66 | h = int(size.find('height').text)
67 |
68 | names = list(yaml['names'].values()) # names list
69 | for obj in root.iter('object'):
70 | cls = obj.find('name').text
71 | if cls in names and int(obj.find('difficult').text) != 1:
72 | xmlbox = obj.find('bndbox')
73 | bb = convert_box((w, h), [float(xmlbox.find(x).text) for x in ('xmin', 'xmax', 'ymin', 'ymax')])
74 | cls_id = names.index(cls) # class id
75 | out_file.write(" ".join([str(a) for a in (cls_id, *bb)]) + '\n')
76 |
77 |
78 | # Download
79 | dir = Path(yaml['path']) # dataset root dir
80 | url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/'
81 | urls = [f'{url}VOCtrainval_06-Nov-2007.zip', # 446MB, 5012 images
82 | f'{url}VOCtest_06-Nov-2007.zip', # 438MB, 4953 images
83 | f'{url}VOCtrainval_11-May-2012.zip'] # 1.95GB, 17126 images
84 | download(urls, dir=dir / 'images', delete=False, curl=True, threads=3)
85 |
86 | # Convert
87 | path = dir / 'images/VOCdevkit'
88 | for year, image_set in ('2012', 'train'), ('2012', 'val'), ('2007', 'train'), ('2007', 'val'), ('2007', 'test'):
89 | imgs_path = dir / 'images' / f'{image_set}{year}'
90 | lbs_path = dir / 'labels' / f'{image_set}{year}'
91 | imgs_path.mkdir(exist_ok=True, parents=True)
92 | lbs_path.mkdir(exist_ok=True, parents=True)
93 |
94 | with open(path / f'VOC{year}/ImageSets/Main/{image_set}.txt') as f:
95 | image_ids = f.read().strip().split()
96 | for id in tqdm(image_ids, desc=f'{image_set}{year}'):
97 | f = path / f'VOC{year}/JPEGImages/{id}.jpg' # old img path
98 | lb_path = (lbs_path / f.name).with_suffix('.txt') # new label path
99 | f.rename(imgs_path / f.name) # move image
100 | convert_label(path, lb_path, year, id) # convert labels to YOLO format
101 |
--------------------------------------------------------------------------------
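A note on the convert_label script above: convert_box receives the VOC corners in (xmin, xmax, ymin, ymax) order (see the call site) and emits normalized center-xywh. A minimal worked example with illustrative values:

# Worked example of VOC.yaml's convert_box (illustrative values, not from the dataset)
size = (640, 480)                       # (width, height) from the XML <size> tag
box = (48, 240, 60, 300)                # (xmin, xmax, ymin, ymax), as ordered at the call site
dw, dh = 1. / size[0], 1. / size[1]
x = ((box[0] + box[1]) / 2.0 - 1) * dw  # (48 + 240) / 2 - 1 = 143 -> 0.2234375
y = ((box[2] + box[3]) / 2.0 - 1) * dh  # (60 + 300) / 2 - 1 = 179 -> 0.3729167
w = (box[1] - box[0]) * dw              # 192 / 640 = 0.3
h = (box[3] - box[2]) * dh              # 240 / 480 = 0.5
print(x, y, w, h)
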
/data/VisDrone.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset by Tianjin University
3 | # Example usage: python train.py --data VisDrone.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── VisDrone ← downloads here (2.3 GB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/VisDrone # dataset root dir
12 | train: VisDrone2019-DET-train/images # train images (relative to 'path') 6471 images
13 | val: VisDrone2019-DET-val/images # val images (relative to 'path') 548 images
14 | test: VisDrone2019-DET-test-dev/images # test images (optional) 1610 images
15 |
16 | # Classes
17 | names:
18 | 0: pedestrian
19 | 1: people
20 | 2: bicycle
21 | 3: car
22 | 4: van
23 | 5: truck
24 | 6: tricycle
25 | 7: awning-tricycle
26 | 8: bus
27 | 9: motor
28 |
29 |
30 | # Download script/URL (optional) ---------------------------------------------------------------------------------------
31 | download: |
32 | from utils.general import download, os, Path
33 |
34 | def visdrone2yolo(dir):
35 | from PIL import Image
36 | from tqdm import tqdm
37 |
38 | def convert_box(size, box):
39 | # Convert VisDrone box to YOLO xywh box
40 | dw = 1. / size[0]
41 | dh = 1. / size[1]
42 | return (box[0] + box[2] / 2) * dw, (box[1] + box[3] / 2) * dh, box[2] * dw, box[3] * dh
43 |
44 | (dir / 'labels').mkdir(parents=True, exist_ok=True) # make labels directory
45 | pbar = tqdm((dir / 'annotations').glob('*.txt'), desc=f'Converting {dir}')
46 | for f in pbar:
47 | img_size = Image.open((dir / 'images' / f.name).with_suffix('.jpg')).size
48 | lines = []
49 | with open(f, 'r') as file: # read annotation.txt
50 | for row in [x.split(',') for x in file.read().strip().splitlines()]:
51 | if row[4] == '0': # VisDrone 'ignored regions' class 0
52 | continue
53 | cls = int(row[5]) - 1
54 | box = convert_box(img_size, tuple(map(int, row[:4])))
55 | lines.append(f"{cls} {' '.join(f'{x:.6f}' for x in box)}\n")
56 | with open(str(f).replace(os.sep + 'annotations' + os.sep, os.sep + 'labels' + os.sep), 'w') as fl:
57 | fl.writelines(lines) # write label.txt
58 |
59 |
60 | # Download
61 | dir = Path(yaml['path']) # dataset root dir
62 | urls = ['https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-train.zip',
63 | 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-val.zip',
64 | 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-dev.zip',
65 | 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-challenge.zip']
66 | download(urls, dir=dir, curl=True, threads=4)
67 |
68 | # Convert
69 | for d in 'VisDrone2019-DET-train', 'VisDrone2019-DET-val', 'VisDrone2019-DET-test-dev':
70 | visdrone2yolo(dir / d) # convert VisDrone annotations to YOLO labels
71 |
--------------------------------------------------------------------------------
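VisDrone annotations store boxes as (left, top, width, height) in pixels, so convert_box above only shifts to the box center and normalizes; the cls = int(row[5]) - 1 shift drops the 'ignored regions' class 0. A quick check with made-up values:

# Worked example of VisDrone.yaml's convert_box (illustrative values)
size = (1920, 1080)                 # image (width, height) from PIL
box = (960, 540, 100, 50)           # VisDrone (left, top, box width, box height)
dw, dh = 1. / size[0], 1. / size[1]
x = (box[0] + box[2] / 2) * dw      # (960 + 50) / 1920 = 0.526042
y = (box[1] + box[3] / 2) * dh      # (540 + 25) / 1080 = 0.523148
w, h = box[2] * dw, box[3] * dh     # 0.052083, 0.046296
print(f"{x:.6f} {y:.6f} {w:.6f} {h:.6f}")
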
/data/coco.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # COCO 2017 dataset http://cocodataset.org by Microsoft
3 | # Example usage: python train.py --data coco.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── coco ← downloads here (20.1 GB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/coco # dataset root dir
12 | train: train2017.txt # train images (relative to 'path') 118287 images
13 | val: val2017.txt # val images (relative to 'path') 5000 images
14 | test: test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794
15 |
16 | # Classes
17 | names:
18 | 0: person
19 | 1: bicycle
20 | 2: car
21 | 3: motorcycle
22 | 4: airplane
23 | 5: bus
24 | 6: train
25 | 7: truck
26 | 8: boat
27 | 9: traffic light
28 | 10: fire hydrant
29 | 11: stop sign
30 | 12: parking meter
31 | 13: bench
32 | 14: bird
33 | 15: cat
34 | 16: dog
35 | 17: horse
36 | 18: sheep
37 | 19: cow
38 | 20: elephant
39 | 21: bear
40 | 22: zebra
41 | 23: giraffe
42 | 24: backpack
43 | 25: umbrella
44 | 26: handbag
45 | 27: tie
46 | 28: suitcase
47 | 29: frisbee
48 | 30: skis
49 | 31: snowboard
50 | 32: sports ball
51 | 33: kite
52 | 34: baseball bat
53 | 35: baseball glove
54 | 36: skateboard
55 | 37: surfboard
56 | 38: tennis racket
57 | 39: bottle
58 | 40: wine glass
59 | 41: cup
60 | 42: fork
61 | 43: knife
62 | 44: spoon
63 | 45: bowl
64 | 46: banana
65 | 47: apple
66 | 48: sandwich
67 | 49: orange
68 | 50: broccoli
69 | 51: carrot
70 | 52: hot dog
71 | 53: pizza
72 | 54: donut
73 | 55: cake
74 | 56: chair
75 | 57: couch
76 | 58: potted plant
77 | 59: bed
78 | 60: dining table
79 | 61: toilet
80 | 62: tv
81 | 63: laptop
82 | 64: mouse
83 | 65: remote
84 | 66: keyboard
85 | 67: cell phone
86 | 68: microwave
87 | 69: oven
88 | 70: toaster
89 | 71: sink
90 | 72: refrigerator
91 | 73: book
92 | 74: clock
93 | 75: vase
94 | 76: scissors
95 | 77: teddy bear
96 | 78: hair drier
97 | 79: toothbrush
98 |
99 |
100 | # Download script/URL (optional)
101 | download: |
102 | from utils.general import download, Path
103 |
104 |
105 | # Download labels
106 | segments = False # segment or box labels
107 | dir = Path(yaml['path']) # dataset root dir
108 | url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/'
109 | urls = [url + ('coco2017labels-segments.zip' if segments else 'coco2017labels.zip')] # labels
110 | download(urls, dir=dir.parent)
111 |
112 | # Download data
113 | urls = ['http://images.cocodataset.org/zips/train2017.zip', # 19G, 118k images
114 | 'http://images.cocodataset.org/zips/val2017.zip', # 1G, 5k images
115 | 'http://images.cocodataset.org/zips/test2017.zip'] # 7G, 41k images (optional)
116 | download(urls, dir=dir / 'images', threads=3)
117 |
--------------------------------------------------------------------------------
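The download: field is either a plain URL (as in coco128.yaml below) or a Python snippet; the dataset check exec()s the snippet with the parsed yaml dict in scope, which is why these scripts can refer to yaml['path']. A minimal sketch of that dispatch, assuming PyYAML is installed and mirroring (not quoting) YOLOv5's check_dataset behavior:

import yaml as pyyaml  # assumption: PyYAML available

# Minimal sketch of how a dataset yaml's 'download' field might be dispatched
data = pyyaml.safe_load(open('data/coco.yaml'))  # path assumed relative to repo root
s = data.get('download')
if s and s.startswith('http'):
    print(f'would download and unzip {s}')       # URL form, e.g. coco128.yaml
elif s:
    exec(s, {'yaml': data})                      # script form: 'yaml' dict is in scope
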
/data/coco128-seg.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # COCO128-seg dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics
3 | # Example usage: python segment/train.py --data coco128-seg.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── coco128-seg ← downloads here (7 MB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/coco128-seg # dataset root dir
12 | train: images/train2017 # train images (relative to 'path') 128 images
13 | val: images/train2017 # val images (relative to 'path') 128 images
14 | test: # test images (optional)
15 |
16 | # Classes
17 | names:
18 | 0: person
19 | 1: bicycle
20 | 2: car
21 | 3: motorcycle
22 | 4: airplane
23 | 5: bus
24 | 6: train
25 | 7: truck
26 | 8: boat
27 | 9: traffic light
28 | 10: fire hydrant
29 | 11: stop sign
30 | 12: parking meter
31 | 13: bench
32 | 14: bird
33 | 15: cat
34 | 16: dog
35 | 17: horse
36 | 18: sheep
37 | 19: cow
38 | 20: elephant
39 | 21: bear
40 | 22: zebra
41 | 23: giraffe
42 | 24: backpack
43 | 25: umbrella
44 | 26: handbag
45 | 27: tie
46 | 28: suitcase
47 | 29: frisbee
48 | 30: skis
49 | 31: snowboard
50 | 32: sports ball
51 | 33: kite
52 | 34: baseball bat
53 | 35: baseball glove
54 | 36: skateboard
55 | 37: surfboard
56 | 38: tennis racket
57 | 39: bottle
58 | 40: wine glass
59 | 41: cup
60 | 42: fork
61 | 43: knife
62 | 44: spoon
63 | 45: bowl
64 | 46: banana
65 | 47: apple
66 | 48: sandwich
67 | 49: orange
68 | 50: broccoli
69 | 51: carrot
70 | 52: hot dog
71 | 53: pizza
72 | 54: donut
73 | 55: cake
74 | 56: chair
75 | 57: couch
76 | 58: potted plant
77 | 59: bed
78 | 60: dining table
79 | 61: toilet
80 | 62: tv
81 | 63: laptop
82 | 64: mouse
83 | 65: remote
84 | 66: keyboard
85 | 67: cell phone
86 | 68: microwave
87 | 69: oven
88 | 70: toaster
89 | 71: sink
90 | 72: refrigerator
91 | 73: book
92 | 74: clock
93 | 75: vase
94 | 76: scissors
95 | 77: teddy bear
96 | 78: hair drier
97 | 79: toothbrush
98 |
99 |
100 | # Download script/URL (optional)
101 | download: https://ultralytics.com/assets/coco128-seg.zip
102 |
--------------------------------------------------------------------------------
/data/coco128.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics
3 | # Example usage: python train.py --data coco128.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── coco128 ← downloads here (7 MB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/coco128 # dataset root dir
12 | train: images/train2017 # train images (relative to 'path') 128 images
13 | val: images/train2017 # val images (relative to 'path') 128 images
14 | test: # test images (optional)
15 |
16 | # Classes
17 | names:
18 | 0: person
19 | 1: bicycle
20 | 2: car
21 | 3: motorcycle
22 | 4: airplane
23 | 5: bus
24 | 6: train
25 | 7: truck
26 | 8: boat
27 | 9: traffic light
28 | 10: fire hydrant
29 | 11: stop sign
30 | 12: parking meter
31 | 13: bench
32 | 14: bird
33 | 15: cat
34 | 16: dog
35 | 17: horse
36 | 18: sheep
37 | 19: cow
38 | 20: elephant
39 | 21: bear
40 | 22: zebra
41 | 23: giraffe
42 | 24: backpack
43 | 25: umbrella
44 | 26: handbag
45 | 27: tie
46 | 28: suitcase
47 | 29: frisbee
48 | 30: skis
49 | 31: snowboard
50 | 32: sports ball
51 | 33: kite
52 | 34: baseball bat
53 | 35: baseball glove
54 | 36: skateboard
55 | 37: surfboard
56 | 38: tennis racket
57 | 39: bottle
58 | 40: wine glass
59 | 41: cup
60 | 42: fork
61 | 43: knife
62 | 44: spoon
63 | 45: bowl
64 | 46: banana
65 | 47: apple
66 | 48: sandwich
67 | 49: orange
68 | 50: broccoli
69 | 51: carrot
70 | 52: hot dog
71 | 53: pizza
72 | 54: donut
73 | 55: cake
74 | 56: chair
75 | 57: couch
76 | 58: potted plant
77 | 59: bed
78 | 60: dining table
79 | 61: toilet
80 | 62: tv
81 | 63: laptop
82 | 64: mouse
83 | 65: remote
84 | 66: keyboard
85 | 67: cell phone
86 | 68: microwave
87 | 69: oven
88 | 70: toaster
89 | 71: sink
90 | 72: refrigerator
91 | 73: book
92 | 74: clock
93 | 75: vase
94 | 76: scissors
95 | 77: teddy bear
96 | 78: hair drier
97 | 79: toothbrush
98 |
99 |
100 | # Download script/URL (optional)
101 | download: https://ultralytics.com/assets/coco128.zip
102 |
--------------------------------------------------------------------------------
/data/hyps/hyp.Objects365.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # Hyperparameters for Objects365 training
3 | # python train.py --weights yolov5m.pt --data Objects365.yaml --evolve
4 | # See Hyperparameter Evolution tutorial for details https://github.com/ultralytics/yolov5#tutorials
5 |
6 | lr0: 0.00258
7 | lrf: 0.17
8 | momentum: 0.779
9 | weight_decay: 0.00058
10 | warmup_epochs: 1.33
11 | warmup_momentum: 0.86
12 | warmup_bias_lr: 0.0711
13 | box: 0.0539
14 | cls: 0.299
15 | cls_pw: 0.825
16 | obj: 0.632
17 | obj_pw: 1.0
18 | iou_t: 0.2
19 | anchor_t: 3.44
20 | anchors: 3.2
21 | fl_gamma: 0.0
22 | hsv_h: 0.0188
23 | hsv_s: 0.704
24 | hsv_v: 0.36
25 | degrees: 0.0
26 | translate: 0.0902
27 | scale: 0.491
28 | shear: 0.0
29 | perspective: 0.0
30 | flipud: 0.0
31 | fliplr: 0.5
32 | mosaic: 1.0
33 | mixup: 0.0
34 | copy_paste: 0.0
35 |
--------------------------------------------------------------------------------
/data/hyps/hyp.VOC.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # Hyperparameters for VOC training
3 | # python train.py --batch 128 --weights yolov5m6.pt --data VOC.yaml --epochs 50 --img 512 --hyp hyp.scratch-med.yaml --evolve
4 | # See Hyperparameter Evolution tutorial for details https://github.com/ultralytics/yolov5#tutorials
5 |
6 | # YOLOv5 Hyperparameter Evolution Results
7 | # Best generation: 467
8 | # Last generation: 996
9 | # metrics/precision, metrics/recall, metrics/mAP_0.5, metrics/mAP_0.5:0.95, val/box_loss, val/obj_loss, val/cls_loss
10 | # 0.87729, 0.85125, 0.91286, 0.72664, 0.0076739, 0.0042529, 0.0013865
11 |
12 | lr0: 0.00334
13 | lrf: 0.15135
14 | momentum: 0.74832
15 | weight_decay: 0.00025
16 | warmup_epochs: 3.3835
17 | warmup_momentum: 0.59462
18 | warmup_bias_lr: 0.18657
19 | box: 0.02
20 | cls: 0.21638
21 | cls_pw: 0.5
22 | obj: 0.51728
23 | obj_pw: 0.67198
24 | iou_t: 0.2
25 | anchor_t: 3.3744
26 | fl_gamma: 0.0
27 | hsv_h: 0.01041
28 | hsv_s: 0.54703
29 | hsv_v: 0.27739
30 | degrees: 0.0
31 | translate: 0.04591
32 | scale: 0.75544
33 | shear: 0.0
34 | perspective: 0.0
35 | flipud: 0.0
36 | fliplr: 0.5
37 | mosaic: 0.85834
38 | mixup: 0.04266
39 | copy_paste: 0.0
40 | anchors: 3.412
41 |
--------------------------------------------------------------------------------
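The 'Best generation' metrics quoted in the header are ranked by a single fitness scalar that puts almost all weight on mAP; the sketch below mirrors YOLOv5's utils.metrics.fitness and reproduces the score of the quoted generation:

import numpy as np

# Sketch of the evolution fitness score (mirrors utils.metrics.fitness)
def fitness(x):
    w = [0.0, 0.0, 0.1, 0.9]  # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
    return (x[:, :4] * w).sum(1)

gen = np.array([[0.87729, 0.85125, 0.91286, 0.72664]])  # generation 467 above
print(fitness(gen))  # ~0.74526
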
/data/hyps/hyp.scratch-high.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # Hyperparameters for high-augmentation COCO training from scratch
3 | # python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300
4 | # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials
5 |
6 | lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3)
7 | lrf: 0.1 # final OneCycleLR learning rate (lr0 * lrf)
8 | momentum: 0.937 # SGD momentum/Adam beta1
9 | weight_decay: 0.0005 # optimizer weight decay 5e-4
10 | warmup_epochs: 3.0 # warmup epochs (fractions ok)
11 | warmup_momentum: 0.8 # warmup initial momentum
12 | warmup_bias_lr: 0.1 # warmup initial bias lr
13 | box: 0.05 # box loss gain
14 | cls: 0.3 # cls loss gain
15 | cls_pw: 1.0 # cls BCELoss positive_weight
16 | obj: 0.7 # obj loss gain (scale with pixels)
17 | obj_pw: 1.0 # obj BCELoss positive_weight
18 | iou_t: 0.20 # IoU training threshold
19 | anchor_t: 4.0 # anchor-multiple threshold
20 | # anchors: 3 # anchors per output layer (0 to ignore)
21 | fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)
22 | hsv_h: 0.015 # image HSV-Hue augmentation (fraction)
23 | hsv_s: 0.7 # image HSV-Saturation augmentation (fraction)
24 | hsv_v: 0.4 # image HSV-Value augmentation (fraction)
25 | degrees: 0.0 # image rotation (+/- deg)
26 | translate: 0.1 # image translation (+/- fraction)
27 | scale: 0.9 # image scale (+/- gain)
28 | shear: 0.0 # image shear (+/- deg)
29 | perspective: 0.0 # image perspective (+/- fraction), range 0-0.001
30 | flipud: 0.0 # image flip up-down (probability)
31 | fliplr: 0.5 # image flip left-right (probability)
32 | mosaic: 1.0 # image mosaic (probability)
33 | mixup: 0.1 # image mixup (probability)
34 | copy_paste: 0.1 # segment copy-paste (probability)
35 |
--------------------------------------------------------------------------------
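anchor_t is the shape-match threshold used when assigning targets to anchors: a label matches an anchor only if neither its width nor its height ratio against the anchor exceeds anchor_t. A small sketch of that test (values are illustrative):

import torch

# Sketch of the anchor_t shape match used in target assignment (illustrative values)
anchor_t = 4.0
anchor = torch.tensor([116., 90.])               # one P5 anchor (w, h)
wh = torch.tensor([[150., 120.], [20., 200.]])   # two label sizes in pixels
r = wh / anchor                                  # per-axis wh ratios
match = torch.max(r, 1 / r).max(1)[0] < anchor_t # tensor([True, False])
print(match)
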
/data/hyps/hyp.scratch-low.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # Hyperparameters for low-augmentation COCO training from scratch
3 | # python train.py --batch 64 --cfg yolov5n6.yaml --weights '' --data coco.yaml --img 640 --epochs 300 --linear
4 | # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials
5 |
6 | lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3)
7 | lrf: 0.01 # final OneCycleLR learning rate (lr0 * lrf)
8 | momentum: 0.937 # SGD momentum/Adam beta1
9 | weight_decay: 0.0005 # optimizer weight decay 5e-4
10 | warmup_epochs: 3.0 # warmup epochs (fractions ok)
11 | warmup_momentum: 0.8 # warmup initial momentum
12 | warmup_bias_lr: 0.1 # warmup initial bias lr
13 | box: 0.05 # box loss gain
14 | cls: 0.5 # cls loss gain
15 | cls_pw: 1.0 # cls BCELoss positive_weight
16 | obj: 1.0 # obj loss gain (scale with pixels)
17 | obj_pw: 1.0 # obj BCELoss positive_weight
18 | iou_t: 0.20 # IoU training threshold
19 | anchor_t: 4.0 # anchor-multiple threshold
20 | # anchors: 3 # anchors per output layer (0 to ignore)
21 | fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)
22 | hsv_h: 0.015 # image HSV-Hue augmentation (fraction)
23 | hsv_s: 0.7 # image HSV-Saturation augmentation (fraction)
24 | hsv_v: 0.4 # image HSV-Value augmentation (fraction)
25 | degrees: 0.0 # image rotation (+/- deg)
26 | translate: 0.1 # image translation (+/- fraction)
27 | scale: 0.5 # image scale (+/- gain)
28 | shear: 0.0 # image shear (+/- deg)
29 | perspective: 0.0 # image perspective (+/- fraction), range 0-0.001
30 | flipud: 0.0 # image flip up-down (probability)
31 | fliplr: 0.5 # image flip left-right (probability)
32 | mosaic: 1.0 # image mosaic (probability)
33 | mixup: 0.0 # image mixup (probability)
34 | copy_paste: 0.0 # segment copy-paste (probability)
35 |
--------------------------------------------------------------------------------
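The lrf comment means the schedule ends at lr0 * lrf: the default cosine schedule ramps a multiplier from 1.0 down to lrf over training. A sketch mirroring YOLOv5's utils.general.one_cycle:

import math

# Sketch of the cosine lr multiplier behind 'lrf' (mirrors utils.general.one_cycle)
def one_cycle(y1=0.0, y2=1.0, steps=100):
    return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1

lr0, lrf, epochs = 0.01, 0.01, 300
lf = one_cycle(1.0, lrf, epochs)
print(lr0 * lf(0), lr0 * lf(epochs))  # 0.01 at epoch 0 -> 0.0001 (= lr0 * lrf) at the end
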
/data/hyps/hyp.scratch-med.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # Hyperparameters for medium-augmentation COCO training from scratch
3 | # python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300
4 | # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials
5 |
6 | lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3)
7 | lrf: 0.1 # final OneCycleLR learning rate (lr0 * lrf)
8 | momentum: 0.937 # SGD momentum/Adam beta1
9 | weight_decay: 0.0005 # optimizer weight decay 5e-4
10 | warmup_epochs: 3.0 # warmup epochs (fractions ok)
11 | warmup_momentum: 0.8 # warmup initial momentum
12 | warmup_bias_lr: 0.1 # warmup initial bias lr
13 | box: 0.05 # box loss gain
14 | cls: 0.3 # cls loss gain
15 | cls_pw: 1.0 # cls BCELoss positive_weight
16 | obj: 0.7 # obj loss gain (scale with pixels)
17 | obj_pw: 1.0 # obj BCELoss positive_weight
18 | iou_t: 0.20 # IoU training threshold
19 | anchor_t: 4.0 # anchor-multiple threshold
20 | # anchors: 3 # anchors per output layer (0 to ignore)
21 | fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)
22 | hsv_h: 0.015 # image HSV-Hue augmentation (fraction)
23 | hsv_s: 0.7 # image HSV-Saturation augmentation (fraction)
24 | hsv_v: 0.4 # image HSV-Value augmentation (fraction)
25 | degrees: 0.0 # image rotation (+/- deg)
26 | translate: 0.1 # image translation (+/- fraction)
27 | scale: 0.9 # image scale (+/- gain)
28 | shear: 0.0 # image shear (+/- deg)
29 | perspective: 0.0 # image perspective (+/- fraction), range 0-0.001
30 | flipud: 0.0 # image flip up-down (probability)
31 | fliplr: 0.5 # image flip left-right (probability)
32 | mosaic: 1.0 # image mosaic (probability)
33 | mixup: 0.1 # image mixup (probability)
34 | copy_paste: 0.0 # segment copy-paste (probability)
35 |
--------------------------------------------------------------------------------
/data/images/bus.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/airockchip/yolov5/d25a07534c14f44296f9444bab2aa5c601cdaaab/data/images/bus.jpg
--------------------------------------------------------------------------------
/data/images/zidane.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/airockchip/yolov5/d25a07534c14f44296f9444bab2aa5c601cdaaab/data/images/zidane.jpg
--------------------------------------------------------------------------------
/data/scripts/download_weights.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
3 | # Download latest models from https://github.com/ultralytics/yolov5/releases
4 | # Example usage: bash data/scripts/download_weights.sh
5 | # parent
6 | # └── yolov5
7 | # ├── yolov5s.pt ← downloads here
8 | # ├── yolov5m.pt
9 | # └── ...
10 |
11 | python - <<EOF
--------------------------------------------------------------------------------
/data/xView.yaml:
--------------------------------------------------------------------------------
124 | assert 59 >= cls >= 0, f'incorrect class index {cls}'
125 |
126 | # Write YOLO label
127 | if id not in shapes:
128 | shapes[id] = Image.open(file).size
129 | box = xyxy2xywhn(box[None].astype(np.float64), w=shapes[id][0], h=shapes[id][1], clip=True)
130 | with open((labels / id).with_suffix('.txt'), 'a') as f:
131 | f.write(f"{cls} {' '.join(f'{x:.6f}' for x in box[0])}\n") # write label.txt
132 | except Exception as e:
133 | print(f'WARNING: skipping one label for {file}: {e}')
134 |
135 |
136 | # Download manually from https://challenge.xviewdataset.org
137 | dir = Path(yaml['path']) # dataset root dir
138 | # urls = ['https://d307kc0mrhucc3.cloudfront.net/train_labels.zip', # train labels
139 | # 'https://d307kc0mrhucc3.cloudfront.net/train_images.zip', # 15G, 847 train images
140 | # 'https://d307kc0mrhucc3.cloudfront.net/val_images.zip'] # 5G, 282 val images (no labels)
141 | # download(urls, dir=dir, delete=False)
142 |
143 | # Convert labels
144 | convert_labels(dir / 'xView_train.geojson')
145 |
146 | # Move images
147 | images = Path(dir / 'images')
148 | images.mkdir(parents=True, exist_ok=True)
149 | Path(dir / 'train_images').rename(dir / 'images' / 'train')
150 | Path(dir / 'val_images').rename(dir / 'images' / 'val')
151 |
152 | # Split
153 | autosplit(dir / 'images' / 'train')
154 |
--------------------------------------------------------------------------------
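For reference, the xyxy2xywhn call above converts pixel corner boxes to normalized center-xywh, optionally clipping to the image first. A standalone numpy sketch of the assumed behavior:

import numpy as np

# Standalone sketch of xyxy2xywhn's assumed behavior (corner pixels -> normalized xywh)
def xyxy2xywhn(x, w=640, h=640, clip=False):
    x = x.astype(np.float64).copy()
    if clip:
        x[:, [0, 2]] = x[:, [0, 2]].clip(0, w)  # clip x1, x2 to image width
        x[:, [1, 3]] = x[:, [1, 3]].clip(0, h)  # clip y1, y2 to image height
    y = np.empty_like(x)
    y[:, 0] = (x[:, 0] + x[:, 2]) / 2 / w       # normalized x center
    y[:, 1] = (x[:, 1] + x[:, 3]) / 2 / h       # normalized y center
    y[:, 2] = (x[:, 2] - x[:, 0]) / w           # normalized width
    y[:, 3] = (x[:, 3] - x[:, 1]) / h           # normalized height
    return y

print(xyxy2xywhn(np.array([[100., 200., 300., 400.]]), w=640, h=640))
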
/models/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/airockchip/yolov5/d25a07534c14f44296f9444bab2aa5c601cdaaab/models/__init__.py
--------------------------------------------------------------------------------
/models/common_rk_plug_in.py:
--------------------------------------------------------------------------------
1 | # This file contains modules common to various models
2 |
3 | import torch
4 | import torch.nn as nn
5 | from models.common import Conv
6 |
7 |
8 | class surrogate_focus(nn.Module):
9 | # surrogate for Focus: moves wh information into c-space with a fixed 2x2/stride-2 conv instead of slicing
10 | def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
11 | super(surrogate_focus, self).__init__()
12 | self.conv = Conv(c1 * 4, c2, k, s, p, g, act=act)
13 |
14 | with torch.no_grad():
15 | self.convsp = nn.Conv2d(3, 12, (2, 2), groups=1, bias=False, stride=(2, 2))
16 | self.convsp.weight.data = torch.zeros(self.convsp.weight.shape).float()
17 | for i in range(4):
18 | for j in range(3):
19 | ch = i * 3 + j
20 | if 0 <= ch < 3:
21 | self.convsp.weight[ch:ch + 1, j:j + 1, 0, 0] = 1
22 | elif 3 <= ch < 6:
23 | self.convsp.weight[ch:ch + 1, j:j + 1, 1, 0] = 1
24 | elif 6 <= ch < 9:
25 | self.convsp.weight[ch:ch + 1, j:j + 1, 0, 1] = 1
26 | elif 9 <= ch < 12:
27 | self.convsp.weight[ch:ch + 1, j:j + 1, 1, 1] = 1
28 |
29 | def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2)
30 | return self.conv(self.convsp(x))
--------------------------------------------------------------------------------
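surrogate_focus reproduces the slice-based Focus rearrangement with a fixed 2x2/stride-2 convolution, which NPU toolchains handle better than strided slicing. A quick sketch verifying that the fixed conv matches the classic Focus slicing for a 3-channel input:

import torch
import torch.nn as nn

# Verify the fixed convsp weights reproduce Focus slicing (sketch, 3-channel input)
convsp = nn.Conv2d(3, 12, (2, 2), bias=False, stride=(2, 2))
with torch.no_grad():
    convsp.weight.zero_()
    for i, (kh, kw) in enumerate([(0, 0), (1, 0), (0, 1), (1, 1)]):  # 2x2 offsets
        for j in range(3):                                           # input channels
            convsp.weight[i * 3 + j, j, kh, kw] = 1

x = torch.randn(1, 3, 8, 8)
y_slice = torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2],
                     x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1)  # Focus rearrangement
assert torch.allclose(convsp(x), y_slice)
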
/models/experimental.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | """
3 | Experimental modules
4 | """
5 | import math
6 |
7 | import numpy as np
8 | import torch
9 | import torch.nn as nn
10 |
11 | from utils.downloads import attempt_download
12 |
13 |
14 | class Sum(nn.Module):
15 | # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070
16 | def __init__(self, n, weight=False): # n: number of inputs
17 | super().__init__()
18 | self.weight = weight # apply weights boolean
19 | self.iter = range(n - 1) # iter object
20 | if weight:
21 | self.w = nn.Parameter(-torch.arange(1.0, n) / 2, requires_grad=True) # layer weights
22 |
23 | def forward(self, x):
24 | y = x[0] # no weight
25 | if self.weight:
26 | w = torch.sigmoid(self.w) * 2
27 | for i in self.iter:
28 | y = y + x[i + 1] * w[i]
29 | else:
30 | for i in self.iter:
31 | y = y + x[i + 1]
32 | return y
33 |
34 |
35 | class MixConv2d(nn.Module):
36 | # Mixed Depth-wise Conv https://arxiv.org/abs/1907.09595
37 | def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): # ch_in, ch_out, kernel, stride, ch_strategy
38 | super().__init__()
39 | n = len(k) # number of convolutions
40 | if equal_ch: # equal c_ per group
41 | i = torch.linspace(0, n - 1E-6, c2).floor() # c2 indices
42 | c_ = [(i == g).sum() for g in range(n)] # intermediate channels
43 | else: # equal weight.numel() per group
44 | b = [c2] + [0] * n
45 | a = np.eye(n + 1, n, k=-1)
46 | a -= np.roll(a, 1, axis=1)
47 | a *= np.array(k) ** 2
48 | a[0] = 1
49 | c_ = np.linalg.lstsq(a, b, rcond=None)[0].round() # solve for equal weight indices, ax = b
50 |
51 | self.m = nn.ModuleList([
52 | nn.Conv2d(c1, int(c_), k, s, k // 2, groups=math.gcd(c1, int(c_)), bias=False) for k, c_ in zip(k, c_)])
53 | self.bn = nn.BatchNorm2d(c2)
54 | # self.act = nn.SiLU()
55 | self.act = nn.ReLU()
56 |
57 | def forward(self, x):
58 | return self.act(self.bn(torch.cat([m(x) for m in self.m], 1)))
59 |
60 |
61 | class Ensemble(nn.ModuleList):
62 | # Ensemble of models
63 | def __init__(self):
64 | super().__init__()
65 |
66 | def forward(self, x, augment=False, profile=False, visualize=False):
67 | y = [module(x, augment, profile, visualize)[0] for module in self]
68 | # y = torch.stack(y).max(0)[0] # max ensemble
69 | # y = torch.stack(y).mean(0) # mean ensemble
70 | y = torch.cat(y, 1) # nms ensemble
71 | return y, None # inference, train output
72 |
73 |
74 | def attempt_load(weights, device=None, inplace=True, fuse=True):
75 | # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
76 | from models.yolo import Detect, Model
77 |
78 | model = Ensemble()
79 | for w in weights if isinstance(weights, list) else [weights]:
80 | ckpt = torch.load(attempt_download(w), map_location='cpu') # load
81 | ckpt = (ckpt.get('ema') or ckpt['model']).to(device).float() # FP32 model
82 |
83 | # Model compatibility updates
84 | if not hasattr(ckpt, 'stride'):
85 | ckpt.stride = torch.tensor([32.])
86 | if hasattr(ckpt, 'names') and isinstance(ckpt.names, (list, tuple)):
87 | ckpt.names = dict(enumerate(ckpt.names)) # convert to dict
88 |
89 | model.append(ckpt.fuse().eval() if fuse and hasattr(ckpt, 'fuse') else ckpt.eval()) # model in eval mode
90 |
91 | # Module compatibility updates
92 | for m in model.modules():
93 | t = type(m)
94 | if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model):
95 | m.inplace = inplace # torch 1.7.0 compatibility
96 | if t is Detect and not isinstance(m.anchor_grid, list):
97 | delattr(m, 'anchor_grid')
98 | setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl)
99 | elif t is nn.Upsample and not hasattr(m, 'recompute_scale_factor'):
100 | m.recompute_scale_factor = None # torch 1.11.0 compatibility
101 |
102 | # Return model
103 | if len(model) == 1:
104 | return model[-1]
105 |
106 | # Return detection ensemble
107 | print(f'Ensemble created with {weights}\n')
108 | for k in 'names', 'nc', 'yaml':
109 | setattr(model, k, getattr(model[0], k))
110 | model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride # max stride
111 | assert all(model[0].nc == m.nc for m in model), f'Models have different class counts: {[m.nc for m in model]}'
112 | return model
113 |
--------------------------------------------------------------------------------
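A hedged usage sketch for attempt_load (the weight path is a placeholder; shapes assume the stock 640-input P5 models with 80 classes):

import torch
from models.experimental import attempt_load

# Load one checkpoint (pass a list of paths to get an nms Ensemble instead)
model = attempt_load('yolov5s.pt', device=torch.device('cpu'))  # placeholder path
print(int(model.stride.max()), model.names[0])  # 32, 'person'
pred = model(torch.zeros(1, 3, 640, 640))[0]    # (1, 25200, 85) raw P5-640 output
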
/models/hub/anchors.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # Default anchors for COCO data
3 |
4 |
5 | # P5 -------------------------------------------------------------------------------------------------------------------
6 | # P5-640:
7 | anchors_p5_640:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 |
13 | # P6 -------------------------------------------------------------------------------------------------------------------
14 | # P6-640: thr=0.25: 0.9964 BPR, 5.54 anchors past thr, n=12, img_size=640, metric_all=0.281/0.716-mean/best, past_thr=0.469-mean: 9,11, 21,19, 17,41, 43,32, 39,70, 86,64, 65,131, 134,130, 120,265, 282,180, 247,354, 512,387
15 | anchors_p6_640:
16 | - [9,11, 21,19, 17,41] # P3/8
17 | - [43,32, 39,70, 86,64] # P4/16
18 | - [65,131, 134,130, 120,265] # P5/32
19 | - [282,180, 247,354, 512,387] # P6/64
20 |
21 | # P6-1280: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1280, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 19,27, 44,40, 38,94, 96,68, 86,152, 180,137, 140,301, 303,264, 238,542, 436,615, 739,380, 925,792
22 | anchors_p6_1280:
23 | - [19,27, 44,40, 38,94] # P3/8
24 | - [96,68, 86,152, 180,137] # P4/16
25 | - [140,301, 303,264, 238,542] # P5/32
26 | - [436,615, 739,380, 925,792] # P6/64
27 |
28 | # P6-1920: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1920, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 28,41, 67,59, 57,141, 144,103, 129,227, 270,205, 209,452, 455,396, 358,812, 653,922, 1109,570, 1387,1187
29 | anchors_p6_1920:
30 | - [28,41, 67,59, 57,141] # P3/8
31 | - [144,103, 129,227, 270,205] # P4/16
32 | - [209,452, 455,396, 358,812] # P5/32
33 | - [653,922, 1109,570, 1387,1187] # P6/64
34 |
35 |
36 | # P7 -------------------------------------------------------------------------------------------------------------------
37 | # P7-640: thr=0.25: 0.9962 BPR, 6.76 anchors past thr, n=15, img_size=640, metric_all=0.275/0.733-mean/best, past_thr=0.466-mean: 11,11, 13,30, 29,20, 30,46, 61,38, 39,92, 78,80, 146,66, 79,163, 149,150, 321,143, 157,303, 257,402, 359,290, 524,372
38 | anchors_p7_640:
39 | - [11,11, 13,30, 29,20] # P3/8
40 | - [30,46, 61,38, 39,92] # P4/16
41 | - [78,80, 146,66, 79,163] # P5/32
42 | - [149,150, 321,143, 157,303] # P6/64
43 | - [257,402, 359,290, 524,372] # P7/128
44 |
45 | # P7-1280: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1280, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 19,22, 54,36, 32,77, 70,83, 138,71, 75,173, 165,159, 148,334, 375,151, 334,317, 251,626, 499,474, 750,326, 534,814, 1079,818
46 | anchors_p7_1280:
47 | - [19,22, 54,36, 32,77] # P3/8
48 | - [70,83, 138,71, 75,173] # P4/16
49 | - [165,159, 148,334, 375,151] # P5/32
50 | - [334,317, 251,626, 499,474] # P6/64
51 | - [750,326, 534,814, 1079,818] # P7/128
52 |
53 | # P7-1920: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1920, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 29,34, 81,55, 47,115, 105,124, 207,107, 113,259, 247,238, 222,500, 563,227, 501,476, 376,939, 749,711, 1126,489, 801,1222, 1618,1227
54 | anchors_p7_1920:
55 | - [29,34, 81,55, 47,115] # P3/8
56 | - [105,124, 207,107, 113,259] # P4/16
57 | - [247,238, 222,500, 563,227] # P5/32
58 | - [501,476, 376,939, 749,711] # P6/64
59 | - [1126,489, 801,1222, 1618,1227] # P7/128
60 |
--------------------------------------------------------------------------------
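The BPR / 'anchors past thr' figures in the comments come from AutoAnchor's ratio metric: each label's wh is compared against every anchor, keeping the worst-axis ratio. A sketch of that computation, assuming wh and anchors are in the same pixel scale:

import torch

# Sketch of the AutoAnchor ratio metric behind the BPR comments above
def anchor_stats(wh, anchors, thr=0.25):   # wh: (n,2) labels, anchors: (m,2)
    r = wh[:, None] / anchors[None]        # (n, m, 2) per-axis ratios
    x = torch.min(r, 1 / r).min(2)[0]      # (n, m) worst-axis ratio per pair
    best = x.max(1)[0]                     # best anchor ratio per label
    bpr = (best > thr).float().mean()      # best possible recall
    aat = (x > thr).float().sum(1).mean()  # mean anchors past threshold
    return bpr, aat
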
/models/hub/yolov3-spp.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # darknet53 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [32, 3, 1]], # 0
16 | [-1, 1, Conv, [64, 3, 2]], # 1-P1/2
17 | [-1, 1, Bottleneck, [64]],
18 | [-1, 1, Conv, [128, 3, 2]], # 3-P2/4
19 | [-1, 2, Bottleneck, [128]],
20 | [-1, 1, Conv, [256, 3, 2]], # 5-P3/8
21 | [-1, 8, Bottleneck, [256]],
22 | [-1, 1, Conv, [512, 3, 2]], # 7-P4/16
23 | [-1, 8, Bottleneck, [512]],
24 | [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32
25 | [-1, 4, Bottleneck, [1024]], # 10
26 | ]
27 |
28 | # YOLOv3-SPP head
29 | head:
30 | [[-1, 1, Bottleneck, [1024, False]],
31 | [-1, 1, SPP, [512, [5, 9, 13]]],
32 | [-1, 1, Conv, [1024, 3, 1]],
33 | [-1, 1, Conv, [512, 1, 1]],
34 | [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large)
35 |
36 | [-2, 1, Conv, [256, 1, 1]],
37 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
38 | [[-1, 8], 1, Concat, [1]], # cat backbone P4
39 | [-1, 1, Bottleneck, [512, False]],
40 | [-1, 1, Bottleneck, [512, False]],
41 | [-1, 1, Conv, [256, 1, 1]],
42 | [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium)
43 |
44 | [-2, 1, Conv, [128, 1, 1]],
45 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
46 | [[-1, 6], 1, Concat, [1]], # cat backbone P3
47 | [-1, 1, Bottleneck, [256, False]],
48 | [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small)
49 |
50 | [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
51 | ]
52 |
--------------------------------------------------------------------------------
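In every model yaml here, each row is [from, number, module, args]; depth_multiple scales the 'number' column and width_multiple scales output channels (rounded up to a multiple of 8), which is how one backbone definition yields the n/s/m/l/x sizes. A sketch of that scaling, mirroring the logic of models/yolo.py's parse_model:

import math

def make_divisible(x, divisor=8):
    return math.ceil(x / divisor) * divisor

# Sketch of depth/width scaling applied per [from, number, module, args] row
gd, gw = 0.33, 0.50                        # e.g. yolov5s depth/width multiples
n, c2 = 9, 512                             # row: 9 repeats, 512 output channels
n = max(round(n * gd), 1) if n > 1 else n  # 9 C3 repeats -> 3
c2 = make_divisible(c2 * gw, 8)            # 512 channels -> 256
print(n, c2)
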
/models/hub/yolov3-tiny.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 | anchors:
8 | - [10,14, 23,27, 37,58] # P4/16
9 | - [81,82, 135,169, 344,319] # P5/32
10 |
11 | # YOLOv3-tiny backbone
12 | backbone:
13 | # [from, number, module, args]
14 | [[-1, 1, Conv, [16, 3, 1]], # 0
15 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 1-P1/2
16 | [-1, 1, Conv, [32, 3, 1]],
17 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 3-P2/4
18 | [-1, 1, Conv, [64, 3, 1]],
19 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 5-P3/8
20 | [-1, 1, Conv, [128, 3, 1]],
21 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 7-P4/16
22 | [-1, 1, Conv, [256, 3, 1]],
23 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 9-P5/32
24 | [-1, 1, Conv, [512, 3, 1]],
25 | [-1, 1, nn.ZeroPad2d, [[0, 1, 0, 1]]], # 11
26 | [-1, 1, nn.MaxPool2d, [2, 1, 0]], # 12
27 | ]
28 |
29 | # YOLOv3-tiny head
30 | head:
31 | [[-1, 1, Conv, [1024, 3, 1]],
32 | [-1, 1, Conv, [256, 1, 1]],
33 | [-1, 1, Conv, [512, 3, 1]], # 15 (P5/32-large)
34 |
35 | [-2, 1, Conv, [128, 1, 1]],
36 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
37 | [[-1, 8], 1, Concat, [1]], # cat backbone P4
38 | [-1, 1, Conv, [256, 3, 1]], # 19 (P4/16-medium)
39 |
40 | [[19, 15], 1, Detect, [nc, anchors]], # Detect(P4, P5)
41 | ]
42 |
--------------------------------------------------------------------------------
/models/hub/yolov3.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # darknet53 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [32, 3, 1]], # 0
16 | [-1, 1, Conv, [64, 3, 2]], # 1-P1/2
17 | [-1, 1, Bottleneck, [64]],
18 | [-1, 1, Conv, [128, 3, 2]], # 3-P2/4
19 | [-1, 2, Bottleneck, [128]],
20 | [-1, 1, Conv, [256, 3, 2]], # 5-P3/8
21 | [-1, 8, Bottleneck, [256]],
22 | [-1, 1, Conv, [512, 3, 2]], # 7-P4/16
23 | [-1, 8, Bottleneck, [512]],
24 | [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32
25 | [-1, 4, Bottleneck, [1024]], # 10
26 | ]
27 |
28 | # YOLOv3 head
29 | head:
30 | [[-1, 1, Bottleneck, [1024, False]],
31 | [-1, 1, Conv, [512, 1, 1]],
32 | [-1, 1, Conv, [1024, 3, 1]],
33 | [-1, 1, Conv, [512, 1, 1]],
34 | [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large)
35 |
36 | [-2, 1, Conv, [256, 1, 1]],
37 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
38 | [[-1, 8], 1, Concat, [1]], # cat backbone P4
39 | [-1, 1, Bottleneck, [512, False]],
40 | [-1, 1, Bottleneck, [512, False]],
41 | [-1, 1, Conv, [256, 1, 1]],
42 | [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium)
43 |
44 | [-2, 1, Conv, [128, 1, 1]],
45 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
46 | [[-1, 6], 1, Concat, [1]], # cat backbone P3
47 | [-1, 1, Bottleneck, [256, False]],
48 | [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small)
49 |
50 | [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
51 | ]
52 |
--------------------------------------------------------------------------------
/models/hub/yolov5-bifpn.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3, [1024]],
24 | [-1, 1, SPPF, [1024, 5]], # 9
25 | ]
26 |
27 | # YOLOv5 v6.0 BiFPN head
28 | head:
29 | [[-1, 1, Conv, [512, 1, 1]],
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
32 | [-1, 3, C3, [512, False]], # 13
33 |
34 | [-1, 1, Conv, [256, 1, 1]],
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
38 |
39 | [-1, 1, Conv, [256, 3, 2]],
40 | [[-1, 14, 6], 1, Concat, [1]], # cat P4 <--- BiFPN change
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
42 |
43 | [-1, 1, Conv, [512, 3, 2]],
44 | [[-1, 10], 1, Concat, [1]], # cat head P5
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
46 |
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
48 | ]
49 |
--------------------------------------------------------------------------------
/models/hub/yolov5-fpn.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3, [1024]],
24 | [-1, 1, SPPF, [1024, 5]], # 9
25 | ]
26 |
27 | # YOLOv5 v6.0 FPN head
28 | head:
29 | [[-1, 3, C3, [1024, False]], # 10 (P5/32-large)
30 |
31 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
32 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
33 | [-1, 1, Conv, [512, 1, 1]],
34 | [-1, 3, C3, [512, False]], # 14 (P4/16-medium)
35 |
36 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
37 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
38 | [-1, 1, Conv, [256, 1, 1]],
39 | [-1, 3, C3, [256, False]], # 18 (P3/8-small)
40 |
41 | [[18, 14, 10], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
42 | ]
43 |
--------------------------------------------------------------------------------
/models/hub/yolov5-p2.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 | anchors: 3 # AutoAnchor evolves 3 anchors per P output layer
8 |
9 | # YOLOv5 v6.0 backbone
10 | backbone:
11 | # [from, number, module, args]
12 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
13 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
14 | [-1, 3, C3, [128]],
15 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
16 | [-1, 6, C3, [256]],
17 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
18 | [-1, 9, C3, [512]],
19 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
20 | [-1, 3, C3, [1024]],
21 | [-1, 1, SPPF, [1024, 5]], # 9
22 | ]
23 |
24 | # YOLOv5 v6.0 head with (P2, P3, P4, P5) outputs
25 | head:
26 | [[-1, 1, Conv, [512, 1, 1]],
27 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
28 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
29 | [-1, 3, C3, [512, False]], # 13
30 |
31 | [-1, 1, Conv, [256, 1, 1]],
32 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
33 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
34 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
35 |
36 | [-1, 1, Conv, [128, 1, 1]],
37 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
38 | [[-1, 2], 1, Concat, [1]], # cat backbone P2
39 | [-1, 1, C3, [128, False]], # 21 (P2/4-xsmall)
40 |
41 | [-1, 1, Conv, [128, 3, 2]],
42 | [[-1, 18], 1, Concat, [1]], # cat head P3
43 | [-1, 3, C3, [256, False]], # 24 (P3/8-small)
44 |
45 | [-1, 1, Conv, [256, 3, 2]],
46 | [[-1, 14], 1, Concat, [1]], # cat head P4
47 | [-1, 3, C3, [512, False]], # 27 (P4/16-medium)
48 |
49 | [-1, 1, Conv, [512, 3, 2]],
50 | [[-1, 10], 1, Concat, [1]], # cat head P5
51 | [-1, 3, C3, [1024, False]], # 30 (P5/32-large)
52 |
53 | [[21, 24, 27, 30], 1, Detect, [nc, anchors]], # Detect(P2, P3, P4, P5)
54 | ]
55 |
--------------------------------------------------------------------------------
/models/hub/yolov5-p34.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.33 # model depth multiple
6 | width_multiple: 0.50 # layer channel multiple
7 | anchors: 3 # AutoAnchor evolves 3 anchors per P output layer
8 |
9 | # YOLOv5 v6.0 backbone
10 | backbone:
11 | # [from, number, module, args]
12 | [ [ -1, 1, Conv, [ 64, 6, 2, 2 ] ], # 0-P1/2
13 | [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4
14 | [ -1, 3, C3, [ 128 ] ],
15 | [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8
16 | [ -1, 6, C3, [ 256 ] ],
17 | [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16
18 | [ -1, 9, C3, [ 512 ] ],
19 | [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 7-P5/32
20 | [ -1, 3, C3, [ 1024 ] ],
21 | [ -1, 1, SPPF, [ 1024, 5 ] ], # 9
22 | ]
23 |
24 | # YOLOv5 v6.0 head with (P3, P4) outputs
25 | head:
26 | [ [ -1, 1, Conv, [ 512, 1, 1 ] ],
27 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
28 | [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4
29 | [ -1, 3, C3, [ 512, False ] ], # 13
30 |
31 | [ -1, 1, Conv, [ 256, 1, 1 ] ],
32 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
33 | [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3
34 | [ -1, 3, C3, [ 256, False ] ], # 17 (P3/8-small)
35 |
36 | [ -1, 1, Conv, [ 256, 3, 2 ] ],
37 | [ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P4
38 | [ -1, 3, C3, [ 512, False ] ], # 20 (P4/16-medium)
39 |
40 | [ [ 17, 20 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4)
41 | ]
42 |
--------------------------------------------------------------------------------
/models/hub/yolov5-p6.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 | anchors: 3 # AutoAnchor evolves 3 anchors per P output layer
8 |
9 | # YOLOv5 v6.0 backbone
10 | backbone:
11 | # [from, number, module, args]
12 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
13 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
14 | [-1, 3, C3, [128]],
15 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
16 | [-1, 6, C3, [256]],
17 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
18 | [-1, 9, C3, [512]],
19 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32
20 | [-1, 3, C3, [768]],
21 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64
22 | [-1, 3, C3, [1024]],
23 | [-1, 1, SPPF, [1024, 5]], # 11
24 | ]
25 |
26 | # YOLOv5 v6.0 head with (P3, P4, P5, P6) outputs
27 | head:
28 | [[-1, 1, Conv, [768, 1, 1]],
29 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
30 | [[-1, 8], 1, Concat, [1]], # cat backbone P5
31 | [-1, 3, C3, [768, False]], # 15
32 |
33 | [-1, 1, Conv, [512, 1, 1]],
34 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
35 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
36 | [-1, 3, C3, [512, False]], # 19
37 |
38 | [-1, 1, Conv, [256, 1, 1]],
39 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
40 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
41 | [-1, 3, C3, [256, False]], # 23 (P3/8-small)
42 |
43 | [-1, 1, Conv, [256, 3, 2]],
44 | [[-1, 20], 1, Concat, [1]], # cat head P4
45 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium)
46 |
47 | [-1, 1, Conv, [512, 3, 2]],
48 | [[-1, 16], 1, Concat, [1]], # cat head P5
49 | [-1, 3, C3, [768, False]], # 29 (P5/32-large)
50 |
51 | [-1, 1, Conv, [768, 3, 2]],
52 | [[-1, 12], 1, Concat, [1]], # cat head P6
53 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge)
54 |
55 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6)
56 | ]
57 |
--------------------------------------------------------------------------------
/models/hub/yolov5-p7.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 | anchors: 3 # AutoAnchor evolves 3 anchors per P output layer
8 |
9 | # YOLOv5 v6.0 backbone
10 | backbone:
11 | # [from, number, module, args]
12 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
13 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
14 | [-1, 3, C3, [128]],
15 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
16 | [-1, 6, C3, [256]],
17 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
18 | [-1, 9, C3, [512]],
19 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32
20 | [-1, 3, C3, [768]],
21 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64
22 | [-1, 3, C3, [1024]],
23 | [-1, 1, Conv, [1280, 3, 2]], # 11-P7/128
24 | [-1, 3, C3, [1280]],
25 | [-1, 1, SPPF, [1280, 5]], # 13
26 | ]
27 |
28 | # YOLOv5 v6.0 head with (P3, P4, P5, P6, P7) outputs
29 | head:
30 | [[-1, 1, Conv, [1024, 1, 1]],
31 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
32 | [[-1, 10], 1, Concat, [1]], # cat backbone P6
33 | [-1, 3, C3, [1024, False]], # 17
34 |
35 | [-1, 1, Conv, [768, 1, 1]],
36 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
37 | [[-1, 8], 1, Concat, [1]], # cat backbone P5
38 | [-1, 3, C3, [768, False]], # 21
39 |
40 | [-1, 1, Conv, [512, 1, 1]],
41 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
42 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
43 | [-1, 3, C3, [512, False]], # 25
44 |
45 | [-1, 1, Conv, [256, 1, 1]],
46 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
47 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
48 | [-1, 3, C3, [256, False]], # 29 (P3/8-small)
49 |
50 | [-1, 1, Conv, [256, 3, 2]],
51 | [[-1, 26], 1, Concat, [1]], # cat head P4
52 | [-1, 3, C3, [512, False]], # 32 (P4/16-medium)
53 |
54 | [-1, 1, Conv, [512, 3, 2]],
55 | [[-1, 22], 1, Concat, [1]], # cat head P5
56 | [-1, 3, C3, [768, False]], # 35 (P5/32-large)
57 |
58 | [-1, 1, Conv, [768, 3, 2]],
59 | [[-1, 18], 1, Concat, [1]], # cat head P6
60 | [-1, 3, C3, [1024, False]], # 38 (P6/64-xlarge)
61 |
62 | [-1, 1, Conv, [1024, 3, 2]],
63 | [[-1, 14], 1, Concat, [1]], # cat head P7
64 | [-1, 3, C3, [1280, False]], # 41 (P7/128-xxlarge)
65 |
66 | [[29, 32, 35, 38, 41], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6, P7)
67 | ]
68 |
--------------------------------------------------------------------------------
/models/hub/yolov5-panet.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3, [1024]],
24 | [-1, 1, SPPF, [1024, 5]], # 9
25 | ]
26 |
27 | # YOLOv5 v6.0 PANet head
28 | head:
29 | [[-1, 1, Conv, [512, 1, 1]],
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
32 | [-1, 3, C3, [512, False]], # 13
33 |
34 | [-1, 1, Conv, [256, 1, 1]],
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
38 |
39 | [-1, 1, Conv, [256, 3, 2]],
40 | [[-1, 14], 1, Concat, [1]], # cat head P4
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
42 |
43 | [-1, 1, Conv, [512, 3, 2]],
44 | [[-1, 10], 1, Concat, [1]], # cat head P5
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
46 |
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
48 | ]
49 |
--------------------------------------------------------------------------------
/models/hub/yolov5l6.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 | anchors:
8 | - [19,27, 44,40, 38,94] # P3/8
9 | - [96,68, 86,152, 180,137] # P4/16
10 | - [140,301, 303,264, 238,542] # P5/32
11 | - [436,615, 739,380, 925,792] # P6/64
12 |
13 | # YOLOv5 v6.0 backbone
14 | backbone:
15 | # [from, number, module, args]
16 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
18 | [-1, 3, C3, [128]],
19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
20 | [-1, 6, C3, [256]],
21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
22 | [-1, 9, C3, [512]],
23 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32
24 | [-1, 3, C3, [768]],
25 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64
26 | [-1, 3, C3, [1024]],
27 | [-1, 1, SPPF, [1024, 5]], # 11
28 | ]
29 |
30 | # YOLOv5 v6.0 head
31 | head:
32 | [[-1, 1, Conv, [768, 1, 1]],
33 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
34 | [[-1, 8], 1, Concat, [1]], # cat backbone P5
35 | [-1, 3, C3, [768, False]], # 15
36 |
37 | [-1, 1, Conv, [512, 1, 1]],
38 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
39 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
40 | [-1, 3, C3, [512, False]], # 19
41 |
42 | [-1, 1, Conv, [256, 1, 1]],
43 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
44 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
45 | [-1, 3, C3, [256, False]], # 23 (P3/8-small)
46 |
47 | [-1, 1, Conv, [256, 3, 2]],
48 | [[-1, 20], 1, Concat, [1]], # cat head P4
49 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium)
50 |
51 | [-1, 1, Conv, [512, 3, 2]],
52 | [[-1, 16], 1, Concat, [1]], # cat head P5
53 | [-1, 3, C3, [768, False]], # 29 (P5/32-large)
54 |
55 | [-1, 1, Conv, [768, 3, 2]],
56 | [[-1, 12], 1, Concat, [1]], # cat head P6
57 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge)
58 |
59 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6)
60 | ]
61 |
--------------------------------------------------------------------------------
/models/hub/yolov5m6.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.67 # model depth multiple
6 | width_multiple: 0.75 # layer channel multiple
7 | anchors:
8 | - [19,27, 44,40, 38,94] # P3/8
9 | - [96,68, 86,152, 180,137] # P4/16
10 | - [140,301, 303,264, 238,542] # P5/32
11 | - [436,615, 739,380, 925,792] # P6/64
12 |
13 | # YOLOv5 v6.0 backbone
14 | backbone:
15 | # [from, number, module, args]
16 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
18 | [-1, 3, C3, [128]],
19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
20 | [-1, 6, C3, [256]],
21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
22 | [-1, 9, C3, [512]],
23 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32
24 | [-1, 3, C3, [768]],
25 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64
26 | [-1, 3, C3, [1024]],
27 | [-1, 1, SPPF, [1024, 5]], # 11
28 | ]
29 |
30 | # YOLOv5 v6.0 head
31 | head:
32 | [[-1, 1, Conv, [768, 1, 1]],
33 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
34 | [[-1, 8], 1, Concat, [1]], # cat backbone P5
35 | [-1, 3, C3, [768, False]], # 15
36 |
37 | [-1, 1, Conv, [512, 1, 1]],
38 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
39 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
40 | [-1, 3, C3, [512, False]], # 19
41 |
42 | [-1, 1, Conv, [256, 1, 1]],
43 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
44 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
45 | [-1, 3, C3, [256, False]], # 23 (P3/8-small)
46 |
47 | [-1, 1, Conv, [256, 3, 2]],
48 | [[-1, 20], 1, Concat, [1]], # cat head P4
49 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium)
50 |
51 | [-1, 1, Conv, [512, 3, 2]],
52 | [[-1, 16], 1, Concat, [1]], # cat head P5
53 | [-1, 3, C3, [768, False]], # 29 (P5/32-large)
54 |
55 | [-1, 1, Conv, [768, 3, 2]],
56 | [[-1, 12], 1, Concat, [1]], # cat head P6
57 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge)
58 |
59 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6)
60 | ]
61 |
--------------------------------------------------------------------------------
/models/hub/yolov5n6.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.33 # model depth multiple
6 | width_multiple: 0.25 # layer channel multiple
7 | anchors:
8 | - [19,27, 44,40, 38,94] # P3/8
9 | - [96,68, 86,152, 180,137] # P4/16
10 | - [140,301, 303,264, 238,542] # P5/32
11 | - [436,615, 739,380, 925,792] # P6/64
12 |
13 | # YOLOv5 v6.0 backbone
14 | backbone:
15 | # [from, number, module, args]
16 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
18 | [-1, 3, C3, [128]],
19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
20 | [-1, 6, C3, [256]],
21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
22 | [-1, 9, C3, [512]],
23 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32
24 | [-1, 3, C3, [768]],
25 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64
26 | [-1, 3, C3, [1024]],
27 | [-1, 1, SPPF, [1024, 5]], # 11
28 | ]
29 |
30 | # YOLOv5 v6.0 head
31 | head:
32 | [[-1, 1, Conv, [768, 1, 1]],
33 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
34 | [[-1, 8], 1, Concat, [1]], # cat backbone P5
35 | [-1, 3, C3, [768, False]], # 15
36 |
37 | [-1, 1, Conv, [512, 1, 1]],
38 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
39 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
40 | [-1, 3, C3, [512, False]], # 19
41 |
42 | [-1, 1, Conv, [256, 1, 1]],
43 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
44 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
45 | [-1, 3, C3, [256, False]], # 23 (P3/8-small)
46 |
47 | [-1, 1, Conv, [256, 3, 2]],
48 | [[-1, 20], 1, Concat, [1]], # cat head P4
49 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium)
50 |
51 | [-1, 1, Conv, [512, 3, 2]],
52 | [[-1, 16], 1, Concat, [1]], # cat head P5
53 | [-1, 3, C3, [768, False]], # 29 (P5/32-large)
54 |
55 | [-1, 1, Conv, [768, 3, 2]],
56 | [[-1, 12], 1, Concat, [1]], # cat head P6
57 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge)
58 |
59 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6)
60 | ]
61 |
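62 | # Note: the *6 variants add a P6/64 output stage and a fourth anchor set for very large
63 | # objects, and are intended for larger input resolutions (e.g. --img 1280) than the
64 | # standard three-output P3-P5 models.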
--------------------------------------------------------------------------------
/models/hub/yolov5s-LeakyReLU.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | activation: nn.LeakyReLU(0.1) # <----- Conv() activation used throughout entire YOLOv5 model
6 | depth_multiple: 0.33 # model depth multiple
7 | width_multiple: 0.50 # layer channel multiple
8 | anchors:
9 | - [10,13, 16,30, 33,23] # P3/8
10 | - [30,61, 62,45, 59,119] # P4/16
11 | - [116,90, 156,198, 373,326] # P5/32
12 |
13 | # YOLOv5 v6.0 backbone
14 | backbone:
15 | # [from, number, module, args]
16 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
18 | [-1, 3, C3, [128]],
19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
20 | [-1, 6, C3, [256]],
21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
22 | [-1, 9, C3, [512]],
23 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
24 | [-1, 3, C3, [1024]],
25 | [-1, 1, SPPF, [1024, 5]], # 9
26 | ]
27 |
28 | # YOLOv5 v6.0 head
29 | head:
30 | [[-1, 1, Conv, [512, 1, 1]],
31 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
32 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
33 | [-1, 3, C3, [512, False]], # 13
34 |
35 | [-1, 1, Conv, [256, 1, 1]],
36 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
37 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
38 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
39 |
40 | [-1, 1, Conv, [256, 3, 2]],
41 | [[-1, 14], 1, Concat, [1]], # cat head P4
42 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
43 |
44 | [-1, 1, Conv, [512, 3, 2]],
45 | [[-1, 10], 1, Concat, [1]], # cat head P5
46 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
47 |
48 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
49 | ]
50 |
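51 | # Note: the 'activation' key overrides the default nn.SiLU() (Conv.default_act in
52 | # models/common.py); other parameter-free activations, e.g. nn.ReLU(), can be substituted
53 | # the same way.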
--------------------------------------------------------------------------------
/models/hub/yolov5s-ghost.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.33 # model depth multiple
6 | width_multiple: 0.50 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, GhostConv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3Ghost, [128]],
18 | [-1, 1, GhostConv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3Ghost, [256]],
20 | [-1, 1, GhostConv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3Ghost, [512]],
22 | [-1, 1, GhostConv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3Ghost, [1024]],
24 | [-1, 1, SPPF, [1024, 5]], # 9
25 | ]
26 |
27 | # YOLOv5 v6.0 head
28 | head:
29 | [[-1, 1, GhostConv, [512, 1, 1]],
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
32 | [-1, 3, C3Ghost, [512, False]], # 13
33 |
34 | [-1, 1, GhostConv, [256, 1, 1]],
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
37 | [-1, 3, C3Ghost, [256, False]], # 17 (P3/8-small)
38 |
39 | [-1, 1, GhostConv, [256, 3, 2]],
40 | [[-1, 14], 1, Concat, [1]], # cat head P4
41 | [-1, 3, C3Ghost, [512, False]], # 20 (P4/16-medium)
42 |
43 | [-1, 1, GhostConv, [512, 3, 2]],
44 | [[-1, 10], 1, Concat, [1]], # cat head P5
45 | [-1, 3, C3Ghost, [1024, False]], # 23 (P5/32-large)
46 |
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
48 | ]
49 |
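50 | # Note: GhostConv/C3Ghost (models/common.py) compute half of each layer's output channels
51 | # with a standard convolution and derive the other half from it with a cheap 5x5 depthwise
52 | # convolution, trading a small accuracy cost for fewer parameters and FLOPs.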
--------------------------------------------------------------------------------
/models/hub/yolov5s-transformer.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.33 # model depth multiple
6 | width_multiple: 0.50 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3TR, [1024]], # 8 <--- C3TR() Transformer module
24 | [-1, 1, SPPF, [1024, 5]], # 9
25 | ]
26 |
27 | # YOLOv5 v6.0 head
28 | head:
29 | [[-1, 1, Conv, [512, 1, 1]],
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
32 | [-1, 3, C3, [512, False]], # 13
33 |
34 | [-1, 1, Conv, [256, 1, 1]],
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
38 |
39 | [-1, 1, Conv, [256, 3, 2]],
40 | [[-1, 14], 1, Concat, [1]], # cat head P4
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
42 |
43 | [-1, 1, Conv, [512, 3, 2]],
44 | [[-1, 10], 1, Concat, [1]], # cat head P5
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
46 |
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
48 | ]
49 |
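50 | # Note: C3TR (models/common.py) is a C3 block whose Bottleneck stack is replaced by a
51 | # TransformerBlock (multi-head self-attention); it is applied only at the P5 stage, where
52 | # the feature map is smallest, to keep the attention cost manageable.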
--------------------------------------------------------------------------------
/models/hub/yolov5s6.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.33 # model depth multiple
6 | width_multiple: 0.50 # layer channel multiple
7 | anchors:
8 | - [19,27, 44,40, 38,94] # P3/8
9 | - [96,68, 86,152, 180,137] # P4/16
10 | - [140,301, 303,264, 238,542] # P5/32
11 | - [436,615, 739,380, 925,792] # P6/64
12 |
13 | # YOLOv5 v6.0 backbone
14 | backbone:
15 | # [from, number, module, args]
16 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
18 | [-1, 3, C3, [128]],
19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
20 | [-1, 6, C3, [256]],
21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
22 | [-1, 9, C3, [512]],
23 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32
24 | [-1, 3, C3, [768]],
25 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64
26 | [-1, 3, C3, [1024]],
27 | [-1, 1, SPPF, [1024, 5]], # 11
28 | ]
29 |
30 | # YOLOv5 v6.0 head
31 | head:
32 | [[-1, 1, Conv, [768, 1, 1]],
33 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
34 | [[-1, 8], 1, Concat, [1]], # cat backbone P5
35 | [-1, 3, C3, [768, False]], # 15
36 |
37 | [-1, 1, Conv, [512, 1, 1]],
38 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
39 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
40 | [-1, 3, C3, [512, False]], # 19
41 |
42 | [-1, 1, Conv, [256, 1, 1]],
43 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
44 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
45 | [-1, 3, C3, [256, False]], # 23 (P3/8-small)
46 |
47 | [-1, 1, Conv, [256, 3, 2]],
48 | [[-1, 20], 1, Concat, [1]], # cat head P4
49 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium)
50 |
51 | [-1, 1, Conv, [512, 3, 2]],
52 | [[-1, 16], 1, Concat, [1]], # cat head P5
53 | [-1, 3, C3, [768, False]], # 29 (P5/32-large)
54 |
55 | [-1, 1, Conv, [768, 3, 2]],
56 | [[-1, 12], 1, Concat, [1]], # cat head P6
57 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge)
58 |
59 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6)
60 | ]
61 |
--------------------------------------------------------------------------------
/models/hub/yolov5x6.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.33 # model depth multiple
6 | width_multiple: 1.25 # layer channel multiple
7 | anchors:
8 | - [19,27, 44,40, 38,94] # P3/8
9 | - [96,68, 86,152, 180,137] # P4/16
10 | - [140,301, 303,264, 238,542] # P5/32
11 | - [436,615, 739,380, 925,792] # P6/64
12 |
13 | # YOLOv5 v6.0 backbone
14 | backbone:
15 | # [from, number, module, args]
16 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
18 | [-1, 3, C3, [128]],
19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
20 | [-1, 6, C3, [256]],
21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
22 | [-1, 9, C3, [512]],
23 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32
24 | [-1, 3, C3, [768]],
25 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64
26 | [-1, 3, C3, [1024]],
27 | [-1, 1, SPPF, [1024, 5]], # 11
28 | ]
29 |
30 | # YOLOv5 v6.0 head
31 | head:
32 | [[-1, 1, Conv, [768, 1, 1]],
33 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
34 | [[-1, 8], 1, Concat, [1]], # cat backbone P5
35 | [-1, 3, C3, [768, False]], # 15
36 |
37 | [-1, 1, Conv, [512, 1, 1]],
38 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
39 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
40 | [-1, 3, C3, [512, False]], # 19
41 |
42 | [-1, 1, Conv, [256, 1, 1]],
43 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
44 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
45 | [-1, 3, C3, [256, False]], # 23 (P3/8-small)
46 |
47 | [-1, 1, Conv, [256, 3, 2]],
48 | [[-1, 20], 1, Concat, [1]], # cat head P4
49 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium)
50 |
51 | [-1, 1, Conv, [512, 3, 2]],
52 | [[-1, 16], 1, Concat, [1]], # cat head P5
53 | [-1, 3, C3, [768, False]], # 29 (P5/32-large)
54 |
55 | [-1, 1, Conv, [768, 3, 2]],
56 | [[-1, 12], 1, Concat, [1]], # cat head P6
57 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge)
58 |
59 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6)
60 | ]
61 |
--------------------------------------------------------------------------------
/models/segment/yolov5l-seg.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3, [1024]],
24 | [-1, 1, SPPF, [1024, 5]], # 9
25 | ]
26 |
27 | # YOLOv5 v6.0 head
28 | head:
29 | [[-1, 1, Conv, [512, 1, 1]],
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
32 | [-1, 3, C3, [512, False]], # 13
33 |
34 | [-1, 1, Conv, [256, 1, 1]],
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
38 |
39 | [-1, 1, Conv, [256, 3, 2]],
40 | [[-1, 14], 1, Concat, [1]], # cat head P4
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
42 |
43 | [-1, 1, Conv, [512, 3, 2]],
44 | [[-1, 10], 1, Concat, [1]], # cat head P5
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
46 |
47 | [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Segment(P3, P4, P5)
48 | ]
49 |
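50 | # Note: the Segment head extends Detect with two extra args: nm=32 mask coefficients per
51 | # detection and npr=256 prototype channels (see Segment() in models/yolo.py).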
--------------------------------------------------------------------------------
/models/segment/yolov5m-seg.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.67 # model depth multiple
6 | width_multiple: 0.75 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3, [1024]],
24 | [-1, 1, SPPF, [1024, 5]], # 9
25 | ]
26 |
27 | # YOLOv5 v6.0 head
28 | head:
29 | [[-1, 1, Conv, [512, 1, 1]],
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
32 | [-1, 3, C3, [512, False]], # 13
33 |
34 | [-1, 1, Conv, [256, 1, 1]],
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
38 |
39 | [-1, 1, Conv, [256, 3, 2]],
40 | [[-1, 14], 1, Concat, [1]], # cat head P4
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
42 |
43 | [-1, 1, Conv, [512, 3, 2]],
44 | [[-1, 10], 1, Concat, [1]], # cat head P5
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
46 |
47 | [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Segment(P3, P4, P5)
48 | ]
--------------------------------------------------------------------------------
/models/segment/yolov5n-seg.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.33 # model depth multiple
6 | width_multiple: 0.25 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3, [1024]],
24 | [-1, 1, SPPF, [1024, 5]], # 9
25 | ]
26 |
27 | # YOLOv5 v6.0 head
28 | head:
29 | [[-1, 1, Conv, [512, 1, 1]],
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
32 | [-1, 3, C3, [512, False]], # 13
33 |
34 | [-1, 1, Conv, [256, 1, 1]],
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
38 |
39 | [-1, 1, Conv, [256, 3, 2]],
40 | [[-1, 14], 1, Concat, [1]], # cat head P4
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
42 |
43 | [-1, 1, Conv, [512, 3, 2]],
44 | [[-1, 10], 1, Concat, [1]], # cat head P5
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
46 |
47 | [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Segment(P3, P4, P5)
48 | ]
49 |
--------------------------------------------------------------------------------
/models/segment/yolov5s-seg.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.33 # model depth multiple
6 | width_multiple: 0.5 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3, [1024]],
24 | [-1, 1, SPPF, [1024, 5]], # 9
25 | ]
26 |
27 | # YOLOv5 v6.0 head
28 | head:
29 | [[-1, 1, Conv, [512, 1, 1]],
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
32 | [-1, 3, C3, [512, False]], # 13
33 |
34 | [-1, 1, Conv, [256, 1, 1]],
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
38 |
39 | [-1, 1, Conv, [256, 3, 2]],
40 | [[-1, 14], 1, Concat, [1]], # cat head P4
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
42 |
43 | [-1, 1, Conv, [512, 3, 2]],
44 | [[-1, 10], 1, Concat, [1]], # cat head P5
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
46 |
47 | [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Segment(P3, P4, P5)
48 | ]
--------------------------------------------------------------------------------
/models/segment/yolov5x-seg.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.33 # model depth multiple
6 | width_multiple: 1.25 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3, [1024]],
24 | [-1, 1, SPPF, [1024, 5]], # 9
25 | ]
26 |
27 | # YOLOv5 v6.0 head
28 | head:
29 | [[-1, 1, Conv, [512, 1, 1]],
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
32 | [-1, 3, C3, [512, False]], # 13
33 |
34 | [-1, 1, Conv, [256, 1, 1]],
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
38 |
39 | [-1, 1, Conv, [256, 3, 2]],
40 | [[-1, 14], 1, Concat, [1]], # cat head P4
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
42 |
43 | [-1, 1, Conv, [512, 3, 2]],
44 | [[-1, 10], 1, Concat, [1]], # cat head P5
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
46 |
47 | [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Segment(P3, P4, P5)
48 | ]
49 |
--------------------------------------------------------------------------------
/models/yolov5l.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3, [1024]],
24 | [-1, 1, SPPF, [1024, 5]], # 9
25 | ]
26 |
27 | # YOLOv5 v6.0 head
28 | head:
29 | [[-1, 1, Conv, [512, 1, 1]],
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
32 | [-1, 3, C3, [512, False]], # 13
33 |
34 | [-1, 1, Conv, [256, 1, 1]],
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
38 |
39 | [-1, 1, Conv, [256, 3, 2]],
40 | [[-1, 14], 1, Concat, [1]], # cat head P4
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
42 |
43 | [-1, 1, Conv, [512, 3, 2]],
44 | [[-1, 10], 1, Concat, [1]], # cat head P5
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
46 |
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
48 | ]
49 |
--------------------------------------------------------------------------------
/models/yolov5m.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.67 # model depth multiple
6 | width_multiple: 0.75 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3, [1024]],
24 | [-1, 1, SPPF, [1024, 5]], # 9
25 | ]
26 |
27 | # YOLOv5 v6.0 head
28 | head:
29 | [[-1, 1, Conv, [512, 1, 1]],
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
32 | [-1, 3, C3, [512, False]], # 13
33 |
34 | [-1, 1, Conv, [256, 1, 1]],
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
38 |
39 | [-1, 1, Conv, [256, 3, 2]],
40 | [[-1, 14], 1, Concat, [1]], # cat head P4
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
42 |
43 | [-1, 1, Conv, [512, 3, 2]],
44 | [[-1, 10], 1, Concat, [1]], # cat head P5
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
46 |
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
48 | ]
49 |
--------------------------------------------------------------------------------
/models/yolov5n.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.33 # model depth multiple
6 | width_multiple: 0.25 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3, [1024]],
24 | [-1, 1, SPPF, [1024, 5]], # 9
25 | ]
26 |
27 | # YOLOv5 v6.0 head
28 | head:
29 | [[-1, 1, Conv, [512, 1, 1]],
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
32 | [-1, 3, C3, [512, False]], # 13
33 |
34 | [-1, 1, Conv, [256, 1, 1]],
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
38 |
39 | [-1, 1, Conv, [256, 3, 2]],
40 | [[-1, 14], 1, Concat, [1]], # cat head P4
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
42 |
43 | [-1, 1, Conv, [512, 3, 2]],
44 | [[-1, 10], 1, Concat, [1]], # cat head P5
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
46 |
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
48 | ]
49 |
--------------------------------------------------------------------------------
/models/yolov5s.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.33 # model depth multiple
6 | width_multiple: 0.50 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3, [1024]],
24 | [-1, 1, SPPF, [1024, 5]], # 9
25 | ]
26 |
27 | # YOLOv5 v6.0 head
28 | head:
29 | [[-1, 1, Conv, [512, 1, 1]],
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
32 | [-1, 3, C3, [512, False]], # 13
33 |
34 | [-1, 1, Conv, [256, 1, 1]],
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
38 |
39 | [-1, 1, Conv, [256, 3, 2]],
40 | [[-1, 14], 1, Concat, [1]], # cat head P4
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
42 |
43 | [-1, 1, Conv, [512, 3, 2]],
44 | [[-1, 10], 1, Concat, [1]], # cat head P5
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
46 |
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
48 | ]
49 |
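50 | # Note: at parse time (models/yolo.py) depth_multiple scales each block's 'number' column
51 | # and width_multiple scales its channel argument, e.g. for yolov5s the 9-repeat C3 becomes
52 | # round(9 * 0.33) = 3 repeats and 1024 channels become 1024 * 0.50 = 512.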
--------------------------------------------------------------------------------
/models/yolov5x.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.33 # model depth multiple
6 | width_multiple: 1.25 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3, [1024]],
24 | [-1, 1, SPPF, [1024, 5]], # 9
25 | ]
26 |
27 | # YOLOv5 v6.0 head
28 | head:
29 | [[-1, 1, Conv, [512, 1, 1]],
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
32 | [-1, 3, C3, [512, False]], # 13
33 |
34 | [-1, 1, Conv, [256, 1, 1]],
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
38 |
39 | [-1, 1, Conv, [256, 3, 2]],
40 | [[-1, 14], 1, Concat, [1]], # cat head P4
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
42 |
43 | [-1, 1, Conv, [512, 3, 2]],
44 | [[-1, 10], 1, Concat, [1]], # cat head P5
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
46 |
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
48 | ]
49 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 requirements
2 | # Usage: pip install -r requirements.txt
3 |
4 | # Base ------------------------------------------------------------------------
5 | gitpython
6 | ipython # interactive notebook
7 | matplotlib>=3.2.2
8 | numpy>=1.18.5
9 | opencv-python>=4.1.1
10 | Pillow>=7.1.2
11 | psutil # system resources
12 | PyYAML>=5.3.1
13 | requests>=2.23.0
14 | scipy>=1.4.1
15 | thop>=0.1.1 # FLOPs computation
16 | torch>=1.7.0 # see https://pytorch.org/get-started/locally (recommended)
17 | torchvision>=0.8.1
18 | tqdm>=4.64.0
19 | # protobuf<=3.20.1 # https://github.com/ultralytics/yolov5/issues/8012
20 |
21 | # Logging ---------------------------------------------------------------------
22 | tensorboard>=2.4.1
23 | # clearml>=1.2.0
24 | # comet
25 |
26 | # Plotting --------------------------------------------------------------------
27 | pandas>=1.1.4
28 | seaborn>=0.11.0
29 |
30 | # Export ----------------------------------------------------------------------
31 | # coremltools>=6.0 # CoreML export
32 | # onnx>=1.9.0 # ONNX export
33 | # onnx-simplifier>=0.4.1 # ONNX simplifier
34 | # nvidia-pyindex # TensorRT export
35 | # nvidia-tensorrt # TensorRT export
36 | # scikit-learn<=1.1.2 # CoreML quantization
37 | # tensorflow>=2.4.1 # TF exports (-cpu, -aarch64, -macos)
38 | # tensorflowjs>=3.9.0 # TF.js export
39 | # openvino-dev # OpenVINO export
40 |
41 | # Deploy ----------------------------------------------------------------------
42 | # tritonclient[all]~=2.24.0
43 |
44 | # Extras ----------------------------------------------------------------------
45 | # mss # screenshots
46 | # albumentations>=1.0.3
47 | # pycocotools>=2.0 # COCO mAP
48 | # roboflow
49 | # ultralytics # HUB https://hub.ultralytics.com
50 |
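51 | # Example: to export to ONNX, uncomment onnx and onnx-simplifier above, then run:
52 | # python export.py --weights yolov5s.pt --include onnx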
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | # Project-wide configuration file, can be used for package metadata and other tool configurations
2 | # Example usage: global configuration for PEP8 (via flake8) settings or default pytest arguments
3 | # Local usage: pip install pre-commit, pre-commit run --all-files
4 |
5 | [metadata]
6 | license_file = LICENSE
7 | description_file = README.md
8 |
9 |
10 | [tool:pytest]
11 | norecursedirs =
12 | .git
13 | dist
14 | build
15 | addopts =
16 | --doctest-modules
17 | --durations=25
18 | --color=yes
19 |
20 |
21 | [flake8]
22 | max-line-length = 120
23 | exclude = .tox,*.egg,build,temp
24 | select = E,W,F
25 | doctests = True
26 | verbose = 2
27 | # https://pep8.readthedocs.io/en/latest/intro.html#error-codes
28 | format = pylint
29 | # see: https://www.flake8rules.com/
30 | ignore =
31 | E731 # Do not assign a lambda expression, use a def
32 | F405 # name may be undefined, or defined from star imports: module
33 | E402 # module level import not at top of file
34 | F401 # module imported but unused
35 | W504 # line break after binary operator
36 | E127 # continuation line over-indented for visual indent
37 | E231 # missing whitespace after ‘,’, ‘;’, or ‘:’
38 | E501 # line too long
39 | F403 # ‘from module import *’ used; unable to detect undefined names
40 |
41 |
42 | [isort]
43 | # https://pycqa.github.io/isort/docs/configuration/options.html
44 | line_length = 120
45 | # see: https://pycqa.github.io/isort/docs/configuration/multi_line_output_modes.html
46 | multi_line_output = 0
47 |
48 |
49 | [yapf]
50 | based_on_style = pep8
51 | spaces_before_comment = 2
52 | COLUMN_LIMIT = 120
53 | COALESCE_BRACKETS = True
54 | SPACES_AROUND_POWER_OPERATOR = True
55 | SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET = False
56 | SPLIT_BEFORE_CLOSING_BRACKET = False
57 | SPLIT_BEFORE_FIRST_ARGUMENT = False
58 | # EACH_DICT_ENTRY_ON_SEPARATE_LINE = False
59 |
--------------------------------------------------------------------------------
/utils/__init__.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | """
3 | utils/initialization
4 | """
5 |
6 | import contextlib
7 | import platform
8 | import threading
9 |
10 |
11 | def emojis(str=''):
12 | # Return platform-dependent emoji-safe version of string
13 | return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str
14 |
15 |
16 | class TryExcept(contextlib.ContextDecorator):
17 | # YOLOv5 TryExcept class. Usage: @TryExcept() decorator or 'with TryExcept():' context manager
18 | def __init__(self, msg=''):
19 | self.msg = msg
20 |
21 | def __enter__(self):
22 | pass
23 |
24 | def __exit__(self, exc_type, value, traceback):
25 | if value:
26 | print(emojis(f"{self.msg}{': ' if self.msg else ''}{value}"))
27 | return True
28 |
29 |
30 | def threaded(func):
31 | # Multi-threads a target function and returns thread. Usage: @threaded decorator
32 | def wrapper(*args, **kwargs):
33 | thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True)
34 | thread.start()
35 | return thread
36 |
37 | return wrapper
38 |
39 |
40 | def join_threads(verbose=False):
41 | # Join all daemon threads, i.e. atexit.register(lambda: join_threads())
42 | main_thread = threading.current_thread()
43 | for t in threading.enumerate():
44 | if t is not main_thread:
45 | if verbose:
46 | print(f'Joining thread {t.name}')
47 | t.join()
48 |
49 |
50 | def notebook_init(verbose=True):
51 | # Check system software and hardware
52 | print('Checking setup...')
53 |
54 | import os
55 | import shutil
56 |
57 | from utils.general import check_font, check_requirements, is_colab
58 | from utils.torch_utils import select_device # imports
59 |
60 | check_font()
61 |
62 | import psutil
63 | from IPython import display # to display images and clear console output
64 |
65 | if is_colab():
66 | shutil.rmtree('/content/sample_data', ignore_errors=True) # remove colab /sample_data directory
67 |
68 | # System info
69 | if verbose:
70 | gb = 1 << 30 # bytes to GiB (1024 ** 3)
71 | ram = psutil.virtual_memory().total
72 | total, used, free = shutil.disk_usage("/")
73 | display.clear_output()
74 | s = f'({os.cpu_count()} CPUs, {ram / gb:.1f} GB RAM, {(total - free) / gb:.1f}/{total / gb:.1f} GB disk)'
75 | else:
76 | s = ''
77 |
78 | select_device(newline=False)
79 | print(emojis(f'Setup complete ✅ {s}'))
80 | return display
81 |
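82 | # Usage sketch (illustrative; function names are hypothetical):
83 | # @TryExcept('Download failure')
84 | # def fetch(): ... # any exception is printed with the message and suppressed
85 | #
86 | # @threaded
87 | # def plot(): ... # runs in a daemon thread; the Thread object is returned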
--------------------------------------------------------------------------------
/utils/activations.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | """
3 | Activation functions
4 | """
5 |
6 | import torch
7 | import torch.nn as nn
8 | import torch.nn.functional as F
9 |
10 |
11 | class SiLU(nn.Module):
12 | # SiLU activation https://arxiv.org/pdf/1606.08415.pdf
13 | @staticmethod
14 | def forward(x):
15 | return x * torch.sigmoid(x)
16 |
17 |
18 | class Hardswish(nn.Module):
19 | # Hard-SiLU activation
20 | @staticmethod
21 | def forward(x):
22 | # return x * F.hardsigmoid(x) # for TorchScript and CoreML
23 | return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0 # for TorchScript, CoreML and ONNX
24 |
25 |
26 | class Mish(nn.Module):
27 | # Mish activation https://github.com/digantamisra98/Mish
28 | @staticmethod
29 | def forward(x):
30 | return x * F.softplus(x).tanh()
31 |
32 |
33 | class MemoryEfficientMish(nn.Module):
34 | # Mish activation memory-efficient
35 | class F(torch.autograd.Function):
36 |
37 | @staticmethod
38 | def forward(ctx, x):
39 | ctx.save_for_backward(x)
40 | return x.mul(torch.tanh(F.softplus(x))) # x * tanh(ln(1 + exp(x)))
41 |
42 | @staticmethod
43 | def backward(ctx, grad_output):
44 | x = ctx.saved_tensors[0]
45 | sx = torch.sigmoid(x)
46 | fx = F.softplus(x).tanh()
47 | return grad_output * (fx + x * sx * (1 - fx * fx))
48 |
49 | def forward(self, x):
50 | return self.F.apply(x)
51 |
52 |
53 | class FReLU(nn.Module):
54 | # FReLU activation https://arxiv.org/abs/2007.11824
55 | def __init__(self, c1, k=3): # ch_in, kernel
56 | super().__init__()
57 | self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False)
58 | self.bn = nn.BatchNorm2d(c1)
59 |
60 | def forward(self, x):
61 | return torch.max(x, self.bn(self.conv(x)))
62 |
63 |
64 | class AconC(nn.Module):
65 | r""" ACON activation (activate or not)
66 | AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter
67 | according to "Activate or Not: Learning Customized Activation" .
68 | """
69 |
70 | def __init__(self, c1):
71 | super().__init__()
72 | self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
73 | self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
74 | self.beta = nn.Parameter(torch.ones(1, c1, 1, 1))
75 |
76 | def forward(self, x):
77 | dpx = (self.p1 - self.p2) * x
78 | return dpx * torch.sigmoid(self.beta * dpx) + self.p2 * x
79 |
80 |
81 | class MetaAconC(nn.Module):
82 | r""" ACON activation (activate or not)
83 | MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network
84 | according to "Activate or Not: Learning Customized Activation" .
85 | """
86 |
87 | def __init__(self, c1, k=1, s=1, r=16): # ch_in, kernel, stride, r
88 | super().__init__()
89 | c2 = max(r, c1 // r)
90 | self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
91 | self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
92 | self.fc1 = nn.Conv2d(c1, c2, k, s, bias=True)
93 | self.fc2 = nn.Conv2d(c2, c1, k, s, bias=True)
94 | # self.bn1 = nn.BatchNorm2d(c2)
95 | # self.bn2 = nn.BatchNorm2d(c1)
96 |
97 | def forward(self, x):
98 | y = x.mean(dim=2, keepdims=True).mean(dim=3, keepdims=True)
99 | # batch-size 1 bug/instabilities https://github.com/ultralytics/yolov5/issues/2891
100 | # beta = torch.sigmoid(self.bn2(self.fc2(self.bn1(self.fc1(y))))) # bug/unstable
101 | beta = torch.sigmoid(self.fc2(self.fc1(y))) # bug patch BN layers removed
102 | dpx = (self.p1 - self.p2) * x
103 | return dpx * torch.sigmoid(beta * dpx) + self.p2 * x
104 |
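105 | # Usage sketch (illustrative): each class above is a drop-in nn.Module activation; the
106 | # channel-aware variants (FReLU, AconC, MetaAconC) take the input channel count c1, e.g.
107 | # m = nn.Sequential(nn.Conv2d(3, 16, 3, padding=1), nn.BatchNorm2d(16), FReLU(16))
108 | # y = m(torch.randn(1, 3, 640, 640)) # -> shape (1, 16, 640, 640)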
--------------------------------------------------------------------------------
/utils/autobatch.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | """
3 | Auto-batch utils
4 | """
5 |
6 | from copy import deepcopy
7 |
8 | import numpy as np
9 | import torch
10 |
11 | from utils.general import LOGGER, colorstr
12 | from utils.torch_utils import profile
13 |
14 |
15 | def check_train_batch_size(model, imgsz=640, amp=True):
16 | # Check YOLOv5 training batch size
17 | with torch.cuda.amp.autocast(amp):
18 | return autobatch(deepcopy(model).train(), imgsz) # compute optimal batch size
19 |
20 |
21 | def autobatch(model, imgsz=640, fraction=0.8, batch_size=16):
22 | # Automatically estimate best YOLOv5 batch size to use `fraction` of available CUDA memory
23 | # Usage:
24 | # import torch
25 | # from utils.autobatch import autobatch
26 | # model = torch.hub.load('ultralytics/yolov5', 'yolov5s', autoshape=False)
27 | # print(autobatch(model))
28 |
29 | # Check device
30 | prefix = colorstr('AutoBatch: ')
31 | LOGGER.info(f'{prefix}Computing optimal batch size for --imgsz {imgsz}')
32 | device = next(model.parameters()).device # get model device
33 | if device.type == 'cpu':
34 | LOGGER.info(f'{prefix}CUDA not detected, using default CPU batch-size {batch_size}')
35 | return batch_size
36 | if torch.backends.cudnn.benchmark:
37 | LOGGER.info(f'{prefix} ⚠️ Requires torch.backends.cudnn.benchmark=False, using default batch-size {batch_size}')
38 | return batch_size
39 |
40 | # Inspect CUDA memory
41 | gb = 1 << 30 # bytes to GiB (1024 ** 3)
42 | d = str(device).upper() # 'CUDA:0'
43 | properties = torch.cuda.get_device_properties(device) # device properties
44 | t = properties.total_memory / gb # GiB total
45 | r = torch.cuda.memory_reserved(device) / gb # GiB reserved
46 | a = torch.cuda.memory_allocated(device) / gb # GiB allocated
47 | f = t - (r + a) # GiB free
48 | LOGGER.info(f'{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free')
49 |
50 | # Profile batch sizes
51 | batch_sizes = [1, 2, 4, 8, 16]
52 | try:
53 | img = [torch.empty(b, 3, imgsz, imgsz) for b in batch_sizes]
54 | results = profile(img, model, n=3, device=device)
55 | except Exception as e:
56 | LOGGER.warning(f'{prefix}{e}'); return batch_size # profiling failed, fall back to default
57 |
58 | # Fit a solution
59 | y = [x[2] for x in results if x] # memory [2]
60 | p = np.polyfit(batch_sizes[:len(y)], y, deg=1) # first degree polynomial fit
61 | b = int((f * fraction - p[1]) / p[0]) # solve the linear fit for the optimal batch size
62 | if None in results: # some sizes failed
63 | i = results.index(None) # first fail index
64 | if b >= batch_sizes[i]: # estimated batch size at or above failure point
65 | b = batch_sizes[max(i - 1, 0)] # select prior safe point
66 | if b < 1 or b > 1024: # b outside of safe range
67 | b = batch_size
68 | LOGGER.warning(f'{prefix}WARNING ⚠️ CUDA anomaly detected, recommend restart environment and retry command.')
69 |
70 | fraction = (np.polyval(p, b) + r + a) / t # actual fraction predicted
71 | LOGGER.info(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅')
72 | return b
73 |
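74 | # Worked example (hypothetical numbers): if profiling measures y = [0.5, 0.9, 1.7, 3.3, 6.5]
75 | # GiB for batch_sizes [1, 2, 4, 8, 16], the degree-1 fit gives p ≈ (0.4 GiB/image, 0.1 GiB),
76 | # so with f = 10 GiB free and fraction = 0.8: b = int((10 * 0.8 - 0.1) / 0.4) = 19.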
--------------------------------------------------------------------------------
/utils/aws/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/airockchip/yolov5/d25a07534c14f44296f9444bab2aa5c601cdaaab/utils/aws/__init__.py
--------------------------------------------------------------------------------
/utils/aws/mime.sh:
--------------------------------------------------------------------------------
1 | # AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/
2 | # This script will run on every instance restart, not only on first start
3 | # --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA ---
4 |
5 | Content-Type: multipart/mixed; boundary="//"
6 | MIME-Version: 1.0
7 |
8 | --//
9 | Content-Type: text/cloud-config; charset="us-ascii"
10 | MIME-Version: 1.0
11 | Content-Transfer-Encoding: 7bit
12 | Content-Disposition: attachment; filename="cloud-config.txt"
13 |
14 | #cloud-config
15 | cloud_final_modules:
16 | - [scripts-user, always]
17 |
18 | --//
19 | Content-Type: text/x-shellscript; charset="us-ascii"
20 | MIME-Version: 1.0
21 | Content-Transfer-Encoding: 7bit
22 | Content-Disposition: attachment; filename="userdata.txt"
23 |
24 | #!/bin/bash
25 | # --- paste contents of userdata.sh here ---
26 | --//
27 |
--------------------------------------------------------------------------------
/utils/aws/resume.py:
--------------------------------------------------------------------------------
1 | # Resume all interrupted trainings in yolov5/ dir including DDP trainings
2 | # Usage: $ python utils/aws/resume.py
3 |
4 | import os
5 | import sys
6 | from pathlib import Path
7 |
8 | import torch
9 | import yaml
10 |
11 | FILE = Path(__file__).resolve()
12 | ROOT = FILE.parents[2] # YOLOv5 root directory
13 | if str(ROOT) not in sys.path:
14 | sys.path.append(str(ROOT)) # add ROOT to PATH
15 |
16 | port = 0 # --master_port
17 | path = Path('').resolve()
18 | for last in path.rglob('*/**/last.pt'):
19 | ckpt = torch.load(last)
20 | if ckpt['optimizer'] is None:
21 | continue
22 |
23 | # Load opt.yaml
24 | with open(last.parent.parent / 'opt.yaml', errors='ignore') as f:
25 | opt = yaml.safe_load(f)
26 |
27 | # Get device count
28 | d = opt['device'].split(',') # devices
29 | nd = len(d) # number of devices
30 | ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1) # distributed data parallel
31 |
32 | if ddp: # multi-GPU
33 | port += 1
34 | cmd = f'python -m torch.distributed.run --nproc_per_node {nd} --master_port {port} train.py --resume {last}'
35 | else: # single-GPU
36 | cmd = f'python train.py --resume {last}'
37 |
38 | cmd += ' > /dev/null 2>&1 &' # redirect output to /dev/null and run in background
39 | print(cmd)
40 | os.system(cmd)
41 |
--------------------------------------------------------------------------------
/utils/aws/userdata.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # AWS EC2 instance startup script https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html
3 | # This script will run only once on first instance start (for a re-start script see mime.sh)
4 | # /home/ubuntu (ubuntu) or /home/ec2-user (amazon-linux) is working dir
5 | # Use >300 GB SSD
6 |
7 | cd home/ubuntu
8 | if [ ! -d yolov5 ]; then
9 | echo "Running first-time script." # install dependencies, download COCO, pull Docker
10 | git clone https://github.com/ultralytics/yolov5 -b master && sudo chmod -R 777 yolov5
11 | cd yolov5
12 | bash data/scripts/get_coco.sh && echo "COCO done." &
13 | sudo docker pull ultralytics/yolov5:latest && echo "Docker done." &
14 | python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." &
15 | wait && echo "All tasks done." # finish background tasks
16 | else
17 | echo "Running re-start script." # resume interrupted runs
18 | i=0
19 | list=$(sudo docker ps -qa) # container list i.e. $'one\ntwo\nthree\nfour'
20 | while IFS= read -r id; do
21 | ((i++))
22 | echo "restarting container $i: $id"
23 | sudo docker start $id
24 | # sudo docker exec -it $id python train.py --resume # single-GPU
25 | sudo docker exec -d $id python utils/aws/resume.py # multi-scenario
26 | done <<<"$list"
27 | fi
28 |
--------------------------------------------------------------------------------
/utils/callbacks.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | """
3 | Callback utils
4 | """
5 |
6 | import threading
7 |
8 |
9 | class Callbacks:
10 | """"
11 | Handles all registered callbacks for YOLOv5 Hooks
12 | """
13 |
14 | def __init__(self):
15 | # Define the available callbacks
16 | self._callbacks = {
17 | 'on_pretrain_routine_start': [],
18 | 'on_pretrain_routine_end': [],
19 | 'on_train_start': [],
20 | 'on_train_epoch_start': [],
21 | 'on_train_batch_start': [],
22 | 'optimizer_step': [],
23 | 'on_before_zero_grad': [],
24 | 'on_train_batch_end': [],
25 | 'on_train_epoch_end': [],
26 | 'on_val_start': [],
27 | 'on_val_batch_start': [],
28 | 'on_val_image_end': [],
29 | 'on_val_batch_end': [],
30 | 'on_val_end': [],
31 | 'on_fit_epoch_end': [], # fit = train + val
32 | 'on_model_save': [],
33 | 'on_train_end': [],
34 | 'on_params_update': [],
35 | 'teardown': [],}
36 | self.stop_training = False # set True to interrupt training
37 |
38 | def register_action(self, hook, name='', callback=None):
39 | """
40 | Register a new action to a callback hook
41 |
42 | Args:
43 | hook: The callback hook name to register the action to
44 | name: The name of the action for later reference
45 | callback: The callback to fire
46 | """
47 | assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}"
48 | assert callable(callback), f"callback '{callback}' is not callable"
49 | self._callbacks[hook].append({'name': name, 'callback': callback})
50 |
51 | def get_registered_actions(self, hook=None):
52 | """"
53 | Returns all the registered actions by callback hook
54 |
55 | Args:
56 | hook: The name of the hook to check, defaults to all
57 | """
58 | return self._callbacks[hook] if hook else self._callbacks
59 |
60 | def run(self, hook, *args, thread=False, **kwargs):
61 | """
62 | Loop through the registered actions and fire all callbacks, optionally in a daemon thread
63 |
64 | Args:
65 | hook: The name of the hook to fire callbacks for
66 | args: Arguments to receive from YOLOv5
67 | thread: (boolean) Run callbacks in daemon thread
68 | kwargs: Keyword Arguments to receive from YOLOv5
69 | """
70 |
71 | assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}"
72 | for logger in self._callbacks[hook]:
73 | if thread:
74 | threading.Thread(target=logger['callback'], args=args, kwargs=kwargs, daemon=True).start()
75 | else:
76 | logger['callback'](*args, **kwargs)
77 |
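78 | # Usage sketch (hypothetical handler):
79 | # callbacks = Callbacks()
80 | # callbacks.register_action('on_train_epoch_end', name='log_epoch',
81 | # callback=lambda *args, **kwargs: print('epoch finished'))
82 | # callbacks.run('on_train_epoch_end') # prints 'epoch finished'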
--------------------------------------------------------------------------------
/utils/docker/Dockerfile:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # Builds ultralytics/yolov5:latest image on DockerHub https://hub.docker.com/r/ultralytics/yolov5
3 | # Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference
4 |
5 | # Start FROM NVIDIA PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch
6 | FROM nvcr.io/nvidia/pytorch:22.10-py3
7 | RUN rm -rf /opt/pytorch # remove 1.2GB dir
8 |
9 | # Downloads to user config dir
10 | ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/
11 |
12 | # Install linux packages
13 | RUN apt update && apt install --no-install-recommends -y zip htop screen libgl1-mesa-glx
14 |
15 | # Install pip packages
16 | COPY requirements.txt .
17 | RUN python -m pip install --upgrade pip wheel
18 | RUN pip uninstall -y Pillow torchtext # torch torchvision
19 | RUN pip install --no-cache -r requirements.txt ultralytics albumentations comet gsutil notebook 'Pillow>=9.1.0' \
20 | 'opencv-python<4.6.0.66' \
21 | --extra-index-url https://download.pytorch.org/whl/cu113
22 |
23 | # Create working directory
24 | RUN mkdir -p /usr/src/app
25 | WORKDIR /usr/src/app
26 |
27 | # Copy contents
28 | # COPY . /usr/src/app (issues as not a .git directory)
29 | RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app
30 |
31 | # Set environment variables
32 | ENV OMP_NUM_THREADS=8
33 |
34 |
35 | # Usage Examples -------------------------------------------------------------------------------------------------------
36 |
37 | # Build and Push
38 | # t=ultralytics/yolov5:latest && sudo docker build -f utils/docker/Dockerfile -t $t . && sudo docker push $t
39 |
40 | # Pull and Run
41 | # t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all $t
42 |
43 | # Pull and Run with local directory access
44 | # t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all -v "$(pwd)"/datasets:/usr/src/datasets $t
45 |
46 | # Kill all
47 | # sudo docker kill $(sudo docker ps -q)
48 |
49 | # Kill all image-based
50 | # sudo docker kill $(sudo docker ps -qa --filter ancestor=ultralytics/yolov5:latest)
51 |
52 | # DockerHub tag update
53 | # t=ultralytics/yolov5:latest tnew=ultralytics/yolov5:v6.2 && sudo docker pull $t && sudo docker tag $t $tnew && sudo docker push $tnew
54 |
55 | # Clean up
56 | # docker system prune -a --volumes
57 |
58 | # Update Ubuntu drivers
59 | # https://www.maketecheasier.com/install-nvidia-drivers-ubuntu/
60 |
61 | # DDP test
62 | # python -m torch.distributed.run --nproc_per_node 2 --master_port 1 train.py --epochs 3
63 |
64 | # GCP VM from Image
65 | # docker.io/ultralytics/yolov5:latest
66 |
--------------------------------------------------------------------------------
/utils/docker/Dockerfile-arm64:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # Builds ultralytics/yolov5:latest-arm64 image on DockerHub https://hub.docker.com/r/ultralytics/yolov5
3 | # Image is aarch64-compatible for Apple M1 and other ARM architectures, e.g. Jetson Nano and Raspberry Pi
4 |
5 | # Start FROM Ubuntu image https://hub.docker.com/_/ubuntu
6 | FROM arm64v8/ubuntu:20.04
7 |
8 | # Downloads to user config dir
9 | ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/
10 |
11 | # Install linux packages
12 | RUN apt update
13 | RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt install -y tzdata
14 | RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc libgl1-mesa-glx libglib2.0-0 libpython3-dev
15 | # RUN alias python=python3
16 |
17 | # Install pip packages
18 | COPY requirements.txt .
19 | RUN python3 -m pip install --upgrade pip wheel
20 | RUN pip install --no-cache -r requirements.txt ultralytics gsutil notebook \
21 | tensorflow-aarch64
22 | # tensorflowjs \
23 | # onnx onnx-simplifier onnxruntime \
24 | # coremltools openvino-dev \
25 |
26 | # Create working directory
27 | RUN mkdir -p /usr/src/app
28 | WORKDIR /usr/src/app
29 |
30 | # Copy contents
31 | # COPY . /usr/src/app (issues as not a .git directory)
32 | RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app
33 |
34 |
35 | # Usage Examples -------------------------------------------------------------------------------------------------------
36 |
37 | # Build and Push
38 | # t=ultralytics/yolov5:latest-M1 && sudo docker build --platform linux/arm64 -f utils/docker/Dockerfile-arm64 -t $t . && sudo docker push $t
39 |
40 | # Pull and Run
41 | # t=ultralytics/yolov5:latest-M1 && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t
42 |
--------------------------------------------------------------------------------
/utils/docker/Dockerfile-cpu:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # Builds ultralytics/yolov5:latest-cpu image on DockerHub https://hub.docker.com/r/ultralytics/yolov5
3 | # Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLOv5 deployments
4 |
5 | # Start FROM Ubuntu image https://hub.docker.com/_/ubuntu
6 | FROM ubuntu:20.04
7 |
8 | # Downloads to user config dir
9 | ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/
10 |
11 | # Install linux packages
12 | RUN apt update
13 | RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt install -y tzdata
14 | RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg
15 | # RUN alias python=python3
16 |
17 | # Install pip packages
18 | COPY requirements.txt .
19 | RUN python3 -m pip install --upgrade pip wheel
20 | RUN pip install --no-cache -r requirements.txt ultralytics albumentations gsutil notebook \
21 | coremltools onnx onnx-simplifier onnxruntime tensorflow-cpu tensorflowjs \
22 | # openvino-dev \
23 | --extra-index-url https://download.pytorch.org/whl/cpu
24 |
25 | # Create working directory
26 | RUN mkdir -p /usr/src/app
27 | WORKDIR /usr/src/app
28 |
29 | # Copy contents
30 | # COPY . /usr/src/app (issues as not a .git directory)
31 | RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app
32 |
33 |
34 | # Usage Examples -------------------------------------------------------------------------------------------------------
35 |
36 | # Build and Push
37 | # t=ultralytics/yolov5:latest-cpu && sudo docker build -f utils/docker/Dockerfile-cpu -t $t . && sudo docker push $t
38 |
39 | # Pull and Run
40 | # t=ultralytics/yolov5:latest-cpu && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t
41 |
--------------------------------------------------------------------------------
/utils/downloads.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | """
3 | Download utils
4 | """
5 |
6 | import logging
7 | import os
8 | import subprocess
9 | import urllib
10 | from pathlib import Path
11 |
12 | import requests
13 | import torch
14 |
15 |
16 | def is_url(url, check=True):
17 | # Check if string is URL and check if URL exists
18 | try:
19 | url = str(url)
20 | result = urllib.parse.urlparse(url)
21 | assert all([result.scheme, result.netloc]) # check if is url
22 | return (urllib.request.urlopen(url).getcode() == 200) if check else True # check if exists online
23 | except (AssertionError, urllib.request.HTTPError):
24 | return False
25 |
26 |
27 | def gsutil_getsize(url=''):
28 | # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du
29 | s = subprocess.check_output(f'gsutil du {url}', shell=True).decode('utf-8')
30 | return int(s.split(' ')[0]) if len(s) else 0 # bytes
31 |
32 |
33 | def url_getsize(url='https://ultralytics.com/images/bus.jpg'):
34 | # Return downloadable file size in bytes
35 | response = requests.head(url, allow_redirects=True)
36 | return int(response.headers.get('content-length', -1))
37 |
38 |
39 | def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''):
40 | # Attempts to download file from url or url2, checks and removes incomplete downloads < min_bytes
41 | from utils.general import LOGGER
42 |
43 | file = Path(file)
44 | assert_msg = f"Downloaded file '{file}' does not exist or size is < min_bytes={min_bytes}"
45 | try: # url1
46 | LOGGER.info(f'Downloading {url} to {file}...')
47 | torch.hub.download_url_to_file(url, str(file), progress=LOGGER.level <= logging.INFO)
48 | assert file.exists() and file.stat().st_size > min_bytes, assert_msg # check
49 | except Exception as e: # url2
50 | if file.exists():
51 | file.unlink() # remove partial downloads
52 | LOGGER.info(f'ERROR: {e}\nRe-attempting {url2 or url} to {file}...')
53 | os.system(f"curl -# -L '{url2 or url}' -o '{file}' --retry 3 -C -") # curl download, retry and resume on fail
54 | finally:
55 | if not file.exists() or file.stat().st_size < min_bytes: # check
56 | if file.exists():
57 | file.unlink() # remove partial downloads
58 | LOGGER.info(f"ERROR: {assert_msg}\n{error_msg}")
59 | LOGGER.info('')
60 |
61 |
62 | def attempt_download(file, repo='ultralytics/yolov5', release='v7.0'):
63 | # Attempt file download from GitHub release assets if not found locally. release = 'latest', 'v7.0', etc.
64 | from utils.general import LOGGER
65 |
66 | def github_assets(repository, version='latest'):
67 | # Return GitHub repo tag (i.e. 'v7.0') and assets (i.e. ['yolov5s.pt', 'yolov5m.pt', ...])
68 | if version != 'latest':
69 | version = f'tags/{version}' # i.e. tags/v7.0
70 | response = requests.get(f'https://api.github.com/repos/{repository}/releases/{version}').json() # github api
71 | return response['tag_name'], [x['name'] for x in response['assets']] # tag, assets
72 |
73 | file = Path(str(file).strip().replace("'", ''))
74 | if not file.exists():
75 | # URL specified
76 | name = Path(urllib.parse.unquote(str(file))).name # decode '%2F' to '/' etc.
77 | if str(file).startswith(('http:/', 'https:/')): # download
78 | url = str(file).replace(':/', '://') # Pathlib turns :// -> :/
79 | file = name.split('?')[0] # parse authentication https://url.com/file.txt?auth...
80 | if Path(file).is_file():
81 | LOGGER.info(f'Found {url} locally at {file}') # file already exists
82 | else:
83 | safe_download(file=file, url=url, min_bytes=1E5)
84 | return file
85 |
86 | # GitHub assets
87 | assets = [f'yolov5{size}{suffix}.pt' for size in 'nsmlx' for suffix in ('', '6', '-cls', '-seg')] # default
88 | try:
89 | tag, assets = github_assets(repo, release)
90 | except Exception:
91 | try:
92 | tag, assets = github_assets(repo) # latest release
93 | except Exception:
94 | try:
95 | tag = subprocess.check_output('git tag', shell=True, stderr=subprocess.STDOUT).decode().split()[-1]
96 | except Exception:
97 | tag = release
98 |
99 | file.parent.mkdir(parents=True, exist_ok=True) # make parent dir (if required)
100 | if name in assets:
101 | url3 = 'https://drive.google.com/drive/folders/1EFQTEUeXWSFww0luse2jB9M1QNZQGwNl' # backup gdrive mirror
102 | safe_download(
103 | file,
104 | url=f'https://github.com/{repo}/releases/download/{tag}/{name}',
105 | min_bytes=1E5,
106 | error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/{tag} or {url3}')
107 |
108 | return str(file)
109 |
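
A minimal usage sketch for the helpers above, assuming the repo root is on `sys.path`; the weight name and image URL are illustrative:

```python
# Sketch: exercise the download helpers (network access required).
from utils.downloads import attempt_download, is_url, url_getsize

print(is_url('https://ultralytics.com/images/bus.jpg'))       # True if well-formed and reachable
print(url_getsize('https://ultralytics.com/images/bus.jpg'))  # Content-Length in bytes, -1 if unknown

weights = attempt_download('yolov5s.pt')  # pulled from GitHub release assets if not found locally
print(weights)                            # local path to the checkpoint, as a string
```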
--------------------------------------------------------------------------------
/utils/flask_rest_api/README.md:
--------------------------------------------------------------------------------
1 | # Flask REST API
2 |
3 | [REST](https://en.wikipedia.org/wiki/Representational_state_transfer) [API](https://en.wikipedia.org/wiki/API)s are
4 | commonly used to expose Machine Learning (ML) models to other services. This folder contains an example REST API
5 | created using Flask to expose the YOLOv5s model from [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/).
6 |
7 | ## Requirements
8 |
9 | [Flask](https://palletsprojects.com/p/flask/) is required. Install with:
10 |
11 | ```shell
12 | $ pip install Flask
13 | ```
14 |
15 | ## Run
16 |
17 | After installing Flask, run:
18 |
19 | ```shell
20 | $ python3 restapi.py --port 5000
21 | ```
22 |
23 | Then use [curl](https://curl.se/) to perform a request:
24 |
25 | ```shell
26 | $ curl -X POST -F image=@zidane.jpg 'http://localhost:5000/v1/object-detection/yolov5s'
27 | ```
28 |
29 | The model inference results are returned as a JSON response:
30 |
31 | ```json
32 | [
33 | {
34 | "class": 0,
35 | "confidence": 0.8900438547,
36 | "height": 0.9318675399,
37 | "name": "person",
38 | "width": 0.3264600933,
39 | "xcenter": 0.7438579798,
40 | "ycenter": 0.5207948685
41 | },
42 | {
43 | "class": 0,
44 | "confidence": 0.8440024257,
45 | "height": 0.7155083418,
46 | "name": "person",
47 | "width": 0.6546785235,
48 | "xcenter": 0.427829951,
49 | "ycenter": 0.6334488392
50 | },
51 | {
52 | "class": 27,
53 | "confidence": 0.3771208823,
54 | "height": 0.3902671337,
55 | "name": "tie",
56 | "width": 0.0696444362,
57 | "xcenter": 0.3675483763,
58 | "ycenter": 0.7991207838
59 | },
60 | {
61 | "class": 27,
62 | "confidence": 0.3527112305,
63 | "height": 0.1540903747,
64 | "name": "tie",
65 | "width": 0.0336618312,
66 | "xcenter": 0.7814827561,
67 | "ycenter": 0.5065554976
68 | }
69 | ]
70 | ```
71 |
72 | An example Python script to perform inference using [requests](https://docs.python-requests.org/en/master/) is given
73 | in `example_request.py`.
74 |
--------------------------------------------------------------------------------
/utils/flask_rest_api/example_request.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | """
3 | Perform test request
4 | """
5 |
6 | import pprint
7 |
8 | import requests
9 |
10 | DETECTION_URL = "http://localhost:5000/v1/object-detection/yolov5s"
11 | IMAGE = "zidane.jpg"
12 |
13 | # Read image
14 | with open(IMAGE, "rb") as f:
15 | image_data = f.read()
16 |
17 | response = requests.post(DETECTION_URL, files={"image": image_data}).json()
18 |
19 | pprint.pprint(response)
20 |
--------------------------------------------------------------------------------
/utils/flask_rest_api/restapi.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | """
3 | Run a Flask REST API exposing one or more YOLOv5 models
4 | """
5 |
6 | import argparse
7 | import io
8 |
9 | import torch
10 | from flask import Flask, request
11 | from PIL import Image
12 |
13 | app = Flask(__name__)
14 | models = {}
15 |
16 | DETECTION_URL = "/v1/object-detection/<model>"  # Flask passes <model> into predict()
17 |
18 |
19 | @app.route(DETECTION_URL, methods=["POST"])
20 | def predict(model):
21 | if request.method != "POST":
22 | return
23 |
24 | if request.files.get("image"):
25 | # Method 1
26 | # with request.files["image"] as f:
27 | # im = Image.open(io.BytesIO(f.read()))
28 |
29 | # Method 2
30 | im_file = request.files["image"]
31 | im_bytes = im_file.read()
32 | im = Image.open(io.BytesIO(im_bytes))
33 |
34 | if model in models:
35 | results = models[model](im, size=640)  # reduce size (e.g. size=320) for faster inference
36 | return results.pandas().xyxy[0].to_json(orient="records")
37 |
38 |
39 | if __name__ == "__main__":
40 | parser = argparse.ArgumentParser(description="Flask API exposing YOLOv5 model")
41 | parser.add_argument("--port", default=5000, type=int, help="port number")
42 | parser.add_argument('--model', nargs='+', default=['yolov5s'], help='model(s) to run, i.e. --model yolov5n yolov5s')
43 | opt = parser.parse_args()
44 |
45 | for m in opt.model:
46 | models[m] = torch.hub.load("ultralytics/yolov5", m, force_reload=True, skip_validation=True)
47 |
48 | app.run(host="0.0.0.0", port=opt.port) # debug=True causes Restarting with stat
49 |
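
A client-side sketch for the multi-model case, assuming the API was started with `python3 restapi.py --port 5000 --model yolov5n yolov5s` and that `zidane.jpg` is in the working directory:

```python
import requests

with open("zidane.jpg", "rb") as f:
    image_data = f.read()

for model in ("yolov5n", "yolov5s"):
    url = f"http://localhost:5000/v1/object-detection/{model}"  # matches DETECTION_URL above
    detections = requests.post(url, files={"image": image_data}).json()
    print(model, "->", len(detections), "objects detected")
```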
--------------------------------------------------------------------------------
/utils/google_app_engine/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM gcr.io/google-appengine/python
2 |
3 | # Create a virtualenv for dependencies. This isolates these packages from
4 | # system-level packages.
5 | # Use -p python3 or -p python3.7 to select python version. Default is version 2.
6 | RUN virtualenv /env -p python3
7 |
8 | # Setting these environment variables is equivalent to running
9 | # source /env/bin/activate.
10 | ENV VIRTUAL_ENV /env
11 | ENV PATH /env/bin:$PATH
12 |
13 | RUN apt-get update && apt-get install -y python-opencv
14 |
15 | # Copy the application's requirements.txt and run pip to install all
16 | # dependencies into the virtualenv.
17 | ADD requirements.txt /app/requirements.txt
18 | RUN pip install -r /app/requirements.txt
19 |
20 | # Add the application source code.
21 | ADD . /app
22 |
23 | # Run a WSGI server to serve the application. gunicorn must be declared as
24 | # a dependency in requirements.txt.
25 | CMD gunicorn -b :$PORT main:app
26 |
--------------------------------------------------------------------------------
/utils/google_app_engine/additional_requirements.txt:
--------------------------------------------------------------------------------
1 | # Add these requirements to your app on top of the existing ones
2 | pip==21.1
3 | Flask==1.0.2
4 | gunicorn==19.9.0
5 |
--------------------------------------------------------------------------------
/utils/google_app_engine/app.yaml:
--------------------------------------------------------------------------------
1 | runtime: custom
2 | env: flex
3 |
4 | service: yolov5app
5 |
6 | liveness_check:
7 | initial_delay_sec: 600
8 |
9 | manual_scaling:
10 | instances: 1
11 | resources:
12 | cpu: 1
13 | memory_gb: 4
14 | disk_size_gb: 20
15 |
--------------------------------------------------------------------------------
/utils/loggers/clearml/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/airockchip/yolov5/d25a07534c14f44296f9444bab2aa5c601cdaaab/utils/loggers/clearml/__init__.py
--------------------------------------------------------------------------------
/utils/loggers/clearml/hpo.py:
--------------------------------------------------------------------------------
1 | from clearml import Task
2 | # Connecting ClearML with the current process,
3 | # from here on everything is logged automatically
4 | from clearml.automation import HyperParameterOptimizer, UniformParameterRange
5 | from clearml.automation.optuna import OptimizerOptuna
6 |
7 | task = Task.init(project_name='Hyper-Parameter Optimization',
8 | task_name='YOLOv5',
9 | task_type=Task.TaskTypes.optimizer,
10 | reuse_last_task_id=False)
11 |
12 | # Example use case:
13 | optimizer = HyperParameterOptimizer(
14 | # This is the experiment we want to optimize
15 | base_task_id='',
16 | # here we define the hyper-parameters to optimize
17 | # Notice: the parameter name should exactly match what you see in the UI: <section_name>/<parameter>
18 | # For example, if the base experiment has a section named "General"
19 | # with a parameter named "batch_size" under it, this becomes "General/batch_size".
20 | # If you use `argparse`, arguments will appear under the "Args" section,
21 | # and you should instead pass "Args/batch_size"
22 | hyper_parameters=[
23 | UniformParameterRange('Hyperparameters/lr0', min_value=1e-5, max_value=1e-1),
24 | UniformParameterRange('Hyperparameters/lrf', min_value=0.01, max_value=1.0),
25 | UniformParameterRange('Hyperparameters/momentum', min_value=0.6, max_value=0.98),
26 | UniformParameterRange('Hyperparameters/weight_decay', min_value=0.0, max_value=0.001),
27 | UniformParameterRange('Hyperparameters/warmup_epochs', min_value=0.0, max_value=5.0),
28 | UniformParameterRange('Hyperparameters/warmup_momentum', min_value=0.0, max_value=0.95),
29 | UniformParameterRange('Hyperparameters/warmup_bias_lr', min_value=0.0, max_value=0.2),
30 | UniformParameterRange('Hyperparameters/box', min_value=0.02, max_value=0.2),
31 | UniformParameterRange('Hyperparameters/cls', min_value=0.2, max_value=4.0),
32 | UniformParameterRange('Hyperparameters/cls_pw', min_value=0.5, max_value=2.0),
33 | UniformParameterRange('Hyperparameters/obj', min_value=0.2, max_value=4.0),
34 | UniformParameterRange('Hyperparameters/obj_pw', min_value=0.5, max_value=2.0),
35 | UniformParameterRange('Hyperparameters/iou_t', min_value=0.1, max_value=0.7),
36 | UniformParameterRange('Hyperparameters/anchor_t', min_value=2.0, max_value=8.0),
37 | UniformParameterRange('Hyperparameters/fl_gamma', min_value=0.0, max_value=4.0),
38 | UniformParameterRange('Hyperparameters/hsv_h', min_value=0.0, max_value=0.1),
39 | UniformParameterRange('Hyperparameters/hsv_s', min_value=0.0, max_value=0.9),
40 | UniformParameterRange('Hyperparameters/hsv_v', min_value=0.0, max_value=0.9),
41 | UniformParameterRange('Hyperparameters/degrees', min_value=0.0, max_value=45.0),
42 | UniformParameterRange('Hyperparameters/translate', min_value=0.0, max_value=0.9),
43 | UniformParameterRange('Hyperparameters/scale', min_value=0.0, max_value=0.9),
44 | UniformParameterRange('Hyperparameters/shear', min_value=0.0, max_value=10.0),
45 | UniformParameterRange('Hyperparameters/perspective', min_value=0.0, max_value=0.001),
46 | UniformParameterRange('Hyperparameters/flipud', min_value=0.0, max_value=1.0),
47 | UniformParameterRange('Hyperparameters/fliplr', min_value=0.0, max_value=1.0),
48 | UniformParameterRange('Hyperparameters/mosaic', min_value=0.0, max_value=1.0),
49 | UniformParameterRange('Hyperparameters/mixup', min_value=0.0, max_value=1.0),
50 | UniformParameterRange('Hyperparameters/copy_paste', min_value=0.0, max_value=1.0)],
51 | # this is the objective metric we want to maximize/minimize
52 | objective_metric_title='metrics',
53 | objective_metric_series='mAP_0.5',
54 | # now we decide if we want to maximize it or minimize it (accuracy we maximize)
55 | objective_metric_sign='max',
56 | # let us limit the number of concurrent experiments;
57 | # this in turn will make sure we don't bombard the scheduler with experiments.
58 | # if we have an auto-scaler connected, this, by proxy, will limit the number of machines
59 | max_number_of_concurrent_tasks=1,
60 | # this is the optimizer class (actually doing the optimization)
61 | # Currently, we can choose from GridSearch, RandomSearch or OptimizerBOHB (Bayesian optimization Hyper-Band)
62 | optimizer_class=OptimizerOptuna,
63 | # If specified only the top K performing Tasks will be kept, the others will be automatically archived
64 | save_top_k_tasks_only=5,
65 | compute_time_limit=None,
66 | total_max_jobs=20,
67 | min_iteration_per_job=None,
68 | max_iteration_per_job=None,
69 | )
70 |
71 | # report every 10 seconds, this is way too often, but we are testing here
72 | optimizer.set_report_period(10 / 60)
73 | # You can also use the line below instead to run all the optimizer tasks locally, without using queues or an agent
74 | # an_optimizer.start_locally(job_complete_callback=job_complete_callback)
75 | # set the time limit for the optimization process (2 hours)
76 | optimizer.set_time_limit(in_minutes=120.0)
77 | # Start the optimization process in the local environment
78 | optimizer.start_locally()
79 | # wait until process is done (notice we are controlling the optimization process in the background)
80 | optimizer.wait()
81 | # make sure background optimization stopped
82 | optimizer.stop()
83 |
84 | print('We are done, good bye')
85 |
--------------------------------------------------------------------------------
/utils/loggers/comet/comet_utils.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
3 | from urllib.parse import urlparse
4 |
5 | try:
6 | import comet_ml
7 | except (ModuleNotFoundError, ImportError):
8 | comet_ml = None
9 |
10 | import yaml
11 |
12 | logger = logging.getLogger(__name__)
13 |
14 | COMET_PREFIX = "comet://"
15 | COMET_MODEL_NAME = os.getenv("COMET_MODEL_NAME", "yolov5")
16 | COMET_DEFAULT_CHECKPOINT_FILENAME = os.getenv("COMET_DEFAULT_CHECKPOINT_FILENAME", "last.pt")
17 |
18 |
19 | def download_model_checkpoint(opt, experiment):
20 | model_dir = f"{opt.project}/{experiment.name}"
21 | os.makedirs(model_dir, exist_ok=True)
22 |
23 | model_name = COMET_MODEL_NAME
24 | model_asset_list = experiment.get_model_asset_list(model_name)
25 |
26 | if len(model_asset_list) == 0:
27 | logger.error(f"COMET ERROR: No checkpoints found for model name: {model_name}")
28 | return
29 |
30 | model_asset_list = sorted(
31 | model_asset_list,
32 | key=lambda x: x["step"],
33 | reverse=True,
34 | )
35 | logged_checkpoint_map = {asset["fileName"]: asset["assetId"] for asset in model_asset_list}
36 |
37 | resource_url = urlparse(opt.weights)
38 | checkpoint_filename = resource_url.query
39 |
40 | if checkpoint_filename:
41 | asset_id = logged_checkpoint_map.get(checkpoint_filename)
42 | else:
43 | asset_id = logged_checkpoint_map.get(COMET_DEFAULT_CHECKPOINT_FILENAME)
44 | checkpoint_filename = COMET_DEFAULT_CHECKPOINT_FILENAME
45 |
46 | if asset_id is None:
47 | logger.error(f"COMET ERROR: Checkpoint {checkpoint_filename} not found in the given Experiment")
48 | return
49 |
50 | try:
51 | logger.info(f"COMET INFO: Downloading checkpoint {checkpoint_filename}")
52 | asset_filename = checkpoint_filename
53 |
54 | model_binary = experiment.get_asset(asset_id, return_type="binary", stream=False)
55 | model_download_path = f"{model_dir}/{asset_filename}"
56 | with open(model_download_path, "wb") as f:
57 | f.write(model_binary)
58 |
59 | opt.weights = model_download_path
60 |
61 | except Exception as e:
62 | logger.warning("COMET WARNING: Unable to download checkpoint from Comet")
63 | logger.exception(e)
64 |
65 |
66 | def set_opt_parameters(opt, experiment):
67 | """Update the opts Namespace with parameters
68 | from Comet's ExistingExperiment when resuming a run
69 |
70 | Args:
71 | opt (argparse.Namespace): Namespace of command line options
72 | experiment (comet_ml.APIExperiment): Comet API Experiment object
73 | """
74 | asset_list = experiment.get_asset_list()
75 | resume_string = opt.resume
76 |
77 | for asset in asset_list:
78 | if asset["fileName"] == "opt.yaml":
79 | asset_id = asset["assetId"]
80 | asset_binary = experiment.get_asset(asset_id, return_type="binary", stream=False)
81 | opt_dict = yaml.safe_load(asset_binary)
82 | for key, value in opt_dict.items():
83 | setattr(opt, key, value)
84 | opt.resume = resume_string
85 |
86 | # Save hyperparameters to YAML file
87 | # Necessary to pass checks in training script
88 | save_dir = f"{opt.project}/{experiment.name}"
89 | os.makedirs(save_dir, exist_ok=True)
90 |
91 | hyp_yaml_path = f"{save_dir}/hyp.yaml"
92 | with open(hyp_yaml_path, "w") as f:
93 | yaml.dump(opt.hyp, f)
94 | opt.hyp = hyp_yaml_path
95 |
96 |
97 | def check_comet_weights(opt):
98 | """Downloads model weights from Comet and updates the
99 | weights path to point to saved weights location
100 |
101 | Args:
102 | opt (argparse.Namespace): Command Line arguments passed
103 | to YOLOv5 training script
104 |
105 | Returns:
106 | None/bool: Return True if weights are successfully downloaded
107 | else return None
108 | """
109 | if comet_ml is None:
110 | return
111 |
112 | if isinstance(opt.weights, str):
113 | if opt.weights.startswith(COMET_PREFIX):
114 | api = comet_ml.API()
115 | resource = urlparse(opt.weights)
116 | experiment_path = f"{resource.netloc}{resource.path}"
117 | experiment = api.get(experiment_path)
118 | download_model_checkpoint(opt, experiment)
119 | return True
120 |
121 | return None
122 |
123 |
124 | def check_comet_resume(opt):
125 | """Restores run parameters to its original state based on the model checkpoint
126 | and logged Experiment parameters.
127 |
128 | Args:
129 | opt (argparse.Namespace): Command Line arguments passed
130 | to YOLOv5 training script
131 |
132 | Returns:
133 | None/bool: Return True if the run is restored successfully
134 | else return None
135 | """
136 | if comet_ml is None:
137 | return
138 |
139 | if isinstance(opt.resume, str):
140 | if opt.resume.startswith(COMET_PREFIX):
141 | api = comet_ml.API()
142 | resource = urlparse(opt.resume)
143 | experiment_path = f"{resource.netloc}{resource.path}"
144 | experiment = api.get(experiment_path)
145 | set_opt_parameters(opt, experiment)
146 | download_model_checkpoint(opt, experiment)
147 |
148 | return True
149 |
150 | return None
151 |
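
A minimal sketch of how these helpers are meant to be called from a training entrypoint; the `comet://` resource string below is a placeholder, not a real experiment:

```python
import argparse

from utils.loggers.comet.comet_utils import check_comet_weights

# Expected form: comet://<workspace>/<project>/<experiment_id>?<checkpoint_filename>
opt = argparse.Namespace(weights="comet://my-workspace/yolov5/abc123?last.pt", project="runs/train")

if check_comet_weights(opt):  # downloads the checkpoint and rewrites opt.weights in place
    print("weights downloaded to", opt.weights)
```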
--------------------------------------------------------------------------------
/utils/loggers/comet/optimizer_config.json:
--------------------------------------------------------------------------------
1 | {
2 | "algorithm": "random",
3 | "parameters": {
4 | "anchor_t": {
5 | "type": "discrete",
6 | "values": [
7 | 2,
8 | 8
9 | ]
10 | },
11 | "batch_size": {
12 | "type": "discrete",
13 | "values": [
14 | 16,
15 | 32,
16 | 64
17 | ]
18 | },
19 | "box": {
20 | "type": "discrete",
21 | "values": [
22 | 0.02,
23 | 0.2
24 | ]
25 | },
26 | "cls": {
27 | "type": "discrete",
28 | "values": [
29 | 0.2
30 | ]
31 | },
32 | "cls_pw": {
33 | "type": "discrete",
34 | "values": [
35 | 0.5
36 | ]
37 | },
38 | "copy_paste": {
39 | "type": "discrete",
40 | "values": [
41 | 1
42 | ]
43 | },
44 | "degrees": {
45 | "type": "discrete",
46 | "values": [
47 | 0,
48 | 45
49 | ]
50 | },
51 | "epochs": {
52 | "type": "discrete",
53 | "values": [
54 | 5
55 | ]
56 | },
57 | "fl_gamma": {
58 | "type": "discrete",
59 | "values": [
60 | 0
61 | ]
62 | },
63 | "fliplr": {
64 | "type": "discrete",
65 | "values": [
66 | 0
67 | ]
68 | },
69 | "flipud": {
70 | "type": "discrete",
71 | "values": [
72 | 0
73 | ]
74 | },
75 | "hsv_h": {
76 | "type": "discrete",
77 | "values": [
78 | 0
79 | ]
80 | },
81 | "hsv_s": {
82 | "type": "discrete",
83 | "values": [
84 | 0
85 | ]
86 | },
87 | "hsv_v": {
88 | "type": "discrete",
89 | "values": [
90 | 0
91 | ]
92 | },
93 | "iou_t": {
94 | "type": "discrete",
95 | "values": [
96 | 0.7
97 | ]
98 | },
99 | "lr0": {
100 | "type": "discrete",
101 | "values": [
102 | 1e-05,
103 | 0.1
104 | ]
105 | },
106 | "lrf": {
107 | "type": "discrete",
108 | "values": [
109 | 0.01,
110 | 1
111 | ]
112 | },
113 | "mixup": {
114 | "type": "discrete",
115 | "values": [
116 | 1
117 | ]
118 | },
119 | "momentum": {
120 | "type": "discrete",
121 | "values": [
122 | 0.6
123 | ]
124 | },
125 | "mosaic": {
126 | "type": "discrete",
127 | "values": [
128 | 0
129 | ]
130 | },
131 | "obj": {
132 | "type": "discrete",
133 | "values": [
134 | 0.2
135 | ]
136 | },
137 | "obj_pw": {
138 | "type": "discrete",
139 | "values": [
140 | 0.5
141 | ]
142 | },
143 | "optimizer": {
144 | "type": "categorical",
145 | "values": [
146 | "SGD",
147 | "Adam",
148 | "AdamW"
149 | ]
150 | },
151 | "perspective": {
152 | "type": "discrete",
153 | "values": [
154 | 0
155 | ]
156 | },
157 | "scale": {
158 | "type": "discrete",
159 | "values": [
160 | 0
161 | ]
162 | },
163 | "shear": {
164 | "type": "discrete",
165 | "values": [
166 | 0
167 | ]
168 | },
169 | "translate": {
170 | "type": "discrete",
171 | "values": [
172 | 0
173 | ]
174 | },
175 | "warmup_bias_lr": {
176 | "type": "discrete",
177 | "values": [
178 | 0,
179 | 0.2
180 | ]
181 | },
182 | "warmup_epochs": {
183 | "type": "discrete",
184 | "values": [
185 | 5
186 | ]
187 | },
188 | "warmup_momentum": {
189 | "type": "discrete",
190 | "values": [
191 | 0,
192 | 0.95
193 | ]
194 | },
195 | "weight_decay": {
196 | "type": "discrete",
197 | "values": [
198 | 0,
199 | 0.001
200 | ]
201 | }
202 | },
203 | "spec": {
204 | "maxCombo": 0,
205 | "metric": "metrics/mAP_0.5",
206 | "objective": "maximize"
207 | },
208 | "trials": 1
209 | }
210 |
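
A hedged sketch of driving a Comet optimizer sweep from this JSON spec; the training step is stubbed with a placeholder metric, and a configured COMET_API_KEY is assumed:

```python
import comet_ml

opt = comet_ml.Optimizer("utils/loggers/comet/optimizer_config.json")

for experiment in opt.get_experiments():
    # A real run would launch training with experiment.get_parameter(...) values;
    # here we only log a placeholder objective so the sweep bookkeeping completes.
    experiment.log_metric("metrics/mAP_0.5", 0.0)
    experiment.end()
```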
--------------------------------------------------------------------------------
/utils/loggers/wandb/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/airockchip/yolov5/d25a07534c14f44296f9444bab2aa5c601cdaaab/utils/loggers/wandb/__init__.py
--------------------------------------------------------------------------------
/utils/loggers/wandb/log_dataset.py:
--------------------------------------------------------------------------------
1 | import argparse
2 |
3 | from wandb_utils import WandbLogger
4 |
5 | from utils.general import LOGGER
6 |
7 | WANDB_ARTIFACT_PREFIX = 'wandb-artifact://'
8 |
9 |
10 | def create_dataset_artifact(opt):
11 | logger = WandbLogger(opt, None, job_type='Dataset Creation') # TODO: return value unused
12 | if not logger.wandb:
13 | LOGGER.info("install wandb using `pip install wandb` to log the dataset")
14 |
15 |
16 | if __name__ == '__main__':
17 | parser = argparse.ArgumentParser()
18 | parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path')
19 | parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset')
20 | parser.add_argument('--project', type=str, default='YOLOv5', help='name of W&B Project')
21 | parser.add_argument('--entity', default=None, help='W&B entity')
22 | parser.add_argument('--name', type=str, default='log dataset', help='name of W&B run')
23 |
24 | opt = parser.parse_args()
25 | opt.resume = False # Explicitly disallow resume check for dataset upload job
26 |
27 | create_dataset_artifact(opt)
28 |
--------------------------------------------------------------------------------
/utils/loggers/wandb/sweep.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from pathlib import Path
3 |
4 | import wandb
5 |
6 | FILE = Path(__file__).resolve()
7 | ROOT = FILE.parents[3] # YOLOv5 root directory
8 | if str(ROOT) not in sys.path:
9 | sys.path.append(str(ROOT)) # add ROOT to PATH
10 |
11 | from train import parse_opt, train
12 | from utils.callbacks import Callbacks
13 | from utils.general import increment_path
14 | from utils.torch_utils import select_device
15 |
16 |
17 | def sweep():
18 | wandb.init()
19 | # Get hyp dict from sweep agent. Copy because train() modifies parameters which confuses wandb.
20 | hyp_dict = vars(wandb.config).get("_items").copy()
21 |
22 | # Workaround: get necessary opt args
23 | opt = parse_opt(known=True)
24 | opt.batch_size = hyp_dict.get("batch_size")
25 | opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve))
26 | opt.epochs = hyp_dict.get("epochs")
27 | opt.nosave = True
28 | opt.data = hyp_dict.get("data")
29 | opt.weights = str(opt.weights)
30 | opt.cfg = str(opt.cfg)
31 | opt.data = str(opt.data)
32 | opt.hyp = str(opt.hyp)
33 | opt.project = str(opt.project)
34 | device = select_device(opt.device, batch_size=opt.batch_size)
35 |
36 | # train
37 | train(hyp_dict, opt, device, callbacks=Callbacks())
38 |
39 |
40 | if __name__ == "__main__":
41 | sweep()
42 |
--------------------------------------------------------------------------------
/utils/loggers/wandb/sweep.yaml:
--------------------------------------------------------------------------------
1 | # Hyperparameters for training
2 | # To set a range,
3 | # provide min and max values as:
4 | #      parameter:
5 | #        min: scalar
6 | #        max: scalar
7 | #
8 | # OR
9 | #
10 | # set a specific list of search values as:
11 | #      parameter:
12 | #        values: [scalar1, scalar2, scalar3...]
13 | #
14 | # You can use grid, bayesian and hyperopt search strategies
15 | # For more info on configuring sweeps visit - https://docs.wandb.ai/guides/sweeps/configuration
16 |
17 | program: utils/loggers/wandb/sweep.py
18 | method: random
19 | metric:
20 | name: metrics/mAP_0.5
21 | goal: maximize
22 |
23 | parameters:
24 | # hyperparameters: set either min, max range or values list
25 | data:
26 | value: "data/coco128.yaml"
27 | batch_size:
28 | values: [64]
29 | epochs:
30 | values: [10]
31 |
32 | lr0:
33 | distribution: uniform
34 | min: 1e-5
35 | max: 1e-1
36 | lrf:
37 | distribution: uniform
38 | min: 0.01
39 | max: 1.0
40 | momentum:
41 | distribution: uniform
42 | min: 0.6
43 | max: 0.98
44 | weight_decay:
45 | distribution: uniform
46 | min: 0.0
47 | max: 0.001
48 | warmup_epochs:
49 | distribution: uniform
50 | min: 0.0
51 | max: 5.0
52 | warmup_momentum:
53 | distribution: uniform
54 | min: 0.0
55 | max: 0.95
56 | warmup_bias_lr:
57 | distribution: uniform
58 | min: 0.0
59 | max: 0.2
60 | box:
61 | distribution: uniform
62 | min: 0.02
63 | max: 0.2
64 | cls:
65 | distribution: uniform
66 | min: 0.2
67 | max: 4.0
68 | cls_pw:
69 | distribution: uniform
70 | min: 0.5
71 | max: 2.0
72 | obj:
73 | distribution: uniform
74 | min: 0.2
75 | max: 4.0
76 | obj_pw:
77 | distribution: uniform
78 | min: 0.5
79 | max: 2.0
80 | iou_t:
81 | distribution: uniform
82 | min: 0.1
83 | max: 0.7
84 | anchor_t:
85 | distribution: uniform
86 | min: 2.0
87 | max: 8.0
88 | fl_gamma:
89 | distribution: uniform
90 | min: 0.0
91 | max: 4.0
92 | hsv_h:
93 | distribution: uniform
94 | min: 0.0
95 | max: 0.1
96 | hsv_s:
97 | distribution: uniform
98 | min: 0.0
99 | max: 0.9
100 | hsv_v:
101 | distribution: uniform
102 | min: 0.0
103 | max: 0.9
104 | degrees:
105 | distribution: uniform
106 | min: 0.0
107 | max: 45.0
108 | translate:
109 | distribution: uniform
110 | min: 0.0
111 | max: 0.9
112 | scale:
113 | distribution: uniform
114 | min: 0.0
115 | max: 0.9
116 | shear:
117 | distribution: uniform
118 | min: 0.0
119 | max: 10.0
120 | perspective:
121 | distribution: uniform
122 | min: 0.0
123 | max: 0.001
124 | flipud:
125 | distribution: uniform
126 | min: 0.0
127 | max: 1.0
128 | fliplr:
129 | distribution: uniform
130 | min: 0.0
131 | max: 1.0
132 | mosaic:
133 | distribution: uniform
134 | min: 0.0
135 | max: 1.0
136 | mixup:
137 | distribution: uniform
138 | min: 0.0
139 | max: 1.0
140 | copy_paste:
141 | distribution: uniform
142 | min: 0.0
143 | max: 1.0
144 |
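
To launch this sweep from Python instead of the `wandb sweep` / `wandb agent` CLI, a minimal sketch (assumes wandb is logged in and the repo root is the working directory):

```python
import wandb
import yaml

from utils.loggers.wandb.sweep import sweep  # the training entrypoint defined above

with open("utils/loggers/wandb/sweep.yaml") as f:
    config = yaml.safe_load(f)

sweep_id = wandb.sweep(config, project="YOLOv5")
wandb.agent(sweep_id, function=sweep, count=1)  # increase count for more trials
```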
--------------------------------------------------------------------------------
/utils/segment/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/airockchip/yolov5/d25a07534c14f44296f9444bab2aa5c601cdaaab/utils/segment/__init__.py
--------------------------------------------------------------------------------
/utils/segment/augmentations.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | """
3 | Image augmentation functions
4 | """
5 |
6 | import math
7 | import random
8 |
9 | import cv2
10 | import numpy as np
11 |
12 | from ..augmentations import box_candidates
13 | from ..general import resample_segments, segment2box
14 |
15 |
16 | def mixup(im, labels, segments, im2, labels2, segments2):
17 | # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf
18 | r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0
19 | im = (im * r + im2 * (1 - r)).astype(np.uint8)
20 | labels = np.concatenate((labels, labels2), 0)
21 | segments = np.concatenate((segments, segments2), 0)
22 | return im, labels, segments
23 |
24 |
25 | def random_perspective(im,
26 | targets=(),
27 | segments=(),
28 | degrees=10,
29 | translate=.1,
30 | scale=.1,
31 | shear=10,
32 | perspective=0.0,
33 | border=(0, 0)):
34 | # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
35 | # targets = [cls, xyxy]
36 |
37 | height = im.shape[0] + border[0] * 2 # shape(h,w,c)
38 | width = im.shape[1] + border[1] * 2
39 |
40 | # Center
41 | C = np.eye(3)
42 | C[0, 2] = -im.shape[1] / 2 # x translation (pixels)
43 | C[1, 2] = -im.shape[0] / 2 # y translation (pixels)
44 |
45 | # Perspective
46 | P = np.eye(3)
47 | P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)
48 | P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)
49 |
50 | # Rotation and Scale
51 | R = np.eye(3)
52 | a = random.uniform(-degrees, degrees)
53 | # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
54 | s = random.uniform(1 - scale, 1 + scale)
55 | # s = 2 ** random.uniform(-scale, scale)
56 | R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
57 |
58 | # Shear
59 | S = np.eye(3)
60 | S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
61 | S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
62 |
63 | # Translation
64 | T = np.eye(3)
65 | T[0, 2] = (random.uniform(0.5 - translate, 0.5 + translate) * width) # x translation (pixels)
66 | T[1, 2] = (random.uniform(0.5 - translate, 0.5 + translate) * height) # y translation (pixels)
67 |
68 | # Combined rotation matrix
69 | M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT
70 | if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
71 | if perspective:
72 | im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114))
73 | else: # affine
74 | im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
75 |
76 | # Visualize
77 | # import matplotlib.pyplot as plt
78 | # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
79 | # ax[0].imshow(im[:, :, ::-1]) # base
80 | # ax[1].imshow(im2[:, :, ::-1]) # warped
81 |
82 | # Transform label coordinates
83 | n = len(targets)
84 | new_segments = []
85 | if n:
86 | new = np.zeros((n, 4))
87 | segments = resample_segments(segments) # upsample
88 | for i, segment in enumerate(segments):
89 | xy = np.ones((len(segment), 3))
90 | xy[:, :2] = segment
91 | xy = xy @ M.T # transform
92 | xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]) # perspective rescale or affine
93 |
94 | # clip
95 | new[i] = segment2box(xy, width, height)
96 | new_segments.append(xy)
97 |
98 | # filter candidates
99 | i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01)
100 | targets = targets[i]
101 | targets[:, 1:5] = new[i]
102 | new_segments = np.array(new_segments)[i]
103 |
104 | return im, targets, new_segments
105 |
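
A small sketch of calling `random_perspective` on a dummy image with one box and its polygon segment; all values are illustrative:

```python
import numpy as np

from utils.segment.augmentations import random_perspective

im = np.full((640, 640, 3), 114, dtype=np.uint8)       # gray canvas
targets = np.array([[0, 100.0, 100.0, 300.0, 300.0]])  # [cls, x1, y1, x2, y2]
segments = [np.array([[100.0, 100.0], [300.0, 100.0], [300.0, 300.0], [100.0, 300.0]])]

im2, targets2, segments2 = random_perspective(im, targets, segments, degrees=10, translate=0.1, scale=0.1)
print(targets2.shape, segments2.shape)  # boxes are recomputed from the warped polygons
```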
--------------------------------------------------------------------------------
/utils/segment/general.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 | import torch
4 | import torch.nn.functional as F
5 |
6 |
7 | def crop_mask(masks, boxes):
8 | """
9 | "Crop" predicted masks by zeroing out everything not in the predicted bbox.
10 | Vectorized by Chong (thanks Chong).
11 |
12 | Args:
13 | - masks should be a size [n, h, w] tensor of masks
14 | - boxes should be a size [n, 4] tensor of bbox coords in relative point form
15 | """
16 |
17 | n, h, w = masks.shape
18 | x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1) # x1 shape(n,1,1)
19 | r = torch.arange(w, device=masks.device, dtype=x1.dtype)[None, None, :] # x coords, shape(1,1,w)
20 | c = torch.arange(h, device=masks.device, dtype=x1.dtype)[None, :, None] # y coords, shape(1,h,1)
21 |
22 | return masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2))
23 |
24 |
25 | def process_mask_upsample(protos, masks_in, bboxes, shape):
26 | """
27 | Crop after upsample.
28 | protos: [mask_dim, mask_h, mask_w]
29 | masks_in: [n, mask_dim], n is number of masks after nms
30 | bboxes: [n, 4], n is number of masks after nms
31 | shape: input image size, (h, w)
32 |
33 | return: binary masks, [n, h, w]
34 | """
35 |
36 | c, mh, mw = protos.shape # CHW
37 | masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw)
38 | masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW
39 | masks = crop_mask(masks, bboxes) # CHW
40 | return masks.gt_(0.5)
41 |
42 |
43 | def process_mask(protos, masks_in, bboxes, shape, upsample=False):
44 | """
45 | Crop before upsample.
46 | protos: [mask_dim, mask_h, mask_w]
47 | masks_in: [n, mask_dim], n is number of masks after nms
48 | bboxes: [n, 4], n is number of masks after nms
49 | shape: input image size, (h, w)
50 |
51 | return: binary masks, [n, h, w]
52 | """
53 |
54 | c, mh, mw = protos.shape # CHW
55 | ih, iw = shape
56 | masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) # CHW
57 |
58 | downsampled_bboxes = bboxes.clone()
59 | downsampled_bboxes[:, 0] *= mw / iw
60 | downsampled_bboxes[:, 2] *= mw / iw
61 | downsampled_bboxes[:, 3] *= mh / ih
62 | downsampled_bboxes[:, 1] *= mh / ih
63 |
64 | masks = crop_mask(masks, downsampled_bboxes) # CHW
65 | if upsample:
66 | masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW
67 | return masks.gt_(0.5)
68 |
69 |
70 | def scale_image(im1_shape, masks, im0_shape, ratio_pad=None):
71 | """
72 | im1_shape: model input shape, [h, w]
73 | im0_shape: original image shape, [h, w, 3]
74 | masks: [h, w, num]
75 | """
76 | # Rescale coordinates (xyxy) from im1_shape to im0_shape
77 | if ratio_pad is None: # calculate from im0_shape
78 | gain = min(im1_shape[0] / im0_shape[0], im1_shape[1] / im0_shape[1]) # gain = old / new
79 | pad = (im1_shape[1] - im0_shape[1] * gain) / 2, (im1_shape[0] - im0_shape[0] * gain) / 2 # wh padding
80 | else:
81 | pad = ratio_pad[1]
82 | top, left = int(pad[1]), int(pad[0]) # y, x
83 | bottom, right = int(im1_shape[0] - pad[1]), int(im1_shape[1] - pad[0])
84 |
85 | if len(masks.shape) < 2:
86 | raise ValueError(f'"len of masks shape" should be 2 or 3, but got {len(masks.shape)}')
87 | masks = masks[top:bottom, left:right]
88 | # masks = masks.permute(2, 0, 1).contiguous()
89 | # masks = F.interpolate(masks[None], im0_shape[:2], mode='bilinear', align_corners=False)[0]
90 | # masks = masks.permute(1, 2, 0).contiguous()
91 | masks = cv2.resize(masks, (im0_shape[1], im0_shape[0]))
92 |
93 | if len(masks.shape) == 2:
94 | masks = masks[:, :, None]
95 | return masks
96 |
97 |
98 | def mask_iou(mask1, mask2, eps=1e-7):
99 | """
100 | mask1: [N, n] N is the number of predicted objects
101 | mask2: [M, n] M is the number of gt objects
102 | Note: n means image_w x image_h
103 |
104 | return: masks iou, [N, M]
105 | """
106 | intersection = torch.matmul(mask1, mask2.t()).clamp(0)
107 | union = (mask1.sum(1)[:, None] + mask2.sum(1)[None]) - intersection # (area1 + area2) - intersection
108 | return intersection / (union + eps)
109 |
110 |
111 | def masks_iou(mask1, mask2, eps=1e-7):
112 | """
113 | mask1: [N, n] N is the number of predicted objects
114 | mask2: [N, n] N is the number of gt objects, paired 1:1 with mask1
115 | Note: n means image_w x image_h
116 |
117 | return: masks iou, (N, )
118 | """
119 | intersection = (mask1 * mask2).sum(1).clamp(0) # (N, )
120 | union = mask1.sum(1) + mask2.sum(1) - intersection # (area1 + area2) - intersection, shape (N, )
121 | return intersection / (union + eps)
122 |
123 |
124 | def masks2segments(masks, strategy='largest'):
125 | # Convert masks(n,160,160) into segments(n,xy)
126 | segments = []
127 | for x in masks.int().cpu().numpy().astype('uint8'):
128 | c = cv2.findContours(x, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]
129 | if c:
130 | if strategy == 'concat': # concatenate all segments
131 | c = np.concatenate([x.reshape(-1, 2) for x in c])
132 | elif strategy == 'largest': # select largest segment
133 | c = np.array(c[np.array([len(x) for x in c]).argmax()]).reshape(-1, 2)
134 | else:
135 | c = np.zeros((0, 2)) # no segments found
136 | segments.append(c.astype('float32'))
137 | return segments
138 |
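
An illustrative run of the mask pipeline above with random tensors standing in for real model outputs:

```python
import torch

from utils.segment.general import masks2segments, process_mask

protos = torch.rand(32, 160, 160)  # [mask_dim, mask_h, mask_w] prototype masks
masks_in = torch.rand(3, 32)       # [n, mask_dim] coefficients for 3 detections after NMS
bboxes = torch.tensor([[80.0, 80.0, 480.0, 480.0]]).repeat(3, 1)  # [n, 4] xyxy in 640x640 input pixels

masks = process_mask(protos, masks_in, bboxes, shape=(640, 640), upsample=True)  # [n, 640, 640] binary
segments = masks2segments(masks)   # one (m, 2) contour polygon per mask
print(masks.shape, [s.shape for s in segments])
```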
--------------------------------------------------------------------------------
/utils/segment/metrics.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | """
3 | Model validation metrics
4 | """
5 |
6 | import numpy as np
7 |
8 | from ..metrics import ap_per_class
9 |
10 |
11 | def fitness(x):
12 | # Model fitness as a weighted combination of metrics
13 | w = [0.0, 0.0, 0.1, 0.9, 0.0, 0.0, 0.1, 0.9]
14 | return (x[:, :8] * w).sum(1)
15 |
16 |
17 | def ap_per_class_box_and_mask(
18 | tp_m,
19 | tp_b,
20 | conf,
21 | pred_cls,
22 | target_cls,
23 | plot=False,
24 | save_dir=".",
25 | names=(),
26 | ):
27 | """
28 | Args:
29 | tp_b: tp of boxes.
30 | tp_m: tp of masks.
31 | for other arguments, see `ap_per_class`.
32 | """
33 | results_boxes = ap_per_class(tp_b,
34 | conf,
35 | pred_cls,
36 | target_cls,
37 | plot=plot,
38 | save_dir=save_dir,
39 | names=names,
40 | prefix="Box")[2:]
41 | results_masks = ap_per_class(tp_m,
42 | conf,
43 | pred_cls,
44 | target_cls,
45 | plot=plot,
46 | save_dir=save_dir,
47 | names=names,
48 | prefix="Mask")[2:]
49 |
50 | results = {
51 | "boxes": {
52 | "p": results_boxes[0],
53 | "r": results_boxes[1],
54 | "ap": results_boxes[3],
55 | "f1": results_boxes[2],
56 | "ap_class": results_boxes[4]},
57 | "masks": {
58 | "p": results_masks[0],
59 | "r": results_masks[1],
60 | "ap": results_masks[3],
61 | "f1": results_masks[2],
62 | "ap_class": results_masks[4]}}
63 | return results
64 |
65 |
66 | class Metric:
67 |
68 | def __init__(self) -> None:
69 | self.p = [] # (nc, )
70 | self.r = [] # (nc, )
71 | self.f1 = [] # (nc, )
72 | self.all_ap = [] # (nc, 10)
73 | self.ap_class_index = [] # (nc, )
74 |
75 | @property
76 | def ap50(self):
77 | """AP@0.5 of all classes.
78 | Return:
79 | (nc, ) or [].
80 | """
81 | return self.all_ap[:, 0] if len(self.all_ap) else []
82 |
83 | @property
84 | def ap(self):
85 | """AP@0.5:0.95
86 | Return:
87 | (nc, ) or [].
88 | """
89 | return self.all_ap.mean(1) if len(self.all_ap) else []
90 |
91 | @property
92 | def mp(self):
93 | """mean precision of all classes.
94 | Return:
95 | float.
96 | """
97 | return self.p.mean() if len(self.p) else 0.0
98 |
99 | @property
100 | def mr(self):
101 | """mean recall of all classes.
102 | Return:
103 | float.
104 | """
105 | return self.r.mean() if len(self.r) else 0.0
106 |
107 | @property
108 | def map50(self):
109 | """Mean AP@0.5 of all classes.
110 | Return:
111 | float.
112 | """
113 | return self.all_ap[:, 0].mean() if len(self.all_ap) else 0.0
114 |
115 | @property
116 | def map(self):
117 | """Mean AP@0.5:0.95 of all classes.
118 | Return:
119 | float.
120 | """
121 | return self.all_ap.mean() if len(self.all_ap) else 0.0
122 |
123 | def mean_results(self):
124 | """Mean of results, return mp, mr, map50, map"""
125 | return (self.mp, self.mr, self.map50, self.map)
126 |
127 | def class_result(self, i):
128 | """class-aware result, return p[i], r[i], ap50[i], ap[i]"""
129 | return (self.p[i], self.r[i], self.ap50[i], self.ap[i])
130 |
131 | def get_maps(self, nc):
132 | maps = np.zeros(nc) + self.map
133 | for i, c in enumerate(self.ap_class_index):
134 | maps[c] = self.ap[i]
135 | return maps
136 |
137 | def update(self, results):
138 | """
139 | Args:
140 | results: tuple(p, r, ap, f1, ap_class)
141 | """
142 | p, r, all_ap, f1, ap_class_index = results
143 | self.p = p
144 | self.r = r
145 | self.all_ap = all_ap
146 | self.f1 = f1
147 | self.ap_class_index = ap_class_index
148 |
149 |
150 | class Metrics:
151 | """Metric for boxes and masks."""
152 |
153 | def __init__(self) -> None:
154 | self.metric_box = Metric()
155 | self.metric_mask = Metric()
156 |
157 | def update(self, results):
158 | """
159 | Args:
160 | results: Dict{'boxes': Dict{}, 'masks': Dict{}}
161 | """
162 | self.metric_box.update(list(results["boxes"].values()))
163 | self.metric_mask.update(list(results["masks"].values()))
164 |
165 | def mean_results(self):
166 | return self.metric_box.mean_results() + self.metric_mask.mean_results()
167 |
168 | def class_result(self, i):
169 | return self.metric_box.class_result(i) + self.metric_mask.class_result(i)
170 |
171 | def get_maps(self, nc):
172 | return self.metric_box.get_maps(nc) + self.metric_mask.get_maps(nc)
173 |
174 | @property
175 | def ap_class_index(self):
176 | # boxes and masks have the same ap_class_index
177 | return self.metric_box.ap_class_index
178 |
179 |
180 | KEYS = [
181 | "train/box_loss",
182 | "train/seg_loss", # train loss
183 | "train/obj_loss",
184 | "train/cls_loss",
185 | "metrics/precision(B)",
186 | "metrics/recall(B)",
187 | "metrics/mAP_0.5(B)",
188 | "metrics/mAP_0.5:0.95(B)", # metrics
189 | "metrics/precision(M)",
190 | "metrics/recall(M)",
191 | "metrics/mAP_0.5(M)",
192 | "metrics/mAP_0.5:0.95(M)", # metrics
193 | "val/box_loss",
194 | "val/seg_loss", # val loss
195 | "val/obj_loss",
196 | "val/cls_loss",
197 | "x/lr0",
198 | "x/lr1",
199 | "x/lr2",]
200 |
201 | BEST_KEYS = [
202 | "best/epoch",
203 | "best/precision(B)",
204 | "best/recall(B)",
205 | "best/mAP_0.5(B)",
206 | "best/mAP_0.5:0.95(B)",
207 | "best/precision(M)",
208 | "best/recall(M)",
209 | "best/mAP_0.5(M)",
210 | "best/mAP_0.5:0.95(M)",]
211 |
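
A hedged end-to-end sketch of the metric classes above, feeding random true/false-positive flags (and placeholder class names) in place of real validation output:

```python
import numpy as np

from utils.segment.metrics import Metrics, ap_per_class_box_and_mask

n = 200                              # pooled detections across a hypothetical dataset
tp_b = np.random.rand(n, 10) > 0.5   # box TP flags at 10 IoU thresholds
tp_m = np.random.rand(n, 10) > 0.5   # mask TP flags
conf = np.random.rand(n)
pred_cls = np.random.randint(0, 3, n)
target_cls = np.random.randint(0, 3, n)

results = ap_per_class_box_and_mask(tp_m, tp_b, conf, pred_cls, target_cls, names={0: 'a', 1: 'b', 2: 'c'})
metrics = Metrics()
metrics.update(results)
print(metrics.mean_results())  # (mp, mr, map50, map) for boxes followed by masks
```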
--------------------------------------------------------------------------------
/utils/triton.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | """ Utils to interact with the Triton Inference Server
3 | """
4 |
5 | import typing
6 | from urllib.parse import urlparse
7 |
8 | import torch
9 |
10 |
11 | class TritonRemoteModel:
12 | """ A wrapper over a model served by the Triton Inference Server. It can
13 | be configured to communicate over GRPC or HTTP. It accepts Torch Tensors
14 | as input and returns them as outputs.
15 | """
16 |
17 | def __init__(self, url: str):
18 | """
19 | Keyword arguments:
20 | url: Fully qualified address of the Triton server, e.g. grpc://localhost:8000
21 | """
22 |
23 | parsed_url = urlparse(url)
24 | if parsed_url.scheme == "grpc":
25 | from tritonclient.grpc import InferenceServerClient, InferInput
26 |
27 | self.client = InferenceServerClient(parsed_url.netloc) # Triton GRPC client
28 | model_repository = self.client.get_model_repository_index()
29 | self.model_name = model_repository.models[0].name
30 | self.metadata = self.client.get_model_metadata(self.model_name, as_json=True)
31 |
32 | def create_input_placeholders() -> typing.List[InferInput]:
33 | return [
34 | InferInput(i['name'], [int(s) for s in i["shape"]], i['datatype']) for i in self.metadata['inputs']]
35 |
36 | else:
37 | from tritonclient.http import InferenceServerClient, InferInput
38 |
39 | self.client = InferenceServerClient(parsed_url.netloc) # Triton HTTP client
40 | model_repository = self.client.get_model_repository_index()
41 | self.model_name = model_repository[0]['name']
42 | self.metadata = self.client.get_model_metadata(self.model_name)
43 |
44 | def create_input_placeholders() -> typing.List[InferInput]:
45 | return [
46 | InferInput(i['name'], [int(s) for s in i["shape"]], i['datatype']) for i in self.metadata['inputs']]
47 |
48 | self._create_input_placeholders_fn = create_input_placeholders
49 |
50 | @property
51 | def runtime(self):
52 | """Returns the model runtime"""
53 | return self.metadata.get("backend", self.metadata.get("platform"))
54 |
55 | def __call__(self, *args, **kwargs) -> typing.Union[torch.Tensor, typing.Tuple[torch.Tensor, ...]]:
56 | """ Invokes the model. Parameters can be provided via args or kwargs.
57 | args, if provided, are assumed to match the order of inputs of the model.
58 | kwargs are matched with the model input names.
59 | """
60 | inputs = self._create_inputs(*args, **kwargs)
61 | response = self.client.infer(model_name=self.model_name, inputs=inputs)
62 | result = []
63 | for output in self.metadata['outputs']:
64 | tensor = torch.as_tensor(response.as_numpy(output['name']))
65 | result.append(tensor)
66 | return result[0] if len(result) == 1 else result
67 |
68 | def _create_inputs(self, *args, **kwargs):
69 | args_len, kwargs_len = len(args), len(kwargs)
70 | if not args_len and not kwargs_len:
71 | raise RuntimeError("No inputs provided.")
72 | if args_len and kwargs_len:
73 | raise RuntimeError("Cannot specify args and kwargs at the same time")
74 |
75 | placeholders = self._create_input_placeholders_fn()
76 | if args_len:
77 | if args_len != len(placeholders):
78 | raise RuntimeError(f"Expected {len(placeholders)} inputs, got {args_len}.")
79 | for input, value in zip(placeholders, args):
80 | input.set_data_from_numpy(value.cpu().numpy())
81 | else:
82 | for input in placeholders:
83 | value = kwargs[input.name]
84 | input.set_data_from_numpy(value.cpu().numpy())
85 | return placeholders
86 |
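
A hedged usage sketch for the wrapper above; it assumes a Triton server is reachable at the given address and serves exactly one model whose first input is an NCHW image tensor:

```python
import torch

from utils.triton import TritonRemoteModel

model = TritonRemoteModel("http://localhost:8000")  # or "grpc://localhost:8001" for the GRPC client
im = torch.zeros(1, 3, 640, 640)                    # dummy input matching the model's expected shape
out = model(im)                                     # positional args are matched to model inputs in order

print(model.runtime)  # backend/platform reported by the server
print(out.shape if isinstance(out, torch.Tensor) else [o.shape for o in out])
```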
--------------------------------------------------------------------------------