├── .dockerignore ├── .gitattributes ├── .github ├── ISSUE_TEMPLATE │ ├── bug-report.md │ ├── feature-request.md │ └── question.md ├── dependabot.yml └── workflows │ ├── ci-testing.yml │ ├── codeql-analysis.yml │ ├── greetings.yml │ ├── rebase.yml │ └── stale.yml ├── .gitignore ├── Dockerfile ├── LICENSE ├── README.md ├── data ├── coco.yaml ├── coco128.yaml ├── hyp.finetune.yaml ├── hyp.scratch.yaml ├── images │ ├── bus.jpg │ └── zidane.jpg ├── scripts │ ├── get_coco.sh │ └── get_voc.sh └── voc.yaml ├── dataset.txt ├── detect.py ├── detect_new.py ├── export_no_focus.py ├── hubconf.py ├── models ├── __init__.py ├── common.py ├── experimental.py ├── export.py ├── hub │ ├── anchors.yaml │ ├── yolov3-spp.yaml │ ├── yolov3-tiny.yaml │ ├── yolov3.yaml │ ├── yolov5-fpn.yaml │ ├── yolov5-p2.yaml │ ├── yolov5-p6.yaml │ ├── yolov5-p7.yaml │ └── yolov5-panet.yaml ├── yolo.py ├── yolov5_rknn_640x640.yaml ├── yolov5l.yaml ├── yolov5m.yaml ├── yolov5s.yaml └── yolov5x.yaml ├── onnx2rknn.py ├── onnx_detect.py ├── onnx_detect_fast.py ├── onnx_to_rknn.py ├── requirements.txt ├── rknn_detect_for_yolov5_original.py ├── rknn_detect_yolov5.py ├── test.py ├── train.py ├── utils ├── __init__.py ├── activations.py ├── autoanchor.py ├── datasets.py ├── general.py ├── google_app_engine │ ├── Dockerfile │ ├── additional_requirements.txt │ └── app.yaml ├── google_utils.py ├── loss.py ├── metrics.py ├── plots.py └── torch_utils.py ├── weights ├── download_weights.sh └── yolov5s.rknn └── yolov5_original ├── data ├── coco.yaml ├── coco128.yaml ├── hyp.finetune.yaml ├── hyp.scratch.yaml └── voc.yaml ├── dataset.txt ├── detect.py ├── detect_new.py ├── export_no_focus.py ├── hubconf.py ├── models ├── __init__.py ├── common.py ├── experimental.py ├── export.py ├── hub │ ├── anchors.yaml │ ├── yolov3-spp.yaml │ ├── yolov3-tiny.yaml │ ├── yolov3.yaml │ ├── yolov5-fpn.yaml │ ├── yolov5-p2.yaml │ ├── yolov5-p6.yaml │ ├── yolov5-p7.yaml │ └── yolov5-panet.yaml ├── yolo.py ├── yolov5_rknn_640x640.yaml ├── yolov5l.yaml ├── yolov5m.yaml ├── yolov5s.yaml └── yolov5x.yaml ├── onnx2rknn.py ├── onnx_test.py ├── rknn_detect.py ├── test.py ├── train.py └── utils ├── __init__.py ├── activations.py ├── autoanchor.py ├── datasets.py ├── general.py ├── google_app_engine ├── Dockerfile ├── additional_requirements.txt └── app.yaml ├── google_utils.py ├── loss.py ├── metrics.py ├── plots.py ├── torch_utils.py └── yolo_tools.py /.dockerignore: -------------------------------------------------------------------------------- 1 | # Repo-specific DockerIgnore ------------------------------------------------------------------------------------------- 2 | #.git 3 | .cache 4 | .idea 5 | runs 6 | output 7 | coco 8 | storage.googleapis.com 9 | 10 | data/samples/* 11 | **/results*.txt 12 | *.jpg 13 | 14 | # Neural Network weights ----------------------------------------------------------------------------------------------- 15 | **/*.weights 16 | **/*.pt 17 | **/*.pth 18 | **/*.onnx 19 | **/*.mlmodel 20 | **/*.torchscript 21 | 22 | 23 | # Below Copied From .gitignore ----------------------------------------------------------------------------------------- 24 | # Below Copied From .gitignore ----------------------------------------------------------------------------------------- 25 | 26 | 27 | # GitHub Python GitIgnore ---------------------------------------------------------------------------------------------- 28 | # Byte-compiled / optimized / DLL files 29 | __pycache__/ 30 | *.py[cod] 31 | *$py.class 32 | 33 | # C extensions 34 | *.so 35 | 36 | # 
Distribution / packaging 37 | .Python 38 | env/ 39 | build/ 40 | develop-eggs/ 41 | dist/ 42 | downloads/ 43 | eggs/ 44 | .eggs/ 45 | lib/ 46 | lib64/ 47 | parts/ 48 | sdist/ 49 | var/ 50 | wheels/ 51 | *.egg-info/ 52 | wandb/ 53 | .installed.cfg 54 | *.egg 55 | 56 | # PyInstaller 57 | # Usually these files are written by a python script from a template 58 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 59 | *.manifest 60 | *.spec 61 | 62 | # Installer logs 63 | pip-log.txt 64 | pip-delete-this-directory.txt 65 | 66 | # Unit test / coverage reports 67 | htmlcov/ 68 | .tox/ 69 | .coverage 70 | .coverage.* 71 | .cache 72 | nosetests.xml 73 | coverage.xml 74 | *.cover 75 | .hypothesis/ 76 | 77 | # Translations 78 | *.mo 79 | *.pot 80 | 81 | # Django stuff: 82 | *.log 83 | local_settings.py 84 | 85 | # Flask stuff: 86 | instance/ 87 | .webassets-cache 88 | 89 | # Scrapy stuff: 90 | .scrapy 91 | 92 | # Sphinx documentation 93 | docs/_build/ 94 | 95 | # PyBuilder 96 | target/ 97 | 98 | # Jupyter Notebook 99 | .ipynb_checkpoints 100 | 101 | # pyenv 102 | .python-version 103 | 104 | # celery beat schedule file 105 | celerybeat-schedule 106 | 107 | # SageMath parsed files 108 | *.sage.py 109 | 110 | # dotenv 111 | .env 112 | 113 | # virtualenv 114 | .venv* 115 | venv*/ 116 | ENV*/ 117 | 118 | # Spyder project settings 119 | .spyderproject 120 | .spyproject 121 | 122 | # Rope project settings 123 | .ropeproject 124 | 125 | # mkdocs documentation 126 | /site 127 | 128 | # mypy 129 | .mypy_cache/ 130 | 131 | 132 | # https://github.com/github/gitignore/blob/master/Global/macOS.gitignore ----------------------------------------------- 133 | 134 | # General 135 | .DS_Store 136 | .AppleDouble 137 | .LSOverride 138 | 139 | # Icon must end with two \r 140 | Icon 141 | Icon? 
142 | 
143 | # Thumbnails
144 | ._*
145 | 
146 | # Files that might appear in the root of a volume
147 | .DocumentRevisions-V100
148 | .fseventsd
149 | .Spotlight-V100
150 | .TemporaryItems
151 | .Trashes
152 | .VolumeIcon.icns
153 | .com.apple.timemachine.donotpresent
154 | 
155 | # Directories potentially created on remote AFP share
156 | .AppleDB
157 | .AppleDesktop
158 | Network Trash Folder
159 | Temporary Items
160 | .apdisk
161 | 
162 | 
163 | # https://github.com/github/gitignore/blob/master/Global/JetBrains.gitignore
164 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
165 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
166 | 
167 | # User-specific stuff:
168 | .idea/*
169 | .idea/**/workspace.xml
170 | .idea/**/tasks.xml
171 | .idea/dictionaries
172 | .html # Bokeh Plots
173 | .pg # TensorFlow Frozen Graphs
174 | .avi # videos
175 | 
176 | # Sensitive or high-churn files:
177 | .idea/**/dataSources/
178 | .idea/**/dataSources.ids
179 | .idea/**/dataSources.local.xml
180 | .idea/**/sqlDataSources.xml
181 | .idea/**/dynamic.xml
182 | .idea/**/uiDesigner.xml
183 | 
184 | # Gradle:
185 | .idea/**/gradle.xml
186 | .idea/**/libraries
187 | 
188 | # CMake
189 | cmake-build-debug/
190 | cmake-build-release/
191 | 
192 | # Mongo Explorer plugin:
193 | .idea/**/mongoSettings.xml
194 | 
195 | ## File-based project format:
196 | *.iws
197 | 
198 | ## Plugin-specific files:
199 | 
200 | # IntelliJ
201 | out/
202 | 
203 | # mpeltonen/sbt-idea plugin
204 | .idea_modules/
205 | 
206 | # JIRA plugin
207 | atlassian-ide-plugin.xml
208 | 
209 | # Cursive Clojure plugin
210 | .idea/replstate.xml
211 | 
212 | # Crashlytics plugin (for Android Studio and IntelliJ)
213 | com_crashlytics_export_strings.xml
214 | crashlytics.properties
215 | crashlytics-build.properties
216 | fabric.properties
217 | 
-------------------------------------------------------------------------------- /.gitattributes: --------------------------------------------------------------------------------
1 | # this drops notebooks from GitHub language stats
2 | *.ipynb linguist-vendored
3 | 
-------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug-report.md: --------------------------------------------------------------------------------
1 | ---
2 | name: "🐛 Bug report"
3 | about: Create a report to help us improve
4 | title: ''
5 | labels: bug
6 | assignees: ''
7 | 
8 | ---
9 | 
10 | Before submitting a bug report, please be aware that your issue **must be reproducible** with all of the following, otherwise it is non-actionable and we cannot help you:
11 | - **Current repo**: run `git fetch && git status -uno` to check and `git pull` to update repo
12 | - **Common dataset**: coco.yaml or coco128.yaml
13 | - **Common environment**: Colab, Google Cloud, or Docker image. See https://github.com/ultralytics/yolov5#environments
14 | 
15 | If this is a custom dataset/training question you **must include** your `train*.jpg`, `test*.jpg` and `results.png` figures, or we cannot help you. You can generate these with `utils.plot_results()`.
16 | 
17 | 
18 | ## 🐛 Bug
19 | A clear and concise description of what the bug is. 
20 | 
21 | 
22 | ## To Reproduce (REQUIRED)
23 | 
24 | Input:
25 | ```
26 | import torch
27 | 
28 | a = torch.tensor([5])
29 | c = a / 0
30 | ```
31 | 
32 | Output:
33 | ```
34 | Traceback (most recent call last):
35 | File "/Users/glennjocher/opt/anaconda3/envs/env1/lib/python3.7/site-packages/IPython/core/interactiveshell.py", line 3331, in run_code
36 | exec(code_obj, self.user_global_ns, self.user_ns)
37 | File "<...>", line 5, in <module>
38 | c = a / 0
39 | RuntimeError: ZeroDivisionError
40 | ```
41 | 
42 | 
43 | ## Expected behavior
44 | A clear and concise description of what you expected to happen.
45 | 
46 | 
47 | ## Environment
48 | If applicable, add screenshots to help explain your problem.
49 | 
50 | - OS: [e.g. Ubuntu]
51 | - GPU [e.g. 2080 Ti]
52 | 
53 | 
54 | ## Additional context
55 | Add any other context about the problem here.
56 | 
-------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature-request.md: --------------------------------------------------------------------------------
1 | ---
2 | name: "🚀 Feature request"
3 | about: Suggest an idea for this project
4 | title: ''
5 | labels: enhancement
6 | assignees: ''
7 | 
8 | ---
9 | 
10 | ## 🚀 Feature
11 | 
12 | 
13 | ## Motivation
14 | 
15 | 
16 | 
17 | ## Pitch
18 | 
19 | 
20 | 
21 | ## Alternatives
22 | 
23 | 
24 | 
25 | ## Additional context
26 | 
27 | 
28 | 
-------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/question.md: --------------------------------------------------------------------------------
1 | ---
2 | name: "❓Question"
3 | about: Ask a general question
4 | title: ''
5 | labels: question
6 | assignees: ''
7 | 
8 | ---
9 | 
10 | ## ❔Question
11 | 
12 | 
13 | ## Additional context
14 | 
-------------------------------------------------------------------------------- /.github/dependabot.yml: --------------------------------------------------------------------------------
1 | version: 2
2 | updates:
3 | - package-ecosystem: pip
4 | directory: "/"
5 | schedule:
6 | interval: weekly
7 | time: "04:00"
8 | open-pull-requests-limit: 10
9 | reviewers:
10 | - glenn-jocher
11 | labels:
12 | - dependencies
13 | 
-------------------------------------------------------------------------------- /.github/workflows/ci-testing.yml: --------------------------------------------------------------------------------
1 | name: CI CPU testing
2 | 
3 | on: # https://help.github.com/en/actions/reference/events-that-trigger-workflows
4 | push:
5 | branches: [ master ]
6 | pull_request:
7 | # The branches below must be a subset of the branches above
8 | branches: [ master ]
9 | schedule:
10 | - cron: '0 0 * * *' # Runs at 00:00 UTC every day
11 | 
12 | jobs:
13 | cpu-tests:
14 | 
15 | runs-on: ${{ matrix.os }}
16 | strategy:
17 | fail-fast: false
18 | matrix:
19 | os: [ubuntu-latest, macos-latest, windows-latest]
20 | python-version: [3.8]
21 | model: ['yolov5s'] # models to test
22 | 
23 | # Timeout: https://stackoverflow.com/a/59076067/4521646
24 | timeout-minutes: 50
25 | steps:
26 | - uses: actions/checkout@v2
27 | - name: Set up Python ${{ matrix.python-version }}
28 | uses: actions/setup-python@v2
29 | with:
30 | python-version: ${{ matrix.python-version }}
31 | 
32 | # Note: This uses an internal pip API and may not always work
33 | # https://github.com/actions/cache/blob/master/examples.md#multiple-oss-in-a-workflow
34 | - name: Get pip cache
35 | id: pip-cache
36 | run: |
37 | python -c "from pip._internal.locations import USER_CACHE_DIR; print('::set-output name=dir::' 
+ USER_CACHE_DIR)" 38 | 39 | - name: Cache pip 40 | uses: actions/cache@v1 41 | with: 42 | path: ${{ steps.pip-cache.outputs.dir }} 43 | key: ${{ runner.os }}-${{ matrix.python-version }}-pip-${{ hashFiles('requirements.txt') }} 44 | restore-keys: | 45 | ${{ runner.os }}-${{ matrix.python-version }}-pip- 46 | 47 | - name: Install dependencies 48 | run: | 49 | python -m pip install --upgrade pip 50 | pip install -qr requirements.txt -f https://download.pytorch.org/whl/cpu/torch_stable.html 51 | pip install -q onnx 52 | python --version 53 | pip --version 54 | pip list 55 | shell: bash 56 | 57 | - name: Download data 58 | run: | 59 | # curl -L -o tmp.zip https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip 60 | # unzip -q tmp.zip -d ../ 61 | # rm tmp.zip 62 | 63 | - name: Tests workflow 64 | run: | 65 | # export PYTHONPATH="$PWD" # to run '$ python *.py' files in subdirectories 66 | di=cpu # inference devices # define device 67 | 68 | # train 69 | python train.py --img 128 --batch 16 --weights weights/${{ matrix.model }}.pt --cfg models/${{ matrix.model }}.yaml --epochs 1 --device $di 70 | # detect 71 | python detect.py --weights weights/${{ matrix.model }}.pt --device $di 72 | python detect.py --weights runs/train/exp/weights/last.pt --device $di 73 | # test 74 | python test.py --img 128 --batch 16 --weights weights/${{ matrix.model }}.pt --device $di 75 | python test.py --img 128 --batch 16 --weights runs/train/exp/weights/last.pt --device $di 76 | 77 | python hubconf.py # hub 78 | python models/yolo.py --cfg models/${{ matrix.model }}.yaml # inspect 79 | python models/export.py --img 128 --batch 1 --weights weights/${{ matrix.model }}.pt # export 80 | shell: bash 81 | -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | # This action runs GitHub's industry-leading static analysis engine, CodeQL, against a repository's source code to find security vulnerabilities. 2 | # https://github.com/github/codeql-action 3 | 4 | name: "CodeQL" 5 | 6 | on: 7 | schedule: 8 | - cron: '0 0 1 * *' # Runs at 00:00 UTC on the 1st of every month 9 | 10 | jobs: 11 | analyze: 12 | name: Analyze 13 | runs-on: ubuntu-latest 14 | 15 | strategy: 16 | fail-fast: false 17 | matrix: 18 | language: [ 'python' ] 19 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ] 20 | # Learn more: 21 | # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed 22 | 23 | steps: 24 | - name: Checkout repository 25 | uses: actions/checkout@v2 26 | 27 | # Initializes the CodeQL tools for scanning. 28 | - name: Initialize CodeQL 29 | uses: github/codeql-action/init@v1 30 | with: 31 | languages: ${{ matrix.language }} 32 | # If you wish to specify custom queries, you can do so here or in a config file. 33 | # By default, queries listed here will override any specified in a config file. 34 | # Prefix the list here with "+" to use these queries and those in the config file. 35 | # queries: ./path/to/local/query, your-org/your-repo/queries@main 36 | 37 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 
38 | # If this step fails, then you should remove it and run the build manually (see below) 39 | - name: Autobuild 40 | uses: github/codeql-action/autobuild@v1 41 | 42 | # ℹ️ Command-line programs to run using the OS shell. 43 | # 📚 https://git.io/JvXDl 44 | 45 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines 46 | # and modify them (or add more) to build your code if your project 47 | # uses a compiled language 48 | 49 | #- run: | 50 | # make bootstrap 51 | # make release 52 | 53 | - name: Perform CodeQL Analysis 54 | uses: github/codeql-action/analyze@v1 55 | -------------------------------------------------------------------------------- /.github/workflows/greetings.yml: -------------------------------------------------------------------------------- 1 | name: Greetings 2 | 3 | on: [pull_request_target, issues] 4 | 5 | jobs: 6 | greeting: 7 | runs-on: ubuntu-latest 8 | steps: 9 | - uses: actions/first-interaction@v1 10 | with: 11 | repo-token: ${{ secrets.GITHUB_TOKEN }} 12 | pr-message: | 13 | 👋 Hello @${{ github.actor }}, thank you for submitting a 🚀 PR! To allow your work to be integrated as seamlessly as possible, we advise you to: 14 | - ✅ Verify your PR is **up-to-date with origin/master.** If your PR is behind origin/master update by running the following, replacing 'feature' with the name of your local branch: 15 | ```bash 16 | git remote add upstream https://github.com/ultralytics/yolov5.git 17 | git fetch upstream 18 | git checkout feature # <----- replace 'feature' with local branch name 19 | git rebase upstream/master 20 | git push -u origin -f 21 | ``` 22 | - ✅ Verify all Continuous Integration (CI) **checks are passing**. 23 | - ✅ Reduce changes to the absolute **minimum** required for your bug fix or feature addition. _"It is not daily increase but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ -Bruce Lee 24 | 25 | issue-message: | 26 | 👋 Hello @${{ github.actor }}, thank you for your interest in 🚀 YOLOv5! Please visit our ⭐️ [Tutorials](https://github.com/ultralytics/yolov5/wiki#tutorials) to get started, where you can find quickstart guides for simple tasks like [Custom Data Training](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data) all the way to advanced concepts like [Hyperparameter Evolution](https://github.com/ultralytics/yolov5/issues/607). 27 | 28 | If this is a 🐛 Bug Report, please provide screenshots and **minimum viable code to reproduce your issue**, otherwise we can not help you. 29 | 30 | If this is a custom training ❓ Question, please provide as much information as possible, including dataset images, training logs, screenshots, and a public link to online [W&B logging](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data#visualize) if available. 31 | 32 | For business inquiries or professional support requests please visit https://www.ultralytics.com or email Glenn Jocher at glenn.jocher@ultralytics.com. 33 | 34 | ## Requirements 35 | 36 | Python 3.8 or later with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) dependencies installed, including `torch>=1.7`. 
To install run: 37 | ```bash 38 | $ pip install -r requirements.txt 39 | ``` 40 | 41 | ## Environments 42 | 43 | YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): 44 | 45 | - **Google Colab Notebook** with free GPU: Open In Colab 46 | - **Kaggle Notebook** with free GPU: [https://www.kaggle.com/ultralytics/yolov5](https://www.kaggle.com/ultralytics/yolov5) 47 | - **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) 48 | - **Docker Image** https://hub.docker.com/r/ultralytics/yolov5. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) ![Docker Pulls](https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker) 49 | 50 | ## Status 51 | 52 | ![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg) 53 | 54 | If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([test.py](https://github.com/ultralytics/yolov5/blob/master/test.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/models/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit. 55 | 56 | -------------------------------------------------------------------------------- /.github/workflows/rebase.yml: -------------------------------------------------------------------------------- 1 | name: Automatic Rebase 2 | # https://github.com/marketplace/actions/automatic-rebase 3 | 4 | on: 5 | issue_comment: 6 | types: [created] 7 | 8 | jobs: 9 | rebase: 10 | name: Rebase 11 | if: github.event.issue.pull_request != '' && contains(github.event.comment.body, '/rebase') 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: Checkout the latest code 15 | uses: actions/checkout@v2 16 | with: 17 | fetch-depth: 0 18 | - name: Automatic Rebase 19 | uses: cirrus-actions/rebase@1.3.1 20 | env: 21 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 22 | -------------------------------------------------------------------------------- /.github/workflows/stale.yml: -------------------------------------------------------------------------------- 1 | name: Close stale issues 2 | on: 3 | schedule: 4 | - cron: "0 0 * * *" 5 | 6 | jobs: 7 | stale: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/stale@v3 11 | with: 12 | repo-token: ${{ secrets.GITHUB_TOKEN }} 13 | stale-issue-message: 'This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.' 14 | stale-pr-message: 'This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.' 15 | days-before-stale: 30 16 | days-before-close: 5 17 | exempt-issue-labels: 'documentation,tutorial' 18 | operations-per-run: 100 # The maximum number of operations per run, used to control rate limiting. 
19 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Repo-specific GitIgnore ---------------------------------------------------------------------------------------------- 2 | *.jpg 3 | *.jpeg 4 | *.png 5 | *.bmp 6 | *.tif 7 | *.tiff 8 | *.heic 9 | *.JPG 10 | *.JPEG 11 | *.PNG 12 | *.BMP 13 | *.TIF 14 | *.TIFF 15 | *.HEIC 16 | *.mp4 17 | *.mov 18 | *.MOV 19 | *.avi 20 | *.data 21 | *.json 22 | 23 | *.cfg 24 | !cfg/yolov3*.cfg 25 | 26 | storage.googleapis.com 27 | runs/* 28 | data/* 29 | !data/images/zidane.jpg 30 | !data/images/bus.jpg 31 | !data/coco.names 32 | !data/coco_paper.names 33 | !data/coco.data 34 | !data/coco_*.data 35 | !data/coco_*.txt 36 | !data/trainvalno5k.shapes 37 | !data/*.sh 38 | 39 | pycocotools/* 40 | results*.txt 41 | gcp_test*.sh 42 | 43 | # Datasets ------------------------------------------------------------------------------------------------------------- 44 | coco/ 45 | coco128/ 46 | VOC/ 47 | 48 | # MATLAB GitIgnore ----------------------------------------------------------------------------------------------------- 49 | *.m~ 50 | *.mat 51 | !targets*.mat 52 | 53 | # Neural Network weights ----------------------------------------------------------------------------------------------- 54 | *.weights 55 | *.pt 56 | *.onnx 57 | *.mlmodel 58 | *.torchscript 59 | darknet53.conv.74 60 | yolov3-tiny.conv.15 61 | 62 | # GitHub Python GitIgnore ---------------------------------------------------------------------------------------------- 63 | # Byte-compiled / optimized / DLL files 64 | __pycache__/ 65 | *.py[cod] 66 | *$py.class 67 | 68 | # C extensions 69 | *.so 70 | 71 | # Distribution / packaging 72 | .Python 73 | env/ 74 | build/ 75 | develop-eggs/ 76 | dist/ 77 | downloads/ 78 | eggs/ 79 | .eggs/ 80 | lib/ 81 | lib64/ 82 | parts/ 83 | sdist/ 84 | var/ 85 | wheels/ 86 | *.egg-info/ 87 | wandb/ 88 | .installed.cfg 89 | *.egg 90 | 91 | 92 | # PyInstaller 93 | # Usually these files are written by a python script from a template 94 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
95 | *.manifest 96 | *.spec 97 | 98 | # Installer logs 99 | pip-log.txt 100 | pip-delete-this-directory.txt 101 | 102 | # Unit test / coverage reports 103 | htmlcov/ 104 | .tox/ 105 | .coverage 106 | .coverage.* 107 | .cache 108 | nosetests.xml 109 | coverage.xml 110 | *.cover 111 | .hypothesis/ 112 | 113 | # Translations 114 | *.mo 115 | *.pot 116 | 117 | # Django stuff: 118 | *.log 119 | local_settings.py 120 | 121 | # Flask stuff: 122 | instance/ 123 | .webassets-cache 124 | 125 | # Scrapy stuff: 126 | .scrapy 127 | 128 | # Sphinx documentation 129 | docs/_build/ 130 | 131 | # PyBuilder 132 | target/ 133 | 134 | # Jupyter Notebook 135 | .ipynb_checkpoints 136 | 137 | # pyenv 138 | .python-version 139 | 140 | # celery beat schedule file 141 | celerybeat-schedule 142 | 143 | # SageMath parsed files 144 | *.sage.py 145 | 146 | # dotenv 147 | .env 148 | 149 | # virtualenv 150 | .venv* 151 | venv*/ 152 | ENV*/ 153 | 154 | # Spyder project settings 155 | .spyderproject 156 | .spyproject 157 | 158 | # Rope project settings 159 | .ropeproject 160 | 161 | # mkdocs documentation 162 | /site 163 | 164 | # mypy 165 | .mypy_cache/ 166 | 167 | 168 | # https://github.com/github/gitignore/blob/master/Global/macOS.gitignore ----------------------------------------------- 169 | 170 | # General 171 | .DS_Store 172 | .AppleDouble 173 | .LSOverride 174 | 175 | # Icon must end with two \r 176 | Icon 177 | Icon? 178 | 179 | # Thumbnails 180 | ._* 181 | 182 | # Files that might appear in the root of a volume 183 | .DocumentRevisions-V100 184 | .fseventsd 185 | .Spotlight-V100 186 | .TemporaryItems 187 | .Trashes 188 | .VolumeIcon.icns 189 | .com.apple.timemachine.donotpresent 190 | 191 | # Directories potentially created on remote AFP share 192 | .AppleDB 193 | .AppleDesktop 194 | Network Trash Folder 195 | Temporary Items 196 | .apdisk 197 | 198 | 199 | # https://github.com/github/gitignore/blob/master/Global/JetBrains.gitignore 200 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm 201 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 202 | 203 | # User-specific stuff: 204 | .idea/* 205 | .idea/**/workspace.xml 206 | .idea/**/tasks.xml 207 | .idea/dictionaries 208 | .html # Bokeh Plots 209 | .pg # TensorFlow Frozen Graphs 210 | .avi # videos 211 | 212 | # Sensitive or high-churn files: 213 | .idea/**/dataSources/ 214 | .idea/**/dataSources.ids 215 | .idea/**/dataSources.local.xml 216 | .idea/**/sqlDataSources.xml 217 | .idea/**/dynamic.xml 218 | .idea/**/uiDesigner.xml 219 | 220 | # Gradle: 221 | .idea/**/gradle.xml 222 | .idea/**/libraries 223 | 224 | # CMake 225 | cmake-build-debug/ 226 | cmake-build-release/ 227 | 228 | # Mongo Explorer plugin: 229 | .idea/**/mongoSettings.xml 230 | 231 | ## File-based project format: 232 | *.iws 233 | 234 | ## Plugin-specific files: 235 | 236 | # IntelliJ 237 | out/ 238 | 239 | # mpeltonen/sbt-idea plugin 240 | .idea_modules/ 241 | 242 | # JIRA plugin 243 | atlassian-ide-plugin.xml 244 | 245 | # Cursive Clojure plugin 246 | .idea/replstate.xml 247 | 248 | # Crashlytics plugin (for Android Studio and IntelliJ) 249 | com_crashlytics_export_strings.xml 250 | crashlytics.properties 251 | crashlytics-build.properties 252 | fabric.properties 253 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Start FROM Nvidia PyTorch image 
https://ngc.nvidia.com/catalog/containers/nvidia:pytorch
2 | FROM nvcr.io/nvidia/pytorch:20.12-py3
3 | 
4 | # Install linux packages
5 | RUN apt update && apt install -y screen libgl1-mesa-glx
6 | 
7 | # Install python dependencies
8 | RUN python -m pip install --upgrade pip
9 | COPY requirements.txt .
10 | RUN pip install -r requirements.txt gsutil
11 | 
12 | # Create working directory
13 | RUN mkdir -p /usr/src/app
14 | WORKDIR /usr/src/app
15 | 
16 | # Copy contents
17 | COPY . /usr/src/app
18 | 
19 | # Copy weights
20 | #RUN python3 -c "from models import *; \
21 | #attempt_download('weights/yolov5s.pt'); \
22 | #attempt_download('weights/yolov5m.pt'); \
23 | #attempt_download('weights/yolov5l.pt')"
24 | 
25 | 
26 | # --------------------------------------------------- Extras Below ---------------------------------------------------
27 | 
28 | # Build and Push
29 | # t=ultralytics/yolov5:latest && sudo docker build -t $t . && sudo docker push $t
30 | # for v in {300..303}; do t=ultralytics/coco:v$v && sudo docker build -t $t . && sudo docker push $t; done
31 | 
32 | # Pull and Run
33 | # t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all $t
34 | 
35 | # Pull and Run with local directory access
36 | # t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all -v "$(pwd)"/coco:/usr/src/coco $t
37 | 
38 | # Kill all
39 | # sudo docker kill $(sudo docker ps -q)
40 | 
41 | # Kill all image-based
42 | # sudo docker kill $(sudo docker ps -a -q --filter ancestor=ultralytics/yolov5:latest)
43 | 
44 | # Bash into running container
45 | # sudo docker exec -it 5a9b5863d93d bash
46 | 
47 | # Bash into stopped container
48 | # id=5a9b5863d93d && sudo docker start $id && sudo docker exec -it $id bash
49 | 
50 | # Send weights to GCP
51 | # python -c "from utils.general import *; strip_optimizer('runs/train/exp0_*/weights/best.pt', 'tmp.pt')" && gsutil cp tmp.pt gs://*.pt
52 | 
53 | # Clean up
54 | # docker system prune -a --volumes
55 | 
-------------------------------------------------------------------------------- /README.md: --------------------------------------------------------------------------------
1 | Original repository: https://github.com/ultralytics/yolov5
2 | 
3 | # Usage of the modified yolov5
4 | 
5 | ```
6 | Requirements: python version >= 3.6
7 | 
8 | Train a model: python3 train.py
9 | 
10 | Export the model: python3 models/export.py --weights "xxx.pt"
11 | 
12 | Convert to rknn: python3 onnx_to_rknn.py
13 | 
14 | Run inference: python3 rknn_detect_yolov5.py
15 | ```
16 | Note: if the training size is not 640, the anchors are automatically re-generated by clustering. The resulting anchors are printed to the console during training, or can be read back from the attributes of the torch model at runtime (see the sketch after this README); if the anchors do not match, the detection results will be wrong.
17 | 
18 | Suggestion: if the training size is not 640, first obtain the anchors by clustering, write the new anchors into the model config file, and then train. This avoids inaccurate rknn predictions caused by dynamically obtained anchors. Do not forget to add --noautoanchor to the training arguments.
19 | 
20 | # Usage of the official upstream yolov5:
21 | 
22 | 1. Download the original yolov5 repository: https://github.com/ultralytics/yolov5
23 | 
24 | 2. Train a model
25 | 
26 | 3. Export the onnx model
27 | ```
28 | python export_no_focus.py --weights weights/yolov5s.pt --img-size 640 640
29 | Every size argument means width,height .............. every shape means height,width
30 | ```
31 | 4. Convert to an rknn model
32 | ```
33 | python onnx2rknn.py --onnx weights/yolov5s.onnx --precompile --original
34 | By default the rknn model is written to the same directory as the onnx model
35 | ```
36 | 5. rknn inference
37 | ```
38 | python rknn_detect_for_yolov5_original.py
39 | ```
40 | Of course you can also use my modified version in yolov5_original, which supports using xml annotation files directly
41 | 
42 | 
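The note in the README above says the effective anchors can be read back from the trained torch model. A minimal sketch of that check — weights/best.pt is a placeholder path, and it assumes a standard YOLOv5 checkpoint whose last submodule is the Detect() head:

```python
# Run from the repo root so the 'models' package referenced by the pickle resolves.
import torch

ckpt = torch.load('weights/best.pt', map_location='cpu')  # placeholder checkpoint path
detect = ckpt['model'].model[-1]                          # Detect() head is the last module
print(detect.anchor_grid.view(detect.nl, -1).tolist())    # pixel-space anchors, one row per output level
```

If the printed values differ from the anchors in the model .yaml, copy them into the .yaml (and retrain with --noautoanchor) before converting to rknn, as the README suggests.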
-------------------------------------------------------------------------------- /data/coco.yaml: --------------------------------------------------------------------------------
1 | # COCO 2017 dataset http://cocodataset.org
2 | # Train command: python train.py --data coco.yaml
3 | # Default dataset location is next to /yolov5:
4 | # /parent_folder
5 | # /coco
6 | # /yolov5
7 | 
8 | 
9 | # download command/URL (optional)
10 | download: bash data/scripts/get_coco.sh
11 | 
12 | # train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]
13 | train: ../coco/train2017.txt # 118287 images
14 | val: ../coco/val2017.txt # 5000 images
15 | test: ../coco/test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794
16 | 
17 | # number of classes
18 | nc: 80
19 | 
20 | # class names
21 | names: [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
22 | 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
23 | 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
24 | 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
25 | 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
26 | 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
27 | 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
28 | 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
29 | 'hair drier', 'toothbrush' ]
30 | 
31 | # Print classes
32 | # with open('data/coco.yaml') as f:
33 | # d = yaml.load(f, Loader=yaml.FullLoader) # dict
34 | # for i, x in enumerate(d['names']):
35 | # print(i, x)
36 | 
-------------------------------------------------------------------------------- /data/coco128.yaml: --------------------------------------------------------------------------------
1 | # COCO 2017 dataset http://cocodataset.org - first 128 training images
2 | # Train command: python train.py --data coco128.yaml
3 | # Default dataset location is next to /yolov5:
4 | # /parent_folder
5 | # /coco128
6 | # /yolov5
7 | 
8 | 
9 | # download command/URL (optional)
10 | download: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip
11 | 
12 | # train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]
13 | train: ../coco128/images/train2017/ # 128 images
14 | val: ../coco128/images/train2017/ # 128 images
15 | 
16 | # number of classes
17 | nc: 80
18 | 
19 | # class names
20 | names: [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
21 | 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
22 | 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
23 | 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball 
glove', 'skateboard', 'surfboard', 24 | 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 25 | 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 26 | 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 27 | 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 28 | 'hair drier', 'toothbrush' ] 29 | -------------------------------------------------------------------------------- /data/hyp.finetune.yaml: -------------------------------------------------------------------------------- 1 | # Hyperparameters for VOC finetuning 2 | # python train.py --batch 64 --weights yolov5m.pt --data voc.yaml --img 512 --epochs 50 3 | # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials 4 | 5 | 6 | # Hyperparameter Evolution Results 7 | # Generations: 306 8 | # P R mAP.5 mAP.5:.95 box obj cls 9 | # Metrics: 0.6 0.936 0.896 0.684 0.0115 0.00805 0.00146 10 | 11 | lr0: 0.0032 12 | lrf: 0.12 13 | momentum: 0.843 14 | weight_decay: 0.00036 15 | warmup_epochs: 2.0 16 | warmup_momentum: 0.5 17 | warmup_bias_lr: 0.05 18 | box: 0.0296 19 | cls: 0.243 20 | cls_pw: 0.631 21 | obj: 0.301 22 | obj_pw: 0.911 23 | iou_t: 0.2 24 | anchor_t: 2.91 25 | # anchors: 3.63 26 | fl_gamma: 0.0 27 | hsv_h: 0.0138 28 | hsv_s: 0.664 29 | hsv_v: 0.464 30 | degrees: 0.373 31 | translate: 0.245 32 | scale: 0.898 33 | shear: 0.602 34 | perspective: 0.0 35 | flipud: 0.00856 36 | fliplr: 0.5 37 | mosaic: 1.0 38 | mixup: 0.243 39 | -------------------------------------------------------------------------------- /data/hyp.scratch.yaml: -------------------------------------------------------------------------------- 1 | # Hyperparameters for COCO training from scratch 2 | # python train.py --batch 40 --cfg yolov5m.yaml --weights '' --data coco.yaml --img 640 --epochs 300 3 | # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials 4 | 5 | 6 | lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3) 7 | lrf: 0.2 # final OneCycleLR learning rate (lr0 * lrf) 8 | momentum: 0.937 # SGD momentum/Adam beta1 9 | weight_decay: 0.0005 # optimizer weight decay 5e-4 10 | warmup_epochs: 3.0 # warmup epochs (fractions ok) 11 | warmup_momentum: 0.8 # warmup initial momentum 12 | warmup_bias_lr: 0.1 # warmup initial bias lr 13 | box: 0.05 # box loss gain 14 | cls: 0.5 # cls loss gain 15 | cls_pw: 1.0 # cls BCELoss positive_weight 16 | obj: 1.0 # obj loss gain (scale with pixels) 17 | obj_pw: 1.0 # obj BCELoss positive_weight 18 | iou_t: 0.20 # IoU training threshold 19 | anchor_t: 4.0 # anchor-multiple threshold 20 | # anchors: 3 # anchors per output layer (0 to ignore) 21 | fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) 22 | hsv_h: 0.015 # image HSV-Hue augmentation (fraction) 23 | hsv_s: 0.7 # image HSV-Saturation augmentation (fraction) 24 | hsv_v: 0.4 # image HSV-Value augmentation (fraction) 25 | degrees: 0.0 # image rotation (+/- deg) 26 | translate: 0.1 # image translation (+/- fraction) 27 | scale: 0.5 # image scale (+/- gain) 28 | shear: 0.0 # image shear (+/- deg) 29 | perspective: 0.0 # image perspective (+/- fraction), range 0-0.001 30 | flipud: 0.0 # image flip up-down (probability) 31 | fliplr: 0.5 # image flip left-right (probability) 32 | mosaic: 1.0 # image mosaic (probability) 33 | mixup: 0.0 # image mixup (probability) 34 | 
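The hyp.finetune.yaml and hyp.scratch.yaml files above are plain-YAML hyperparameter sets that train.py selects through its --hyp flag. A minimal sketch of deriving a custom variant (hyp.custom.yaml is a hypothetical output name, and the override is only an illustration):

```python
import yaml

with open('data/hyp.scratch.yaml') as f:
    hyp = yaml.safe_load(f)                    # dict: hyperparameter name -> value

hyp['lr0'] = 0.005                             # e.g. halve the initial learning rate
with open('data/hyp.custom.yaml', 'w') as f:   # then: python train.py --hyp data/hyp.custom.yaml
    yaml.safe_dump(hyp, f, sort_keys=False)
```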
-------------------------------------------------------------------------------- /data/images/bus.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/soloist-v/yolov5_for_rknn/f4290d1c322b7595a557983467a1fd13cfec287d/data/images/bus.jpg -------------------------------------------------------------------------------- /data/images/zidane.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/soloist-v/yolov5_for_rknn/f4290d1c322b7595a557983467a1fd13cfec287d/data/images/zidane.jpg -------------------------------------------------------------------------------- /data/scripts/get_coco.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # COCO 2017 dataset http://cocodataset.org 3 | # Download command: bash data/scripts/get_coco.sh 4 | # Train command: python train.py --data coco.yaml 5 | # Default dataset location is next to /yolov5: 6 | # /parent_folder 7 | # /coco 8 | # /yolov5 9 | 10 | # Download/unzip labels 11 | d='../' # unzip directory 12 | url=https://github.com/ultralytics/yolov5/releases/download/v1.0/ 13 | f='coco2017labels.zip' # 68 MB 14 | echo 'Downloading' $url$f ' ...' && curl -L $url$f -o $f && unzip -q $f -d $d && rm $f # download, unzip, remove 15 | 16 | # Download/unzip images 17 | d='../coco/images' # unzip directory 18 | url=http://images.cocodataset.org/zips/ 19 | f1='train2017.zip' # 19G, 118k images 20 | f2='val2017.zip' # 1G, 5k images 21 | f3='test2017.zip' # 7G, 41k images (optional) 22 | for f in $f1 $f2; do 23 | echo 'Downloading' $url$f '...' && curl -L $url$f -o $f # download, (unzip, remove in background) 24 | unzip -q $f -d $d && rm $f & 25 | done 26 | wait # finish background tasks 27 | -------------------------------------------------------------------------------- /data/scripts/get_voc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC/ 3 | # Download command: bash data/scripts/get_voc.sh 4 | # Train command: python train.py --data voc.yaml 5 | # Default dataset location is next to /yolov5: 6 | # /parent_folder 7 | # /VOC 8 | # /yolov5 9 | 10 | start=$(date +%s) 11 | mkdir -p ../tmp 12 | cd ../tmp/ 13 | 14 | # Download/unzip images and labels 15 | d='.' # unzip directory 16 | url=https://github.com/ultralytics/yolov5/releases/download/v1.0/ 17 | f1=VOCtrainval_06-Nov-2007.zip # 446MB, 5012 images 18 | f2=VOCtest_06-Nov-2007.zip # 438MB, 4953 images 19 | f3=VOCtrainval_11-May-2012.zip # 1.95GB, 17126 images 20 | for f in $f3 $f2 $f1; do 21 | echo 'Downloading' $url$f '...' && curl -L $url$f -o $f # download, (unzip, remove in background) 22 | unzip -q $f -d $d && rm $f & 23 | done 24 | wait # finish background tasks 25 | 26 | end=$(date +%s) 27 | runtime=$((end - start)) 28 | echo "Completed in" $runtime "seconds" 29 | 30 | echo "Splitting dataset..." 
31 | python3 - "$@" <<END
[extraction gap — the Python here-doc that splits the VOC dataset was swallowed by the scraper (everything between this '<<' and the next '>' is missing, including most of a 'cat ... >train.txt' line)]
91 | cat 2007_train.txt 2007_val.txt 2007_test.txt 2012_train.txt 2012_val.txt >train.all.txt
92 | 
93 | python3 - "$@" <<END
[extraction gap — the second here-doc and the rest of get_voc.sh, plus data/voc.yaml, dataset.txt and the opening of detect.py, were swallowed the same way; the dump resumes inside detect.py]
-------------------------------------------------------------------------------- /detect.py: --------------------------------------------------------------------------------
83 | if webcam: # batch_size >= 1
84 | p, s, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), dataset.count
85 | else:
86 | p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0)
87 | 
88 | p = Path(p) # to Path
89 | save_path = str(save_dir / p.name) # img.jpg
90 | txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt
91 | s += '%gx%g ' % img.shape[2:] # print string
92 | gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
93 | if len(det):
94 | # Rescale boxes from img_size to im0 size
95 | det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
96 | 
97 | # Print results
98 | for c in det[:, -1].unique():
99 | n = (det[:, -1] == c).sum() # detections per class
100 | s += f'{n} {names[int(c)]}s, ' # add to string
101 | 
102 | # Write results
103 | for *xyxy, conf, cls in reversed(det):
104 | if save_txt: # Write to file
105 | xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
106 | line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh) # label format
107 | with open(txt_path + '.txt', 'a') as f:
108 | f.write(('%g ' * len(line)).rstrip() % line + '\n')
109 | 
110 | if save_img or view_img: # Add bbox to image
111 | label = f'{names[int(cls)]} {conf:.2f}'
112 | plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3)
113 | 
114 | # Print time (inference + NMS)
115 | print(f'{s}Done. ({t2 - t1:.3f}s)')
116 | 
117 | # Stream results
118 | if view_img:
119 | cv2.imshow(str(p), im0)
120 | 
121 | # Save results (image with detections)
122 | if save_img:
123 | if dataset.mode == 'image':
124 | cv2.imwrite(save_path, im0)
125 | else: # 'video'
126 | if vid_path != save_path: # new video
127 | vid_path = save_path
128 | if isinstance(vid_writer, cv2.VideoWriter):
129 | vid_writer.release() # release previous video writer
130 | 
131 | fourcc = 'mp4v' # output video codec
132 | fps = vid_cap.get(cv2.CAP_PROP_FPS)
133 | w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
134 | h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
135 | vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, h))
136 | vid_writer.write(im0)
137 | 
138 | if save_txt or save_img:
139 | s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
140 | print(f"Results saved to {save_dir}{s}")
141 | 
142 | print(f'Done. ({time.time() - t0:.3f}s)')
143 | 
144 | 
145 | if __name__ == '__main__':
146 | parser = argparse.ArgumentParser()
147 | parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)')
148 | parser.add_argument('--source', type=str, default='data/images', help='source') # file/folder, 0 for webcam
149 | parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
150 | parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
151 | parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
152 | parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu')
153 | parser.add_argument('--view-img', action='store_true', help='display results')
154 | parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
155 | parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
156 | parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
157 | parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
158 | parser.add_argument('--augment', action='store_true', help='augmented inference')
159 | parser.add_argument('--update', action='store_true', help='update all models')
160 | parser.add_argument('--project', default='runs/detect', help='save results to project/name')
161 | parser.add_argument('--name', default='exp', help='save results to project/name')
162 | parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
163 | opt = parser.parse_args()
164 | print(opt)
165 | 
166 | with torch.no_grad():
167 | if opt.update: # update all models (to fix SourceChangeWarning)
168 | for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:
169 | detect()
170 | strip_optimizer(opt.weights)
171 | else:
172 | detect()
173 | 
-------------------------------------------------------------------------------- /export_no_focus.py: --------------------------------------------------------------------------------
1 | """Exports a YOLOv5 *.pt model to ONNX, with the Focus slicing moved out of the network
2 | 
3 | Usage:
4 | $ python export_no_focus.py --weights ./weights/yolov5s.pt --img-size 640 640
5 | """
6 | 
7 | import argparse
8 | import sys
9 | import time
10 | 
11 | sys.path.append('./') # to run '$ python *.py' files in subdirectories
12 | import numpy as np
13 | import torch
14 | import torch.nn as nn
15 | 
16 | import models
17 | from models.experimental import attempt_load
18 | from utils.activations import Hardswish
19 | from utils.general import set_logging, check_img_size
20 | 
21 | 
22 | class SiLU(nn.Module): # export-friendly version of nn.SiLU()
23 | @staticmethod
24 | def forward(x):
25 | return x * torch.sigmoid(x)
26 | 
27 | 
28 | class Focus(nn.Module):
29 | def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
30 | super(Focus, self).__init__()
31 | self.conv = models.common.Conv(c1 * 4, c2, k, s, p, g, act)
32 | 
33 | def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2)
34 | return self.conv(x)
35 | 
36 | 
37 | models.common.Focus = Focus
38 | 
39 | if __name__ == '__main__':
40 | parser = argparse.ArgumentParser()
41 | parser.add_argument('--weights', type=str, default='./weights/yolov5s.pt',
42 | help='weights path') # from yolov5/models/
43 | parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size') # width, height
44 | parser.add_argument('--batch-size', type=int, default=1, help='batch size')
45 | opt = parser.parse_args()
46 | opt.img_size *= 2 if len(opt.img_size) == 1 else 1 # expand
47 | print(opt)
48 | set_logging()
49 | t = time.time()
50 | model = attempt_load(opt.weights, map_location=torch.device('cpu')) # load FP32 model
51 | labels = model.names
52 | gs = int(max(model.stride)) # grid size (max stride)
53 | opt.img_size = [check_img_size(x, gs) for x in opt.img_size] # verify img_size are gs-multiples
54 | img = torch.zeros(opt.batch_size, 3, *opt.img_size[::-1]) # image size(1,3,320,192) iDetection
55 | img = torch.cat([img[..., ::2, ::2], img[..., 1::2, ::2], img[..., ::2, 1::2], img[..., 1::2, 1::2]], 1)
56 | # np.save(opt.weights.replace('.pt', f'_{opt.img_size[1]}x{opt.img_size[0]}.npy'), np.array(img, "uint8"))
57 | for k, m in model.named_modules():
58 | m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility
59 | if isinstance(m, models.common.Conv): # assign export-friendly activations
60 | if isinstance(m.act, nn.Hardswish):
61 | m.act = Hardswish()
62 | elif isinstance(m.act, nn.SiLU):
63 | m.act = SiLU()
64 | model.model[-1].export = True # set Detect() layer export=True
65 | y = model(img) # dry run
66 | 
67 | # ONNX export
68 | try:
69 | import onnx
70 | 
71 | print('\nStarting ONNX export with onnx %s...' % onnx.__version__)
72 | f = opt.weights.replace('.pt', '.onnx') # filename
73 | torch.onnx.export(model, img, f, verbose=False, opset_version=10, input_names=['images'],
74 | output_names=['classes', 'boxes'] if y is None else ['output'])
75 | 
76 | # Checks
77 | onnx_model = onnx.load(f) # load onnx model
78 | onnx.checker.check_model(onnx_model) # check onnx model
79 | # print(onnx.helper.printable_graph(onnx_model.graph)) # print a human readable model
80 | print('ONNX export success, saved as %s' % f)
81 | except Exception as e:
82 | print('ONNX export failure: %s' % e)
83 | 
84 | print('\nExport complete (%.2fs). Visualize with https://github.com/lutzroeder/netron.' % (time.time() - t))
85 | 
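export_no_focus.py above replaces the Focus module with a plain convolution and instead feeds the network a pre-split 12-channel tensor (the torch.cat of the four pixel sub-grids on line 55). At inference time the same space-to-depth split therefore has to be applied to the input image before it reaches the ONNX/RKNN model. A minimal numpy sketch of that preprocessing, assuming an NCHW float input that has already been letterboxed to the export size:

```python
import numpy as np

def focus_preprocess(img):
    """Replicate YOLOv5's Focus slicing outside the network.

    img: (N, 3, H, W) array; returns (N, 12, H/2, W/2), matching the
    torch.cat call used during export.
    """
    return np.concatenate([img[..., ::2, ::2],     # even rows, even cols
                           img[..., 1::2, ::2],    # odd rows,  even cols
                           img[..., ::2, 1::2],    # even rows, odd cols
                           img[..., 1::2, 1::2]],  # odd rows,  odd cols
                          axis=1)

x = np.zeros((1, 3, 640, 640), dtype=np.float32)
print(focus_preprocess(x).shape)  # (1, 12, 320, 320)
```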
-------------------------------------------------------------------------------- /hubconf.py: --------------------------------------------------------------------------------
1 | """File for accessing YOLOv5 via PyTorch Hub https://pytorch.org/hub/
2 | 
3 | Usage:
4 | import torch
5 | model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True, channels=3, classes=80)
6 | """
7 | 
8 | from pathlib import Path
9 | 
10 | import torch
11 | 
12 | from models.yolo import Model
13 | from utils.general import set_logging
14 | from utils.google_utils import attempt_download
15 | 
16 | dependencies = ['torch', 'yaml']
17 | set_logging()
18 | 
19 | 
20 | def create(name, pretrained, channels, classes, autoshape):
21 | """Creates a specified YOLOv5 model
22 | 
23 | Arguments:
24 | name (str): name of model, i.e. 'yolov5s'
25 | pretrained (bool): load pretrained weights into the model
26 | channels (int): number of input channels
27 | classes (int): number of model classes
28 | 
29 | Returns:
30 | pytorch model
31 | """
32 | config = Path(__file__).parent / 'models' / f'{name}.yaml' # model.yaml path
33 | try:
34 | model = Model(config, channels, classes)
35 | if pretrained:
36 | fname = f'{name}.pt' # checkpoint filename
37 | attempt_download(fname) # download if not found locally
38 | ckpt = torch.load(fname, map_location=torch.device('cpu')) # load
39 | state_dict = ckpt['model'].float().state_dict() # to FP32
40 | state_dict = {k: v for k, v in state_dict.items() if model.state_dict()[k].shape == v.shape} # filter
41 | model.load_state_dict(state_dict, strict=False) # load
42 | if len(ckpt['model'].names) == classes:
43 | model.names = ckpt['model'].names # set class names attribute
44 | if autoshape:
45 | model = model.autoshape() # for file/URI/PIL/cv2/np inputs and NMS
46 | return model
47 | 
48 | except Exception as e:
49 | help_url = 'https://github.com/ultralytics/yolov5/issues/36'
50 | s = 'Cache may be out of date, try force_reload=True. See %s for help.' 
% help_url 51 | raise Exception(s) from e 52 | 53 | 54 | def yolov5s(pretrained=False, channels=3, classes=80, autoshape=True): 55 | """YOLOv5-small model from https://github.com/ultralytics/yolov5 56 | 57 | Arguments: 58 | pretrained (bool): load pretrained weights into the model, default=False 59 | channels (int): number of input channels, default=3 60 | classes (int): number of model classes, default=80 61 | 62 | Returns: 63 | pytorch model 64 | """ 65 | return create('yolov5s', pretrained, channels, classes, autoshape) 66 | 67 | 68 | def yolov5m(pretrained=False, channels=3, classes=80, autoshape=True): 69 | """YOLOv5-medium model from https://github.com/ultralytics/yolov5 70 | 71 | Arguments: 72 | pretrained (bool): load pretrained weights into the model, default=False 73 | channels (int): number of input channels, default=3 74 | classes (int): number of model classes, default=80 75 | 76 | Returns: 77 | pytorch model 78 | """ 79 | return create('yolov5m', pretrained, channels, classes, autoshape) 80 | 81 | 82 | def yolov5l(pretrained=False, channels=3, classes=80, autoshape=True): 83 | """YOLOv5-large model from https://github.com/ultralytics/yolov5 84 | 85 | Arguments: 86 | pretrained (bool): load pretrained weights into the model, default=False 87 | channels (int): number of input channels, default=3 88 | classes (int): number of model classes, default=80 89 | 90 | Returns: 91 | pytorch model 92 | """ 93 | return create('yolov5l', pretrained, channels, classes, autoshape) 94 | 95 | 96 | def yolov5x(pretrained=False, channels=3, classes=80, autoshape=True): 97 | """YOLOv5-xlarge model from https://github.com/ultralytics/yolov5 98 | 99 | Arguments: 100 | pretrained (bool): load pretrained weights into the model, default=False 101 | channels (int): number of input channels, default=3 102 | classes (int): number of model classes, default=80 103 | 104 | Returns: 105 | pytorch model 106 | """ 107 | return create('yolov5x', pretrained, channels, classes, autoshape) 108 | 109 | 110 | def custom(path_or_model='path/to/model.pt', autoshape=True): 111 | """YOLOv5-custom model from https://github.com/ultralytics/yolov5 112 | 113 | Arguments (3 options): 114 | path_or_model (str): 'path/to/model.pt' 115 | path_or_model (dict): torch.load('path/to/model.pt') 116 | path_or_model (nn.Module): torch.load('path/to/model.pt')['model'] 117 | 118 | Returns: 119 | pytorch model 120 | """ 121 | model = torch.load(path_or_model) if isinstance(path_or_model, str) else path_or_model # load checkpoint 122 | if isinstance(model, dict): 123 | model = model['model'] # load model 124 | 125 | hub_model = Model(model.yaml).to(next(model.parameters()).device) # create 126 | hub_model.load_state_dict(model.float().state_dict()) # load state_dict 127 | hub_model.names = model.names # class names 128 | return hub_model.autoshape() if autoshape else hub_model 129 | 130 | 131 | if __name__ == '__main__': 132 | model = create(name='yolov5s', pretrained=True, channels=3, classes=80, autoshape=True) # pretrained example 133 | # model = custom(path_or_model='path/to/model.pt') # custom example 134 | 135 | # Verify inference 136 | from PIL import Image 137 | 138 | imgs = [Image.open(x) for x in Path('data/images').glob('*.jpg')] 139 | results = model(imgs) 140 | results.show() 141 | results.print() 142 | -------------------------------------------------------------------------------- /models/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/soloist-v/yolov5_for_rknn/f4290d1c322b7595a557983467a1fd13cfec287d/models/__init__.py -------------------------------------------------------------------------------- /models/experimental.py: -------------------------------------------------------------------------------- 1 | # This file contains experimental modules 2 | 3 | import numpy as np 4 | import torch 5 | import torch.nn as nn 6 | 7 | from models.common import Conv, DWConv 8 | from utils.google_utils import attempt_download 9 | 10 | 11 | class CrossConv(nn.Module): 12 | # Cross Convolution Downsample 13 | def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False): 14 | # ch_in, ch_out, kernel, stride, groups, expansion, shortcut 15 | super(CrossConv, self).__init__() 16 | c_ = int(c2 * e) # hidden channels 17 | self.cv1 = Conv(c1, c_, (1, k), (1, s)) 18 | self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g) 19 | self.add = shortcut and c1 == c2 20 | 21 | def forward(self, x): 22 | return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x)) 23 | 24 | 25 | class Sum(nn.Module): 26 | # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070 27 | def __init__(self, n, weight=False): # n: number of inputs 28 | super(Sum, self).__init__() 29 | self.weight = weight # apply weights boolean 30 | self.iter = range(n - 1) # iter object 31 | if weight: 32 | self.w = nn.Parameter(-torch.arange(1., n) / 2, requires_grad=True) # layer weights 33 | 34 | def forward(self, x): 35 | y = x[0] # no weight 36 | if self.weight: 37 | w = torch.sigmoid(self.w) * 2 38 | for i in self.iter: 39 | y = y + x[i + 1] * w[i] 40 | else: 41 | for i in self.iter: 42 | y = y + x[i + 1] 43 | return y 44 | 45 | 46 | class GhostConv(nn.Module): 47 | # Ghost Convolution https://github.com/huawei-noah/ghostnet 48 | def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups 49 | super(GhostConv, self).__init__() 50 | c_ = c2 // 2 # hidden channels 51 | self.cv1 = Conv(c1, c_, k, s, None, g, act) 52 | self.cv2 = Conv(c_, c_, 5, 1, None, c_, act) 53 | 54 | def forward(self, x): 55 | y = self.cv1(x) 56 | return torch.cat([y, self.cv2(y)], 1) 57 | 58 | 59 | class GhostBottleneck(nn.Module): 60 | # Ghost Bottleneck https://github.com/huawei-noah/ghostnet 61 | def __init__(self, c1, c2, k, s): 62 | super(GhostBottleneck, self).__init__() 63 | c_ = c2 // 2 64 | self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1), # pw 65 | DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw 66 | GhostConv(c_, c2, 1, 1, act=False)) # pw-linear 67 | self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False), 68 | Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity() 69 | 70 | def forward(self, x): 71 | return self.conv(x) + self.shortcut(x) 72 | 73 | 74 | class MixConv2d(nn.Module): 75 | # Mixed Depthwise Conv https://arxiv.org/abs/1907.09595 76 | def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): 77 | super(MixConv2d, self).__init__() 78 | groups = len(k) 79 | if equal_ch: # equal c_ per group 80 | i = torch.linspace(0, groups - 1E-6, c2).floor() # c2 indices 81 | c_ = [(i == g).sum() for g in range(groups)] # intermediate channels 82 | else: # equal weight.numel() per group 83 | b = [c2] + [0] * groups 84 | a = np.eye(groups + 1, groups, k=-1) 85 | a -= np.roll(a, 1, axis=1) 86 | a *= np.array(k) ** 2 87 | a[0] = 1 88 | c_ = np.linalg.lstsq(a, b, rcond=None)[0].round() # solve for equal weight indices, ax = b 89 | 90 | self.m = nn.ModuleList([nn.Conv2d(c1, int(c_[g]), k[g], s, k[g] 
// 2, bias=False) for g in range(groups)]) 91 | self.bn = nn.BatchNorm2d(c2) 92 | self.act = nn.LeakyReLU(0.1, inplace=True) 93 | 94 | def forward(self, x): 95 | return x + self.act(self.bn(torch.cat([m(x) for m in self.m], 1))) 96 | 97 | 98 | class Ensemble(nn.ModuleList): 99 | # Ensemble of models 100 | def __init__(self): 101 | super(Ensemble, self).__init__() 102 | 103 | def forward(self, x, augment=False): 104 | y = [] 105 | for module in self: 106 | y.append(module(x, augment)[0]) 107 | # y = torch.stack(y).max(0)[0] # max ensemble 108 | # y = torch.stack(y).mean(0) # mean ensemble 109 | y = torch.cat(y, 1) # nms ensemble 110 | return y, None # inference, train output 111 | 112 | 113 | def attempt_load(weights, map_location=None): 114 | # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a 115 | model = Ensemble() 116 | for w in weights if isinstance(weights, list) else [weights]: 117 | attempt_download(w) 118 | model.append(torch.load(w, map_location=map_location)['model'].float().fuse().eval()) # load FP32 model 119 | 120 | # Compatibility updates 121 | for m in model.modules(): 122 | if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6]: 123 | m.inplace = True # pytorch 1.7.0 compatibility 124 | elif type(m) is Conv: 125 | m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility 126 | 127 | if len(model) == 1: 128 | return model[-1] # return model 129 | else: 130 | print('Ensemble created with %s\n' % weights) 131 | for k in ['names', 'stride']: 132 | setattr(model, k, getattr(model[-1], k)) 133 | return model # return ensemble 134 | -------------------------------------------------------------------------------- /models/export.py: -------------------------------------------------------------------------------- 1 | """Exports a YOLOv5 *.pt model to ONNX and TorchScript formats 2 | 3 | Usage: 4 | $ export PYTHONPATH="$PWD" && python models/export.py --weights ./weights/yolov5s.pt --img 640 --batch 1 5 | """ 6 | 7 | import argparse 8 | import sys 9 | import time 10 | 11 | sys.path.append('./') # to run '$ python *.py' files in subdirectories 12 | 13 | import torch 14 | import torch.nn as nn 15 | 16 | import models 17 | from models.experimental import attempt_load 18 | from utils.activations import Hardswish, SiLU 19 | from utils.general import set_logging, check_img_size 20 | 21 | if __name__ == '__main__': 22 | parser = argparse.ArgumentParser() 23 | parser.add_argument('--weights', type=str, default='./weights/best.pt', help='weights path') # from yolov5/models/ 24 | parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size') # height, width 25 | parser.add_argument('--batch-size', type=int, default=1, help='batch size') 26 | opt = parser.parse_args() 27 | opt.img_size *= 2 if len(opt.img_size) == 1 else 1 # expand 28 | print(opt) 29 | set_logging() 30 | t = time.time() 31 | 32 | # Load PyTorch model 33 | model = attempt_load(opt.weights, map_location=torch.device('cpu')) # load FP32 model 34 | labels = model.names 35 | 36 | # Checks 37 | gs = int(max(model.stride)) # grid size (max stride) 38 | opt.img_size = [check_img_size(x, gs) for x in opt.img_size] # verify img_size are gs-multiples 39 | 40 | # Input 41 | img = torch.zeros(opt.batch_size, 3, *opt.img_size[::-1]) # image size(1,3,320,192) iDetection 42 | 43 | # Update model 44 | for k, m in model.named_modules(): 45 | m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility 46 | if isinstance(m, models.common.Conv): # assign 
export-friendly activations 47 | if isinstance(m.act, nn.Hardswish): 48 | m.act = Hardswish() 49 | # elif isinstance(m.act, nn.SiLU): 50 | # m.act = SiLU() 51 | # elif isinstance(m, models.yolo.Detect): 52 | # m.forward = m.forward_export # assign forward (optional) 53 | model.model[-1].export = True # set Detect() layer export=True 54 | y = model(img) # dry run 55 | try: 56 | import onnx 57 | 58 | print('\nStarting ONNX export with onnx %s...' % onnx.__version__) 59 | f = opt.weights.replace('.pt', f'_{opt.img_size[0]}x{opt.img_size[1]}.onnx') # filename 60 | torch.onnx.export(model, img, f, verbose=False, opset_version=10, input_names=['images'], 61 | output_names=['classes', 'boxes'] if y is None else ['output']) 62 | 63 | # Checks 64 | onnx_model = onnx.load(f) # load onnx model 65 | onnx.checker.check_model(onnx_model) # check onnx model 66 | # print(onnx.helper.printable_graph(onnx_model.graph)) # print a human readable model 67 | print('ONNX export success, saved as %s' % f) 68 | except Exception as e: 69 | print('ONNX export failure: %s' % e) 70 | 71 | -------------------------------------------------------------------------------- /models/hub/anchors.yaml: -------------------------------------------------------------------------------- 1 | # Default YOLOv5 anchors for COCO data 2 | 3 | 4 | # P5 ------------------------------------------------------------------------------------------------------------------- 5 | # P5-640: 6 | anchors_p5_640: 7 | - [ 10,13, 16,30, 33,23 ] # P3/8 8 | - [ 30,61, 62,45, 59,119 ] # P4/16 9 | - [ 116,90, 156,198, 373,326 ] # P5/32 10 | 11 | 12 | # P6 ------------------------------------------------------------------------------------------------------------------- 13 | # P6-640: thr=0.25: 0.9964 BPR, 5.54 anchors past thr, n=12, img_size=640, metric_all=0.281/0.716-mean/best, past_thr=0.469-mean: 9,11, 21,19, 17,41, 43,32, 39,70, 86,64, 65,131, 134,130, 120,265, 282,180, 247,354, 512,387 14 | anchors_p6_640: 15 | - [ 9,11, 21,19, 17,41 ] # P3/8 16 | - [ 43,32, 39,70, 86,64 ] # P4/16 17 | - [ 65,131, 134,130, 120,265 ] # P5/32 18 | - [ 282,180, 247,354, 512,387 ] # P6/64 19 | 20 | # P6-1280: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1280, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 19,27, 44,40, 38,94, 96,68, 86,152, 180,137, 140,301, 303,264, 238,542, 436,615, 739,380, 925,792 21 | anchors_p6_1280: 22 | - [ 19,27, 44,40, 38,94 ] # P3/8 23 | - [ 96,68, 86,152, 180,137 ] # P4/16 24 | - [ 140,301, 303,264, 238,542 ] # P5/32 25 | - [ 436,615, 739,380, 925,792 ] # P6/64 26 | 27 | # P6-1920: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1920, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 28,41, 67,59, 57,141, 144,103, 129,227, 270,205, 209,452, 455,396, 358,812, 653,922, 1109,570, 1387,1187 28 | anchors_p6_1920: 29 | - [ 28,41, 67,59, 57,141 ] # P3/8 30 | - [ 144,103, 129,227, 270,205 ] # P4/16 31 | - [ 209,452, 455,396, 358,812 ] # P5/32 32 | - [ 653,922, 1109,570, 1387,1187 ] # P6/64 33 | 34 | 35 | # P7 ------------------------------------------------------------------------------------------------------------------- 36 | # P7-640: thr=0.25: 0.9962 BPR, 6.76 anchors past thr, n=15, img_size=640, metric_all=0.275/0.733-mean/best, past_thr=0.466-mean: 11,11, 13,30, 29,20, 30,46, 61,38, 39,92, 78,80, 146,66, 79,163, 149,150, 321,143, 157,303, 257,402, 359,290, 524,372 37 | anchors_p7_640: 38 | - [ 11,11, 13,30, 29,20 ] # P3/8 39 | - [ 30,46, 61,38, 39,92 ] # P4/16 40 | - [ 78,80, 146,66, 79,163 ] # P5/32 41 | 
- [ 149,150, 321,143, 157,303 ] # P6/64 42 | - [ 257,402, 359,290, 524,372 ] # P7/128 43 | 44 | # P7-1280: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1280, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 19,22, 54,36, 32,77, 70,83, 138,71, 75,173, 165,159, 148,334, 375,151, 334,317, 251,626, 499,474, 750,326, 534,814, 1079,818 45 | anchors_p7_1280: 46 | - [ 19,22, 54,36, 32,77 ] # P3/8 47 | - [ 70,83, 138,71, 75,173 ] # P4/16 48 | - [ 165,159, 148,334, 375,151 ] # P5/32 49 | - [ 334,317, 251,626, 499,474 ] # P6/64 50 | - [ 750,326, 534,814, 1079,818 ] # P7/128 51 | 52 | # P7-1920: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1920, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 29,34, 81,55, 47,115, 105,124, 207,107, 113,259, 247,238, 222,500, 563,227, 501,476, 376,939, 749,711, 1126,489, 801,1222, 1618,1227 53 | anchors_p7_1920: 54 | - [ 29,34, 81,55, 47,115 ] # P3/8 55 | - [ 105,124, 207,107, 113,259 ] # P4/16 56 | - [ 247,238, 222,500, 563,227 ] # P5/32 57 | - [ 501,476, 376,939, 749,711 ] # P6/64 58 | - [ 1126,489, 801,1222, 1618,1227 ] # P7/128 59 | -------------------------------------------------------------------------------- /models/hub/yolov3-spp.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.0 # model depth multiple 4 | width_multiple: 1.0 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # darknet53 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Conv, [32, 3, 1]], # 0 16 | [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 17 | [-1, 1, Bottleneck, [64]], 18 | [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 19 | [-1, 2, Bottleneck, [128]], 20 | [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 21 | [-1, 8, Bottleneck, [256]], 22 | [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 23 | [-1, 8, Bottleneck, [512]], 24 | [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 25 | [-1, 4, Bottleneck, [1024]], # 10 26 | ] 27 | 28 | # YOLOv3-SPP head 29 | head: 30 | [[-1, 1, Bottleneck, [1024, False]], 31 | [-1, 1, SPP, [512, [5, 9, 13]]], 32 | [-1, 1, Conv, [1024, 3, 1]], 33 | [-1, 1, Conv, [512, 1, 1]], 34 | [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) 35 | 36 | [-2, 1, Conv, [256, 1, 1]], 37 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 38 | [[-1, 8], 1, Concat, [1]], # cat backbone P4 39 | [-1, 1, Bottleneck, [512, False]], 40 | [-1, 1, Bottleneck, [512, False]], 41 | [-1, 1, Conv, [256, 1, 1]], 42 | [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) 43 | 44 | [-2, 1, Conv, [128, 1, 1]], 45 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 46 | [[-1, 6], 1, Concat, [1]], # cat backbone P3 47 | [-1, 1, Bottleneck, [256, False]], 48 | [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small) 49 | 50 | [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 51 | ] 52 | -------------------------------------------------------------------------------- /models/hub/yolov3-tiny.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.0 # model depth multiple 4 | width_multiple: 1.0 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 8 | - [10,14, 23,27, 37,58] # P4/16 9 | - [81,82, 135,169, 344,319] # P5/32 10 | 11 | # YOLOv3-tiny backbone 12 | backbone: 13 | # [from, number, module, args] 14 | [[-1, 1, Conv, [16, 3, 1]], # 0 15 | [-1, 1, 
nn.MaxPool2d, [2, 2, 0]], # 1-P1/2 16 | [-1, 1, Conv, [32, 3, 1]], 17 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 3-P2/4 18 | [-1, 1, Conv, [64, 3, 1]], 19 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 5-P3/8 20 | [-1, 1, Conv, [128, 3, 1]], 21 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 7-P4/16 22 | [-1, 1, Conv, [256, 3, 1]], 23 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 9-P5/32 24 | [-1, 1, Conv, [512, 3, 1]], 25 | [-1, 1, nn.ZeroPad2d, [[0, 1, 0, 1]]], # 11 26 | [-1, 1, nn.MaxPool2d, [2, 1, 0]], # 12 27 | ] 28 | 29 | # YOLOv3-tiny head 30 | head: 31 | [[-1, 1, Conv, [1024, 3, 1]], 32 | [-1, 1, Conv, [256, 1, 1]], 33 | [-1, 1, Conv, [512, 3, 1]], # 15 (P5/32-large) 34 | 35 | [-2, 1, Conv, [128, 1, 1]], 36 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 37 | [[-1, 8], 1, Concat, [1]], # cat backbone P4 38 | [-1, 1, Conv, [256, 3, 1]], # 19 (P4/16-medium) 39 | 40 | [[19, 15], 1, Detect, [nc, anchors]], # Detect(P4, P5) 41 | ] 42 | -------------------------------------------------------------------------------- /models/hub/yolov3.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.0 # model depth multiple 4 | width_multiple: 1.0 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # darknet53 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Conv, [32, 3, 1]], # 0 16 | [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 17 | [-1, 1, Bottleneck, [64]], 18 | [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 19 | [-1, 2, Bottleneck, [128]], 20 | [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 21 | [-1, 8, Bottleneck, [256]], 22 | [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 23 | [-1, 8, Bottleneck, [512]], 24 | [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 25 | [-1, 4, Bottleneck, [1024]], # 10 26 | ] 27 | 28 | # YOLOv3 head 29 | head: 30 | [[-1, 1, Bottleneck, [1024, False]], 31 | [-1, 1, Conv, [512, [1, 1]]], 32 | [-1, 1, Conv, [1024, 3, 1]], 33 | [-1, 1, Conv, [512, 1, 1]], 34 | [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) 35 | 36 | [-2, 1, Conv, [256, 1, 1]], 37 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 38 | [[-1, 8], 1, Concat, [1]], # cat backbone P4 39 | [-1, 1, Bottleneck, [512, False]], 40 | [-1, 1, Bottleneck, [512, False]], 41 | [-1, 1, Conv, [256, 1, 1]], 42 | [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) 43 | 44 | [-2, 1, Conv, [128, 1, 1]], 45 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 46 | [[-1, 6], 1, Concat, [1]], # cat backbone P3 47 | [-1, 1, Bottleneck, [256, False]], 48 | [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small) 49 | 50 | [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 51 | ] 52 | -------------------------------------------------------------------------------- /models/hub/yolov5-fpn.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.0 # model depth multiple 4 | width_multiple: 1.0 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Focus, [64, 3]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, Bottleneck, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 9, BottleneckCSP, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 
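# Note: each [from, number, module, args] row above is expanded by the model parser
# (a sketch of the scaling logic in models/yolo.py; treat the exact rounding helper as
# an assumption): the repeat count becomes n = max(round(number * depth_multiple), 1),
# and the output-channel argument is scaled by width_multiple and rounded to a multiple
# of 8. This file uses multiples of 1.0, so rows are built as written; under yolov5s
# (depth 0.33, width 0.50) a row such as [-1, 9, BottleneckCSP, [512]] would be built
# with 3 repeats and 256 output channels.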
21 | [-1, 9, BottleneckCSP, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 1, SPP, [1024, [5, 9, 13]]], 24 | [-1, 6, BottleneckCSP, [1024]], # 9 25 | ] 26 | 27 | # YOLOv5 FPN head 28 | head: 29 | [[-1, 3, BottleneckCSP, [1024, False]], # 10 (P5/32-large) 30 | 31 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 32 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 33 | [-1, 1, Conv, [512, 1, 1]], 34 | [-1, 3, BottleneckCSP, [512, False]], # 14 (P4/16-medium) 35 | 36 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 37 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 38 | [-1, 1, Conv, [256, 1, 1]], 39 | [-1, 3, BottleneckCSP, [256, False]], # 18 (P3/8-small) 40 | 41 | [[18, 14, 10], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 42 | ] 43 | -------------------------------------------------------------------------------- /models/hub/yolov5-p2.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.0 # model depth multiple 4 | width_multiple: 1.0 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 3 8 | 9 | # YOLOv5 backbone 10 | backbone: 11 | # [from, number, module, args] 12 | [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 13 | [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 14 | [ -1, 3, C3, [ 128 ] ], 15 | [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 16 | [ -1, 9, C3, [ 256 ] ], 17 | [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 18 | [ -1, 9, C3, [ 512 ] ], 19 | [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 7-P5/32 20 | [ -1, 1, SPP, [ 1024, [ 5, 9, 13 ] ] ], 21 | [ -1, 3, C3, [ 1024, False ] ], # 9 22 | ] 23 | 24 | # YOLOv5 head 25 | head: 26 | [ [ -1, 1, Conv, [ 512, 1, 1 ] ], 27 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], 28 | [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 29 | [ -1, 3, C3, [ 512, False ] ], # 13 30 | 31 | [ -1, 1, Conv, [ 256, 1, 1 ] ], 32 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], 33 | [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 34 | [ -1, 3, C3, [ 256, False ] ], # 17 (P3/8-small) 35 | 36 | [ -1, 1, Conv, [ 128, 1, 1 ] ], 37 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], 38 | [ [ -1, 2 ], 1, Concat, [ 1 ] ], # cat backbone P2 39 | [ -1, 1, C3, [ 128, False ] ], # 21 (P2/4-xsmall) 40 | 41 | [ -1, 1, Conv, [ 128, 3, 2 ] ], 42 | [ [ -1, 18 ], 1, Concat, [ 1 ] ], # cat head P3 43 | [ -1, 3, C3, [ 256, False ] ], # 24 (P3/8-small) 44 | 45 | [ -1, 1, Conv, [ 256, 3, 2 ] ], 46 | [ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P4 47 | [ -1, 3, C3, [ 512, False ] ], # 27 (P4/16-medium) 48 | 49 | [ -1, 1, Conv, [ 512, 3, 2 ] ], 50 | [ [ -1, 10 ], 1, Concat, [ 1 ] ], # cat head P5 51 | [ -1, 3, C3, [ 1024, False ] ], # 30 (P5/32-large) 52 | 53 | [ [ 24, 27, 30 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5) 54 | ] 55 | -------------------------------------------------------------------------------- /models/hub/yolov5-p6.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.0 # model depth multiple 4 | width_multiple: 1.0 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 3 8 | 9 | # YOLOv5 backbone 10 | backbone: 11 | # [from, number, module, args] 12 | [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 13 | [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 14 | [ -1, 3, C3, [ 128 ] ], 15 | [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 16 | [ -1, 9, C3, [ 256 ] ], 17 | [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 18 | [ -1, 9, C3, [ 512 ] ], 19 | [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 20 | [ 
-1, 3, C3, [ 768 ] ], 21 | [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64 22 | [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ], 23 | [ -1, 3, C3, [ 1024, False ] ], # 11 24 | ] 25 | 26 | # YOLOv5 head 27 | head: 28 | [ [ -1, 1, Conv, [ 768, 1, 1 ] ], 29 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], 30 | [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 31 | [ -1, 3, C3, [ 768, False ] ], # 15 32 | 33 | [ -1, 1, Conv, [ 512, 1, 1 ] ], 34 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], 35 | [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 36 | [ -1, 3, C3, [ 512, False ] ], # 19 37 | 38 | [ -1, 1, Conv, [ 256, 1, 1 ] ], 39 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], 40 | [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 41 | [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small) 42 | 43 | [ -1, 1, Conv, [ 256, 3, 2 ] ], 44 | [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4 45 | [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium) 46 | 47 | [ -1, 1, Conv, [ 512, 3, 2 ] ], 48 | [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5 49 | [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large) 50 | 51 | [ -1, 1, Conv, [ 768, 3, 2 ] ], 52 | [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6 53 | [ -1, 3, C3, [ 1024, False ] ], # 32 (P5/64-xlarge) 54 | 55 | [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) 56 | ] 57 | -------------------------------------------------------------------------------- /models/hub/yolov5-p7.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.0 # model depth multiple 4 | width_multiple: 1.0 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 3 8 | 9 | # YOLOv5 backbone 10 | backbone: 11 | # [from, number, module, args] 12 | [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 13 | [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 14 | [ -1, 3, C3, [ 128 ] ], 15 | [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 16 | [ -1, 9, C3, [ 256 ] ], 17 | [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 18 | [ -1, 9, C3, [ 512 ] ], 19 | [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 20 | [ -1, 3, C3, [ 768 ] ], 21 | [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64 22 | [ -1, 3, C3, [ 1024 ] ], 23 | [ -1, 1, Conv, [ 1280, 3, 2 ] ], # 11-P7/128 24 | [ -1, 1, SPP, [ 1280, [ 3, 5 ] ] ], 25 | [ -1, 3, C3, [ 1280, False ] ], # 13 26 | ] 27 | 28 | # YOLOv5 head 29 | head: 30 | [ [ -1, 1, Conv, [ 1024, 1, 1 ] ], 31 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], 32 | [ [ -1, 10 ], 1, Concat, [ 1 ] ], # cat backbone P6 33 | [ -1, 3, C3, [ 1024, False ] ], # 17 34 | 35 | [ -1, 1, Conv, [ 768, 1, 1 ] ], 36 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], 37 | [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 38 | [ -1, 3, C3, [ 768, False ] ], # 21 39 | 40 | [ -1, 1, Conv, [ 512, 1, 1 ] ], 41 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], 42 | [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 43 | [ -1, 3, C3, [ 512, False ] ], # 25 44 | 45 | [ -1, 1, Conv, [ 256, 1, 1 ] ], 46 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], 47 | [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 48 | [ -1, 3, C3, [ 256, False ] ], # 29 (P3/8-small) 49 | 50 | [ -1, 1, Conv, [ 256, 3, 2 ] ], 51 | [ [ -1, 26 ], 1, Concat, [ 1 ] ], # cat head P4 52 | [ -1, 3, C3, [ 512, False ] ], # 32 (P4/16-medium) 53 | 54 | [ -1, 1, Conv, [ 512, 3, 2 ] ], 55 | [ [ -1, 22 ], 1, Concat, [ 1 ] ], # cat head P5 56 | [ -1, 3, C3, [ 768, False ] ], # 35 (P5/32-large) 57 | 58 | [ -1, 1, Conv, [ 768, 3, 2 ] ], 59 | [ [ -1, 18 ], 1, Concat, [ 1 ] ], # cat head 
P6 60 | [ -1, 3, C3, [ 1024, False ] ], # 38 (P6/64-xlarge) 61 | 62 | [ -1, 1, Conv, [ 1024, 3, 2 ] ], 63 | [ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P7 64 | [ -1, 3, C3, [ 1280, False ] ], # 41 (P7/128-xxlarge) 65 | 66 | [ [ 29, 32, 35, 38, 41 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6, P7) 67 | ] 68 | -------------------------------------------------------------------------------- /models/hub/yolov5-panet.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.0 # model depth multiple 4 | width_multiple: 1.0 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Focus, [64, 3]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, BottleneckCSP, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 9, BottleneckCSP, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, BottleneckCSP, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 1, SPP, [1024, [5, 9, 13]]], 24 | [-1, 3, BottleneckCSP, [1024, False]], # 9 25 | ] 26 | 27 | # YOLOv5 PANet head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, BottleneckCSP, [512, False]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, BottleneckCSP, [256, False]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, BottleneckCSP, [512, False]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, Conv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, BottleneckCSP, [1024, False]], # 23 (P5/32-large) 46 | 47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 48 | ] 49 | -------------------------------------------------------------------------------- /models/yolov5_rknn_640x640.yaml: -------------------------------------------------------------------------------- 1 | class: rknn_detect_yolov5.Detector 2 | opt: 3 | model: "weights/yolov5s.rknn" 4 | size: [ 640, 640 ] 5 | masks: [ [ 0, 1, 2 ], [ 3, 4, 5 ], [ 6, 7, 8 ] ] 6 | anchors: [ [ 10,13 ], [ 16,30 ], [ 33,23 ], [ 30,61 ], [ 62,45 ], [ 59,119 ], [ 116,90 ], [ 156,198 ], [ 373,326 ] ] 7 | names: [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 8 | 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 9 | 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 10 | 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 11 | 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 12 | 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 13 | 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 14 | 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 15 | 'hair drier', 'toothbrush' ] 16 | conf_thres: 0.3 17 | iou_thres: 0.5 18 | platform: 0 19 | 
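The YAML above is a small detector descriptor: `class` names the implementation to load and `opt` carries its constructor options. A minimal sketch of how such a descriptor could be consumed (the `load_detector` helper is hypothetical, and it assumes the named Detector class accepts the `opt` mapping as keyword arguments; only the `class`/`opt` layout and key names come from the file itself):

import importlib

import yaml


def load_detector(cfg_path):
    # Parse the descriptor: a dotted class path plus its constructor options.
    with open(cfg_path) as f:
        cfg = yaml.safe_load(f)
    module_name, class_name = cfg['class'].rsplit('.', 1)  # e.g. 'rknn_detect_yolov5', 'Detector'
    cls = getattr(importlib.import_module(module_name), class_name)
    return cls(**cfg['opt'])  # model, size, masks, anchors, names, conf/iou thresholds, platform


# detector = load_detector('models/yolov5_rknn_640x640.yaml')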
-------------------------------------------------------------------------------- /models/yolov5l.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.0 # model depth multiple 4 | width_multiple: 1.0 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Focus, [64, 3]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 9, C3, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 1, SPP, [1024, [5, 9, 13]]], 24 | [-1, 3, C3, [1024, False]], # 9 25 | ] 26 | 27 | # YOLOv5 head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, C3, [512, False]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, Conv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 46 | 47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 48 | ] 49 | -------------------------------------------------------------------------------- /models/yolov5m.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 0.67 # model depth multiple 4 | width_multiple: 0.75 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Focus, [64, 3]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 9, C3, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 1, SPP, [1024, [5, 9, 13]]], 24 | [-1, 3, C3, [1024, False]], # 9 25 | ] 26 | 27 | # YOLOv5 head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, C3, [512, False]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, Conv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 46 | 47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 48 | ] 49 | -------------------------------------------------------------------------------- /models/yolov5s.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 
80 # number of classes 3 | depth_multiple: 0.33 # model depth multiple 4 | width_multiple: 0.50 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Focus, [64, 3]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 9, C3, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 1, SPP, [1024, [5, 9, 13]]], 24 | [-1, 3, C3, [1024, False]], # 9 25 | ] 26 | 27 | # YOLOv5 head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, C3, [512, False]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, Conv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 46 | 47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 48 | ] 49 | -------------------------------------------------------------------------------- /models/yolov5x.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.33 # model depth multiple 4 | width_multiple: 1.25 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Focus, [64, 3]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 9, C3, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 1, SPP, [1024, [5, 9, 13]]], 24 | [-1, 3, C3, [1024, False]], # 9 25 | ] 26 | 27 | # YOLOv5 head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, C3, [512, False]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, Conv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 46 | 47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 48 | ] 49 | -------------------------------------------------------------------------------- /onnx2rknn.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | from rknn.api import RKNN 4 | 5 | if __name__ == '__main__': 6 | parser = argparse.ArgumentParser() 7 | parser.add_argument("-i", '--onnx', type=str, default='weights/yolov5s.onnx', help='weights 
path')  # from yolov5/models/
8 |     parser.add_argument('--rknn', type=str, default='', help='output path for the RKNN model')
9 |     parser.add_argument("-p", '--precompile', action="store_true", help='build a pre-compiled RKNN model')
10 |     parser.add_argument("-o", '--original', action="store_true", help='the ONNX model was exported from original YOLOv5')
11 |     parser.add_argument("-bs", '--batch-size', type=int, default=1, help='batch size')
12 |     opt = parser.parse_args()
13 |     ONNX_MODEL = opt.onnx
14 |     if opt.rknn:
15 |         RKNN_MODEL = opt.rknn
16 |     else:
17 |         RKNN_MODEL = "%s.rknn" % os.path.splitext(ONNX_MODEL)[0]
18 |     rknn = RKNN()
19 |     print('--> config model')
20 |     if opt.original:
21 |         rknn.config(mean_values=[[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]],
22 |                     std_values=[[255.0, 255.0, 255.0, 255.0, 255.0, 255.0, 255.0, 255.0, 255.0, 255.0, 255.0, 255.0]],
23 |                     batch_size=opt.batch_size, target_platform="rk3399pro")  # reorder_channel='0 1 2',
24 |     else:
25 |         rknn.config(channel_mean_value='0 0 0 255', reorder_channel='2 1 0', batch_size=opt.batch_size,
26 |                     target_platform="rk3399pro")
27 |     # Load ONNX model
28 |     print('--> Loading model')
29 |     ret = rknn.load_onnx(model=ONNX_MODEL)
30 |     assert ret == 0, "Load onnx failed!"
31 |     # Build model
32 |     print('--> Building model')
33 |     if opt.precompile:
34 |         ret = rknn.build(do_quantization=True, dataset='./dataset.txt', pre_compile=True)
35 |     else:
36 |         ret = rknn.build(do_quantization=True, dataset='./dataset.txt')
37 |     assert ret == 0, "Build rknn failed!"
38 |     # Export rknn model
39 |     print('--> Export RKNN model')
40 |     ret = rknn.export_rknn(RKNN_MODEL)
41 |     assert ret == 0, "Export %s failed!" % RKNN_MODEL
42 |     print('done')
43 |
-------------------------------------------------------------------------------- /onnx_to_rknn.py: --------------------------------------------------------------------------------
1 | import os
2 | import urllib
3 | import traceback
4 | import time
5 | import sys
6 | import numpy as np
7 | import cv2
8 | from rknn.api import RKNN
9 |
10 | """
11 | Convert an ONNX model to an RKNN model.
12 | """
13 |
14 | if __name__ == '__main__':
15 |     ONNX_MODEL = 'yolov5m_640x640.onnx'
16 |     RKNN_MODEL = 'yolov5m_640x640.rknn'
17 |
18 |     # Create RKNN object
19 |     rknn = RKNN()
20 |     print('--> config model')
21 |     # rknn.config(mean_values=[[123.675, 116.28, 103.53]], std_values=[[58.82, 58.82, 58.82]], reorder_channel='0 1 2')
22 |     # rknn.config(batch_size=1, target_platform=["rk1806", "rk1808", "rk3399pro"], mean_values='0 0 0 255')
23 |     rknn.config(channel_mean_value='0 0 0 255', reorder_channel='0 1 2', batch_size=1)
24 |     # rknn.config(channel_mean_value='0 0 0 1', reorder_channel='0 1 2', batch_size=1)
25 |     # rknn.config(mean_values=[[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]], std_values=[[255.0, 255.0, 255.0, 255.0, 255.0, 255.0, 255.0, 255.0, 255.0, 255.0, 255.0, 255.0]], reorder_channel='0 1 2', batch_size=1)
26 |     print('done')
27 |
28 |     # Load ONNX model
29 |     print('--> Loading model')
30 |     ret = rknn.load_onnx(model=ONNX_MODEL)
31 |     if ret != 0:
32 |         print('Load ONNX model failed!')
33 |         exit(ret)
34 |     print('done')
35 |
36 |     # Build model
37 |     print('--> Building model')
38 |     ret = rknn.build(do_quantization=True, dataset='./dataset.txt')  # pre_compile=True
39 |     # ret = rknn.build(do_quantization=True)  # pre_compile=True
40 |     if ret != 0:
41 |         print('Build RKNN model failed!')
42 |         exit(ret)
43 |     print('done')
44 |
45 |     # Export rknn model
46 |     print('--> Export RKNN model')
47 |     ret = rknn.export_rknn(RKNN_MODEL)
48 |     if ret != 0:
49 |         print('Export RKNN model failed!')
50 |         exit(ret)
51 |     print('done')
52 |     rknn.release()
53 |
54 |
-------------------------------------------------------------------------------- /requirements.txt: --------------------------------------------------------------------------------
1 | # pip install -r requirements.txt
2 |
3 | # base ----------------------------------------
4 | Cython
5 | matplotlib>=3.2.2
6 | numpy>=1.18.5
7 | opencv-python>=4.1.2
8 | Pillow
9 | PyYAML>=5.3
10 | scipy>=1.4.1
11 | tensorboard>=2.2
12 | torch>=1.7.0
13 | torchvision>=0.8.1
14 | tqdm>=4.41.0
15 |
16 | # logging -------------------------------------
17 | # wandb
18 |
19 | # plotting ------------------------------------
20 | seaborn>=0.11.0
21 | pandas
22 |
23 | # export --------------------------------------
24 | # coremltools==4.0
25 | # onnx>=1.8.0
26 | # scikit-learn==0.19.2  # for coreml quantization
27 |
28 | # extras --------------------------------------
29 | thop  # FLOPS computation
30 | pycocotools>=2.0  # COCO mAP
31 |
-------------------------------------------------------------------------------- /utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/soloist-v/yolov5_for_rknn/f4290d1c322b7595a557983467a1fd13cfec287d/utils/__init__.py -------------------------------------------------------------------------------- /utils/activations.py: --------------------------------------------------------------------------------
1 | # Activation functions
2 |
3 | import torch
4 | import torch.nn as nn
5 | import torch.nn.functional as F
6 |
7 |
8 | # SiLU https://arxiv.org/pdf/1905.02244.pdf ----------------------------------------------------------------------------
9 | class SiLU(nn.Module):  # export-friendly version of nn.SiLU()
10 |     @staticmethod
11 |     def forward(x):
12 |         return x * torch.sigmoid(x)
13 |
14 |
15 | class Hardswish(nn.Module):  # export-friendly version of nn.Hardswish()
16 |     @staticmethod
17 |     def forward(x):
18 |         # return x * F.hardsigmoid(x)  # for torchscript and CoreML
19 |         return x * F.hardtanh(x + 3, 0., 6.) / 6.
# for torchscript, CoreML and ONNX 20 | 21 | 22 | class MemoryEfficientSwish(nn.Module): 23 | class F(torch.autograd.Function): 24 | @staticmethod 25 | def forward(ctx, x): 26 | ctx.save_for_backward(x) 27 | return x * torch.sigmoid(x) 28 | 29 | @staticmethod 30 | def backward(ctx, grad_output): 31 | x = ctx.saved_tensors[0] 32 | sx = torch.sigmoid(x) 33 | return grad_output * (sx * (1 + x * (1 - sx))) 34 | 35 | def forward(self, x): 36 | return self.F.apply(x) 37 | 38 | 39 | # Mish https://github.com/digantamisra98/Mish -------------------------------------------------------------------------- 40 | class Mish(nn.Module): 41 | @staticmethod 42 | def forward(x): 43 | return x * F.softplus(x).tanh() 44 | 45 | 46 | class MemoryEfficientMish(nn.Module): 47 | class F(torch.autograd.Function): 48 | @staticmethod 49 | def forward(ctx, x): 50 | ctx.save_for_backward(x) 51 | return x.mul(torch.tanh(F.softplus(x))) # x * tanh(ln(1 + exp(x))) 52 | 53 | @staticmethod 54 | def backward(ctx, grad_output): 55 | x = ctx.saved_tensors[0] 56 | sx = torch.sigmoid(x) 57 | fx = F.softplus(x).tanh() 58 | return grad_output * (fx + x * sx * (1 - fx * fx)) 59 | 60 | def forward(self, x): 61 | return self.F.apply(x) 62 | 63 | 64 | # FReLU https://arxiv.org/abs/2007.11824 ------------------------------------------------------------------------------- 65 | class FReLU(nn.Module): 66 | def __init__(self, c1, k=3): # ch_in, kernel 67 | super().__init__() 68 | self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False) 69 | self.bn = nn.BatchNorm2d(c1) 70 | 71 | def forward(self, x): 72 | return torch.max(x, self.bn(self.conv(x))) 73 | -------------------------------------------------------------------------------- /utils/autoanchor.py: -------------------------------------------------------------------------------- 1 | # Auto-anchor utils 2 | 3 | import numpy as np 4 | import torch 5 | import yaml 6 | from scipy.cluster.vq import kmeans 7 | from tqdm import tqdm 8 | 9 | 10 | def check_anchor_order(m): 11 | # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary 12 | a = m.anchor_grid.prod(-1).view(-1) # anchor area 13 | da = a[-1] - a[0] # delta a 14 | ds = m.stride[-1] - m.stride[0] # delta s 15 | if da.sign() != ds.sign(): # same order 16 | print('Reversing anchor order') 17 | m.anchors[:] = m.anchors.flip(0) 18 | m.anchor_grid[:] = m.anchor_grid.flip(0) 19 | 20 | 21 | def check_anchors(dataset, model, thr=4.0, imgsz=640): 22 | # Check anchor fit to data, recompute if necessary 23 | print('\nAnalyzing anchors... ', end='') 24 | m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect() 25 | shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True) 26 | scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1)) # augment scale 27 | wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float() # wh 28 | 29 | def metric(k): # compute metric 30 | r = wh[:, None] / k[None] 31 | x = torch.min(r, 1. / r).min(2)[0] # ratio metric 32 | best = x.max(1)[0] # best_x 33 | aat = (x > 1. / thr).float().sum(1).mean() # anchors above threshold 34 | bpr = (best > 1. / thr).float().mean() # best possible recall 35 | return bpr, aat 36 | 37 | bpr, aat = metric(m.anchor_grid.clone().cpu().view(-1, 2)) 38 | print('anchors/target = %.2f, Best Possible Recall (BPR) = %.4f' % (aat, bpr), end='') 39 | if bpr < 0.98: # threshold to recompute 40 | print('. 
Attempting to improve anchors, please wait...') 41 | na = m.anchor_grid.numel() // 2 # number of anchors 42 | new_anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False) 43 | new_bpr = metric(new_anchors.reshape(-1, 2))[0] 44 | if new_bpr > bpr: # replace anchors 45 | new_anchors = torch.tensor(new_anchors, device=m.anchors.device).type_as(m.anchors) 46 | m.anchor_grid[:] = new_anchors.clone().view_as(m.anchor_grid) # for inference 47 | m.anchors[:] = new_anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1) # loss 48 | check_anchor_order(m) 49 | print('New anchors saved to model. Update model *.yaml to use these anchors in the future.') 50 | else: 51 | print('Original anchors better than new anchors. Proceeding with original anchors.') 52 | print('') # newline 53 | 54 | 55 | def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True): 56 | """ Creates kmeans-evolved anchors from training dataset 57 | 58 | Arguments: 59 | path: path to dataset *.yaml, or a loaded dataset 60 | n: number of anchors 61 | img_size: image size used for training 62 | thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0 63 | gen: generations to evolve anchors using genetic algorithm 64 | verbose: print all results 65 | 66 | Return: 67 | k: kmeans evolved anchors 68 | 69 | Usage: 70 | from utils.autoanchor import *; _ = kmean_anchors() 71 | """ 72 | thr = 1. / thr 73 | 74 | def metric(k, wh): # compute metrics 75 | r = wh[:, None] / k[None] 76 | x = torch.min(r, 1. / r).min(2)[0] # ratio metric 77 | # x = wh_iou(wh, torch.tensor(k)) # iou metric 78 | return x, x.max(1)[0] # x, best_x 79 | 80 | def anchor_fitness(k): # mutation fitness 81 | _, best = metric(torch.tensor(k, dtype=torch.float32), wh) 82 | return (best * (best > thr).float()).mean() # fitness 83 | 84 | def print_results(k): 85 | k = k[np.argsort(k.prod(1))] # sort small to large 86 | x, best = metric(k, wh0) 87 | bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n # best possible recall, anch > thr 88 | print('thr=%.2f: %.4f best possible recall, %.2f anchors past thr' % (thr, bpr, aat)) 89 | print('n=%g, img_size=%s, metric_all=%.3f/%.3f-mean/best, past_thr=%.3f-mean: ' % 90 | (n, img_size, x.mean(), best.mean(), x[x > thr].mean()), end='') 91 | for i, x in enumerate(k): 92 | print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n') # use in *.cfg 93 | return k 94 | 95 | if isinstance(path, str): # *.yaml file 96 | with open(path) as f: 97 | data_dict = yaml.load(f, Loader=yaml.FullLoader) # model dict 98 | from utils.datasets import LoadImagesAndLabels 99 | dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True) 100 | else: 101 | dataset = path # dataset 102 | 103 | # Get label wh 104 | shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True) 105 | wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)]) # wh 106 | 107 | # Filter 108 | i = (wh0 < 3.0).any(1).sum() 109 | if i: 110 | print('WARNING: Extremely small objects found. ' 111 | '%g of %g labels are < 3 pixels in width or height.' % (i, len(wh0))) 112 | wh = wh0[(wh0 >= 2.0).any(1)] # filter > 2 pixels 113 | # wh = wh * (np.random.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1 114 | 115 | # Kmeans calculation 116 | print('Running kmeans for %g anchors on %g points...' 
% (n, len(wh))) 117 | s = wh.std(0) # sigmas for whitening 118 | k, dist = kmeans(wh / s, n, iter=30) # points, mean distance 119 | k *= s 120 | wh = torch.tensor(wh, dtype=torch.float32) # filtered 121 | wh0 = torch.tensor(wh0, dtype=torch.float32) # unfiltered 122 | k = print_results(k) 123 | 124 | # Plot 125 | # k, d = [None] * 20, [None] * 20 126 | # for i in tqdm(range(1, 21)): 127 | # k[i-1], d[i-1] = kmeans(wh / s, i) # points, mean distance 128 | # fig, ax = plt.subplots(1, 2, figsize=(14, 7), tight_layout=True) 129 | # ax = ax.ravel() 130 | # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.') 131 | # fig, ax = plt.subplots(1, 2, figsize=(14, 7)) # plot wh 132 | # ax[0].hist(wh[wh[:, 0]<100, 0],400) 133 | # ax[1].hist(wh[wh[:, 1]<100, 1],400) 134 | # fig.savefig('wh.png', dpi=200) 135 | 136 | # Evolve 137 | npr = np.random 138 | f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1 # fitness, generations, mutation prob, sigma 139 | pbar = tqdm(range(gen), desc='Evolving anchors with Genetic Algorithm') # progress bar 140 | for _ in pbar: 141 | v = np.ones(sh) 142 | while (v == 1).all(): # mutate until a change occurs (prevent duplicates) 143 | v = ((npr.random(sh) < mp) * npr.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0) 144 | kg = (k.copy() * v).clip(min=2.0) 145 | fg = anchor_fitness(kg) 146 | if fg > f: 147 | f, k = fg, kg.copy() 148 | pbar.desc = 'Evolving anchors with Genetic Algorithm: fitness = %.4f' % f 149 | if verbose: 150 | print_results(k) 151 | 152 | return print_results(k) 153 | -------------------------------------------------------------------------------- /utils/google_app_engine/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM gcr.io/google-appengine/python 2 | 3 | # Create a virtualenv for dependencies. This isolates these packages from 4 | # system-level packages. 5 | # Use -p python3 or -p python3.7 to select python version. Default is version 2. 6 | RUN virtualenv /env -p python3 7 | 8 | # Setting these environment variables are the same as running 9 | # source /env/bin/activate. 10 | ENV VIRTUAL_ENV /env 11 | ENV PATH /env/bin:$PATH 12 | 13 | RUN apt-get update && apt-get install -y python-opencv 14 | 15 | # Copy the application's requirements.txt and run pip to install all 16 | # dependencies into the virtualenv. 17 | ADD requirements.txt /app/requirements.txt 18 | RUN pip install -r /app/requirements.txt 19 | 20 | # Add the application source code. 21 | ADD . /app 22 | 23 | # Run a WSGI server to serve the application. gunicorn must be declared as 24 | # a dependency in requirements.txt. 
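# Deploy note (an assumption, not taken from this repo's docs): with the Cloud SDK
# installed, running `gcloud app deploy app.yaml` from this directory builds this image
# and serves it on App Engine flexible, per app.yaml's `runtime: custom` setting.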
25 | CMD gunicorn -b :$PORT main:app 26 | -------------------------------------------------------------------------------- /utils/google_app_engine/additional_requirements.txt: -------------------------------------------------------------------------------- 1 | # add these requirements in your app on top of the existing ones 2 | pip==18.1 3 | Flask==1.0.2 4 | gunicorn==19.9.0 5 | -------------------------------------------------------------------------------- /utils/google_app_engine/app.yaml: -------------------------------------------------------------------------------- 1 | runtime: custom 2 | env: flex 3 | 4 | service: yolov5app 5 | 6 | liveness_check: 7 | initial_delay_sec: 600 8 | 9 | manual_scaling: 10 | instances: 1 11 | resources: 12 | cpu: 1 13 | memory_gb: 4 14 | disk_size_gb: 20 -------------------------------------------------------------------------------- /utils/google_utils.py: -------------------------------------------------------------------------------- 1 | # Google utils: https://cloud.google.com/storage/docs/reference/libraries 2 | 3 | import os 4 | import platform 5 | import subprocess 6 | import time 7 | from pathlib import Path 8 | 9 | import requests 10 | import torch 11 | 12 | 13 | def gsutil_getsize(url=''): 14 | # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du 15 | s = subprocess.check_output('gsutil du %s' % url, shell=True).decode('utf-8') 16 | return eval(s.split(' ')[0]) if len(s) else 0 # bytes 17 | 18 | 19 | def attempt_download(weights): 20 | # Attempt to download pretrained weights if not found locally 21 | weights = str(weights).strip().replace("'", '') 22 | file = Path(weights).name.lower() 23 | 24 | msg = weights + ' missing, try downloading from https://github.com/ultralytics/yolov5/releases/' 25 | response = requests.get('https://api.github.com/repos/ultralytics/yolov5/releases/latest').json() # github api 26 | assets = [x['name'] for x in response['assets']] # release assets, i.e. ['yolov5s.pt', 'yolov5m.pt', ...] 27 | redundant = False # second download option 28 | 29 | if file in assets and not os.path.isfile(weights): 30 | try: # GitHub 31 | tag = response['tag_name'] # i.e. 'v1.0' 32 | url = f'https://github.com/ultralytics/yolov5/releases/download/{tag}/{file}' 33 | print('Downloading %s to %s...' % (url, weights)) 34 | torch.hub.download_url_to_file(url, weights) 35 | assert os.path.exists(weights) and os.path.getsize(weights) > 1E6 # check 36 | except Exception as e: # GCP 37 | print('Download error: %s' % e) 38 | assert redundant, 'No secondary mirror' 39 | url = 'https://storage.googleapis.com/ultralytics/yolov5/ckpt/' + file 40 | print('Downloading %s to %s...' % (url, weights)) 41 | r = os.system('curl -L %s -o %s' % (url, weights)) # torch.hub.download_url_to_file(url, weights) 42 | finally: 43 | if not (os.path.exists(weights) and os.path.getsize(weights) > 1E6): # check 44 | os.remove(weights) if os.path.exists(weights) else None # remove partial downloads 45 | print('ERROR: Download failure: %s' % msg) 46 | print('') 47 | return 48 | 49 | 50 | def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', name='tmp.zip'): 51 | # Downloads a file from Google Drive. from yolov5.utils.google_utils import *; gdrive_download() 52 | t = time.time() 53 | print('Downloading https://drive.google.com/uc?export=download&id=%s as %s... 
' % (id, name), end='') 54 | os.remove(name) if os.path.exists(name) else None # remove existing 55 | os.remove('cookie') if os.path.exists('cookie') else None 56 | 57 | # Attempt file download 58 | out = "NUL" if platform.system() == "Windows" else "/dev/null" 59 | os.system('curl -c ./cookie -s -L "drive.google.com/uc?export=download&id=%s" > %s ' % (id, out)) 60 | if os.path.exists('cookie'): # large file 61 | s = 'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm=%s&id=%s" -o %s' % (get_token(), id, name) 62 | else: # small file 63 | s = 'curl -s -L -o %s "drive.google.com/uc?export=download&id=%s"' % (name, id) 64 | r = os.system(s) # execute, capture return 65 | os.remove('cookie') if os.path.exists('cookie') else None 66 | 67 | # Error check 68 | if r != 0: 69 | os.remove(name) if os.path.exists(name) else None # remove partial 70 | print('Download error ') # raise Exception('Download error') 71 | return r 72 | 73 | # Unzip if archive 74 | if name.endswith('.zip'): 75 | print('unzipping... ', end='') 76 | os.system('unzip -q %s' % name) # unzip 77 | os.remove(name) # remove zip to free space 78 | 79 | print('Done (%.1fs)' % (time.time() - t)) 80 | return r 81 | 82 | 83 | def get_token(cookie="./cookie"): 84 | with open(cookie) as f: 85 | for line in f: 86 | if "download" in line: 87 | return line.split()[-1] 88 | return "" 89 | 90 | # def upload_blob(bucket_name, source_file_name, destination_blob_name): 91 | # # Uploads a file to a bucket 92 | # # https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python 93 | # 94 | # storage_client = storage.Client() 95 | # bucket = storage_client.get_bucket(bucket_name) 96 | # blob = bucket.blob(destination_blob_name) 97 | # 98 | # blob.upload_from_filename(source_file_name) 99 | # 100 | # print('File {} uploaded to {}.'.format( 101 | # source_file_name, 102 | # destination_blob_name)) 103 | # 104 | # 105 | # def download_blob(bucket_name, source_blob_name, destination_file_name): 106 | # # Uploads a blob from a bucket 107 | # storage_client = storage.Client() 108 | # bucket = storage_client.get_bucket(bucket_name) 109 | # blob = bucket.blob(source_blob_name) 110 | # 111 | # blob.download_to_filename(destination_file_name) 112 | # 113 | # print('Blob {} downloaded to {}.'.format( 114 | # source_blob_name, 115 | # destination_file_name)) 116 | -------------------------------------------------------------------------------- /utils/metrics.py: -------------------------------------------------------------------------------- 1 | # Model validation metrics 2 | 3 | from pathlib import Path 4 | 5 | import matplotlib.pyplot as plt 6 | import numpy as np 7 | import torch 8 | 9 | from . import general 10 | 11 | 12 | def fitness(x): 13 | # Model fitness as a weighted combination of metrics 14 | w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95] 15 | return (x[:, :4] * w).sum(1) 16 | 17 | 18 | def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='precision-recall_curve.png', names=[]): 19 | """ Compute the average precision, given the recall and precision curves. 20 | Source: https://github.com/rafaelpadilla/Object-Detection-Metrics. 21 | # Arguments 22 | tp: True positives (nparray, nx1 or nx10). 23 | conf: Objectness value from 0-1 (nparray). 24 | pred_cls: Predicted object classes (nparray). 25 | target_cls: True object classes (nparray). 
26 | plot: Plot precision-recall curve at mAP@0.5 27 | save_dir: Plot save directory 28 | # Returns 29 | The average precision as computed in py-faster-rcnn. 30 | """ 31 | 32 | # Sort by objectness 33 | i = np.argsort(-conf) 34 | tp, conf, pred_cls = tp[i], conf[i], pred_cls[i] 35 | 36 | # Find unique classes 37 | unique_classes = np.unique(target_cls) 38 | 39 | # Create Precision-Recall curve and compute AP for each class 40 | px, py = np.linspace(0, 1, 1000), [] # for plotting 41 | pr_score = 0.1 # score to evaluate P and R https://github.com/ultralytics/yolov3/issues/898 42 | s = [unique_classes.shape[0], tp.shape[1]] # number class, number iou thresholds (i.e. 10 for mAP0.5...0.95) 43 | ap, p, r = np.zeros(s), np.zeros(s), np.zeros(s) 44 | for ci, c in enumerate(unique_classes): 45 | i = pred_cls == c 46 | n_l = (target_cls == c).sum() # number of labels 47 | n_p = i.sum() # number of predictions 48 | 49 | if n_p == 0 or n_l == 0: 50 | continue 51 | else: 52 | # Accumulate FPs and TPs 53 | fpc = (1 - tp[i]).cumsum(0) 54 | tpc = tp[i].cumsum(0) 55 | 56 | # Recall 57 | recall = tpc / (n_l + 1e-16) # recall curve 58 | r[ci] = np.interp(-pr_score, -conf[i], recall[:, 0]) # r at pr_score, negative x, xp because xp decreases 59 | 60 | # Precision 61 | precision = tpc / (tpc + fpc) # precision curve 62 | p[ci] = np.interp(-pr_score, -conf[i], precision[:, 0]) # p at pr_score 63 | 64 | # AP from recall-precision curve 65 | for j in range(tp.shape[1]): 66 | ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j]) 67 | if plot and (j == 0): 68 | py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5 69 | 70 | # Compute F1 score (harmonic mean of precision and recall) 71 | f1 = 2 * p * r / (p + r + 1e-16) 72 | 73 | if plot: 74 | plot_pr_curve(px, py, ap, save_dir, names) 75 | 76 | return p, r, ap, f1, unique_classes.astype('int32') 77 | 78 | 79 | def compute_ap(recall, precision): 80 | """ Compute the average precision, given the recall and precision curves 81 | # Arguments 82 | recall: The recall curve (list) 83 | precision: The precision curve (list) 84 | # Returns 85 | Average precision, precision curve, recall curve 86 | """ 87 | 88 | # Append sentinel values to beginning and end 89 | mrec = np.concatenate(([0.], recall, [recall[-1] + 0.01])) 90 | mpre = np.concatenate(([1.], precision, [0.])) 91 | 92 | # Compute the precision envelope 93 | mpre = np.flip(np.maximum.accumulate(np.flip(mpre))) 94 | 95 | # Integrate area under curve 96 | method = 'interp' # methods: 'continuous', 'interp' 97 | if method == 'interp': 98 | x = np.linspace(0, 1, 101) # 101-point interp (COCO) 99 | ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate 100 | else: # 'continuous' 101 | i = np.where(mrec[1:] != mrec[:-1])[0] # points where x axis (recall) changes 102 | ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) # area under curve 103 | 104 | return ap, mpre, mrec 105 | 106 | 107 | class ConfusionMatrix: 108 | # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix 109 | def __init__(self, nc, conf=0.25, iou_thres=0.45): 110 | self.matrix = np.zeros((nc + 1, nc + 1)) 111 | self.nc = nc # number of classes 112 | self.conf = conf 113 | self.iou_thres = iou_thres 114 | 115 | def process_batch(self, detections, labels): 116 | """ 117 | Return intersection-over-union (Jaccard index) of boxes. 118 | Both sets of boxes are expected to be in (x1, y1, x2, y2) format. 
107 | class ConfusionMatrix: 108 | # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix 109 | def __init__(self, nc, conf=0.25, iou_thres=0.45): 110 | self.matrix = np.zeros((nc + 1, nc + 1)) 111 | self.nc = nc # number of classes 112 | self.conf = conf 113 | self.iou_thres = iou_thres 114 | 115 | def process_batch(self, detections, labels): 116 | """ 117 | Update the confusion matrix with a batch of detections and labels. 118 | Both sets of boxes are expected to be in (x1, y1, x2, y2) format. 119 | Arguments: 120 | detections (Array[N, 6]), x1, y1, x2, y2, conf, class 121 | labels (Array[M, 5]), class, x1, y1, x2, y2 122 | Returns: 123 | None, updates confusion matrix accordingly 124 | """ 125 | detections = detections[detections[:, 4] > self.conf] 126 | gt_classes = labels[:, 0].int() 127 | detection_classes = detections[:, 5].int() 128 | iou = general.box_iou(labels[:, 1:], detections[:, :4]) 129 | 130 | x = torch.where(iou > self.iou_thres) 131 | if x[0].shape[0]: 132 | matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() 133 | if x[0].shape[0] > 1: 134 | matches = matches[matches[:, 2].argsort()[::-1]] 135 | matches = matches[np.unique(matches[:, 1], return_index=True)[1]] 136 | matches = matches[matches[:, 2].argsort()[::-1]] 137 | matches = matches[np.unique(matches[:, 0], return_index=True)[1]] 138 | else: 139 | matches = np.zeros((0, 3)) 140 | 141 | n = matches.shape[0] > 0 142 | m0, m1, _ = matches.transpose().astype(np.int16) 143 | for i, gc in enumerate(gt_classes): 144 | j = m0 == i 145 | if n and sum(j) == 1: 146 | self.matrix[gc, detection_classes[m1[j]]] += 1 # correct 147 | else: 148 | self.matrix[gc, self.nc] += 1 # background FP 149 | 150 | if n: 151 | for i, dc in enumerate(detection_classes): 152 | if not any(m1 == i): 153 | self.matrix[self.nc, dc] += 1 # background FN 154 | 155 | def matrix(self): # note: unreachable on instances, shadowed by the self.matrix array set in __init__ 156 | return self.matrix 157 | 158 | def plot(self, save_dir='', names=()): 159 | try: 160 | import seaborn as sn 161 | 162 | array = self.matrix / (self.matrix.sum(0).reshape(1, self.nc + 1) + 1E-6) # normalize 163 | array[array < 0.005] = np.nan # don't annotate (would appear as 0.00) 164 | 165 | fig = plt.figure(figsize=(12, 9), tight_layout=True) 166 | sn.set(font_scale=1.0 if self.nc < 50 else 0.8) # for label size 167 | labels = (0 < len(names) < 99) and len(names) == self.nc # apply names to ticklabels 168 | sn.heatmap(array, annot=self.nc < 30, annot_kws={"size": 8}, cmap='Blues', fmt='.2f', square=True, 169 | xticklabels=names + ['background FN'] if labels else "auto", 170 | yticklabels=names + ['background FP'] if labels else "auto").set_facecolor((1, 1, 1)) 171 | fig.axes[0].set_xlabel('True') 172 | fig.axes[0].set_ylabel('Predicted') 173 | fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250) 174 | except Exception as e: 175 | print(f'WARNING: ConfusionMatrix plot failure: {e}') # don't fail silently 176 | 177 | def print(self): 178 | for i in range(self.nc + 1): 179 | print(' '.join(map(str, self.matrix[i]))) 180 | 181 | 182 | # Plots ---------------------------------------------------------------------------------------------------------------- 183 | 184 | def plot_pr_curve(px, py, ap, save_dir='.', names=()): 185 | fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) 186 | py = np.stack(py, axis=1) 187 | 188 | if 0 < len(names) < 21: # show mAP in legend if < 21 classes 189 | for i, y in enumerate(py.T): 190 | ax.plot(px, y, linewidth=1, label=f'{names[i]} %.3f' % ap[i, 0]) # plot(recall, precision) 191 | else: 192 | ax.plot(px, py, linewidth=1, color='grey') # plot(recall, precision) 193 | 194 | ax.plot(px, py.mean(1), linewidth=3, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean()) 195 | ax.set_xlabel('Recall') 196 | ax.set_ylabel('Precision') 197 | ax.set_xlim(0, 1) 198 | ax.set_ylim(0, 1) 199 | plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left") 200 | fig.savefig(Path(save_dir) / 'precision_recall_curve.png', dpi=250) 201 | --------------------------------------------------------------------------------
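A quick usage sketch for the ConfusionMatrix class above (toy tensors, assuming the repo root is on PYTHONPATH; not part of the repo):

import torch
from utils.metrics import ConfusionMatrix

cm = ConfusionMatrix(nc=2)  # 2 classes + 1 background row/column
detections = torch.tensor([[10., 10., 50., 50., 0.9, 1.]])  # x1, y1, x2, y2, conf, cls
labels = torch.tensor([[1., 12., 12., 48., 48.]])           # cls, x1, y1, x2, y2
cm.process_batch(detections, labels)  # IoU ~0.81 > iou_thres, so this counts as a correct match
cm.print()                            # row 1 / column 1 holds the single match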
/weights/download_weights.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Download latest models from https://github.com/ultralytics/yolov5/releases 3 | # Usage: 4 | #    $ bash weights/download_weights.sh 5 | 6 | python - <<EOF 7 | from utils.google_utils import attempt_download 8 | 9 | for x in ['s', 'm', 'l', 'x']: 10 |     attempt_download(f'yolov5{x}.pt') 11 | 12 | EOF 13 | -------------------------------------------------------------------------------- /yolov5_original/detect.py: -------------------------------------------------------------------------------- 83 | if webcam: # batch_size >= 1 84 | p, s, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), dataset.count 85 | else: 86 | p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0) 87 | 88 | p = Path(p) # to Path 89 | save_path = str(save_dir / p.name) # img.jpg 90 | txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt 91 | s += '%gx%g ' % img.shape[2:] # print string 92 | gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh 93 | if len(det): 94 | # Rescale boxes from img_size to im0 size 95 | det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round() 96 | 97 | # Print results 98 | for c in det[:, -1].unique(): 99 | n = (det[:, -1] == c).sum() # detections per class 100 | s += f'{n} {names[int(c)]}s, ' # add to string 101 | 102 | # Write results 103 | for *xyxy, conf, cls in reversed(det): 104 | if save_txt: # Write to file 105 | xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh 106 | line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh) # label format 107 | with open(txt_path + '.txt', 'a') as f: 108 | f.write(('%g ' * len(line)).rstrip() % line + '\n') 109 | 110 | if save_img or view_img: # Add bbox to image 111 | label = f'{names[int(cls)]} {conf:.2f}' 112 | plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3) 113 | 114 | # Print time (inference + NMS) 115 | print(f'{s}Done. ({t2 - t1:.3f}s)') 116 | 117 | # Stream results 118 | if view_img: 119 | cv2.imshow(str(p), im0) 120 | 121 | # Save results (image with detections) 122 | if save_img: 123 | if dataset.mode == 'image': 124 | cv2.imwrite(save_path, im0) 125 | else: # 'video' 126 | if vid_path != save_path: # new video 127 | vid_path = save_path 128 | if isinstance(vid_writer, cv2.VideoWriter): 129 | vid_writer.release() # release previous video writer 130 | 131 | fourcc = 'mp4v' # output video codec 132 | fps = vid_cap.get(cv2.CAP_PROP_FPS) 133 | w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) 134 | h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) 135 | vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, h)) 136 | vid_writer.write(im0) 137 | 138 | if save_txt or save_img: 139 | s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' 140 | print(f"Results saved to {save_dir}{s}") 141 | 142 | print(f'Done. ({time.time() - t0:.3f}s)') 143 | 144 | 145 | if __name__ == '__main__': 146 | parser = argparse.ArgumentParser() 147 | parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)') 148 | parser.add_argument('--source', type=str, default='data/images', help='source') # file/folder, 0 for webcam 149 | parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)') 150 | parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold') 151 | parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS') 152 | parser.add_argument('--device', default='', help='cuda device, i.e.
0 or 0,1,2,3 or cpu') 153 | parser.add_argument('--view-img', action='store_true', help='display results') 154 | parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') 155 | parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') 156 | parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3') 157 | parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') 158 | parser.add_argument('--augment', action='store_true', help='augmented inference') 159 | parser.add_argument('--update', action='store_true', help='update all models') 160 | parser.add_argument('--project', default='runs/detect', help='save results to project/name') 161 | parser.add_argument('--name', default='exp', help='save results to project/name') 162 | parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') 163 | opt = parser.parse_args() 164 | print(opt) 165 | 166 | with torch.no_grad(): 167 | if opt.update: # update all models (to fix SourceChangeWarning) 168 | for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']: 169 | detect() 170 | strip_optimizer(opt.weights) 171 | else: 172 | detect() 173 | -------------------------------------------------------------------------------- /yolov5_original/export_no_focus.py: -------------------------------------------------------------------------------- 1 | """Exports a YOLOv5 *.pt model to ONNX and TorchScript formats 2 | 3 | Usage: 4 | $ export PYTHONPATH="$PWD" && python models/export.py --weights ./weights/yolov5s.pt --img 640 --batch 1 5 | """ 6 | 7 | import argparse 8 | import sys 9 | import time 10 | 11 | sys.path.append('./') # to run '$ python *.py' files in subdirectories 12 | import numpy as np 13 | import torch 14 | import torch.nn as nn 15 | 16 | import models 17 | from models.experimental import attempt_load 18 | from utils.activations import Hardswish 19 | from utils.general import set_logging, check_img_size 20 | 21 | 22 | class SiLU(nn.Module): # export-friendly version of nn.SiLU() 23 | @staticmethod 24 | def forward(x): 25 | return x * torch.sigmoid(x) 26 | 27 | 28 | class Focus(nn.Module): 29 | def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups 30 | super(Focus, self).__init__() 31 | self.conv = models.common.Conv(c1 * 4, c2, k, s, p, g, act) 32 | 33 | def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2) 34 | return self.conv(x) 35 | 36 | 37 | models.common.Focus = Focus 38 | 39 | if __name__ == '__main__': 40 | parser = argparse.ArgumentParser() 41 | parser.add_argument('--weights', type=str, default='./weights/yolov5s.pt', 42 | help='weights path') # from yolov5/models/ 43 | parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size') # height, width 44 | parser.add_argument('--batch-size', type=int, default=1, help='batch size') 45 | opt = parser.parse_args() 46 | opt.img_size *= 2 if len(opt.img_size) == 1 else 1 # expand 47 | print(opt) 48 | set_logging() 49 | t = time.time() 50 | model = attempt_load(opt.weights, map_location=torch.device('cpu')) # load FP32 model 51 | labels = model.names 52 | gs = int(max(model.stride)) # grid size (max stride) 53 | opt.img_size = [check_img_size(x, gs) for x in opt.img_size] # verify img_size are gs-multiples 54 | img = torch.zeros(opt.batch_size, 3, *opt.img_size[::-1]) # image size(1,3,320,192) iDetection 55 | 
img = torch.cat([img[..., ::2, ::2], img[..., 1::2, ::2], img[..., ::2, 1::2], img[..., 1::2, 1::2]], 1) # apply the Focus space-to-depth slicing to the input (the patched Focus above no longer does it) 56 | # np.save(opt.weights.replace('.pt', f'_{opt.img_size[1]}x{opt.img_size[0]}.npy'), np.array(img, "uint8")) 57 | for k, m in model.named_modules(): 58 | m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility 59 | if isinstance(m, models.common.Conv): # assign export-friendly activations 60 | if isinstance(m.act, nn.Hardswish): 61 | m.act = Hardswish() 62 | elif isinstance(m.act, nn.SiLU): 63 | m.act = SiLU() 64 | model.model[-1].export = True # set Detect() layer export=True 65 | y = model(img) # dry run 66 | 67 | # ONNX export 68 | try: 69 | import onnx 70 | 71 | print('\nStarting ONNX export with onnx %s...' % onnx.__version__) 72 | f = opt.weights.replace('.pt', '.onnx') # filename 73 | torch.onnx.export(model, img, f, verbose=False, opset_version=10, input_names=['images'], 74 | output_names=['classes', 'boxes'] if y is None else ['output']) 75 | 76 | # Checks 77 | onnx_model = onnx.load(f) # load onnx model 78 | onnx.checker.check_model(onnx_model) # check onnx model 79 | # print(onnx.helper.printable_graph(onnx_model.graph)) # print a human readable model 80 | print('ONNX export success, saved as %s' % f) 81 | except Exception as e: 82 | print('ONNX export failure: %s' % e) 83 | 84 | print('\nExport complete (%.2fs). Visualize with https://github.com/lutzroeder/netron.' % (time.time() - t)) 85 | -------------------------------------------------------------------------------- /yolov5_original/hubconf.py: -------------------------------------------------------------------------------- 1 | """File for accessing YOLOv5 via PyTorch Hub https://pytorch.org/hub/ 2 | 3 | Usage: 4 | import torch 5 | model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True, channels=3, classes=80) 6 | """ 7 | 8 | from pathlib import Path 9 | 10 | import torch 11 | 12 | from models.yolo import Model 13 | from utils.general import set_logging 14 | from utils.google_utils import attempt_download 15 | 16 | dependencies = ['torch', 'yaml'] 17 | set_logging() 18 | 19 | 20 | def create(name, pretrained, channels, classes, autoshape): 21 | """Creates a specified YOLOv5 model 22 | 23 | Arguments: 24 | name (str): name of model, i.e. 'yolov5s' 25 | pretrained (bool): load pretrained weights into the model 26 | channels (int): number of input channels 27 | classes (int): number of model classes 28 | 29 | Returns: 30 | pytorch model 31 | """ 32 | config = Path(__file__).parent / 'models' / f'{name}.yaml' # model.yaml path 33 | try: 34 | model = Model(config, channels, classes) 35 | if pretrained: 36 | fname = f'{name}.pt' # checkpoint filename 37 | attempt_download(fname) # download if not found locally 38 | ckpt = torch.load(fname, map_location=torch.device('cpu')) # load 39 | state_dict = ckpt['model'].float().state_dict() # to FP32 40 | state_dict = {k: v for k, v in state_dict.items() if model.state_dict()[k].shape == v.shape} # filter 41 | model.load_state_dict(state_dict, strict=False) # load 42 | if len(ckpt['model'].names) == classes: 43 | model.names = ckpt['model'].names # set class names attribute 44 | if autoshape: 45 | model = model.autoshape() # for file/URI/PIL/cv2/np inputs and NMS 46 | return model 47 | 48 | except Exception as e: 49 | help_url = 'https://github.com/ultralytics/yolov5/issues/36' 50 | s = 'Cache may be out of date, try force_reload=True. See %s for help.'
% help_url 51 | raise Exception(s) from e 52 | 53 | 54 | def yolov5s(pretrained=False, channels=3, classes=80, autoshape=True): 55 | """YOLOv5-small model from https://github.com/ultralytics/yolov5 56 | 57 | Arguments: 58 | pretrained (bool): load pretrained weights into the model, default=False 59 | channels (int): number of input channels, default=3 60 | classes (int): number of model classes, default=80 61 | 62 | Returns: 63 | pytorch model 64 | """ 65 | return create('yolov5s', pretrained, channels, classes, autoshape) 66 | 67 | 68 | def yolov5m(pretrained=False, channels=3, classes=80, autoshape=True): 69 | """YOLOv5-medium model from https://github.com/ultralytics/yolov5 70 | 71 | Arguments: 72 | pretrained (bool): load pretrained weights into the model, default=False 73 | channels (int): number of input channels, default=3 74 | classes (int): number of model classes, default=80 75 | 76 | Returns: 77 | pytorch model 78 | """ 79 | return create('yolov5m', pretrained, channels, classes, autoshape) 80 | 81 | 82 | def yolov5l(pretrained=False, channels=3, classes=80, autoshape=True): 83 | """YOLOv5-large model from https://github.com/ultralytics/yolov5 84 | 85 | Arguments: 86 | pretrained (bool): load pretrained weights into the model, default=False 87 | channels (int): number of input channels, default=3 88 | classes (int): number of model classes, default=80 89 | 90 | Returns: 91 | pytorch model 92 | """ 93 | return create('yolov5l', pretrained, channels, classes, autoshape) 94 | 95 | 96 | def yolov5x(pretrained=False, channels=3, classes=80, autoshape=True): 97 | """YOLOv5-xlarge model from https://github.com/ultralytics/yolov5 98 | 99 | Arguments: 100 | pretrained (bool): load pretrained weights into the model, default=False 101 | channels (int): number of input channels, default=3 102 | classes (int): number of model classes, default=80 103 | 104 | Returns: 105 | pytorch model 106 | """ 107 | return create('yolov5x', pretrained, channels, classes, autoshape) 108 | 109 | 110 | def custom(path_or_model='path/to/model.pt', autoshape=True): 111 | """YOLOv5-custom model from https://github.com/ultralytics/yolov5 112 | 113 | Arguments (3 options): 114 | path_or_model (str): 'path/to/model.pt' 115 | path_or_model (dict): torch.load('path/to/model.pt') 116 | path_or_model (nn.Module): torch.load('path/to/model.pt')['model'] 117 | 118 | Returns: 119 | pytorch model 120 | """ 121 | model = torch.load(path_or_model) if isinstance(path_or_model, str) else path_or_model # load checkpoint 122 | if isinstance(model, dict): 123 | model = model['model'] # load model 124 | 125 | hub_model = Model(model.yaml).to(next(model.parameters()).device) # create 126 | hub_model.load_state_dict(model.float().state_dict()) # load state_dict 127 | hub_model.names = model.names # class names 128 | return hub_model.autoshape() if autoshape else hub_model 129 | 130 | 131 | if __name__ == '__main__': 132 | model = create(name='yolov5s', pretrained=True, channels=3, classes=80, autoshape=True) # pretrained example 133 | # model = custom(path_or_model='path/to/model.pt') # custom example 134 | 135 | # Verify inference 136 | from PIL import Image 137 | 138 | imgs = [Image.open(x) for x in Path('data/images').glob('*.jpg')] 139 | results = model(imgs) 140 | results.show() 141 | results.print() 142 | -------------------------------------------------------------------------------- /yolov5_original/models/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/soloist-v/yolov5_for_rknn/f4290d1c322b7595a557983467a1fd13cfec287d/yolov5_original/models/__init__.py -------------------------------------------------------------------------------- /yolov5_original/models/experimental.py: -------------------------------------------------------------------------------- 1 | # This file contains experimental modules 2 | 3 | import numpy as np 4 | import torch 5 | import torch.nn as nn 6 | 7 | from models.common import Conv, DWConv 8 | from utils.google_utils import attempt_download 9 | 10 | 11 | class CrossConv(nn.Module): 12 | # Cross Convolution Downsample 13 | def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False): 14 | # ch_in, ch_out, kernel, stride, groups, expansion, shortcut 15 | super(CrossConv, self).__init__() 16 | c_ = int(c2 * e) # hidden channels 17 | self.cv1 = Conv(c1, c_, (1, k), (1, s)) 18 | self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g) 19 | self.add = shortcut and c1 == c2 20 | 21 | def forward(self, x): 22 | return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x)) 23 | 24 | 25 | class Sum(nn.Module): 26 | # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070 27 | def __init__(self, n, weight=False): # n: number of inputs 28 | super(Sum, self).__init__() 29 | self.weight = weight # apply weights boolean 30 | self.iter = range(n - 1) # iter object 31 | if weight: 32 | self.w = nn.Parameter(-torch.arange(1., n) / 2, requires_grad=True) # layer weights 33 | 34 | def forward(self, x): 35 | y = x[0] # no weight 36 | if self.weight: 37 | w = torch.sigmoid(self.w) * 2 38 | for i in self.iter: 39 | y = y + x[i + 1] * w[i] 40 | else: 41 | for i in self.iter: 42 | y = y + x[i + 1] 43 | return y 44 | 45 | 46 | class GhostConv(nn.Module): 47 | # Ghost Convolution https://github.com/huawei-noah/ghostnet 48 | def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups 49 | super(GhostConv, self).__init__() 50 | c_ = c2 // 2 # hidden channels 51 | self.cv1 = Conv(c1, c_, k, s, None, g, act) 52 | self.cv2 = Conv(c_, c_, 5, 1, None, c_, act) 53 | 54 | def forward(self, x): 55 | y = self.cv1(x) 56 | return torch.cat([y, self.cv2(y)], 1) 57 | 58 | 59 | class GhostBottleneck(nn.Module): 60 | # Ghost Bottleneck https://github.com/huawei-noah/ghostnet 61 | def __init__(self, c1, c2, k, s): 62 | super(GhostBottleneck, self).__init__() 63 | c_ = c2 // 2 64 | self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1), # pw 65 | DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw 66 | GhostConv(c_, c2, 1, 1, act=False)) # pw-linear 67 | self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False), 68 | Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity() 69 | 70 | def forward(self, x): 71 | return self.conv(x) + self.shortcut(x) 72 | 73 | 74 | class MixConv2d(nn.Module): 75 | # Mixed Depthwise Conv https://arxiv.org/abs/1907.09595 76 | def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): 77 | super(MixConv2d, self).__init__() 78 | groups = len(k) 79 | if equal_ch: # equal c_ per group 80 | i = torch.linspace(0, groups - 1E-6, c2).floor() # c2 indices 81 | c_ = [(i == g).sum() for g in range(groups)] # intermediate channels 82 | else: # equal weight.numel() per group 83 | b = [c2] + [0] * groups 84 | a = np.eye(groups + 1, groups, k=-1) 85 | a -= np.roll(a, 1, axis=1) 86 | a *= np.array(k) ** 2 87 | a[0] = 1 88 | c_ = np.linalg.lstsq(a, b, rcond=None)[0].round() # solve for equal weight indices, ax = b 89 | 90 | self.m = 
nn.ModuleList([nn.Conv2d(c1, int(c_[g]), k[g], s, k[g] // 2, bias=False) for g in range(groups)]) 91 | self.bn = nn.BatchNorm2d(c2) 92 | self.act = nn.LeakyReLU(0.1, inplace=True) 93 | 94 | def forward(self, x): 95 | return x + self.act(self.bn(torch.cat([m(x) for m in self.m], 1))) 96 | 97 | 98 | class Ensemble(nn.ModuleList): 99 | # Ensemble of models 100 | def __init__(self): 101 | super(Ensemble, self).__init__() 102 | 103 | def forward(self, x, augment=False): 104 | y = [] 105 | for module in self: 106 | y.append(module(x, augment)[0]) 107 | # y = torch.stack(y).max(0)[0] # max ensemble 108 | # y = torch.stack(y).mean(0) # mean ensemble 109 | y = torch.cat(y, 1) # nms ensemble 110 | return y, None # inference, train output 111 | 112 | 113 | def attempt_load(weights, map_location=None): 114 | # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a 115 | model = Ensemble() 116 | for w in weights if isinstance(weights, list) else [weights]: 117 | attempt_download(w) 118 | model.append(torch.load(w, map_location=map_location)['model'].float().fuse().eval()) # load FP32 model 119 | 120 | # Compatibility updates 121 | for m in model.modules(): 122 | if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6]: 123 | m.inplace = True # pytorch 1.7.0 compatibility 124 | elif type(m) is Conv: 125 | m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility 126 | 127 | if len(model) == 1: 128 | return model[-1] # return model 129 | else: 130 | print('Ensemble created with %s\n' % weights) 131 | for k in ['names', 'stride']: 132 | setattr(model, k, getattr(model[-1], k)) 133 | return model # return ensemble 134 | -------------------------------------------------------------------------------- /yolov5_original/models/export.py: -------------------------------------------------------------------------------- 1 | """Exports a YOLOv5 *.pt model to ONNX and TorchScript formats 2 | 3 | Usage: 4 | $ export PYTHONPATH="$PWD" && python models/export.py --weights ./weights/yolov5s.pt --img 640 --batch 1 5 | """ 6 | 7 | import argparse 8 | import sys 9 | import time 10 | 11 | sys.path.append('./') # to run '$ python *.py' files in subdirectories 12 | 13 | import torch 14 | import torch.nn as nn 15 | 16 | import models 17 | from models.experimental import attempt_load 18 | from utils.activations import Hardswish, SiLU 19 | from utils.general import set_logging, check_img_size 20 | 21 | if __name__ == '__main__': 22 | parser = argparse.ArgumentParser() 23 | parser.add_argument('--weights', type=str, default='weights/yolov5s.pt', help='weights path') # from yolov5/models/ 24 | parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size') # height, width 25 | parser.add_argument('--batch-size', type=int, default=1, help='batch size') 26 | opt = parser.parse_args() 27 | opt.img_size *= 2 if len(opt.img_size) == 1 else 1 # expand 28 | print(opt) 29 | set_logging() 30 | t = time.time() 31 | 32 | # Load PyTorch model 33 | model = attempt_load(opt.weights, map_location=torch.device('cpu')) # load FP32 model 34 | labels = model.names 35 | 36 | # Checks 37 | gs = int(max(model.stride)) # grid size (max stride) 38 | opt.img_size = [check_img_size(x, gs) for x in opt.img_size] # verify img_size are gs-multiples 39 | 40 | # Input 41 | img = torch.zeros(opt.batch_size, 3, *opt.img_size) # image size(1,3,320,192) iDetection 42 | 43 | # Update model 44 | for k, m in model.named_modules(): 45 | m._non_persistent_buffers_set = set() # pytorch 1.6.0 
compatibility 46 | if isinstance(m, models.common.Conv): # assign export-friendly activations 47 | if isinstance(m.act, nn.Hardswish): 48 | m.act = Hardswish() 49 | elif isinstance(m.act, nn.SiLU): 50 | m.act = SiLU() 51 | # elif isinstance(m, models.yolo.Detect): 52 | # m.forward = m.forward_export # assign forward (optional) 53 | model.model[-1].export = True # set Detect() layer export=True 54 | y = model(img) # dry run 55 | 56 | # TorchScript export 57 | # try: 58 | # print('\nStarting TorchScript export with torch %s...' % torch.__version__) 59 | # f = opt.weights.replace('.pt', '.torchscript.pt') # filename 60 | # ts = torch.jit.trace(model, img) 61 | # ts.save(f) 62 | # print('TorchScript export success, saved as %s' % f) 63 | # except Exception as e: 64 | # print('TorchScript export failure: %s' % e) 65 | 66 | # ONNX export 67 | try: 68 | import onnx 69 | 70 | print('\nStarting ONNX export with onnx %s...' % onnx.__version__) 71 | f = opt.weights.replace('.pt', '.onnx') # filename 72 | torch.onnx.export(model, img, f, verbose=False, opset_version=10, input_names=['images'], 73 | output_names=['classes', 'boxes'] if y is None else ['output']) 74 | 75 | # Checks 76 | onnx_model = onnx.load(f) # load onnx model 77 | onnx.checker.check_model(onnx_model) # check onnx model 78 | # print(onnx.helper.printable_graph(onnx_model.graph)) # print a human readable model 79 | print('ONNX export success, saved as %s' % f) 80 | except Exception as e: 81 | print('ONNX export failure: %s' % e) 82 | 83 | # CoreML export 84 | # try: 85 | # import coremltools as ct 86 | # 87 | # print('\nStarting CoreML export with coremltools %s...' % ct.__version__) 88 | # # convert model from torchscript and apply pixel scaling as per detect.py 89 | # model = ct.convert(ts, inputs=[ct.ImageType(name='image', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])]) 90 | # f = opt.weights.replace('.pt', '.mlmodel') # filename 91 | # model.save(f) 92 | # print('CoreML export success, saved as %s' % f) 93 | # except Exception as e: 94 | # print('CoreML export failure: %s' % e) 95 | 96 | # Finish 97 | print('\nExport complete (%.2fs). Visualize with https://github.com/lutzroeder/netron.' 
% (time.time() - t)) 98 | -------------------------------------------------------------------------------- /yolov5_original/models/hub/anchors.yaml: -------------------------------------------------------------------------------- 1 | # Default YOLOv5 anchors for COCO data 2 | 3 | 4 | # P5 ------------------------------------------------------------------------------------------------------------------- 5 | # P5-640: 6 | anchors_p5_640: 7 | - [ 10,13, 16,30, 33,23 ] # P3/8 8 | - [ 30,61, 62,45, 59,119 ] # P4/16 9 | - [ 116,90, 156,198, 373,326 ] # P5/32 10 | 11 | 12 | # P6 ------------------------------------------------------------------------------------------------------------------- 13 | # P6-640: thr=0.25: 0.9964 BPR, 5.54 anchors past thr, n=12, img_size=640, metric_all=0.281/0.716-mean/best, past_thr=0.469-mean: 9,11, 21,19, 17,41, 43,32, 39,70, 86,64, 65,131, 134,130, 120,265, 282,180, 247,354, 512,387 14 | anchors_p6_640: 15 | - [ 9,11, 21,19, 17,41 ] # P3/8 16 | - [ 43,32, 39,70, 86,64 ] # P4/16 17 | - [ 65,131, 134,130, 120,265 ] # P5/32 18 | - [ 282,180, 247,354, 512,387 ] # P6/64 19 | 20 | # P6-1280: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1280, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 19,27, 44,40, 38,94, 96,68, 86,152, 180,137, 140,301, 303,264, 238,542, 436,615, 739,380, 925,792 21 | anchors_p6_1280: 22 | - [ 19,27, 44,40, 38,94 ] # P3/8 23 | - [ 96,68, 86,152, 180,137 ] # P4/16 24 | - [ 140,301, 303,264, 238,542 ] # P5/32 25 | - [ 436,615, 739,380, 925,792 ] # P6/64 26 | 27 | # P6-1920: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1920, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 28,41, 67,59, 57,141, 144,103, 129,227, 270,205, 209,452, 455,396, 358,812, 653,922, 1109,570, 1387,1187 28 | anchors_p6_1920: 29 | - [ 28,41, 67,59, 57,141 ] # P3/8 30 | - [ 144,103, 129,227, 270,205 ] # P4/16 31 | - [ 209,452, 455,396, 358,812 ] # P5/32 32 | - [ 653,922, 1109,570, 1387,1187 ] # P6/64 33 | 34 | 35 | # P7 ------------------------------------------------------------------------------------------------------------------- 36 | # P7-640: thr=0.25: 0.9962 BPR, 6.76 anchors past thr, n=15, img_size=640, metric_all=0.275/0.733-mean/best, past_thr=0.466-mean: 11,11, 13,30, 29,20, 30,46, 61,38, 39,92, 78,80, 146,66, 79,163, 149,150, 321,143, 157,303, 257,402, 359,290, 524,372 37 | anchors_p7_640: 38 | - [ 11,11, 13,30, 29,20 ] # P3/8 39 | - [ 30,46, 61,38, 39,92 ] # P4/16 40 | - [ 78,80, 146,66, 79,163 ] # P5/32 41 | - [ 149,150, 321,143, 157,303 ] # P6/64 42 | - [ 257,402, 359,290, 524,372 ] # P7/128 43 | 44 | # P7-1280: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1280, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 19,22, 54,36, 32,77, 70,83, 138,71, 75,173, 165,159, 148,334, 375,151, 334,317, 251,626, 499,474, 750,326, 534,814, 1079,818 45 | anchors_p7_1280: 46 | - [ 19,22, 54,36, 32,77 ] # P3/8 47 | - [ 70,83, 138,71, 75,173 ] # P4/16 48 | - [ 165,159, 148,334, 375,151 ] # P5/32 49 | - [ 334,317, 251,626, 499,474 ] # P6/64 50 | - [ 750,326, 534,814, 1079,818 ] # P7/128 51 | 52 | # P7-1920: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1920, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 29,34, 81,55, 47,115, 105,124, 207,107, 113,259, 247,238, 222,500, 563,227, 501,476, 376,939, 749,711, 1126,489, 801,1222, 1618,1227 53 | anchors_p7_1920: 54 | - [ 29,34, 81,55, 47,115 ] # P3/8 55 | - [ 105,124, 207,107, 113,259 ] # P4/16 56 | - [ 247,238, 222,500, 563,227 ] # P5/32 57 | - [ 501,476, 
376,939, 749,711 ] # P6/64 58 | - [ 1126,489, 801,1222, 1618,1227 ] # P7/128 59 | -------------------------------------------------------------------------------- /yolov5_original/models/hub/yolov3-spp.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.0 # model depth multiple 4 | width_multiple: 1.0 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # darknet53 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Conv, [32, 3, 1]], # 0 16 | [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 17 | [-1, 1, Bottleneck, [64]], 18 | [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 19 | [-1, 2, Bottleneck, [128]], 20 | [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 21 | [-1, 8, Bottleneck, [256]], 22 | [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 23 | [-1, 8, Bottleneck, [512]], 24 | [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 25 | [-1, 4, Bottleneck, [1024]], # 10 26 | ] 27 | 28 | # YOLOv3-SPP head 29 | head: 30 | [[-1, 1, Bottleneck, [1024, False]], 31 | [-1, 1, SPP, [512, [5, 9, 13]]], 32 | [-1, 1, Conv, [1024, 3, 1]], 33 | [-1, 1, Conv, [512, 1, 1]], 34 | [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) 35 | 36 | [-2, 1, Conv, [256, 1, 1]], 37 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 38 | [[-1, 8], 1, Concat, [1]], # cat backbone P4 39 | [-1, 1, Bottleneck, [512, False]], 40 | [-1, 1, Bottleneck, [512, False]], 41 | [-1, 1, Conv, [256, 1, 1]], 42 | [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) 43 | 44 | [-2, 1, Conv, [128, 1, 1]], 45 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 46 | [[-1, 6], 1, Concat, [1]], # cat backbone P3 47 | [-1, 1, Bottleneck, [256, False]], 48 | [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small) 49 | 50 | [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 51 | ] 52 | -------------------------------------------------------------------------------- /yolov5_original/models/hub/yolov3-tiny.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.0 # model depth multiple 4 | width_multiple: 1.0 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 8 | - [10,14, 23,27, 37,58] # P4/16 9 | - [81,82, 135,169, 344,319] # P5/32 10 | 11 | # YOLOv3-tiny backbone 12 | backbone: 13 | # [from, number, module, args] 14 | [[-1, 1, Conv, [16, 3, 1]], # 0 15 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 1-P1/2 16 | [-1, 1, Conv, [32, 3, 1]], 17 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 3-P2/4 18 | [-1, 1, Conv, [64, 3, 1]], 19 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 5-P3/8 20 | [-1, 1, Conv, [128, 3, 1]], 21 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 7-P4/16 22 | [-1, 1, Conv, [256, 3, 1]], 23 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 9-P5/32 24 | [-1, 1, Conv, [512, 3, 1]], 25 | [-1, 1, nn.ZeroPad2d, [[0, 1, 0, 1]]], # 11 26 | [-1, 1, nn.MaxPool2d, [2, 1, 0]], # 12 27 | ] 28 | 29 | # YOLOv3-tiny head 30 | head: 31 | [[-1, 1, Conv, [1024, 3, 1]], 32 | [-1, 1, Conv, [256, 1, 1]], 33 | [-1, 1, Conv, [512, 3, 1]], # 15 (P5/32-large) 34 | 35 | [-2, 1, Conv, [128, 1, 1]], 36 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 37 | [[-1, 8], 1, Concat, [1]], # cat backbone P4 38 | [-1, 1, Conv, [256, 3, 1]], # 19 (P4/16-medium) 39 | 40 | [[19, 15], 1, Detect, [nc, anchors]], # Detect(P4, P5) 41 | ] 42 | -------------------------------------------------------------------------------- 
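All of the hub configs in this directory share the same four-column row layout, [from, number, module, args]: `from` indexes the input layer (-1 = previous layer), `number` is the repeat count, `module` names the block class, and `args` are its constructor arguments. A minimal reading sketch (PyYAML only; the SPEC string and print loop are hypothetical, not the repo's models.yolo.parse_model):

import yaml

# A hypothetical three-row spec in the same format as the files above/below.
SPEC = """
backbone:
  - [-1, 1, Conv, [16, 3, 1]]
  - [-1, 1, nn.MaxPool2d, [2, 2, 0]]
  - [-1, 2, Bottleneck, [64]]
"""

for i, (f, n, m, args) in enumerate(yaml.safe_load(SPEC)['backbone']):
    # f: input layer index (-1 = previous layer), n: repeat count,
    # m: module class name, args: constructor arguments
    print(f'layer {i}: from={f} repeats={n} module={m} args={args}')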
/yolov5_original/models/hub/yolov3.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.0 # model depth multiple 4 | width_multiple: 1.0 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # darknet53 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Conv, [32, 3, 1]], # 0 16 | [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 17 | [-1, 1, Bottleneck, [64]], 18 | [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 19 | [-1, 2, Bottleneck, [128]], 20 | [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 21 | [-1, 8, Bottleneck, [256]], 22 | [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 23 | [-1, 8, Bottleneck, [512]], 24 | [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 25 | [-1, 4, Bottleneck, [1024]], # 10 26 | ] 27 | 28 | # YOLOv3 head 29 | head: 30 | [[-1, 1, Bottleneck, [1024, False]], 31 | [-1, 1, Conv, [512, [1, 1]]], 32 | [-1, 1, Conv, [1024, 3, 1]], 33 | [-1, 1, Conv, [512, 1, 1]], 34 | [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) 35 | 36 | [-2, 1, Conv, [256, 1, 1]], 37 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 38 | [[-1, 8], 1, Concat, [1]], # cat backbone P4 39 | [-1, 1, Bottleneck, [512, False]], 40 | [-1, 1, Bottleneck, [512, False]], 41 | [-1, 1, Conv, [256, 1, 1]], 42 | [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) 43 | 44 | [-2, 1, Conv, [128, 1, 1]], 45 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 46 | [[-1, 6], 1, Concat, [1]], # cat backbone P3 47 | [-1, 1, Bottleneck, [256, False]], 48 | [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small) 49 | 50 | [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 51 | ] 52 | -------------------------------------------------------------------------------- /yolov5_original/models/hub/yolov5-fpn.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.0 # model depth multiple 4 | width_multiple: 1.0 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Focus, [64, 3]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, Bottleneck, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 9, BottleneckCSP, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, BottleneckCSP, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 1, SPP, [1024, [5, 9, 13]]], 24 | [-1, 6, BottleneckCSP, [1024]], # 9 25 | ] 26 | 27 | # YOLOv5 FPN head 28 | head: 29 | [[-1, 3, BottleneckCSP, [1024, False]], # 10 (P5/32-large) 30 | 31 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 32 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 33 | [-1, 1, Conv, [512, 1, 1]], 34 | [-1, 3, BottleneckCSP, [512, False]], # 14 (P4/16-medium) 35 | 36 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 37 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 38 | [-1, 1, Conv, [256, 1, 1]], 39 | [-1, 3, BottleneckCSP, [256, False]], # 18 (P3/8-small) 40 | 41 | [[18, 14, 10], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 42 | ] 43 | -------------------------------------------------------------------------------- /yolov5_original/models/hub/yolov5-p2.yaml: -------------------------------------------------------------------------------- 1 | # 
parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.0 # model depth multiple 4 | width_multiple: 1.0 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 3 8 | 9 | # YOLOv5 backbone 10 | backbone: 11 | # [from, number, module, args] 12 | [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 13 | [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 14 | [ -1, 3, C3, [ 128 ] ], 15 | [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 16 | [ -1, 9, C3, [ 256 ] ], 17 | [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 18 | [ -1, 9, C3, [ 512 ] ], 19 | [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 7-P5/32 20 | [ -1, 1, SPP, [ 1024, [ 5, 9, 13 ] ] ], 21 | [ -1, 3, C3, [ 1024, False ] ], # 9 22 | ] 23 | 24 | # YOLOv5 head 25 | head: 26 | [ [ -1, 1, Conv, [ 512, 1, 1 ] ], 27 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], 28 | [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 29 | [ -1, 3, C3, [ 512, False ] ], # 13 30 | 31 | [ -1, 1, Conv, [ 256, 1, 1 ] ], 32 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], 33 | [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 34 | [ -1, 3, C3, [ 256, False ] ], # 17 (P3/8-small) 35 | 36 | [ -1, 1, Conv, [ 128, 1, 1 ] ], 37 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], 38 | [ [ -1, 2 ], 1, Concat, [ 1 ] ], # cat backbone P2 39 | [ -1, 1, C3, [ 128, False ] ], # 21 (P2/4-xsmall) 40 | 41 | [ -1, 1, Conv, [ 128, 3, 2 ] ], 42 | [ [ -1, 18 ], 1, Concat, [ 1 ] ], # cat head P3 43 | [ -1, 3, C3, [ 256, False ] ], # 24 (P3/8-small) 44 | 45 | [ -1, 1, Conv, [ 256, 3, 2 ] ], 46 | [ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P4 47 | [ -1, 3, C3, [ 512, False ] ], # 27 (P4/16-medium) 48 | 49 | [ -1, 1, Conv, [ 512, 3, 2 ] ], 50 | [ [ -1, 10 ], 1, Concat, [ 1 ] ], # cat head P5 51 | [ -1, 3, C3, [ 1024, False ] ], # 30 (P5/32-large) 52 | 53 | [ [ 24, 27, 30 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5) 54 | ] 55 | -------------------------------------------------------------------------------- /yolov5_original/models/hub/yolov5-p6.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.0 # model depth multiple 4 | width_multiple: 1.0 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 3 8 | 9 | # YOLOv5 backbone 10 | backbone: 11 | # [from, number, module, args] 12 | [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 13 | [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 14 | [ -1, 3, C3, [ 128 ] ], 15 | [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 16 | [ -1, 9, C3, [ 256 ] ], 17 | [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 18 | [ -1, 9, C3, [ 512 ] ], 19 | [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 20 | [ -1, 3, C3, [ 768 ] ], 21 | [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64 22 | [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ], 23 | [ -1, 3, C3, [ 1024, False ] ], # 11 24 | ] 25 | 26 | # YOLOv5 head 27 | head: 28 | [ [ -1, 1, Conv, [ 768, 1, 1 ] ], 29 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], 30 | [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 31 | [ -1, 3, C3, [ 768, False ] ], # 15 32 | 33 | [ -1, 1, Conv, [ 512, 1, 1 ] ], 34 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], 35 | [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 36 | [ -1, 3, C3, [ 512, False ] ], # 19 37 | 38 | [ -1, 1, Conv, [ 256, 1, 1 ] ], 39 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], 40 | [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 41 | [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small) 42 | 43 | [ -1, 1, Conv, [ 256, 3, 2 ] ], 44 | [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4 45 | [ -1, 3, C3, [ 512, False ] ], # 26 
(P4/16-medium) 46 | 47 | [ -1, 1, Conv, [ 512, 3, 2 ] ], 48 | [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5 49 | [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large) 50 | 51 | [ -1, 1, Conv, [ 768, 3, 2 ] ], 52 | [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6 53 | [ -1, 3, C3, [ 1024, False ] ], # 32 (P5/64-xlarge) 54 | 55 | [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) 56 | ] 57 | -------------------------------------------------------------------------------- /yolov5_original/models/hub/yolov5-p7.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.0 # model depth multiple 4 | width_multiple: 1.0 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 3 8 | 9 | # YOLOv5 backbone 10 | backbone: 11 | # [from, number, module, args] 12 | [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 13 | [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 14 | [ -1, 3, C3, [ 128 ] ], 15 | [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 16 | [ -1, 9, C3, [ 256 ] ], 17 | [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 18 | [ -1, 9, C3, [ 512 ] ], 19 | [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 20 | [ -1, 3, C3, [ 768 ] ], 21 | [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64 22 | [ -1, 3, C3, [ 1024 ] ], 23 | [ -1, 1, Conv, [ 1280, 3, 2 ] ], # 11-P7/128 24 | [ -1, 1, SPP, [ 1280, [ 3, 5 ] ] ], 25 | [ -1, 3, C3, [ 1280, False ] ], # 13 26 | ] 27 | 28 | # YOLOv5 head 29 | head: 30 | [ [ -1, 1, Conv, [ 1024, 1, 1 ] ], 31 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], 32 | [ [ -1, 10 ], 1, Concat, [ 1 ] ], # cat backbone P6 33 | [ -1, 3, C3, [ 1024, False ] ], # 17 34 | 35 | [ -1, 1, Conv, [ 768, 1, 1 ] ], 36 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], 37 | [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 38 | [ -1, 3, C3, [ 768, False ] ], # 21 39 | 40 | [ -1, 1, Conv, [ 512, 1, 1 ] ], 41 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], 42 | [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 43 | [ -1, 3, C3, [ 512, False ] ], # 25 44 | 45 | [ -1, 1, Conv, [ 256, 1, 1 ] ], 46 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], 47 | [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 48 | [ -1, 3, C3, [ 256, False ] ], # 29 (P3/8-small) 49 | 50 | [ -1, 1, Conv, [ 256, 3, 2 ] ], 51 | [ [ -1, 26 ], 1, Concat, [ 1 ] ], # cat head P4 52 | [ -1, 3, C3, [ 512, False ] ], # 32 (P4/16-medium) 53 | 54 | [ -1, 1, Conv, [ 512, 3, 2 ] ], 55 | [ [ -1, 22 ], 1, Concat, [ 1 ] ], # cat head P5 56 | [ -1, 3, C3, [ 768, False ] ], # 35 (P5/32-large) 57 | 58 | [ -1, 1, Conv, [ 768, 3, 2 ] ], 59 | [ [ -1, 18 ], 1, Concat, [ 1 ] ], # cat head P6 60 | [ -1, 3, C3, [ 1024, False ] ], # 38 (P6/64-xlarge) 61 | 62 | [ -1, 1, Conv, [ 1024, 3, 2 ] ], 63 | [ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P7 64 | [ -1, 3, C3, [ 1280, False ] ], # 41 (P7/128-xxlarge) 65 | 66 | [ [ 29, 32, 35, 38, 41 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6, P7) 67 | ] 68 | -------------------------------------------------------------------------------- /yolov5_original/models/hub/yolov5-panet.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.0 # model depth multiple 4 | width_multiple: 1.0 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | 
[[-1, 1, Focus, [64, 3]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, BottleneckCSP, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 9, BottleneckCSP, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, BottleneckCSP, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 1, SPP, [1024, [5, 9, 13]]], 24 | [-1, 3, BottleneckCSP, [1024, False]], # 9 25 | ] 26 | 27 | # YOLOv5 PANet head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, BottleneckCSP, [512, False]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, BottleneckCSP, [256, False]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, BottleneckCSP, [512, False]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, Conv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, BottleneckCSP, [1024, False]], # 23 (P5/32-large) 46 | 47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 48 | ] 49 | -------------------------------------------------------------------------------- /yolov5_original/models/yolov5_rknn_640x640.yaml: -------------------------------------------------------------------------------- 1 | class: rknn_detect_yolov5.Detector 2 | opt: 3 | model: "checkpoint/yolov5s.rknn" 4 | size: [ 640, 640 ] 5 | masks: [ [ 0, 1, 2 ], [ 3, 4, 5 ], [ 6, 7, 8 ] ] 6 | anchors: [ [ 10,13 ], [ 16,30 ], [ 33,23 ], [ 30,61 ], [ 62,45 ], [ 59,119 ], [ 116,90 ], [ 156,198 ], [ 373,326 ] ] 7 | names: "data/yolov5_rknn_512x288.names" 8 | conf_thres: 0.3 9 | iou_thres: 0.5 10 | platform: 0 11 | -------------------------------------------------------------------------------- /yolov5_original/models/yolov5l.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.0 # model depth multiple 4 | width_multiple: 1.0 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Focus, [64, 3]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 9, C3, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 1, SPP, [1024, [5, 9, 13]]], 24 | [-1, 3, C3, [1024, False]], # 9 25 | ] 26 | 27 | # YOLOv5 head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, C3, [512, False]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, Conv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 46 | 47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 48 | ] 49 | 
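yolov5l above is the reference configuration (both multiples 1.0); the m/s/x variants that follow reuse the identical layer list and change only depth_multiple and width_multiple. A sketch of the scaling rule, assumed to mirror parse_model in models/yolo.py (repeat counts rounded by depth_multiple, channel counts scaled by width_multiple and snapped to a multiple of 8):

import math

def make_divisible(x, divisor=8):
    # round channel count up to the nearest multiple of divisor
    return math.ceil(x / divisor) * divisor

def scale(n_repeats, channels, depth_multiple, width_multiple):
    n = max(round(n_repeats * depth_multiple), 1) if n_repeats > 1 else n_repeats
    c = make_divisible(channels * width_multiple, 8)
    return n, c

# yolov5s (0.33 / 0.50): a 9-repeat C3 block with 512 channels becomes:
print(scale(9, 512, 0.33, 0.50))  # -> (3, 256)
# yolov5m (0.67 / 0.75):
print(scale(9, 512, 0.67, 0.75))  # -> (6, 384)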
-------------------------------------------------------------------------------- /yolov5_original/models/yolov5m.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 0.67 # model depth multiple 4 | width_multiple: 0.75 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Focus, [64, 3]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 9, C3, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 1, SPP, [1024, [5, 9, 13]]], 24 | [-1, 3, C3, [1024, False]], # 9 25 | ] 26 | 27 | # YOLOv5 head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, C3, [512, False]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, Conv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 46 | 47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 48 | ] 49 | -------------------------------------------------------------------------------- /yolov5_original/models/yolov5s.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 0.33 # model depth multiple 4 | width_multiple: 0.50 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Focus, [64, 3]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 9, C3, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 1, SPP, [1024, [5, 9, 13]]], 24 | [-1, 3, C3, [1024, False]], # 9 25 | ] 26 | 27 | # YOLOv5 head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, C3, [512, False]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, Conv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 46 | 47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 48 | ] 49 | -------------------------------------------------------------------------------- /yolov5_original/models/yolov5x.yaml: 
-------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.33 # model depth multiple 4 | width_multiple: 1.25 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Focus, [64, 3]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 9, C3, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 1, SPP, [1024, [5, 9, 13]]], 24 | [-1, 3, C3, [1024, False]], # 9 25 | ] 26 | 27 | # YOLOv5 head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, C3, [512, False]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, Conv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 46 | 47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 48 | ] 49 | -------------------------------------------------------------------------------- /yolov5_original/onnx2rknn.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | from rknn.api import RKNN 4 | 5 | if __name__ == '__main__': 6 | parser = argparse.ArgumentParser() 7 | parser.add_argument("-i", '--onnx', type=str, default='weights/yolov5s.onnx', help='weights path') # from yolov5/models/ 8 | parser.add_argument('--rknn', type=str, default='', help='save path for the exported .rknn model') 9 | parser.add_argument("-p", '--precompile', action="store_true", help='whether to build a pre-compiled model') 10 | parser.add_argument("-o", '--original', action="store_true", help='whether the model is an original (unmodified) yolov5 model') 11 | parser.add_argument("-bs", '--batch-size', type=int, default=1, help='batch size') 12 | opt = parser.parse_args() 13 | ONNX_MODEL = opt.onnx 14 | if opt.rknn: 15 | RKNN_MODEL = opt.rknn 16 | else: 17 | RKNN_MODEL = "%s.rknn" % os.path.splitext(ONNX_MODEL)[0] 18 | rknn = RKNN() 19 | print('--> config model') 20 | if opt.original: 21 | rknn.config(mean_values=[[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]], 22 | std_values=[[255.0, 255.0, 255.0, 255.0, 255.0, 255.0, 255.0, 255.0, 255.0, 255.0, 255.0, 255.0]], 23 | batch_size=opt.batch_size, target_platform="rk3399pro") # reorder_channel='0 1 2', 24 | else: 25 | rknn.config(channel_mean_value='0 0 0 255', reorder_channel='2 1 0', batch_size=opt.batch_size, 26 | target_platform="rk3399pro") 27 | # Load ONNX model 28 | print('--> Loading model') 29 | ret = rknn.load_onnx(model=ONNX_MODEL) 30 | assert ret == 0, "Load onnx failed!" 31 | # Build model 32 | print('--> Building model') 33 | if opt.precompile: 34 | ret = rknn.build(do_quantization=True, dataset='./dataset.txt', pre_compile=True) # pre_compile=True 35 | else: 36 | ret = rknn.build(do_quantization=True, dataset='./dataset.txt') 37 | assert ret == 0, "Build rknn failed!"
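# Note: with do_quantization=True, RKNN-Toolkit reads the image paths listed in
# dataset.txt (one path per line) as calibration samples for post-training quantization.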
38 | # Export rknn model 39 | print('--> Export RKNN model') 40 | ret = rknn.export_rknn(RKNN_MODEL) 41 | assert ret == 0, "Export %s.rknn failed!" % opt.rknn 42 | print('done') 43 | -------------------------------------------------------------------------------- /yolov5_original/onnx_test.py: -------------------------------------------------------------------------------- 1 | import random 2 | import onnxruntime 3 | import torch 4 | import os 5 | import cv2 6 | import numpy as np 7 | import time 8 | from utils.plots import plot_one_box 9 | from utils.general import non_max_suppression 10 | from utils.datasets import letterbox 11 | 12 | 13 | class Detect: 14 | stride = [8, 16, 32] # strides computed during build 15 | 16 | def __init__(self, size, nc=80, anchors=()): # detection layer 17 | super(Detect, self).__init__() 18 | self.size = size 19 | self.nc = nc # number of classes 20 | self.no = nc + 5 # number of outputs per anchor 21 | self.nl = len(anchors) # number of detection layers 22 | self.na = len(anchors[0]) // 2 # number of anchors 23 | self.grid = [torch.zeros(1)] * self.nl # init grid 24 | a = torch.tensor(anchors).float().view(self.nl, -1, 2) 25 | self.anchors = a # shape(nl,na,2) 26 | self.anchor_grid = a.clone().view(self.nl, 1, -1, 1, 1, 2) # shape(nl,1,na,1,1,2) 27 | 28 | def predict(self, x): 29 | # x = x.copy() # for profiling 30 | z = [] # inference output 31 | print("# anchors", self.na) 32 | print("# detection layers", self.nl) 33 | print("# outputs", self.no) 34 | print("len x", len(x), type(x)) 35 | w = self.size[0] 36 | for i in range(self.nl): 37 | x[i] = torch.from_numpy(x[i]) 38 | print("xi shape:", x[i].shape) 39 | bs, _, ny, nx, _ = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) 40 | print("nx ny", nx, ny) 41 | print("bs", bs) 42 | y = x[i].sigmoid() 43 | yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) 44 | self.grid[i] = torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() 45 | print("grid shape>>", self.grid[i].shape) 46 | y[..., 0:2] = (y[..., 0:2] * 2. 
47 |             y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i]  # wh
48 |             z.append(y.view(bs, -1, self.no))
49 |         out = torch.cat(z, 1)
50 |         pred_res = non_max_suppression(out, 0.6)[0]
51 |         pred_res[:, :4] = scale_coords(img.shape[2:], pred_res[:, :4], img_src.shape).round()  # img / img_src are module-level globals set in the loop below
52 |         return pred_res
53 | 
54 | 
55 | def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
56 |     # Rescale coords (xyxy) from img1_shape to img0_shape
57 |     if ratio_pad is None:  # calculate from img0_shape
58 |         gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
59 |         pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
60 |     else:
61 |         gain = ratio_pad[0][0]
62 |         pad = ratio_pad[1]
63 | 
64 |     coords[:, [0, 2]] -= pad[0]  # x padding
65 |     coords[:, [1, 3]] -= pad[1]  # y padding
66 |     coords[:, :4] /= gain
67 |     clip_coords(coords, img0_shape)
68 |     return coords
69 | 
70 | 
71 | def clip_coords(boxes, img_shape):
72 |     # Clip xyxy bounding boxes to image shape (height, width)
73 |     boxes[:, 0].clamp_(0, img_shape[1])  # x1
74 |     boxes[:, 1].clamp_(0, img_shape[0])  # y1
75 |     boxes[:, 2].clamp_(0, img_shape[1])  # x2
76 |     boxes[:, 3].clamp_(0, img_shape[0])  # y2
77 | 
78 | 
79 | def processImg(img_mat, new_shape=(416, 416)):
80 |     img = letterbox(img_mat, new_shape=new_shape, auto=False)[0]
81 |     # img = letterbox(img_mat, new_shape=new_shape)[0]
82 |     # cv2.imshow("img", img)  # debug preview; blocks on a keypress
83 |     # cv2.waitKey()
84 |     img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, HWC to CHW (3x416x416)
85 |     return np.ascontiguousarray(img)
86 | 
87 | 
88 | 
89 | CLASSES = ("person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", "boat", "traffic light",
90 |            "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow",
91 |            "elephant",
92 |            "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis",
93 |            "snowboard", "sports ball", "kite",
94 |            "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup",
95 |            "fork", "knife",
96 |            "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut",
97 |            "cake", "chair", "sofa",
98 |            "pottedplant", "bed", "diningtable", "toilet", "tvmonitor", "laptop", "mouse", "remote",
99 |            "keyboard", "cell phone", "microwave",
100 |            "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear",
101 |            "hair drier", "toothbrush")
102 | SIZE = (416, 416)
103 | anchors = [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]]
104 | d = Detect(SIZE, 80, anchors)
105 | dirname = r"D:\Workspace\test_space_01\yolov5\yolov5-3.1_train\inference\images"
106 | fs = (os.path.join(p, name) for p, _, names in os.walk(dirname) for name in names)
107 | session = onnxruntime.InferenceSession(r"D:\Workspace\test_space_01\yolov5\yolov5-4.0\yolov5-4.0\weights\yolov5m_416x416.onnx")
108 | for i in session.get_inputs():
109 |     print(i)
110 | for i in session.get_outputs():
111 |     print(i)
112 | colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(CLASSES))]
113 | for img_path in fs:
114 |     if os.path.splitext(img_path)[1].lower() not in (".jpg", ".png", ".jpeg"):
115 |         continue
116 | 
117 |     print("-" * 70)
118 |     input_names = list(map(lambda x: x.name, session.get_inputs()))
119 |     output_names = list(map(lambda x: x.name, session.get_outputs()))
120 |     img_src = cv2.imread(img_path)
121 |     img = img_src.copy()
122 |     img = processImg(img)
123 |     # img = img.reshape((1,) + img.shape)
124 |     img = img[None]  # add batch dimension
125 |     img = np.concatenate((img[..., ::2, ::2], img[..., 1::2, ::2], img[..., ::2, 1::2], img[..., 1::2, 1::2]), 1)  # host-side Focus slice: 3 -> 12 channels, for a model exported without its Focus layer
126 |     img = img.astype(np.float32)
127 |     img /= 255.0
128 |     t0 = time.time()
129 |     print("img shape \t:", img.shape)
130 |     pred_onx = session.run(
131 |         output_names, {input_names[0]: img})
132 |     print("_" * 30)
133 |     [print(m.shape) for m in pred_onx]
134 |     print("_" * 30)
135 |     # print(pred_onx[0][..., 1])
136 |     print("*" * 30)
137 |     res = d.predict(pred_onx)
138 |     print(res.shape)
139 |     # out = non_max_suppression(res, 0.7)[0]
140 |     # out[:, :4] = scale_coords(img.shape[2:], out[:, :4], img_src.shape).round()
141 |     for *xyxy, conf, cls in res:
142 |         label = '%s %.2f' % (CLASSES[int(cls)], conf)
143 |         print(label)
144 |         plot_one_box(xyxy, img_src, label=label, color=colors[int(cls)], line_thickness=3)
145 |     cv2.imshow("src", img_src)
146 |     cv2.waitKey()
147 | 
--------------------------------------------------------------------------------
/yolov5_original/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/soloist-v/yolov5_for_rknn/f4290d1c322b7595a557983467a1fd13cfec287d/yolov5_original/utils/__init__.py
--------------------------------------------------------------------------------
/yolov5_original/utils/activations.py:
--------------------------------------------------------------------------------
1 | # Activation functions
2 | 
3 | import torch
4 | import torch.nn as nn
5 | import torch.nn.functional as F
6 | 
7 | 
8 | # SiLU https://arxiv.org/pdf/1905.02244.pdf ----------------------------------------------------------------------------
9 | class SiLU(nn.Module):  # export-friendly version of nn.SiLU()
10 |     @staticmethod
11 |     def forward(x):
12 |         return x * torch.sigmoid(x)
13 | 
14 | 
15 | class Hardswish(nn.Module):  # export-friendly version of nn.Hardswish()
16 |     @staticmethod
17 |     def forward(x):
18 |         # return x * F.hardsigmoid(x)  # for torchscript and CoreML
19 |         return x * F.hardtanh(x + 3, 0., 6.) / 6.  # for torchscript, CoreML and ONNX
20 | 
21 | 
22 | class MemoryEfficientSwish(nn.Module):
23 |     class F(torch.autograd.Function):
24 |         @staticmethod
25 |         def forward(ctx, x):
26 |             ctx.save_for_backward(x)
27 |             return x * torch.sigmoid(x)
28 | 
29 |         @staticmethod
30 |         def backward(ctx, grad_output):
31 |             x = ctx.saved_tensors[0]
32 |             sx = torch.sigmoid(x)
33 |             return grad_output * (sx * (1 + x * (1 - sx)))
34 | 
35 |     def forward(self, x):
36 |         return self.F.apply(x)
37 | 
38 | 
39 | # Mish https://github.com/digantamisra98/Mish --------------------------------------------------------------------------
40 | class Mish(nn.Module):
41 |     @staticmethod
42 |     def forward(x):
43 |         return x * F.softplus(x).tanh()
44 | 
45 | 
46 | class MemoryEfficientMish(nn.Module):
47 |     class F(torch.autograd.Function):
48 |         @staticmethod
49 |         def forward(ctx, x):
50 |             ctx.save_for_backward(x)
51 |             return x.mul(torch.tanh(F.softplus(x)))  # x * tanh(ln(1 + exp(x)))
52 | 
53 |         @staticmethod
54 |         def backward(ctx, grad_output):
55 |             x = ctx.saved_tensors[0]
56 |             sx = torch.sigmoid(x)
57 |             fx = F.softplus(x).tanh()
58 |             return grad_output * (fx + x * sx * (1 - fx * fx))
59 | 
60 |     def forward(self, x):
61 |         return self.F.apply(x)
62 | 
63 | 
64 | # FReLU https://arxiv.org/abs/2007.11824 -------------------------------------------------------------------------------
65 | class FReLU(nn.Module):
66 |     def __init__(self, c1, k=3):  # ch_in, kernel
67 |         super().__init__()
68 |         self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False)
69 |         self.bn = nn.BatchNorm2d(c1)
70 | 
71 |     def forward(self, x):
72 |         return torch.max(x, self.bn(self.conv(x)))
73 | 
--------------------------------------------------------------------------------
/yolov5_original/utils/autoanchor.py:
--------------------------------------------------------------------------------
1 | # Auto-anchor utils
2 | 
3 | import numpy as np
4 | import torch
5 | import yaml
6 | from scipy.cluster.vq import kmeans
7 | from tqdm import tqdm
8 | 
9 | 
10 | def check_anchor_order(m):
11 |     # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary
12 |     a = m.anchor_grid.prod(-1).view(-1)  # anchor area
13 |     da = a[-1] - a[0]  # delta a
14 |     ds = m.stride[-1] - m.stride[0]  # delta s
15 |     if da.sign() != ds.sign():  # anchor order and stride order disagree
16 |         print('Reversing anchor order')
17 |         m.anchors[:] = m.anchors.flip(0)
18 |         m.anchor_grid[:] = m.anchor_grid.flip(0)
19 | 
20 | 
21 | def check_anchors(dataset, model, thr=4.0, imgsz=640, cfg=None):
22 |     import re
  |     # template and regex below write the evolved anchors back into the model yaml (cfg)
23 |     tmp = """anchors:
24 |   - %s  # P3/8
25 |   - %s  # P4/16
26 |   - %s  # P5/32"""
27 |     pattern = re.compile(r"anchors:[\s\S]+?P5/32")
28 |     # Check anchor fit to data, recompute if necessary
29 |     print('\nAnalyzing anchors... ', end='')
30 |     m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1]  # Detect()
31 |     shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True)
32 |     scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1))  # augment scale
33 |     wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float()  # wh
34 | 
35 |     def metric(k):  # compute metric
36 |         r = wh[:, None] / k[None]
37 |         x = torch.min(r, 1. / r).min(2)[0]  # ratio metric
38 |         best = x.max(1)[0]  # best_x
39 |         aat = (x > 1. / thr).float().sum(1).mean()  # anchors above threshold
40 |         bpr = (best > 1. / thr).float().mean()  # best possible recall
41 |         return bpr, aat
42 | 
43 |     bpr, aat = metric(m.anchor_grid.clone().cpu().view(-1, 2))
44 |     print('anchors/target = %.2f, Best Possible Recall (BPR) = %.4f' % (aat, bpr), end='')
45 |     if bpr < 0.99:  # threshold to recompute
46 |         print('. Attempting to improve anchors, please wait...')
47 |         na = m.anchor_grid.numel() // 2  # number of anchors
48 |         new_anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False).round()
49 |         # ***************************************************************************************
50 |         cfg_content = open(cfg).read()
51 |         tmp_anchors = tuple(str(an) for an in np.array(new_anchors, int).reshape(-1, 2 * 3).tolist())  # 3 (w,h) pairs per detection layer
52 |         cfg_content = re.sub(pattern, tmp % tmp_anchors, cfg_content)
53 |         open(cfg, "w").write(cfg_content)
54 |         # ***************************************************************************************
55 |         new_bpr = metric(new_anchors.reshape(-1, 2))[0]
56 |         if new_bpr > bpr:  # replace anchors
57 |             new_anchors = torch.tensor(new_anchors, device=m.anchors.device).type_as(m.anchors)
58 |             m.anchor_grid[:] = new_anchors.clone().view_as(m.anchor_grid)  # for inference
59 |             m.anchors[:] = new_anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1)  # loss
60 |             check_anchor_order(m)
61 |             print('New anchors saved to model. Update model *.yaml to use these anchors in the future.')
62 |         else:
63 |             print('Original anchors better than new anchors. Proceeding with original anchors.')
64 |     print('')  # newline
65 | 
66 | 
67 | def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True):
68 |     """ Creates kmeans-evolved anchors from training dataset
69 | 
70 |         Arguments:
71 |             path: path to dataset *.yaml, or a loaded dataset
72 |             n: number of anchors
73 |             img_size: image size used for training
74 |             thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0
75 |             gen: generations to evolve anchors using genetic algorithm
76 |             verbose: print all results
77 | 
78 |         Return:
79 |             k: kmeans evolved anchors
80 | 
81 |         Usage:
82 |             from utils.autoanchor import *; _ = kmean_anchors()
83 |     """
84 |     thr = 1. / thr
85 | 
86 |     def metric(k, wh):  # compute metrics
87 |         r = wh[:, None] / k[None]
88 |         x = torch.min(r, 1. / r).min(2)[0]  # ratio metric
89 |         # x = wh_iou(wh, torch.tensor(k))  # iou metric
90 |         return x, x.max(1)[0]  # x, best_x
91 | 
92 |     def anchor_fitness(k):  # mutation fitness
93 |         _, best = metric(torch.tensor(k, dtype=torch.float32), wh)
94 |         return (best * (best > thr).float()).mean()  # fitness
95 | 
96 |     def print_results(k):
97 |         k = k[np.argsort(k.prod(1))]  # sort small to large
98 |         x, best = metric(k, wh0)
99 |         bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n  # best possible recall, anch > thr
100 |         print('thr=%.2f: %.4f best possible recall, %.2f anchors past thr' % (thr, bpr, aat))
101 |         print('n=%g, img_size=%s, metric_all=%.3f/%.3f-mean/best, past_thr=%.3f-mean: ' %
102 |               (n, img_size, x.mean(), best.mean(), x[x > thr].mean()), end='')
103 |         for i, x in enumerate(k):
104 |             print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n')  # use in *.cfg
105 |         return k
106 | 
107 |     if isinstance(path, str):  # *.yaml file
108 |         with open(path) as f:
109 |             data_dict = yaml.load(f, Loader=yaml.FullLoader)  # model dict
110 |         from utils.datasets import LoadImagesAndLabels
111 |         dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True)
112 |     else:
113 |         dataset = path  # dataset
114 | 
115 |     # Get label wh
116 |     shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True)
117 |     wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)])  # wh
118 | 
119 |     # Filter
120 |     i = (wh0 < 3.0).any(1).sum()
121 |     if i:
122 |         print('WARNING: Extremely small objects found. '
123 |               '%g of %g labels are < 3 pixels in width or height.' % (i, len(wh0)))
124 |     wh = wh0[(wh0 >= 2.0).any(1)]  # filter > 2 pixels
125 |     # wh = wh * (np.random.rand(wh.shape[0], 1) * 0.9 + 0.1)  # multiply by random scale 0-1
126 | 
127 |     # Kmeans calculation
128 |     print('Running kmeans for %g anchors on %g points...' % (n, len(wh)))
129 |     s = wh.std(0)  # sigmas for whitening
130 |     k, dist = kmeans(wh / s, n, iter=30)  # points, mean distance
131 |     k *= s
132 |     wh = torch.tensor(wh, dtype=torch.float32)  # filtered
133 |     wh0 = torch.tensor(wh0, dtype=torch.float32)  # unfiltered
134 |     k = print_results(k)
135 | 
136 |     # Plot
137 |     # k, d = [None] * 20, [None] * 20
138 |     # for i in tqdm(range(1, 21)):
139 |     #     k[i-1], d[i-1] = kmeans(wh / s, i)  # points, mean distance
140 |     # fig, ax = plt.subplots(1, 2, figsize=(14, 7), tight_layout=True)
141 |     # ax = ax.ravel()
142 |     # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.')
143 |     # fig, ax = plt.subplots(1, 2, figsize=(14, 7))  # plot wh
144 |     # ax[0].hist(wh[wh[:, 0]<100, 0], 400)
145 |     # ax[1].hist(wh[wh[:, 1]<100, 1], 400)
146 |     # fig.savefig('wh.png', dpi=200)
147 | 
148 |     # Evolve
149 |     npr = np.random
150 |     f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1  # fitness, anchor shape, mutation prob, sigma
151 |     pbar = tqdm(range(gen), desc='Evolving anchors with Genetic Algorithm')  # progress bar
152 |     for _ in pbar:
153 |         v = np.ones(sh)
154 |         while (v == 1).all():  # mutate until a change occurs (prevent duplicates)
155 |             v = ((npr.random(sh) < mp) * npr.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0)
156 |         kg = (k.copy() * v).clip(min=2.0)
157 |         fg = anchor_fitness(kg)
158 |         if fg > f:
159 |             f, k = fg, kg.copy()
160 |             pbar.desc = 'Evolving anchors with Genetic Algorithm: fitness = %.4f' % f
161 |             if verbose:
162 |                 print_results(k)
163 | 
164 |     return print_results(k)
165 | 
--------------------------------------------------------------------------------
/yolov5_original/utils/google_app_engine/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM gcr.io/google-appengine/python
2 | 
3 | # Create a virtualenv for dependencies. This isolates these packages from
4 | # system-level packages.
5 | # Use -p python3 or -p python3.7 to select python version. Default is version 2.
6 | RUN virtualenv /env -p python3
7 | 
8 | # Setting these environment variables is the same as running
9 | # source /env/bin/activate.
10 | ENV VIRTUAL_ENV /env
11 | ENV PATH /env/bin:$PATH
12 | 
13 | RUN apt-get update && apt-get install -y python-opencv
14 | 
15 | # Copy the application's requirements.txt and run pip to install all
16 | # dependencies into the virtualenv.
17 | ADD requirements.txt /app/requirements.txt
18 | RUN pip install -r /app/requirements.txt
19 | 
20 | # Add the application source code.
21 | ADD . /app
22 | 
23 | # Run a WSGI server to serve the application. gunicorn must be declared as
24 | # a dependency in requirements.txt.
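  | # ($PORT is injected by the App Engine flexible environment at container start.)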
25 | CMD gunicorn -b :$PORT main:app
26 | 
--------------------------------------------------------------------------------
/yolov5_original/utils/google_app_engine/additional_requirements.txt:
--------------------------------------------------------------------------------
1 | # add these requirements in your app on top of the existing ones
2 | pip==18.1
3 | Flask==1.0.2
4 | gunicorn==19.9.0
5 | 
--------------------------------------------------------------------------------
/yolov5_original/utils/google_app_engine/app.yaml:
--------------------------------------------------------------------------------
1 | runtime: custom
2 | env: flex
3 | 
4 | service: yolov5app
5 | 
6 | liveness_check:
7 |   initial_delay_sec: 600
8 | 
9 | manual_scaling:
10 |   instances: 1
11 | resources:
12 |   cpu: 1
13 |   memory_gb: 4
14 |   disk_size_gb: 20
--------------------------------------------------------------------------------
/yolov5_original/utils/google_utils.py:
--------------------------------------------------------------------------------
1 | # Google utils: https://cloud.google.com/storage/docs/reference/libraries
2 | 
3 | import os
4 | import platform
5 | import subprocess
6 | import time
7 | from pathlib import Path
8 | 
9 | import requests
10 | import torch
11 | 
12 | 
13 | def gsutil_getsize(url=''):
14 |     # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du
15 |     s = subprocess.check_output('gsutil du %s' % url, shell=True).decode('utf-8')
16 |     return int(s.split(' ')[0]) if len(s) else 0  # bytes
17 | 
18 | 
19 | def attempt_download(weights):
20 |     # Attempt to download pretrained weights if not found locally
21 |     weights = str(weights).strip().replace("'", '')
22 |     file = Path(weights).name.lower()
23 | 
24 |     msg = weights + ' missing, try downloading from https://github.com/ultralytics/yolov5/releases/'
25 |     response = requests.get('https://api.github.com/repos/ultralytics/yolov5/releases/latest').json()  # github api
26 |     assets = [x['name'] for x in response['assets']]  # release assets, i.e. ['yolov5s.pt', 'yolov5m.pt', ...]
27 |     redundant = False  # second download option
28 | 
29 |     if file in assets and not os.path.isfile(weights):
30 |         try:  # GitHub
31 |             tag = response['tag_name']  # i.e. 'v1.0'
32 |             url = f'https://github.com/ultralytics/yolov5/releases/download/{tag}/{file}'
33 |             print('Downloading %s to %s...' % (url, weights))
34 |             torch.hub.download_url_to_file(url, weights)
35 |             assert os.path.exists(weights) and os.path.getsize(weights) > 1E6  # check
36 |         except Exception as e:  # GCP
37 |             print('Download error: %s' % e)
38 |             assert redundant, 'No secondary mirror'
39 |             url = 'https://storage.googleapis.com/ultralytics/yolov5/ckpt/' + file
40 |             print('Downloading %s to %s...' % (url, weights))
41 |             r = os.system('curl -L %s -o %s' % (url, weights))  # torch.hub.download_url_to_file(url, weights)
42 |         finally:
43 |             if not (os.path.exists(weights) and os.path.getsize(weights) > 1E6):  # check
44 |                 os.remove(weights) if os.path.exists(weights) else None  # remove partial downloads
45 |                 print('ERROR: Download failure: %s' % msg)
46 |             print('')
47 |             return
48 | 
49 | 
50 | def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', name='tmp.zip'):
51 |     # Downloads a file from Google Drive. from yolov5.utils.google_utils import *; gdrive_download()
52 |     t = time.time()
53 |     print('Downloading https://drive.google.com/uc?export=download&id=%s as %s... ' % (id, name), end='')
54 |     os.remove(name) if os.path.exists(name) else None  # remove existing
55 |     os.remove('cookie') if os.path.exists('cookie') else None
56 | 
57 |     # Attempt file download
58 |     out = "NUL" if platform.system() == "Windows" else "/dev/null"
59 |     os.system('curl -c ./cookie -s -L "drive.google.com/uc?export=download&id=%s" > %s ' % (id, out))
60 |     if os.path.exists('cookie'):  # large file
61 |         s = 'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm=%s&id=%s" -o %s' % (get_token(), id, name)
62 |     else:  # small file
63 |         s = 'curl -s -L -o %s "drive.google.com/uc?export=download&id=%s"' % (name, id)
64 |     r = os.system(s)  # execute, capture return
65 |     os.remove('cookie') if os.path.exists('cookie') else None
66 | 
67 |     # Error check
68 |     if r != 0:
69 |         os.remove(name) if os.path.exists(name) else None  # remove partial
70 |         print('Download error ')  # raise Exception('Download error')
71 |         return r
72 | 
73 |     # Unzip if archive
74 |     if name.endswith('.zip'):
75 |         print('unzipping... ', end='')
76 |         os.system('unzip -q %s' % name)  # unzip
77 |         os.remove(name)  # remove zip to free space
78 | 
79 |     print('Done (%.1fs)' % (time.time() - t))
80 |     return r
81 | 
82 | 
83 | def get_token(cookie="./cookie"):
84 |     with open(cookie) as f:
85 |         for line in f:
86 |             if "download" in line:
87 |                 return line.split()[-1]
88 |     return ""
89 | 
90 | # def upload_blob(bucket_name, source_file_name, destination_blob_name):
91 | #     # Uploads a file to a bucket
92 | #     # https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python
93 | #
94 | #     storage_client = storage.Client()
95 | #     bucket = storage_client.get_bucket(bucket_name)
96 | #     blob = bucket.blob(destination_blob_name)
97 | #
98 | #     blob.upload_from_filename(source_file_name)
99 | #
100 | #     print('File {} uploaded to {}.'.format(
101 | #         source_file_name,
102 | #         destination_blob_name))
103 | #
104 | #
105 | # def download_blob(bucket_name, source_blob_name, destination_file_name):
106 | #     # Downloads a blob from a bucket
107 | #     storage_client = storage.Client()
108 | #     bucket = storage_client.get_bucket(bucket_name)
109 | #     blob = bucket.blob(source_blob_name)
110 | #
111 | #     blob.download_to_filename(destination_file_name)
112 | #
113 | #     print('Blob {} downloaded to {}.'.format(
114 | #         source_blob_name,
115 | #         destination_file_name))
116 | 
--------------------------------------------------------------------------------
/yolov5_original/utils/metrics.py:
--------------------------------------------------------------------------------
1 | # Model validation metrics
2 | 
3 | from pathlib import Path
4 | 
5 | import matplotlib.pyplot as plt
6 | import numpy as np
7 | import torch
8 | 
9 | from . import general
10 | 
11 | 
12 | def fitness(x):
13 |     # Model fitness as a weighted combination of metrics
14 |     w = [0.0, 0.0, 0.1, 0.9]  # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
15 |     return (x[:, :4] * w).sum(1)
16 | 
17 | 
18 | def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='precision-recall_curve.png', names=[]):
19 |     """ Compute the average precision, given the recall and precision curves.
20 |     Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
21 |     # Arguments
22 |         tp: True positives (nparray, nx1 or nx10).
23 |         conf: Objectness value from 0-1 (nparray).
24 |         pred_cls: Predicted object classes (nparray).
25 |         target_cls: True object classes (nparray).
26 |         plot: Plot precision-recall curve at mAP@0.5
27 |         save_dir: Plot save directory
28 |     # Returns
29 |         The average precision as computed in py-faster-rcnn.
30 |     """
31 | 
32 |     # Sort by objectness
33 |     i = np.argsort(-conf)
34 |     tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]
35 | 
36 |     # Find unique classes
37 |     unique_classes = np.unique(target_cls)
38 | 
39 |     # Create Precision-Recall curve and compute AP for each class
40 |     px, py = np.linspace(0, 1, 1000), []  # for plotting
41 |     pr_score = 0.1  # score to evaluate P and R https://github.com/ultralytics/yolov3/issues/898
42 |     s = [unique_classes.shape[0], tp.shape[1]]  # number class, number iou thresholds (i.e. 10 for mAP0.5...0.95)
43 |     ap, p, r = np.zeros(s), np.zeros(s), np.zeros(s)
44 |     for ci, c in enumerate(unique_classes):
45 |         i = pred_cls == c
46 |         n_l = (target_cls == c).sum()  # number of labels
47 |         n_p = i.sum()  # number of predictions
48 | 
49 |         if n_p == 0 or n_l == 0:
50 |             continue
51 |         else:
52 |             # Accumulate FPs and TPs
53 |             fpc = (1 - tp[i]).cumsum(0)
54 |             tpc = tp[i].cumsum(0)
55 | 
56 |             # Recall
57 |             recall = tpc / (n_l + 1e-16)  # recall curve
58 |             r[ci] = np.interp(-pr_score, -conf[i], recall[:, 0])  # r at pr_score, negative x, xp because xp decreases
59 | 
60 |             # Precision
61 |             precision = tpc / (tpc + fpc)  # precision curve
62 |             p[ci] = np.interp(-pr_score, -conf[i], precision[:, 0])  # p at pr_score
63 | 
64 |             # AP from recall-precision curve
65 |             for j in range(tp.shape[1]):
66 |                 ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j])
67 |                 if plot and (j == 0):
68 |                     py.append(np.interp(px, mrec, mpre))  # precision at mAP@0.5
69 | 
70 |     # Compute F1 score (harmonic mean of precision and recall)
71 |     f1 = 2 * p * r / (p + r + 1e-16)
72 | 
73 |     if plot:
74 |         plot_pr_curve(px, py, ap, save_dir, names)
75 | 
76 |     return p, r, ap, f1, unique_classes.astype('int32')
77 | 
78 | 
79 | def compute_ap(recall, precision):
80 |     """ Compute the average precision, given the recall and precision curves
81 |     # Arguments
82 |         recall: The recall curve (list)
83 |         precision: The precision curve (list)
84 |     # Returns
85 |         Average precision, precision curve, recall curve
86 |     """
87 | 
88 |     # Append sentinel values to beginning and end
89 |     mrec = np.concatenate(([0.], recall, [recall[-1] + 0.01]))
90 |     mpre = np.concatenate(([1.], precision, [0.]))
91 | 
92 |     # Compute the precision envelope
93 |     mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))
94 | 
95 |     # Integrate area under curve
96 |     method = 'interp'  # methods: 'continuous', 'interp'
97 |     if method == 'interp':
98 |         x = np.linspace(0, 1, 101)  # 101-point interp (COCO)
99 |         ap = np.trapz(np.interp(x, mrec, mpre), x)  # integrate
100 |     else:  # 'continuous'
101 |         i = np.where(mrec[1:] != mrec[:-1])[0]  # points where x axis (recall) changes
102 |         ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])  # area under curve
103 | 
104 |     return ap, mpre, mrec
105 | 
106 | 
107 | class ConfusionMatrix:
108 |     # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix
109 |     def __init__(self, nc, conf=0.25, iou_thres=0.45):
110 |         self.matrix = np.zeros((nc + 1, nc + 1))
111 |         self.nc = nc  # number of classes
112 |         self.conf = conf
113 |         self.iou_thres = iou_thres
114 | 
115 |     def process_batch(self, detections, labels):
116 |         """
117 |         Update the confusion matrix with a batch of detections and labels.
118 |         Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
119 |         Arguments:
120 |             detections (Array[N, 6]), x1, y1, x2, y2, conf, class
121 |             labels (Array[M, 5]), class, x1, y1, x2, y2
122 |         Returns:
123 |             None, updates confusion matrix accordingly
124 |         """
125 |         detections = detections[detections[:, 4] > self.conf]
126 |         gt_classes = labels[:, 0].int()
127 |         detection_classes = detections[:, 5].int()
128 |         iou = general.box_iou(labels[:, 1:], detections[:, :4])
129 | 
130 |         x = torch.where(iou > self.iou_thres)
131 |         if x[0].shape[0]:
132 |             matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()
133 |             if x[0].shape[0] > 1:
134 |                 matches = matches[matches[:, 2].argsort()[::-1]]
135 |                 matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
136 |                 matches = matches[matches[:, 2].argsort()[::-1]]
137 |                 matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
138 |         else:
139 |             matches = np.zeros((0, 3))
140 | 
141 |         n = matches.shape[0] > 0
142 |         m0, m1, _ = matches.transpose().astype(np.int16)
143 |         for i, gc in enumerate(gt_classes):
144 |             j = m0 == i
145 |             if n and sum(j) == 1:
146 |                 self.matrix[gc, detection_classes[m1[j]]] += 1  # correct
147 |             else:
148 |                 self.matrix[gc, self.nc] += 1  # background FP
149 | 
150 |         if n:
151 |             for i, dc in enumerate(detection_classes):
152 |                 if not any(m1 == i):
153 |                     self.matrix[self.nc, dc] += 1  # background FN
154 | 
155 |     def get_matrix(self):  # accessor; the `self.matrix` attribute would shadow a method named `matrix`
156 |         return self.matrix
157 | 
158 |     def plot(self, save_dir='', names=()):
159 |         try:
160 |             import seaborn as sn
161 | 
162 |             array = self.matrix / (self.matrix.sum(0).reshape(1, self.nc + 1) + 1E-6)  # normalize
163 |             array[array < 0.005] = np.nan  # don't annotate (would appear as 0.00)
164 | 
165 |             fig = plt.figure(figsize=(12, 9), tight_layout=True)
166 |             sn.set(font_scale=1.0 if self.nc < 50 else 0.8)  # for label size
167 |             labels = (0 < len(names) < 99) and len(names) == self.nc  # apply names to ticklabels
168 |             sn.heatmap(array, annot=self.nc < 30, annot_kws={"size": 8}, cmap='Blues', fmt='.2f', square=True,
169 |                        xticklabels=list(names) + ['background FN'] if labels else "auto",
170 |                        yticklabels=list(names) + ['background FP'] if labels else "auto").set_facecolor((1, 1, 1))
171 |             fig.axes[0].set_xlabel('True')
172 |             fig.axes[0].set_ylabel('Predicted')
173 |             fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250)
174 |         except Exception:
175 |             pass
176 | 
177 |     def print(self):
178 |         for i in range(self.nc + 1):
179 |             print(' '.join(map(str, self.matrix[i])))
180 | 
181 | 
182 | # Plots ----------------------------------------------------------------------------------------------------------------
183 | 
184 | def plot_pr_curve(px, py, ap, save_dir='.', names=()):
185 |     fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)
186 |     py = np.stack(py, axis=1)
187 | 
188 |     if 0 < len(names) < 21:  # show per-class legend if < 21 classes
189 |         for i, y in enumerate(py.T):
190 |             ax.plot(px, y, linewidth=1, label=f'{names[i]} %.3f' % ap[i, 0])  # plot(recall, precision)
191 |     else:
192 |         ax.plot(px, py, linewidth=1, color='grey')  # plot(recall, precision)
193 | 
194 |     ax.plot(px, py.mean(1), linewidth=3, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean())
195 |     ax.set_xlabel('Recall')
196 |     ax.set_ylabel('Precision')
197 |     ax.set_xlim(0, 1)
198 |     ax.set_ylim(0, 1)
199 |     plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
200 |     fig.savefig(Path(save_dir) / 'precision_recall_curve.png', dpi=250)
201 | 
--------------------------------------------------------------------------------