├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── bug-report.yml
│   │   ├── config.yml
│   │   ├── feature-request.yml
│   │   └── question.yml
│   ├── dependabot.yml
│   ├── translate-readme.yml
│   └── workflows
│       ├── ci.yaml
│       ├── cla.yml
│       └── docker.yaml
├── .gitignore
├── .pre-commit-config.yaml
├── CITATION.cff
├── CONTRIBUTING.md
├── LICENSE
├── MANIFEST.in
├── README.md
├── README.zh-CN.md
├── docker
│   ├── Dockerfile
│   ├── Dockerfile-arm64
│   └── Dockerfile-cpu
├── docs
│   ├── CNAME
│   ├── README.md
│   ├── SECURITY.md
│   ├── app.md
│   ├── assets
│   │   └── favicon.ico
│   ├── callbacks.md
│   ├── cfg.md
│   ├── cli.md
│   ├── engine.md
│   ├── hub.md
│   ├── index.md
│   ├── predict.md
│   ├── python.md
│   ├── quickstart.md
│   ├── reference
│   │   ├── base_pred.md
│   │   ├── base_trainer.md
│   │   ├── base_val.md
│   │   ├── exporter.md
│   │   ├── model.md
│   │   ├── nn.md
│   │   ├── ops.md
│   │   └── results.md
│   ├── stylesheets
│   │   └── style.css
│   └── tasks
│       ├── classification.md
│       ├── detection.md
│       ├── segmentation.md
│       └── tracking.md
├── examples
│   ├── README.md
│   ├── YOLOv8-CPP-Inference
│   │   ├── CMakeLists.txt
│   │   ├── README.md
│   │   ├── inference.cpp
│   │   ├── inference.h
│   │   └── main.cpp
│   ├── YOLOv8-OpenCV-ONNX-Python
│   │   ├── README.md
│   │   └── main.py
│   └── tutorial.ipynb
├── mkdocs.yml
├── requirements.txt
├── setup.cfg
├── setup.py
├── tests
│   ├── test_cli.py
│   ├── test_engine.py
│   └── test_python.py
└── ultralytics
    ├── __init__.py
    ├── assets
    │   ├── bus.jpg
    │   └── zidane.jpg
    ├── datasets
    │   ├── Argoverse.yaml
    │   ├── GlobalWheat2020.yaml
    │   ├── ImageNet.yaml
    │   ├── Objects365.yaml
    │   ├── SKU-110K.yaml
    │   ├── VOC.yaml
    │   ├── VisDrone.yaml
    │   ├── coco.yaml
    │   ├── coco128-seg.yaml
    │   ├── coco128.yaml
    │   ├── coco8-seg.yaml
    │   ├── coco8.yaml
    │   └── xView.yaml
    ├── hub
    │   ├── __init__.py
    │   ├── auth.py
    │   ├── session.py
    │   └── utils.py
    ├── models
    │   ├── README.md
    │   ├── v3
    │   │   ├── yolov3-sppu.yaml
    │   │   ├── yolov3-tinyu.yaml
    │   │   └── yolov3u.yaml
    │   ├── v5
    │   │   ├── yolov5lu.yaml
    │   │   ├── yolov5mu.yaml
    │   │   ├── yolov5nu.yaml
    │   │   ├── yolov5su.yaml
    │   │   └── yolov5xu.yaml
    │   └── v8
    │       ├── cls
    │       │   ├── yolov8l-cls.yaml
    │       │   ├── yolov8m-cls.yaml
    │       │   ├── yolov8n-cls.yaml
    │       │   ├── yolov8s-cls.yaml
    │       │   └── yolov8x-cls.yaml
    │       ├── seg
    │       │   ├── yolov8l-seg.yaml
    │       │   ├── yolov8m-seg.yaml
    │       │   ├── yolov8n-seg.yaml
    │       │   ├── yolov8s-seg.yaml
    │       │   └── yolov8x-seg.yaml
    │       ├── yolov8l.yaml
    │       ├── yolov8m.yaml
    │       ├── yolov8n.yaml
    │       ├── yolov8s.yaml
    │       ├── yolov8x.yaml
    │       └── yolov8x6.yaml
    ├── nn
    │   ├── __init__.py
    │   ├── autobackend.py
    │   ├── autoshape.py
    │   ├── modules.py
    │   └── tasks.py
    ├── tracker
    │   ├── README.md
    │   ├── __init__.py
    │   ├── cfg
    │   │   ├── botsort.yaml
    │   │   └── bytetrack.yaml
    │   ├── track.py
    │   ├── trackers
    │   │   ├── __init__.py
    │   │   ├── basetrack.py
    │   │   ├── bot_sort.py
    │   │   └── byte_tracker.py
    │   └── utils
    │       ├── __init__.py
    │       ├── gmc.py
    │       ├── kalman_filter.py
    │       └── matching.py
    └── yolo
        ├── __init__.py
        ├── cfg
        │   ├── __init__.py
        │   └── default.yaml
        ├── data
        │   ├── __init__.py
        │   ├── augment.py
        │   ├── base.py
        │   ├── build.py
        │   ├── dataloaders
        │   │   ├── __init__.py
        │   │   ├── stream_loaders.py
        │   │   ├── v5augmentations.py
        │   │   └── v5loader.py
        │   ├── dataset.py
        │   ├── dataset_wrappers.py
        │   ├── scripts
        │   │   ├── download_weights.sh
        │   │   ├── get_coco.sh
        │   │   ├── get_coco128.sh
        │   │   └── get_imagenet.sh
        │   └── utils.py
        ├── engine
        │   ├── __init__.py
        │   ├── exporter.py
        │   ├── model.py
        │   ├── predictor.py
        │   ├── results.py
        │   ├── trainer.py
        │   └── validator.py
        ├── utils
        │   ├── __init__.py
        │   ├── autobatch.py
        │   ├── benchmarks.py
        │   ├── callbacks
        │   │   ├── __init__.py
        │   │   ├── base.py
        │   │   ├── clearml.py
        │   │   ├── comet.py
        │   │   ├── hub.py
        │   │   └── tensorboard.py
        │   ├── checks.py
        │   ├── dist.py
        │   ├── downloads.py
        │   ├── files.py
        │   ├── instance.py
        │   ├── loss.py
        │   ├── metrics.py
        │   ├── ops.py
        │   ├── plotting.py
        │   ├── tal.py
        │   └── torch_utils.py
        └── v8
            ├── __init__.py
            ├── classify
            │   ├── __init__.py
            │   ├── predict.py
            │   ├── train.py
            │   └── val.py
            ├── detect
            │   ├── __init__.py
            │   ├── predict.py
            │   ├── train.py
            │   └── val.py
            └── segment
                ├── __init__.py
                ├── predict.py
                ├── train.py
                └── val.py
/.github/ISSUE_TEMPLATE/bug-report.yml:
--------------------------------------------------------------------------------
1 | name: 🐛 Bug Report
2 | # title: " "
3 | description: Problems with YOLOv8
4 | labels: [bug, triage]
5 | body:
6 |   - type: markdown
7 |     attributes:
8 |       value: |
9 |         Thank you for submitting a YOLOv8 🐛 Bug Report!
10 |
11 |   - type: checkboxes
12 |     attributes:
13 |       label: Search before asking
14 |       description: >
15 |         Please search the [issues](https://github.com/ultralytics/ultralytics/issues) to see if a similar bug report already exists.
16 |       options:
17 |         - label: >
18 |             I have searched the YOLOv8 [issues](https://github.com/ultralytics/ultralytics/issues) and found no similar bug report.
19 |           required: true
20 |
21 |   - type: dropdown
22 |     attributes:
23 |       label: YOLOv8 Component
24 |       description: |
25 |         Please select the part of YOLOv8 where you found the bug.
26 |       multiple: true
27 |       options:
28 |         - "Training"
29 |         - "Validation"
30 |         - "Detection"
31 |         - "Export"
32 |         - "PyTorch Hub"
33 |         - "Multi-GPU"
34 |         - "Evolution"
35 |         - "Integrations"
36 |         - "Other"
37 |     validations:
38 |       required: false
39 |
40 |   - type: textarea
41 |     attributes:
42 |       label: Bug
43 |       description: Provide console output with error messages and/or screenshots of the bug.
44 |       placeholder: |
45 |         💡 ProTip! Include as much information as possible (screenshots, logs, tracebacks etc.) to receive the most helpful response.
46 |     validations:
47 |       required: true
48 |
49 |   - type: textarea
50 |     attributes:
51 |       label: Environment
52 |       description: Please specify the software and hardware you used to produce the bug.
53 |       placeholder: |
54 |         - YOLO: Ultralytics YOLOv8.0.21 🚀 Python-3.8.10 torch-1.13.1+cu117 CUDA:0 (A100-SXM-80GB, 81251MiB)
55 |         - OS: Ubuntu 20.04
56 |         - Python: 3.8.10
57 |     validations:
58 |       required: false
59 |
60 |   - type: textarea
61 |     attributes:
62 |       label: Minimal Reproducible Example
63 |       description: >
64 |         When asking a question, people will be better able to provide help if you provide code that they can easily understand and use to **reproduce** the problem.
65 |         This is referred to by community members as creating a [minimal reproducible example](https://stackoverflow.com/help/minimal-reproducible-example).
66 |       placeholder: |
67 |         ```
68 |         # Code to reproduce your issue here
69 |         ```
70 |     validations:
71 |       required: false
72 |
73 |   - type: textarea
74 |     attributes:
75 |       label: Additional
76 |       description: Anything else you would like to share?
77 |
78 |   - type: checkboxes
79 |     attributes:
80 |       label: Are you willing to submit a PR?
81 |       description: >
82 |         (Optional) We encourage you to submit a [Pull Request](https://github.com/ultralytics/ultralytics/pulls) (PR) to help improve YOLOv8 for everyone, especially if you have a good understanding of how to implement a fix or feature.
83 |         See the YOLOv8 [Contributing Guide](https://github.com/ultralytics/ultralytics/blob/main/CONTRIBUTING.md) to get started.
84 |       options:
85 |         - label: Yes I'd like to help by submitting a PR!
86 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------
1 | blank_issues_enabled: true
2 | contact_links:
3 |   - name: 📄 Docs
4 |     url: https://docs.ultralytics.com/
5 |     about: Full Ultralytics YOLOv8 Documentation
6 |   - name: 💬 Forum
7 |     url: https://community.ultralytics.com/
8 |     about: Ask on Ultralytics Community Forum
9 |   - name: Stack Overflow
10 |     url: https://stackoverflow.com/search?q=YOLOv8
11 |     about: Ask on Stack Overflow with 'YOLOv8' tag
12 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature-request.yml:
--------------------------------------------------------------------------------
1 | name: 🚀 Feature Request
2 | description: Suggest a YOLOv8 idea
3 | # title: " "
4 | labels: [enhancement]
5 | body:
6 |   - type: markdown
7 |     attributes:
8 |       value: |
9 |         Thank you for submitting a YOLOv8 🚀 Feature Request!
10 |
11 |   - type: checkboxes
12 |     attributes:
13 |       label: Search before asking
14 |       description: >
15 |         Please search the [issues](https://github.com/ultralytics/ultralytics/issues) to see if a similar feature request already exists.
16 |       options:
17 |         - label: >
18 |             I have searched the YOLOv8 [issues](https://github.com/ultralytics/ultralytics/issues) and found no similar feature requests.
19 |           required: true
20 |
21 |   - type: textarea
22 |     attributes:
23 |       label: Description
24 |       description: A short description of your feature.
25 |       placeholder: |
26 |         What new feature would you like to see in YOLOv8?
27 |     validations:
28 |       required: true
29 |
30 |   - type: textarea
31 |     attributes:
32 |       label: Use case
33 |       description: |
34 |         Describe the use case of your feature request. It will help us understand and prioritize the feature request.
35 |       placeholder: |
36 |         How would this feature be used, and who would use it?
37 |
38 |   - type: textarea
39 |     attributes:
40 |       label: Additional
41 |       description: Anything else you would like to share?
42 |
43 |   - type: checkboxes
44 |     attributes:
45 |       label: Are you willing to submit a PR?
46 |       description: >
47 |         (Optional) We encourage you to submit a [Pull Request](https://github.com/ultralytics/ultralytics/pulls) (PR) to help improve YOLOv8 for everyone, especially if you have a good understanding of how to implement a fix or feature.
48 |         See the YOLOv8 [Contributing Guide](https://github.com/ultralytics/ultralytics/blob/main/CONTRIBUTING.md) to get started.
49 |       options:
50 |         - label: Yes I'd like to help by submitting a PR!
51 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/question.yml:
--------------------------------------------------------------------------------
1 | name: ❓ Question
2 | description: Ask a YOLOv8 question
3 | # title: " "
4 | labels: [question]
5 | body:
6 |   - type: markdown
7 |     attributes:
8 |       value: |
9 |         Thank you for asking a YOLOv8 ❓ Question!
10 |
11 |   - type: checkboxes
12 |     attributes:
13 |       label: Search before asking
14 |       description: >
15 |         Please search the [issues](https://github.com/ultralytics/ultralytics/issues) and [discussions](https://github.com/ultralytics/ultralytics/discussions) to see if a similar question already exists.
16 |       options:
17 |         - label: >
18 |             I have searched the YOLOv8 [issues](https://github.com/ultralytics/ultralytics/issues) and [discussions](https://github.com/ultralytics/ultralytics/discussions) and found no similar questions.
19 |           required: true
20 |
21 |   - type: textarea
22 |     attributes:
23 |       label: Question
24 |       description: What is your question?
25 |       placeholder: |
26 |         💡 ProTip! Include as much information as possible (screenshots, logs, tracebacks etc.) to receive the most helpful response.
27 |     validations:
28 |       required: true
29 |
30 |   - type: textarea
31 |     attributes:
32 |       label: Additional
33 |       description: Anything else you would like to share?
34 |
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | # To get started with Dependabot version updates, you'll need to specify which
2 | # package ecosystems to update and where the package manifests are located.
3 | # Please see the documentation for all configuration options:
4 | # https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
5 |
6 | version: 2
7 | updates:
8 |   - package-ecosystem: pip
9 |     directory: "/"
10 |     schedule:
11 |       interval: weekly
12 |       time: "04:00"
13 |     open-pull-requests-limit: 10
14 |     reviewers:
15 |       - glenn-jocher
16 |     labels:
17 |       - dependencies
18 |
19 |   - package-ecosystem: github-actions
20 |     directory: "/"
21 |     schedule:
22 |       interval: weekly
23 |       time: "04:00"
24 |     open-pull-requests-limit: 5
25 |     reviewers:
26 |       - glenn-jocher
27 |     labels:
28 |       - dependencies
29 |
--------------------------------------------------------------------------------
/.github/translate-readme.yml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | # README translation action to translate README.md to Chinese as README.zh-CN.md on any change to README.md
3 |
4 | name: Translate README
5 |
6 | on:
7 |   push:
8 |     branches:
9 |       - translate_readme  # replace with 'main' to enable action
10 |     paths:
11 |       - README.md
12 |
13 | jobs:
14 |   Translate:
15 |     runs-on: ubuntu-latest
16 |     steps:
17 |       - uses: actions/checkout@v3
18 |       - name: Setup Node.js
19 |         uses: actions/setup-node@v3
20 |         with:
21 |           node-version: 16
22 |       # ISO Language Codes: https://cloud.google.com/translate/docs/languages
23 |       - name: Adding README - Chinese Simplified
24 |         uses: dephraiim/translate-readme@main
25 |         with:
26 |           LANG: zh-CN
27 |
--------------------------------------------------------------------------------
/.github/workflows/cla.yml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | name: "CLA Assistant"
4 | on:
5 |   issue_comment:
6 |     types:
7 |       - created
8 |   pull_request_target:
9 |     types:
10 |       - reopened
11 |       - opened
12 |       - synchronize
13 |
14 | jobs:
15 |   CLA:
16 |     if: github.repository == 'ultralytics/ultralytics'
17 |     runs-on: ubuntu-latest
18 |     steps:
19 |       - name: "CLA Assistant"
20 |         if: (github.event.comment.body == 'recheck' || github.event.comment.body == 'I have read the CLA Document and I sign the CLA') || github.event_name == 'pull_request_target'
21 |         uses: contributor-assistant/github-action@v2.3.0
22 |         env:
23 |           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
24 |           # must be repository secret token
25 |           PERSONAL_ACCESS_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }}
26 |         with:
27 |           path-to-signatures: 'signatures/version1/cla.json'
28 |           path-to-document: 'https://github.com/ultralytics/assets/blob/main/documents/CLA.md'  # CLA document
29 |           # branch should not be protected
30 |           branch: 'main'
31 |           allowlist: dependabot[bot],github-actions,pre-commit*,bot*
32 |
33 |           remote-organization-name: ultralytics
34 |           remote-repository-name: cla
35 |           custom-pr-sign-comment: 'I have read the CLA Document and I sign the CLA'
36 |           custom-allsigned-prcomment: All Contributors have signed the CLA. ✅
37 |           #custom-notsigned-prcomment: 'pull request comment with Introductory message to ask new contributors to sign'
38 |
--------------------------------------------------------------------------------
/.github/workflows/docker.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | # Builds ultralytics/ultralytics:latest images on DockerHub https://hub.docker.com/r/ultralytics
3 |
4 | name: Publish Docker Images
5 |
6 | on:
7 |   push:
8 |     branches: [main]
9 |
10 | jobs:
11 |   docker:
12 |     if: github.repository == 'ultralytics/ultralytics'
13 |     name: Push Docker image to Docker Hub
14 |     runs-on: ubuntu-latest
15 |     steps:
16 |       - name: Checkout repo
17 |         uses: actions/checkout@v3
18 |
19 |       - name: Set up QEMU
20 |         uses: docker/setup-qemu-action@v2
21 |
22 |       - name: Set up Docker Buildx
23 |         uses: docker/setup-buildx-action@v2
24 |
25 |       - name: Login to Docker Hub
26 |         uses: docker/login-action@v2
27 |         with:
28 |           username: ${{ secrets.DOCKERHUB_USERNAME }}
29 |           password: ${{ secrets.DOCKERHUB_TOKEN }}
30 |
31 |       - name: Build and push arm64 image
32 |         uses: docker/build-push-action@v4
33 |         continue-on-error: true
34 |         with:
35 |           context: .
36 |           platforms: linux/arm64
37 |           file: docker/Dockerfile-arm64
38 |           push: true
39 |           tags: ultralytics/ultralytics:latest-arm64
40 |
41 |       - name: Build and push CPU image
42 |         uses: docker/build-push-action@v4
43 |         continue-on-error: true
44 |         with:
45 |           context: .
46 |           file: docker/Dockerfile-cpu
47 |           push: true
48 |           tags: ultralytics/ultralytics:latest-cpu
49 |
50 |       - name: Build and push GPU image
51 |         uses: docker/build-push-action@v4
52 |         continue-on-error: true
53 |         with:
54 |           context: .
55 |           file: docker/Dockerfile
56 |           push: true
57 |           tags: ultralytics/ultralytics:latest
58 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 |
84 | # Profiling
85 | *.pclprof
86 |
87 | # pyenv
88 | .python-version
89 |
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 |
97 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
98 | __pypackages__/
99 |
100 | # Celery stuff
101 | celerybeat-schedule
102 | celerybeat.pid
103 |
104 | # SageMath parsed files
105 | *.sage.py
106 |
107 | # Environments
108 | .env
109 | .venv
110 | env/
111 | venv/
112 | ENV/
113 | env.bak/
114 | venv.bak/
115 |
116 | # Spyder project settings
117 | .spyderproject
118 | .spyproject
119 |
120 | # Rope project settings
121 | .ropeproject
122 |
123 | # mkdocs documentation
124 | /site
125 |
126 | # mypy
127 | .mypy_cache/
128 | .dmypy.json
129 | dmypy.json
130 |
131 | # Pyre type checker
132 | .pyre/
133 |
134 | # datasets and projects
135 | datasets/
136 | runs/
137 | wandb/
138 |
139 | .DS_Store
140 |
141 | # Neural Network weights -----------------------------------------------------------------------------------------------
142 | weights/
143 | *.weights
144 | *.pt
145 | *.pb
146 | *.onnx
147 | *.engine
148 | *.mlmodel
149 | *.torchscript
150 | *.tflite
151 | *.h5
152 | *_saved_model/
153 | *_web_model/
154 | *_openvino_model/
155 | *_paddle_model/
156 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | # Pre-commit hooks. For more information see https://github.com/pre-commit/pre-commit-hooks/blob/main/README.md
3 |
4 | exclude: 'docs/'
5 | # Define bot property if installed via https://github.com/marketplace/pre-commit-ci
6 | ci:
7 |   autofix_prs: true
8 |   autoupdate_commit_msg: '[pre-commit.ci] pre-commit suggestions'
9 |   autoupdate_schedule: monthly
10 |   # submodules: true
11 |
12 | repos:
13 |   - repo: https://github.com/pre-commit/pre-commit-hooks
14 |     rev: v4.4.0
15 |     hooks:
16 |       - id: end-of-file-fixer
17 |       - id: trailing-whitespace
18 |       - id: check-case-conflict
19 |       - id: check-yaml
20 |       - id: check-docstring-first
21 |       - id: double-quote-string-fixer
22 |       - id: detect-private-key
23 |
24 |   - repo: https://github.com/asottile/pyupgrade
25 |     rev: v3.3.1
26 |     hooks:
27 |       - id: pyupgrade
28 |         name: Upgrade code
29 |         args: [--py37-plus]
30 |
31 |   - repo: https://github.com/PyCQA/isort
32 |     rev: 5.12.0
33 |     hooks:
34 |       - id: isort
35 |         name: Sort imports
36 |
37 |   - repo: https://github.com/google/yapf
38 |     rev: v0.32.0
39 |     hooks:
40 |       - id: yapf
41 |         name: YAPF formatting
42 |
43 |   - repo: https://github.com/executablebooks/mdformat
44 |     rev: 0.7.16
45 |     hooks:
46 |       - id: mdformat
47 |         name: MD formatting
48 |         additional_dependencies:
49 |           - mdformat-gfm
50 |           - mdformat-black
51 |         # exclude: "README.md|README.zh-CN.md|CONTRIBUTING.md"
52 |
53 |   - repo: https://github.com/PyCQA/flake8
54 |     rev: 6.0.0
55 |     hooks:
56 |       - id: flake8
57 |         name: PEP8
58 |
59 |   - repo: https://github.com/codespell-project/codespell
60 |     rev: v2.2.2
61 |     hooks:
62 |       - id: codespell
63 |         args:
64 |           - --ignore-words-list=crate,nd,strack,dota
65 |
66 | #- repo: https://github.com/asottile/yesqa
67 | #  rev: v1.4.0
68 | #  hooks:
69 | #  - id: yesqa
70 |
--------------------------------------------------------------------------------
/CITATION.cff:
--------------------------------------------------------------------------------
1 | cff-version: 1.2.0
2 | preferred-citation:
3 |   type: software
4 |   message: If you use this software, please cite it as below.
5 |   authors:
6 |     - family-names: Jocher
7 |       given-names: Glenn
8 |       orcid: "https://orcid.org/0000-0001-5950-6979"
9 |     - family-names: Chaurasia
10 |       given-names: Ayush
11 |       orcid: "https://orcid.org/0000-0002-7603-6750"
12 |     - family-names: Qiu
13 |       given-names: Jing
14 |       orcid: "https://orcid.org/0000-0003-3783-7069"
15 |   title: "YOLO by Ultralytics"
16 |   version: 8.0.0
17 |   # doi: 10.5281/zenodo.3908559 # TODO
18 |   date-released: 2023-01-10
19 |   license: GPL-3.0
20 |   url: "https://github.com/ultralytics/ultralytics"
21 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include *.md
2 | include requirements.txt
3 | include LICENSE
4 | include setup.py
5 | recursive-include ultralytics *.yaml
6 | recursive-exclude __pycache__ *
7 |
--------------------------------------------------------------------------------
/docker/Dockerfile:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | # Builds ultralytics/ultralytics:latest image on DockerHub https://hub.docker.com/r/ultralytics/ultralytics
3 | # Image is CUDA-optimized for YOLOv8 single/multi-GPU training and inference
4 |
5 | # Start FROM NVIDIA PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch
6 | # FROM docker.io/pytorch/pytorch:latest
7 | FROM pytorch/pytorch:latest
8 |
9 | # Downloads to user config dir
10 | ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/
11 |
12 | # Install linux packages
13 | ENV DEBIAN_FRONTEND noninteractive
14 | RUN apt update
15 | RUN TZ=Etc/UTC apt install -y tzdata
16 | RUN apt install --no-install-recommends -y gcc git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg g++
17 | # RUN alias python=python3
18 |
19 | # Security updates
20 | # https://security.snyk.io/vuln/SNYK-UBUNTU1804-OPENSSL-3314796
21 | RUN apt upgrade --no-install-recommends -y openssl tar
22 |
23 | # Create working directory
24 | RUN mkdir -p /usr/src/ultralytics
25 | WORKDIR /usr/src/ultralytics
26 |
27 | # Copy contents
28 | # COPY . /usr/src/app (issues as not a .git directory)
29 | RUN git clone https://github.com/ultralytics/ultralytics /usr/src/ultralytics
30 |
31 | # Install pip packages
32 | RUN python3 -m pip install --upgrade pip wheel
33 | RUN pip install --no-cache '.[export]' albumentations comet gsutil notebook
34 |
35 | # Set environment variables
36 | ENV OMP_NUM_THREADS=1
37 |
38 | # Cleanup
39 | ENV DEBIAN_FRONTEND teletype
40 |
41 |
42 | # Usage Examples -------------------------------------------------------------------------------------------------------
43 |
44 | # Build and Push
45 | # t=ultralytics/ultralytics:latest && sudo docker build -f docker/Dockerfile -t $t . && sudo docker push $t
46 |
47 | # Pull and Run
48 | # t=ultralytics/ultralytics:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all $t
49 |
50 | # Pull and Run with local directory access
51 | # t=ultralytics/ultralytics:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all -v "$(pwd)"/datasets:/usr/src/datasets $t
52 |
53 | # Kill all
54 | # sudo docker kill $(sudo docker ps -q)
55 |
56 | # Kill all image-based
57 | # sudo docker kill $(sudo docker ps -qa --filter ancestor=ultralytics/ultralytics:latest)
58 |
59 | # DockerHub tag update
60 | # t=ultralytics/ultralytics:latest tnew=ultralytics/ultralytics:v6.2 && sudo docker pull $t && sudo docker tag $t $tnew && sudo docker push $tnew
61 |
62 | # Clean up
63 | # sudo docker system prune -a --volumes
64 |
65 | # Update Ubuntu drivers
66 | # https://www.maketecheasier.com/install-nvidia-drivers-ubuntu/
67 |
68 | # DDP test
69 | # python -m torch.distributed.run --nproc_per_node 2 --master_port 1 train.py --epochs 3
70 |
71 | # GCP VM from Image
72 | # docker.io/ultralytics/ultralytics:latest
73 |
--------------------------------------------------------------------------------
/docker/Dockerfile-arm64:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | # Builds ultralytics/ultralytics:latest-arm64 image on DockerHub https://hub.docker.com/r/ultralytics/ultralytics
3 | # Image is aarch64-compatible for Apple M1 and other ARM architectures i.e. Jetson Nano and Raspberry Pi
4 |
5 | # Start FROM Ubuntu image https://hub.docker.com/_/ubuntu
6 | FROM arm64v8/ubuntu:rolling
7 |
8 | # Downloads to user config dir
9 | ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/
10 |
11 | # Install linux packages
12 | ENV DEBIAN_FRONTEND noninteractive
13 | RUN apt update
14 | RUN TZ=Etc/UTC apt install -y tzdata
15 | RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc libgl1-mesa-glx libglib2.0-0 libpython3-dev
16 | # RUN alias python=python3
17 |
18 | # Create working directory
19 | RUN mkdir -p /usr/src/ultralytics
20 | WORKDIR /usr/src/ultralytics
21 |
22 | # Copy contents
23 | # COPY . /usr/src/app (issues as not a .git directory)
24 | RUN git clone https://github.com/ultralytics/ultralytics /usr/src/ultralytics
25 |
26 | # Install pip packages
27 | RUN python3 -m pip install --upgrade pip wheel
28 | RUN pip install --no-cache . albumentations gsutil notebook
29 |
30 | # Cleanup
31 | ENV DEBIAN_FRONTEND teletype
32 |
33 |
34 | # Usage Examples -------------------------------------------------------------------------------------------------------
35 |
36 | # Build and Push
37 | # t=ultralytics/ultralytics:latest-arm64 && sudo docker build --platform linux/arm64 -f docker/Dockerfile-arm64 -t $t . && sudo docker push $t
38 |
39 | # Pull and Run
40 | # t=ultralytics/ultralytics:latest-arm64 && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t
41 |
--------------------------------------------------------------------------------
/docker/Dockerfile-cpu:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | # Builds ultralytics/ultralytics:latest-cpu image on DockerHub https://hub.docker.com/r/ultralytics/ultralytics
3 | # Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLOv8 deployments
4 |
5 | # Start FROM Ubuntu image https://hub.docker.com/_/ubuntu
6 | FROM ubuntu:rolling
7 |
8 | # Downloads to user config dir
9 | ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/
10 |
11 | # Install linux packages
12 | ENV DEBIAN_FRONTEND noninteractive
13 | RUN apt update
14 | RUN TZ=Etc/UTC apt install -y tzdata
15 | RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg g++
16 | # RUN alias python=python3
17 |
18 | # Create working directory
19 | RUN mkdir -p /usr/src/ultralytics
20 | WORKDIR /usr/src/ultralytics
21 |
22 | # Copy contents
23 | # COPY . /usr/src/app (issues as not a .git directory)
24 | RUN git clone https://github.com/ultralytics/ultralytics /usr/src/ultralytics
25 |
26 | # Install pip packages
27 | RUN python3 -m pip install --upgrade pip wheel
28 | RUN pip install --no-cache '.[export]' albumentations gsutil notebook \
29 | --extra-index-url https://download.pytorch.org/whl/cpu
30 |
31 | # Cleanup
32 | ENV DEBIAN_FRONTEND teletype
33 |
34 |
35 | # Usage Examples -------------------------------------------------------------------------------------------------------
36 |
37 | # Build and Push
38 | # t=ultralytics/ultralytics:latest-cpu && sudo docker build -f docker/Dockerfile-cpu -t $t . && sudo docker push $t
39 |
40 | # Pull and Run
41 | # t=ultralytics/ultralytics:latest-cpu && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t
42 |
--------------------------------------------------------------------------------
/docs/CNAME:
--------------------------------------------------------------------------------
1 | docs.ultralytics.com
--------------------------------------------------------------------------------
/docs/README.md:
--------------------------------------------------------------------------------
1 | # Ultralytics Docs
2 |
3 | Ultralytics Docs are deployed to [https://docs.ultralytics.com](https://docs.ultralytics.com).
4 |
5 | ### Install Ultralytics package
6 |
7 | To install the ultralytics package in developer mode, you will need to have Git and Python 3 installed on your system.
8 | Then, follow these steps:
9 |
10 | 1. Clone the ultralytics repository to your local machine using Git:
11 |
12 |    ```bash
13 |    git clone https://github.com/ultralytics/ultralytics.git
14 |    ```
15 |
16 | 2. Navigate to the root directory of the repository:
17 |
18 |    ```bash
19 |    cd ultralytics
20 |    ```
21 |
22 | 3. Install the package in developer mode using pip:
23 |
24 |    ```bash
25 |    pip install -e '.[dev]'
26 |    ```
27 |
28 | This will install the ultralytics package and its dependencies in developer mode, allowing you to make changes to the
29 | package code and have them reflected immediately in your Python environment.
30 |
31 | Note that you may need to use the pip3 command instead of pip if you have multiple versions of Python installed on your
32 | system.
33 |
34 | ### Building and Serving Locally
35 |
36 | The `mkdocs serve` command is used to build and serve a local version of the MkDocs documentation site. It is typically
37 | used during the development and testing phase of a documentation project.
38 |
39 | ```bash
40 | mkdocs serve
41 | ```
42 |
43 | Here is a breakdown of what this command does:
44 |
45 | - `mkdocs`: This is the command-line interface (CLI) for the MkDocs static site generator. It is used to build and serve
46 | MkDocs sites.
47 | - `serve`: This is a subcommand of the `mkdocs` CLI that tells it to build and serve the documentation site locally.
48 | - `-a`: This flag specifies the hostname and port number to bind the server to. The default value is `localhost:8000`.
49 | - `-t`: This flag specifies the theme to use for the documentation site. The default value is `mkdocs`.
50 | - `-s`: This flag enables strict mode, which causes MkDocs to abort the build if there are any warnings.
51 |
52 | When you run the `mkdocs serve` command, it will build the documentation site using the files in the `docs/` directory
53 | and serve it at the specified hostname and port number. You can then view the site by going to the URL in your web
54 | browser.
55 |
56 | While the site is being served, you can make changes to the documentation files and see them reflected in the live site
57 | immediately. This is useful for testing and debugging your documentation before deploying it to a live server.
58 |
59 | To stop the serve command and terminate the local server, you can use the `CTRL+C` keyboard shortcut.
60 |
61 | ### Deploying Your Documentation Site
62 |
63 | To deploy your MkDocs documentation site, you will need to choose a hosting provider and a deployment method. Some
64 | popular options include GitHub Pages, GitLab Pages, and Amazon S3.
65 |
66 | Before you can deploy your site, you will need to configure your `mkdocs.yml` file to specify the remote host and any
67 | other necessary deployment settings.
68 |
69 | Once you have configured your `mkdocs.yml` file, you can use the `mkdocs gh-deploy` command to build and deploy your site.
70 | This command will build the documentation site using the files in the `docs/` directory and the specified configuration
71 | file and theme, and then deploy the site to the specified remote host.
72 |
73 | For example, to deploy your site to GitHub Pages using the `gh-deploy` command, you can run the following:
74 |
75 | ```bash
76 | mkdocs gh-deploy
77 | ```
78 |
79 | If you are using GitHub Pages, you can set a custom domain for your documentation site by going to the "Settings" page
80 | for your repository and updating the "Custom domain" field in the "GitHub Pages" section.
81 |
82 | 
83 |
84 | For more information on deploying your MkDocs documentation site, see
85 | the [MkDocs documentation](https://www.mkdocs.org/user-guide/deploying-your-docs/).
86 |
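87 | As a quick recap, a typical local edit-and-deploy loop with the commands covered above might look like this
88 | (the address passed to `-a` is just an example value):
89 |
90 | ```bash
91 | # preview the site locally on a custom address while editing
92 | mkdocs serve -a 127.0.0.1:8001
93 |
94 | # when satisfied, build the site and push it to the gh-pages branch
95 | mkdocs gh-deploy
96 | ```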
--------------------------------------------------------------------------------
/docs/SECURITY.md:
--------------------------------------------------------------------------------
1 | At [Ultralytics](https://ultralytics.com), the security of our users' data and systems is of utmost importance. To
2 | ensure the safety and security of our [open-source projects](https://github.com/ultralytics), we have implemented
3 | several measures to detect and prevent security vulnerabilities.
4 |
5 | [](https://snyk.io/advisor/python/ultralytics)
6 |
7 | ## Snyk Scanning
8 |
9 | We use [Snyk](https://snyk.io/advisor/python/ultralytics) to regularly scan the YOLOv8 repository for vulnerabilities
10 | and security issues. Our goal is to identify and remediate any potential threats as soon as possible, to minimize any
11 | risks to our users.
12 |
13 | ## GitHub CodeQL Scanning
14 |
15 | In addition to our Snyk scans, we also use
16 | GitHub's [CodeQL](https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/about-code-scanning-with-codeql)
17 | scans to proactively identify and address security vulnerabilities.
18 |
19 | ## Reporting Security Issues
20 |
21 | If you suspect or discover a security vulnerability in the YOLOv8 repository, please let us know immediately. You can
22 | reach out to us directly via our [contact form](https://ultralytics.com/contact) or
23 | via [security@ultralytics.com](mailto:security@ultralytics.com). Our security team will investigate and respond as soon
24 | as possible.
25 |
26 | We appreciate your help in keeping the YOLOv8 repository secure and safe for everyone.
27 |
--------------------------------------------------------------------------------
/docs/app.md:
--------------------------------------------------------------------------------
1 | # Ultralytics HUB App for YOLOv8
2 |
3 |
4 |
5 |
6 |
7 |
35 |
36 |
37 | Welcome to the Ultralytics HUB app for demonstrating YOLOv5 and YOLOv8 models! In this app, available on the [Apple App
38 | Store](https://apps.apple.com/xk/app/ultralytics/id1583935240) and the
39 | [Google Play Store](https://play.google.com/store/apps/details?id=com.ultralytics.ultralytics_app), you will be able
40 | to see the power and capabilities of YOLOv5, a state-of-the-art object detection model developed by Ultralytics.
41 |
42 | **To install simply scan the QR code above**. The App currently features YOLOv5 models, with YOLOv8 models coming soon.
43 |
44 | With YOLOv5, you can detect and classify objects in images and videos with high accuracy and speed. The model has been
45 | trained on a large dataset and is able to detect a wide range of objects, including cars, pedestrians, and traffic
46 | signs.
47 |
48 | In this app, you will be able to try out YOLOv5 on your own images and videos, and see the model in action. You can also
49 | learn more about how YOLOv5 works and how it can be used in real-world applications.
50 |
51 | We hope you enjoy using YOLOv5 and seeing its capabilities firsthand. Thank you for choosing Ultralytics for your object
52 | detection needs!
--------------------------------------------------------------------------------
/docs/assets/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wudashuo/yolov8/3861e6c82aaa1bbb214f020ece3a4bd4712eacbe/docs/assets/favicon.ico
--------------------------------------------------------------------------------
/docs/callbacks.md:
--------------------------------------------------------------------------------
1 | ## Callbacks
2 |
3 | The Ultralytics framework supports callbacks as entry points at strategic stages of the train, val, export, and predict
4 | modes. Each callback accepts a `Trainer`, `Validator`, or `Predictor` object, depending on the operation type. All
5 | properties of these objects can be found in the Reference section of the docs.
6 |
7 | ## Examples
8 |
9 | ### Returning additional information with Prediction
10 |
11 | In this example, we want to return the original frame with each result object. Here's how we can do that:
12 |
13 | ```python
14 | from ultralytics import YOLO
15 | def on_predict_batch_end(predictor):
16 |     # results -> List[batch_size]
17 |     _, _, im0s, _, _ = predictor.batch
18 |     im0s = im0s if isinstance(im0s, list) else [im0s]
19 |     predictor.results = zip(predictor.results, im0s)
20 | model = YOLO("yolov8n.pt")
21 | model.add_callback("on_predict_batch_end", on_predict_batch_end)
22 | for result, frame in model.predict():  # works the same with model.track()
23 |     pass
24 | ```
25 |
26 | ## All callbacks
27 |
28 | Here are all supported callbacks.
29 |
30 | ### Trainer
31 |
32 | `on_pretrain_routine_start`
33 |
34 | `on_pretrain_routine_end`
35 |
36 | `on_train_start`
37 |
38 | `on_train_epoch_start`
39 |
40 | `on_train_batch_start`
41 |
42 | `optimizer_step`
43 |
44 | `on_before_zero_grad`
45 |
46 | `on_train_batch_end`
47 |
48 | `on_train_epoch_end`
49 |
50 | `on_fit_epoch_end`
51 |
52 | `on_model_save`
53 |
54 | `on_train_end`
55 |
56 | `on_params_update`
57 |
58 | `teardown`
59 |
60 | ### Validator
61 |
62 | `on_val_start`
63 |
64 | `on_val_batch_start`
65 |
66 | `on_val_batch_end`
67 |
68 | `on_val_end`
69 |
70 | ### Predictor
71 |
72 | `on_predict_start`
73 |
74 | `on_predict_batch_start`
75 |
76 | `on_predict_postprocess_end`
77 |
78 | `on_predict_batch_end`
79 |
80 | `on_predict_end`
81 |
82 | ### Exporter
83 |
84 | `on_export_start`
85 |
86 | `on_export_end`
87 |
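88 | ### Registering a callback
89 |
90 | As a minimal sketch of wiring up one of the trainer callbacks listed above (the `print` body and the use of
91 | `trainer.epoch` are illustrative assumptions):
92 |
93 | ```python
94 | from ultralytics import YOLO
95 |
96 | def log_epoch(trainer):
97 |     # called at the end of every training epoch
98 |     print(f"Finished epoch {trainer.epoch}")
99 |
100 | model = YOLO("yolov8n.pt")
101 | model.add_callback("on_train_epoch_end", log_epoch)
102 | model.train(data="coco128.yaml", epochs=3)
103 | ```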
--------------------------------------------------------------------------------
/docs/engine.md:
--------------------------------------------------------------------------------
1 | Both the Ultralytics YOLO command-line and Python interfaces are simply high-level abstractions over the base engine
2 | executors. Let's take a look at the Trainer engine.
3 |
4 | ## BaseTrainer
5 |
6 | BaseTrainer contains the generic boilerplate training routine. It can be customized for any task by overriding
7 | the required functions or operations, as long as the correct formats are followed. For example, you can support your own
8 | custom model and dataloader by just overriding these functions:
9 |
10 | * `get_model(cfg, weights)` - The function that builds the model to be trained
11 | * `get_dataloader()` - The function that builds the dataloader
12 | More details and source code can be found in the [`BaseTrainer` Reference](reference/base_trainer.md).
13 |
14 | ## DetectionTrainer
15 |
16 | Here's how you can use the YOLOv8 `DetectionTrainer` and customize it.
17 |
18 | ```python
19 | from ultralytics.yolo.v8.detect import DetectionTrainer
20 |
21 | trainer = DetectionTrainer(overrides={...})
22 | trainer.train()
23 | trained_model = trainer.best # get best model
24 | ```
25 |
26 | ### Customizing the DetectionTrainer
27 |
28 | Let's customize the trainer **to train a custom detection model** that is not supported directly. You can do this by
29 | simply overriding the existing `get_model` functionality:
30 |
31 | ```python
32 | from ultralytics.yolo.v8.detect import DetectionTrainer
33 |
34 |
35 | class CustomTrainer(DetectionTrainer):
36 |     def get_model(self, cfg, weights):
37 |         ...
38 |
39 |
40 | trainer = CustomTrainer(overrides={...})
41 | trainer.train()
42 | ```
43 |
44 | You now realize that you need to customize the trainer further to:
45 |
46 | * Customize the loss function.
47 | * Add a callback that uploads the model to your Google Drive every 10 epochs.
48 | Here's how you can do it:
49 |
50 | ```python
51 | from ultralytics.yolo.v8.detect import DetectionTrainer
52 |
53 |
54 | class CustomTrainer(DetectionTrainer):
55 |     def get_model(self, cfg, weights):
56 |         ...
57 |
58 |     def criterion(self, preds, batch):
59 |         # get ground truth
60 |         imgs = batch["imgs"]
61 |         bboxes = batch["bboxes"]
62 |         ...
63 |         return loss, loss_items  # see Reference -> Trainer for details on the expected format
64 |
65 |
66 | # callback to upload model weights
67 | def log_model(trainer):
68 |     last_weight_path = trainer.last
69 |     ...
70 |
71 |
72 | trainer = CustomTrainer(overrides={...})
73 | trainer.add_callback("on_train_epoch_end", log_model) # Adds to existing callback
74 | trainer.train()
75 | ```
76 |
77 | To learn more about callback triggering events and entry points, check out our [Callbacks guide](callbacks.md).
78 |
79 | ## Other engine components
80 |
81 | Other components such as `Validators` and `Predictors` can be customized in the same way. See the Reference section
82 | for more information, and the sketch below for a minimal example.
83 |
84 |
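85 | For example, here is a minimal sketch of customizing a Validator (the `get_desc` override is illustrative; see the
86 | Reference section for the hooks actually available):
87 |
88 | ```python
89 | from ultralytics.yolo.v8.detect import DetectionValidator
90 |
91 |
92 | class CustomValidator(DetectionValidator):
93 |     def get_desc(self):
94 |         # override the header string printed during validation
95 |         return "My custom validation run"
96 | ```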
--------------------------------------------------------------------------------
/docs/index.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | [Ultralytics banner image]
4 |
5 | [project status badge images]
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 | Welcome to the Ultralytics YOLOv8 documentation landing
16 | page! [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics) is the latest version of the YOLO (You Only Look
17 | Once) object detection and image segmentation model developed by [Ultralytics](https://ultralytics.com). This page
18 | serves as the starting point for exploring the various resources available to help you get started with YOLOv8 and
19 | understand its features and capabilities.
20 |
21 | The YOLOv8 model is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of
22 | object detection and image segmentation tasks. It can be trained on large datasets and is capable of running on a
23 | variety of hardware platforms, from CPUs to GPUs.
24 |
25 | Whether you are a seasoned machine learning practitioner or new to the field, we hope that the resources on this page
26 | will help you get the most out of YOLOv8. For any bugs and feature requests please
27 | visit [GitHub Issues](https://github.com/ultralytics/ultralytics/issues). For professional support
28 | please [Contact Us](https://ultralytics.com/contact).
29 |
30 | ## A Brief History of YOLO
31 |
32 | YOLO (You Only Look Once) is a popular object detection and image segmentation model developed by Joseph Redmon and Ali
33 | Farhadi at the University of Washington. The first version of YOLO was released in 2015 and quickly gained popularity
34 | due to its high speed and accuracy.
35 |
36 | YOLOv2 was released in 2016 and improved upon the original model by incorporating batch normalization, anchor boxes, and
37 | dimension clusters. YOLOv3 was released in 2018 and further improved the model's performance by using a more efficient
38 | backbone network, adding a feature pyramid, and making use of focal loss.
39 |
40 | In 2020, YOLOv4 was released which introduced a number of innovations such as the use of Mosaic data augmentation, a new
41 | anchor-free detection head, and a new loss function.
42 |
43 | In 2020, Ultralytics released [YOLOv5](https://github.com/ultralytics/yolov5), which further improved the model's
44 | performance and added new features such as support for instance segmentation and object tracking.
45 |
46 | YOLO has been widely used in a variety of applications, including autonomous vehicles, security and surveillance, and
47 | medical imaging. It has also been used to win several competitions, such as the COCO Object Detection Challenge and the
48 | DOTA Object Detection Challenge.
49 |
50 | For more information about the history and development of YOLO, you can refer to the following references:
51 |
52 | - Redmon, J., & Farhadi, A. (2015). You only look once: Unified, real-time object detection. In Proceedings of the IEEE
53 | conference on computer vision and pattern recognition (pp. 779-788).
54 | - Redmon, J., & Farhadi, A. (2016). YOLO9000: Better, faster, stronger. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 7263-7271).
55 |
56 | ## Ultralytics YOLOv8
57 |
58 | [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics) is the latest version of the YOLO object detection and
59 | image segmentation model developed by Ultralytics. YOLOv8 is a cutting-edge, state-of-the-art (SOTA) model that builds
60 | upon the success of previous YOLO versions and introduces new features and improvements to further boost performance and
61 | flexibility.
62 |
63 | One key feature of YOLOv8 is its extensibility. It is designed as a framework that supports all previous versions of
64 | YOLO, making it easy to switch between different versions and compare their performance. This makes YOLOv8 an ideal
65 | choice for users who want to take advantage of the latest YOLO technology while still being able to use their existing
66 | YOLO models.
67 |
68 | In addition to its extensibility, YOLOv8 includes a number of other innovations that make it an appealing choice for a
69 | wide range of object detection and image segmentation tasks. These include a new backbone network, a new anchor-free
70 | detection head, and a new loss function. YOLOv8 is also highly efficient and can be run on a variety of hardware
71 | platforms, from CPUs to GPUs.
72 |
73 | Overall, YOLOv8 is a powerful and flexible tool for object detection and image segmentation that offers the best of both
74 | worlds: the latest SOTA technology and the ability to use and compare all previous YOLO versions.
75 |
--------------------------------------------------------------------------------
/docs/quickstart.md:
--------------------------------------------------------------------------------
1 | ## Install
2 |
3 | Install YOLOv8 via the `ultralytics` pip package for the latest stable release or by cloning
4 | the [https://github.com/ultralytics/ultralytics](https://github.com/ultralytics/ultralytics) repository for the most
5 | up-to-date version.
6 |
7 | !!! example "Pip install method (recommended)"
8 |
9 |     ```bash
10 |     pip install ultralytics
11 |     ```
12 |
13 | !!! example "Git clone method (for development)"
14 |
15 |     ```bash
16 |     git clone https://github.com/ultralytics/ultralytics
17 |     cd ultralytics
18 |     pip install -e '.[dev]'
19 |     ```
20 | See the Contributing section to learn more about contributing to the project.
21 |
22 | ## Use with CLI
23 |
24 | The YOLO command line interface (CLI) lets you train, validate, or run inference with models for various tasks and versions.
25 | The CLI requires no customization or code; you can simply run all tasks from the terminal with the `yolo` command.
26 |
27 | !!! example
28 |
29 |     === "Syntax"
30 |         ```bash
31 |         yolo task=detect    mode=train    model=yolov8n.yaml      args...
32 |                   classify       predict        yolov8n-cls.yaml
33 |                   segment        val            yolov8n-seg.yaml
34 |                                  export         yolov8n.pt        format=onnx  args...
35 |         ```
36 |
37 |     === "Example training"
38 |         ```bash
39 |         yolo detect train model=yolov8n.pt data=coco128.yaml device=0
40 |         ```
41 |     === "Example Multi-GPU training"
42 |         ```bash
43 |         yolo detect train model=yolov8n.pt data=coco128.yaml device=\'0,1,2,3\'
44 |         ```
45 |
46 | [CLI Guide](cli.md){ .md-button .md-button--primary}
47 |
48 | ## Use with Python
49 |
50 | Python usage allows users to easily use YOLOv8 inside their Python projects. It provides functions for loading and
51 | running the model, as well as for processing the model's output. The interface is designed to be easy to use, so that
52 | users can quickly implement object detection in their projects.
53 |
54 | Overall, the Python interface is a useful tool for anyone looking to incorporate object detection, segmentation or
55 | classification into their Python projects using YOLOv8.
56 |
57 | !!! example
58 |
59 |     ```python
60 |     from ultralytics import YOLO
61 |
62 |     # Load a model
63 |     model = YOLO("yolov8n.yaml")  # build a new model from scratch
64 |     model = YOLO("yolov8n.pt")  # load a pretrained model (recommended for training)
65 |
66 |     # Use the model
67 |     results = model.train(data="coco128.yaml", epochs=3)  # train the model
68 |     results = model.val()  # evaluate model performance on the validation set
69 |     results = model("https://ultralytics.com/images/bus.jpg")  # predict on an image
70 |     success = model.export(format="onnx")  # export the model to ONNX format
71 |     ```
72 |
73 | [Python Guide](python.md){.md-button .md-button--primary}
74 |
--------------------------------------------------------------------------------
/docs/reference/base_pred.md:
--------------------------------------------------------------------------------
1 | All task Predictors inherit from the `BasePredictor` class, which contains the model prediction routine boilerplate.
2 | You can override any function of these Predictors to suit your needs.
3 |
4 | ---
5 |
6 | ### BasePredictor API Reference
7 |
8 | :::ultralytics.yolo.engine.predictor.BasePredictor
--------------------------------------------------------------------------------
/docs/reference/base_trainer.md:
--------------------------------------------------------------------------------
1 | All task Trainers inherit from the `BaseTrainer` class, which contains the model training and optimization routine
2 | boilerplate. You can override any function of these Trainers to suit your needs.
3 |
4 | ---
5 |
6 | ### BaseTrainer API Reference
7 |
8 | :::ultralytics.yolo.engine.trainer.BaseTrainer
--------------------------------------------------------------------------------
/docs/reference/base_val.md:
--------------------------------------------------------------------------------
1 | All task Validators inherit from the `BaseValidator` class, which contains the model validation routine boilerplate. You
2 | can override any function of these Validators to suit your needs.
3 |
4 | ---
5 |
6 | ### BaseValidator API Reference
7 |
8 | :::ultralytics.yolo.engine.validator.BaseValidator
--------------------------------------------------------------------------------
/docs/reference/exporter.md:
--------------------------------------------------------------------------------
1 | ### Exporter API Reference
2 |
3 | :::ultralytics.yolo.engine.exporter.Exporter
--------------------------------------------------------------------------------
/docs/reference/model.md:
--------------------------------------------------------------------------------
1 | ::: ultralytics.yolo.engine.model
2 |
--------------------------------------------------------------------------------
/docs/reference/nn.md:
--------------------------------------------------------------------------------
1 | # nn Module
2 |
3 | The Ultralytics nn module contains three main components:
4 |
5 | 1. **AutoBackend**: A module that can run inference on all popular model formats.
6 | 2. **BaseModel**: The `BaseModel` class defines the operations supported by tasks like Detection and Segmentation.
7 | 3. **modules**: Optimized and reusable neural network blocks built on PyTorch.
8 |
9 | ## AutoBackend
10 |
11 | :::ultralytics.nn.autobackend.AutoBackend
12 |
13 | ## BaseModel
14 |
15 | :::ultralytics.nn.tasks.BaseModel
16 |
17 | ## Modules
18 |
19 | TODO
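20 |
21 | In the meantime, here is a minimal sketch of instantiating one of the reusable blocks directly (the `Conv`
22 | signature shown is as found in `ultralytics.nn.modules` at the time of writing; treat it as illustrative):
23 |
24 | ```python
25 | import torch
26 |
27 | from ultralytics.nn.modules import Conv
28 |
29 | # Conv is a fused Conv2d + BatchNorm + SiLU block
30 | layer = Conv(3, 16, k=3, s=2)  # 3 input channels, 16 output channels, stride 2
31 | out = layer(torch.zeros(1, 3, 64, 64))
32 | print(out.shape)  # -> torch.Size([1, 16, 32, 32])
33 | ```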
--------------------------------------------------------------------------------
/docs/reference/ops.md:
--------------------------------------------------------------------------------
1 | This module contains optimized deep learning related operations used in the Ultralytics YOLO framework.
2 |
3 | ## Non-max suppression
4 |
5 | :::ultralytics.yolo.utils.ops.non_max_suppression
6 | handler: python
7 | options:
8 | show_source: false
9 | show_root_toc_entry: false
10 | ---
11 |
12 | ## Scale boxes
13 |
14 | :::ultralytics.yolo.utils.ops.scale_boxes
15 | handler: python
16 | options:
17 | show_source: false
18 | show_root_toc_entry: false
19 | ---
20 |
21 | ## Scale image
22 |
23 | :::ultralytics.yolo.utils.ops.scale_image
24 | handler: python
25 | options:
26 | show_source: false
27 | show_root_toc_entry: false
28 | ---
29 |
30 | ## Clip boxes
31 |
32 | :::ultralytics.yolo.utils.ops.clip_boxes
33 | handler: python
34 | options:
35 | show_source: false
36 | show_root_toc_entry: false
37 | ---
38 |
39 | # Box Format Conversion
40 |
41 | ## xyxy2xywh
42 |
43 | :::ultralytics.yolo.utils.ops.xyxy2xywh
44 | handler: python
45 | options:
46 | show_source: false
47 | show_root_toc_entry: false
48 | ---
49 |
50 | ## xywh2xyxy
51 |
52 | :::ultralytics.yolo.utils.ops.xywh2xyxy
53 | handler: python
54 | options:
55 | show_source: false
56 | show_root_toc_entry: false
57 | ---
58 |
59 | ## xywhn2xyxy
60 |
61 | :::ultralytics.yolo.utils.ops.xywhn2xyxy
62 | handler: python
63 | options:
64 | show_source: false
65 | show_root_toc_entry: false
66 | ---
67 |
68 | ## xyxy2xywhn
69 |
70 | :::ultralytics.yolo.utils.ops.xyxy2xywhn
71 | handler: python
72 | options:
73 | show_source: false
74 | show_root_toc_entry: false
75 | ---
76 |
77 | ## xyn2xy
78 |
79 | :::ultralytics.yolo.utils.ops.xyn2xy
80 | handler: python
81 | options:
82 | show_source: false
83 | show_root_toc_entry: false
84 | ---
85 |
86 | ## xywh2ltwh
87 |
88 | :::ultralytics.yolo.utils.ops.xywh2ltwh
89 | handler: python
90 | options:
91 | show_source: false
92 | show_root_toc_entry: false
93 | ---
94 |
95 | ## xyxy2ltwh
96 |
97 | :::ultralytics.yolo.utils.ops.xyxy2ltwh
98 | handler: python
99 | options:
100 | show_source: false
101 | show_root_toc_entry: false
102 | ---
103 |
104 | ## ltwh2xywh
105 |
106 | :::ultralytics.yolo.utils.ops.ltwh2xywh
107 | handler: python
108 | options:
109 | show_source: false
110 | show_root_toc_entry: false
111 | ---
112 |
113 | ## ltwh2xyxy
114 |
115 | :::ultralytics.yolo.utils.ops.ltwh2xyxy
116 | handler: python
117 | options:
118 | show_source: false
119 | show_root_toc_entry: false
120 | ---
121 |
122 | ## segment2box
123 |
124 | :::ultralytics.yolo.utils.ops.segment2box
125 | handler: python
126 | options:
127 | show_source: false
128 | show_root_toc_entry: false
129 | ---
130 |
131 | # Mask Operations
132 |
133 | ## resample_segments
134 |
135 | :::ultralytics.yolo.utils.ops.resample_segments
136 | handler: python
137 | options:
138 | show_source: false
139 | show_root_toc_entry: false
140 | ---
141 |
142 | ## crop_mask
143 |
144 | :::ultralytics.yolo.utils.ops.crop_mask
145 | handler: python
146 | options:
147 | show_source: false
148 | show_root_toc_entry: false
149 | ---
150 |
151 | ## process_mask_upsample
152 |
153 | :::ultralytics.yolo.utils.ops.process_mask_upsample
154 | handler: python
155 | options:
156 | show_source: false
157 | show_root_toc_entry: false
158 | ---
159 |
160 | ## process_mask
161 |
162 | :::ultralytics.yolo.utils.ops.process_mask
163 | handler: python
164 | options:
165 | show_source: false
166 | show_root_toc_entry: false
167 | ---
168 |
169 | ## process_mask_native
170 |
171 | :::ultralytics.yolo.utils.ops.process_mask_native
172 | handler: python
173 | options:
174 | show_source: false
175 | show_root_toc_entry: false
176 | ---
177 |
178 | ## scale_segments
179 |
180 | :::ultralytics.yolo.utils.ops.scale_segments
181 | handler: python
182 | options:
183 | show_source: false
184 | show_root_toc_entry: false
185 | ---
186 |
187 | ## masks2segments
188 |
189 | :::ultralytics.yolo.utils.ops.masks2segments
190 | handler: python
191 | options:
192 | show_source: false
193 | show_root_toc_entry: false
194 | ---
195 |
196 | ## clip_segments
197 |
198 | :::ultralytics.yolo.utils.ops.clip_segments
199 | handler: python
200 | options:
201 | show_source: false
202 | show_root_toc_entry: false
203 | ---
204 |
--------------------------------------------------------------------------------
/docs/reference/results.md:
--------------------------------------------------------------------------------
1 | ### Results API Reference
2 |
3 | :::ultralytics.yolo.engine.results.Results
4 |
5 | ### Boxes API Reference
6 |
7 | :::ultralytics.yolo.engine.results.Boxes
8 |
9 | ### Masks API Reference
10 |
11 | :::ultralytics.yolo.engine.results.Masks
12 |
--------------------------------------------------------------------------------
/docs/stylesheets/style.css:
--------------------------------------------------------------------------------
1 | th, td {
2 | border: 1px solid var(--md-typeset-table-color);
3 | border-spacing: 0px;
4 | border-bottom: none;
5 | border-left: none;
6 | border-top: none;
7 | }
8 |
9 | .md-typeset__table {
10 | line-height: 1;
11 | }
12 |
13 | .md-typeset__table table:not([class]) {
14 | font-size: .74rem;
15 | border-right: none;
16 | }
17 |
18 | .md-typeset__table table:not([class]) td,
19 | .md-typeset__table table:not([class]) th {
20 | padding: 15px;
21 | }
22 |
23 | /* light mode alternating table bg colors */
24 | .md-typeset__table tr:nth-child(2n) {
25 | background-color: #f8f8f8;
26 | }
27 |
28 | /* dark mode alternating table bg colors */
29 | [data-md-color-scheme="slate"] .md-typeset__table tr:nth-child(2n) {
30 | background-color: hsla(var(--md-hue),25%,25%,1)
31 | }
--------------------------------------------------------------------------------
/docs/tasks/tracking.md:
--------------------------------------------------------------------------------
1 | Object tracking is a task that involves identifying the location and class of objects, then assigning a unique ID to
2 | each detection in video streams.
3 |
4 | The output of the tracker is the same as detection output, with an added object ID.
5 |
6 | ## Available Trackers
7 |
8 | The following tracking algorithms have been implemented and can be enabled by passing `tracker=tracker_type.yaml`:
9 |
10 | * [BoT-SORT](https://github.com/NirAharon/BoT-SORT) - `botsort.yaml`
11 | * [ByteTrack](https://github.com/ifzhang/ByteTrack) - `bytetrack.yaml`
12 |
13 | The default tracker is BoT-SORT.
14 |
15 | ## Tracking
16 |
17 | Use a trained YOLOv8n/YOLOv8n-seg model to run the tracker on video streams.
18 |
19 | !!! example ""
20 |
21 | === "Python"
22 |
23 | ```python
24 | from ultralytics import YOLO
25 |
26 | # Load a model
27 | model = YOLO("yolov8n.pt") # load an official detection model
28 | model = YOLO("yolov8n-seg.pt") # load an official segmentation model
29 | model = YOLO("path/to/best.pt") # load a custom model
30 |
31 | # Track with the model
32 | results = model.track(source="https://youtu.be/Zgi9g1ksQHc", show=True)
33 | results = model.track(source="https://youtu.be/Zgi9g1ksQHc", show=True, tracker="bytetrack.yaml")
34 | ```
35 | === "CLI"
36 |
37 | ```bash
38 | yolo track model=yolov8n.pt source="https://youtu.be/Zgi9g1ksQHc" # official detection model
39 | yolo track model=yolov8n-seg.pt source=... # official segmentation model
40 | yolo track model=path/to/best.pt source=... # custom model
41 | yolo track model=path/to/best.pt tracker="bytetrack.yaml" # bytetrack tracker
42 |
43 | ```
44 |
45 | As shown in the usage above, tracking works with both detection and segmentation models; the only thing you need to
46 | do is load the corresponding (detection or segmentation) model.
47 |
48 | ## Configuration
49 |
50 | ### Tracking
51 |
52 | Tracking shares its configuration with predict, e.g. `conf`, `iou` and `show`. For more configuration options, please
53 | refer to the [predict page](https://docs.ultralytics.com/cfg/#prediction).
54 | !!! example ""
55 |
56 | === "Python"
57 |
58 | ```python
59 | from ultralytics import YOLO
60 |
61 | model = YOLO("yolov8n.pt")
62 | results = model.track(source="https://youtu.be/Zgi9g1ksQHc", conf=0.3, iou=0.5, show=True)
63 | ```
64 | === "CLI"
65 |
66 | ```bash
67 | yolo track model=yolov8n.pt source="https://youtu.be/Zgi9g1ksQHc" conf=0.3 iou=0.5 show
68 |
69 | ```
70 |
71 | ### Tracker
72 |
73 | We also support using a modified tracker config file: simply copy a config file, e.g. `custom_tracker.yaml`,
74 | from [ultralytics/tracker/cfg](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/tracker/cfg) and modify
75 | any configuration (except the `tracker_type`) you need to.
76 | !!! example ""
77 |
78 | === "Python"
79 |
80 | ```python
81 | from ultralytics import YOLO
82 |
83 | model = YOLO("yolov8n.pt")
84 | results = model.track(source="https://youtu.be/Zgi9g1ksQHc", tracker='custom_tracker.yaml')
85 | ```
86 | === "CLI"
87 |
88 | ```bash
89 | yolo track model=yolov8n.pt source="https://youtu.be/Zgi9g1ksQHc" tracker='custom_tracker.yaml'
90 |
91 | ```
92 |
93 | Please refer to the [ultralytics/tracker/cfg](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/tracker/cfg)
94 | page for the available tracker configuration files.
95 |
96 |
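97 | For a quick look at the per-frame outputs, here is a minimal sketch. It assumes, per the Results API reference in these docs, that each element of `results` is a `Results` object whose boxes carry the added object ID once tracking is enabled:
98 |
99 | ```python
100 | from ultralytics import YOLO
101 |
102 | model = YOLO("yolov8n.pt")
103 | results = model.track(source="https://youtu.be/Zgi9g1ksQHc", conf=0.3, iou=0.5)
104 |
105 | for result in results:  # one Results object per frame
106 |     print(result.boxes)  # detection boxes plus the tracker-assigned object IDs
107 | ```
108 |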
--------------------------------------------------------------------------------
/examples/README.md:
--------------------------------------------------------------------------------
1 | This is a list of real-world applications and walkthroughs. These can be folders containing either Python files or notebooks.
2 |
3 | ## Ultralytics YOLO example applications
4 |
5 | | Title | Format | Contributor |
6 | | ------------------------------------------------------------------------ | ------------------ | --------------------------------------------------- |
7 | | [YOLO ONNX detection Inference with C++](./YOLOv8-CPP-Inference) | C++/ONNX | [Justas Bartnykas](https://github.com/JustasBart) |
8 | | [YOLO OpenCV ONNX detection Python](./YOLOv8-OpenCV-ONNX-Python) | OpenCV/Python/ONNX | [Farid Inawan](https://github.com/frdteknikelektro) |
9 | | [YOLO .Net ONNX detection C#](https://www.nuget.org/packages/Yolov8.Net) | C# .Net | [Samuel Stainback](https://github.com/sstainba) |
10 |
11 | ## How can you contribute?
12 |
13 | We're looking for examples, applications and guides from the community. Here's how you can contribute:
14 |
15 | - Make a PR with an `[Example]` prefix in the title after adding your project folder to the examples/ folder of the repository
16 | - The project should satisfy these conditions:
17 | - It should use the ultralytics framework
18 | - It should have a README.md with instructions to run the project
19 | - It should avoid adding large assets or dependencies unless absolutely needed
20 | - The contributor is expected to help out in issues related to their examples
21 |
22 | If you're unsure about any of these requirements, make a PR and we'll be happy to guide you.
23 |
--------------------------------------------------------------------------------
/examples/YOLOv8-CPP-Inference/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 3.5)
2 |
3 | project(Yolov8CPPInference VERSION 0.1)
4 |
5 | set(CMAKE_INCLUDE_CURRENT_DIR ON)
6 |
7 | # CUDA
8 | set(CUDA_TOOLKIT_ROOT_DIR "/usr/local/cuda")
9 | find_package(CUDA 11 REQUIRED)
10 |
11 | set(CMAKE_CUDA_STANDARD 11)
12 | set(CMAKE_CUDA_STANDARD_REQUIRED ON)
13 | # !CUDA
14 |
15 | # OpenCV
16 | find_package(OpenCV REQUIRED)
17 | include_directories(${OpenCV_INCLUDE_DIRS})
18 | # !OpenCV
19 |
20 | set(PROJECT_SOURCES
21 | main.cpp
22 |
23 | inference.h
24 | inference.cpp
25 | )
26 |
27 | add_executable(Yolov8CPPInference ${PROJECT_SOURCES})
28 | target_link_libraries(Yolov8CPPInference ${OpenCV_LIBS})
29 |
--------------------------------------------------------------------------------
/examples/YOLOv8-CPP-Inference/README.md:
--------------------------------------------------------------------------------
1 | # yolov8/yolov5 Inference C++
2 |
3 | Usage:
4 |
5 | ```bash
6 | # git clone ultralytics
7 | pip install .
8 | cd examples/YOLOv8-CPP-Inference
9 | ```
10 |
11 | Add a **yolov8\_.onnx** and/or **yolov5\_.onnx** model(s) to the ultralytics folder.
12 | Edit **main.cpp** to change the **projectBasePath** to match your environment.
13 |
14 | Note that by default the CMake file will try to import the CUDA library to be used with OpenCV's dnn (cuDNN) GPU inference.
15 | If your OpenCV build does not use CUDA/cuDNN, you can remove that import call and run the example on CPU.
16 |
17 | ```bash
18 | mkdir build
19 | cd build
20 | cmake ..
21 | make
22 | ./Yolov8CPPInference
23 | ```
22 |
23 | To export yolov8 models:
24 |
25 | ```bash
26 | yolo export \
27 | model=yolov8s.pt \
28 | imgsz=[480,640] \
29 | format=onnx \
30 | opset=12
31 | ```
32 |
33 | To export yolov5 models:
34 |
35 | ```bash
36 | python3 export.py \
37 | --weights yolov5s.pt \
38 | --img 480 640 \
39 | --include onnx \
40 | --opset 12
41 | ```
42 |
43 | yolov8s.onnx:
44 |
45 | 
46 |
47 | yolov5s.onnx:
48 |
49 | 
50 |
51 | This repository is based on OpenCV's dnn API to run an ONNX-exported model of either yolov5/yolov8 (in theory it should
52 | work for yolov6 and yolov7, but this has not been tested). Note that for this example the networks are exported at a
53 | rectangular (640x480) resolution, but any export resolution would work, although you might want to use the letterBox
54 | approach for square images depending on your use-case.
55 |
56 | The **main** branch version uses Qt as a GUI wrapper; the main interest here is the **Inference** class file,
57 | which shows how to transpose yolov8 model outputs to work like yolov5 outputs.
58 |
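59 | For reference, here is the transpose idea in isolation as a minimal numpy sketch (assuming the usual yolov8 COCO export layout of 4 box coordinates plus 80 class scores per candidate, e.g. a 1x84x8400 output for a square 640 export; the candidate count depends on the export resolution):
60 |
61 | ```python
62 | import numpy as np
63 |
64 | raw = np.random.rand(1, 84, 8400).astype(np.float32)  # stand-in for a yolov8 ONNX output
65 |
66 | preds = raw.transpose(0, 2, 1)  # (1, 8400, 84): yolov5-style layout, one row per candidate box
67 | boxes = preds[0, :, :4]  # cx, cy, w, h
68 | scores = preds[0, :, 4:]  # 80 class scores (yolov8 has no objectness column)
69 | class_ids = scores.argmax(axis=1)
70 | confidences = scores.max(axis=1)
71 | keep = confidences > 0.25  # confidence filter before NMS
72 | print(boxes[keep].shape, class_ids[keep].shape)
73 | ```
74 |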
--------------------------------------------------------------------------------
/examples/YOLOv8-CPP-Inference/inference.h:
--------------------------------------------------------------------------------
1 | #ifndef INFERENCE_H
2 | #define INFERENCE_H
3 |
4 | // Cpp native
5 | #include <fstream>
6 | #include <vector>
7 | #include <string>
8 | #include <random>
9 |
10 | // OpenCV / DNN / Inference
11 | #include <opencv2/imgproc.hpp>
12 | #include <opencv2/opencv.hpp>
13 | #include <opencv2/dnn.hpp>
14 |
15 | struct Detection
16 | {
17 | int class_id{0};
18 | std::string className{};
19 | float confidence{0.0};
20 | cv::Scalar color{};
21 | cv::Rect box{};
22 | };
23 |
24 | class Inference
25 | {
26 | public:
27 | Inference(const std::string &onnxModelPath, const cv::Size &modelInputShape = {640, 640}, const std::string &classesTxtFile = "", const bool &runWithCuda = true);
28 | std::vector<Detection> runInference(const cv::Mat &input);
29 |
30 | private:
31 | void loadClassesFromFile();
32 | void loadOnnxNetwork();
33 | cv::Mat formatToSquare(const cv::Mat &source);
34 |
35 | std::string modelPath{};
36 | std::string classesPath{};
37 | bool cudaEnabled{};
38 |
39 | std::vector<std::string> classes{"person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch", "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"};
40 |
41 | cv::Size2f modelShape{};
42 |
43 | float modelConfidenseThreshold {0.25};
44 | float modelScoreThreshold {0.45};
45 | float modelNMSThreshold {0.50};
46 |
47 | bool letterBoxForSquare = true;
48 |
49 | cv::dnn::Net net;
50 | };
51 |
52 | #endif // INFERENCE_H
53 |
--------------------------------------------------------------------------------
/examples/YOLOv8-CPP-Inference/main.cpp:
--------------------------------------------------------------------------------
1 | #include <iostream>
2 | #include <vector>
3 | #include <string>
4 |
5 | #include <opencv2/opencv.hpp>
6 |
7 | #include "inference.h"
8 |
9 | using namespace std;
10 | using namespace cv;
11 |
12 | int main(int argc, char **argv)
13 | {
14 | std::string projectBasePath = "/home/user/ultralytics"; // Set your ultralytics base path
15 |
16 | bool runOnGPU = true;
17 |
18 | //
19 | // Pass in either:
20 | //
21 | // "yolov8s.onnx" or "yolov5s.onnx"
22 | //
23 | // To run Inference with yolov8/yolov5 (ONNX)
24 | //
25 |
26 | // Note that in this example the classes are hard-coded and 'classes.txt' is a place holder.
27 | Inference inf(projectBasePath + "/yolov8s.onnx", cv::Size(640, 480), "classes.txt", runOnGPU);
28 |
29 | std::vector<std::string> imageNames;
30 | imageNames.push_back(projectBasePath + "/ultralytics/assets/bus.jpg");
31 | imageNames.push_back(projectBasePath + "/ultralytics/assets/zidane.jpg");
32 |
33 | for (int i = 0; i < imageNames.size(); ++i)
34 | {
35 | cv::Mat frame = cv::imread(imageNames[i]);
36 |
37 | // Inference starts here...
38 | std::vector<Detection> output = inf.runInference(frame);
39 |
40 | int detections = output.size();
41 | std::cout << "Number of detections:" << detections << std::endl;
42 |
43 | for (int i = 0; i < detections; ++i)
44 | {
45 | Detection detection = output[i];
46 |
47 | cv::Rect box = detection.box;
48 | cv::Scalar color = detection.color;
49 |
50 | // Detection box
51 | cv::rectangle(frame, box, color, 2);
52 |
53 | // Detection box text
54 | std::string classString = detection.className + ' ' + std::to_string(detection.confidence).substr(0, 4);
55 | cv::Size textSize = cv::getTextSize(classString, cv::FONT_HERSHEY_DUPLEX, 1, 2, 0);
56 | cv::Rect textBox(box.x, box.y - 40, textSize.width + 10, textSize.height + 20);
57 |
58 | cv::rectangle(frame, textBox, color, cv::FILLED);
59 | cv::putText(frame, classString, cv::Point(box.x + 5, box.y - 10), cv::FONT_HERSHEY_DUPLEX, 1, cv::Scalar(0, 0, 0), 2, 0);
60 | }
61 | // Inference ends here...
62 |
63 | // This is only for preview purposes
64 | float scale = 0.8;
65 | cv::resize(frame, frame, cv::Size(frame.cols*scale, frame.rows*scale));
66 | cv::imshow("Inference", frame);
67 |
68 | cv::waitKey(-1);
69 | }
70 | }
71 |
--------------------------------------------------------------------------------
/examples/YOLOv8-OpenCV-ONNX-Python/README.md:
--------------------------------------------------------------------------------
1 | # YOLOv8 - OpenCV
2 |
3 | Implementation of YOLOv8 inference on OpenCV using the ONNX format.
4 |
5 | Simply clone and run:
6 |
7 | ```bash
8 | pip install -r requirements.txt
9 | python main.py
10 | ```
11 |
12 | If you are starting from scratch:
13 |
14 | ```bash
15 | pip install ultralytics
16 | yolo export model=yolov8n.pt imgsz=640 format=onnx opset=12
17 | ```
18 |
19 | _\*Make sure to include `opset=12`_
20 |
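21 | Before running `main.py`, you can sanity-check that your OpenCV build can load the exported model (a minimal sketch):
22 |
23 | ```python
24 | import cv2
25 |
26 | net = cv2.dnn.readNetFromONNX('yolov8n.onnx')  # raises if the export is incompatible
27 | print('Loaded ONNX model with', len(net.getLayerNames()), 'layers')
28 | ```
29 |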
--------------------------------------------------------------------------------
/examples/YOLOv8-OpenCV-ONNX-Python/main.py:
--------------------------------------------------------------------------------
1 | import cv2.dnn
2 | import numpy as np
3 |
4 | from ultralytics.yolo.utils import ROOT, yaml_load
5 | from ultralytics.yolo.utils.checks import check_yaml
6 |
7 | CLASSES = yaml_load(check_yaml('coco128.yaml'))['names']
8 |
9 | colors = np.random.uniform(0, 255, size=(len(CLASSES), 3))
10 |
11 |
12 | def draw_bounding_box(img, class_id, confidence, x, y, x_plus_w, y_plus_h):
13 | label = f'{CLASSES[class_id]} ({confidence:.2f})'
14 | color = colors[class_id]
15 | cv2.rectangle(img, (x, y), (x_plus_w, y_plus_h), color, 2)
16 | cv2.putText(img, label, (x - 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
17 |
18 |
19 | def main():
20 | model: cv2.dnn.Net = cv2.dnn.readNetFromONNX('yolov8n.onnx')
21 | original_image: np.ndarray = cv2.imread(str(ROOT / 'assets/bus.jpg'))
22 | [height, width, _] = original_image.shape
23 | length = max((height, width))
24 | image = np.zeros((length, length, 3), np.uint8)  # square black canvas
25 | image[0:height, 0:width] = original_image  # pad to square so the 640x640 resize keeps the aspect ratio
26 | scale = length / 640  # factor to map 640x640 network coordinates back to the original image
27 |
28 | blob = cv2.dnn.blobFromImage(image, scalefactor=1 / 255, size=(640, 640))
29 | model.setInput(blob)
30 | outputs = model.forward()
31 |
32 | outputs = np.array([cv2.transpose(outputs[0])])  # (84, 8400) -> (8400, 84): one row per candidate box
33 | rows = outputs.shape[1]
34 |
35 | boxes = []
36 | scores = []
37 | class_ids = []
38 |
39 | for i in range(rows):
40 | classes_scores = outputs[0][i][4:]  # 80 class scores; YOLOv8 has no separate objectness score
41 | (minScore, maxScore, minClassLoc, (x, maxClassIndex)) = cv2.minMaxLoc(classes_scores)
42 | if maxScore >= 0.25:
43 | box = [
44 | outputs[0][i][0] - (0.5 * outputs[0][i][2]), outputs[0][i][1] - (0.5 * outputs[0][i][3]),  # cx, cy -> left, top
45 | outputs[0][i][2], outputs[0][i][3]]  # width, height
46 | boxes.append(box)
47 | scores.append(maxScore)
48 | class_ids.append(maxClassIndex)
49 |
50 | result_boxes = cv2.dnn.NMSBoxes(boxes, scores, 0.25, 0.45, 0.5)  # score threshold, NMS threshold, eta
51 |
52 | detections = []
53 | for i in range(len(result_boxes)):
54 | index = result_boxes[i]
55 | box = boxes[index]
56 | detection = {
57 | 'class_id': class_ids[index],
58 | 'class_name': CLASSES[class_ids[index]],
59 | 'confidence': scores[index],
60 | 'box': box,
61 | 'scale': scale}
62 | detections.append(detection)
63 | draw_bounding_box(original_image, class_ids[index], scores[index], round(box[0] * scale), round(box[1] * scale),
64 | round((box[0] + box[2]) * scale), round((box[1] + box[3]) * scale))
65 |
66 | cv2.imshow('image', original_image)
67 | cv2.waitKey(0)
68 | cv2.destroyAllWindows()
69 |
70 | return detections
71 |
72 |
73 | if __name__ == '__main__':
74 | main()
75 |
--------------------------------------------------------------------------------
/mkdocs.yml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | site_name: YOLOv8 Docs
4 | repo_url: https://github.com/ultralytics/ultralytics
5 | edit_uri: https://github.com/ultralytics/ultralytics/tree/main/docs
6 | repo_name: ultralytics/ultralytics
7 | remote_name: https://github.com/ultralytics/docs
8 |
9 | theme:
10 | name: "material"
11 | logo: https://github.com/ultralytics/assets/raw/main/logo/Ultralytics_Logotype_Reverse.svg
12 | favicon: https://github.com/ultralytics/assets/raw/main/logo/favicon-yolo.ico
13 | font:
14 | text: Roboto
15 |
16 | palette:
17 | # Palette toggle for light mode
18 | - scheme: default
19 | # primary: grey
20 | toggle:
21 | icon: material/brightness-7
22 | name: Switch to dark mode
23 |
24 | # Palette toggle for dark mode
25 | - scheme: slate
26 | # primary: black
27 | toggle:
28 | icon: material/brightness-4
29 | name: Switch to light mode
30 | features:
31 | - content.action.edit
32 | - content.code.annotate
33 | - content.tooltips
34 | - search.highlight
35 | - search.share
36 | - search.suggest
37 | - toc.follow
38 | - navigation.top
39 | - navigation.expand
40 | - navigation.footer
41 | - content.tabs.link # all code tabs change simultaneously
42 |
43 | # Customization
44 | copyright: Ultralytics 2023. All rights reserved.
45 | extra:
46 | # version:
47 | # provider: mike # version drop-down menu
48 | analytics:
49 | provider: google
50 | property: G-2M5EHKC0BH
51 | social:
52 | - icon: fontawesome/brands/github
53 | link: https://github.com/ultralytics
54 | - icon: fontawesome/brands/linkedin
55 | link: https://www.linkedin.com/company/ultralytics
56 | - icon: fontawesome/brands/twitter
57 | link: https://twitter.com/ultralytics
58 | - icon: fontawesome/brands/youtube
59 | link: https://www.youtube.com/ultralytics
60 | - icon: fontawesome/brands/docker
61 | link: https://hub.docker.com/r/ultralytics/ultralytics/
62 | - icon: fontawesome/brands/python
63 | link: https://pypi.org/project/ultralytics/
64 |
65 | extra_css:
66 | - stylesheets/style.css
67 |
68 | markdown_extensions:
69 | # Div text decorators
70 | - admonition
71 | - pymdownx.details
72 | - pymdownx.superfences
73 | - tables
74 | - attr_list
75 | - def_list
76 | # Syntax highlight
77 | - pymdownx.highlight:
78 | anchor_linenums: true
79 | - pymdownx.inlinehilite
80 | - pymdownx.snippets
81 |
82 | # Button
83 | - attr_list
84 |
85 | # Content tabs
86 | - pymdownx.superfences
87 | - pymdownx.tabbed:
88 | alternate_style: true
89 |
90 | # Highlight
91 | - pymdownx.critic
92 | - pymdownx.caret
93 | - pymdownx.keys
94 | - pymdownx.mark
95 | - pymdownx.tilde
96 |
97 | plugins:
98 | - mkdocstrings
99 | - search
100 |
101 | # Primary navigation
102 | nav:
103 | - Home: index.md
104 | - Quickstart: quickstart.md
105 | - Tasks:
106 | - Detection: tasks/detection.md
107 | - Segmentation: tasks/segmentation.md
108 | - Multi-Object Tracking: tasks/tracking.md
109 | - Classification: tasks/classification.md
110 | - Usage:
111 | - CLI: cli.md
112 | - Python: python.md
113 | - Predict: predict.md
114 | - Configuration: cfg.md
115 | - Customization using callbacks: callbacks.md
116 | - Advanced customization: engine.md
117 | - Ultralytics HUB: hub.md
118 | - iOS and Android App: app.md
119 | - Reference:
120 | - Engine:
121 | - Model: reference/model.md
122 | - Trainer: reference/base_trainer.md
123 | - Validator: reference/base_val.md
124 | - Predictor: reference/base_pred.md
125 | - Exporter: reference/exporter.md
126 | - Results: reference/results.md
127 | - ultralytics.nn: reference/nn.md
128 | - Operations: reference/ops.md
129 | - Security: SECURITY.md
130 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | # Ultralytics requirements
2 | # Usage: pip install -r requirements.txt
3 |
4 | # Base ----------------------------------------
5 | matplotlib>=3.2.2
6 | numpy>=1.18.5
7 | opencv-python>=4.6.0
8 | Pillow>=7.1.2
9 | PyYAML>=5.3.1
10 | requests>=2.23.0
11 | scipy>=1.4.1
12 | torch>=1.7.0
13 | torchvision>=0.8.1
14 | tqdm>=4.64.0
15 |
16 | # Logging -------------------------------------
17 | tensorboard>=2.4.1
18 | # clearml
19 | # comet
20 |
21 | # Plotting ------------------------------------
22 | pandas>=1.1.4
23 | seaborn>=0.11.0
24 |
25 | # Export --------------------------------------
26 | # coremltools>=6.0 # CoreML export
27 | # onnx>=1.12.0 # ONNX export
28 | # onnxsim>=0.4.1 # ONNX simplifier
29 | # nvidia-pyindex # TensorRT export
30 | # nvidia-tensorrt # TensorRT export
31 | # scikit-learn==0.19.2 # CoreML quantization
32 | # tensorflow>=2.4.1 # TF exports (-cpu, -aarch64, -macos)
33 | # tflite-support
34 | # tensorflowjs>=3.9.0 # TF.js export
35 | # openvino-dev>=2022.3 # OpenVINO export
36 |
37 | # Extras --------------------------------------
38 | psutil # system utilization
39 | thop>=0.1.1 # FLOPs computation
40 | # ipython # interactive notebook
41 | # albumentations>=1.0.3
42 | # pycocotools>=2.0.6 # COCO mAP
43 | # roboflow
44 | certifi>=2022.12.7 # not directly required, pinned by Snyk to avoid a vulnerability
45 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | # Project-wide configuration file, can be used for package metadata and other tool configurations
2 | # Example usage: global configuration for PEP8 (via flake8) setting or default pytest arguments
3 | # Local usage: pip install pre-commit, pre-commit run --all-files
4 |
5 | [metadata]
6 | license_files = LICENSE
7 | description_file = README.md
8 |
9 | [tool:pytest]
10 | norecursedirs =
11 | .git
12 | dist
13 | build
14 | addopts =
15 | --doctest-modules
16 | --durations=25
17 | --color=yes
18 |
19 | [flake8]
20 | max-line-length = 120
21 | exclude = .tox,*.egg,build,temp
22 | select = E,W,F
23 | doctests = True
24 | verbose = 2
25 | # https://pep8.readthedocs.io/en/latest/intro.html#error-codes
26 | format = pylint
27 | # see: https://www.flake8rules.com/
28 | ignore = E731,F405,E402,W504,E501
29 | # E731: Do not assign a lambda expression, use a def
30 | # F405: name may be undefined, or defined from star imports: module
31 | # E402: module level import not at top of file
32 | # W504: line break after binary operator
33 | # E501: line too long
34 | # removed:
35 | # F401: module imported but unused
36 | # E231: missing whitespace after ‘,’, ‘;’, or ‘:’
37 | # E127: continuation line over-indented for visual indent
38 | # F403: ‘from module import *’ used; unable to detect undefined names
39 |
40 |
41 | [isort]
42 | # https://pycqa.github.io/isort/docs/configuration/options.html
43 | line_length = 120
44 | # see: https://pycqa.github.io/isort/docs/configuration/multi_line_output_modes.html
45 | multi_line_output = 0
46 |
47 | [yapf]
48 | based_on_style = pep8
49 | spaces_before_comment = 2
50 | COLUMN_LIMIT = 120
51 | COALESCE_BRACKETS = True
52 | SPACES_AROUND_POWER_OPERATOR = True
53 | SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET = True
54 | SPLIT_BEFORE_CLOSING_BRACKET = False
55 | SPLIT_BEFORE_FIRST_ARGUMENT = False
56 | # EACH_DICT_ENTRY_ON_SEPARATE_LINE = False
57 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | import re
4 | from pathlib import Path
5 |
6 | import pkg_resources as pkg
7 | from setuptools import find_packages, setup
8 |
9 | # Settings
10 | FILE = Path(__file__).resolve()
11 | PARENT = FILE.parent # root directory
12 | README = (PARENT / 'README.md').read_text(encoding='utf-8')
13 | REQUIREMENTS = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements((PARENT / 'requirements.txt').read_text())]
14 | PKG_REQUIREMENTS = ['sentry_sdk'] # pip-only requirements
15 |
16 |
17 | def get_version():
18 | file = PARENT / 'ultralytics/__init__.py'
19 | return re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', file.read_text(encoding='utf-8'), re.M)[1]
20 |
21 |
22 | setup(
23 | name='ultralytics', # name of pypi package
24 | version=get_version(), # version of pypi package
25 | python_requires='>=3.7',
26 | license='GPL-3.0',
27 | description='Ultralytics YOLOv8',
28 | long_description=README,
29 | long_description_content_type='text/markdown',
30 | url='https://github.com/ultralytics/ultralytics',
31 | project_urls={
32 | 'Bug Reports': 'https://github.com/ultralytics/ultralytics/issues',
33 | 'Funding': 'https://ultralytics.com',
34 | 'Source': 'https://github.com/ultralytics/ultralytics'},
35 | author='Ultralytics',
36 | author_email='hello@ultralytics.com',
37 | packages=find_packages(), # required
38 | include_package_data=True,
39 | install_requires=REQUIREMENTS + PKG_REQUIREMENTS,
40 | extras_require={
41 | 'dev': ['check-manifest', 'pytest', 'pytest-cov', 'coverage', 'mkdocs-material', 'mkdocstrings[python]'],
42 | 'export': ['coremltools>=6.0', 'onnx', 'onnxsim', 'onnxruntime', 'openvino-dev>=2022.3'],
43 | 'tf': ['onnx2tf', 'sng4onnx', 'tflite_support', 'tensorflow']},
44 | classifiers=[
45 | 'Development Status :: 4 - Beta',
46 | 'Intended Audience :: Developers',
47 | 'Intended Audience :: Education',
48 | 'Intended Audience :: Science/Research',
49 | 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
50 | 'Programming Language :: Python :: 3',
51 | 'Programming Language :: Python :: 3.7',
52 | 'Programming Language :: Python :: 3.8',
53 | 'Programming Language :: Python :: 3.9',
54 | 'Programming Language :: Python :: 3.10',
55 | 'Programming Language :: Python :: 3.11',
56 | 'Topic :: Software Development',
57 | 'Topic :: Scientific/Engineering',
58 | 'Topic :: Scientific/Engineering :: Artificial Intelligence',
59 | 'Topic :: Scientific/Engineering :: Image Recognition',
60 | 'Operating System :: POSIX :: Linux',
61 | 'Operating System :: MacOS',
62 | 'Operating System :: Microsoft :: Windows', ],
63 | keywords='machine-learning, deep-learning, vision, ML, DL, AI, YOLO, YOLOv3, YOLOv5, YOLOv8, HUB, Ultralytics',
64 | entry_points={
65 | 'console_scripts': ['yolo = ultralytics.yolo.cfg:entrypoint', 'ultralytics = ultralytics.yolo.cfg:entrypoint']})
66 |
--------------------------------------------------------------------------------
/tests/test_cli.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | import subprocess
4 | from pathlib import Path
5 |
6 | from ultralytics.yolo.utils import LINUX, ONLINE, ROOT, SETTINGS
7 |
8 | MODEL = Path(SETTINGS['weights_dir']) / 'yolov8n'
9 | CFG = 'yolov8n'
10 |
11 |
12 | def run(cmd):
13 | # Run a subprocess command with check=True
14 | subprocess.run(cmd.split(), check=True)
15 |
16 |
17 | def test_special_modes():
18 | run('yolo checks')
19 | run('yolo settings')
20 | run('yolo help')
21 |
22 |
23 | # Train checks ---------------------------------------------------------------------------------------------------------
24 | def test_train_det():
25 | run(f'yolo train detect model={CFG}.yaml data=coco8.yaml imgsz=32 epochs=1 v5loader')
26 |
27 |
28 | def test_train_seg():
29 | run(f'yolo train segment model={CFG}-seg.yaml data=coco8-seg.yaml imgsz=32 epochs=1')
30 |
31 |
32 | def test_train_cls():
33 | run(f'yolo train classify model={CFG}-cls.yaml data=imagenet10 imgsz=32 epochs=1')
34 |
35 |
36 | # Val checks -----------------------------------------------------------------------------------------------------------
37 | def test_val_detect():
38 | run(f'yolo val detect model={MODEL}.pt data=coco8.yaml imgsz=32')
39 |
40 |
41 | def test_val_segment():
42 | run(f'yolo val segment model={MODEL}-seg.pt data=coco8-seg.yaml imgsz=32')
43 |
44 |
45 | def test_val_classify():
46 | run(f'yolo val classify model={MODEL}-cls.pt data=imagenet10 imgsz=32')
47 |
48 |
49 | # Predict checks -------------------------------------------------------------------------------------------------------
50 | def test_predict_detect():
51 | run(f"yolo predict model={MODEL}.pt source={ROOT / 'assets'} imgsz=32 save save_crop save_txt")
52 | if ONLINE:
53 | run(f'yolo predict model={MODEL}.pt source=https://ultralytics.com/images/bus.jpg imgsz=32')
54 | run(f'yolo predict model={MODEL}.pt source=https://ultralytics.com/assets/decelera_landscape_min.mov imgsz=32')
55 | run(f'yolo predict model={MODEL}.pt source=https://ultralytics.com/assets/decelera_portrait_min.mov imgsz=32')
56 |
57 |
58 | def test_predict_segment():
59 | run(f"yolo predict model={MODEL}-seg.pt source={ROOT / 'assets'} imgsz=32 save")
60 |
61 |
62 | def test_predict_classify():
63 | run(f"yolo predict model={MODEL}-cls.pt source={ROOT / 'assets'} imgsz=32 save")
64 |
65 |
66 | # Export checks --------------------------------------------------------------------------------------------------------
67 | def test_export_detect_torchscript():
68 | run(f'yolo export model={MODEL}.pt format=torchscript')
69 |
70 |
71 | def test_export_segment_torchscript():
72 | run(f'yolo export model={MODEL}-seg.pt format=torchscript')
73 |
74 |
75 | def test_export_classify_torchscript():
76 | run(f'yolo export model={MODEL}-cls.pt format=torchscript')
77 |
78 |
79 | def test_export_detect_edgetpu(enabled=False):
80 | if enabled and LINUX:
81 | run(f'yolo export model={MODEL}.pt format=edgetpu')
82 |
--------------------------------------------------------------------------------
/tests/test_engine.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | from pathlib import Path
4 |
5 | from ultralytics.yolo.cfg import get_cfg
6 | from ultralytics.yolo.utils import DEFAULT_CFG, ROOT, SETTINGS
7 | from ultralytics.yolo.v8 import classify, detect, segment
8 |
9 | CFG_DET = 'yolov8n.yaml'
10 | CFG_SEG = 'yolov8n-seg.yaml'
11 | CFG_CLS = 'squeezenet1_0'
12 | CFG = get_cfg(DEFAULT_CFG)
13 | MODEL = Path(SETTINGS['weights_dir']) / 'yolov8n'
14 | SOURCE = ROOT / 'assets'
15 |
16 |
17 | def test_detect():
18 | overrides = {'data': 'coco8.yaml', 'model': CFG_DET, 'imgsz': 32, 'epochs': 1, 'save': False}
19 | CFG.data = 'coco8.yaml'
20 |
21 | # Trainer
22 | trainer = detect.DetectionTrainer(overrides=overrides)
23 | trainer.train()
24 |
25 | # Validator
26 | val = detect.DetectionValidator(args=CFG)
27 | val(model=trainer.best) # validate best.pt
28 |
29 | # Predictor
30 | pred = detect.DetectionPredictor(overrides={'imgsz': [64, 64]})
31 | result = pred(source=SOURCE, model=f'{MODEL}.pt')
32 | assert len(result), 'predictor test failed'
33 |
34 | overrides['resume'] = trainer.last
35 | trainer = detect.DetectionTrainer(overrides=overrides)
36 | try:
37 | trainer.train()
38 | except Exception as e:
39 | print(f'Expected exception caught: {e}')
40 | return
41 |
42 | raise Exception('Resume test failed!')
43 |
44 |
45 | def test_segment():
46 | overrides = {'data': 'coco8-seg.yaml', 'model': CFG_SEG, 'imgsz': 32, 'epochs': 1, 'save': False}
47 | CFG.data = 'coco8-seg.yaml'
48 | CFG.v5loader = False
49 | # YOLO(CFG_SEG).train(**overrides) # works
50 |
51 | # trainer
52 | trainer = segment.SegmentationTrainer(overrides=overrides)
53 | trainer.train()
54 |
55 | # Validator
56 | val = segment.SegmentationValidator(args=CFG)
57 | val(model=trainer.best) # validate best.pt
58 |
59 | # Predictor
60 | pred = segment.SegmentationPredictor(overrides={'imgsz': [64, 64]})
61 | result = pred(source=SOURCE, model=f'{MODEL}-seg.pt')
62 | assert len(result), 'predictor test failed'
63 |
64 | # Test resume
65 | overrides['resume'] = trainer.last
66 | trainer = segment.SegmentationTrainer(overrides=overrides)
67 | try:
68 | trainer.train()
69 | except Exception as e:
70 | print(f'Expected exception caught: {e}')
71 | return
72 |
73 | raise Exception('Resume test failed!')
74 |
75 |
76 | def test_classify():
77 | overrides = {
78 | 'data': 'imagenet10',
79 | 'model': 'yolov8n-cls.yaml',
80 | 'imgsz': 32,
81 | 'epochs': 1,
82 | 'batch': 64,
83 | 'save': False}
84 | CFG.data = 'imagenet10'
85 | CFG.imgsz = 32
86 | CFG.batch = 64
87 | # YOLO(CFG_SEG).train(**overrides) # works
88 |
89 | # Trainer
90 | trainer = classify.ClassificationTrainer(overrides=overrides)
91 | trainer.train()
92 |
93 | # Validator
94 | val = classify.ClassificationValidator(args=CFG)
95 | val(model=trainer.best)
96 |
97 | # Predictor
98 | pred = classify.ClassificationPredictor(overrides={'imgsz': [64, 64]})
99 | result = pred(source=SOURCE, model=trainer.best)
100 | assert len(result), 'predictor test failed'
101 |
--------------------------------------------------------------------------------
/ultralytics/__init__.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | __version__ = '8.0.49'
4 |
5 | from ultralytics.yolo.engine.model import YOLO
6 | from ultralytics.yolo.utils.checks import check_yolo as checks
7 |
8 | __all__ = '__version__', 'YOLO', 'checks' # allow simpler import
9 |
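10 | # Illustrative usage of the re-exports above (a sketch, not executed on import):
11 | #   from ultralytics import YOLO, checks
12 | #   checks()                    # print environment/system information
13 | #   model = YOLO('yolov8n.pt')  # load a model via the top-level YOLO class
14 |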
--------------------------------------------------------------------------------
/ultralytics/assets/bus.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wudashuo/yolov8/3861e6c82aaa1bbb214f020ece3a4bd4712eacbe/ultralytics/assets/bus.jpg
--------------------------------------------------------------------------------
/ultralytics/assets/zidane.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wudashuo/yolov8/3861e6c82aaa1bbb214f020ece3a4bd4712eacbe/ultralytics/assets/zidane.jpg
--------------------------------------------------------------------------------
/ultralytics/datasets/Argoverse.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | # Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ by Argo AI
3 | # Example usage: yolo train data=Argoverse.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── Argoverse ← downloads here (31.3 GB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/Argoverse # dataset root dir
12 | train: Argoverse-1.1/images/train/ # train images (relative to 'path') 39384 images
13 | val: Argoverse-1.1/images/val/ # val images (relative to 'path') 15062 images
14 | test: Argoverse-1.1/images/test/ # test images (optional) https://eval.ai/web/challenges/challenge-page/800/overview
15 |
16 | # Classes
17 | names:
18 | 0: person
19 | 1: bicycle
20 | 2: car
21 | 3: motorcycle
22 | 4: bus
23 | 5: truck
24 | 6: traffic_light
25 | 7: stop_sign
26 |
27 |
28 | # Download script/URL (optional) ---------------------------------------------------------------------------------------
29 | download: |
30 | import json
31 | from tqdm import tqdm
32 | from ultralytics.yolo.utils.downloads import download
33 | from pathlib import Path
34 |
35 | def argoverse2yolo(set):
36 | labels = {}
37 | a = json.load(open(set, "rb"))
38 | for annot in tqdm(a['annotations'], desc=f"Converting {set} to YOLOv5 format..."):
39 | img_id = annot['image_id']
40 | img_name = a['images'][img_id]['name']
41 | img_label_name = f'{img_name[:-3]}txt'
42 |
43 | cls = annot['category_id'] # instance class id
44 | x_center, y_center, width, height = annot['bbox']
45 | x_center = (x_center + width / 2) / 1920.0 # offset and scale
46 | y_center = (y_center + height / 2) / 1200.0 # offset and scale
47 | width /= 1920.0 # scale
48 | height /= 1200.0 # scale
49 |
50 | img_dir = set.parents[2] / 'Argoverse-1.1' / 'labels' / a['seq_dirs'][a['images'][annot['image_id']]['sid']]
51 | if not img_dir.exists():
52 | img_dir.mkdir(parents=True, exist_ok=True)
53 |
54 | k = str(img_dir / img_label_name)
55 | if k not in labels:
56 | labels[k] = []
57 | labels[k].append(f"{cls} {x_center} {y_center} {width} {height}\n")
58 |
59 | for k in labels:
60 | with open(k, "w") as f:
61 | f.writelines(labels[k])
62 |
63 |
64 | # Download
65 | dir = Path(yaml['path']) # dataset root dir
66 | urls = ['https://argoverse-hd.s3.us-east-2.amazonaws.com/Argoverse-HD-Full.zip']
67 | download(urls, dir=dir)
68 |
69 | # Convert
70 | annotations_dir = 'Argoverse-HD/annotations/'
71 | (dir / 'Argoverse-1.1' / 'tracking').rename(dir / 'Argoverse-1.1' / 'images') # rename 'tracking' to 'images'
72 | for d in "train.json", "val.json":
73 | argoverse2yolo(dir / annotations_dir / d)  # convert Argoverse annotations to YOLO labels
74 |
--------------------------------------------------------------------------------
/ultralytics/datasets/GlobalWheat2020.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | # Global Wheat 2020 dataset http://www.global-wheat.com/ by University of Saskatchewan
3 | # Example usage: yolo train data=GlobalWheat2020.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── GlobalWheat2020 ← downloads here (7.0 GB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/GlobalWheat2020 # dataset root dir
12 | train: # train images (relative to 'path') 3422 images
13 | - images/arvalis_1
14 | - images/arvalis_2
15 | - images/arvalis_3
16 | - images/ethz_1
17 | - images/rres_1
18 | - images/inrae_1
19 | - images/usask_1
20 | val: # val images (relative to 'path') 748 images (WARNING: train set contains ethz_1)
21 | - images/ethz_1
22 | test: # test images (optional) 1276 images
23 | - images/utokyo_1
24 | - images/utokyo_2
25 | - images/nau_1
26 | - images/uq_1
27 |
28 | # Classes
29 | names:
30 | 0: wheat_head
31 |
32 |
33 | # Download script/URL (optional) ---------------------------------------------------------------------------------------
34 | download: |
35 | from ultralytics.yolo.utils.downloads import download
36 | from pathlib import Path
37 |
38 | # Download
39 | dir = Path(yaml['path']) # dataset root dir
40 | urls = ['https://zenodo.org/record/4298502/files/global-wheat-codalab-official.zip',
41 | 'https://github.com/ultralytics/yolov5/releases/download/v1.0/GlobalWheat2020_labels.zip']
42 | download(urls, dir=dir)
43 |
44 | # Make Directories
45 | for p in 'annotations', 'images', 'labels':
46 | (dir / p).mkdir(parents=True, exist_ok=True)
47 |
48 | # Move
49 | for p in 'arvalis_1', 'arvalis_2', 'arvalis_3', 'ethz_1', 'rres_1', 'inrae_1', 'usask_1', \
50 | 'utokyo_1', 'utokyo_2', 'nau_1', 'uq_1':
51 | (dir / p).rename(dir / 'images' / p) # move to /images
52 | f = (dir / p).with_suffix('.json') # json file
53 | if f.exists():
54 | f.rename((dir / 'annotations' / p).with_suffix('.json')) # move to /annotations
55 |
--------------------------------------------------------------------------------
/ultralytics/datasets/SKU-110K.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | # SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 by Trax Retail
3 | # Example usage: yolo train data=SKU-110K.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── SKU-110K ← downloads here (13.6 GB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/SKU-110K # dataset root dir
12 | train: train.txt # train images (relative to 'path') 8219 images
13 | val: val.txt # val images (relative to 'path') 588 images
14 | test: test.txt # test images (optional) 2936 images
15 |
16 | # Classes
17 | names:
18 | 0: object
19 |
20 |
21 | # Download script/URL (optional) ---------------------------------------------------------------------------------------
22 | download: |
23 | import shutil
24 | from pathlib import Path
25 |
26 | import numpy as np
27 | import pandas as pd
28 | from tqdm import tqdm
29 |
30 | from ultralytics.yolo.utils.downloads import download
31 | from ultralytics.yolo.utils.ops import xyxy2xywh
32 |
33 | # Download
34 | dir = Path(yaml['path']) # dataset root dir
35 | parent = Path(dir.parent) # download dir
36 | urls = ['http://trax-geometry.s3.amazonaws.com/cvpr_challenge/SKU110K_fixed.tar.gz']
37 | download(urls, dir=parent)
38 |
39 | # Rename directories
40 | if dir.exists():
41 | shutil.rmtree(dir)
42 | (parent / 'SKU110K_fixed').rename(dir) # rename dir
43 | (dir / 'labels').mkdir(parents=True, exist_ok=True) # create labels dir
44 |
45 | # Convert labels
46 | names = 'image', 'x1', 'y1', 'x2', 'y2', 'class', 'image_width', 'image_height' # column names
47 | for d in 'annotations_train.csv', 'annotations_val.csv', 'annotations_test.csv':
48 | x = pd.read_csv(dir / 'annotations' / d, names=names).values # annotations
49 | images, unique_images = x[:, 0], np.unique(x[:, 0])
50 | with open((dir / d).with_suffix('.txt').__str__().replace('annotations_', ''), 'w') as f:
51 | f.writelines(f'./images/{s}\n' for s in unique_images)
52 | for im in tqdm(unique_images, desc=f'Converting {dir / d}'):
53 | cls = 0 # single-class dataset
54 | with open((dir / 'labels' / im).with_suffix('.txt'), 'a') as f:
55 | for r in x[images == im]:
56 | w, h = r[6], r[7] # image width, height
57 | xywh = xyxy2xywh(np.array([[r[1] / w, r[2] / h, r[3] / w, r[4] / h]]))[0] # instance
58 | f.write(f"{cls} {xywh[0]:.5f} {xywh[1]:.5f} {xywh[2]:.5f} {xywh[3]:.5f}\n") # write label
59 |
--------------------------------------------------------------------------------
/ultralytics/datasets/VOC.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | # PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC by University of Oxford
3 | # Example usage: yolo train data=VOC.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── VOC ← downloads here (2.8 GB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/VOC
12 | train: # train images (relative to 'path') 16551 images
13 | - images/train2012
14 | - images/train2007
15 | - images/val2012
16 | - images/val2007
17 | val: # val images (relative to 'path') 4952 images
18 | - images/test2007
19 | test: # test images (optional)
20 | - images/test2007
21 |
22 | # Classes
23 | names:
24 | 0: aeroplane
25 | 1: bicycle
26 | 2: bird
27 | 3: boat
28 | 4: bottle
29 | 5: bus
30 | 6: car
31 | 7: cat
32 | 8: chair
33 | 9: cow
34 | 10: diningtable
35 | 11: dog
36 | 12: horse
37 | 13: motorbike
38 | 14: person
39 | 15: pottedplant
40 | 16: sheep
41 | 17: sofa
42 | 18: train
43 | 19: tvmonitor
44 |
45 |
46 | # Download script/URL (optional) ---------------------------------------------------------------------------------------
47 | download: |
48 | import xml.etree.ElementTree as ET
49 |
50 | from tqdm import tqdm
51 | from ultralytics.yolo.utils.downloads import download
52 | from pathlib import Path
53 |
54 | def convert_label(path, lb_path, year, image_id):
55 | def convert_box(size, box):
56 | dw, dh = 1. / size[0], 1. / size[1]
57 | x, y, w, h = (box[0] + box[1]) / 2.0 - 1, (box[2] + box[3]) / 2.0 - 1, box[1] - box[0], box[3] - box[2]
58 | return x * dw, y * dh, w * dw, h * dh
59 |
60 | in_file = open(path / f'VOC{year}/Annotations/{image_id}.xml')
61 | out_file = open(lb_path, 'w')
62 | tree = ET.parse(in_file)
63 | root = tree.getroot()
64 | size = root.find('size')
65 | w = int(size.find('width').text)
66 | h = int(size.find('height').text)
67 |
68 | names = list(yaml['names'].values()) # names list
69 | for obj in root.iter('object'):
70 | cls = obj.find('name').text
71 | if cls in names and int(obj.find('difficult').text) != 1:
72 | xmlbox = obj.find('bndbox')
73 | bb = convert_box((w, h), [float(xmlbox.find(x).text) for x in ('xmin', 'xmax', 'ymin', 'ymax')])
74 | cls_id = names.index(cls) # class id
75 | out_file.write(" ".join([str(a) for a in (cls_id, *bb)]) + '\n')
76 |
77 |
78 | # Download
79 | dir = Path(yaml['path']) # dataset root dir
80 | url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/'
81 | urls = [f'{url}VOCtrainval_06-Nov-2007.zip', # 446MB, 5012 images
82 | f'{url}VOCtest_06-Nov-2007.zip', # 438MB, 4953 images
83 | f'{url}VOCtrainval_11-May-2012.zip'] # 1.95GB, 17126 images
84 | download(urls, dir=dir / 'images', curl=True, threads=3)
85 |
86 | # Convert
87 | path = dir / 'images/VOCdevkit'
88 | for year, image_set in ('2012', 'train'), ('2012', 'val'), ('2007', 'train'), ('2007', 'val'), ('2007', 'test'):
89 | imgs_path = dir / 'images' / f'{image_set}{year}'
90 | lbs_path = dir / 'labels' / f'{image_set}{year}'
91 | imgs_path.mkdir(exist_ok=True, parents=True)
92 | lbs_path.mkdir(exist_ok=True, parents=True)
93 |
94 | with open(path / f'VOC{year}/ImageSets/Main/{image_set}.txt') as f:
95 | image_ids = f.read().strip().split()
96 | for id in tqdm(image_ids, desc=f'{image_set}{year}'):
97 | f = path / f'VOC{year}/JPEGImages/{id}.jpg' # old img path
98 | lb_path = (lbs_path / f.name).with_suffix('.txt') # new label path
99 | f.rename(imgs_path / f.name) # move image
100 | convert_label(path, lb_path, year, id) # convert labels to YOLO format
101 |
--------------------------------------------------------------------------------
/ultralytics/datasets/VisDrone.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | # VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset by Tianjin University
3 | # Example usage: yolo train data=VisDrone.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── VisDrone ← downloads here (2.3 GB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/VisDrone # dataset root dir
12 | train: VisDrone2019-DET-train/images # train images (relative to 'path') 6471 images
13 | val: VisDrone2019-DET-val/images # val images (relative to 'path') 548 images
14 | test: VisDrone2019-DET-test-dev/images # test images (optional) 1610 images
15 |
16 | # Classes
17 | names:
18 | 0: pedestrian
19 | 1: people
20 | 2: bicycle
21 | 3: car
22 | 4: van
23 | 5: truck
24 | 6: tricycle
25 | 7: awning-tricycle
26 | 8: bus
27 | 9: motor
28 |
29 |
30 | # Download script/URL (optional) ---------------------------------------------------------------------------------------
31 | download: |
32 | import os
33 | from pathlib import Path
34 |
35 | from ultralytics.yolo.utils.downloads import download
36 |
37 | def visdrone2yolo(dir):
38 | from PIL import Image
39 | from tqdm import tqdm
40 |
41 | def convert_box(size, box):
42 | # Convert VisDrone box to YOLO xywh box
43 | dw = 1. / size[0]
44 | dh = 1. / size[1]
45 | return (box[0] + box[2] / 2) * dw, (box[1] + box[3] / 2) * dh, box[2] * dw, box[3] * dh
46 |
47 | (dir / 'labels').mkdir(parents=True, exist_ok=True) # make labels directory
48 | pbar = tqdm((dir / 'annotations').glob('*.txt'), desc=f'Converting {dir}')
49 | for f in pbar:
50 | img_size = Image.open((dir / 'images' / f.name).with_suffix('.jpg')).size
51 | lines = []
52 | with open(f, 'r') as file: # read annotation.txt
53 | for row in [x.split(',') for x in file.read().strip().splitlines()]:
54 | if row[4] == '0': # VisDrone 'ignored regions' class 0
55 | continue
56 | cls = int(row[5]) - 1
57 | box = convert_box(img_size, tuple(map(int, row[:4])))
58 | lines.append(f"{cls} {' '.join(f'{x:.6f}' for x in box)}\n")
59 | with open(str(f).replace(os.sep + 'annotations' + os.sep, os.sep + 'labels' + os.sep), 'w') as fl:
60 | fl.writelines(lines) # write label.txt
61 |
62 |
63 | # Download
64 | dir = Path(yaml['path']) # dataset root dir
65 | urls = ['https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-train.zip',
66 | 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-val.zip',
67 | 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-dev.zip',
68 | 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-challenge.zip']
69 | download(urls, dir=dir, curl=True, threads=4)
70 |
71 | # Convert
72 | for d in 'VisDrone2019-DET-train', 'VisDrone2019-DET-val', 'VisDrone2019-DET-test-dev':
73 | visdrone2yolo(dir / d) # convert VisDrone annotations to YOLO labels
74 |
--------------------------------------------------------------------------------
/ultralytics/datasets/coco.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | # COCO 2017 dataset http://cocodataset.org by Microsoft
3 | # Example usage: yolo train data=coco.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── coco ← downloads here (20.1 GB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/coco # dataset root dir
12 | train: train2017.txt # train images (relative to 'path') 118287 images
13 | val: val2017.txt # val images (relative to 'path') 5000 images
14 | test: test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794
15 |
16 | # Classes
17 | names:
18 | 0: person
19 | 1: bicycle
20 | 2: car
21 | 3: motorcycle
22 | 4: airplane
23 | 5: bus
24 | 6: train
25 | 7: truck
26 | 8: boat
27 | 9: traffic light
28 | 10: fire hydrant
29 | 11: stop sign
30 | 12: parking meter
31 | 13: bench
32 | 14: bird
33 | 15: cat
34 | 16: dog
35 | 17: horse
36 | 18: sheep
37 | 19: cow
38 | 20: elephant
39 | 21: bear
40 | 22: zebra
41 | 23: giraffe
42 | 24: backpack
43 | 25: umbrella
44 | 26: handbag
45 | 27: tie
46 | 28: suitcase
47 | 29: frisbee
48 | 30: skis
49 | 31: snowboard
50 | 32: sports ball
51 | 33: kite
52 | 34: baseball bat
53 | 35: baseball glove
54 | 36: skateboard
55 | 37: surfboard
56 | 38: tennis racket
57 | 39: bottle
58 | 40: wine glass
59 | 41: cup
60 | 42: fork
61 | 43: knife
62 | 44: spoon
63 | 45: bowl
64 | 46: banana
65 | 47: apple
66 | 48: sandwich
67 | 49: orange
68 | 50: broccoli
69 | 51: carrot
70 | 52: hot dog
71 | 53: pizza
72 | 54: donut
73 | 55: cake
74 | 56: chair
75 | 57: couch
76 | 58: potted plant
77 | 59: bed
78 | 60: dining table
79 | 61: toilet
80 | 62: tv
81 | 63: laptop
82 | 64: mouse
83 | 65: remote
84 | 66: keyboard
85 | 67: cell phone
86 | 68: microwave
87 | 69: oven
88 | 70: toaster
89 | 71: sink
90 | 72: refrigerator
91 | 73: book
92 | 74: clock
93 | 75: vase
94 | 76: scissors
95 | 77: teddy bear
96 | 78: hair drier
97 | 79: toothbrush
98 |
99 |
100 | # Download script/URL (optional)
101 | download: |
102 | from ultralytics.yolo.utils.downloads import download
103 | from pathlib import Path
104 |
105 | # Download labels
106 | segments = True # segment or box labels
107 | dir = Path(yaml['path']) # dataset root dir
108 | url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/'
109 | urls = [url + ('coco2017labels-segments.zip' if segments else 'coco2017labels.zip')] # labels
110 | download(urls, dir=dir.parent)
111 | # Download data
112 | urls = ['http://images.cocodataset.org/zips/train2017.zip', # 19G, 118k images
113 | 'http://images.cocodataset.org/zips/val2017.zip', # 1G, 5k images
114 | 'http://images.cocodataset.org/zips/test2017.zip'] # 7G, 41k images (optional)
115 | download(urls, dir=dir / 'images', threads=3)
116 |
--------------------------------------------------------------------------------
/ultralytics/datasets/coco128-seg.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | # COCO128-seg dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics
3 | # Example usage: yolo train data=coco128-seg.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── coco128-seg ← downloads here (7 MB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/coco128-seg # dataset root dir
12 | train: images/train2017 # train images (relative to 'path') 128 images
13 | val: images/train2017 # val images (relative to 'path') 128 images
14 | test: # test images (optional)
15 |
16 | # Classes
17 | names:
18 | 0: person
19 | 1: bicycle
20 | 2: car
21 | 3: motorcycle
22 | 4: airplane
23 | 5: bus
24 | 6: train
25 | 7: truck
26 | 8: boat
27 | 9: traffic light
28 | 10: fire hydrant
29 | 11: stop sign
30 | 12: parking meter
31 | 13: bench
32 | 14: bird
33 | 15: cat
34 | 16: dog
35 | 17: horse
36 | 18: sheep
37 | 19: cow
38 | 20: elephant
39 | 21: bear
40 | 22: zebra
41 | 23: giraffe
42 | 24: backpack
43 | 25: umbrella
44 | 26: handbag
45 | 27: tie
46 | 28: suitcase
47 | 29: frisbee
48 | 30: skis
49 | 31: snowboard
50 | 32: sports ball
51 | 33: kite
52 | 34: baseball bat
53 | 35: baseball glove
54 | 36: skateboard
55 | 37: surfboard
56 | 38: tennis racket
57 | 39: bottle
58 | 40: wine glass
59 | 41: cup
60 | 42: fork
61 | 43: knife
62 | 44: spoon
63 | 45: bowl
64 | 46: banana
65 | 47: apple
66 | 48: sandwich
67 | 49: orange
68 | 50: broccoli
69 | 51: carrot
70 | 52: hot dog
71 | 53: pizza
72 | 54: donut
73 | 55: cake
74 | 56: chair
75 | 57: couch
76 | 58: potted plant
77 | 59: bed
78 | 60: dining table
79 | 61: toilet
80 | 62: tv
81 | 63: laptop
82 | 64: mouse
83 | 65: remote
84 | 66: keyboard
85 | 67: cell phone
86 | 68: microwave
87 | 69: oven
88 | 70: toaster
89 | 71: sink
90 | 72: refrigerator
91 | 73: book
92 | 74: clock
93 | 75: vase
94 | 76: scissors
95 | 77: teddy bear
96 | 78: hair drier
97 | 79: toothbrush
98 |
99 |
100 | # Download script/URL (optional)
101 | download: https://ultralytics.com/assets/coco128-seg.zip
102 |
--------------------------------------------------------------------------------
/ultralytics/datasets/coco128.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | # COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics
3 | # Example usage: yolo train data=coco128.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── coco128 ← downloads here (7 MB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/coco128 # dataset root dir
12 | train: images/train2017 # train images (relative to 'path') 128 images
13 | val: images/train2017 # val images (relative to 'path') 128 images
14 | test: # test images (optional)
15 |
16 | # Classes
17 | names:
18 | 0: person
19 | 1: bicycle
20 | 2: car
21 | 3: motorcycle
22 | 4: airplane
23 | 5: bus
24 | 6: train
25 | 7: truck
26 | 8: boat
27 | 9: traffic light
28 | 10: fire hydrant
29 | 11: stop sign
30 | 12: parking meter
31 | 13: bench
32 | 14: bird
33 | 15: cat
34 | 16: dog
35 | 17: horse
36 | 18: sheep
37 | 19: cow
38 | 20: elephant
39 | 21: bear
40 | 22: zebra
41 | 23: giraffe
42 | 24: backpack
43 | 25: umbrella
44 | 26: handbag
45 | 27: tie
46 | 28: suitcase
47 | 29: frisbee
48 | 30: skis
49 | 31: snowboard
50 | 32: sports ball
51 | 33: kite
52 | 34: baseball bat
53 | 35: baseball glove
54 | 36: skateboard
55 | 37: surfboard
56 | 38: tennis racket
57 | 39: bottle
58 | 40: wine glass
59 | 41: cup
60 | 42: fork
61 | 43: knife
62 | 44: spoon
63 | 45: bowl
64 | 46: banana
65 | 47: apple
66 | 48: sandwich
67 | 49: orange
68 | 50: broccoli
69 | 51: carrot
70 | 52: hot dog
71 | 53: pizza
72 | 54: donut
73 | 55: cake
74 | 56: chair
75 | 57: couch
76 | 58: potted plant
77 | 59: bed
78 | 60: dining table
79 | 61: toilet
80 | 62: tv
81 | 63: laptop
82 | 64: mouse
83 | 65: remote
84 | 66: keyboard
85 | 67: cell phone
86 | 68: microwave
87 | 69: oven
88 | 70: toaster
89 | 71: sink
90 | 72: refrigerator
91 | 73: book
92 | 74: clock
93 | 75: vase
94 | 76: scissors
95 | 77: teddy bear
96 | 78: hair drier
97 | 79: toothbrush
98 |
99 |
100 | # Download script/URL (optional)
101 | download: https://ultralytics.com/assets/coco128.zip
102 |
--------------------------------------------------------------------------------
/ultralytics/datasets/coco8-seg.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | # COCO8-seg dataset (first 8 images from COCO train2017) by Ultralytics
3 | # Example usage: yolo train data=coco8-seg.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── coco8-seg ← downloads here (1 MB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/coco8-seg # dataset root dir
12 | train: images/train # train images (relative to 'path') 4 images
13 | val: images/val # val images (relative to 'path') 4 images
14 | test: # test images (optional)
15 |
16 | # Classes
17 | names:
18 | 0: person
19 | 1: bicycle
20 | 2: car
21 | 3: motorcycle
22 | 4: airplane
23 | 5: bus
24 | 6: train
25 | 7: truck
26 | 8: boat
27 | 9: traffic light
28 | 10: fire hydrant
29 | 11: stop sign
30 | 12: parking meter
31 | 13: bench
32 | 14: bird
33 | 15: cat
34 | 16: dog
35 | 17: horse
36 | 18: sheep
37 | 19: cow
38 | 20: elephant
39 | 21: bear
40 | 22: zebra
41 | 23: giraffe
42 | 24: backpack
43 | 25: umbrella
44 | 26: handbag
45 | 27: tie
46 | 28: suitcase
47 | 29: frisbee
48 | 30: skis
49 | 31: snowboard
50 | 32: sports ball
51 | 33: kite
52 | 34: baseball bat
53 | 35: baseball glove
54 | 36: skateboard
55 | 37: surfboard
56 | 38: tennis racket
57 | 39: bottle
58 | 40: wine glass
59 | 41: cup
60 | 42: fork
61 | 43: knife
62 | 44: spoon
63 | 45: bowl
64 | 46: banana
65 | 47: apple
66 | 48: sandwich
67 | 49: orange
68 | 50: broccoli
69 | 51: carrot
70 | 52: hot dog
71 | 53: pizza
72 | 54: donut
73 | 55: cake
74 | 56: chair
75 | 57: couch
76 | 58: potted plant
77 | 59: bed
78 | 60: dining table
79 | 61: toilet
80 | 62: tv
81 | 63: laptop
82 | 64: mouse
83 | 65: remote
84 | 66: keyboard
85 | 67: cell phone
86 | 68: microwave
87 | 69: oven
88 | 70: toaster
89 | 71: sink
90 | 72: refrigerator
91 | 73: book
92 | 74: clock
93 | 75: vase
94 | 76: scissors
95 | 77: teddy bear
96 | 78: hair drier
97 | 79: toothbrush
98 |
99 |
100 | # Download script/URL (optional)
101 | download: https://ultralytics.com/assets/coco8-seg.zip
102 |
--------------------------------------------------------------------------------
/ultralytics/datasets/coco8.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | # COCO8 dataset (first 8 images from COCO train2017) by Ultralytics
3 | # Example usage: yolo train data=coco8.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── coco8 ← downloads here (1 MB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/coco8 # dataset root dir
12 | train: images/train # train images (relative to 'path') 4 images
13 | val: images/val # val images (relative to 'path') 4 images
14 | test: # test images (optional)
15 |
16 | # Classes
17 | names:
18 | 0: person
19 | 1: bicycle
20 | 2: car
21 | 3: motorcycle
22 | 4: airplane
23 | 5: bus
24 | 6: train
25 | 7: truck
26 | 8: boat
27 | 9: traffic light
28 | 10: fire hydrant
29 | 11: stop sign
30 | 12: parking meter
31 | 13: bench
32 | 14: bird
33 | 15: cat
34 | 16: dog
35 | 17: horse
36 | 18: sheep
37 | 19: cow
38 | 20: elephant
39 | 21: bear
40 | 22: zebra
41 | 23: giraffe
42 | 24: backpack
43 | 25: umbrella
44 | 26: handbag
45 | 27: tie
46 | 28: suitcase
47 | 29: frisbee
48 | 30: skis
49 | 31: snowboard
50 | 32: sports ball
51 | 33: kite
52 | 34: baseball bat
53 | 35: baseball glove
54 | 36: skateboard
55 | 37: surfboard
56 | 38: tennis racket
57 | 39: bottle
58 | 40: wine glass
59 | 41: cup
60 | 42: fork
61 | 43: knife
62 | 44: spoon
63 | 45: bowl
64 | 46: banana
65 | 47: apple
66 | 48: sandwich
67 | 49: orange
68 | 50: broccoli
69 | 51: carrot
70 | 52: hot dog
71 | 53: pizza
72 | 54: donut
73 | 55: cake
74 | 56: chair
75 | 57: couch
76 | 58: potted plant
77 | 59: bed
78 | 60: dining table
79 | 61: toilet
80 | 62: tv
81 | 63: laptop
82 | 64: mouse
83 | 65: remote
84 | 66: keyboard
85 | 67: cell phone
86 | 68: microwave
87 | 69: oven
88 | 70: toaster
89 | 71: sink
90 | 72: refrigerator
91 | 73: book
92 | 74: clock
93 | 75: vase
94 | 76: scissors
95 | 77: teddy bear
96 | 78: hair drier
97 | 79: toothbrush
98 |
99 |
100 | # Download script/URL (optional)
101 | download: https://ultralytics.com/assets/coco8.zip
102 |
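
All four COCO dataset YAMLs above share one schema: `path` gives the dataset root, `train`/`val`/`test` point at image sources (directory, `.txt` file, or list), `names` maps class indices to labels, and `download` is fetched automatically when the root is missing. A minimal sketch of consuming one of them through the documented Python API (assumes the `ultralytics` package is installed and network access for the auto-download):

```python
from ultralytics import YOLO

# Minimal sketch: fine-tune on the tiny COCO8 dataset defined above.
# If ../datasets/coco8 is absent, the 'download' URL in the YAML is fetched first.
model = YOLO('yolov8n.pt')                           # pretrained detection model
model.train(data='coco8.yaml', epochs=3, imgsz=640)  # quick sanity-check run
metrics = model.val()                                # evaluates on the 'val' split
```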
--------------------------------------------------------------------------------
/ultralytics/hub/__init__.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | import requests
4 |
5 | from ultralytics.hub.auth import Auth
6 | from ultralytics.hub.session import HUBTrainingSession
7 | from ultralytics.hub.utils import PREFIX, split_key
8 | from ultralytics.yolo.engine.exporter import EXPORT_FORMATS_LIST
9 | from ultralytics.yolo.engine.model import YOLO
10 | from ultralytics.yolo.utils import LOGGER, emojis
11 |
12 | # Define all export formats
13 | EXPORT_FORMATS_HUB = EXPORT_FORMATS_LIST + ['ultralytics_tflite', 'ultralytics_coreml']
14 |
15 |
16 | def start(key=''):
17 | """
18 | Start training models with Ultralytics HUB. Usage: from ultralytics.hub import start; start('API_KEY')
19 | """
20 | auth = Auth(key)
21 | if not auth.get_state():
22 | model_id = request_api_key(auth)
23 | else:
24 | _, model_id = split_key(key)
25 |
26 | if not model_id:
27 | raise ConnectionError(emojis('Connecting with global API key is not currently supported. ❌'))
28 |
29 | session = HUBTrainingSession(model_id=model_id, auth=auth)
30 | session.check_disk_space()
31 |
32 | model = YOLO(model=session.model_file, session=session)
33 | model.train(**session.train_args)
34 |
35 |
36 | def request_api_key(auth, max_attempts=3):
37 | """
38 | Prompt the user to input their API key. Returns the model ID.
39 | """
40 | import getpass
41 | for attempts in range(max_attempts):
42 | LOGGER.info(f'{PREFIX}Login. Attempt {attempts + 1} of {max_attempts}')
43 | input_key = getpass.getpass('Enter your Ultralytics HUB API key:\n')
44 | auth.api_key, model_id = split_key(input_key)
45 |
46 | if auth.authenticate():
47 | LOGGER.info(f'{PREFIX}Authenticated ✅')
48 | return model_id
49 |
50 | LOGGER.warning(f'{PREFIX}Invalid API key ⚠️\n')
51 |
52 | raise ConnectionError(emojis(f'{PREFIX}Failed to authenticate ❌'))
53 |
54 |
55 | def reset_model(key=''):
56 | # Reset a trained model to an untrained state
57 | api_key, model_id = split_key(key)
58 | r = requests.post('https://api.ultralytics.com/model-reset', json={'apiKey': api_key, 'modelId': model_id})
59 |
60 | if r.status_code == 200:
61 | LOGGER.info(f'{PREFIX}Model reset successfully')
62 | return
63 | LOGGER.warning(f'{PREFIX}Model reset failure {r.status_code} {r.reason}')
64 |
65 |
66 | def export_model(key='', format='torchscript'):
67 | # Export a model to a given format
68 | assert format in EXPORT_FORMATS_HUB, f"Unsupported export format '{format}', valid formats are {EXPORT_FORMATS_HUB}"
69 | api_key, model_id = split_key(key)
70 | r = requests.post('https://api.ultralytics.com/export',
71 | json={
72 | 'apiKey': api_key,
73 | 'modelId': model_id,
74 | 'format': format})
75 | assert r.status_code == 200, f'{PREFIX}{format} export failure {r.status_code} {r.reason}'
76 | LOGGER.info(f'{PREFIX}{format} export started ✅')
77 |
78 |
79 | def get_export(key='', format='torchscript'):
80 | # Get an exported model dictionary with download URL
81 | assert format in EXPORT_FORMATS_HUB, f"Unsupported export format '{format}', valid formats are {EXPORT_FORMATS_HUB}"
82 | api_key, model_id = split_key(key)
83 | r = requests.post('https://api.ultralytics.com/get-export',
84 | json={
85 | 'apiKey': api_key,
86 | 'modelId': model_id,
87 | 'format': format})
88 | assert r.status_code == 200, f'{PREFIX}{format} get_export failure {r.status_code} {r.reason}'
89 | return r.json()
90 |
91 |
92 | if __name__ == '__main__':
93 | start()
94 |
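
`export_model()` only kicks off a server-side export and returns once the request is accepted; `get_export()` is the separate call that returns the export record. A hedged usage sketch (the key below is a placeholder in the `<api_key>_<model_id>` form that `split_key` expects):

```python
from ultralytics.hub import export_model, get_export

KEY = 'your-api-key_your-model-id'  # placeholder: '<api_key>_<model_id>'

export_model(key=KEY, format='onnx')       # start a server-side ONNX export
info = get_export(key=KEY, format='onnx')  # dict including a download URL
print(info)
```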
--------------------------------------------------------------------------------
/ultralytics/hub/auth.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | import requests
4 |
5 | from ultralytics.hub.utils import HUB_API_ROOT, request_with_credentials
6 | from ultralytics.yolo.utils import is_colab
7 |
8 | API_KEY_PATH = 'https://hub.ultralytics.com/settings?tab=api+keys'
9 |
10 |
11 | class Auth:
12 | id_token = api_key = model_key = False
13 |
14 | def __init__(self, api_key=None):
15 | self.api_key = self._clean_api_key(api_key)
16 | self.authenticate() if self.api_key else self.auth_with_cookies()
17 |
18 | @staticmethod
19 | def _clean_api_key(key: str) -> str:
20 | """Strip model from key if present"""
21 | separator = '_'
22 | return key.split(separator)[0] if separator in key else key
23 |
24 | def authenticate(self) -> bool:
25 | """Attempt to authenticate with server"""
26 | try:
27 | header = self.get_auth_header()
28 | if header:
29 | r = requests.post(f'{HUB_API_ROOT}/v1/auth', headers=header)
30 | if not r.json().get('success', False):
31 | raise ConnectionError('Unable to authenticate.')
32 | return True
33 | raise ConnectionError('User has not authenticated locally.')
34 | except ConnectionError:
35 | self.id_token = self.api_key = False # reset invalid
36 | return False
37 |
38 | def auth_with_cookies(self) -> bool:
39 | """
40 | Attempt to fetch authentication via cookies and set id_token.
41 | User must be logged in to HUB and running in a supported browser.
42 | """
43 | if not is_colab():
44 | return False # Currently only works with Colab
45 | try:
46 | authn = request_with_credentials(f'{HUB_API_ROOT}/v1/auth/auto')
47 | if authn.get('success', False):
48 | self.id_token = authn.get('data', {}).get('idToken', None)
49 | self.authenticate()
50 | return True
51 | raise ConnectionError('Unable to fetch browser authentication details.')
52 | except ConnectionError:
53 | self.id_token = False # reset invalid
54 | return False
55 |
56 | def get_auth_header(self):
57 | if self.id_token:
58 | return {'authorization': f'Bearer {self.id_token}'}
59 | elif self.api_key:
60 | return {'x-api-key': self.api_key}
61 | else:
62 | return None
63 |
64 | def get_state(self) -> bool:
65 | """Get the authentication state"""
66 | return self.id_token or self.api_key
67 |
68 | def set_api_key(self, key: str):
69 | """Get the authentication state"""
70 | self.api_key = key
71 |
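
`Auth` tries key-based authentication when a key is given and otherwise falls back to cookie authentication (Colab only); `get_auth_header()` prefers the cookie-derived `id_token` over a raw API key. A small sketch (placeholder key; note that `authenticate()` performs a real network request):

```python
from ultralytics.hub.auth import Auth

auth = Auth('your-api-key')  # placeholder; triggers authenticate() immediately
if auth.get_state():         # truthy when either id_token or api_key survived
    header = auth.get_auth_header()  # {'authorization': 'Bearer ...'} or {'x-api-key': ...}
    print(header)
```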
--------------------------------------------------------------------------------
/ultralytics/models/v3/yolov3-sppu.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 |
8 | # darknet53 backbone
9 | backbone:
10 | # [from, number, module, args]
11 | [[-1, 1, Conv, [32, 3, 1]], # 0
12 | [-1, 1, Conv, [64, 3, 2]], # 1-P1/2
13 | [-1, 1, Bottleneck, [64]],
14 | [-1, 1, Conv, [128, 3, 2]], # 3-P2/4
15 | [-1, 2, Bottleneck, [128]],
16 | [-1, 1, Conv, [256, 3, 2]], # 5-P3/8
17 | [-1, 8, Bottleneck, [256]],
18 | [-1, 1, Conv, [512, 3, 2]], # 7-P4/16
19 | [-1, 8, Bottleneck, [512]],
20 | [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32
21 | [-1, 4, Bottleneck, [1024]], # 10
22 | ]
23 |
24 | # YOLOv3-SPP head
25 | head:
26 | [[-1, 1, Bottleneck, [1024, False]],
27 | [-1, 1, SPP, [512, [5, 9, 13]]],
28 | [-1, 1, Conv, [1024, 3, 1]],
29 | [-1, 1, Conv, [512, 1, 1]],
30 | [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large)
31 |
32 | [-2, 1, Conv, [256, 1, 1]],
33 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
34 | [[-1, 8], 1, Concat, [1]], # cat backbone P4
35 | [-1, 1, Bottleneck, [512, False]],
36 | [-1, 1, Bottleneck, [512, False]],
37 | [-1, 1, Conv, [256, 1, 1]],
38 | [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium)
39 |
40 | [-2, 1, Conv, [128, 1, 1]],
41 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
42 | [[-1, 6], 1, Concat, [1]], # cat backbone P3
43 | [-1, 1, Bottleneck, [256, False]],
44 | [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small)
45 |
46 | [[27, 22, 15], 1, Detect, [nc]], # Detect(P3, P4, P5)
47 | ]
48 |
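
Every row in these model YAMLs follows the `[from, number, module, args]` schema noted in the backbone comment: `from` selects the input layer (-1 is the previous layer; a list such as the final `[[27, 22, 15], 1, Detect, [nc]]` gathers the outputs of layers 27, 22 and 15), `number` is the repeat count, and `module`/`args` name a block and its constructor arguments. A hypothetical walkthrough of one row (the real parsing lives in `parse_model()` in `ultralytics/nn/tasks.py`):

```python
# Hypothetical mini-walkthrough of one spec row, for illustration only.
row = [-1, 8, 'Bottleneck', [256]]  # backbone row 6 above: 8 Bottlenecks, 256 ch
frm, n, module_name, args = row
print(f'input: layer {frm} (relative; -1 = previous layer)')
print(f'repeats: {n} (multiplied by depth_multiple, minimum 1)')
print(f'module: {module_name}, resolved from ultralytics.nn.modules or torch.nn')
print(f'args: {args}, passed to the constructor (channels scaled by width_multiple)')
```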
--------------------------------------------------------------------------------
/ultralytics/models/v3/yolov3-tinyu.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 |
8 | # YOLOv3-tiny backbone
9 | backbone:
10 | # [from, number, module, args]
11 | [[-1, 1, Conv, [16, 3, 1]], # 0
12 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 1-P1/2
13 | [-1, 1, Conv, [32, 3, 1]],
14 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 3-P2/4
15 | [-1, 1, Conv, [64, 3, 1]],
16 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 5-P3/8
17 | [-1, 1, Conv, [128, 3, 1]],
18 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 7-P4/16
19 | [-1, 1, Conv, [256, 3, 1]],
20 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 9-P5/32
21 | [-1, 1, Conv, [512, 3, 1]],
22 | [-1, 1, nn.ZeroPad2d, [[0, 1, 0, 1]]], # 11
23 | [-1, 1, nn.MaxPool2d, [2, 1, 0]], # 12
24 | ]
25 |
26 | # YOLOv3-tiny head
27 | head:
28 | [[-1, 1, Conv, [1024, 3, 1]],
29 | [-1, 1, Conv, [256, 1, 1]],
30 | [-1, 1, Conv, [512, 3, 1]], # 15 (P5/32-large)
31 |
32 | [-2, 1, Conv, [128, 1, 1]],
33 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
34 | [[-1, 8], 1, Concat, [1]], # cat backbone P4
35 | [-1, 1, Conv, [256, 3, 1]], # 19 (P4/16-medium)
36 |
37 | [[19, 15], 1, Detect, [nc]], # Detect(P4, P5)
38 | ]
39 |
--------------------------------------------------------------------------------
/ultralytics/models/v3/yolov3u.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 |
8 | # darknet53 backbone
9 | backbone:
10 | # [from, number, module, args]
11 | [[-1, 1, Conv, [32, 3, 1]], # 0
12 | [-1, 1, Conv, [64, 3, 2]], # 1-P1/2
13 | [-1, 1, Bottleneck, [64]],
14 | [-1, 1, Conv, [128, 3, 2]], # 3-P2/4
15 | [-1, 2, Bottleneck, [128]],
16 | [-1, 1, Conv, [256, 3, 2]], # 5-P3/8
17 | [-1, 8, Bottleneck, [256]],
18 | [-1, 1, Conv, [512, 3, 2]], # 7-P4/16
19 | [-1, 8, Bottleneck, [512]],
20 | [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32
21 | [-1, 4, Bottleneck, [1024]], # 10
22 | ]
23 |
24 | # YOLOv3 head
25 | head:
26 | [[-1, 1, Bottleneck, [1024, False]],
27 | [-1, 1, Conv, [512, 1, 1]],
28 | [-1, 1, Conv, [1024, 3, 1]],
29 | [-1, 1, Conv, [512, 1, 1]],
30 | [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large)
31 |
32 | [-2, 1, Conv, [256, 1, 1]],
33 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
34 | [[-1, 8], 1, Concat, [1]], # cat backbone P4
35 | [-1, 1, Bottleneck, [512, False]],
36 | [-1, 1, Bottleneck, [512, False]],
37 | [-1, 1, Conv, [256, 1, 1]],
38 | [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium)
39 |
40 | [-2, 1, Conv, [128, 1, 1]],
41 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
42 | [[-1, 6], 1, Concat, [1]], # cat backbone P3
43 | [-1, 1, Bottleneck, [256, False]],
44 | [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small)
45 |
46 | [[27, 22, 15], 1, Detect, [nc]], # Detect(P3, P4, P5)
47 | ]
48 |
--------------------------------------------------------------------------------
/ultralytics/models/v5/yolov5lu.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 |
8 | # YOLOv5 v6.0 backbone
9 | backbone:
10 | # [from, number, module, args]
11 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
12 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
13 | [-1, 3, C3, [128]],
14 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
15 | [-1, 6, C3, [256]],
16 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
17 | [-1, 9, C3, [512]],
18 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
19 | [-1, 3, C3, [1024]],
20 | [-1, 1, SPPF, [1024, 5]], # 9
21 | ]
22 |
23 | # YOLOv5 v6.0 head
24 | head:
25 | [[-1, 1, Conv, [512, 1, 1]],
26 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
27 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
28 | [-1, 3, C3, [512, False]], # 13
29 |
30 | [-1, 1, Conv, [256, 1, 1]],
31 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
32 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
33 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
34 |
35 | [-1, 1, Conv, [256, 3, 2]],
36 | [[-1, 14], 1, Concat, [1]], # cat head P4
37 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
38 |
39 | [-1, 1, Conv, [512, 3, 2]],
40 | [[-1, 10], 1, Concat, [1]], # cat head P5
41 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
42 |
43 | [[17, 20, 23], 1, Detect, [nc]], # Detect(P3, P4, P5)
44 | ]
45 |
--------------------------------------------------------------------------------
/ultralytics/models/v5/yolov5mu.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.67 # model depth multiple
6 | width_multiple: 0.75 # layer channel multiple
7 |
8 | # YOLOv5 v6.0 backbone
9 | backbone:
10 | # [from, number, module, args]
11 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
12 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
13 | [-1, 3, C3, [128]],
14 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
15 | [-1, 6, C3, [256]],
16 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
17 | [-1, 9, C3, [512]],
18 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
19 | [-1, 3, C3, [1024]],
20 | [-1, 1, SPPF, [1024, 5]], # 9
21 | ]
22 |
23 | # YOLOv5 v6.0 head
24 | head:
25 | [[-1, 1, Conv, [512, 1, 1]],
26 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
27 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
28 | [-1, 3, C3, [512, False]], # 13
29 |
30 | [-1, 1, Conv, [256, 1, 1]],
31 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
32 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
33 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
34 |
35 | [-1, 1, Conv, [256, 3, 2]],
36 | [[-1, 14], 1, Concat, [1]], # cat head P4
37 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
38 |
39 | [-1, 1, Conv, [512, 3, 2]],
40 | [[-1, 10], 1, Concat, [1]], # cat head P5
41 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
42 |
43 | [[17, 20, 23], 1, Detect, [nc]], # Detect(P3, P4, P5)
44 | ]
45 |
--------------------------------------------------------------------------------
/ultralytics/models/v5/yolov5nu.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.33 # model depth multiple
6 | width_multiple: 0.25 # layer channel multiple
7 |
8 | # YOLOv5 v6.0 backbone
9 | backbone:
10 | # [from, number, module, args]
11 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
12 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
13 | [-1, 3, C3, [128]],
14 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
15 | [-1, 6, C3, [256]],
16 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
17 | [-1, 9, C3, [512]],
18 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
19 | [-1, 3, C3, [1024]],
20 | [-1, 1, SPPF, [1024, 5]], # 9
21 | ]
22 |
23 | # YOLOv5 v6.0 head
24 | head:
25 | [[-1, 1, Conv, [512, 1, 1]],
26 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
27 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
28 | [-1, 3, C3, [512, False]], # 13
29 |
30 | [-1, 1, Conv, [256, 1, 1]],
31 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
32 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
33 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
34 |
35 | [-1, 1, Conv, [256, 3, 2]],
36 | [[-1, 14], 1, Concat, [1]], # cat head P4
37 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
38 |
39 | [-1, 1, Conv, [512, 3, 2]],
40 | [[-1, 10], 1, Concat, [1]], # cat head P5
41 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
42 |
43 | [[17, 20, 23], 1, Detect, [nc]], # Detect(P3, P4, P5)
44 | ]
45 |
--------------------------------------------------------------------------------
/ultralytics/models/v5/yolov5su.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.33 # model depth multiple
6 | width_multiple: 0.50 # layer channel multiple
7 |
8 |
9 | # YOLOv5 v6.0 backbone
10 | backbone:
11 | # [from, number, module, args]
12 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
13 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
14 | [-1, 3, C3, [128]],
15 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
16 | [-1, 6, C3, [256]],
17 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
18 | [-1, 9, C3, [512]],
19 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
20 | [-1, 3, C3, [1024]],
21 | [-1, 1, SPPF, [1024, 5]], # 9
22 | ]
23 |
24 | # YOLOv5 v6.0 head
25 | head:
26 | [[-1, 1, Conv, [512, 1, 1]],
27 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
28 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
29 | [-1, 3, C3, [512, False]], # 13
30 |
31 | [-1, 1, Conv, [256, 1, 1]],
32 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
33 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
34 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
35 |
36 | [-1, 1, Conv, [256, 3, 2]],
37 | [[-1, 14], 1, Concat, [1]], # cat head P4
38 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
39 |
40 | [-1, 1, Conv, [512, 3, 2]],
41 | [[-1, 10], 1, Concat, [1]], # cat head P5
42 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
43 |
44 | [[17, 20, 23], 1, Detect, [nc]], # Detect(P3, P4, P5)
45 | ]
46 |
--------------------------------------------------------------------------------
/ultralytics/models/v5/yolov5xu.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.33 # model depth multiple
6 | width_multiple: 1.25 # layer channel multiple
7 |
8 | # YOLOv5 v6.0 backbone
9 | backbone:
10 | # [from, number, module, args]
11 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
12 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
13 | [-1, 3, C3, [128]],
14 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
15 | [-1, 6, C3, [256]],
16 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
17 | [-1, 9, C3, [512]],
18 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
19 | [-1, 3, C3, [1024]],
20 | [-1, 1, SPPF, [1024, 5]], # 9
21 | ]
22 |
23 | # YOLOv5 v6.0 head
24 | head:
25 | [[-1, 1, Conv, [512, 1, 1]],
26 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
27 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
28 | [-1, 3, C3, [512, False]], # 13
29 |
30 | [-1, 1, Conv, [256, 1, 1]],
31 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
32 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
33 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
34 |
35 | [-1, 1, Conv, [256, 3, 2]],
36 | [[-1, 14], 1, Concat, [1]], # cat head P4
37 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
38 |
39 | [-1, 1, Conv, [512, 3, 2]],
40 | [[-1, 10], 1, Concat, [1]], # cat head P5
41 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
42 |
43 | [[17, 20, 23], 1, Detect, [nc]], # Detect(P3, P4, P5)
44 | ]
45 |
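
Across the v5/v8 variants only `depth_multiple` and `width_multiple` change; the repeats and channels written in the rows are scaled at parse time. A worked sketch of that arithmetic, mirroring how Ultralytics' `parse_model` applies it (`make_divisible` rounds channels up to a multiple of 8):

```python
import math

def make_divisible(x, divisor=8):
    # Round channel counts up to the nearest multiple of 'divisor'
    return math.ceil(x / divisor) * divisor

def scale(repeats, channels, depth_multiple, width_multiple):
    n = max(round(repeats * depth_multiple), 1)    # scaled module repeats
    c = make_divisible(channels * width_multiple)  # scaled output channels
    return n, c

# yolov5su (depth 0.33, width 0.50): the '[-1, 9, C3, [512]]' row becomes
print(scale(9, 512, 0.33, 0.50))  # -> (3, 256)
# yolov5xu (depth 1.33, width 1.25): the same row becomes
print(scale(9, 512, 1.33, 1.25))  # -> (12, 640)
```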
--------------------------------------------------------------------------------
/ultralytics/models/v8/cls/yolov8l-cls.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 1000 # number of classes
5 | depth_multiple: 1.00 # scales module repeats
6 | width_multiple: 1.00 # scales convolution channels
7 |
8 | # YOLOv8.0l backbone
9 | backbone:
10 | # [from, repeats, module, args]
11 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
12 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
13 | - [-1, 3, C2f, [128, True]]
14 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
15 | - [-1, 6, C2f, [256, True]]
16 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
17 | - [-1, 6, C2f, [512, True]]
18 | - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
19 | - [-1, 3, C2f, [1024, True]]
20 |
21 | # YOLOv8.0l head
22 | head:
23 | - [-1, 1, Classify, [nc]]
24 |
--------------------------------------------------------------------------------
/ultralytics/models/v8/cls/yolov8m-cls.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 1000 # number of classes
5 | depth_multiple: 0.67 # scales module repeats
6 | width_multiple: 0.75 # scales convolution channels
7 |
8 | # YOLOv8.0m backbone
9 | backbone:
10 | # [from, repeats, module, args]
11 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
12 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
13 | - [-1, 3, C2f, [128, True]]
14 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
15 | - [-1, 6, C2f, [256, True]]
16 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
17 | - [-1, 6, C2f, [512, True]]
18 | - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
19 | - [-1, 3, C2f, [1024, True]]
20 |
21 | # YOLOv8.0m head
22 | head:
23 | - [-1, 1, Classify, [nc]]
24 |
--------------------------------------------------------------------------------
/ultralytics/models/v8/cls/yolov8n-cls.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 1000 # number of classes
5 | depth_multiple: 0.33 # scales module repeats
6 | width_multiple: 0.25 # scales convolution channels
7 |
8 | # YOLOv8.0n backbone
9 | backbone:
10 | # [from, repeats, module, args]
11 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
12 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
13 | - [-1, 3, C2f, [128, True]]
14 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
15 | - [-1, 6, C2f, [256, True]]
16 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
17 | - [-1, 6, C2f, [512, True]]
18 | - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
19 | - [-1, 3, C2f, [1024, True]]
20 |
21 | # YOLOv8.0n head
22 | head:
23 | - [-1, 1, Classify, [nc]]
24 |
--------------------------------------------------------------------------------
/ultralytics/models/v8/cls/yolov8s-cls.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 1000 # number of classes
5 | depth_multiple: 0.33 # scales module repeats
6 | width_multiple: 0.50 # scales convolution channels
7 |
8 | # YOLOv8.0s backbone
9 | backbone:
10 | # [from, repeats, module, args]
11 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
12 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
13 | - [-1, 3, C2f, [128, True]]
14 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
15 | - [-1, 6, C2f, [256, True]]
16 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
17 | - [-1, 6, C2f, [512, True]]
18 | - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
19 | - [-1, 3, C2f, [1024, True]]
20 |
21 | # YOLOv8.0s head
22 | head:
23 | - [-1, 1, Classify, [nc]]
24 |
--------------------------------------------------------------------------------
/ultralytics/models/v8/cls/yolov8x-cls.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 1000 # number of classes
5 | depth_multiple: 1.00 # scales module repeats
6 | width_multiple: 1.25 # scales convolution channels
7 |
8 | # YOLOv8.0x backbone
9 | backbone:
10 | # [from, repeats, module, args]
11 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
12 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
13 | - [-1, 3, C2f, [128, True]]
14 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
15 | - [-1, 6, C2f, [256, True]]
16 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
17 | - [-1, 6, C2f, [512, True]]
18 | - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
19 | - [-1, 3, C2f, [1024, True]]
20 |
21 | # YOLOv8.0x head
22 | head:
23 | - [-1, 1, Classify, [nc]]
24 |
--------------------------------------------------------------------------------
/ultralytics/models/v8/seg/yolov8l-seg.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.00 # scales module repeats
6 | width_multiple: 1.00 # scales convolution channels
7 |
8 | # YOLOv8.0l backbone
9 | backbone:
10 | # [from, repeats, module, args]
11 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
12 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
13 | - [-1, 3, C2f, [128, True]]
14 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
15 | - [-1, 6, C2f, [256, True]]
16 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
17 | - [-1, 6, C2f, [512, True]]
18 | - [-1, 1, Conv, [512, 3, 2]] # 7-P5/32
19 | - [-1, 3, C2f, [512, True]]
20 | - [-1, 1, SPPF, [512, 5]] # 9
21 |
22 | # YOLOv8.0l head
23 | head:
24 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
25 | - [[-1, 6], 1, Concat, [1]] # cat backbone P4
26 | - [-1, 3, C2f, [512]] # 12
27 |
28 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
29 | - [[-1, 4], 1, Concat, [1]] # cat backbone P3
30 | - [-1, 3, C2f, [256]] # 15 (P3/8-small)
31 |
32 | - [-1, 1, Conv, [256, 3, 2]]
33 | - [[-1, 12], 1, Concat, [1]] # cat head P4
34 | - [-1, 3, C2f, [512]] # 18 (P4/16-medium)
35 |
36 | - [-1, 1, Conv, [512, 3, 2]]
37 | - [[-1, 9], 1, Concat, [1]] # cat head P5
38 | - [-1, 3, C2f, [512]] # 21 (P5/32-large)
39 |
40 | - [[15, 18, 21], 1, Segment, [nc, 32, 256]] # Segment(P3, P4, P5)
41 |
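
In the seg variants the head row `[[15, 18, 21], 1, Segment, [nc, 32, 256]]` constructs `Segment(nc, nm=32, npr=256)`: 32 mask coefficients per box and 256 hidden channels in the prototype branch. Masks are assembled YOLACT-style as coefficient-weighted sums of prototypes; a shape-only sketch with hypothetical tensors:

```python
import torch

# Hypothetical shapes for the YOLACT-style mask assembly used by Segment heads:
protos = torch.randn(32, 160, 160)  # 32 mask prototypes at 1/4 input resolution
coeffs = torch.randn(5, 32)         # 32 coefficients for each of 5 detected boxes

# Each instance mask is a coefficient-weighted sum of the prototypes
masks = (coeffs @ protos.view(32, -1)).view(-1, 160, 160).sigmoid()
print(masks.shape)  # torch.Size([5, 160, 160])
```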
--------------------------------------------------------------------------------
/ultralytics/models/v8/seg/yolov8m-seg.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.67 # scales module repeats
6 | width_multiple: 0.75 # scales convolution channels
7 |
8 | # YOLOv8.0m backbone
9 | backbone:
10 | # [from, repeats, module, args]
11 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
12 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
13 | - [-1, 3, C2f, [128, True]]
14 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
15 | - [-1, 6, C2f, [256, True]]
16 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
17 | - [-1, 6, C2f, [512, True]]
18 | - [-1, 1, Conv, [768, 3, 2]] # 7-P5/32
19 | - [-1, 3, C2f, [768, True]]
20 | - [-1, 1, SPPF, [768, 5]] # 9
21 |
22 | # YOLOv8.0m head
23 | head:
24 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
25 | - [[-1, 6], 1, Concat, [1]] # cat backbone P4
26 | - [-1, 3, C2f, [512]] # 12
27 |
28 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
29 | - [[-1, 4], 1, Concat, [1]] # cat backbone P3
30 | - [-1, 3, C2f, [256]] # 15 (P3/8-small)
31 |
32 | - [-1, 1, Conv, [256, 3, 2]]
33 | - [[-1, 12], 1, Concat, [1]] # cat head P4
34 | - [-1, 3, C2f, [512]] # 18 (P4/16-medium)
35 |
36 | - [-1, 1, Conv, [512, 3, 2]]
37 | - [[-1, 9], 1, Concat, [1]] # cat head P5
38 | - [-1, 3, C2f, [768]] # 21 (P5/32-large)
39 |
40 | - [[15, 18, 21], 1, Segment, [nc, 32, 256]] # Segment(P3, P4, P5)
41 |
--------------------------------------------------------------------------------
/ultralytics/models/v8/seg/yolov8n-seg.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.33 # scales module repeats
6 | width_multiple: 0.25 # scales convolution channels
7 |
8 | # YOLOv8.0n backbone
9 | backbone:
10 | # [from, repeats, module, args]
11 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
12 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
13 | - [-1, 3, C2f, [128, True]]
14 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
15 | - [-1, 6, C2f, [256, True]]
16 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
17 | - [-1, 6, C2f, [512, True]]
18 | - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
19 | - [-1, 3, C2f, [1024, True]]
20 | - [-1, 1, SPPF, [1024, 5]] # 9
21 |
22 | # YOLOv8.0n head
23 | head:
24 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
25 | - [[-1, 6], 1, Concat, [1]] # cat backbone P4
26 | - [-1, 3, C2f, [512]] # 12
27 |
28 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
29 | - [[-1, 4], 1, Concat, [1]] # cat backbone P3
30 | - [-1, 3, C2f, [256]] # 15 (P3/8-small)
31 |
32 | - [-1, 1, Conv, [256, 3, 2]]
33 | - [[-1, 12], 1, Concat, [1]] # cat head P4
34 | - [-1, 3, C2f, [512]] # 18 (P4/16-medium)
35 |
36 | - [-1, 1, Conv, [512, 3, 2]]
37 | - [[-1, 9], 1, Concat, [1]] # cat head P5
38 | - [-1, 3, C2f, [1024]] # 21 (P5/32-large)
39 |
40 | - [[15, 18, 21], 1, Segment, [nc, 32, 256]] # Segment(P3, P4, P5)
41 |
--------------------------------------------------------------------------------
/ultralytics/models/v8/seg/yolov8s-seg.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.33 # scales module repeats
6 | width_multiple: 0.50 # scales convolution channels
7 |
8 | # YOLOv8.0s backbone
9 | backbone:
10 | # [from, repeats, module, args]
11 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
12 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
13 | - [-1, 3, C2f, [128, True]]
14 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
15 | - [-1, 6, C2f, [256, True]]
16 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
17 | - [-1, 6, C2f, [512, True]]
18 | - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
19 | - [-1, 3, C2f, [1024, True]]
20 | - [-1, 1, SPPF, [1024, 5]] # 9
21 |
22 | # YOLOv8.0s head
23 | head:
24 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
25 | - [[-1, 6], 1, Concat, [1]] # cat backbone P4
26 | - [-1, 3, C2f, [512]] # 12
27 |
28 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
29 | - [[-1, 4], 1, Concat, [1]] # cat backbone P3
30 | - [-1, 3, C2f, [256]] # 15 (P3/8-small)
31 |
32 | - [-1, 1, Conv, [256, 3, 2]]
33 | - [[-1, 12], 1, Concat, [1]] # cat head P4
34 | - [-1, 3, C2f, [512]] # 18 (P4/16-medium)
35 |
36 | - [-1, 1, Conv, [512, 3, 2]]
37 | - [[-1, 9], 1, Concat, [1]] # cat head P5
38 | - [-1, 3, C2f, [1024]] # 21 (P5/32-large)
39 |
40 | - [[15, 18, 21], 1, Segment, [nc, 32, 256]] # Segment(P3, P4, P5)
41 |
--------------------------------------------------------------------------------
/ultralytics/models/v8/seg/yolov8x-seg.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.00 # scales module repeats
6 | width_multiple: 1.25 # scales convolution channels
7 |
8 | # YOLOv8.0x backbone
9 | backbone:
10 | # [from, repeats, module, args]
11 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
12 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
13 | - [-1, 3, C2f, [128, True]]
14 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
15 | - [-1, 6, C2f, [256, True]]
16 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
17 | - [-1, 6, C2f, [512, True]]
18 | - [-1, 1, Conv, [512, 3, 2]] # 7-P5/32
19 | - [-1, 3, C2f, [512, True]]
20 | - [-1, 1, SPPF, [512, 5]] # 9
21 |
22 | # YOLOv8.0x head
23 | head:
24 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
25 | - [[-1, 6], 1, Concat, [1]] # cat backbone P4
26 | - [-1, 3, C2f, [512]] # 12
27 |
28 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
29 | - [[-1, 4], 1, Concat, [1]] # cat backbone P3
30 | - [-1, 3, C2f, [256]] # 15 (P3/8-small)
31 |
32 | - [-1, 1, Conv, [256, 3, 2]]
33 | - [[-1, 12], 1, Concat, [1]] # cat head P4
34 | - [-1, 3, C2f, [512]] # 18 (P4/16-medium)
35 |
36 | - [-1, 1, Conv, [512, 3, 2]]
37 | - [[-1, 9], 1, Concat, [1]] # cat head P5
38 | - [-1, 3, C2f, [512]] # 21 (P5/32-large)
39 |
40 | - [[15, 18, 21], 1, Segment, [nc, 32, 256]] # Segment(P3, P4, P5)
41 |
--------------------------------------------------------------------------------
/ultralytics/models/v8/yolov8l.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.00 # scales module repeats
6 | width_multiple: 1.00 # scales convolution channels
7 |
8 | # YOLOv8.0l backbone
9 | backbone:
10 | # [from, repeats, module, args]
11 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
12 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
13 | - [-1, 3, C2f, [128, True]]
14 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
15 | - [-1, 6, C2f, [256, True]]
16 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
17 | - [-1, 6, C2f, [512, True]]
18 | - [-1, 1, Conv, [512, 3, 2]] # 7-P5/32
19 | - [-1, 3, C2f, [512, True]]
20 | - [-1, 1, SPPF, [512, 5]] # 9
21 |
22 | # YOLOv8.0l head
23 | head:
24 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
25 | - [[-1, 6], 1, Concat, [1]] # cat backbone P4
26 | - [-1, 3, C2f, [512]] # 12
27 |
28 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
29 | - [[-1, 4], 1, Concat, [1]] # cat backbone P3
30 | - [-1, 3, C2f, [256]] # 15 (P3/8-small)
31 |
32 | - [-1, 1, Conv, [256, 3, 2]]
33 | - [[-1, 12], 1, Concat, [1]] # cat head P4
34 | - [-1, 3, C2f, [512]] # 18 (P4/16-medium)
35 |
36 | - [-1, 1, Conv, [512, 3, 2]]
37 | - [[-1, 9], 1, Concat, [1]] # cat head P5
38 | - [-1, 3, C2f, [512]] # 21 (P5/32-large)
39 |
40 | - [[15, 18, 21], 1, Detect, [nc]] # Detect(P3, P4, P5)
41 |
--------------------------------------------------------------------------------
/ultralytics/models/v8/yolov8m.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.67 # scales module repeats
6 | width_multiple: 0.75 # scales convolution channels
7 |
8 | # YOLOv8.0m backbone
9 | backbone:
10 | # [from, repeats, module, args]
11 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
12 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
13 | - [-1, 3, C2f, [128, True]]
14 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
15 | - [-1, 6, C2f, [256, True]]
16 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
17 | - [-1, 6, C2f, [512, True]]
18 | - [-1, 1, Conv, [768, 3, 2]] # 7-P5/32
19 | - [-1, 3, C2f, [768, True]]
20 | - [-1, 1, SPPF, [768, 5]] # 9
21 |
22 | # YOLOv8.0m head
23 | head:
24 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
25 | - [[-1, 6], 1, Concat, [1]] # cat backbone P4
26 | - [-1, 3, C2f, [512]] # 12
27 |
28 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
29 | - [[-1, 4], 1, Concat, [1]] # cat backbone P3
30 | - [-1, 3, C2f, [256]] # 15 (P3/8-small)
31 |
32 | - [-1, 1, Conv, [256, 3, 2]]
33 | - [[-1, 12], 1, Concat, [1]] # cat head P4
34 | - [-1, 3, C2f, [512]] # 18 (P4/16-medium)
35 |
36 | - [-1, 1, Conv, [512, 3, 2]]
37 | - [[-1, 9], 1, Concat, [1]] # cat head P5
38 | - [-1, 3, C2f, [768]] # 21 (P5/32-large)
39 |
40 | - [[15, 18, 21], 1, Detect, [nc]] # Detect(P3, P4, P5)
41 |
--------------------------------------------------------------------------------
/ultralytics/models/v8/yolov8n.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.33 # scales module repeats
6 | width_multiple: 0.25 # scales convolution channels
7 |
8 | # YOLOv8.0n backbone
9 | backbone:
10 | # [from, repeats, module, args]
11 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
12 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
13 | - [-1, 3, C2f, [128, True]]
14 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
15 | - [-1, 6, C2f, [256, True]]
16 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
17 | - [-1, 6, C2f, [512, True]]
18 | - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
19 | - [-1, 3, C2f, [1024, True]]
20 | - [-1, 1, SPPF, [1024, 5]] # 9
21 |
22 | # YOLOv8.0n head
23 | head:
24 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
25 | - [[-1, 6], 1, Concat, [1]] # cat backbone P4
26 | - [-1, 3, C2f, [512]] # 12
27 |
28 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
29 | - [[-1, 4], 1, Concat, [1]] # cat backbone P3
30 | - [-1, 3, C2f, [256]] # 15 (P3/8-small)
31 |
32 | - [-1, 1, Conv, [256, 3, 2]]
33 | - [[-1, 12], 1, Concat, [1]] # cat head P4
34 | - [-1, 3, C2f, [512]] # 18 (P4/16-medium)
35 |
36 | - [-1, 1, Conv, [512, 3, 2]]
37 | - [[-1, 9], 1, Concat, [1]] # cat head P5
38 | - [-1, 3, C2f, [1024]] # 21 (P5/32-large)
39 |
40 | - [[15, 18, 21], 1, Detect, [nc]] # Detect(P3, P4, P5)
41 |
--------------------------------------------------------------------------------
/ultralytics/models/v8/yolov8s.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.33 # scales module repeats
6 | width_multiple: 0.50 # scales convolution channels
7 |
8 | # YOLOv8.0s backbone
9 | backbone:
10 | # [from, repeats, module, args]
11 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
12 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
13 | - [-1, 3, C2f, [128, True]]
14 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
15 | - [-1, 6, C2f, [256, True]]
16 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
17 | - [-1, 6, C2f, [512, True]]
18 | - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
19 | - [-1, 3, C2f, [1024, True]]
20 | - [-1, 1, SPPF, [1024, 5]] # 9
21 |
22 | # YOLOv8.0s head
23 | head:
24 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
25 | - [[-1, 6], 1, Concat, [1]] # cat backbone P4
26 | - [-1, 3, C2f, [512]] # 12
27 |
28 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
29 | - [[-1, 4], 1, Concat, [1]] # cat backbone P3
30 | - [-1, 3, C2f, [256]] # 15 (P3/8-small)
31 |
32 | - [-1, 1, Conv, [256, 3, 2]]
33 | - [[-1, 12], 1, Concat, [1]] # cat head P4
34 | - [-1, 3, C2f, [512]] # 18 (P4/16-medium)
35 |
36 | - [-1, 1, Conv, [512, 3, 2]]
37 | - [[-1, 9], 1, Concat, [1]] # cat head P5
38 | - [-1, 3, C2f, [1024]] # 21 (P5/32-large)
39 |
40 | - [[15, 18, 21], 1, Detect, [nc]] # Detect(P3, P4, P5)
41 |
--------------------------------------------------------------------------------
/ultralytics/models/v8/yolov8x.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.00 # scales module repeats
6 | width_multiple: 1.25 # scales convolution channels
7 |
8 | # YOLOv8.0x backbone
9 | backbone:
10 | # [from, repeats, module, args]
11 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
12 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
13 | - [-1, 3, C2f, [128, True]]
14 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
15 | - [-1, 6, C2f, [256, True]]
16 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
17 | - [-1, 6, C2f, [512, True]]
18 | - [-1, 1, Conv, [512, 3, 2]] # 7-P5/32
19 | - [-1, 3, C2f, [512, True]]
20 | - [-1, 1, SPPF, [512, 5]] # 9
21 |
22 | # YOLOv8.0x head
23 | head:
24 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
25 | - [[-1, 6], 1, Concat, [1]] # cat backbone P4
26 | - [-1, 3, C2f, [512]] # 12
27 |
28 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
29 | - [[-1, 4], 1, Concat, [1]] # cat backbone P3
30 | - [-1, 3, C2f, [256]] # 15 (P3/8-small)
31 |
32 | - [-1, 1, Conv, [256, 3, 2]]
33 | - [[-1, 12], 1, Concat, [1]] # cat head P4
34 | - [-1, 3, C2f, [512]] # 18 (P4/16-medium)
35 |
36 | - [-1, 1, Conv, [512, 3, 2]]
37 | - [[-1, 9], 1, Concat, [1]] # cat head P5
38 | - [-1, 3, C2f, [512]] # 21 (P5/32-large)
39 |
40 | - [[15, 18, 21], 1, Detect, [nc]] # Detect(P3, P4, P5)
41 |
--------------------------------------------------------------------------------
/ultralytics/models/v8/yolov8x6.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.00 # scales module repeats
6 | width_multiple: 1.25 # scales convolution channels
7 |
8 | # YOLOv8.0x6 backbone
9 | backbone:
10 | # [from, repeats, module, args]
11 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
12 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
13 | - [-1, 3, C2f, [128, True]]
14 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
15 | - [-1, 6, C2f, [256, True]]
16 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
17 | - [-1, 6, C2f, [512, True]]
18 | - [-1, 1, Conv, [512, 3, 2]] # 7-P5/32
19 | - [-1, 3, C2f, [512, True]]
20 | - [-1, 1, Conv, [512, 3, 2]] # 9-P6/64
21 | - [-1, 3, C2f, [512, True]]
22 | - [-1, 1, SPPF, [512, 5]] # 11
23 |
24 | # YOLOv8.0x6 head
25 | head:
26 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
27 | - [[-1, 8], 1, Concat, [1]] # cat backbone P5
28 | - [-1, 3, C2, [512, False]] # 14
29 |
30 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
31 | - [[-1, 6], 1, Concat, [1]] # cat backbone P4
32 | - [-1, 3, C2, [512, False]] # 17
33 |
34 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
35 | - [[-1, 4], 1, Concat, [1]] # cat backbone P3
36 | - [-1, 3, C2, [256, False]] # 20 (P3/8-small)
37 |
38 | - [-1, 1, Conv, [256, 3, 2]]
39 | - [[-1, 17], 1, Concat, [1]] # cat head P4
40 | - [-1, 3, C2, [512, False]] # 23 (P4/16-medium)
41 |
42 | - [-1, 1, Conv, [512, 3, 2]]
43 | - [[-1, 14], 1, Concat, [1]] # cat head P5
44 | - [-1, 3, C2, [512, False]] # 26 (P5/32-large)
45 |
46 | - [-1, 1, Conv, [512, 3, 2]]
47 | - [[-1, 11], 1, Concat, [1]] # cat head P6
48 | - [-1, 3, C2, [512, False]] # 29 (P6/64-xlarge)
49 |
50 | - [[20, 23, 26, 29], 1, Detect, [nc]] # Detect(P3, P4, P5, P6)
51 |
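
yolov8x6 extends the x model with a P6/64 stage, so the final `Detect` row consumes four feature maps instead of three. A quick sketch of the per-level grid sizes for a square input (the 1280 resolution is an assumption; P6 models are commonly run at higher resolution):

```python
# Grid size per detection level for a square input image (sketch).
imgsz = 1280  # assumption: P6 models are typically run at higher resolution
for level, stride in [('P3', 8), ('P4', 16), ('P5', 32), ('P6', 64)]:
    print(f'{level}: {imgsz // stride} x {imgsz // stride} grid')
```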
--------------------------------------------------------------------------------
/ultralytics/nn/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wudashuo/yolov8/3861e6c82aaa1bbb214f020ece3a4bd4712eacbe/ultralytics/nn/__init__.py
--------------------------------------------------------------------------------
/ultralytics/tracker/README.md:
--------------------------------------------------------------------------------
1 | ## Tracker
2 |
3 | ### Trackers
4 |
5 | - [x] ByteTracker
6 | - [x] BoT-SORT
7 |
8 | ### Usage
9 |
10 | Python interface:
11 |
12 | ```python
13 | from ultralytics import YOLO
14 |
15 | model = YOLO("yolov8n.pt") # or a segmentation model .i.e yolov8n-seg.pt
16 | model.track(
17 | source="video/streams",
18 | stream=True,
19 | tracker="botsort.yaml", # or 'bytetrack.yaml'
20 | ...,
21 | )
22 | ```
23 |
24 | CLI:
25 |
26 | ```bash
27 | yolo detect track source=... tracker=...
28 | yolo segment track source=... tracker=...
29 | ```
30 |
31 | By default, trackers use the configuration in `ultralytics/tracker/cfg`.
32 | Modified tracker config files are also supported: use the files in
33 | `ultralytics/tracker/cfg` as templates and pass yours via the `tracker` argument.
34 |
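
The README's note about modified tracker configs can be made concrete with a short sketch (`my_tracker.yaml` is a placeholder: a copy of one of the files in `ultralytics/tracker/cfg` with edited thresholds):

```python
from ultralytics import YOLO

# Sketch: copy ultralytics/tracker/cfg/bytetrack.yaml to my_tracker.yaml,
# adjust its thresholds, then pass it via the tracker argument.
model = YOLO("yolov8n.pt")
for result in model.track(source="video.mp4", tracker="my_tracker.yaml", stream=True):
    print(result.boxes)  # boxes carry track IDs once the tracker is running
```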
--------------------------------------------------------------------------------
/ultralytics/tracker/__init__.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | from .track import register_tracker
4 | from .trackers import BOTSORT, BYTETracker
5 |
6 | __all__ = 'register_tracker', 'BOTSORT', 'BYTETracker' # allow simpler import
7 |
--------------------------------------------------------------------------------
/ultralytics/tracker/cfg/botsort.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | # Default YOLO tracker settings for BoT-SORT tracker https://github.com/NirAharon/BoT-SORT
3 |
4 | tracker_type: botsort # tracker type, ['botsort', 'bytetrack']
5 | track_high_thresh: 0.5 # threshold for the first association
6 | track_low_thresh: 0.1 # threshold for the second association
7 | new_track_thresh: 0.6 # threshold to initialize a new track if the detection matches no existing tracks
8 | track_buffer: 30 # number of frames to keep lost tracks before removing them
9 | match_thresh: 0.8 # threshold for matching tracks
10 | # min_box_area: 10 # minimum box area (for tracker evaluation, not used for now)
11 | # mot20: False # for tracker evaluation (not used for now)
12 |
13 | # BoT-SORT settings
14 | cmc_method: sparseOptFlow # method of global motion compensation
15 | # ReID-related thresholds (ReID not supported yet)
16 | proximity_thresh: 0.5
17 | appearance_thresh: 0.25
18 | with_reid: False
19 |
--------------------------------------------------------------------------------
/ultralytics/tracker/cfg/bytetrack.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | # Default YOLO tracker settings for ByteTrack tracker https://github.com/ifzhang/ByteTrack
3 |
4 | tracker_type: bytetrack # tracker type, ['botsort', 'bytetrack']
5 | track_high_thresh: 0.5 # threshold for the first association
6 | track_low_thresh: 0.1 # threshold for the second association
7 | new_track_thresh: 0.6 # threshold to initialize a new track if the detection matches no existing tracks
8 | track_buffer: 30 # number of frames to keep lost tracks before removing them
9 | match_thresh: 0.8 # threshold for matching tracks
10 | # min_box_area: 10 # minimum box area (for tracker evaluation, not used for now)
11 | # mot20: False # for tracker evaluation (not used for now)
12 |
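
ByteTrack's two thresholds implement its two-stage association: detections scoring above `track_high_thresh` are matched to existing tracks first, leftover tracks then get a second chance against the low-confidence detections between `track_low_thresh` and `track_high_thresh`, and unmatched high-confidence detections above `new_track_thresh` start new tracks. A schematic NumPy sketch of the split (illustrative only):

```python
import numpy as np

# Illustrative split of detections by score, mirroring ByteTrack's two passes.
scores = np.array([0.92, 0.55, 0.30, 0.08])
high, low = 0.5, 0.1  # track_high_thresh, track_low_thresh from the config above

first_pass = scores > high                      # matched to existing tracks first
second_pass = (scores > low) & (scores < high)  # rescue pass for unmatched tracks
discarded = scores <= low                       # dropped entirely
print(first_pass, second_pass, discarded)
```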
--------------------------------------------------------------------------------
/ultralytics/tracker/track.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | import torch
4 |
5 | from ultralytics.yolo.utils import IterableSimpleNamespace, yaml_load
6 | from ultralytics.yolo.utils.checks import check_requirements, check_yaml
7 |
8 | check_requirements('lap') # for linear_assignment
9 |
10 | from .trackers import BOTSORT, BYTETracker
11 |
12 | TRACKER_MAP = {'bytetrack': BYTETracker, 'botsort': BOTSORT}
13 |
14 |
15 | def on_predict_start(predictor):
16 | tracker = check_yaml(predictor.args.tracker)
17 | cfg = IterableSimpleNamespace(**yaml_load(tracker))
18 | assert cfg.tracker_type in ['bytetrack', 'botsort'], \
19 | f"Only support 'bytetrack' and 'botsort' for now, but got '{cfg.tracker_type}'"
20 | trackers = []
21 | for _ in range(predictor.dataset.bs):
22 | tracker = TRACKER_MAP[cfg.tracker_type](args=cfg, frame_rate=30)
23 | trackers.append(tracker)
24 | predictor.trackers = trackers
25 |
26 |
27 | def on_predict_postprocess_end(predictor):
28 | bs = predictor.dataset.bs
29 | im0s = predictor.batch[2]
30 | im0s = im0s if isinstance(im0s, list) else [im0s]
31 | for i in range(bs):
32 | det = predictor.results[i].boxes.cpu().numpy()
33 | if len(det) == 0:
34 | continue
35 | tracks = predictor.trackers[i].update(det, im0s[i])
36 | if len(tracks) == 0:
37 | continue
38 | predictor.results[i].update(boxes=torch.as_tensor(tracks[:, :-1]))
39 | if predictor.results[i].masks is not None:
40 | idx = tracks[:, -1].tolist()
41 | predictor.results[i].masks = predictor.results[i].masks[idx]
42 |
43 |
44 | def register_tracker(model):
45 | model.add_callback('on_predict_start', on_predict_start)
46 | model.add_callback('on_predict_postprocess_end', on_predict_postprocess_end)
47 |
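
`register_tracker` is what `model.track()` calls internally to hook tracking into the predict loop; the same wiring can be done by hand for a streaming predict. A hedged sketch (assumes the default config supplies a `tracker` entry, which `on_predict_start` reads):

```python
from ultralytics import YOLO
from ultralytics.tracker import register_tracker

model = YOLO("yolov8n.pt")
register_tracker(model)  # attaches on_predict_start / on_predict_postprocess_end
for result in model.predict(source="video.mp4", stream=True):
    print(result.boxes)  # updated in-place with track IDs per frame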
--------------------------------------------------------------------------------
/ultralytics/tracker/trackers/__init__.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | from .bot_sort import BOTSORT
4 | from .byte_tracker import BYTETracker
5 |
6 | __all__ = 'BOTSORT', 'BYTETracker' # allow simpler import
7 |
--------------------------------------------------------------------------------
/ultralytics/tracker/trackers/basetrack.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | from collections import OrderedDict
4 |
5 | import numpy as np
6 |
7 |
8 | class TrackState:
9 | New = 0
10 | Tracked = 1
11 | Lost = 2
12 | Removed = 3
13 |
14 |
15 | class BaseTrack:
16 | _count = 0
17 |
18 | track_id = 0
19 | is_activated = False
20 | state = TrackState.New
21 |
22 | history = OrderedDict()
23 | features = []
24 | curr_feature = None
25 | score = 0
26 | start_frame = 0
27 | frame_id = 0
28 | time_since_update = 0
29 |
30 | # multi-camera
31 | location = (np.inf, np.inf)
32 |
33 | @property
34 | def end_frame(self):
35 | return self.frame_id
36 |
37 | @staticmethod
38 | def next_id():
39 | BaseTrack._count += 1
40 | return BaseTrack._count
41 |
42 | def activate(self, *args):
43 | raise NotImplementedError
44 |
45 | def predict(self):
46 | raise NotImplementedError
47 |
48 | def update(self, *args, **kwargs):
49 | raise NotImplementedError
50 |
51 | def mark_lost(self):
52 | self.state = TrackState.Lost
53 |
54 | def mark_removed(self):
55 | self.state = TrackState.Removed
56 |
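
`BaseTrack` holds the per-track bookkeeping and the `New → Tracked → Lost → Removed` state machine; concrete trackers subclass it (`STrack`, `BOTrack`). A hypothetical minimal subclass to show the contract (real code uses the classes above; assumes tracker extras such as `lap` are installed so the package imports cleanly):

```python
from ultralytics.tracker.trackers.basetrack import BaseTrack, TrackState

class DummyTrack(BaseTrack):  # hypothetical, for illustration only
    def activate(self, frame_id):
        self.track_id = self.next_id()  # globally monotonically increasing IDs
        self.state = TrackState.Tracked
        self.start_frame = self.frame_id = frame_id

t = DummyTrack()
t.activate(frame_id=1)
t.mark_lost()
print(t.track_id, t.state == TrackState.Lost)  # 1 True
```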
--------------------------------------------------------------------------------
/ultralytics/tracker/trackers/bot_sort.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | from collections import deque
4 |
5 | import numpy as np
6 |
7 | from ..utils import matching
8 | from ..utils.gmc import GMC
9 | from ..utils.kalman_filter import KalmanFilterXYWH
10 | from .basetrack import TrackState
11 | from .byte_tracker import BYTETracker, STrack
12 |
13 |
14 | class BOTrack(STrack):
15 | shared_kalman = KalmanFilterXYWH()
16 |
17 | def __init__(self, tlwh, score, cls, feat=None, feat_history=50):
18 | super().__init__(tlwh, score, cls)
19 |
20 | self.smooth_feat = None
21 | self.curr_feat = None
22 | if feat is not None:
23 | self.update_features(feat)
24 | self.features = deque([], maxlen=feat_history)
25 | self.alpha = 0.9
26 |
27 | def update_features(self, feat):
28 | feat /= np.linalg.norm(feat)
29 | self.curr_feat = feat
30 | if self.smooth_feat is None:
31 | self.smooth_feat = feat
32 | else:
33 | self.smooth_feat = self.alpha * self.smooth_feat + (1 - self.alpha) * feat
34 | self.features.append(feat)
35 | self.smooth_feat /= np.linalg.norm(self.smooth_feat)
36 |
37 | def predict(self):
38 | mean_state = self.mean.copy()
39 | if self.state != TrackState.Tracked:
40 | mean_state[6] = 0
41 | mean_state[7] = 0
42 |
43 | self.mean, self.covariance = self.kalman_filter.predict(mean_state, self.covariance)
44 |
45 | def re_activate(self, new_track, frame_id, new_id=False):
46 | if new_track.curr_feat is not None:
47 | self.update_features(new_track.curr_feat)
48 | super().re_activate(new_track, frame_id, new_id)
49 |
50 | def update(self, new_track, frame_id):
51 | if new_track.curr_feat is not None:
52 | self.update_features(new_track.curr_feat)
53 | super().update(new_track, frame_id)
54 |
55 | @property
56 | def tlwh(self):
57 | """Get current position in bounding box format `(top left x, top left y,
58 | width, height)`.
59 | """
60 | if self.mean is None:
61 | return self._tlwh.copy()
62 | ret = self.mean[:4].copy()
63 | ret[:2] -= ret[2:] / 2
64 | return ret
65 |
66 | @staticmethod
67 | def multi_predict(stracks):
68 | if len(stracks) <= 0:
69 | return
70 | multi_mean = np.asarray([st.mean.copy() for st in stracks])
71 | multi_covariance = np.asarray([st.covariance for st in stracks])
72 | for i, st in enumerate(stracks):
73 | if st.state != TrackState.Tracked:
74 | multi_mean[i][6] = 0
75 | multi_mean[i][7] = 0
76 | multi_mean, multi_covariance = BOTrack.shared_kalman.multi_predict(multi_mean, multi_covariance)
77 | for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):
78 | stracks[i].mean = mean
79 | stracks[i].covariance = cov
80 |
81 | def convert_coords(self, tlwh):
82 | return self.tlwh_to_xywh(tlwh)
83 |
84 | @staticmethod
85 | def tlwh_to_xywh(tlwh):
86 | """Convert bounding box to format `(center x, center y, width,
87 | height)`.
88 | """
89 | ret = np.asarray(tlwh).copy()
90 | ret[:2] += ret[2:] / 2
91 | return ret
92 |
93 |
94 | class BOTSORT(BYTETracker):
95 |
96 | def __init__(self, args, frame_rate=30):
97 | super().__init__(args, frame_rate)
98 | # ReID module
99 | self.proximity_thresh = args.proximity_thresh
100 | self.appearance_thresh = args.appearance_thresh
101 |
102 | if args.with_reid:
103 | # BoT-SORT (ReID) is not supported yet
104 | self.encoder = None
105 | # self.gmc = GMC(method=args.cmc_method, verbose=[args.name, args.ablation])
106 | self.gmc = GMC(method=args.cmc_method)
107 |
108 | def get_kalmanfilter(self):
109 | return KalmanFilterXYWH()
110 |
111 | def init_track(self, dets, scores, cls, img=None):
112 | if len(dets) == 0:
113 | return []
114 | if self.args.with_reid and self.encoder is not None:
115 | features_keep = self.encoder.inference(img, dets)
116 | return [BOTrack(xyxy, s, c, f) for (xyxy, s, c, f) in zip(dets, scores, cls, features_keep)] # detections
117 | else:
118 | return [BOTrack(xyxy, s, c) for (xyxy, s, c) in zip(dets, scores, cls)] # detections
119 |
120 | def get_dists(self, tracks, detections):
121 | dists = matching.iou_distance(tracks, detections)
122 | dists_mask = (dists > self.proximity_thresh)
123 |
124 | # TODO: mot20
125 | # if not self.args.mot20:
126 | dists = matching.fuse_score(dists, detections)
127 |
128 | if self.args.with_reid and self.encoder is not None:
129 | emb_dists = matching.embedding_distance(tracks, detections) / 2.0
130 | emb_dists[emb_dists > self.appearance_thresh] = 1.0
131 | emb_dists[dists_mask] = 1.0
132 | dists = np.minimum(dists, emb_dists)
133 | return dists
134 |
135 | def multi_predict(self, tracks):
136 | BOTrack.multi_predict(tracks)
137 |
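
A minimal construction sketch (not part of the source): BOTSORT expects an argparse-style namespace whose field names mirror the tracker cfg (botsort.yaml); the values below are illustrative, not the shipped defaults.

from types import SimpleNamespace

args = SimpleNamespace(
    # BYTETracker fields (association thresholds and track buffer)
    track_high_thresh=0.5, track_low_thresh=0.1, new_track_thresh=0.6,
    track_buffer=30, match_thresh=0.8,
    # BOTSORT-specific fields read in __init__() and get_dists()
    proximity_thresh=0.5, appearance_thresh=0.25,
    with_reid=False,             # ReID encoder is not wired up, so leave disabled
    cmc_method='sparseOptFlow',  # camera motion compensation method passed to GMC
)

tracker = BOTSORT(args, frame_rate=30)  # then call tracker.update(...) once per frame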
--------------------------------------------------------------------------------
/ultralytics/tracker/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wudashuo/yolov8/3861e6c82aaa1bbb214f020ece3a4bd4712eacbe/ultralytics/tracker/utils/__init__.py
--------------------------------------------------------------------------------
/ultralytics/yolo/__init__.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | from . import v8
4 |
5 | __all__ = 'v8', # tuple or list
6 |
--------------------------------------------------------------------------------
/ultralytics/yolo/data/__init__.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | from .base import BaseDataset
4 | from .build import build_classification_dataloader, build_dataloader, load_inference_source
5 | from .dataset import ClassificationDataset, SemanticDataset, YOLODataset
6 | from .dataset_wrappers import MixAndRectDataset
7 |
8 | __all__ = ('BaseDataset', 'ClassificationDataset', 'MixAndRectDataset', 'SemanticDataset', 'YOLODataset',
9 | 'build_classification_dataloader', 'build_dataloader', 'load_inference_source')
10 |
--------------------------------------------------------------------------------
/ultralytics/yolo/data/dataloaders/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wudashuo/yolov8/3861e6c82aaa1bbb214f020ece3a4bd4712eacbe/ultralytics/yolo/data/dataloaders/__init__.py
--------------------------------------------------------------------------------
/ultralytics/yolo/data/dataset_wrappers.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | import collections
4 | from copy import deepcopy
5 |
6 | from .augment import LetterBox
7 |
8 |
9 | class MixAndRectDataset:
10 | """A wrapper of multiple images mixed dataset.
11 |
12 | Args:
13 | dataset (:obj:`BaseDataset`): The dataset to be mixed.
14 | transforms (Sequence[dict]): config dict to be composed.
15 | """
16 |
17 | def __init__(self, dataset):
18 | self.dataset = dataset
19 | self.imgsz = dataset.imgsz
20 |
21 | def __len__(self):
22 | return len(self.dataset)
23 |
24 | def __getitem__(self, index):
25 | labels = deepcopy(self.dataset[index])
26 | for transform in self.dataset.transforms.tolist():
27 | # mosaic and mixup
28 | if hasattr(transform, 'get_indexes'):
29 | indexes = transform.get_indexes(self.dataset)
30 | if not isinstance(indexes, collections.abc.Sequence):
31 | indexes = [indexes]
32 | mix_labels = [deepcopy(self.dataset[index]) for index in indexes]
33 | labels['mix_labels'] = mix_labels
34 | if self.dataset.rect and isinstance(transform, LetterBox):
35 | transform.new_shape = self.dataset.batch_shapes[self.dataset.batch[index]]
36 | labels = transform(labels)
37 | if 'mix_labels' in labels:
38 | labels.pop('mix_labels')
39 | return labels
40 |
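
A self-contained sketch of the interface MixAndRectDataset expects from the wrapped dataset (toy stand-ins, not real YOLO classes): `transforms` must expose tolist(), and mosaic/mixup-style transforms are detected by the presence of a get_indexes() method.

import random


class _ToyMix:  # stands in for Mosaic/MixUp
    def get_indexes(self, dataset):
        return random.randint(0, len(dataset) - 1)  # one extra sample index

    def __call__(self, labels):
        labels['mixed'] = len(labels.get('mix_labels', []))  # consume mixed samples
        return labels


class _ToyTransforms:
    def __init__(self, transforms):
        self._t = transforms

    def tolist(self):
        return self._t


class _ToyDataset:
    imgsz, rect = 640, False
    transforms = _ToyTransforms([_ToyMix()])

    def __len__(self):
        return 4

    def __getitem__(self, i):
        return {'im_file': f'{i}.jpg'}


wrapped = MixAndRectDataset(_ToyDataset())
print(wrapped[0])  # {'im_file': '0.jpg', 'mixed': 1}; 'mix_labels' is popped before return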
--------------------------------------------------------------------------------
/ultralytics/yolo/data/scripts/download_weights.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Ultralytics YOLO 🚀, GPL-3.0 license
3 | # Download latest models from https://github.com/ultralytics/assets/releases
4 | # Example usage: bash ultralytics/yolo/data/scripts/download_weights.sh
5 | # parent
6 | # └── weights
7 | # ├── yolov8n.pt ← downloads here
8 | # ├── yolov8s.pt
9 | # └── ...
10 |
11 | python - <<EOF
[heredoc body truncated in source dump]
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/autobatch.py:
--------------------------------------------------------------------------------
[lines 1-61 truncated in source dump]
62 | if b >= batch_sizes[i]: # y intercept above failure point
63 | b = batch_sizes[max(i - 1, 0)] # select prior safe point
64 | if b < 1 or b > 1024: # b outside of safe range
65 | b = batch_size
66 | LOGGER.info(f'{prefix}WARNING ⚠️ CUDA anomaly detected, using default batch-size {batch_size}.')
67 |
68 | fraction = (np.polyval(p, b) + r + a) / t # actual fraction predicted
69 | LOGGER.info(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅')
70 | return b
71 | except Exception as e:
72 | LOGGER.warning(f'{prefix}WARNING ⚠️ error detected: {e}, using default batch-size {batch_size}.')
73 | return batch_size
74 |
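
A standalone sketch of the linear fit this fragment finishes (the solving step reflects the assumed shape of the truncated lines, mirroring the `np.polyval` fraction computed above): memory use is profiled at a few batch sizes, a degree-1 polynomial is fit, and the batch size whose predicted usage hits the target fraction is solved for.

import numpy as np

batch_sizes = [1, 2, 4, 8, 16]
y = [0.5, 0.9, 1.7, 3.3, 6.5]          # measured CUDA memory (GB) at each batch size
t, r, a = 8.0, 0.3, 0.2                # total, reserved, allocated memory (GB)
f = t - (r + a)                        # free memory available
fraction = 0.60                        # target fraction of memory to use

p = np.polyfit(batch_sizes, y, deg=1)  # linear fit: memory = p[0] * b + p[1]
b = int((f * fraction - p[1]) / p[0])  # batch size whose predicted memory hits the target
print(b, (np.polyval(p, b) + r + a) / t)  # chosen batch size, predicted fraction (11, ~0.62)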
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/benchmarks.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | """
3 | Benchmark YOLO model export formats for speed and accuracy
4 |
5 | Usage:
6 | from ultralytics.yolo.utils.benchmarks import benchmark
7 | benchmark(model='yolov8n.pt', imgsz=160)
8 |
9 | Format | `format=argument` | Model
10 | --- | --- | ---
11 | PyTorch | - | yolov8n.pt
12 | TorchScript | `torchscript` | yolov8n.torchscript
13 | ONNX | `onnx` | yolov8n.onnx
14 | OpenVINO | `openvino` | yolov8n_openvino_model/
15 | TensorRT | `engine` | yolov8n.engine
16 | CoreML | `coreml` | yolov8n.mlmodel
17 | TensorFlow SavedModel | `saved_model` | yolov8n_saved_model/
18 | TensorFlow GraphDef | `pb` | yolov8n.pb
19 | TensorFlow Lite | `tflite` | yolov8n.tflite
20 | TensorFlow Edge TPU | `edgetpu` | yolov8n_edgetpu.tflite
21 | TensorFlow.js | `tfjs` | yolov8n_web_model/
22 | PaddlePaddle | `paddle` | yolov8n_paddle_model/
23 | """
24 |
25 | import platform
26 | import time
27 | from pathlib import Path
28 |
29 | import pandas as pd
30 |
31 | from ultralytics import YOLO
32 | from ultralytics.yolo.engine.exporter import export_formats
33 | from ultralytics.yolo.utils import LINUX, LOGGER, ROOT, SETTINGS
34 | from ultralytics.yolo.utils.checks import check_yolo
35 | from ultralytics.yolo.utils.downloads import download
36 | from ultralytics.yolo.utils.files import file_size
37 | from ultralytics.yolo.utils.torch_utils import select_device
38 |
39 |
40 | def benchmark(model=Path(SETTINGS['weights_dir']) / 'yolov8n.pt', imgsz=160, half=False, device='cpu', hard_fail=False):
41 | device = select_device(device, verbose=False)
42 | if isinstance(model, (str, Path)):
43 | model = YOLO(model)
44 |
45 | y = []
46 | t0 = time.time()
47 | for i, (name, format, suffix, cpu, gpu) in export_formats().iterrows(): # index, (name, format, suffix, CPU, GPU)
48 | emoji, filename = '❌', None # export defaults
49 | try:
50 | if model.task == 'classify':
51 | assert i != 11, 'paddle cls exports coming soon'
52 | assert i != 9 or LINUX, 'Edge TPU export only supported on Linux'
53 | if 'cpu' in device.type:
54 | assert cpu, 'inference not supported on CPU'
55 | if 'cuda' in device.type:
56 | assert gpu, 'inference not supported on GPU'
57 |
58 | # Export
59 | if format == '-':
60 | filename = model.ckpt_path or model.cfg
61 | export = model # PyTorch format
62 | else:
63 | filename = model.export(imgsz=imgsz, format=format, half=half, device=device) # all others
64 | export = YOLO(filename, task=model.task)
65 | assert suffix in str(filename), 'export failed'
66 | emoji = '❎' # indicates export succeeded
67 |
68 | # Predict
69 | assert i not in (9, 10), 'inference not supported' # Edge TPU and TF.js are unsupported
70 | assert i != 5 or platform.system() == 'Darwin', 'inference only supported on macOS>=10.13' # CoreML
71 | if not (ROOT / 'assets/bus.jpg').exists():
72 | download(url='https://ultralytics.com/images/bus.jpg', dir=ROOT / 'assets')
73 | export.predict(ROOT / 'assets/bus.jpg', imgsz=imgsz, device=device, half=half)
74 |
75 | # Validate
76 | if model.task == 'detect':
77 | data, key = 'coco128.yaml', 'metrics/mAP50-95(B)'
78 | elif model.task == 'segment':
79 | data, key = 'coco128-seg.yaml', 'metrics/mAP50-95(M)'
80 | elif model.task == 'classify':
81 | data, key = 'imagenet100', 'metrics/accuracy_top5'
82 |
83 | results = export.val(data=data, batch=1, imgsz=imgsz, plots=False, device=device, half=half, verbose=False)
84 | metric, speed = results.results_dict[key], results.speed['inference']
85 | y.append([name, '✅', round(file_size(filename), 1), round(metric, 4), round(speed, 2)])
86 | except Exception as e:
87 | if hard_fail:
88 | assert type(e) is AssertionError, f'Benchmark hard_fail for {name}: {e}'
89 | LOGGER.warning(f'ERROR ❌️ Benchmark failure for {name}: {e}')
90 | y.append([name, emoji, round(file_size(filename), 1), None, None]) # mAP, t_inference
91 |
92 | # Print results
93 | check_yolo(device=device) # print system info
94 | df = pd.DataFrame(y, columns=['Format', 'Status❔', 'Size (MB)', key, 'Inference time (ms/im)'])
95 |
96 | name = Path(model.ckpt_path).name
97 | s = f'\nBenchmarks complete for {name} on {data} at imgsz={imgsz} ({time.time() - t0:.2f}s)\n{df}\n'
98 | LOGGER.info(s)
99 | with open('benchmarks.log', 'a', errors='ignore', encoding='utf-8') as f:
100 | f.write(s)
101 |
102 | if hard_fail and isinstance(hard_fail, float):
103 | metrics = df[key].array # values to compare to floor
104 | floor = hard_fail # minimum metric floor to pass, i.e. = 0.29 mAP for YOLOv5n
105 | assert all(x > floor for x in metrics if pd.notna(x)), f'HARD FAIL: one or more metric(s) < floor {floor}'
106 |
107 | return df
108 |
109 |
110 | if __name__ == '__main__':
111 | benchmark()
112 |
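
A short usage sketch (threshold illustrative): passing a float for `hard_fail` makes benchmark() assert that every successfully validated format scores above that floor, in addition to running the export/predict/val loop.

from ultralytics.yolo.utils.benchmarks import benchmark

df = benchmark(model='yolov8n.pt', imgsz=160, half=False, device='cpu', hard_fail=0.26)
print(df)  # pandas DataFrame: Format, Status, Size (MB), metric, inference time (ms/im)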
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/callbacks/__init__.py:
--------------------------------------------------------------------------------
1 | from .base import add_integration_callbacks, default_callbacks
2 |
3 | __all__ = 'add_integration_callbacks', 'default_callbacks'
4 |
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/callbacks/base.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | """
3 | Base callbacks
4 | """
5 |
6 |
7 | # Trainer callbacks ----------------------------------------------------------------------------------------------------
8 | def on_pretrain_routine_start(trainer):
9 | pass
10 |
11 |
12 | def on_pretrain_routine_end(trainer):
13 | pass
14 |
15 |
16 | def on_train_start(trainer):
17 | pass
18 |
19 |
20 | def on_train_epoch_start(trainer):
21 | pass
22 |
23 |
24 | def on_train_batch_start(trainer):
25 | pass
26 |
27 |
28 | def optimizer_step(trainer):
29 | pass
30 |
31 |
32 | def on_before_zero_grad(trainer):
33 | pass
34 |
35 |
36 | def on_train_batch_end(trainer):
37 | pass
38 |
39 |
40 | def on_train_epoch_end(trainer):
41 | pass
42 |
43 |
44 | def on_fit_epoch_end(trainer):
45 | pass
46 |
47 |
48 | def on_model_save(trainer):
49 | pass
50 |
51 |
52 | def on_train_end(trainer):
53 | pass
54 |
55 |
56 | def on_params_update(trainer):
57 | pass
58 |
59 |
60 | def teardown(trainer):
61 | pass
62 |
63 |
64 | # Validator callbacks --------------------------------------------------------------------------------------------------
65 | def on_val_start(validator):
66 | pass
67 |
68 |
69 | def on_val_batch_start(validator):
70 | pass
71 |
72 |
73 | def on_val_batch_end(validator):
74 | pass
75 |
76 |
77 | def on_val_end(validator):
78 | pass
79 |
80 |
81 | # Predictor callbacks --------------------------------------------------------------------------------------------------
82 | def on_predict_start(predictor):
83 | pass
84 |
85 |
86 | def on_predict_batch_start(predictor):
87 | pass
88 |
89 |
90 | def on_predict_batch_end(predictor):
91 | pass
92 |
93 |
94 | def on_predict_postprocess_end(predictor):
95 | pass
96 |
97 |
98 | def on_predict_end(predictor):
99 | pass
100 |
101 |
102 | # Exporter callbacks ---------------------------------------------------------------------------------------------------
103 | def on_export_start(exporter):
104 | pass
105 |
106 |
107 | def on_export_end(exporter):
108 | pass
109 |
110 |
111 | default_callbacks = {
112 | # Run in trainer
113 | 'on_pretrain_routine_start': [on_pretrain_routine_start],
114 | 'on_pretrain_routine_end': [on_pretrain_routine_end],
115 | 'on_train_start': [on_train_start],
116 | 'on_train_epoch_start': [on_train_epoch_start],
117 | 'on_train_batch_start': [on_train_batch_start],
118 | 'optimizer_step': [optimizer_step],
119 | 'on_before_zero_grad': [on_before_zero_grad],
120 | 'on_train_batch_end': [on_train_batch_end],
121 | 'on_train_epoch_end': [on_train_epoch_end],
122 | 'on_fit_epoch_end': [on_fit_epoch_end], # fit = train + val
123 | 'on_model_save': [on_model_save],
124 | 'on_train_end': [on_train_end],
125 | 'on_params_update': [on_params_update],
126 | 'teardown': [teardown],
127 |
128 | # Run in validator
129 | 'on_val_start': [on_val_start],
130 | 'on_val_batch_start': [on_val_batch_start],
131 | 'on_val_batch_end': [on_val_batch_end],
132 | 'on_val_end': [on_val_end],
133 |
134 | # Run in predictor
135 | 'on_predict_start': [on_predict_start],
136 | 'on_predict_batch_start': [on_predict_batch_start],
137 | 'on_predict_postprocess_end': [on_predict_postprocess_end],
138 | 'on_predict_batch_end': [on_predict_batch_end],
139 | 'on_predict_end': [on_predict_end],
140 |
141 | # Run in exporter
142 | 'on_export_start': [on_export_start],
143 | 'on_export_end': [on_export_end]}
144 |
145 |
146 | def add_integration_callbacks(instance):
147 | from .clearml import callbacks as clearml_callbacks
148 | from .comet import callbacks as comet_callbacks
149 | from .hub import callbacks as hub_callbacks
150 | from .tensorboard import callbacks as tb_callbacks
151 |
152 | for x in clearml_callbacks, comet_callbacks, hub_callbacks, tb_callbacks:
153 | for k, v in x.items():
154 | if v not in instance.callbacks[k]: # prevent duplicate callbacks addition
155 | instance.callbacks[k].append(v) # callback[name].append(func)
156 |
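
A sketch of how this registry is consumed (ToyTrainer is hypothetical, mirroring the append logic in add_integration_callbacks above): an instance holds a copy of default_callbacks, extra functions are appended under an event name, and all handlers fire in registration order.

from copy import deepcopy


def my_on_train_start(trainer):
    print(f'starting run in {trainer.save_dir}')


class ToyTrainer:
    def __init__(self):
        self.callbacks = deepcopy(default_callbacks)  # event name -> list of functions
        self.save_dir = 'runs/train/exp'

    def run_callbacks(self, event):
        for callback in self.callbacks.get(event, []):
            callback(self)


trainer = ToyTrainer()
trainer.callbacks['on_train_start'].append(my_on_train_start)
trainer.run_callbacks('on_train_start')  # no-op default runs first, then the custom print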
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/callbacks/clearml.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | from ultralytics.yolo.utils import LOGGER, TESTS_RUNNING
3 | from ultralytics.yolo.utils.torch_utils import get_flops, get_num_params
4 |
5 | try:
6 | import clearml
7 | from clearml import Task
8 |
9 | assert clearml.__version__ # verify package is not directory
10 | assert not TESTS_RUNNING # do not log pytest
11 | except (ImportError, AssertionError):
12 | clearml = None
13 |
14 |
15 | def _log_images(imgs_dict, group='', step=0):
16 | task = Task.current_task()
17 | if task:
18 | for k, v in imgs_dict.items():
19 | task.get_logger().report_image(group, k, step, v)
20 |
21 |
22 | def on_pretrain_routine_start(trainer):
23 | try:
24 | task = Task.init(project_name=trainer.args.project or 'YOLOv8',
25 | task_name=trainer.args.name,
26 | tags=['YOLOv8'],
27 | output_uri=True,
28 | reuse_last_task_id=False,
29 | auto_connect_frameworks={'pytorch': False})
30 | task.connect(vars(trainer.args), name='General')
31 | except Exception as e:
32 | LOGGER.warning(f'WARNING ⚠️ ClearML installed but not initialized correctly, not logging this run. {e}')
33 |
34 |
35 | def on_train_epoch_end(trainer):
36 | if trainer.epoch == 1:
37 | _log_images({f.stem: str(f) for f in trainer.save_dir.glob('train_batch*.jpg')}, 'Mosaic', trainer.epoch)
38 |
39 |
40 | def on_fit_epoch_end(trainer):
41 | task = Task.current_task()
42 | if task and trainer.epoch == 0:
43 | model_info = {
44 | 'model/parameters': get_num_params(trainer.model),
45 | 'model/GFLOPs': round(get_flops(trainer.model), 3),
46 | 'model/speed(ms)': round(trainer.validator.speed['inference'], 3)}
47 | task.connect(model_info, name='Model')
48 |
49 |
50 | def on_train_end(trainer):
51 | task = Task.current_task()
52 | if task:
53 | task.update_output_model(model_path=str(trainer.best), model_name=trainer.args.name, auto_delete_file=False)
54 |
55 |
56 | callbacks = {
57 | 'on_pretrain_routine_start': on_pretrain_routine_start,
58 | 'on_train_epoch_end': on_train_epoch_end,
59 | 'on_fit_epoch_end': on_fit_epoch_end,
60 | 'on_train_end': on_train_end} if clearml else {}
61 |
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/callbacks/comet.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | from ultralytics.yolo.utils import LOGGER, TESTS_RUNNING
3 | from ultralytics.yolo.utils.torch_utils import get_flops, get_num_params
4 |
5 | try:
6 | import comet_ml
7 |
8 | assert not TESTS_RUNNING # do not log pytest
9 | assert comet_ml.__version__ # verify package is not directory
10 | except (ImportError, AssertionError):
11 | comet_ml = None
12 |
13 |
14 | def on_pretrain_routine_start(trainer):
15 | try:
16 | experiment = comet_ml.Experiment(project_name=trainer.args.project or 'YOLOv8')
17 | experiment.log_parameters(vars(trainer.args))
18 | except Exception as e:
19 | LOGGER.warning(f'WARNING ⚠️ Comet installed but not initialized correctly, not logging this run. {e}')
20 |
21 |
22 | def on_train_epoch_end(trainer):
23 | experiment = comet_ml.get_global_experiment()
24 | if experiment:
25 | experiment.log_metrics(trainer.label_loss_items(trainer.tloss, prefix='train'), step=trainer.epoch + 1)
26 | if trainer.epoch == 1:
27 | for f in trainer.save_dir.glob('train_batch*.jpg'):
28 | experiment.log_image(f, name=f.stem, step=trainer.epoch + 1)
29 |
30 |
31 | def on_fit_epoch_end(trainer):
32 | experiment = comet_ml.get_global_experiment()
33 | if experiment:
34 | experiment.log_metrics(trainer.metrics, step=trainer.epoch + 1)
35 | if trainer.epoch == 0:
36 | model_info = {
37 | 'model/parameters': get_num_params(trainer.model),
38 | 'model/GFLOPs': round(get_flops(trainer.model), 3),
39 | 'model/speed(ms)': round(trainer.validator.speed['inference'], 3)}
40 | experiment.log_metrics(model_info, step=trainer.epoch + 1)
41 |
42 |
43 | def on_train_end(trainer):
44 | experiment = comet_ml.get_global_experiment()
45 | if experiment:
46 | experiment.log_model('YOLOv8', file_or_folder=str(trainer.best), file_name='best.pt', overwrite=True)
47 |
48 |
49 | callbacks = {
50 | 'on_pretrain_routine_start': on_pretrain_routine_start,
51 | 'on_train_epoch_end': on_train_epoch_end,
52 | 'on_fit_epoch_end': on_fit_epoch_end,
53 | 'on_train_end': on_train_end} if comet_ml else {}
54 |
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/callbacks/hub.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | import json
4 | from time import time
5 |
6 | from ultralytics.hub.utils import PREFIX, traces
7 | from ultralytics.yolo.utils import LOGGER
8 | from ultralytics.yolo.utils.torch_utils import get_flops, get_num_params
9 |
10 |
11 | def on_pretrain_routine_end(trainer):
12 | session = getattr(trainer, 'hub_session', None)
13 | if session:
14 | # Start timer for upload rate limit
15 | LOGGER.info(f'{PREFIX}View model at https://hub.ultralytics.com/models/{session.model_id} 🚀')
16 | session.timers = {'metrics': time(), 'ckpt': time()} # start timer on session.rate_limit
17 |
18 |
19 | def on_fit_epoch_end(trainer):
20 | session = getattr(trainer, 'hub_session', None)
21 | if session:
22 | # Upload metrics after val end
23 | all_plots = {**trainer.label_loss_items(trainer.tloss, prefix='train'), **trainer.metrics}
24 | if trainer.epoch == 0:
25 | model_info = {
26 | 'model/parameters': get_num_params(trainer.model),
27 | 'model/GFLOPs': round(get_flops(trainer.model), 3),
28 | 'model/speed(ms)': round(trainer.validator.speed['inference'], 3)}
29 | all_plots = {**all_plots, **model_info}
30 | session.metrics_queue[trainer.epoch] = json.dumps(all_plots)
31 | if time() - session.timers['metrics'] > session.rate_limits['metrics']:
32 | session.upload_metrics()
33 | session.timers['metrics'] = time() # reset timer
34 | session.metrics_queue = {} # reset queue
35 |
36 |
37 | def on_model_save(trainer):
38 | session = getattr(trainer, 'hub_session', None)
39 | if session:
40 | # Upload checkpoints with rate limiting
41 | is_best = trainer.best_fitness == trainer.fitness
42 | if time() - session.timers['ckpt'] > session.rate_limits['ckpt']:
43 | LOGGER.info(f'{PREFIX}Uploading checkpoint {session.model_id}')
44 | session.upload_model(trainer.epoch, trainer.last, is_best)
45 | session.timers['ckpt'] = time() # reset timer
46 |
47 |
48 | def on_train_end(trainer):
49 | session = getattr(trainer, 'hub_session', None)
50 | if session:
51 | # Upload final model and metrics with exponential backoff
52 | LOGGER.info(f'{PREFIX}Syncing final model...')
53 | session.upload_model(trainer.epoch, trainer.best, map=trainer.metrics.get('metrics/mAP50-95(B)', 0), final=True)
54 | session.alive = False # stop heartbeats
55 | LOGGER.info(f'{PREFIX}Done ✅\n'
56 | f'{PREFIX}View model at https://hub.ultralytics.com/models/{session.model_id} 🚀')
57 |
58 |
59 | def on_train_start(trainer):
60 | traces(trainer.args, traces_sample_rate=1.0)
61 |
62 |
63 | def on_val_start(validator):
64 | traces(validator.args, traces_sample_rate=1.0)
65 |
66 |
67 | def on_predict_start(predictor):
68 | traces(predictor.args, traces_sample_rate=1.0)
69 |
70 |
71 | def on_export_start(exporter):
72 | traces(exporter.args, traces_sample_rate=1.0)
73 |
74 |
75 | callbacks = {
76 | 'on_pretrain_routine_end': on_pretrain_routine_end,
77 | 'on_fit_epoch_end': on_fit_epoch_end,
78 | 'on_model_save': on_model_save,
79 | 'on_train_end': on_train_end,
80 | 'on_train_start': on_train_start,
81 | 'on_val_start': on_val_start,
82 | 'on_predict_start': on_predict_start,
83 | 'on_export_start': on_export_start}
84 |
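
A generic sketch of the rate-limit pattern used by on_fit_epoch_end and on_model_save above (limits and payloads are hypothetical): work is always queued, but an upload only fires once the per-key timer window has elapsed.

from time import time

rate_limits = {'metrics': 3.0, 'ckpt': 900.0}  # minimum seconds between uploads
timers = {k: 0.0 for k in rate_limits}         # 0.0 so the first call fires immediately
queue = {}


def maybe_upload(key, payload, upload):
    queue[key] = payload                       # always keep the latest payload queued
    if time() - timers[key] > rate_limits[key]:
        upload(queue.pop(key))                 # flush the queued payload
        timers[key] = time()                   # reset the per-key timer


maybe_upload('metrics', {'train/loss': 0.12}, print)  # fires: timer expired
maybe_upload('metrics', {'train/loss': 0.11}, print)  # queued only: within the 3 s window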
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/callbacks/tensorboard.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | from ultralytics.yolo.utils import LOGGER, TESTS_RUNNING
3 |
4 | try:
5 | from torch.utils.tensorboard import SummaryWriter
6 |
7 | assert not TESTS_RUNNING # do not log pytest
8 | except (ImportError, AssertionError):
9 | SummaryWriter = None
10 |
11 | writer = None # TensorBoard SummaryWriter instance
12 |
13 |
14 | def _log_scalars(scalars, step=0):
15 | if writer:
16 | for k, v in scalars.items():
17 | writer.add_scalar(k, v, step)
18 |
19 |
20 | def on_pretrain_routine_start(trainer):
21 | global writer
22 | try:
23 | writer = SummaryWriter(str(trainer.save_dir))
24 | except Exception as e:
25 | LOGGER.warning(f'WARNING ⚠️ TensorBoard not initialized correctly, not logging this run. {e}')
26 |
27 |
28 | def on_train_batch_end(trainer):
29 | _log_scalars(trainer.label_loss_items(trainer.tloss, prefix='train'), trainer.epoch + 1)
30 |
31 |
32 | def on_fit_epoch_end(trainer):
33 | _log_scalars(trainer.metrics, trainer.epoch + 1)
34 |
35 |
36 | callbacks = {
37 | 'on_pretrain_routine_start': on_pretrain_routine_start,
38 | 'on_fit_epoch_end': on_fit_epoch_end,
39 | 'on_train_batch_end': on_train_batch_end}
40 |
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/dist.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | import os
4 | import re
5 | import shutil
6 | import socket
7 | import sys
8 | import tempfile
9 | from pathlib import Path
10 |
11 | from . import USER_CONFIG_DIR
12 | from .torch_utils import TORCH_1_9
13 |
14 |
15 | def find_free_network_port() -> int:
16 | """Finds a free port on localhost.
17 |
18 | It is useful in single-node training when we don't want to connect to a real main node but have to set the
19 | `MASTER_PORT` environment variable.
20 | """
21 | with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
22 | s.bind(('127.0.0.1', 0))
23 | return s.getsockname()[1] # port
24 |
25 |
26 | def generate_ddp_file(trainer):
27 | module, name = f'{trainer.__class__.__module__}.{trainer.__class__.__name__}'.rsplit('.', 1)
28 |
29 | content = f'''cfg = {vars(trainer.args)} \nif __name__ == "__main__":
30 | from {module} import {name}
31 |
32 | trainer = {name}(cfg=cfg)
33 | trainer.train()'''
34 | (USER_CONFIG_DIR / 'DDP').mkdir(exist_ok=True)
35 | with tempfile.NamedTemporaryFile(prefix='_temp_',
36 | suffix=f'{id(trainer)}.py',
37 | mode='w+',
38 | encoding='utf-8',
39 | dir=USER_CONFIG_DIR / 'DDP',
40 | delete=False) as file:
41 | file.write(content)
42 | return file.name
43 |
44 |
45 | def generate_ddp_command(world_size, trainer):
46 | import __main__ # noqa local import to avoid https://github.com/Lightning-AI/lightning/issues/15218
47 | if not trainer.resume:
48 | shutil.rmtree(trainer.save_dir) # remove the save_dir
49 | file = str(Path(sys.argv[0]).resolve())
50 | safe_pattern = re.compile(r'^[a-zA-Z0-9_. /\\-]{1,128}$') # allowed characters and maximum length of 128
51 | if not (safe_pattern.match(file) and Path(file).exists() and file.endswith('.py')): # using CLI
52 | file = generate_ddp_file(trainer)
53 | dist_cmd = 'torch.distributed.run' if TORCH_1_9 else 'torch.distributed.launch'
54 | port = find_free_network_port()
55 | exclude_args = ['save_dir']
56 | args = [f'{k}={v}' for k, v in vars(trainer.args).items() if k not in exclude_args]
57 | cmd = [sys.executable, '-m', dist_cmd, '--nproc_per_node', f'{world_size}', '--master_port', f'{port}', file] + args
58 | return cmd, file
59 |
60 |
61 | def ddp_cleanup(trainer, file):
62 | # delete temp file if created
63 | if f'{id(trainer)}.py' in file: # if temp_file suffix in file
64 | os.remove(file)
65 |
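
A minimal sketch of the single-node use the docstring describes: reserve a free port and export it as MASTER_PORT (the standard torch.distributed environment variable) before launching workers.

import os

os.environ.setdefault('MASTER_ADDR', '127.0.0.1')
os.environ['MASTER_PORT'] = str(find_free_network_port())  # helper defined above
print('DDP master port:', os.environ['MASTER_PORT'])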
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/files.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | import contextlib
4 | import glob
5 | import os
6 | import urllib
7 | from datetime import datetime
8 | from pathlib import Path
9 |
10 |
11 | class WorkingDirectory(contextlib.ContextDecorator):
12 | # Usage: @WorkingDirectory(dir) decorator or 'with WorkingDirectory(dir):' context manager
13 | def __init__(self, new_dir):
14 | self.dir = new_dir # new dir
15 | self.cwd = Path.cwd().resolve() # current dir
16 |
17 | def __enter__(self):
18 | os.chdir(self.dir)
19 |
20 | def __exit__(self, exc_type, exc_val, exc_tb):
21 | os.chdir(self.cwd)
22 |
23 |
24 | def increment_path(path, exist_ok=False, sep='', mkdir=False):
25 | """
26 | Increments a file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc.
27 |
28 | If the path exists and exist_ok is not set to True, the path will be incremented by appending a number and sep to
29 | the end of the path. If the path is a file, the file extension will be preserved. If the path is a directory, the
30 | number will be appended directly to the end of the path. If mkdir is set to True, the path will be created as a
31 | directory if it does not already exist.
32 |
33 | Args:
34 | path (str or pathlib.Path): Path to increment.
35 | exist_ok (bool, optional): If True, the path will not be incremented and will be returned as-is. Defaults to False.
36 | sep (str, optional): Separator to use between the path and the incrementation number. Defaults to an empty string.
37 | mkdir (bool, optional): If True, the path will be created as a directory if it does not exist. Defaults to False.
38 |
39 | Returns:
40 | pathlib.Path: Incremented path.
41 | """
42 | path = Path(path) # os-agnostic
43 | if path.exists() and not exist_ok:
44 | path, suffix = (path.with_suffix(''), path.suffix) if path.is_file() else (path, '')
45 |
46 | # Find the first available incremented path
47 | for n in range(2, 9999):
48 | p = f'{path}{sep}{n}{suffix}' # increment path
49 | if not os.path.exists(p):
50 | break
51 | path = Path(p)
52 |
53 | if mkdir:
54 | path.mkdir(parents=True, exist_ok=True) # make directory
55 |
56 | return path
57 |
58 |
59 | def file_age(path=__file__):
60 | # Return days since last file update
61 | dt = (datetime.now() - datetime.fromtimestamp(Path(path).stat().st_mtime)) # delta
62 | return dt.days # + dt.seconds / 86400 # fractional days
63 |
64 |
65 | def file_date(path=__file__):
66 | # Return human-readable file modification date, i.e. '2021-3-26'
67 | t = datetime.fromtimestamp(Path(path).stat().st_mtime)
68 | return f'{t.year}-{t.month}-{t.day}'
69 |
70 |
71 | def file_size(path):
72 | # Return file/dir size (MB)
73 | if isinstance(path, (str, Path)):
74 | mb = 1 << 20 # bytes to MiB (1024 ** 2)
75 | path = Path(path)
76 | if path.is_file():
77 | return path.stat().st_size / mb
78 | elif path.is_dir():
79 | return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / mb
80 | return 0.0
81 |
82 |
83 | def url2file(url):
84 | # Convert URL to filename, i.e. https://url.com/file.txt?auth -> file.txt
85 | url = str(Path(url)).replace(':/', '://') # Pathlib turns :// -> :/
86 | return Path(urllib.parse.unquote(url)).name.split('?')[0] # '%2F' to '/', split https://url.com/file.txt?auth
87 |
88 |
89 | def get_latest_run(search_dir='.'):
90 | # Return path to most recent 'last.pt' in /runs (i.e. to --resume from)
91 | last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)
92 | return max(last_list, key=os.path.getctime) if last_list else ''
93 |
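
A usage sketch for increment_path (paths illustrative, assuming a clean working directory):

p1 = increment_path('runs/exp', mkdir=True)           # runs/exp (did not exist yet)
p2 = increment_path('runs/exp', mkdir=True)           # runs/exp2
p3 = increment_path('runs/exp', sep='_', mkdir=True)  # runs/exp_2 (sep changes the candidates)
print(p1, p2, p3)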
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/loss.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | import torch
4 | import torch.nn as nn
5 | import torch.nn.functional as F
6 |
7 | from .metrics import bbox_iou
8 | from .tal import bbox2dist
9 |
10 |
11 | class VarifocalLoss(nn.Module):
12 | # Varifocal loss by Zhang et al. https://arxiv.org/abs/2008.13367
13 | def __init__(self):
14 | super().__init__()
15 |
16 | def forward(self, pred_score, gt_score, label, alpha=0.75, gamma=2.0):
17 | weight = alpha * pred_score.sigmoid().pow(gamma) * (1 - label) + gt_score * label
18 | with torch.cuda.amp.autocast(enabled=False):
19 | loss = (F.binary_cross_entropy_with_logits(pred_score.float(), gt_score.float(), reduction='none') *
20 | weight).sum()
21 | return loss
22 |
23 |
24 | class BboxLoss(nn.Module):
25 |
26 | def __init__(self, reg_max, use_dfl=False):
27 | super().__init__()
28 | self.reg_max = reg_max
29 | self.use_dfl = use_dfl
30 |
31 | def forward(self, pred_dist, pred_bboxes, anchor_points, target_bboxes, target_scores, target_scores_sum, fg_mask):
32 | # IoU loss
33 | weight = torch.masked_select(target_scores.sum(-1), fg_mask).unsqueeze(-1)
34 | iou = bbox_iou(pred_bboxes[fg_mask], target_bboxes[fg_mask], xywh=False, CIoU=True)
35 | loss_iou = ((1.0 - iou) * weight).sum() / target_scores_sum
36 |
37 | # DFL loss
38 | if self.use_dfl:
39 | target_ltrb = bbox2dist(anchor_points, target_bboxes, self.reg_max)
40 | loss_dfl = self._df_loss(pred_dist[fg_mask].view(-1, self.reg_max + 1), target_ltrb[fg_mask]) * weight
41 | loss_dfl = loss_dfl.sum() / target_scores_sum
42 | else:
43 | loss_dfl = torch.tensor(0.0).to(pred_dist.device)
44 |
45 | return loss_iou, loss_dfl
46 |
47 | @staticmethod
48 | def _df_loss(pred_dist, target):
49 | # Return sum of left and right DFL losses
50 | # Distribution Focal Loss (DFL) proposed in Generalized Focal Loss https://ieeexplore.ieee.org/document/9792391
51 | tl = target.long() # target left
52 | tr = tl + 1 # target right
53 | wl = tr - target # weight left
54 | wr = 1 - wl # weight right
55 | return (F.cross_entropy(pred_dist, tl.view(-1), reduction='none').view(tl.shape) * wl +
56 | F.cross_entropy(pred_dist, tr.view(-1), reduction='none').view(tl.shape) * wr).mean(-1, keepdim=True)
57 |
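
A worked numeric sketch of the DFL interpolation in _df_loss above (values illustrative): a continuous target t between integer bins tl and tr = tl + 1 is supervised as a weighted pair of cross-entropy terms with wl = tr - t and wr = t - tl, so the expected bin index recovers t exactly (2 * 0.7 + 3 * 0.3 = 2.3).

import torch
import torch.nn.functional as F

target = torch.tensor([2.3])           # continuous distance target
tl = target.long()                     # left bin: 2
wl, wr = tl + 1 - target, target - tl  # weights: 0.7 and 0.3
pred_dist = torch.randn(1, 16)         # logits over reg_max + 1 = 16 bins

loss = (F.cross_entropy(pred_dist, tl, reduction='none') * wl +
        F.cross_entropy(pred_dist, tl + 1, reduction='none') * wr)
print(loss)  # encourages probability mass split 0.7/0.3 across bins 2 and 3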
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/__init__.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | from ultralytics.yolo.v8 import classify, detect, segment
4 |
5 | __all__ = 'classify', 'segment', 'detect'
6 |
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/classify/__init__.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | from ultralytics.yolo.v8.classify.predict import ClassificationPredictor, predict
4 | from ultralytics.yolo.v8.classify.train import ClassificationTrainer, train
5 | from ultralytics.yolo.v8.classify.val import ClassificationValidator, val
6 |
7 | __all__ = 'ClassificationPredictor', 'predict', 'ClassificationTrainer', 'train', 'ClassificationValidator', 'val'
8 |
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/classify/predict.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | import torch
4 |
5 | from ultralytics.yolo.engine.predictor import BasePredictor
6 | from ultralytics.yolo.engine.results import Results
7 | from ultralytics.yolo.utils import DEFAULT_CFG, ROOT
8 | from ultralytics.yolo.utils.plotting import Annotator
9 |
10 |
11 | class ClassificationPredictor(BasePredictor):
12 |
13 | def get_annotator(self, img):
14 | return Annotator(img, example=str(self.model.names), pil=True)
15 |
16 | def preprocess(self, img):
17 | img = (img if isinstance(img, torch.Tensor) else torch.Tensor(img)).to(self.model.device)
18 | img = img.half() if self.model.fp16 else img.float() # uint8 to fp16/32
19 | return img
20 |
21 | def postprocess(self, preds, img, orig_img):
22 | results = []
23 | for i, pred in enumerate(preds):
24 | orig_img = orig_img[i] if isinstance(orig_img, list) else orig_img
25 | path, _, _, _, _ = self.batch
26 | img_path = path[i] if isinstance(path, list) else path
27 | results.append(Results(orig_img=orig_img, path=img_path, names=self.model.names, probs=pred))
28 |
29 | return results
30 |
31 | def write_results(self, idx, results, batch):
32 | p, im, im0 = batch
33 | log_string = ''
34 | if len(im.shape) == 3:
35 | im = im[None] # expand for batch dim
36 | self.seen += 1
37 | im0 = im0.copy()
38 | if self.source_type.webcam or self.source_type.from_img: # batch_size >= 1
39 | log_string += f'{idx}: '
40 | frame = self.dataset.count
41 | else:
42 | frame = getattr(self.dataset, 'frame', 0)
43 |
44 | self.data_path = p
45 | # save_path = str(self.save_dir / p.name) # im.jpg
46 | self.txt_path = str(self.save_dir / 'labels' / p.stem) + ('' if self.dataset.mode == 'image' else f'_{frame}')
47 | log_string += '%gx%g ' % im.shape[2:] # print string
48 | self.annotator = self.get_annotator(im0)
49 |
50 | result = results[idx]
51 | if len(result) == 0:
52 | return log_string
53 | prob = result.probs
54 | # Print results
55 | n5 = min(len(self.model.names), 5)
56 | top5i = prob.argsort(0, descending=True)[:n5].tolist() # top 5 indices
57 | log_string += f"{', '.join(f'{self.model.names[j]} {prob[j]:.2f}' for j in top5i)}, "
58 |
59 | # write
60 | text = '\n'.join(f'{prob[j]:.2f} {self.model.names[j]}' for j in top5i)
61 | if self.args.save or self.args.show: # Add bbox to image
62 | self.annotator.text((32, 32), text, txt_color=(255, 255, 255))
63 | if self.args.save_txt: # Write to file
64 | with open(f'{self.txt_path}.txt', 'a') as f:
65 | f.write(text + '\n')
66 |
67 | return log_string
68 |
69 |
70 | def predict(cfg=DEFAULT_CFG, use_python=False):
71 | model = cfg.model or 'yolov8n-cls.pt' # or "resnet18"
72 | source = cfg.source if cfg.source is not None else ROOT / 'assets' if (ROOT / 'assets').exists() \
73 | else 'https://ultralytics.com/images/bus.jpg'
74 |
75 | args = dict(model=model, source=source)
76 | if use_python:
77 | from ultralytics import YOLO
78 | YOLO(model)(**args)
79 | else:
80 | predictor = ClassificationPredictor(overrides=args)
81 | predictor.predict_cli()
82 |
83 |
84 | if __name__ == '__main__':
85 | predict()
86 |
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/classify/val.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | from ultralytics.yolo.data import build_classification_dataloader
4 | from ultralytics.yolo.engine.validator import BaseValidator
5 | from ultralytics.yolo.utils import DEFAULT_CFG, LOGGER
6 | from ultralytics.yolo.utils.metrics import ClassifyMetrics
7 |
8 |
9 | class ClassificationValidator(BaseValidator):
10 |
11 | def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None):
12 | super().__init__(dataloader, save_dir, pbar, args)
13 | self.args.task = 'classify'
14 | self.metrics = ClassifyMetrics()
15 |
16 | def get_desc(self):
17 | return ('%22s' + '%11s' * 2) % ('classes', 'top1_acc', 'top5_acc')
18 |
19 | def init_metrics(self, model):
20 | self.pred = []
21 | self.targets = []
22 |
23 | def preprocess(self, batch):
24 | batch['img'] = batch['img'].to(self.device, non_blocking=True)
25 | batch['img'] = batch['img'].half() if self.args.half else batch['img'].float()
26 | batch['cls'] = batch['cls'].to(self.device)
27 | return batch
28 |
29 | def update_metrics(self, preds, batch):
30 | n5 = min(len(self.model.names), 5)
31 | self.pred.append(preds.argsort(1, descending=True)[:, :n5])
32 | self.targets.append(batch['cls'])
33 |
34 | def finalize_metrics(self, *args, **kwargs):
35 | self.metrics.speed = self.speed
36 |
37 | def get_stats(self):
38 | self.metrics.process(self.targets, self.pred)
39 | return self.metrics.results_dict
40 |
41 | def get_dataloader(self, dataset_path, batch_size):
42 | return build_classification_dataloader(path=dataset_path,
43 | imgsz=self.args.imgsz,
44 | batch_size=batch_size,
45 | workers=self.args.workers)
46 |
47 | def print_results(self):
48 | pf = '%22s' + '%11.3g' * len(self.metrics.keys) # print format
49 | LOGGER.info(pf % ('all', self.metrics.top1, self.metrics.top5))
50 |
51 |
52 | def val(cfg=DEFAULT_CFG, use_python=False):
53 | model = cfg.model or 'yolov8n-cls.pt' # or "resnet18"
54 | data = cfg.data or 'mnist160'
55 |
56 | args = dict(model=model, data=data)
57 | if use_python:
58 | from ultralytics import YOLO
59 | YOLO(model).val(**args)
60 | else:
61 | validator = ClassificationValidator(args=args)
62 | validator(model=args['model'])
63 |
64 |
65 | if __name__ == '__main__':
66 | val()
67 |
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/detect/__init__.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | from .predict import DetectionPredictor, predict
4 | from .train import DetectionTrainer, train
5 | from .val import DetectionValidator, val
6 |
7 | __all__ = 'DetectionPredictor', 'predict', 'DetectionTrainer', 'train', 'DetectionValidator', 'val'
8 |
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/detect/predict.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | import torch
4 |
5 | from ultralytics.yolo.engine.predictor import BasePredictor
6 | from ultralytics.yolo.engine.results import Results
7 | from ultralytics.yolo.utils import DEFAULT_CFG, ROOT, ops
8 | from ultralytics.yolo.utils.plotting import Annotator, colors, save_one_box
9 |
10 |
11 | class DetectionPredictor(BasePredictor):
12 |
13 | def get_annotator(self, img):
14 | return Annotator(img, line_width=self.args.line_thickness, example=str(self.model.names))
15 |
16 | def preprocess(self, img):
17 | img = torch.from_numpy(img).to(self.model.device)
18 | img = img.half() if self.model.fp16 else img.float() # uint8 to fp16/32
19 | img /= 255 # 0 - 255 to 0.0 - 1.0
20 | return img
21 |
22 | def postprocess(self, preds, img, orig_img):
23 | preds = ops.non_max_suppression(preds,
24 | self.args.conf,
25 | self.args.iou,
26 | agnostic=self.args.agnostic_nms,
27 | max_det=self.args.max_det,
28 | classes=self.args.classes)
29 |
30 | results = []
31 | for i, pred in enumerate(preds):
32 | orig_img = orig_img[i] if isinstance(orig_img, list) else orig_img
33 | shape = orig_img.shape
34 | pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], shape).round()
35 | path, _, _, _, _ = self.batch
36 | img_path = path[i] if isinstance(path, list) else path
37 | results.append(Results(orig_img=orig_img, path=img_path, names=self.model.names, boxes=pred))
38 | return results
39 |
40 | def write_results(self, idx, results, batch):
41 | p, im, im0 = batch
42 | log_string = ''
43 | if len(im.shape) == 3:
44 | im = im[None] # expand for batch dim
45 | self.seen += 1
46 | imc = im0.copy() if self.args.save_crop else im0
47 | if self.source_type.webcam or self.source_type.from_img: # batch_size >= 1
48 | log_string += f'{idx}: '
49 | frame = self.dataset.count
50 | else:
51 | frame = getattr(self.dataset, 'frame', 0)
52 | self.data_path = p
53 | self.txt_path = str(self.save_dir / 'labels' / p.stem) + ('' if self.dataset.mode == 'image' else f'_{frame}')
54 | log_string += '%gx%g ' % im.shape[2:] # print string
55 | self.annotator = self.get_annotator(im0)
56 |
57 | det = results[idx].boxes # TODO: make boxes inherit from tensors
58 | if len(det) == 0:
59 | return f'{log_string}(no detections), '
60 | for c in det.cls.unique():
61 | n = (det.cls == c).sum() # detections per class
62 | log_string += f"{n} {self.model.names[int(c)]}{'s' * (n > 1)}, "
63 |
64 | # write
65 | for d in reversed(det):
66 | cls, conf = d.cls.squeeze(), d.conf.squeeze()
67 | if self.args.save_txt: # Write to file
68 | line = (cls, *(d.xywhn.view(-1).tolist()), conf) \
69 | if self.args.save_conf else (cls, *(d.xywhn.view(-1).tolist())) # label format
70 | with open(f'{self.txt_path}.txt', 'a') as f:
71 | f.write(('%g ' * len(line)).rstrip() % line + '\n')
72 | if self.args.save or self.args.save_crop or self.args.show: # Add bbox to image
73 | c = int(cls) # integer class
74 | name = f'id:{int(d.id.item())} {self.model.names[c]}' if d.id is not None else self.model.names[c]
75 | label = None if self.args.hide_labels else (name if self.args.hide_conf else f'{name} {conf:.2f}')
76 | self.annotator.box_label(d.xyxy.squeeze(), label, color=colors(c, True))
77 | if self.args.save_crop:
78 | save_one_box(d.xyxy,
79 | imc,
80 | file=self.save_dir / 'crops' / self.model.model.names[c] / f'{self.data_path.stem}.jpg',
81 | BGR=True)
82 |
83 | return log_string
84 |
85 |
86 | def predict(cfg=DEFAULT_CFG, use_python=False):
87 | model = cfg.model or 'yolov8n.pt'
88 | source = cfg.source if cfg.source is not None else ROOT / 'assets' if (ROOT / 'assets').exists() \
89 | else 'https://ultralytics.com/images/bus.jpg'
90 |
91 | args = dict(model=model, source=source)
92 | if use_python:
93 | from ultralytics import YOLO
94 | YOLO(model)(**args)
95 | else:
96 | predictor = DetectionPredictor(overrides=args)
97 | predictor.predict_cli()
98 |
99 |
100 | if __name__ == '__main__':
101 | predict()
102 |
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/segment/__init__.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | from .predict import SegmentationPredictor, predict
4 | from .train import SegmentationTrainer, train
5 | from .val import SegmentationValidator, val
6 |
7 | __all__ = 'SegmentationPredictor', 'predict', 'SegmentationTrainer', 'train', 'SegmentationValidator', 'val'
8 |
--------------------------------------------------------------------------------