├── .gitattributes
├── requirements.txt
├── setup.cfg
├── .pre-commit-config.yaml
├── Dockerfile
├── .dockerignore
├── CONTRIBUTING.md
├── .gitignore
├── hubconf.py
├── detect.py
├── README.md
├── val.py
├── export.py
├── train.py
└── LICENSE
/.gitattributes:
--------------------------------------------------------------------------------
1 | # this drops notebooks from GitHub language stats
2 | *.ipynb linguist-vendored
3 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | # pip install -r requirements.txt
2 |
3 | # Base ----------------------------------------
4 | matplotlib>=3.2.2
5 | numpy>=1.18.5
6 | opencv-python>=4.1.2
7 | Pillow>=7.1.2
8 | PyYAML>=5.3.1
9 | requests>=2.23.0
10 | scipy>=1.4.1
11 | torch>=1.7.0
12 | torchvision>=0.8.1
13 | tqdm>=4.41.0
14 |
15 | # Logging -------------------------------------
16 | tensorboard>=2.4.1
17 | # wandb
18 |
19 | # Plotting ------------------------------------
20 | pandas>=1.1.4
21 | seaborn>=0.11.0
22 |
23 | # Export --------------------------------------
24 | # coremltools>=4.1 # CoreML export
25 | # onnx>=1.9.0 # ONNX export
26 | # onnx-simplifier>=0.3.6 # ONNX simplifier
27 | # scikit-learn==0.19.2 # CoreML quantization
28 | # tensorflow>=2.4.1 # TFLite export
29 | # tensorflowjs>=3.9.0 # TF.js export
30 | # openvino-dev # OpenVINO export
31 |
32 | # Extras --------------------------------------
33 | # albumentations>=1.0.3
34 | # Cython # for pycocotools https://github.com/cocodataset/cocoapi/issues/172
35 | # pycocotools>=2.0 # COCO mAP
36 | # roboflow
37 | thop # FLOPs computation
38 |
--------------------------------------------------------------------------------
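The version floors above can be checked programmatically before running anything; a minimal sketch (assuming only setuptools' `pkg_resources` is available), similar in spirit to the `check_requirements` helper this repo calls from `utils.general`:

```python
# Hedged sketch: verify a few of the base requirements listed above are satisfied.
import pkg_resources

requirements = ['numpy>=1.18.5', 'torch>=1.7.0', 'torchvision>=0.8.1', 'PyYAML>=5.3.1']

for r in requirements:
    try:
        pkg_resources.require(r)  # raises if the package is missing or too old
    except (pkg_resources.DistributionNotFound, pkg_resources.VersionConflict) as e:
        print(f'requirement not satisfied: {r} ({e})')
```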
/setup.cfg:
--------------------------------------------------------------------------------
1 | # Project-wide configuration file, can be used for package metadata and other tool configurations
2 | # Example usage: global configuration for PEP8 (via flake8) settings or default pytest arguments
3 |
4 | [metadata]
5 | license_file = LICENSE
6 | description-file = README.md
7 |
8 |
9 | [tool:pytest]
10 | norecursedirs =
11 | .git
12 | dist
13 | build
14 | addopts =
15 | --doctest-modules
16 | --durations=25
17 | --color=yes
18 |
19 |
20 | [flake8]
21 | max-line-length = 120
22 | exclude = .tox,*.egg,build,temp
23 | select = E,W,F
24 | doctests = True
25 | verbose = 2
26 | # https://pep8.readthedocs.io/en/latest/intro.html#error-codes
27 | format = pylint
28 | # see: https://www.flake8rules.com/
29 | ignore =
30 | E731 # Do not assign a lambda expression, use a def
31 | F405 # name may be undefined, or defined from star imports: module
32 | E402 # module level import not at top of file
33 | F401 # module imported but unused
34 | W504 # line break after binary operator
35 | E127 # continuation line over-indented for visual indent
36 | E231 # missing whitespace after ‘,’, ‘;’, or ‘:’
37 | E501 # line too long
38 | F403 # ‘from module import *’ used; unable to detect undefined names
39 |
40 |
41 | [isort]
42 | # https://pycqa.github.io/isort/docs/configuration/options.html
43 | line_length = 120
44 | multi_line_output = 0
45 |
--------------------------------------------------------------------------------
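To see the effect of the `[flake8]` ignore list above, here is an illustrative snippet (names invented for the example): each line violates one of the ignored codes, so `flake8` run with this setup.cfg reports nothing for it.

```python
# Illustrative only: every violation below is on the ignore list above.
import os  # F401: module imported but unused -- ignored

square = lambda x: x * x  # E731: lambda assigned to a name -- ignored

coords = {'x':1, 'y':2}  # E231: missing whitespace after ':' -- ignored
```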
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | # Define hooks for code formatting
2 | # Will be applied to any updated files on commit if a user has installed and linked the commit hooks
3 |
4 | default_language_version:
5 | python: python3.8
6 |
7 | # Define bot property if installed via https://github.com/marketplace/pre-commit-ci
8 | ci:
9 | autofix_prs: true
10 | autoupdate_commit_msg: '[pre-commit.ci] pre-commit suggestions'
11 | autoupdate_schedule: quarterly
12 | # submodules: true
13 |
14 | repos:
15 | - repo: https://github.com/pre-commit/pre-commit-hooks
16 | rev: v4.1.0
17 | hooks:
18 | - id: end-of-file-fixer
19 | - id: trailing-whitespace
20 | - id: check-case-conflict
21 | - id: check-yaml
22 | - id: check-toml
23 | - id: pretty-format-json
24 | - id: check-docstring-first
25 |
26 | - repo: https://github.com/asottile/pyupgrade
27 | rev: v2.31.0
28 | hooks:
29 | - id: pyupgrade
30 | args: [--py36-plus]
31 | name: Upgrade code
32 |
33 | - repo: https://github.com/PyCQA/isort
34 | rev: 5.10.1
35 | hooks:
36 | - id: isort
37 | name: Sort imports
38 |
39 | # TODO
40 | #- repo: https://github.com/pre-commit/mirrors-yapf
41 | # rev: v0.31.0
42 | # hooks:
43 | # - id: yapf
44 | # name: formatting
45 |
46 | # TODO
47 | #- repo: https://github.com/executablebooks/mdformat
48 | # rev: 0.7.7
49 | # hooks:
50 | # - id: mdformat
51 | # additional_dependencies:
52 | # - mdformat-gfm
53 | # - mdformat-black
54 | # - mdformat_frontmatter
55 |
56 | # TODO
57 | #- repo: https://github.com/asottile/yesqa
58 | # rev: v1.2.3
59 | # hooks:
60 | # - id: yesqa
61 |
62 | - repo: https://github.com/PyCQA/flake8
63 | rev: 4.0.1
64 | hooks:
65 | - id: flake8
66 | name: PEP8
67 |
--------------------------------------------------------------------------------
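For reference, the `pyupgrade` hook above runs with `--py36-plus` and rewrites legacy constructs to modern syntax on commit. An illustrative sketch (class and names invented for the example) of code it would touch:

```python
# Constructs the pyupgrade hook (--py36-plus) rewrites automatically:
class Detector(object):  # rewritten to: class Detector:
    def __init__(self, name):
        super(Detector, self).__init__()  # rewritten to: super().__init__()
        self.label = '{}'.format(name)    # rewritten to: f'{name}'
```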
/Dockerfile:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Start FROM Nvidia PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch
4 | FROM nvcr.io/nvidia/pytorch:21.10-py3
5 |
6 | # Install linux packages
7 | RUN apt update && apt install -y zip htop screen libgl1-mesa-glx
8 |
9 | # Install python dependencies
10 | COPY requirements.txt .
11 | RUN python -m pip install --upgrade pip
12 | RUN pip uninstall -y torch torchvision torchtext
13 | RUN pip install --no-cache -r requirements.txt albumentations wandb gsutil notebook \
14 | torch==1.10.2+cu113 torchvision==0.11.3+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html
15 | # RUN pip install --no-cache -U torch torchvision
16 |
17 | # Create working directory
18 | RUN mkdir -p /usr/src/app
19 | WORKDIR /usr/src/app
20 |
21 | # Copy contents
22 | COPY . /usr/src/app
23 |
24 | # Downloads to user config dir
25 | ADD https://ultralytics.com/assets/Arial.ttf /root/.config/Ultralytics/
26 |
27 | # Set environment variables
28 | # ENV HOME=/usr/src/app
29 |
30 |
31 | # Usage Examples -------------------------------------------------------------------------------------------------------
32 |
33 | # Build and Push
34 | # t=ultralytics/yolov5:latest && sudo docker build -t $t . && sudo docker push $t
35 |
36 | # Pull and Run
37 | # t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all $t
38 |
39 | # Pull and Run with local directory access
40 | # t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all -v "$(pwd)"/datasets:/usr/src/datasets $t
41 |
42 | # Kill all
43 | # sudo docker kill $(sudo docker ps -q)
44 |
45 | # Kill all image-based
46 | # sudo docker kill $(sudo docker ps -qa --filter ancestor=ultralytics/yolov5:latest)
47 |
48 | # Bash into running container
49 | # sudo docker exec -it 5a9b5863d93d bash
50 |
51 | # Bash into stopped container
52 | # id=$(sudo docker ps -qa) && sudo docker start $id && sudo docker exec -it $id bash
53 |
54 | # Clean up
55 | # docker system prune -a --volumes
56 |
57 | # Update Ubuntu drivers
58 | # https://www.maketecheasier.com/install-nvidia-drivers-ubuntu/
59 |
60 | # DDP test
61 | # python -m torch.distributed.run --nproc_per_node 2 --master_port 1 train.py --epochs 3
62 |
63 | # GCP VM from Image
64 | # docker.io/ultralytics/yolov5:latest
65 |
--------------------------------------------------------------------------------
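The "Pull and Run with local directory access" example above can also be driven from Python; a hedged sketch assuming the third-party Docker SDK for Python is installed (`pip install docker`) and that the paths are illustrative:

```python
# Hedged sketch mirroring: docker run -it --ipc=host --gpus all -v .../datasets:/usr/src/datasets $t
import docker

client = docker.from_env()
client.images.pull('ultralytics/yolov5', tag='latest')

container = client.containers.run(
    'ultralytics/yolov5:latest',
    'python detect.py --source data/images',  # command run inside the container
    ipc_mode='host',                          # --ipc=host
    device_requests=[docker.types.DeviceRequest(count=-1, capabilities=[['gpu']])],  # --gpus all
    volumes={'/path/to/datasets': {'bind': '/usr/src/datasets', 'mode': 'rw'}},      # -v mount
    detach=True,
)
print(container.logs())
```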
/.dockerignore:
--------------------------------------------------------------------------------
1 | # Repo-specific DockerIgnore -------------------------------------------------------------------------------------------
2 | #.git
3 | .cache
4 | .idea
5 | runs
6 | output
7 | coco
8 | storage.googleapis.com
9 |
10 | data/samples/*
11 | **/results*.csv
12 | *.jpg
13 |
14 | # Neural Network weights -----------------------------------------------------------------------------------------------
15 | **/*.pt
16 | **/*.pth
17 | **/*.onnx
18 | **/*.engine
19 | **/*.mlmodel
20 | **/*.torchscript
21 | **/*.torchscript.pt
22 | **/*.tflite
23 | **/*.h5
24 | **/*.pb
25 | *_saved_model/
26 | *_web_model/
27 | *_openvino_model/
28 |
29 | # Below Copied From .gitignore -----------------------------------------------------------------------------------------
30 | # Below Copied From .gitignore -----------------------------------------------------------------------------------------
31 |
32 |
33 | # GitHub Python GitIgnore ----------------------------------------------------------------------------------------------
34 | # Byte-compiled / optimized / DLL files
35 | __pycache__/
36 | *.py[cod]
37 | *$py.class
38 |
39 | # C extensions
40 | *.so
41 |
42 | # Distribution / packaging
43 | .Python
44 | env/
45 | build/
46 | develop-eggs/
47 | dist/
48 | downloads/
49 | eggs/
50 | .eggs/
51 | lib/
52 | lib64/
53 | parts/
54 | sdist/
55 | var/
56 | wheels/
57 | *.egg-info/
58 | wandb/
59 | .installed.cfg
60 | *.egg
61 |
62 | # PyInstaller
63 | # Usually these files are written by a python script from a template
64 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
65 | *.manifest
66 | *.spec
67 |
68 | # Installer logs
69 | pip-log.txt
70 | pip-delete-this-directory.txt
71 |
72 | # Unit test / coverage reports
73 | htmlcov/
74 | .tox/
75 | .coverage
76 | .coverage.*
77 | .cache
78 | nosetests.xml
79 | coverage.xml
80 | *.cover
81 | .hypothesis/
82 |
83 | # Translations
84 | *.mo
85 | *.pot
86 |
87 | # Django stuff:
88 | *.log
89 | local_settings.py
90 |
91 | # Flask stuff:
92 | instance/
93 | .webassets-cache
94 |
95 | # Scrapy stuff:
96 | .scrapy
97 |
98 | # Sphinx documentation
99 | docs/_build/
100 |
101 | # PyBuilder
102 | target/
103 |
104 | # Jupyter Notebook
105 | .ipynb_checkpoints
106 |
107 | # pyenv
108 | .python-version
109 |
110 | # celery beat schedule file
111 | celerybeat-schedule
112 |
113 | # SageMath parsed files
114 | *.sage.py
115 |
116 | # dotenv
117 | .env
118 |
119 | # virtualenv
120 | .venv*
121 | venv*/
122 | ENV*/
123 |
124 | # Spyder project settings
125 | .spyderproject
126 | .spyproject
127 |
128 | # Rope project settings
129 | .ropeproject
130 |
131 | # mkdocs documentation
132 | /site
133 |
134 | # mypy
135 | .mypy_cache/
136 |
137 |
138 | # https://github.com/github/gitignore/blob/master/Global/macOS.gitignore -----------------------------------------------
139 |
140 | # General
141 | .DS_Store
142 | .AppleDouble
143 | .LSOverride
144 |
145 | # Icon must end with two \r
146 | Icon
147 | Icon?
148 |
149 | # Thumbnails
150 | ._*
151 |
152 | # Files that might appear in the root of a volume
153 | .DocumentRevisions-V100
154 | .fseventsd
155 | .Spotlight-V100
156 | .TemporaryItems
157 | .Trashes
158 | .VolumeIcon.icns
159 | .com.apple.timemachine.donotpresent
160 |
161 | # Directories potentially created on remote AFP share
162 | .AppleDB
163 | .AppleDesktop
164 | Network Trash Folder
165 | Temporary Items
166 | .apdisk
167 |
168 |
169 | # https://github.com/github/gitignore/blob/master/Global/JetBrains.gitignore
170 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
171 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
172 |
173 | # User-specific stuff:
174 | .idea/*
175 | .idea/**/workspace.xml
176 | .idea/**/tasks.xml
177 | .idea/dictionaries
178 | .html # Bokeh Plots
179 | .pg # TensorFlow Frozen Graphs
180 | .avi # videos
181 |
182 | # Sensitive or high-churn files:
183 | .idea/**/dataSources/
184 | .idea/**/dataSources.ids
185 | .idea/**/dataSources.local.xml
186 | .idea/**/sqlDataSources.xml
187 | .idea/**/dynamic.xml
188 | .idea/**/uiDesigner.xml
189 |
190 | # Gradle:
191 | .idea/**/gradle.xml
192 | .idea/**/libraries
193 |
194 | # CMake
195 | cmake-build-debug/
196 | cmake-build-release/
197 |
198 | # Mongo Explorer plugin:
199 | .idea/**/mongoSettings.xml
200 |
201 | ## File-based project format:
202 | *.iws
203 |
204 | ## Plugin-specific files:
205 |
206 | # IntelliJ
207 | out/
208 |
209 | # mpeltonen/sbt-idea plugin
210 | .idea_modules/
211 |
212 | # JIRA plugin
213 | atlassian-ide-plugin.xml
214 |
215 | # Cursive Clojure plugin
216 | .idea/replstate.xml
217 |
218 | # Crashlytics plugin (for Android Studio and IntelliJ)
219 | com_crashlytics_export_strings.xml
220 | crashlytics.properties
221 | crashlytics-build.properties
222 | fabric.properties
223 |
--------------------------------------------------------------------------------
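A quick way to sanity-check which files the patterns above keep out of the build context is the third-party `pathspec` package (`pip install pathspec`); its `gitwildmatch` mode is close to, though not identical to, Docker's own matching rules, so treat this as an approximation:

```python
# Hedged sketch: preview what the .dockerignore patterns above would exclude.
import pathspec

with open('.dockerignore') as f:
    spec = pathspec.PathSpec.from_lines('gitwildmatch', f)

for path in ['yolov5s.pt', 'runs/exp0/results.csv', 'detect.py']:
    print(path, '->', 'ignored' if spec.match_file(path) else 'sent to daemon')
```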
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | ## Contributing to YOLOv5 🚀
2 |
3 | We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible, whether it's:
4 |
5 | - Reporting a bug
6 | - Discussing the current state of the code
7 | - Submitting a fix
8 | - Proposing a new feature
9 | - Becoming a maintainer
10 |
11 | YOLOv5 works so well due to our combined community effort, and for every small improvement you contribute you will be
12 | helping push the frontiers of what's possible in AI 😃!
13 |
14 | ## Submitting a Pull Request (PR) 🛠️
15 |
16 | Submitting a PR is easy! This example shows how to submit a PR for updating `requirements.txt` in 4 steps:
17 |
18 | ### 1. Select File to Update
19 |
20 | Select `requirements.txt` to update by clicking on it in GitHub.
21 |
21 |
22 |
23 | ### 2. Click 'Edit this file'
24 |
25 | Button is in top-right corner.
26 | 
27 |
28 | ### 3. Make Changes
29 |
30 | Change `matplotlib` version from `3.2.2` to `3.3`.
31 | 
32 |
33 | ### 4. Preview Changes and Submit PR
34 |
35 | Click on the **Preview changes** tab to verify your updates. At the bottom of the screen select 'Create a **new branch**
36 | for this commit', assign your branch a descriptive name such as `fix/matplotlib_version` and click the green **Propose
37 | changes** button. All done, your PR is now submitted to YOLOv5 for review and approval 😃!
38 | 
39 |
40 | ### PR recommendations
41 |
42 | To allow your work to be integrated as seamlessly as possible, we advise you to:
43 |
44 | - ✅ Verify your PR is **up-to-date with upstream/master.** If your PR is behind upstream/master an
45 | automatic [GitHub Actions](https://github.com/ultralytics/yolov5/blob/master/.github/workflows/rebase.yml) merge may
46 | be attempted by writing /rebase in a new comment, or by running the following code, replacing 'feature' with the name
47 | of your local branch:
48 |
49 | ```bash
50 | git remote add upstream https://github.com/ultralytics/yolov5.git
51 | git fetch upstream
52 | # git checkout feature # <--- replace 'feature' with local branch name
53 | git merge upstream/master
54 | git push -u origin -f
55 | ```
56 |
57 | - ✅ Verify all Continuous Integration (CI) **checks are passing**.
58 | - ✅ Reduce changes to the absolute **minimum** required for your bug fix or feature addition. _"It is not daily increase
59 | but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ — Bruce Lee
60 |
61 | ## Submitting a Bug Report 🐛
62 |
63 | If you spot a problem with YOLOv5 please submit a Bug Report!
64 |
65 | For us to start investigating a possible problem we need to be able to reproduce it ourselves first. We've created a few
66 | short guidelines below to help users provide what we need in order to get started.
67 |
68 | When asking a question, people will be better able to provide help if you provide **code** that they can easily
69 | understand and use to **reproduce** the problem. This is referred to by community members as creating
70 | a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example). Your code that reproduces
71 | the problem should be:
72 |
73 | * ✅ **Minimal** – Use as little code as possible that still produces the same problem
74 | * ✅ **Complete** – Provide **all** parts someone else needs to reproduce your problem in the question itself
75 | * ✅ **Reproducible** – Test the code you're about to provide to make sure it reproduces the problem
76 |
77 | In addition to the above requirements, for [Ultralytics](https://ultralytics.com/) to provide assistance your code
78 | should be:
79 |
80 | * ✅ **Current** – Verify that your code is up-to-date with current
81 | GitHub [master](https://github.com/ultralytics/yolov5/tree/master), and if necessary `git pull` or `git clone` a new
82 | copy to ensure your problem has not already been resolved by previous commits.
83 | * ✅ **Unmodified** – Your problem must be reproducible without any modifications to the codebase in this
84 | repository. [Ultralytics](https://ultralytics.com/) does not provide support for custom code ⚠️.
85 |
86 | If you believe your problem meets all of the above criteria, please close this issue and raise a new one using the 🐛 **
87 | Bug Report** [template](https://github.com/ultralytics/yolov5/issues/new/choose) and providing
88 | a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example) to help us better
89 | understand and diagnose your problem.
90 |
91 | ## License
92 |
93 | By contributing, you agree that your contributions will be licensed under
94 | the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/)
95 |
--------------------------------------------------------------------------------
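As a concrete illustration of the bug-report guidance above, a minimal reproducible example for this repo can be only a few lines; the image URL is taken from the repo's own examples and the rest is illustrative:

```python
# Example shape of a minimal reproducible example: current master, no local
# modifications, self-contained, and short.
import torch

model = torch.hub.load('ultralytics/yolov5', 'yolov5s')  # pulls current master
results = model('https://ultralytics.com/images/zidane.jpg')
results.print()  # include this output (and the full traceback, if any) in the report
```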
/.gitignore:
--------------------------------------------------------------------------------
1 | # Repo-specific GitIgnore ----------------------------------------------------------------------------------------------
2 | *.jpg
3 | *.jpeg
4 | *.png
5 | *.bmp
6 | *.tif
7 | *.tiff
8 | *.heic
9 | *.JPG
10 | *.JPEG
11 | *.PNG
12 | *.BMP
13 | *.TIF
14 | *.TIFF
15 | *.HEIC
16 | *.mp4
17 | *.mov
18 | *.MOV
19 | *.avi
20 | *.data
21 | *.json
22 | *.cfg
23 | !setup.cfg
24 | !cfg/yolov3*.cfg
25 |
26 | storage.googleapis.com
27 | runs/*
28 | data/*
29 | data/images/*
30 | !data/*.yaml
31 | !data/hyps
32 | !data/scripts
33 | !data/images
34 | !data/images/zidane.jpg
35 | !data/images/bus.jpg
36 | !data/*.sh
37 |
38 | results*.csv
39 |
40 | # Datasets -------------------------------------------------------------------------------------------------------------
41 | coco/
42 | coco128/
43 | VOC/
44 |
45 | # MATLAB GitIgnore -----------------------------------------------------------------------------------------------------
46 | *.m~
47 | *.mat
48 | !targets*.mat
49 |
50 | # Neural Network weights -----------------------------------------------------------------------------------------------
51 | *.weights
52 | *.pt
53 | *.pb
54 | *.onnx
55 | *.engine
56 | *.mlmodel
57 | *.torchscript
58 | *.tflite
59 | *.h5
60 | *_saved_model/
61 | *_web_model/
62 | *_openvino_model/
63 | darknet53.conv.74
64 | yolov3-tiny.conv.15
65 |
66 | # GitHub Python GitIgnore ----------------------------------------------------------------------------------------------
67 | # Byte-compiled / optimized / DLL files
68 | __pycache__/
69 | *.py[cod]
70 | *$py.class
71 |
72 | # C extensions
73 | *.so
74 |
75 | # Distribution / packaging
76 | .Python
77 | env/
78 | build/
79 | develop-eggs/
80 | dist/
81 | downloads/
82 | eggs/
83 | .eggs/
84 | lib/
85 | lib64/
86 | parts/
87 | sdist/
88 | var/
89 | wheels/
90 | *.egg-info/
91 | /wandb/
92 | .installed.cfg
93 | *.egg
94 |
95 |
96 | # PyInstaller
97 | # Usually these files are written by a python script from a template
98 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
99 | *.manifest
100 | *.spec
101 |
102 | # Installer logs
103 | pip-log.txt
104 | pip-delete-this-directory.txt
105 |
106 | # Unit test / coverage reports
107 | htmlcov/
108 | .tox/
109 | .coverage
110 | .coverage.*
111 | .cache
112 | nosetests.xml
113 | coverage.xml
114 | *.cover
115 | .hypothesis/
116 |
117 | # Translations
118 | *.mo
119 | *.pot
120 |
121 | # Django stuff:
122 | *.log
123 | local_settings.py
124 |
125 | # Flask stuff:
126 | instance/
127 | .webassets-cache
128 |
129 | # Scrapy stuff:
130 | .scrapy
131 |
132 | # Sphinx documentation
133 | docs/_build/
134 |
135 | # PyBuilder
136 | target/
137 |
138 | # Jupyter Notebook
139 | .ipynb_checkpoints
140 |
141 | # pyenv
142 | .python-version
143 |
144 | # celery beat schedule file
145 | celerybeat-schedule
146 |
147 | # SageMath parsed files
148 | *.sage.py
149 |
150 | # dotenv
151 | .env
152 |
153 | # virtualenv
154 | .venv*
155 | venv*/
156 | ENV*/
157 |
158 | # Spyder project settings
159 | .spyderproject
160 | .spyproject
161 |
162 | # Rope project settings
163 | .ropeproject
164 |
165 | # mkdocs documentation
166 | /site
167 |
168 | # mypy
169 | .mypy_cache/
170 |
171 |
172 | # https://github.com/github/gitignore/blob/master/Global/macOS.gitignore -----------------------------------------------
173 |
174 | # General
175 | .DS_Store
176 | .AppleDouble
177 | .LSOverride
178 |
179 | # Icon must end with two \r
180 | Icon
181 | Icon?
182 |
183 | # Thumbnails
184 | ._*
185 |
186 | # Files that might appear in the root of a volume
187 | .DocumentRevisions-V100
188 | .fseventsd
189 | .Spotlight-V100
190 | .TemporaryItems
191 | .Trashes
192 | .VolumeIcon.icns
193 | .com.apple.timemachine.donotpresent
194 |
195 | # Directories potentially created on remote AFP share
196 | .AppleDB
197 | .AppleDesktop
198 | Network Trash Folder
199 | Temporary Items
200 | .apdisk
201 |
202 |
203 | # https://github.com/github/gitignore/blob/master/Global/JetBrains.gitignore
204 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
205 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
206 |
207 | # User-specific stuff:
208 | .idea/*
209 | .idea/**/workspace.xml
210 | .idea/**/tasks.xml
211 | .idea/dictionaries
212 | .html # Bokeh Plots
213 | .pg # TensorFlow Frozen Graphs
214 | .avi # videos
215 |
216 | # Sensitive or high-churn files:
217 | .idea/**/dataSources/
218 | .idea/**/dataSources.ids
219 | .idea/**/dataSources.local.xml
220 | .idea/**/sqlDataSources.xml
221 | .idea/**/dynamic.xml
222 | .idea/**/uiDesigner.xml
223 |
224 | # Gradle:
225 | .idea/**/gradle.xml
226 | .idea/**/libraries
227 |
228 | # CMake
229 | cmake-build-debug/
230 | cmake-build-release/
231 |
232 | # Mongo Explorer plugin:
233 | .idea/**/mongoSettings.xml
234 |
235 | ## File-based project format:
236 | *.iws
237 |
238 | ## Plugin-specific files:
239 |
240 | # IntelliJ
241 | out/
242 |
243 | # mpeltonen/sbt-idea plugin
244 | .idea_modules/
245 |
246 | # JIRA plugin
247 | atlassian-ide-plugin.xml
248 |
249 | # Cursive Clojure plugin
250 | .idea/replstate.xml
251 |
252 | # Crashlytics plugin (for Android Studio and IntelliJ)
253 | com_crashlytics_export_strings.xml
254 | crashlytics.properties
255 | crashlytics-build.properties
256 | fabric.properties
257 |
--------------------------------------------------------------------------------
/hubconf.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | """
3 | PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5/
4 |
5 | Usage:
6 | import torch
7 | model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
8 | model = torch.hub.load('ultralytics/yolov5:master', 'custom', 'path/to/yolov5s.onnx') # file from branch
9 | """
10 |
11 | import torch
12 |
13 |
14 | def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
15 | """Creates or loads a YOLOv5 model
16 |
17 | Arguments:
18 | name (str): model name 'yolov5s' or path 'path/to/best.pt'
19 | pretrained (bool): load pretrained weights into the model
20 | channels (int): number of input channels
21 | classes (int): number of model classes
22 | autoshape (bool): apply YOLOv5 .autoshape() wrapper to model
23 | verbose (bool): print all information to screen
24 | device (str, torch.device, None): device to use for model parameters
25 |
26 | Returns:
27 | YOLOv5 model
28 | """
29 | from pathlib import Path
30 |
31 | from models.common import AutoShape, DetectMultiBackend
32 | from models.yolo import Model
33 | from utils.downloads import attempt_download
34 | from utils.general import LOGGER, check_requirements, intersect_dicts, logging
35 | from utils.torch_utils import select_device
36 |
37 | if not verbose:
38 | LOGGER.setLevel(logging.WARNING)
39 | check_requirements(exclude=('tensorboard', 'thop', 'opencv-python'))
40 | name = Path(name)
41 | path = name.with_suffix('.pt') if name.suffix == '' else name # checkpoint path
42 | try:
43 | device = select_device(('0' if torch.cuda.is_available() else 'cpu') if device is None else device)
44 |
45 | if pretrained and channels == 3 and classes == 80:
46 | model = DetectMultiBackend(path, device=device) # download/load FP32 model
47 | # model = models.experimental.attempt_load(path, map_location=device) # download/load FP32 model
48 | else:
49 | cfg = list((Path(__file__).parent / 'models').rglob(f'{path.stem}.yaml'))[0] # model.yaml path
50 | model = Model(cfg, channels, classes) # create model
51 | if pretrained:
52 | ckpt = torch.load(attempt_download(path), map_location=device) # load
53 | csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32
54 | csd = intersect_dicts(csd, model.state_dict(), exclude=['anchors']) # intersect
55 | model.load_state_dict(csd, strict=False) # load
56 | if len(ckpt['model'].names) == classes:
57 | model.names = ckpt['model'].names # set class names attribute
58 | if autoshape:
59 | model = AutoShape(model) # for file/URI/PIL/cv2/np inputs and NMS
60 | return model.to(device)
61 |
62 | except Exception as e:
63 | help_url = 'https://github.com/ultralytics/yolov5/issues/36'
64 | s = f'{e}. Cache may be out of date, try `force_reload=True` or see {help_url} for help.'
65 | raise Exception(s) from e
66 |
67 |
68 | def custom(path='path/to/model.pt', autoshape=True, verbose=True, device=None):
69 | # YOLOv5 custom or local model
70 | return _create(path, autoshape=autoshape, verbose=verbose, device=device)
71 |
72 |
73 | def yolov5n(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
74 | # YOLOv5-nano model https://github.com/ultralytics/yolov5
75 | return _create('yolov5n', pretrained, channels, classes, autoshape, verbose, device)
76 |
77 |
78 | def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
79 | # YOLOv5-small model https://github.com/ultralytics/yolov5
80 | return _create('yolov5s', pretrained, channels, classes, autoshape, verbose, device)
81 |
82 |
83 | def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
84 | # YOLOv5-medium model https://github.com/ultralytics/yolov5
85 | return _create('yolov5m', pretrained, channels, classes, autoshape, verbose, device)
86 |
87 |
88 | def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
89 | # YOLOv5-large model https://github.com/ultralytics/yolov5
90 | return _create('yolov5l', pretrained, channels, classes, autoshape, verbose, device)
91 |
92 |
93 | def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
94 | # YOLOv5-xlarge model https://github.com/ultralytics/yolov5
95 | return _create('yolov5x', pretrained, channels, classes, autoshape, verbose, device)
96 |
97 |
98 | def yolov5n6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
99 | # YOLOv5-nano-P6 model https://github.com/ultralytics/yolov5
100 | return _create('yolov5n6', pretrained, channels, classes, autoshape, verbose, device)
101 |
102 |
103 | def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
104 | # YOLOv5-small-P6 model https://github.com/ultralytics/yolov5
105 | return _create('yolov5s6', pretrained, channels, classes, autoshape, verbose, device)
106 |
107 |
108 | def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
109 | # YOLOv5-medium-P6 model https://github.com/ultralytics/yolov5
110 | return _create('yolov5m6', pretrained, channels, classes, autoshape, verbose, device)
111 |
112 |
113 | def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
114 | # YOLOv5-large-P6 model https://github.com/ultralytics/yolov5
115 | return _create('yolov5l6', pretrained, channels, classes, autoshape, verbose, device)
116 |
117 |
118 | def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
119 | # YOLOv5-xlarge-P6 model https://github.com/ultralytics/yolov5
120 | return _create('yolov5x6', pretrained, channels, classes, autoshape, verbose, device)
121 |
122 |
123 | if __name__ == '__main__':
124 | model = _create(name='yolov5s', pretrained=True, channels=3, classes=80, autoshape=True, verbose=True) # pretrained
125 | # model = custom(path='path/to/model.pt') # custom
126 |
127 | # Verify inference
128 | from pathlib import Path
129 |
130 | import cv2
131 | import numpy as np
132 | from PIL import Image
133 |
134 | imgs = ['data/images/zidane.jpg', # filename
135 | Path('data/images/zidane.jpg'), # Path
136 | 'https://ultralytics.com/images/zidane.jpg', # URI
137 | cv2.imread('data/images/bus.jpg')[:, :, ::-1], # OpenCV
138 | Image.open('data/images/bus.jpg'), # PIL
139 | np.zeros((320, 640, 3))] # numpy
140 |
141 | results = model(imgs, size=320) # batched inference
142 | results.print()
143 | results.save()
144 |
--------------------------------------------------------------------------------
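A hedged usage sketch for the entry points defined in hubconf.py above; the checkpoint path is illustrative:

```python
import torch

# custom() above, invoked through the PyTorch Hub interface
model = torch.hub.load('ultralytics/yolov5', 'custom', path='path/to/best.pt')
# equivalently, when working inside this repo:
#   import hubconf; model = hubconf.custom(path='path/to/best.pt', device='cpu')

results = model(['data/images/zidane.jpg', 'data/images/bus.jpg'], size=640)  # AutoShape batched inference
results.print()
```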
/detect.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | """
3 | Run inference on images, videos, directories, streams, etc.
4 |
5 | Usage - sources:
6 | $ python path/to/detect.py --weights yolov5s.pt --source 0 # webcam
7 | img.jpg # image
8 | vid.mp4 # video
9 | path/ # directory
10 | path/*.jpg # glob
11 | 'https://youtu.be/Zgi9g1ksQHc' # YouTube
12 | 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream
13 |
14 | Usage - formats:
15 | $ python path/to/detect.py --weights yolov5s.pt # PyTorch
16 | yolov5s.torchscript # TorchScript
17 | yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn
18 | yolov5s.xml # OpenVINO
19 | yolov5s.engine # TensorRT
20 | yolov5s.mlmodel # CoreML (MacOS-only)
21 | yolov5s_saved_model # TensorFlow SavedModel
22 | yolov5s.pb # TensorFlow GraphDef
23 | yolov5s.tflite # TensorFlow Lite
24 | yolov5s_edgetpu.tflite # TensorFlow Edge TPU
25 | """
26 |
27 | import argparse
28 | import os
29 | import sys
30 | from pathlib import Path
31 |
32 | import cv2
33 | import torch
34 | import torch.backends.cudnn as cudnn
35 |
36 | FILE = Path(__file__).resolve()
37 | ROOT = FILE.parents[0] # YOLOv5 root directory
38 | if str(ROOT) not in sys.path:
39 | sys.path.append(str(ROOT)) # add ROOT to PATH
40 | ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
41 |
42 | from models.common import DetectMultiBackend
43 | from utils.datasets import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams
44 | from utils.general import (LOGGER, check_file, check_img_size, check_imshow, check_requirements, colorstr,
45 | increment_path, non_max_suppression, print_args, scale_coords, strip_optimizer, xyxy2xywh)
46 | from utils.plots import Annotator, colors, save_one_box
47 | from utils.torch_utils import select_device, time_sync
48 |
49 |
50 | @torch.no_grad()
51 | def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s)
52 | source=ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam
53 | data=ROOT / 'data/coco128.yaml', # dataset.yaml path
54 | imgsz=(640, 640), # inference size (height, width)
55 | conf_thres=0.25, # confidence threshold
56 | iou_thres=0.45, # NMS IOU threshold
57 | max_det=1000, # maximum detections per image
58 | device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu
59 | view_img=False, # show results
60 | save_txt=False, # save results to *.txt
61 | save_conf=False, # save confidences in --save-txt labels
62 | save_crop=False, # save cropped prediction boxes
63 | nosave=False, # do not save images/videos
64 | classes=None, # filter by class: --class 0, or --class 0 2 3
65 | agnostic_nms=False, # class-agnostic NMS
66 | augment=False, # augmented inference
67 | visualize=False, # visualize features
68 | update=False, # update all models
69 | project=ROOT / 'runs/detect', # save results to project/name
70 | name='exp', # save results to project/name
71 | exist_ok=False, # existing project/name ok, do not increment
72 | line_thickness=3, # bounding box thickness (pixels)
73 | hide_labels=False, # hide labels
74 | hide_conf=False, # hide confidences
75 | half=False, # use FP16 half-precision inference
76 | dnn=False, # use OpenCV DNN for ONNX inference
77 | ):
78 | source = str(source)
79 | save_img = not nosave and not source.endswith('.txt') # save inference images
80 | is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
81 | is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
82 | webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file)
83 | if is_url and is_file:
84 | source = check_file(source) # download
85 |
86 | # Directories
87 | save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run
88 | (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
89 |
90 | # Load model
91 | device = select_device(device)
92 | model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data)
93 | stride, names, pt, jit, onnx, engine = model.stride, model.names, model.pt, model.jit, model.onnx, model.engine
94 | imgsz = check_img_size(imgsz, s=stride) # check image size
95 |
96 | # Half
97 | half &= (pt or jit or onnx or engine) and device.type != 'cpu' # FP16 supported on limited backends with CUDA
98 | if pt or jit:
99 | model.model.half() if half else model.model.float()
100 |
101 | # Dataloader
102 | if webcam:
103 | view_img = check_imshow()
104 | cudnn.benchmark = True # set True to speed up constant image size inference
105 | dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt)
106 | bs = len(dataset) # batch_size
107 | else:
108 | dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt)
109 | bs = 1 # batch_size
110 | vid_path, vid_writer = [None] * bs, [None] * bs
111 |
112 | # Run inference
113 | model.warmup(imgsz=(1 if pt else bs, 3, *imgsz), half=half) # warmup
114 | dt, seen = [0.0, 0.0, 0.0], 0
115 | for path, im, im0s, vid_cap, s in dataset:
116 | t1 = time_sync()
117 | im = torch.from_numpy(im).to(device)
118 | im = im.half() if half else im.float() # uint8 to fp16/32
119 | im /= 255 # 0 - 255 to 0.0 - 1.0
120 | if len(im.shape) == 3:
121 | im = im[None] # expand for batch dim
122 | t2 = time_sync()
123 | dt[0] += t2 - t1
124 |
125 | # Inference
126 | visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False
127 | pred = model(im, augment=augment, visualize=visualize)
128 | t3 = time_sync()
129 | dt[1] += t3 - t2
130 |
131 | # NMS
132 | pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)
133 | dt[2] += time_sync() - t3
134 |
135 | # Second-stage classifier (optional)
136 | # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)
137 |
138 | # Process predictions
139 | for i, det in enumerate(pred): # per image
140 | seen += 1
141 | if webcam: # batch_size >= 1
142 | p, im0, frame = path[i], im0s[i].copy(), dataset.count
143 | s += f'{i}: '
144 | else:
145 | p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)
146 |
147 | p = Path(p) # to Path
148 | save_path = str(save_dir / p.name) # im.jpg
149 | txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt
150 | s += '%gx%g ' % im.shape[2:] # print string
151 | gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
152 | imc = im0.copy() if save_crop else im0 # for save_crop
153 | annotator = Annotator(im0, line_width=line_thickness, example=str(names))
154 | if len(det):
155 | # Rescale boxes from img_size to im0 size
156 | det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round()
157 |
158 | # Print results
159 | for c in det[:, -1].unique():
160 | n = (det[:, -1] == c).sum() # detections per class
161 | s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string
162 |
163 | # Write results
164 | for *xyxy, conf, cls in reversed(det):
165 | if save_txt: # Write to file
166 | xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
167 | line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
168 | with open(txt_path + '.txt', 'a') as f:
169 | f.write(('%g ' * len(line)).rstrip() % line + '\n')
170 |
171 | if save_img or save_crop or view_img: # Add bbox to image
172 | c = int(cls) # integer class
173 | label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')
174 | annotator.box_label(xyxy, label, color=colors(c, True))
175 | if save_crop:
176 | save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True)
177 |
178 | # Stream results
179 | im0 = annotator.result()
180 | if view_img:
181 | cv2.imshow(str(p), im0)
182 | cv2.waitKey(1) # 1 millisecond
183 |
184 | # Save results (image with detections)
185 | if save_img:
186 | if dataset.mode == 'image':
187 | cv2.imwrite(save_path, im0)
188 | else: # 'video' or 'stream'
189 | if vid_path[i] != save_path: # new video
190 | vid_path[i] = save_path
191 | if isinstance(vid_writer[i], cv2.VideoWriter):
192 | vid_writer[i].release() # release previous video writer
193 | if vid_cap: # video
194 | fps = vid_cap.get(cv2.CAP_PROP_FPS)
195 | w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
196 | h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
197 | else: # stream
198 | fps, w, h = 30, im0.shape[1], im0.shape[0]
199 | save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos
200 | vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
201 | vid_writer[i].write(im0)
202 |
203 | # Print time (inference-only)
204 | LOGGER.info(f'{s}Done. ({t3 - t2:.3f}s)')
205 |
206 | # Print results
207 | t = tuple(x / seen * 1E3 for x in dt) # speeds per image
208 | LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t)
209 | if save_txt or save_img:
210 | s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
211 | LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
212 | if update:
213 | strip_optimizer(weights) # update model (to fix SourceChangeWarning)
214 |
215 |
216 | def parse_opt():
217 | parser = argparse.ArgumentParser()
218 | parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'improved.pt', help='model path(s)')
219 | parser.add_argument('--source', type=str, default=ROOT / 'C:/Users/Ruipe/Desktop/testimages/test.mp4', help='file/dir/URL/glob, 0 for webcam')  # C:/Users/Ruipe/Desktop/testimages/
220 | parser.add_argument('--data', type=str, default=ROOT / 'data/Railway.yaml', help='(optional) dataset.yaml path')
221 | parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w')
222 | parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold')
223 | parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold')
224 | parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image')
225 | parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
226 | parser.add_argument('--view-img', action='store_true', help='show results')
227 | parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
228 | parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
229 | parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes')
230 | parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
231 | parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3')
232 | parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
233 | parser.add_argument('--augment', action='store_true', help='augmented inference')
234 | parser.add_argument('--visualize', action='store_true', help='visualize features')
235 | parser.add_argument('--update', action='store_true', help='update all models')
236 | parser.add_argument('--project', default=ROOT / 'runs/detect', help='save results to project/name')
237 | parser.add_argument('--name', default='exp', help='save results to project/name')
238 | parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
239 | parser.add_argument('--line-thickness', default=1, type=int, help='bounding box thickness (pixels)')
240 | parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels')
241 | parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences')
242 | parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
243 | parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
244 | opt = parser.parse_args()
245 | opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand
246 | print_args(FILE.stem, opt)
247 | return opt
248 |
249 |
250 | def main(opt):
251 | check_requirements(exclude=('tensorboard', 'thop'))
252 | run(**vars(opt))
253 |
254 |
255 | if __name__ == "__main__":
256 | opt = parse_opt()
257 | main(opt)
258 |
--------------------------------------------------------------------------------
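Besides the CLI shown in parse_opt() above, detect.py can be driven programmatically through run(); a hedged sketch, assuming it is executed from the repository root (weights and source values are illustrative):

```python
import detect  # this file; requires the repo root on sys.path

detect.run(
    weights='yolov5s.pt',   # model checkpoint
    source='data/images',   # file/dir/URL/glob, 0 for webcam
    imgsz=(640, 640),       # inference size (height, width)
    conf_thres=0.25,        # confidence threshold
    view_img=False,         # set True to display results
)
```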
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 | This project improves YOLOv5 for small-object detection.
20 | YOLOv5 🚀 is a family of object detection architectures and models pretrained on the COCO dataset, and represents Ultralytics
21 | open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development.
22 |
23 |
24 |
53 |
54 |
58 |
59 |
60 |
61 | ## Documentation
62 |
63 | See the [YOLOv5 Docs](https://docs.ultralytics.com) for full documentation on training, testing and deployment.
64 |
65 | ## Quick Start Examples
66 |
67 |
68 | Install
69 |
70 | Clone repo and install [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) in a
71 | [**Python>=3.7.0**](https://www.python.org/) environment, including
72 | [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/).
73 |
74 | ```bash
75 | git clone https://github.com/ultralytics/yolov5 # clone
76 | cd yolov5
77 | pip install -r requirements.txt # install
78 | ```
79 |
80 |
81 |
82 |
83 | Inference
84 |
85 | Inference with YOLOv5 and [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36)
86 | . [Models](https://github.com/ultralytics/yolov5/tree/master/models) download automatically from the latest
87 | YOLOv5 [release](https://github.com/ultralytics/yolov5/releases).
88 |
89 | ```python
90 | import torch
91 |
92 | # Model
93 | model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # or yolov5m, yolov5l, yolov5x, custom
94 |
95 | # Images
96 | img = 'https://ultralytics.com/images/zidane.jpg' # or file, Path, PIL, OpenCV, numpy, list
97 |
98 | # Inference
99 | results = model(img)
100 |
101 | # Results
102 | results.print() # or .show(), .save(), .crop(), .pandas(), etc.
103 | ```
104 |
105 |
106 |
107 |
108 |
109 |
110 | Inference with detect.py
111 |
112 | `detect.py` runs inference on a variety of sources, downloading [models](https://github.com/ultralytics/yolov5/tree/master/models) automatically from
113 | the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`.
114 |
115 | ```bash
116 | python detect.py --source 0 # webcam
117 | img.jpg # image
118 | vid.mp4 # video
119 | path/ # directory
120 | path/*.jpg # glob
121 | 'https://youtu.be/Zgi9g1ksQHc' # YouTube
122 | 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream
123 | ```
124 |
125 |
126 |
127 |
128 | Training
129 |
130 | The commands below reproduce YOLOv5 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh)
131 | results. [Models](https://github.com/ultralytics/yolov5/tree/master/models)
132 | and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) download automatically from the latest
133 | YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). Training times for YOLOv5n/s/m/l/x are
134 | 1/2/4/6/8 days on a V100 GPU ([Multi-GPU](https://github.com/ultralytics/yolov5/issues/475) training is proportionally faster). Use the
135 | largest `--batch-size` possible, or pass `--batch-size -1` for
136 | YOLOv5 [AutoBatch](https://github.com/ultralytics/yolov5/pull/5092). Batch sizes shown for V100-16GB.
137 |
138 | ```bash
139 | python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 128
140 | yolov5s 64
141 | yolov5m 40
142 | yolov5l 24
143 | yolov5x 16
144 | ```
145 |
146 |
147 |
148 |
149 |
150 |
151 | Tutorials
152 |
153 | * [Train Custom Data](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data) 🚀 RECOMMENDED
154 | * [Tips for Best Training Results](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results) ☘️
155 | RECOMMENDED
156 | * [Weights & Biases Logging](https://github.com/ultralytics/yolov5/issues/1289) 🌟 NEW
157 | * [Roboflow for Datasets, Labeling, and Active Learning](https://github.com/ultralytics/yolov5/issues/4975) 🌟 NEW
158 | * [Multi-GPU Training](https://github.com/ultralytics/yolov5/issues/475)
159 | * [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) ⭐ NEW
160 | * [TFLite, ONNX, CoreML, TensorRT Export](https://github.com/ultralytics/yolov5/issues/251) 🚀
161 | * [Test-Time Augmentation (TTA)](https://github.com/ultralytics/yolov5/issues/303)
162 | * [Model Ensembling](https://github.com/ultralytics/yolov5/issues/318)
163 | * [Model Pruning/Sparsity](https://github.com/ultralytics/yolov5/issues/304)
164 | * [Hyperparameter Evolution](https://github.com/ultralytics/yolov5/issues/607)
165 | * [Transfer Learning with Frozen Layers](https://github.com/ultralytics/yolov5/issues/1314) ⭐ NEW
166 | * [TensorRT Deployment](https://github.com/wang-xinyu/tensorrtx)
167 |
168 |
169 |
170 | ## Environments
171 |
172 | Get started in seconds with our verified environments. Click each icon below for details.
173 |
174 |
191 |
192 | ## Integrations
193 |
194 |
202 |
203 | |Weights and Biases|Roboflow ⭐ NEW|
204 | |:-:|:-:|
205 | |Automatically track and visualize all your YOLOv5 training runs in the cloud with [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme)|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) |
206 |
207 |
208 |
216 |
217 | ## Why YOLOv5
218 |
219 | 
220 |
221 | YOLOv5-P5 640 Figure (click to expand)
222 |
223 | 
224 |
225 |
226 | Figure Notes (click to expand)
227 |
228 | * **COCO AP val** denotes mAP@0.5:0.95 metric measured on the 5000-image [COCO val2017](http://cocodataset.org) dataset over various inference sizes from 256 to 1536.
229 | * **GPU Speed** measures average inference time per image on [COCO val2017](http://cocodataset.org) dataset using an [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) V100 instance at batch-size 32.
230 | * **EfficientDet** data from [google/automl](https://github.com/google/automl) at batch size 8.
231 | * **Reproduce** by `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt`
232 |
233 |
234 | ### Pretrained Checkpoints
235 |
236 | [assets]: https://github.com/ultralytics/yolov5/releases
237 |
238 | [TTA]: https://github.com/ultralytics/yolov5/issues/303
239 |
240 | |Model |size (pixels) |mAPval 0.5:0.95 |mAPval 0.5 |Speed CPU b1 (ms) |Speed V100 b1 (ms) |Speed V100 b32 (ms) |params (M) |FLOPs @640 (B)
241 | |--- |--- |--- |--- |--- |--- |--- |--- |---
242 | |[YOLOv5n][assets] |640 |28.0 |45.7 |**45** |**6.3**|**0.6**|**1.9**|**4.5**
243 | |[YOLOv5s][assets] |640 |37.4 |56.8 |98 |6.4 |0.9 |7.2 |16.5
244 | |[YOLOv5m][assets] |640 |45.4 |64.1 |224 |8.2 |1.7 |21.2 |49.0
245 | |[YOLOv5l][assets] |640 |49.0 |67.3 |430 |10.1 |2.7 |46.5 |109.1
246 | |[YOLOv5x][assets] |640 |50.7 |68.9 |766 |12.1 |4.8 |86.7 |205.7
247 | | | | | | | | | |
248 | |[YOLOv5n6][assets] |1280 |36.0 |54.4 |153 |8.1 |2.1 |3.2 |4.6
249 | |[YOLOv5s6][assets] |1280 |44.8 |63.7 |385 |8.2 |3.6 |16.8 |12.6
250 | |[YOLOv5m6][assets] |1280 |51.3 |69.3 |887 |11.1 |6.8 |35.7 |50.0
251 | |[YOLOv5l6][assets] |1280 |53.7 |71.3 |1784 |15.8 |10.5 |76.8 |111.4
252 | |[YOLOv5x6][assets] + [TTA][TTA] |1280 / 1536 |55.0 / **55.8** |72.7 / **72.7** |3136 / - |26.2 / - |19.4 / - |140.7 / - |209.8 / -
253 |
254 |
255 | Table Notes (click to expand)
256 |
257 | * All checkpoints are trained to 300 epochs with default settings. Nano and Small models use [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) hyps, all others use [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml).
258 | * **mAPval** values are for single-model single-scale on [COCO val2017](http://cocodataset.org) dataset. Reproduce by `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65`
259 | * **Speed** averaged over COCO val images using an [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) instance. NMS times (~1 ms/img) not included. Reproduce by `python val.py --data coco.yaml --img 640 --task speed --batch 1`
260 | * **TTA** [Test Time Augmentation](https://github.com/ultralytics/yolov5/issues/303) includes reflection and scale augmentations. Reproduce by `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
261 |
262 |
263 |
264 | ## Contribute
265 |
266 | We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Please see our [Contributing Guide](CONTRIBUTING.md) to get started, and fill out the [YOLOv5 Survey](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experiences. Thank you to all our contributors!
267 |
268 |
269 |
270 | ## Contact
271 |
272 | For YOLOv5 bugs and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues). For business inquiries or
273 | professional support requests please visit [https://ultralytics.com/contact](https://ultralytics.com/contact).
274 |
275 |
276 |
277 |
306 |
--------------------------------------------------------------------------------
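A hedged follow-on to the PyTorch Hub inference example in the README above: the `.pandas()` accessor it mentions exposes detections as a DataFrame for filtering or export.

```python
import torch

model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
results = model('https://ultralytics.com/images/zidane.jpg')

df = results.pandas().xyxy[0]  # one DataFrame per image: xmin, ymin, xmax, ymax, confidence, class, name
print(df[df['confidence'] > 0.5])
```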
/val.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | """
3 | Validate a trained YOLOv5 model's accuracy on a custom dataset
4 |
5 | Usage:
6 | $ python path/to/val.py --weights yolov5s.pt --data coco128.yaml --img 640
7 |
8 | Usage - formats:
9 | $ python path/to/val.py --weights yolov5s.pt # PyTorch
10 | yolov5s.torchscript # TorchScript
11 | yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn
12 | yolov5s.xml # OpenVINO
13 | yolov5s.engine # TensorRT
14 | yolov5s.mlmodel # CoreML (MacOS-only)
15 | yolov5s_saved_model # TensorFlow SavedModel
16 | yolov5s.pb # TensorFlow GraphDef
17 | yolov5s.tflite # TensorFlow Lite
18 | yolov5s_edgetpu.tflite # TensorFlow Edge TPU
19 | """
20 |
21 | import argparse
22 | import json
23 | import os
24 | import sys
25 | from pathlib import Path
26 | from threading import Thread
27 |
28 | import numpy as np
29 | import torch
30 | from tqdm import tqdm
31 |
32 | FILE = Path(__file__).resolve()
33 | ROOT = FILE.parents[0] # YOLOv5 root directory
34 | if str(ROOT) not in sys.path:
35 | sys.path.append(str(ROOT)) # add ROOT to PATH
36 | ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
37 |
38 | from models.common import DetectMultiBackend
39 | from utils.callbacks import Callbacks
40 | from utils.datasets import create_dataloader
41 | from utils.general import (LOGGER, box_iou, check_dataset, check_img_size, check_requirements, check_yaml,
42 | coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args,
43 | scale_coords, xywh2xyxy, xyxy2xywh)
44 | from utils.metrics import ConfusionMatrix, ap_per_class
45 | from utils.plots import output_to_target, plot_images, plot_val_study
46 | from utils.torch_utils import select_device, time_sync
47 |
48 |
49 | def save_one_txt(predn, save_conf, shape, file):
50 | # Save one txt result
51 | gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh
52 | for *xyxy, conf, cls in predn.tolist():
53 | xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
54 | line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
55 | with open(file, 'a') as f:
56 | f.write(('%g ' * len(line)).rstrip() % line + '\n')
57 |
58 |
59 | def save_one_json(predn, jdict, path, class_map):
60 | # Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
61 | image_id = int(path.stem) if path.stem.isnumeric() else path.stem
62 | box = xyxy2xywh(predn[:, :4]) # xywh
63 | box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
64 | for p, b in zip(predn.tolist(), box.tolist()):
65 | jdict.append({'image_id': image_id,
66 | 'category_id': class_map[int(p[5])],
67 | 'bbox': [round(x, 3) for x in b],
68 | 'score': round(p[4], 5)})
69 |
70 |
71 | def process_batch(detections, labels, iouv):
72 | """
73 | Return correct predictions matrix. Both sets of boxes are in (x1, y1, x2, y2) format.
74 | Arguments:
75 | detections (Array[N, 6]), x1, y1, x2, y2, conf, class
76 | labels (Array[M, 5]), class, x1, y1, x2, y2
77 | Returns:
78 | correct (Array[N, 10]), for 10 IoU levels
79 | """
80 | correct = torch.zeros(detections.shape[0], iouv.shape[0], dtype=torch.bool, device=iouv.device)
81 | iou = box_iou(labels[:, 1:], detections[:, :4])
82 | x = torch.where((iou >= iouv[0]) & (labels[:, 0:1] == detections[:, 5])) # IoU above threshold and classes match
83 | if x[0].shape[0]:
84 | matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detection, iou]
85 | if x[0].shape[0] > 1:
86 | matches = matches[matches[:, 2].argsort()[::-1]]
87 | matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
88 | # matches = matches[matches[:, 2].argsort()[::-1]]
89 | matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
90 | matches = torch.Tensor(matches).to(iouv.device)
91 | correct[matches[:, 1].long()] = matches[:, 2:3] >= iouv
92 | return correct
93 |
94 |
95 | @torch.no_grad()
96 | def run(data,
97 | weights=None, # model.pt path(s)
98 | batch_size=32, # batch size
99 | imgsz=640, # inference size (pixels)
100 | conf_thres=0.001, # confidence threshold
101 | iou_thres=0.6, # NMS IoU threshold
102 | task='speed', # train, val, test, speed or study
103 | device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu
104 | workers=8, # max dataloader workers (per RANK in DDP mode)
105 | single_cls=False, # treat as single-class dataset
106 | augment=False, # augmented inference
107 | verbose=False, # verbose output
108 | save_txt=False, # save results to *.txt
109 | save_hybrid=False, # save label+prediction hybrid results to *.txt
110 | save_conf=False, # save confidences in --save-txt labels
111 | save_json=False, # save a COCO-JSON results file
112 | project=ROOT / 'runs/val', # save to project/name
113 | name='exp', # save to project/name
114 | exist_ok=False, # existing project/name ok, do not increment
115 | half=True, # use FP16 half-precision inference
116 | dnn=False, # use OpenCV DNN for ONNX inference
117 | model=None,
118 | dataloader=None,
119 | save_dir=Path(''),
120 | plots=True,
121 | callbacks=Callbacks(),
122 | compute_loss=None,
123 | ):
124 | # Initialize/load model and set device
125 | training = model is not None
126 | if training: # called by train.py
127 | device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model
128 |
129 | half &= device.type != 'cpu' # half precision only supported on CUDA
130 | model.half() if half else model.float()
131 | else: # called directly
132 | device = select_device(device, batch_size=batch_size)
133 |
134 | # Directories
135 | save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run
136 | (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
137 |
138 | # Load model
139 | model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data)
140 | stride, pt, jit, onnx, engine = model.stride, model.pt, model.jit, model.onnx, model.engine
141 | imgsz = check_img_size(imgsz, s=stride) # check image size
142 | half &= (pt or jit or onnx or engine) and device.type != 'cpu' # FP16 supported on limited backends with CUDA
143 | if pt or jit:
144 | model.model.half() if half else model.model.float()
145 | elif engine:
146 | batch_size = model.batch_size
147 | else:
148 | half = False
149 | batch_size = 1 # export.py models default to batch-size 1
150 | device = torch.device('cpu')
151 | LOGGER.info(f'Forcing --batch-size 1 square inference shape(1,3,{imgsz},{imgsz}) for non-PyTorch backends')
152 |
153 | # Data
154 | data = check_dataset(data) # check
155 |
156 | # Configure
157 | model.eval()
158 | is_coco = isinstance(data.get('val'), str) and data['val'].endswith('coco/val2017.txt') # COCO dataset
159 | nc = 1 if single_cls else int(data['nc']) # number of classes
160 | iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95
161 | niou = iouv.numel()
162 |
163 | # Dataloader
164 | if not training:
165 | model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz), half=half) # warmup
166 | pad = 0.0 if task in ('speed', 'benchmark') else 0.5
167 | rect = False if task == 'benchmark' else pt # square inference for benchmarks
168 | task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images
169 | dataloader = create_dataloader(data[task], imgsz, batch_size, stride, single_cls, pad=pad, rect=rect,
170 | workers=workers, prefix=colorstr(f'{task}: '))[0]
171 |
172 | seen = 0
173 | confusion_matrix = ConfusionMatrix(nc=nc)
174 |     names = dict(enumerate(model.names if hasattr(model, 'names') else model.module.names))  # index -> class name
175 |     class_map = coco80_to_coco91_class() if is_coco else list(range(1000))  # COCO80 -> COCO91 ids for pycocotools
176 | s = ('%20s' + '%11s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
177 | dt, p, r, f1, mp, mr, map50, map = [0.0, 0.0, 0.0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
178 | loss = torch.zeros(3, device=device)
179 | jdict, stats, ap, ap_class = [], [], [], []
180 | pbar = tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar
181 | for batch_i, (im, targets, paths, shapes) in enumerate(pbar):
182 | t1 = time_sync()
183 | if pt or jit or engine:
184 | im = im.to(device, non_blocking=True)
185 | targets = targets.to(device)
186 | im = im.half() if half else im.float() # uint8 to fp16/32
187 | im /= 255 # 0 - 255 to 0.0 - 1.0
188 | nb, _, height, width = im.shape # batch size, channels, height, width
189 | t2 = time_sync()
190 |         dt[0] += t2 - t1  # pre-process time
191 |
192 | # Inference
193 | out, train_out = model(im) if training else model(im, augment=augment, val=True) # inference, loss outputs
194 |         dt[1] += time_sync() - t2  # inference time
195 |
196 | # Loss
197 | if compute_loss:
198 | loss += compute_loss([x.float() for x in train_out], targets)[1] # box, obj, cls
199 |
200 | # NMS
201 | targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels
202 | lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling
203 | t3 = time_sync()
204 | out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls)
205 |         dt[2] += time_sync() - t3  # NMS time
206 |
207 | # Metrics
208 | for si, pred in enumerate(out):
209 | labels = targets[targets[:, 0] == si, 1:]
210 | nl = len(labels)
211 | tcls = labels[:, 0].tolist() if nl else [] # target class
212 | path, shape = Path(paths[si]), shapes[si][0]
213 | seen += 1
214 |
215 | if len(pred) == 0:
216 | if nl:
217 | stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))
218 | continue
219 |
220 | # Predictions
221 | if single_cls:
222 | pred[:, 5] = 0
223 | predn = pred.clone()
224 | scale_coords(im[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred
225 |
226 | # Evaluate
227 | if nl:
228 | tbox = xywh2xyxy(labels[:, 1:5]) # target boxes
229 | scale_coords(im[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels
230 | labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels
231 | correct = process_batch(predn, labelsn, iouv)
232 | if plots:
233 | confusion_matrix.process_batch(predn, labelsn)
234 | else:
235 | correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool)
236 | stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls)) # (correct, conf, pcls, tcls)
237 |
238 | # Save/log
239 | if save_txt:
240 | save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / (path.stem + '.txt'))
241 | if save_json:
242 | save_one_json(predn, jdict, path, class_map) # append to COCO-JSON dictionary
243 | callbacks.run('on_val_image_end', pred, predn, path, names, im[si])
244 |
245 | # Plot images
246 | if plots and batch_i < 3:
247 | f = save_dir / f'val_batch{batch_i}_labels.jpg' # labels
248 | Thread(target=plot_images, args=(im, targets, paths, f, names), daemon=True).start()
249 | f = save_dir / f'val_batch{batch_i}_pred.jpg' # predictions
250 | Thread(target=plot_images, args=(im, output_to_target(out), paths, f, names), daemon=True).start()
251 |
252 | # Compute metrics
253 | stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy
254 | if len(stats) and stats[0].any():
255 | tp, fp, p, r, f1, ap, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names)
256 | ap50, ap = ap[:, 0], ap.mean(1) # AP@0.5, AP@0.5:0.95
257 | mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
258 | nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per class
259 | else:
260 | nt = torch.zeros(1)
261 |
262 | # Print results
263 | pf = '%20s' + '%11i' * 2 + '%11.3g' * 4 # print format
264 | LOGGER.info(pf % ('all', seen, nt.sum(), mp, mr, map50, map))
265 |
266 | # Print results per class
267 | if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
268 | for i, c in enumerate(ap_class):
269 | LOGGER.info(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))
270 |
271 | # Print speeds
272 | t = tuple(x / seen * 1E3 for x in dt) # speeds per image
273 | if not training:
274 | shape = (batch_size, 3, imgsz, imgsz)
275 | LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t)
276 |
277 | # Plots
278 | if plots:
279 | confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
280 | callbacks.run('on_val_end')
281 |
282 | # Save JSON
283 | if save_json and len(jdict):
284 | w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights
285 | anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json') # annotations json
286 | pred_json = str(save_dir / f"{w}_predictions.json") # predictions json
287 | LOGGER.info(f'\nEvaluating pycocotools mAP... saving {pred_json}...')
288 | with open(pred_json, 'w') as f:
289 | json.dump(jdict, f)
290 |
291 | try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
292 | check_requirements(['pycocotools'])
293 | from pycocotools.coco import COCO
294 | from pycocotools.cocoeval import COCOeval
295 |
296 | anno = COCO(anno_json) # init annotations api
297 | pred = anno.loadRes(pred_json) # init predictions api
298 | eval = COCOeval(anno, pred, 'bbox')
299 | if is_coco:
300 | eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files] # image IDs to evaluate
301 | eval.evaluate()
302 | eval.accumulate()
303 | eval.summarize()
304 | map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5)
305 | except Exception as e:
306 | LOGGER.info(f'pycocotools unable to run: {e}')
307 |
308 | # Return results
309 | model.float() # for training
310 | if not training:
311 | s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
312 | LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
313 | maps = np.zeros(nc) + map
314 | for i, c in enumerate(ap_class):
315 | maps[c] = ap[i]
316 | return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t
317 |
318 |
319 | def parse_opt():
320 | parser = argparse.ArgumentParser()
321 | parser.add_argument('--data', type=str, default=ROOT / 'data/Railway.yaml', help='dataset.yaml path')
322 | parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model.pt path(s)')
323 | parser.add_argument('--batch-size', type=int, default=16, help='batch size')
324 | parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)')
325 | parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold')
326 | parser.add_argument('--iou-thres', type=float, default=0.6, help='NMS IoU threshold')
327 | parser.add_argument('--task', default='test', help='train, val, test, speed or study')
328 | parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
329 | parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
330 | parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
331 | parser.add_argument('--augment', action='store_true', help='augmented inference')
332 | parser.add_argument('--verbose', action='store_true', help='report mAP by class')
333 | parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
334 | parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt')
335 | parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
336 | parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file')
337 | parser.add_argument('--project', default=ROOT / 'runs/val', help='save to project/name')
338 | parser.add_argument('--name', default='exp', help='save to project/name')
339 | parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
340 | parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
341 | parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
342 | opt = parser.parse_args()
343 | opt.data = check_yaml(opt.data) # check YAML
344 | opt.save_json |= opt.data.endswith('Railway.yaml')
345 | opt.save_txt |= opt.save_hybrid
346 | print_args(FILE.stem, opt)
347 | return opt
348 |
349 |
350 | def main(opt):
351 | check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
352 |
353 | if opt.task in ('train', 'val', 'test'): # run normally
354 | if opt.conf_thres > 0.001: # https://github.com/ultralytics/yolov5/issues/1466
355 | LOGGER.info(f'WARNING: confidence threshold {opt.conf_thres} >> 0.001 will produce invalid mAP values.')
356 | run(**vars(opt))
357 |
358 | else:
359 | weights = opt.weights if isinstance(opt.weights, list) else [opt.weights]
360 | opt.half = True # FP16 for fastest results
361 | if opt.task == 'speed': # speed benchmarks
362 | # python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt...
363 | opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False
364 | for opt.weights in weights:
365 | run(**vars(opt), plots=False)
366 |
367 | elif opt.task == 'study': # speed vs mAP benchmarks
368 | # python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n.pt yolov5s.pt...
369 | for opt.weights in weights:
370 | f = f'study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt' # filename to save to
371 | x, y = list(range(256, 1536 + 128, 128)), [] # x axis (image sizes), y axis
372 | for opt.imgsz in x: # img-size
373 | LOGGER.info(f'\nRunning {f} --imgsz {opt.imgsz}...')
374 | r, _, t = run(**vars(opt), plots=False)
375 | y.append(r + t) # results and times
376 | np.savetxt(f, y, fmt='%10.4g') # save
377 | os.system('zip -r study.zip study_*.txt')
378 | plot_val_study(x=x) # plot
379 |
380 |
381 | if __name__ == "__main__":
382 | opt = parse_opt()
383 | main(opt)
384 |
--------------------------------------------------------------------------------
/export.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | """
3 | Export a YOLOv5 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit
4 |
5 | Format | `export.py --include` | Model
6 | --- | --- | ---
7 | PyTorch | - | yolov5s.pt
8 | TorchScript | `torchscript` | yolov5s.torchscript
9 | ONNX | `onnx` | yolov5s.onnx
10 | OpenVINO | `openvino` | yolov5s_openvino_model/
11 | TensorRT | `engine` | yolov5s.engine
12 | CoreML | `coreml` | yolov5s.mlmodel
13 | TensorFlow SavedModel | `saved_model` | yolov5s_saved_model/
14 | TensorFlow GraphDef | `pb` | yolov5s.pb
15 | TensorFlow Lite | `tflite` | yolov5s.tflite
16 | TensorFlow Edge TPU | `edgetpu` | yolov5s_edgetpu.tflite
17 | TensorFlow.js | `tfjs` | yolov5s_web_model/
18 |
19 | Requirements:
20 | $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu # CPU
21 | $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow # GPU
22 |
23 | Usage:
24 | $ python path/to/export.py --weights yolov5s.pt --include torchscript onnx openvino engine coreml tflite ...
25 |
26 | Inference:
27 | $ python path/to/detect.py --weights yolov5s.pt # PyTorch
28 | yolov5s.torchscript # TorchScript
29 | yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn
30 | yolov5s.xml # OpenVINO
31 | yolov5s.engine # TensorRT
32 |                                  yolov5s.mlmodel            # CoreML (macOS-only)
33 | yolov5s_saved_model # TensorFlow SavedModel
34 | yolov5s.pb # TensorFlow GraphDef
35 | yolov5s.tflite # TensorFlow Lite
36 | yolov5s_edgetpu.tflite # TensorFlow Edge TPU
37 |
38 | TensorFlow.js:
39 | $ cd .. && git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example
40 | $ npm install
41 | $ ln -s ../../yolov5/yolov5s_web_model public/yolov5s_web_model
42 | $ npm start
43 | """
44 |
45 | import argparse
46 | import json
47 | import os
48 | import platform
49 | import subprocess
50 | import sys
51 | import time
52 | import warnings
53 | from pathlib import Path
54 |
55 | import pandas as pd
56 | import torch
57 | import torch.nn as nn
58 | from torch.utils.mobile_optimizer import optimize_for_mobile
59 |
60 | FILE = Path(__file__).resolve()
61 | ROOT = FILE.parents[0] # YOLOv5 root directory
62 | if str(ROOT) not in sys.path:
63 | sys.path.append(str(ROOT)) # add ROOT to PATH
64 | ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
65 |
66 | from models.common import Conv
67 | from models.experimental import attempt_load
68 | from models.yolo import Detect
69 | from utils.activations import SiLU
70 | from utils.datasets import LoadImages
71 | from utils.general import (LOGGER, check_dataset, check_img_size, check_requirements, check_version, colorstr,
72 | file_size, print_args, url2file)
73 | from utils.torch_utils import select_device
74 |
75 |
76 | def export_formats():
77 | # YOLOv5 export formats
78 | x = [['PyTorch', '-', '.pt'],
79 | ['TorchScript', 'torchscript', '.torchscript'],
80 | ['ONNX', 'onnx', '.onnx'],
81 | ['OpenVINO', 'openvino', '_openvino_model'],
82 | ['TensorRT', 'engine', '.engine'],
83 | ['CoreML', 'coreml', '.mlmodel'],
84 | ['TensorFlow SavedModel', 'saved_model', '_saved_model'],
85 | ['TensorFlow GraphDef', 'pb', '.pb'],
86 | ['TensorFlow Lite', 'tflite', '.tflite'],
87 | ['TensorFlow Edge TPU', 'edgetpu', '_edgetpu.tflite'],
88 | ['TensorFlow.js', 'tfjs', '_web_model']]
89 | return pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix'])
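    |
    | # Illustrative usage only: export_formats()['Argument'] lists the valid --include values
    | # ('-', 'torchscript', 'onnx', ...); run() below validates user input against this
    | # column, and the 'Suffix' column maps each format to its output name, e.g. 'onnx' -> '.onnx'.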
90 |
91 |
92 | def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:')):
93 | # YOLOv5 TorchScript model export
94 | try:
95 | LOGGER.info(f'\n{prefix} starting export with torch {torch.__version__}...')
96 | f = file.with_suffix('.torchscript')
97 |
98 | ts = torch.jit.trace(model, im, strict=False)
99 | d = {"shape": im.shape, "stride": int(max(model.stride)), "names": model.names}
100 | extra_files = {'config.txt': json.dumps(d)} # torch._C.ExtraFilesMap()
101 | if optimize: # https://pytorch.org/tutorials/recipes/mobile_interpreter.html
102 | optimize_for_mobile(ts)._save_for_lite_interpreter(str(f), _extra_files=extra_files)
103 | else:
104 | ts.save(str(f), _extra_files=extra_files)
105 |
106 | LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
107 | return f
108 | except Exception as e:
109 | LOGGER.info(f'{prefix} export failure: {e}')
110 |
111 |
112 | def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorstr('ONNX:')):
113 | # YOLOv5 ONNX export
114 | try:
115 | check_requirements(('onnx',))
116 | import onnx
117 |
118 | LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...')
119 | f = file.with_suffix('.onnx')
120 |
121 | torch.onnx.export(model, im, f, verbose=False, opset_version=opset,
122 | training=torch.onnx.TrainingMode.TRAINING if train else torch.onnx.TrainingMode.EVAL,
123 | do_constant_folding=not train,
124 | input_names=['images'],
125 | output_names=['output'],
126 | dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'}, # shape(1,3,640,640)
127 | 'output': {0: 'batch', 1: 'anchors'} # shape(1,25200,85)
128 | } if dynamic else None)
129 |
130 | # Checks
131 | model_onnx = onnx.load(f) # load onnx model
132 | onnx.checker.check_model(model_onnx) # check onnx model
133 | # LOGGER.info(onnx.helper.printable_graph(model_onnx.graph)) # print
134 |
135 | # Simplify
136 | if simplify:
137 | try:
138 | check_requirements(('onnx-simplifier',))
139 | import onnxsim
140 |
141 | LOGGER.info(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...')
142 | model_onnx, check = onnxsim.simplify(
143 | model_onnx,
144 | dynamic_input_shape=dynamic,
145 | input_shapes={'images': list(im.shape)} if dynamic else None)
146 | assert check, 'assert check failed'
147 | onnx.save(model_onnx, f)
148 | except Exception as e:
149 | LOGGER.info(f'{prefix} simplifier failure: {e}')
150 | LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
151 | return f
152 | except Exception as e:
153 | LOGGER.info(f'{prefix} export failure: {e}')
154 |
155 |
156 | def export_openvino(model, im, file, prefix=colorstr('OpenVINO:')):
157 | # YOLOv5 OpenVINO export
158 | try:
159 | check_requirements(('openvino-dev',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/
160 | import openvino.inference_engine as ie
161 |
162 | LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...')
163 | f = str(file).replace('.pt', '_openvino_model' + os.sep)
164 |
165 | cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f}"
166 | subprocess.check_output(cmd, shell=True)
167 |
168 | LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
169 | return f
170 | except Exception as e:
171 | LOGGER.info(f'\n{prefix} export failure: {e}')
172 |
173 |
174 | def export_coreml(model, im, file, prefix=colorstr('CoreML:')):
175 | # YOLOv5 CoreML export
176 | try:
177 | check_requirements(('coremltools',))
178 | import coremltools as ct
179 |
180 | LOGGER.info(f'\n{prefix} starting export with coremltools {ct.__version__}...')
181 | f = file.with_suffix('.mlmodel')
182 |
183 | ts = torch.jit.trace(model, im, strict=False) # TorchScript model
184 | ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255, bias=[0, 0, 0])])
185 | ct_model.save(f)
186 |
187 | LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
188 | return ct_model, f
189 | except Exception as e:
190 | LOGGER.info(f'\n{prefix} export failure: {e}')
191 | return None, None
192 |
193 |
194 | def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=False, prefix=colorstr('TensorRT:')):
195 | # YOLOv5 TensorRT export https://developer.nvidia.com/tensorrt
196 | try:
197 | check_requirements(('tensorrt',))
198 | import tensorrt as trt
199 |
200 | if trt.__version__[0] == '7': # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012
201 | grid = model.model[-1].anchor_grid
202 | model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid]
203 | export_onnx(model, im, file, 12, train, False, simplify) # opset 12
204 | model.model[-1].anchor_grid = grid
205 | else: # TensorRT >= 8
206 | check_version(trt.__version__, '8.0.0', hard=True) # require tensorrt>=8.0.0
207 | export_onnx(model, im, file, 13, train, False, simplify) # opset 13
208 | onnx = file.with_suffix('.onnx')
209 |
210 | LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...')
211 | assert im.device.type != 'cpu', 'export running on CPU but must be on GPU, i.e. `python export.py --device 0`'
212 | assert onnx.exists(), f'failed to export ONNX file: {onnx}'
213 | f = file.with_suffix('.engine') # TensorRT engine file
214 | logger = trt.Logger(trt.Logger.INFO)
215 | if verbose:
216 | logger.min_severity = trt.Logger.Severity.VERBOSE
217 |
218 | builder = trt.Builder(logger)
219 | config = builder.create_builder_config()
220 | config.max_workspace_size = workspace * 1 << 30
221 |
222 | flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
223 | network = builder.create_network(flag)
224 | parser = trt.OnnxParser(network, logger)
225 | if not parser.parse_from_file(str(onnx)):
226 | raise RuntimeError(f'failed to load ONNX file: {onnx}')
227 |
228 | inputs = [network.get_input(i) for i in range(network.num_inputs)]
229 | outputs = [network.get_output(i) for i in range(network.num_outputs)]
230 | LOGGER.info(f'{prefix} Network Description:')
231 | for inp in inputs:
232 | LOGGER.info(f'{prefix}\tinput "{inp.name}" with shape {inp.shape} and dtype {inp.dtype}')
233 | for out in outputs:
234 | LOGGER.info(f'{prefix}\toutput "{out.name}" with shape {out.shape} and dtype {out.dtype}')
235 |
236 | half &= builder.platform_has_fast_fp16
237 | LOGGER.info(f'{prefix} building FP{16 if half else 32} engine in {f}')
238 | if half:
239 | config.set_flag(trt.BuilderFlag.FP16)
240 | with builder.build_engine(network, config) as engine, open(f, 'wb') as t:
241 | t.write(engine.serialize())
242 | LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
243 | return f
244 | except Exception as e:
245 | LOGGER.info(f'\n{prefix} export failure: {e}')
246 |
247 |
248 | def export_saved_model(model, im, file, dynamic,
249 | tf_nms=False, agnostic_nms=False, topk_per_class=100, topk_all=100, iou_thres=0.45,
250 | conf_thres=0.25, keras=False, prefix=colorstr('TensorFlow SavedModel:')):
251 | # YOLOv5 TensorFlow SavedModel export
252 | try:
253 | import tensorflow as tf
254 | from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
255 |
256 | from models.tf import TFDetect, TFModel
257 |
258 | LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
259 | f = str(file).replace('.pt', '_saved_model')
260 | batch_size, ch, *imgsz = list(im.shape) # BCHW
261 |
262 | tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz)
263 | im = tf.zeros((batch_size, *imgsz, 3)) # BHWC order for TensorFlow
264 | _ = tf_model.predict(im, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
265 | inputs = tf.keras.Input(shape=(*imgsz, 3), batch_size=None if dynamic else batch_size)
266 | outputs = tf_model.predict(inputs, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
267 | keras_model = tf.keras.Model(inputs=inputs, outputs=outputs)
268 | keras_model.trainable = False
269 | keras_model.summary()
270 | if keras:
271 | keras_model.save(f, save_format='tf')
272 | else:
273 | m = tf.function(lambda x: keras_model(x)) # full model
274 | spec = tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype)
275 | m = m.get_concrete_function(spec)
276 | frozen_func = convert_variables_to_constants_v2(m)
277 | tfm = tf.Module()
278 | tfm.__call__ = tf.function(lambda x: frozen_func(x), [spec])
279 | tfm.__call__(im)
280 | tf.saved_model.save(
281 | tfm,
282 | f,
283 | options=tf.saved_model.SaveOptions(experimental_custom_gradients=False) if
284 | check_version(tf.__version__, '2.6') else tf.saved_model.SaveOptions())
285 | LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
286 | return keras_model, f
287 | except Exception as e:
288 | LOGGER.info(f'\n{prefix} export failure: {e}')
289 | return None, None
290 |
291 |
292 | def export_pb(keras_model, im, file, prefix=colorstr('TensorFlow GraphDef:')):
293 | # YOLOv5 TensorFlow GraphDef *.pb export https://github.com/leimao/Frozen_Graph_TensorFlow
294 | try:
295 | import tensorflow as tf
296 | from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
297 |
298 | LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
299 | f = file.with_suffix('.pb')
300 |
301 | m = tf.function(lambda x: keras_model(x)) # full model
302 | m = m.get_concrete_function(tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype))
303 | frozen_func = convert_variables_to_constants_v2(m)
304 | frozen_func.graph.as_graph_def()
305 | tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir=str(f.parent), name=f.name, as_text=False)
306 |
307 | LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
308 | return f
309 | except Exception as e:
310 | LOGGER.info(f'\n{prefix} export failure: {e}')
311 |
312 |
313 | def export_tflite(keras_model, im, file, int8, data, ncalib, prefix=colorstr('TensorFlow Lite:')):
314 | # YOLOv5 TensorFlow Lite export
315 | try:
316 | import tensorflow as tf
317 |
318 | LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
319 | batch_size, ch, *imgsz = list(im.shape) # BCHW
320 | f = str(file).replace('.pt', '-fp16.tflite')
321 |
322 | converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
323 | converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS]
324 | converter.target_spec.supported_types = [tf.float16]
325 | converter.optimizations = [tf.lite.Optimize.DEFAULT]
326 | if int8:
327 | from models.tf import representative_dataset_gen
328 | dataset = LoadImages(check_dataset(data)['train'], img_size=imgsz, auto=False) # representative data
329 | converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib)
330 | converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
331 | converter.target_spec.supported_types = []
332 | converter.inference_input_type = tf.uint8 # or tf.int8
333 | converter.inference_output_type = tf.uint8 # or tf.int8
334 | converter.experimental_new_quantizer = False
335 | f = str(file).replace('.pt', '-int8.tflite')
336 |
337 | tflite_model = converter.convert()
338 |         with open(f, 'wb') as fh:
    |             fh.write(tflite_model)
339 | LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
340 | return f
341 | except Exception as e:
342 | LOGGER.info(f'\n{prefix} export failure: {e}')
343 |
344 |
345 | def export_edgetpu(keras_model, im, file, prefix=colorstr('Edge TPU:')):
346 | # YOLOv5 Edge TPU export https://coral.ai/docs/edgetpu/models-intro/
347 | try:
348 | cmd = 'edgetpu_compiler --version'
349 | help_url = 'https://coral.ai/docs/edgetpu/compiler/'
350 | assert platform.system() == 'Linux', f'export only supported on Linux. See {help_url}'
351 | if subprocess.run(cmd + ' >/dev/null', shell=True).returncode != 0:
352 | LOGGER.info(f'\n{prefix} export requires Edge TPU compiler. Attempting install from {help_url}')
353 | sudo = subprocess.run('sudo --version >/dev/null', shell=True).returncode == 0 # sudo installed on system
354 | for c in ['curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -',
355 | 'echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list',
356 | 'sudo apt-get update',
357 | 'sudo apt-get install edgetpu-compiler']:
358 | subprocess.run(c if sudo else c.replace('sudo ', ''), shell=True, check=True)
359 | ver = subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1]
360 |
361 | LOGGER.info(f'\n{prefix} starting export with Edge TPU compiler {ver}...')
362 | f = str(file).replace('.pt', '-int8_edgetpu.tflite') # Edge TPU model
363 | f_tfl = str(file).replace('.pt', '-int8.tflite') # TFLite model
364 |
365 | cmd = f"edgetpu_compiler -s {f_tfl}"
366 | subprocess.run(cmd, shell=True, check=True)
367 |
368 | LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
369 | return f
370 | except Exception as e:
371 | LOGGER.info(f'\n{prefix} export failure: {e}')
372 |
373 |
374 | def export_tfjs(keras_model, im, file, prefix=colorstr('TensorFlow.js:')):
375 | # YOLOv5 TensorFlow.js export
376 | try:
377 | check_requirements(('tensorflowjs',))
378 | import re
379 |
380 | import tensorflowjs as tfjs
381 |
382 | LOGGER.info(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...')
383 | f = str(file).replace('.pt', '_web_model') # js dir
384 | f_pb = file.with_suffix('.pb') # *.pb path
385 | f_json = f + '/model.json' # *.json path
386 |
387 | cmd = f'tensorflowjs_converter --input_format=tf_frozen_model ' \
388 | f'--output_node_names="Identity,Identity_1,Identity_2,Identity_3" {f_pb} {f}'
389 | subprocess.run(cmd, shell=True)
390 |
391 | json = open(f_json).read()
392 | with open(f_json, 'w') as j: # sort JSON Identity_* in ascending order
393 | subst = re.sub(
394 | r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, '
395 | r'"Identity.?.?": {"name": "Identity.?.?"}, '
396 | r'"Identity.?.?": {"name": "Identity.?.?"}, '
397 | r'"Identity.?.?": {"name": "Identity.?.?"}}}',
398 | r'{"outputs": {"Identity": {"name": "Identity"}, '
399 | r'"Identity_1": {"name": "Identity_1"}, '
400 | r'"Identity_2": {"name": "Identity_2"}, '
401 | r'"Identity_3": {"name": "Identity_3"}}}',
402 | json)
403 | j.write(subst)
404 |
405 | LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
406 | return f
407 | except Exception as e:
408 | LOGGER.info(f'\n{prefix} export failure: {e}')
409 |
410 |
411 | @torch.no_grad()
412 | def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path'
413 | weights=ROOT / 'yolov5s.pt', # weights path
414 | imgsz=(640, 640), # image (height, width)
415 | batch_size=1, # batch size
416 | device='cpu', # cuda device, i.e. 0 or 0,1,2,3 or cpu
417 | include=('torchscript', 'onnx'), # include formats
418 | half=False, # FP16 half-precision export
419 | inplace=False, # set YOLOv5 Detect() inplace=True
420 | train=False, # model.train() mode
421 | optimize=False, # TorchScript: optimize for mobile
422 | int8=False, # CoreML/TF INT8 quantization
423 | dynamic=False, # ONNX/TF: dynamic axes
424 | simplify=False, # ONNX: simplify model
425 | opset=12, # ONNX: opset version
426 | verbose=False, # TensorRT: verbose log
427 | workspace=4, # TensorRT: workspace size (GB)
428 | nms=False, # TF: add NMS to model
429 | agnostic_nms=False, # TF: add agnostic NMS to model
430 | topk_per_class=100, # TF.js NMS: topk per class to keep
431 | topk_all=100, # TF.js NMS: topk for all classes to keep
432 | iou_thres=0.45, # TF.js NMS: IoU threshold
433 | conf_thres=0.25 # TF.js NMS: confidence threshold
434 | ):
435 | t = time.time()
436 | include = [x.lower() for x in include] # to lowercase
437 | formats = tuple(export_formats()['Argument'][1:]) # --include arguments
438 | flags = [x in include for x in formats]
439 | assert sum(flags) == len(include), f'ERROR: Invalid --include {include}, valid --include arguments are {formats}'
440 | jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs = flags # export booleans
441 | file = Path(url2file(weights) if str(weights).startswith(('http:/', 'https:/')) else weights) # PyTorch weights
442 |
443 | # Load PyTorch model
444 | device = select_device(device)
445 | assert not (device.type == 'cpu' and half), '--half only compatible with GPU export, i.e. use --device 0'
446 | model = attempt_load(weights, map_location=device, inplace=True, fuse=True) # load FP32 model
447 | nc, names = model.nc, model.names # number of classes, class names
448 |
449 | # Checks
450 |     imgsz *= 2 if len(imgsz) == 1 else 1  # expand, e.g. [640] -> [640, 640]
451 | opset = 12 if ('openvino' in include) else opset # OpenVINO requires opset <= 12
452 | assert nc == len(names), f'Model class count {nc} != len(names) {len(names)}'
453 |
454 | # Input
455 | gs = int(max(model.stride)) # grid size (max stride)
456 | imgsz = [check_img_size(x, gs) for x in imgsz] # verify img_size are gs-multiples
457 | im = torch.zeros(batch_size, 3, *imgsz).to(device) # image size(1,3,320,192) BCHW iDetection
458 |
459 | # Update model
460 | if half:
461 | im, model = im.half(), model.half() # to FP16
462 | model.train() if train else model.eval() # training mode = no Detect() layer grid construction
463 | for k, m in model.named_modules():
464 | if isinstance(m, Conv): # assign export-friendly activations
465 | if isinstance(m.act, nn.SiLU):
466 | m.act = SiLU()
467 | elif isinstance(m, Detect):
468 | m.inplace = inplace
469 | m.onnx_dynamic = dynamic
470 | if hasattr(m, 'forward_export'):
471 | m.forward = m.forward_export # assign custom forward (optional)
472 |
473 | for _ in range(2):
474 | y = model(im) # dry runs
475 | shape = tuple(y[0].shape) # model output shape
476 | LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {file} with output shape {shape} ({file_size(file):.1f} MB)")
477 |
478 | # Exports
479 | f = [''] * 10 # exported filenames
480 | warnings.filterwarnings(action='ignore', category=torch.jit.TracerWarning) # suppress TracerWarning
481 | if jit:
482 | f[0] = export_torchscript(model, im, file, optimize)
483 | if engine: # TensorRT required before ONNX
484 | f[1] = export_engine(model, im, file, train, half, simplify, workspace, verbose)
485 | if onnx or xml: # OpenVINO requires ONNX
486 | f[2] = export_onnx(model, im, file, opset, train, dynamic, simplify)
487 | if xml: # OpenVINO
488 | f[3] = export_openvino(model, im, file)
489 | if coreml:
490 | _, f[4] = export_coreml(model, im, file)
491 |
492 | # TensorFlow Exports
493 | if any((saved_model, pb, tflite, edgetpu, tfjs)):
494 | if int8 or edgetpu: # TFLite --int8 bug https://github.com/ultralytics/yolov5/issues/5707
495 | check_requirements(('flatbuffers==1.12',)) # required before `import tensorflow`
496 | assert not (tflite and tfjs), 'TFLite and TF.js models must be exported separately, please pass only one type.'
497 | model, f[5] = export_saved_model(model, im, file, dynamic, tf_nms=nms or agnostic_nms or tfjs,
498 | agnostic_nms=agnostic_nms or tfjs, topk_per_class=topk_per_class,
499 | topk_all=topk_all, conf_thres=conf_thres, iou_thres=iou_thres) # keras model
500 | if pb or tfjs: # pb prerequisite to tfjs
501 | f[6] = export_pb(model, im, file)
502 | if tflite or edgetpu:
503 | f[7] = export_tflite(model, im, file, int8=int8 or edgetpu, data=data, ncalib=100)
504 | if edgetpu:
505 | f[8] = export_edgetpu(model, im, file)
506 | if tfjs:
507 | f[9] = export_tfjs(model, im, file)
508 |
509 | # Finish
510 | f = [str(x) for x in f if x] # filter out '' and None
511 | if any(f):
512 | LOGGER.info(f'\nExport complete ({time.time() - t:.2f}s)'
513 | f"\nResults saved to {colorstr('bold', file.parent.resolve())}"
514 | f"\nDetect: python detect.py --weights {f[-1]}"
515 | f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}')"
516 | f"\nValidate: python val.py --weights {f[-1]}"
517 | f"\nVisualize: https://netron.app")
518 | return f # return list of exported files/dirs
519 |
520 |
521 | def parse_opt():
522 | parser = argparse.ArgumentParser()
523 | parser.add_argument('--data', type=str, default=ROOT / 'data/Railway.yaml', help='dataset.yaml path')
524 | parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'best.pt', help='model.pt path(s)')
525 | parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640, 640], help='image (h, w)')
526 | parser.add_argument('--batch-size', type=int, default=16, help='batch size')
527 | parser.add_argument('--device', default='0', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
528 | parser.add_argument('--half', action='store_true', help='FP16 half-precision export')
529 | parser.add_argument('--inplace', action='store_true', help='set YOLOv5 Detect() inplace=True')
530 | parser.add_argument('--train', action='store_true', help='model.train() mode')
531 | parser.add_argument('--optimize', action='store_true', help='TorchScript: optimize for mobile')
532 | parser.add_argument('--int8', action='store_true', help='CoreML/TF INT8 quantization')
533 | parser.add_argument('--dynamic', action='store_true', help='ONNX/TF: dynamic axes')
534 | parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model')
535 | parser.add_argument('--opset', type=int, default=12, help='ONNX: opset version')
536 | parser.add_argument('--verbose', action='store_true', help='TensorRT: verbose log')
537 | parser.add_argument('--workspace', type=int, default=4, help='TensorRT: workspace size (GB)')
538 | parser.add_argument('--nms', action='store_true', help='TF: add NMS to model')
539 | parser.add_argument('--agnostic-nms', action='store_true', help='TF: add agnostic NMS to model')
540 | parser.add_argument('--topk-per-class', type=int, default=100, help='TF.js NMS: topk per class to keep')
541 | parser.add_argument('--topk-all', type=int, default=100, help='TF.js NMS: topk for all classes to keep')
542 | parser.add_argument('--iou-thres', type=float, default=0.45, help='TF.js NMS: IoU threshold')
543 | parser.add_argument('--conf-thres', type=float, default=0.25, help='TF.js NMS: confidence threshold')
544 | parser.add_argument('--include', nargs='+',
545 | default=['torchscript', 'onnx'],
546 | help='torchscript, onnx, openvino, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs')
547 | opt = parser.parse_args()
548 | print_args(FILE.stem, opt)
549 | return opt
550 |
551 |
552 | def main(opt):
553 | for opt.weights in (opt.weights if isinstance(opt.weights, list) else [opt.weights]):
554 | run(**vars(opt))
555 |
556 |
557 | if __name__ == "__main__":
558 | opt = parse_opt()
559 | main(opt)
560 |
--------------------------------------------------------------------------------
/train.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | """
3 | Train a YOLOv5 model on a custom dataset.
4 |
5 | Models and datasets download automatically from the latest YOLOv5 release.
6 | Models: https://github.com/ultralytics/yolov5/tree/master/models
7 | Datasets: https://github.com/ultralytics/yolov5/tree/master/data
8 | Tutorial: https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data
9 |
10 | Usage:
11 | $ python path/to/train.py --data coco128.yaml --weights yolov5s.pt --img 640 # from pretrained (RECOMMENDED)
12 | $ python path/to/train.py --data coco128.yaml --weights '' --cfg yolov5s.yaml --img 640 # from scratch
13 | """
14 |
15 | import argparse
16 | import math
17 | import os
18 | import random
19 | import sys
20 | import time
21 | from copy import deepcopy
22 | from datetime import datetime
23 | from pathlib import Path
24 |
25 | import numpy as np
26 | import torch
27 | import torch.distributed as dist
28 | import torch.nn as nn
29 | import yaml
30 | from torch.cuda import amp
31 | from torch.nn.parallel import DistributedDataParallel as DDP
32 | from torch.optim import SGD, Adam, AdamW, lr_scheduler
33 | from tqdm import tqdm
34 |
35 | FILE = Path(__file__).resolve()
36 | ROOT = FILE.parents[0] # YOLOv5 root directory
37 | if str(ROOT) not in sys.path:
38 | sys.path.append(str(ROOT)) # add ROOT to PATH
39 | ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
40 |
41 | import val # for end-of-epoch mAP
42 | from models.experimental import attempt_load
43 | from models.yolo import Model
44 | from utils.autoanchor import check_anchors
45 | from utils.autobatch import check_train_batch_size
46 | from utils.callbacks import Callbacks
47 | from utils.datasets import create_dataloader
48 | from utils.downloads import attempt_download
49 | from utils.general import (LOGGER, check_dataset, check_file, check_git_status, check_img_size, check_requirements,
50 | check_suffix, check_yaml, colorstr, get_latest_run, increment_path, init_seeds,
51 | intersect_dicts, labels_to_class_weights, labels_to_image_weights, methods, one_cycle,
52 | print_args, print_mutation, strip_optimizer)
53 | from utils.loggers import Loggers
54 | from utils.loggers.wandb.wandb_utils import check_wandb_resume
55 | from utils.loss import ComputeLoss
56 | from utils.metrics import fitness
57 | from utils.plots import plot_evolve, plot_labels
58 | from utils.torch_utils import EarlyStopping, ModelEMA, de_parallel, select_device, torch_distributed_zero_first
59 |
60 | LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html
61 | RANK = int(os.getenv('RANK', -1))
62 | WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))
63 |
64 |
65 | def train(hyp, # path/to/hyp.yaml or hyp dictionary
66 | opt,
67 | device,
68 | callbacks
69 | ):
70 | save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze = \
71 | Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \
72 | opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze
73 |
74 | # Directories
75 | w = save_dir / 'weights' # weights dir
76 | (w.parent if evolve else w).mkdir(parents=True, exist_ok=True) # make dir
77 | last, best = w / 'last.pt', w / 'best.pt'
78 |
79 | # Hyperparameters
80 | if isinstance(hyp, str):
81 | with open(hyp, errors='ignore') as f:
82 | hyp = yaml.safe_load(f) # load hyps dict
83 | LOGGER.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))
84 |
85 | # Save run settings
86 | if not evolve:
87 | with open(save_dir / 'hyp.yaml', 'w') as f:
88 | yaml.safe_dump(hyp, f, sort_keys=False)
89 | with open(save_dir / 'opt.yaml', 'w') as f:
90 | yaml.safe_dump(vars(opt), f, sort_keys=False)
91 |
92 | # Loggers
93 | data_dict = None
94 | if RANK in [-1, 0]:
95 | loggers = Loggers(save_dir, weights, opt, hyp, LOGGER) # loggers instance
96 | if loggers.wandb:
97 | data_dict = loggers.wandb.data_dict
98 | if resume:
99 | weights, epochs, hyp, batch_size = opt.weights, opt.epochs, opt.hyp, opt.batch_size
100 |
101 | # Register actions
102 | for k in methods(loggers):
103 | callbacks.register_action(k, callback=getattr(loggers, k))
104 |
105 | # Config
106 | plots = not evolve # create plots
107 | cuda = device.type != 'cpu'
108 | init_seeds(1 + RANK)
109 | with torch_distributed_zero_first(LOCAL_RANK):
110 | data_dict = data_dict or check_dataset(data) # check if None
111 | train_path, val_path = data_dict['train'], data_dict['val']
112 | nc = 1 if single_cls else int(data_dict['nc']) # number of classes
113 | names = ['item'] if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names
114 | assert len(names) == nc, f'{len(names)} names found for nc={nc} dataset in {data}' # check
115 | is_coco = isinstance(val_path, str) and val_path.endswith('coco/val2017.txt') # COCO dataset
116 |
117 | # Model
118 | check_suffix(weights, '.pt') # check weights
119 | pretrained = weights.endswith('.pt')
120 | if pretrained:
121 | with torch_distributed_zero_first(LOCAL_RANK):
122 | weights = attempt_download(weights) # download if not found locally
123 | ckpt = torch.load(weights, map_location='cpu') # load checkpoint to CPU to avoid CUDA memory leak
124 | model = Model(cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create
125 | exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume else [] # exclude keys
126 | csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32
127 | csd = intersect_dicts(csd, model.state_dict(), exclude=exclude) # intersect
128 | model.load_state_dict(csd, strict=False) # load
129 | LOGGER.info(f'Transferred {len(csd)}/{len(model.state_dict())} items from {weights}') # report
130 | else:
131 | model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create
132 |
133 | # Freeze
134 | freeze = [f'model.{x}.' for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # layers to freeze
135 | for k, v in model.named_parameters():
136 | v.requires_grad = True # train all layers
137 | if any(x in k for x in freeze):
138 | LOGGER.info(f'freezing {k}')
139 | v.requires_grad = False
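    |
    |     # Illustrative example only: --freeze 10 yields prefixes ['model.0.', ..., 'model.9.'],
    |     # freezing the first 10 layers (the backbone); --freeze 0 freezes nothing.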
140 |
141 | # Image size
142 | gs = max(int(model.stride.max()), 32) # grid size (max stride)
143 | imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2) # verify imgsz is gs-multiple
144 |
145 | # Batch size
146 | if RANK == -1 and batch_size == -1: # single-GPU only, estimate best batch size
147 | batch_size = check_train_batch_size(model, imgsz)
148 | loggers.on_params_update({"batch_size": batch_size})
149 |
150 | # Optimizer
151 | nbs = 64 # nominal batch size
152 | accumulate = max(round(nbs / batch_size), 1) # accumulate loss before optimizing
153 | hyp['weight_decay'] *= batch_size * accumulate / nbs # scale weight_decay
154 | LOGGER.info(f"Scaled weight_decay = {hyp['weight_decay']}")
155 |
156 | g0, g1, g2 = [], [], [] # optimizer parameter groups
157 | for v in model.modules():
158 | if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter): # bias
159 | g2.append(v.bias)
160 | if isinstance(v, nn.BatchNorm2d): # weight (no decay)
161 | g0.append(v.weight)
162 | elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter): # weight (with decay)
163 | g1.append(v.weight)
164 |
165 | if opt.optimizer == 'Adam':
166 | optimizer = Adam(g0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum
167 | elif opt.optimizer == 'AdamW':
168 | optimizer = AdamW(g0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum
169 | else:
170 | optimizer = SGD(g0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)
171 |
172 | optimizer.add_param_group({'params': g1, 'weight_decay': hyp['weight_decay']}) # add g1 with weight_decay
173 | optimizer.add_param_group({'params': g2}) # add g2 (biases)
174 | LOGGER.info(f"{colorstr('optimizer:')} {type(optimizer).__name__} with parameter groups "
175 | f"{len(g0)} weight (no decay), {len(g1)} weight, {len(g2)} bias")
176 | del g0, g1, g2
177 |
178 | # Scheduler
179 | if opt.cos_lr:
180 | lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf']
181 | else:
182 | lf = lambda x: (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear
183 | scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs)
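    |
    |     # Illustrative example only: with hyp['lrf'] = 0.01 the linear lambda decays the lr factor
    |     # from lf(0) = 1.0 to lf(epochs) = 0.01 (lf(epochs // 2) ~ 0.505); the one_cycle
    |     # option follows a cosine from 1 down to hyp['lrf'] instead.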
184 |
185 | # EMA
186 | ema = ModelEMA(model) if RANK in [-1, 0] else None
187 |
188 | # Resume
189 | start_epoch, best_fitness = 0, 0.0
190 | if pretrained:
191 | # Optimizer
192 | if ckpt['optimizer'] is not None:
193 | optimizer.load_state_dict(ckpt['optimizer'])
194 | best_fitness = ckpt['best_fitness']
195 |
196 | # EMA
197 | if ema and ckpt.get('ema'):
198 | ema.ema.load_state_dict(ckpt['ema'].float().state_dict())
199 | ema.updates = ckpt['updates']
200 |
201 | # Epochs
202 | start_epoch = ckpt['epoch'] + 1
203 | if resume:
204 | assert start_epoch > 0, f'{weights} training to {epochs} epochs is finished, nothing to resume.'
205 | if epochs < start_epoch:
206 | LOGGER.info(f"{weights} has been trained for {ckpt['epoch']} epochs. Fine-tuning for {epochs} more epochs.")
207 | epochs += ckpt['epoch'] # finetune additional epochs
208 |
209 | del ckpt, csd
210 |
211 | # DP mode
212 | if cuda and RANK == -1 and torch.cuda.device_count() > 1:
213 | LOGGER.warning('WARNING: DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n'
214 | 'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.')
215 | model = torch.nn.DataParallel(model)
216 |
217 | # SyncBatchNorm
218 | if opt.sync_bn and cuda and RANK != -1:
219 | model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
220 | LOGGER.info('Using SyncBatchNorm()')
221 |
222 | # Trainloader
223 | train_loader, dataset = create_dataloader(train_path, imgsz, batch_size // WORLD_SIZE, gs, single_cls,
224 | hyp=hyp, augment=True, cache=None if opt.cache == 'val' else opt.cache,
225 | rect=opt.rect, rank=LOCAL_RANK, workers=workers,
226 | image_weights=opt.image_weights, quad=opt.quad,
227 | prefix=colorstr('train: '), shuffle=True)
228 | mlc = int(np.concatenate(dataset.labels, 0)[:, 0].max()) # max label class
229 | nb = len(train_loader) # number of batches
230 | assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}'
231 |
232 | # Process 0
233 | if RANK in [-1, 0]:
234 | val_loader = create_dataloader(val_path, imgsz, batch_size // WORLD_SIZE * 2, gs, single_cls,
235 | hyp=hyp, cache=None if noval else opt.cache,
236 | rect=True, rank=-1, workers=workers * 2, pad=0.5,
237 | prefix=colorstr('val: '))[0]
238 |
239 | if not resume:
240 | labels = np.concatenate(dataset.labels, 0)
241 | # c = torch.tensor(labels[:, 0]) # classes
242 | # cf = torch.bincount(c.long(), minlength=nc) + 1. # frequency
243 | # model._initialize_biases(cf.to(device))
244 | if plots:
245 | plot_labels(labels, names, save_dir)
246 |
247 | # Anchors
248 | if not opt.noautoanchor:
249 | check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)
250 | model.half().float() # pre-reduce anchor precision
251 |
252 | callbacks.run('on_pretrain_routine_end')
253 |
254 | # DDP mode
255 | if cuda and RANK != -1:
256 | model = DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK)
257 |
258 | # Model attributes
259 | nl = de_parallel(model).model[-1].nl # number of detection layers (to scale hyps)
260 | hyp['box'] *= 3 / nl # scale to layers
261 | hyp['cls'] *= nc / 80 * 3 / nl # scale to classes and layers
262 | hyp['obj'] *= (imgsz / 640) ** 2 * 3 / nl # scale to image size and layers
263 | hyp['label_smoothing'] = opt.label_smoothing
264 | model.nc = nc # attach number of classes to model
265 | model.hyp = hyp # attach hyperparameters to model
266 | model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights
267 | model.names = names
268 |
269 | # Start training
270 | t0 = time.time()
271 | nw = max(round(hyp['warmup_epochs'] * nb), 1000) # number of warmup iterations, max(3 epochs, 1k iterations)
272 | # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training
273 | last_opt_step = -1
274 | maps = np.zeros(nc) # mAP per class
275 | results = (0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
276 | scheduler.last_epoch = start_epoch - 1 # do not move
277 | scaler = amp.GradScaler(enabled=cuda)
278 | stopper = EarlyStopping(patience=opt.patience)
279 | compute_loss = ComputeLoss(model) # init loss class
280 | LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n'
281 | f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n'
282 | f"Logging results to {colorstr('bold', save_dir)}\n"
283 | f'Starting training for {epochs} epochs...')
284 | for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------
285 | model.train()
286 |
287 | # Update image weights (optional, single-GPU only)
288 | if opt.image_weights:
289 | cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights
290 | iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights
291 | dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx
292 |
293 | # Update mosaic border (optional)
294 | # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
295 | # dataset.mosaic_border = [b - imgsz, -b] # height, width borders
296 |
297 | mloss = torch.zeros(3, device=device) # mean losses
298 | if RANK != -1:
299 | train_loader.sampler.set_epoch(epoch)
300 | pbar = enumerate(train_loader)
301 | LOGGER.info(('\n' + '%10s' * 7) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'labels', 'img_size'))
302 | if RANK in [-1, 0]:
303 | pbar = tqdm(pbar, total=nb, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar
304 | optimizer.zero_grad()
305 | for i, (imgs, targets, paths, _) in pbar: # batch -------------------------------------------------------------
306 | ni = i + nb * epoch # number integrated batches (since train start)
307 | imgs = imgs.to(device, non_blocking=True).float() / 255 # uint8 to float32, 0-255 to 0.0-1.0
308 |
309 | # Warmup
310 | if ni <= nw:
311 | xi = [0, nw] # x interp
312 | # compute_loss.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou)
313 | accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round())
314 | for j, x in enumerate(optimizer.param_groups):
315 | # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
316 | x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
317 | if 'momentum' in x:
318 | x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])
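    |                 # Illustrative example only: with nbs = 64 and batch_size = 16, accumulate ramps
    |                 # from 1 to 4 over the first nw iterations, so after warmup the optimizer
    |                 # steps every 4 batches (an effective batch size of ~64).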
319 |
320 | # Multi-scale
321 | if opt.multi_scale:
322 |                 sz = random.randrange(int(imgsz * 0.5), int(imgsz * 1.5 + gs)) // gs * gs  # size (randrange needs ints)
323 | sf = sz / max(imgs.shape[2:]) # scale factor
324 | if sf != 1:
325 | ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple)
326 | imgs = nn.functional.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)
327 |
328 | # Forward
329 | with amp.autocast(enabled=cuda):
330 | pred = model(imgs) # forward
331 | loss, loss_items = compute_loss(pred, targets.to(device)) # loss scaled by batch_size
332 | if RANK != -1:
333 | loss *= WORLD_SIZE # gradient averaged between devices in DDP mode
334 | if opt.quad:
335 | loss *= 4.
336 |
337 | # Backward
338 | scaler.scale(loss).backward()
339 |
340 | # Optimize
341 | if ni - last_opt_step >= accumulate:
342 | scaler.step(optimizer) # optimizer.step
343 | scaler.update()
344 | optimizer.zero_grad()
345 | if ema:
346 | ema.update(model)
347 | last_opt_step = ni
348 |
349 | # Log
350 | if RANK in [-1, 0]:
351 | mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
352 | mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB)
353 | pbar.set_description(('%10s' * 2 + '%10.4g' * 5) % (
354 | f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1]))
355 | callbacks.run('on_train_batch_end', ni, model, imgs, targets, paths, plots, opt.sync_bn)
356 | if callbacks.stop_training:
357 | return
358 | # end batch ------------------------------------------------------------------------------------------------
359 |
360 | # Scheduler
361 | lr = [x['lr'] for x in optimizer.param_groups] # for loggers
362 | scheduler.step()
363 |
364 | if RANK in [-1, 0]:
365 | # mAP
366 | callbacks.run('on_train_epoch_end', epoch=epoch)
367 | ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights'])
368 | final_epoch = (epoch + 1 == epochs) or stopper.possible_stop
369 | if not noval or final_epoch: # Calculate mAP
370 | results, maps, _ = val.run(data_dict,
371 | batch_size=batch_size // WORLD_SIZE * 2,
372 | imgsz=imgsz,
373 | model=ema.ema,
374 | single_cls=single_cls,
375 | dataloader=val_loader,
376 | save_dir=save_dir,
377 | plots=False,
378 | callbacks=callbacks,
379 | compute_loss=compute_loss)
380 |
381 | # Update best mAP
382 | fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
383 | if fi > best_fitness:
384 | best_fitness = fi
385 | log_vals = list(mloss) + list(results) + lr
386 | callbacks.run('on_fit_epoch_end', log_vals, epoch, best_fitness, fi)
387 |
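# fitness() reduces [P, R, mAP@.5, mAP@.5:.95] to a single scalar; a sketch of
# such a weighted combination (the weights shown are illustrative, chosen to
# favour mAP@.5:.95):
import numpy as np

def fitness(x):  # x: array of shape (n, 4) holding [P, R, mAP@.5, mAP@.5:.95]
    w = np.array([0.0, 0.0, 0.1, 0.9])  # per-metric weights
    return (x[:, :4] * w).sum(1)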
388 | # Save model
389 | if (not nosave) or (final_epoch and not evolve): # if save
390 | ckpt = {'epoch': epoch,
391 | 'best_fitness': best_fitness,
392 | 'model': deepcopy(de_parallel(model)).half(),
393 | 'ema': deepcopy(ema.ema).half(),
394 | 'updates': ema.updates,
395 | 'optimizer': optimizer.state_dict(),
396 | 'wandb_id': loggers.wandb.wandb_run.id if loggers.wandb else None,
397 | 'date': datetime.now().isoformat()}
398 |
399 | # Save last, best and delete
400 | torch.save(ckpt, last)
401 | if best_fitness == fi:
402 | torch.save(ckpt, best)
403 | if (epoch > 0) and (opt.save_period > 0) and (epoch % opt.save_period == 0):
404 | torch.save(ckpt, w / f'epoch{epoch}.pt')
405 | del ckpt
406 | callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi)
407 |
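# A checkpoint written with the dict above can be restored when resuming; a
# minimal sketch (function name is hypothetical; the model is stored as a full
# FP16 module, hence the .float() before extracting its state_dict):
import torch

def load_ckpt(path, model, optimizer):
    ckpt = torch.load(path, map_location='cpu')
    model.load_state_dict(ckpt['model'].float().state_dict())  # FP16 on disk -> FP32
    optimizer.load_state_dict(ckpt['optimizer'])
    return ckpt['epoch'] + 1, ckpt['best_fitness']  # start_epoch, best_fitness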
408 | # Stop Single-GPU
409 | if RANK == -1 and stopper(epoch=epoch, fitness=fi):
410 | break
411 |
412 |         # Stop DDP TODO: known issues https://github.com/ultralytics/yolov5/pull/4576
413 | # stop = stopper(epoch=epoch, fitness=fi)
414 | # if RANK == 0:
415 | # dist.broadcast_object_list([stop], 0) # broadcast 'stop' to all ranks
416 |
417 |         # Stop DDP
418 | # with torch_distributed_zero_first(RANK):
419 | # if stop:
420 | # break # must break all DDP ranks
421 |
422 | # end epoch ----------------------------------------------------------------------------------------------------
423 | # end training -----------------------------------------------------------------------------------------------------
424 | if RANK in [-1, 0]:
425 | LOGGER.info(f'\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.')
426 | for f in last, best:
427 | if f.exists():
428 | strip_optimizer(f) # strip optimizers
429 | if f is best:
430 | LOGGER.info(f'\nValidating {f}...')
431 | results, _, _ = val.run(data_dict,
432 | batch_size=batch_size // WORLD_SIZE * 2,
433 | imgsz=imgsz,
434 | model=attempt_load(f, device).half(),
435 | iou_thres=0.65 if is_coco else 0.60, # best pycocotools results at 0.65
436 | single_cls=single_cls,
437 | dataloader=val_loader,
438 | save_dir=save_dir,
439 | save_json=is_coco,
440 | verbose=True,
441 | plots=True,
442 | callbacks=callbacks,
443 | compute_loss=compute_loss) # val best model with plots
444 | if is_coco:
445 | callbacks.run('on_fit_epoch_end', list(mloss) + list(results) + lr, epoch, best_fitness, fi)
446 |
447 | callbacks.run('on_train_end', last, best, plots, epoch, results)
448 | LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}")
449 |
450 | torch.cuda.empty_cache()
451 | return results
452 |
453 |
454 | def parse_opt(known=False):
455 | parser = argparse.ArgumentParser()
456 | parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='initial weights path')
457 | parser.add_argument('--cfg', type=str, default='models/yolo5s_Railway.yaml', help='model.yaml path')
458 | parser.add_argument('--data', type=str, default=ROOT / 'data/Railway.yaml', help='dataset.yaml path')
459 | parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path')
460 | parser.add_argument('--epochs', type=int, default=51)
461 | parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch')
462 | parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)')
463 | parser.add_argument('--rect', action='store_true', help='rectangular training')
464 | parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
465 | parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
466 | parser.add_argument('--noval', action='store_true', help='only validate final epoch')
467 | parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor')
468 | parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations')
469 | parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
470 | parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"')
471 | parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
472 | parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
473 | parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
474 |     parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class')
475 | parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW'], default='SGD', help='optimizer')
476 | parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
477 | parser.add_argument('--workers', type=int, default=10, help='max dataloader workers (per RANK in DDP mode)')
478 | parser.add_argument('--project', default=ROOT / 'runs/train', help='save to project/name')
479 | parser.add_argument('--name', default='exp', help='save to project/name')
480 | parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
481 | parser.add_argument('--quad', action='store_true', help='quad dataloader')
482 | parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler')
483 | parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon')
484 | parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)')
485 | parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2')
486 | parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)')
487 | parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')
488 |
489 | # Weights & Biases arguments
490 | parser.add_argument('--entity', default=None, help='W&B: Entity')
491 | parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='W&B: Upload data, "val" option')
492 | parser.add_argument('--bbox_interval', type=int, default=-1, help='W&B: Set bounding-box image logging interval')
493 | parser.add_argument('--artifact_alias', type=str, default='latest', help='W&B: Version of dataset artifact to use')
494 |
495 | opt = parser.parse_known_args()[0] if known else parser.parse_args()
496 | return opt
497 |
498 |
499 | def main(opt, callbacks=Callbacks()):
500 | # Checks
501 | if RANK in [-1, 0]:
502 | print_args(FILE.stem, opt)
503 | check_git_status()
504 | check_requirements(exclude=['thop'])
505 |
506 | # Resume
507 | if opt.resume and not check_wandb_resume(opt) and not opt.evolve: # resume an interrupted run
508 | ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run() # specified or most recent path
509 | assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist'
510 | with open(Path(ckpt).parent.parent / 'opt.yaml', errors='ignore') as f:
511 | opt = argparse.Namespace(**yaml.safe_load(f)) # replace
512 | opt.cfg, opt.weights, opt.resume = '', ckpt, True # reinstate
513 | LOGGER.info(f'Resuming training from {ckpt}')
514 | else:
515 | opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = \
516 | check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project) # checks
517 | assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
518 | if opt.evolve:
519 | if opt.project == str(ROOT / 'runs/train'): # if default project name, rename to runs/evolve
520 | opt.project = str(ROOT / 'runs/evolve')
521 | opt.exist_ok, opt.resume = opt.resume, False # pass resume to exist_ok and disable resume
522 | opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))
523 |
524 | # DDP mode
525 | device = select_device(opt.device, batch_size=opt.batch_size)
526 | if LOCAL_RANK != -1:
527 | msg = 'is not compatible with YOLOv5 Multi-GPU DDP training'
528 | assert not opt.image_weights, f'--image-weights {msg}'
529 | assert not opt.evolve, f'--evolve {msg}'
530 | assert opt.batch_size != -1, f'AutoBatch with --batch-size -1 {msg}, please pass a valid --batch-size'
531 | assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE'
532 | assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command'
533 | torch.cuda.set_device(LOCAL_RANK)
534 | device = torch.device('cuda', LOCAL_RANK)
535 | dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo")
536 |
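# With the DDP setup above, training is launched with one process per GPU; an
# illustrative command for recent PyTorch versions (the flags are this
# script's, the GPU count is an example):
#   python -m torch.distributed.run --nproc_per_node 2 train.py --batch-size 64 --device 0,1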
537 | # Train
538 | if not opt.evolve:
539 | train(opt.hyp, opt, device, callbacks)
540 | if WORLD_SIZE > 1 and RANK == 0:
541 | LOGGER.info('Destroying process group... ')
542 | dist.destroy_process_group()
543 |
544 | # Evolve hyperparameters (optional)
545 | else:
546 | # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
547 | meta = {'lr0': (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3)
548 | 'lrf': (1, 0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf)
549 | 'momentum': (0.3, 0.6, 0.98), # SGD momentum/Adam beta1
550 | 'weight_decay': (1, 0.0, 0.001), # optimizer weight decay
551 | 'warmup_epochs': (1, 0.0, 5.0), # warmup epochs (fractions ok)
552 | 'warmup_momentum': (1, 0.0, 0.95), # warmup initial momentum
553 | 'warmup_bias_lr': (1, 0.0, 0.2), # warmup initial bias lr
554 | 'box': (1, 0.02, 0.2), # box loss gain
555 | 'cls': (1, 0.2, 4.0), # cls loss gain
556 | 'cls_pw': (1, 0.5, 2.0), # cls BCELoss positive_weight
557 | 'obj': (1, 0.2, 4.0), # obj loss gain (scale with pixels)
558 | 'obj_pw': (1, 0.5, 2.0), # obj BCELoss positive_weight
559 | 'iou_t': (0, 0.1, 0.7), # IoU training threshold
560 | 'anchor_t': (1, 2.0, 8.0), # anchor-multiple threshold
561 | 'anchors': (2, 2.0, 10.0), # anchors per output grid (0 to ignore)
562 | 'fl_gamma': (0, 0.0, 2.0), # focal loss gamma (efficientDet default gamma=1.5)
563 | 'hsv_h': (1, 0.0, 0.1), # image HSV-Hue augmentation (fraction)
564 | 'hsv_s': (1, 0.0, 0.9), # image HSV-Saturation augmentation (fraction)
565 | 'hsv_v': (1, 0.0, 0.9), # image HSV-Value augmentation (fraction)
566 | 'degrees': (1, 0.0, 45.0), # image rotation (+/- deg)
567 | 'translate': (1, 0.0, 0.9), # image translation (+/- fraction)
568 | 'scale': (1, 0.0, 0.9), # image scale (+/- gain)
569 | 'shear': (1, 0.0, 10.0), # image shear (+/- deg)
570 | 'perspective': (0, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001
571 | 'flipud': (1, 0.0, 1.0), # image flip up-down (probability)
572 | 'fliplr': (0, 0.0, 1.0), # image flip left-right (probability)
573 |                 'mosaic': (1, 0.0, 1.0),  # image mosaic (probability)
574 | 'mixup': (1, 0.0, 1.0), # image mixup (probability)
575 | 'copy_paste': (1, 0.0, 1.0)} # segment copy-paste (probability)
576 |
577 | with open(opt.hyp, errors='ignore') as f:
578 | hyp = yaml.safe_load(f) # load hyps dict
579 | if 'anchors' not in hyp: # anchors commented in hyp.yaml
580 | hyp['anchors'] = 3
581 | opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir) # only val/save final epoch
582 | # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices
583 | evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv'
584 | if opt.bucket:
585 | os.system(f'gsutil cp gs://{opt.bucket}/evolve.csv {evolve_csv}') # download evolve.csv if exists
586 |
587 | for _ in range(opt.evolve): # generations to evolve
588 | if evolve_csv.exists(): # if evolve.csv exists: select best hyps and mutate
589 | # Select parent(s)
590 | parent = 'single' # parent selection method: 'single' or 'weighted'
591 | x = np.loadtxt(evolve_csv, ndmin=2, delimiter=',', skiprows=1)
592 | n = min(5, len(x)) # number of previous results to consider
593 | x = x[np.argsort(-fitness(x))][:n] # top n mutations
594 | w = fitness(x) - fitness(x).min() + 1E-6 # weights (sum > 0)
595 | if parent == 'single' or len(x) == 1:
596 | # x = x[random.randint(0, n - 1)] # random selection
597 | x = x[random.choices(range(n), weights=w)[0]] # weighted selection
598 | elif parent == 'weighted':
599 | x = (x * w.reshape(n, 1)).sum(0) / w.sum() # weighted combination
600 |
601 | # Mutate
602 | mp, s = 0.8, 0.2 # mutation probability, sigma
603 | npr = np.random
604 | npr.seed(int(time.time()))
605 | g = np.array([meta[k][0] for k in hyp.keys()]) # gains 0-1
606 | ng = len(meta)
607 | v = np.ones(ng)
608 | while all(v == 1): # mutate until a change occurs (prevent duplicates)
609 | v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
610 | for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300)
611 | hyp[k] = float(x[i + 7] * v[i]) # mutate
612 |
613 | # Constrain to limits
614 | for k, v in meta.items():
615 | hyp[k] = max(hyp[k], v[1]) # lower limit
616 | hyp[k] = min(hyp[k], v[2]) # upper limit
617 | hyp[k] = round(hyp[k], 5) # significant digits
618 |
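# One evolution step condensed: scale each hyperparameter by clipped
# multiplicative noise, gated per key by its meta gain, then constrain it to
# its limits. A toy two-key sketch (values illustrative):
import numpy as np

meta = {'lr0': (1, 1e-5, 1e-1), 'momentum': (0.3, 0.6, 0.98)}  # gain, low, high
hyp = {'lr0': 0.01, 'momentum': 0.937}
mp, s = 0.8, 0.2  # mutation probability, sigma
g = np.array([meta[k][0] for k in hyp])  # per-key gains
v = np.ones(len(hyp))
while all(v == 1):  # retry until at least one key actually changes
    v = (g * (np.random.random(len(hyp)) < mp) * np.random.randn(len(hyp))
         * np.random.random() * s + 1).clip(0.3, 3.0)
for (k, (_, lo, hi)), vi in zip(meta.items(), v):
    hyp[k] = round(min(max(hyp[k] * vi, lo), hi), 5)  # mutate and constrain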
619 | # Train mutation
620 | results = train(hyp.copy(), opt, device, callbacks)
621 | callbacks = Callbacks()
622 | # Write mutation results
623 | print_mutation(results, hyp.copy(), save_dir, opt.bucket)
624 |
625 | # Plot results
626 | plot_evolve(evolve_csv)
627 | LOGGER.info(f'Hyperparameter evolution finished {opt.evolve} generations\n'
628 | f"Results saved to {colorstr('bold', save_dir)}\n"
629 | f'Usage example: $ python train.py --hyp {evolve_yaml}')
630 |
631 |
632 | def run(**kwargs):
633 | # Usage: import train; train.run(data='coco128.yaml', imgsz=320, weights='yolov5m.pt')
634 | opt = parse_opt(True)
635 | for k, v in kwargs.items():
636 | setattr(opt, k, v)
637 | main(opt)
638 | return opt
639 |
640 |
641 | if __name__ == "__main__":
642 | opt = parse_opt()
643 | main(opt)
644 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | GNU GENERAL PUBLIC LICENSE
2 | Version 3, 29 June 2007
3 |
4 |  Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
5 | Everyone is permitted to copy and distribute verbatim copies
6 | of this license document, but changing it is not allowed.
7 |
8 | Preamble
9 |
10 | The GNU General Public License is a free, copyleft license for
11 | software and other kinds of works.
12 |
13 | The licenses for most software and other practical works are designed
14 | to take away your freedom to share and change the works. By contrast,
15 | the GNU General Public License is intended to guarantee your freedom to
16 | share and change all versions of a program--to make sure it remains free
17 | software for all its users. We, the Free Software Foundation, use the
18 | GNU General Public License for most of our software; it applies also to
19 | any other work released this way by its authors. You can apply it to
20 | your programs, too.
21 |
22 | When we speak of free software, we are referring to freedom, not
23 | price. Our General Public Licenses are designed to make sure that you
24 | have the freedom to distribute copies of free software (and charge for
25 | them if you wish), that you receive source code or can get it if you
26 | want it, that you can change the software or use pieces of it in new
27 | free programs, and that you know you can do these things.
28 |
29 | To protect your rights, we need to prevent others from denying you
30 | these rights or asking you to surrender the rights. Therefore, you have
31 | certain responsibilities if you distribute copies of the software, or if
32 | you modify it: responsibilities to respect the freedom of others.
33 |
34 | For example, if you distribute copies of such a program, whether
35 | gratis or for a fee, you must pass on to the recipients the same
36 | freedoms that you received. You must make sure that they, too, receive
37 | or can get the source code. And you must show them these terms so they
38 | know their rights.
39 |
40 | Developers that use the GNU GPL protect your rights with two steps:
41 | (1) assert copyright on the software, and (2) offer you this License
42 | giving you legal permission to copy, distribute and/or modify it.
43 |
44 | For the developers' and authors' protection, the GPL clearly explains
45 | that there is no warranty for this free software. For both users' and
46 | authors' sake, the GPL requires that modified versions be marked as
47 | changed, so that their problems will not be attributed erroneously to
48 | authors of previous versions.
49 |
50 | Some devices are designed to deny users access to install or run
51 | modified versions of the software inside them, although the manufacturer
52 | can do so. This is fundamentally incompatible with the aim of
53 | protecting users' freedom to change the software. The systematic
54 | pattern of such abuse occurs in the area of products for individuals to
55 | use, which is precisely where it is most unacceptable. Therefore, we
56 | have designed this version of the GPL to prohibit the practice for those
57 | products. If such problems arise substantially in other domains, we
58 | stand ready to extend this provision to those domains in future versions
59 | of the GPL, as needed to protect the freedom of users.
60 |
61 | Finally, every program is threatened constantly by software patents.
62 | States should not allow patents to restrict development and use of
63 | software on general-purpose computers, but in those that do, we wish to
64 | avoid the special danger that patents applied to a free program could
65 | make it effectively proprietary. To prevent this, the GPL assures that
66 | patents cannot be used to render the program non-free.
67 |
68 | The precise terms and conditions for copying, distribution and
69 | modification follow.
70 |
71 | TERMS AND CONDITIONS
72 |
73 | 0. Definitions.
74 |
75 | "This License" refers to version 3 of the GNU General Public License.
76 |
77 | "Copyright" also means copyright-like laws that apply to other kinds of
78 | works, such as semiconductor masks.
79 |
80 | "The Program" refers to any copyrightable work licensed under this
81 | License. Each licensee is addressed as "you". "Licensees" and
82 | "recipients" may be individuals or organizations.
83 |
84 | To "modify" a work means to copy from or adapt all or part of the work
85 | in a fashion requiring copyright permission, other than the making of an
86 | exact copy. The resulting work is called a "modified version" of the
87 | earlier work or a work "based on" the earlier work.
88 |
89 | A "covered work" means either the unmodified Program or a work based
90 | on the Program.
91 |
92 | To "propagate" a work means to do anything with it that, without
93 | permission, would make you directly or secondarily liable for
94 | infringement under applicable copyright law, except executing it on a
95 | computer or modifying a private copy. Propagation includes copying,
96 | distribution (with or without modification), making available to the
97 | public, and in some countries other activities as well.
98 |
99 | To "convey" a work means any kind of propagation that enables other
100 | parties to make or receive copies. Mere interaction with a user through
101 | a computer network, with no transfer of a copy, is not conveying.
102 |
103 | An interactive user interface displays "Appropriate Legal Notices"
104 | to the extent that it includes a convenient and prominently visible
105 | feature that (1) displays an appropriate copyright notice, and (2)
106 | tells the user that there is no warranty for the work (except to the
107 | extent that warranties are provided), that licensees may convey the
108 | work under this License, and how to view a copy of this License. If
109 | the interface presents a list of user commands or options, such as a
110 | menu, a prominent item in the list meets this criterion.
111 |
112 | 1. Source Code.
113 |
114 | The "source code" for a work means the preferred form of the work
115 | for making modifications to it. "Object code" means any non-source
116 | form of a work.
117 |
118 | A "Standard Interface" means an interface that either is an official
119 | standard defined by a recognized standards body, or, in the case of
120 | interfaces specified for a particular programming language, one that
121 | is widely used among developers working in that language.
122 |
123 | The "System Libraries" of an executable work include anything, other
124 | than the work as a whole, that (a) is included in the normal form of
125 | packaging a Major Component, but which is not part of that Major
126 | Component, and (b) serves only to enable use of the work with that
127 | Major Component, or to implement a Standard Interface for which an
128 | implementation is available to the public in source code form. A
129 | "Major Component", in this context, means a major essential component
130 | (kernel, window system, and so on) of the specific operating system
131 | (if any) on which the executable work runs, or a compiler used to
132 | produce the work, or an object code interpreter used to run it.
133 |
134 | The "Corresponding Source" for a work in object code form means all
135 | the source code needed to generate, install, and (for an executable
136 | work) run the object code and to modify the work, including scripts to
137 | control those activities. However, it does not include the work's
138 | System Libraries, or general-purpose tools or generally available free
139 | programs which are used unmodified in performing those activities but
140 | which are not part of the work. For example, Corresponding Source
141 | includes interface definition files associated with source files for
142 | the work, and the source code for shared libraries and dynamically
143 | linked subprograms that the work is specifically designed to require,
144 | such as by intimate data communication or control flow between those
145 | subprograms and other parts of the work.
146 |
147 | The Corresponding Source need not include anything that users
148 | can regenerate automatically from other parts of the Corresponding
149 | Source.
150 |
151 | The Corresponding Source for a work in source code form is that
152 | same work.
153 |
154 | 2. Basic Permissions.
155 |
156 | All rights granted under this License are granted for the term of
157 | copyright on the Program, and are irrevocable provided the stated
158 | conditions are met. This License explicitly affirms your unlimited
159 | permission to run the unmodified Program. The output from running a
160 | covered work is covered by this License only if the output, given its
161 | content, constitutes a covered work. This License acknowledges your
162 | rights of fair use or other equivalent, as provided by copyright law.
163 |
164 | You may make, run and propagate covered works that you do not
165 | convey, without conditions so long as your license otherwise remains
166 | in force. You may convey covered works to others for the sole purpose
167 | of having them make modifications exclusively for you, or provide you
168 | with facilities for running those works, provided that you comply with
169 | the terms of this License in conveying all material for which you do
170 | not control copyright. Those thus making or running the covered works
171 | for you must do so exclusively on your behalf, under your direction
172 | and control, on terms that prohibit them from making any copies of
173 | your copyrighted material outside their relationship with you.
174 |
175 | Conveying under any other circumstances is permitted solely under
176 | the conditions stated below. Sublicensing is not allowed; section 10
177 | makes it unnecessary.
178 |
179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
180 |
181 | No covered work shall be deemed part of an effective technological
182 | measure under any applicable law fulfilling obligations under article
183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or
184 | similar laws prohibiting or restricting circumvention of such
185 | measures.
186 |
187 | When you convey a covered work, you waive any legal power to forbid
188 | circumvention of technological measures to the extent such circumvention
189 | is effected by exercising rights under this License with respect to
190 | the covered work, and you disclaim any intention to limit operation or
191 | modification of the work as a means of enforcing, against the work's
192 | users, your or third parties' legal rights to forbid circumvention of
193 | technological measures.
194 |
195 | 4. Conveying Verbatim Copies.
196 |
197 | You may convey verbatim copies of the Program's source code as you
198 | receive it, in any medium, provided that you conspicuously and
199 | appropriately publish on each copy an appropriate copyright notice;
200 | keep intact all notices stating that this License and any
201 | non-permissive terms added in accord with section 7 apply to the code;
202 | keep intact all notices of the absence of any warranty; and give all
203 | recipients a copy of this License along with the Program.
204 |
205 | You may charge any price or no price for each copy that you convey,
206 | and you may offer support or warranty protection for a fee.
207 |
208 | 5. Conveying Modified Source Versions.
209 |
210 | You may convey a work based on the Program, or the modifications to
211 | produce it from the Program, in the form of source code under the
212 | terms of section 4, provided that you also meet all of these conditions:
213 |
214 | a) The work must carry prominent notices stating that you modified
215 | it, and giving a relevant date.
216 |
217 | b) The work must carry prominent notices stating that it is
218 | released under this License and any conditions added under section
219 | 7. This requirement modifies the requirement in section 4 to
220 | "keep intact all notices".
221 |
222 | c) You must license the entire work, as a whole, under this
223 | License to anyone who comes into possession of a copy. This
224 | License will therefore apply, along with any applicable section 7
225 | additional terms, to the whole of the work, and all its parts,
226 | regardless of how they are packaged. This License gives no
227 | permission to license the work in any other way, but it does not
228 | invalidate such permission if you have separately received it.
229 |
230 | d) If the work has interactive user interfaces, each must display
231 | Appropriate Legal Notices; however, if the Program has interactive
232 | interfaces that do not display Appropriate Legal Notices, your
233 | work need not make them do so.
234 |
235 | A compilation of a covered work with other separate and independent
236 | works, which are not by their nature extensions of the covered work,
237 | and which are not combined with it such as to form a larger program,
238 | in or on a volume of a storage or distribution medium, is called an
239 | "aggregate" if the compilation and its resulting copyright are not
240 | used to limit the access or legal rights of the compilation's users
241 | beyond what the individual works permit. Inclusion of a covered work
242 | in an aggregate does not cause this License to apply to the other
243 | parts of the aggregate.
244 |
245 | 6. Conveying Non-Source Forms.
246 |
247 | You may convey a covered work in object code form under the terms
248 | of sections 4 and 5, provided that you also convey the
249 | machine-readable Corresponding Source under the terms of this License,
250 | in one of these ways:
251 |
252 | a) Convey the object code in, or embodied in, a physical product
253 | (including a physical distribution medium), accompanied by the
254 | Corresponding Source fixed on a durable physical medium
255 | customarily used for software interchange.
256 |
257 | b) Convey the object code in, or embodied in, a physical product
258 | (including a physical distribution medium), accompanied by a
259 | written offer, valid for at least three years and valid for as
260 | long as you offer spare parts or customer support for that product
261 | model, to give anyone who possesses the object code either (1) a
262 | copy of the Corresponding Source for all the software in the
263 | product that is covered by this License, on a durable physical
264 | medium customarily used for software interchange, for a price no
265 | more than your reasonable cost of physically performing this
266 | conveying of source, or (2) access to copy the
267 | Corresponding Source from a network server at no charge.
268 |
269 | c) Convey individual copies of the object code with a copy of the
270 | written offer to provide the Corresponding Source. This
271 | alternative is allowed only occasionally and noncommercially, and
272 | only if you received the object code with such an offer, in accord
273 | with subsection 6b.
274 |
275 | d) Convey the object code by offering access from a designated
276 | place (gratis or for a charge), and offer equivalent access to the
277 | Corresponding Source in the same way through the same place at no
278 | further charge. You need not require recipients to copy the
279 | Corresponding Source along with the object code. If the place to
280 | copy the object code is a network server, the Corresponding Source
281 | may be on a different server (operated by you or a third party)
282 | that supports equivalent copying facilities, provided you maintain
283 | clear directions next to the object code saying where to find the
284 | Corresponding Source. Regardless of what server hosts the
285 | Corresponding Source, you remain obligated to ensure that it is
286 | available for as long as needed to satisfy these requirements.
287 |
288 | e) Convey the object code using peer-to-peer transmission, provided
289 | you inform other peers where the object code and Corresponding
290 | Source of the work are being offered to the general public at no
291 | charge under subsection 6d.
292 |
293 | A separable portion of the object code, whose source code is excluded
294 | from the Corresponding Source as a System Library, need not be
295 | included in conveying the object code work.
296 |
297 | A "User Product" is either (1) a "consumer product", which means any
298 | tangible personal property which is normally used for personal, family,
299 | or household purposes, or (2) anything designed or sold for incorporation
300 | into a dwelling. In determining whether a product is a consumer product,
301 | doubtful cases shall be resolved in favor of coverage. For a particular
302 | product received by a particular user, "normally used" refers to a
303 | typical or common use of that class of product, regardless of the status
304 | of the particular user or of the way in which the particular user
305 | actually uses, or expects or is expected to use, the product. A product
306 | is a consumer product regardless of whether the product has substantial
307 | commercial, industrial or non-consumer uses, unless such uses represent
308 | the only significant mode of use of the product.
309 |
310 | "Installation Information" for a User Product means any methods,
311 | procedures, authorization keys, or other information required to install
312 | and execute modified versions of a covered work in that User Product from
313 | a modified version of its Corresponding Source. The information must
314 | suffice to ensure that the continued functioning of the modified object
315 | code is in no case prevented or interfered with solely because
316 | modification has been made.
317 |
318 | If you convey an object code work under this section in, or with, or
319 | specifically for use in, a User Product, and the conveying occurs as
320 | part of a transaction in which the right of possession and use of the
321 | User Product is transferred to the recipient in perpetuity or for a
322 | fixed term (regardless of how the transaction is characterized), the
323 | Corresponding Source conveyed under this section must be accompanied
324 | by the Installation Information. But this requirement does not apply
325 | if neither you nor any third party retains the ability to install
326 | modified object code on the User Product (for example, the work has
327 | been installed in ROM).
328 |
329 | The requirement to provide Installation Information does not include a
330 | requirement to continue to provide support service, warranty, or updates
331 | for a work that has been modified or installed by the recipient, or for
332 | the User Product in which it has been modified or installed. Access to a
333 | network may be denied when the modification itself materially and
334 | adversely affects the operation of the network or violates the rules and
335 | protocols for communication across the network.
336 |
337 | Corresponding Source conveyed, and Installation Information provided,
338 | in accord with this section must be in a format that is publicly
339 | documented (and with an implementation available to the public in
340 | source code form), and must require no special password or key for
341 | unpacking, reading or copying.
342 |
343 | 7. Additional Terms.
344 |
345 | "Additional permissions" are terms that supplement the terms of this
346 | License by making exceptions from one or more of its conditions.
347 | Additional permissions that are applicable to the entire Program shall
348 | be treated as though they were included in this License, to the extent
349 | that they are valid under applicable law. If additional permissions
350 | apply only to part of the Program, that part may be used separately
351 | under those permissions, but the entire Program remains governed by
352 | this License without regard to the additional permissions.
353 |
354 | When you convey a copy of a covered work, you may at your option
355 | remove any additional permissions from that copy, or from any part of
356 | it. (Additional permissions may be written to require their own
357 | removal in certain cases when you modify the work.) You may place
358 | additional permissions on material, added by you to a covered work,
359 | for which you have or can give appropriate copyright permission.
360 |
361 | Notwithstanding any other provision of this License, for material you
362 | add to a covered work, you may (if authorized by the copyright holders of
363 | that material) supplement the terms of this License with terms:
364 |
365 | a) Disclaiming warranty or limiting liability differently from the
366 | terms of sections 15 and 16 of this License; or
367 |
368 | b) Requiring preservation of specified reasonable legal notices or
369 | author attributions in that material or in the Appropriate Legal
370 | Notices displayed by works containing it; or
371 |
372 | c) Prohibiting misrepresentation of the origin of that material, or
373 | requiring that modified versions of such material be marked in
374 | reasonable ways as different from the original version; or
375 |
376 | d) Limiting the use for publicity purposes of names of licensors or
377 | authors of the material; or
378 |
379 | e) Declining to grant rights under trademark law for use of some
380 | trade names, trademarks, or service marks; or
381 |
382 | f) Requiring indemnification of licensors and authors of that
383 | material by anyone who conveys the material (or modified versions of
384 | it) with contractual assumptions of liability to the recipient, for
385 | any liability that these contractual assumptions directly impose on
386 | those licensors and authors.
387 |
388 | All other non-permissive additional terms are considered "further
389 | restrictions" within the meaning of section 10. If the Program as you
390 | received it, or any part of it, contains a notice stating that it is
391 | governed by this License along with a term that is a further
392 | restriction, you may remove that term. If a license document contains
393 | a further restriction but permits relicensing or conveying under this
394 | License, you may add to a covered work material governed by the terms
395 | of that license document, provided that the further restriction does
396 | not survive such relicensing or conveying.
397 |
398 | If you add terms to a covered work in accord with this section, you
399 | must place, in the relevant source files, a statement of the
400 | additional terms that apply to those files, or a notice indicating
401 | where to find the applicable terms.
402 |
403 | Additional terms, permissive or non-permissive, may be stated in the
404 | form of a separately written license, or stated as exceptions;
405 | the above requirements apply either way.
406 |
407 | 8. Termination.
408 |
409 | You may not propagate or modify a covered work except as expressly
410 | provided under this License. Any attempt otherwise to propagate or
411 | modify it is void, and will automatically terminate your rights under
412 | this License (including any patent licenses granted under the third
413 | paragraph of section 11).
414 |
415 | However, if you cease all violation of this License, then your
416 | license from a particular copyright holder is reinstated (a)
417 | provisionally, unless and until the copyright holder explicitly and
418 | finally terminates your license, and (b) permanently, if the copyright
419 | holder fails to notify you of the violation by some reasonable means
420 | prior to 60 days after the cessation.
421 |
422 | Moreover, your license from a particular copyright holder is
423 | reinstated permanently if the copyright holder notifies you of the
424 | violation by some reasonable means, this is the first time you have
425 | received notice of violation of this License (for any work) from that
426 | copyright holder, and you cure the violation prior to 30 days after
427 | your receipt of the notice.
428 |
429 | Termination of your rights under this section does not terminate the
430 | licenses of parties who have received copies or rights from you under
431 | this License. If your rights have been terminated and not permanently
432 | reinstated, you do not qualify to receive new licenses for the same
433 | material under section 10.
434 |
435 | 9. Acceptance Not Required for Having Copies.
436 |
437 | You are not required to accept this License in order to receive or
438 | run a copy of the Program. Ancillary propagation of a covered work
439 | occurring solely as a consequence of using peer-to-peer transmission
440 | to receive a copy likewise does not require acceptance. However,
441 | nothing other than this License grants you permission to propagate or
442 | modify any covered work. These actions infringe copyright if you do
443 | not accept this License. Therefore, by modifying or propagating a
444 | covered work, you indicate your acceptance of this License to do so.
445 |
446 | 10. Automatic Licensing of Downstream Recipients.
447 |
448 | Each time you convey a covered work, the recipient automatically
449 | receives a license from the original licensors, to run, modify and
450 | propagate that work, subject to this License. You are not responsible
451 | for enforcing compliance by third parties with this License.
452 |
453 | An "entity transaction" is a transaction transferring control of an
454 | organization, or substantially all assets of one, or subdividing an
455 | organization, or merging organizations. If propagation of a covered
456 | work results from an entity transaction, each party to that
457 | transaction who receives a copy of the work also receives whatever
458 | licenses to the work the party's predecessor in interest had or could
459 | give under the previous paragraph, plus a right to possession of the
460 | Corresponding Source of the work from the predecessor in interest, if
461 | the predecessor has it or can get it with reasonable efforts.
462 |
463 | You may not impose any further restrictions on the exercise of the
464 | rights granted or affirmed under this License. For example, you may
465 | not impose a license fee, royalty, or other charge for exercise of
466 | rights granted under this License, and you may not initiate litigation
467 | (including a cross-claim or counterclaim in a lawsuit) alleging that
468 | any patent claim is infringed by making, using, selling, offering for
469 | sale, or importing the Program or any portion of it.
470 |
471 | 11. Patents.
472 |
473 | A "contributor" is a copyright holder who authorizes use under this
474 | License of the Program or a work on which the Program is based. The
475 | work thus licensed is called the contributor's "contributor version".
476 |
477 | A contributor's "essential patent claims" are all patent claims
478 | owned or controlled by the contributor, whether already acquired or
479 | hereafter acquired, that would be infringed by some manner, permitted
480 | by this License, of making, using, or selling its contributor version,
481 | but do not include claims that would be infringed only as a
482 | consequence of further modification of the contributor version. For
483 | purposes of this definition, "control" includes the right to grant
484 | patent sublicenses in a manner consistent with the requirements of
485 | this License.
486 |
487 | Each contributor grants you a non-exclusive, worldwide, royalty-free
488 | patent license under the contributor's essential patent claims, to
489 | make, use, sell, offer for sale, import and otherwise run, modify and
490 | propagate the contents of its contributor version.
491 |
492 | In the following three paragraphs, a "patent license" is any express
493 | agreement or commitment, however denominated, not to enforce a patent
494 | (such as an express permission to practice a patent or covenant not to
495 | sue for patent infringement). To "grant" such a patent license to a
496 | party means to make such an agreement or commitment not to enforce a
497 | patent against the party.
498 |
499 | If you convey a covered work, knowingly relying on a patent license,
500 | and the Corresponding Source of the work is not available for anyone
501 | to copy, free of charge and under the terms of this License, through a
502 | publicly available network server or other readily accessible means,
503 | then you must either (1) cause the Corresponding Source to be so
504 | available, or (2) arrange to deprive yourself of the benefit of the
505 | patent license for this particular work, or (3) arrange, in a manner
506 | consistent with the requirements of this License, to extend the patent
507 | license to downstream recipients. "Knowingly relying" means you have
508 | actual knowledge that, but for the patent license, your conveying the
509 | covered work in a country, or your recipient's use of the covered work
510 | in a country, would infringe one or more identifiable patents in that
511 | country that you have reason to believe are valid.
512 |
513 | If, pursuant to or in connection with a single transaction or
514 | arrangement, you convey, or propagate by procuring conveyance of, a
515 | covered work, and grant a patent license to some of the parties
516 | receiving the covered work authorizing them to use, propagate, modify
517 | or convey a specific copy of the covered work, then the patent license
518 | you grant is automatically extended to all recipients of the covered
519 | work and works based on it.
520 |
521 | A patent license is "discriminatory" if it does not include within
522 | the scope of its coverage, prohibits the exercise of, or is
523 | conditioned on the non-exercise of one or more of the rights that are
524 | specifically granted under this License. You may not convey a covered
525 | work if you are a party to an arrangement with a third party that is
526 | in the business of distributing software, under which you make payment
527 | to the third party based on the extent of your activity of conveying
528 | the work, and under which the third party grants, to any of the
529 | parties who would receive the covered work from you, a discriminatory
530 | patent license (a) in connection with copies of the covered work
531 | conveyed by you (or copies made from those copies), or (b) primarily
532 | for and in connection with specific products or compilations that
533 | contain the covered work, unless you entered into that arrangement,
534 | or that patent license was granted, prior to 28 March 2007.
535 |
536 | Nothing in this License shall be construed as excluding or limiting
537 | any implied license or other defenses to infringement that may
538 | otherwise be available to you under applicable patent law.
539 |
540 | 12. No Surrender of Others' Freedom.
541 |
542 | If conditions are imposed on you (whether by court order, agreement or
543 | otherwise) that contradict the conditions of this License, they do not
544 | excuse you from the conditions of this License. If you cannot convey a
545 | covered work so as to satisfy simultaneously your obligations under this
546 | License and any other pertinent obligations, then as a consequence you may
547 | not convey it at all. For example, if you agree to terms that obligate you
548 | to collect a royalty for further conveying from those to whom you convey
549 | the Program, the only way you could satisfy both those terms and this
550 | License would be to refrain entirely from conveying the Program.
551 |
552 | 13. Use with the GNU Affero General Public License.
553 |
554 | Notwithstanding any other provision of this License, you have
555 | permission to link or combine any covered work with a work licensed
556 | under version 3 of the GNU Affero General Public License into a single
557 | combined work, and to convey the resulting work. The terms of this
558 | License will continue to apply to the part which is the covered work,
559 | but the special requirements of the GNU Affero General Public License,
560 | section 13, concerning interaction through a network will apply to the
561 | combination as such.
562 |
563 | 14. Revised Versions of this License.
564 |
565 | The Free Software Foundation may publish revised and/or new versions of
566 | the GNU General Public License from time to time. Such new versions will
567 | be similar in spirit to the present version, but may differ in detail to
568 | address new problems or concerns.
569 |
570 | Each version is given a distinguishing version number. If the
571 | Program specifies that a certain numbered version of the GNU General
572 | Public License "or any later version" applies to it, you have the
573 | option of following the terms and conditions either of that numbered
574 | version or of any later version published by the Free Software
575 | Foundation. If the Program does not specify a version number of the
576 | GNU General Public License, you may choose any version ever published
577 | by the Free Software Foundation.
578 |
579 | If the Program specifies that a proxy can decide which future
580 | versions of the GNU General Public License can be used, that proxy's
581 | public statement of acceptance of a version permanently authorizes you
582 | to choose that version for the Program.
583 |
584 | Later license versions may give you additional or different
585 | permissions. However, no additional obligations are imposed on any
586 | author or copyright holder as a result of your choosing to follow a
587 | later version.
588 |
589 | 15. Disclaimer of Warranty.
590 |
591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
599 |
600 | 16. Limitation of Liability.
601 |
602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
610 | SUCH DAMAGES.
611 |
612 | 17. Interpretation of Sections 15 and 16.
613 |
614 | If the disclaimer of warranty and limitation of liability provided
615 | above cannot be given local legal effect according to their terms,
616 | reviewing courts shall apply local law that most closely approximates
617 | an absolute waiver of all civil liability in connection with the
618 | Program, unless a warranty or assumption of liability accompanies a
619 | copy of the Program in return for a fee.
620 |
621 | END OF TERMS AND CONDITIONS
622 |
623 | How to Apply These Terms to Your New Programs
624 |
625 | If you develop a new program, and you want it to be of the greatest
626 | possible use to the public, the best way to achieve this is to make it
627 | free software which everyone can redistribute and change under these terms.
628 |
629 | To do so, attach the following notices to the program. It is safest
630 | to attach them to the start of each source file to most effectively
631 | state the exclusion of warranty; and each file should have at least
632 | the "copyright" line and a pointer to where the full notice is found.
633 |
634 |     <one line to give the program's name and a brief idea of what it does.>
635 |     Copyright (C) <year>  <name of author>
636 |
637 | This program is free software: you can redistribute it and/or modify
638 | it under the terms of the GNU General Public License as published by
639 | the Free Software Foundation, either version 3 of the License, or
640 | (at your option) any later version.
641 |
642 | This program is distributed in the hope that it will be useful,
643 | but WITHOUT ANY WARRANTY; without even the implied warranty of
644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
645 | GNU General Public License for more details.
646 |
647 | You should have received a copy of the GNU General Public License
648 |     along with this program. If not, see <https://www.gnu.org/licenses/>.
649 |
650 | Also add information on how to contact you by electronic and paper mail.
651 |
652 | If the program does terminal interaction, make it output a short
653 | notice like this when it starts in an interactive mode:
654 |
655 |     <program>  Copyright (C) <year>  <name of author>
656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
657 | This is free software, and you are welcome to redistribute it
658 | under certain conditions; type `show c' for details.
659 |
660 | The hypothetical commands `show w' and `show c' should show the appropriate
661 | parts of the General Public License. Of course, your program's commands
662 | might be different; for a GUI interface, you would use an "about box".
663 |
664 | You should also get your employer (if you work as a programmer) or school,
665 | if any, to sign a "copyright disclaimer" for the program, if necessary.
666 | For more information on this, and how to apply and follow the GNU GPL, see
667 | <https://www.gnu.org/licenses/>.
668 |
669 | The GNU General Public License does not permit incorporating your program
670 | into proprietary programs. If your program is a subroutine library, you
671 | may consider it more useful to permit linking proprietary applications with
672 | the library. If this is what you want to do, use the GNU Lesser General
673 | Public License instead of this License. But first, please read
674 | <https://www.gnu.org/licenses/why-not-lgpl.html>.
675 |
--------------------------------------------------------------------------------