├── .pre-commit-config.yaml
├── CONTRIBUTING.md
├── LICENSE
├── README.md
├── data
├── Argoverse.yaml
├── GlobalWheat2020.yaml
├── Objects365.yaml
├── SKU-110K.yaml
├── VOC.yaml
├── VisDrone.yaml
├── coco.yaml
├── coco128.yaml
├── hyps
│ ├── hyp.Objects365.yaml
│ ├── hyp.VOC.yaml
│ ├── hyp.scratch-high.yaml
│ ├── hyp.scratch-low.yaml
│ └── hyp.scratch-med.yaml
├── images
│ ├── bus.jpg
│ └── zidane.jpg
├── my_person.yaml
├── scripts
│ ├── download_weights.sh
│ ├── get_coco.sh
│ └── get_coco128.sh
└── xView.yaml
├── datasets
└── mydata
│ ├── Annotations
│ ├── 1066405,1b8000ef60354f.xml
│ ├── 1066405,2a72f000f214d26a.xml
│ ├── 1066405,2ac2400079a6d80f.xml
│ ├── 1066405,2b6000ffe20c07.xml
│ ├── 1066405,2bf6e00075455d3c.xml
│ ├── 1066405,2bfbf000c47880b7.xml
│ ├── 1066405,2c8c7000530eb0e7.xml
│ ├── 1066405,2cdca0006185e7eb.xml
│ ├── 1066405,2d2c6000adf6f6f4.xml
│ └── 1066405,2d6f2000fec9dcab.xml
│ ├── images
│ ├── 1066405,1b8000ef60354f.jpg
│ ├── 1066405,2a72f000f214d26a.jpg
│ ├── 1066405,2ac2400079a6d80f.jpg
│ ├── 1066405,2b6000ffe20c07.jpg
│ ├── 1066405,2bf6e00075455d3c.jpg
│ ├── 1066405,2bfbf000c47880b7.jpg
│ ├── 1066405,2c8c7000530eb0e7.jpg
│ ├── 1066405,2cdca0006185e7eb.jpg
│ ├── 1066405,2d2c6000adf6f6f4.jpg
│ └── 1066405,2d6f2000fec9dcab.jpg
│ ├── labels
│ ├── 1066405,1b8000ef60354f.txt
│ ├── 1066405,2a72f000f214d26a.txt
│ ├── 1066405,2ac2400079a6d80f.txt
│ ├── 1066405,2b6000ffe20c07.txt
│ ├── 1066405,2bf6e00075455d3c.txt
│ ├── 1066405,2bfbf000c47880b7.txt
│ ├── 1066405,2c8c7000530eb0e7.txt
│ ├── 1066405,2cdca0006185e7eb.txt
│ ├── 1066405,2d2c6000adf6f6f4.txt
│ └── 1066405,2d6f2000fec9dcab.txt
│ ├── test.txt
│ ├── train.cache
│ ├── train.txt
│ ├── val.cache
│ └── val.txt
├── detect.py
├── export.py
├── hubconf.py
├── models
├── __init__.py
├── __pycache__
│ ├── __init__.cpython-37.pyc
│ ├── common.cpython-37.pyc
│ ├── experimental.cpython-37.pyc
│ └── yolo.cpython-37.pyc
├── common.py
├── experimental.py
├── hub
│ ├── anchors.yaml
│ ├── yolov3-spp.yaml
│ ├── yolov3-tiny.yaml
│ ├── yolov3.yaml
│ ├── yolov5-bifpn.yaml
│ ├── yolov5-fpn.yaml
│ ├── yolov5-p2.yaml
│ ├── yolov5-p34.yaml
│ ├── yolov5-p6.yaml
│ ├── yolov5-p7.yaml
│ ├── yolov5-panet.yaml
│ ├── yolov5l6.yaml
│ ├── yolov5m6.yaml
│ ├── yolov5n6.yaml
│ ├── yolov5s-ghost.yaml
│ ├── yolov5s-transformer.yaml
│ ├── yolov5s6.yaml
│ └── yolov5x6.yaml
├── tf.py
├── yolo.py
├── yolov5l.yaml
├── yolov5m.yaml
├── yolov5m_C3CA.yaml
├── yolov5m_C3CBAM.yaml
├── yolov5m_C3ECA.yaml
├── yolov5m_C3SE.yaml
├── yolov5m_CBAM.yaml
├── yolov5m_CoordAtt.yaml
├── yolov5m_ECA.yaml
├── yolov5m_SE.yaml
├── yolov5n.yaml
├── yolov5s.yaml
└── yolov5x.yaml
├── requirements.txt
├── setup.cfg
├── split.py
├── train.py
├── tutorial.ipynb
├── utils
├── __init__.py
├── __pycache__
│ ├── __init__.cpython-37.pyc
│ ├── augmentations.cpython-37.pyc
│ ├── autoanchor.cpython-37.pyc
│ ├── autobatch.cpython-37.pyc
│ ├── callbacks.cpython-37.pyc
│ ├── datasets.cpython-37.pyc
│ ├── downloads.cpython-37.pyc
│ ├── general.cpython-37.pyc
│ ├── loss.cpython-37.pyc
│ ├── metrics.cpython-37.pyc
│ ├── plots.cpython-37.pyc
│ └── torch_utils.cpython-37.pyc
├── activations.py
├── augmentations.py
├── autoanchor.py
├── autobatch.py
├── aws
│ ├── __init__.py
│ ├── mime.sh
│ ├── resume.py
│ └── userdata.sh
├── benchmarks.py
├── callbacks.py
├── datasets.py
├── docker
│ ├── .dockerignore
│ ├── Dockerfile
│ └── Dockerfile-cpu
├── downloads.py
├── flask_rest_api
│ ├── README.md
│ ├── example_request.py
│ └── restapi.py
├── general.py
├── google_app_engine
│ ├── Dockerfile
│ ├── additional_requirements.txt
│ └── app.yaml
├── loggers
│ ├── __init__.py
│ ├── __pycache__
│ │ └── __init__.cpython-37.pyc
│ └── wandb
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ ├── __init__.cpython-37.pyc
│ │ └── wandb_utils.cpython-37.pyc
│ │ ├── log_dataset.py
│ │ ├── sweep.py
│ │ ├── sweep.yaml
│ │ └── wandb_utils.py
├── loss.py
├── metrics.py
├── plots.py
└── torch_utils.py
├── val.py
└── voc_label.py
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | # Define hooks for code formatting
2 | # Will be applied on any updated commit files if a user has installed and linked commit hook
3 |
4 | default_language_version:
5 | python: python3.8
6 |
7 | # Define bot property if installed via https://github.com/marketplace/pre-commit-ci
8 | ci:
9 | autofix_prs: true
10 | autoupdate_commit_msg: '[pre-commit.ci] pre-commit suggestions'
11 | autoupdate_schedule: quarterly
12 | # submodules: true
13 |
14 | repos:
15 | - repo: https://github.com/pre-commit/pre-commit-hooks
16 | rev: v4.1.0
17 | hooks:
18 | - id: end-of-file-fixer
19 | - id: trailing-whitespace
20 | - id: check-case-conflict
21 | - id: check-yaml
22 | - id: check-toml
23 | - id: pretty-format-json
24 | - id: check-docstring-first
25 |
26 | - repo: https://github.com/asottile/pyupgrade
27 | rev: v2.31.1
28 | hooks:
29 | - id: pyupgrade
30 | args: [--py36-plus]
31 | name: Upgrade code
32 |
33 | - repo: https://github.com/PyCQA/isort
34 | rev: 5.10.1
35 | hooks:
36 | - id: isort
37 | name: Sort imports
38 |
39 | - repo: https://github.com/pre-commit/mirrors-yapf
40 | rev: v0.32.0
41 | hooks:
42 | - id: yapf
43 | name: YAPF formatting
44 |
45 | # TODO
46 | #- repo: https://github.com/executablebooks/mdformat
47 | # rev: 0.7.7
48 | # hooks:
49 | # - id: mdformat
50 | # additional_dependencies:
51 | # - mdformat-gfm
52 | # - mdformat-black
53 | # - mdformat_frontmatter
54 |
55 | - repo: https://github.com/asottile/yesqa
56 | rev: v1.3.0
57 | hooks:
58 | - id: yesqa
59 |
60 | - repo: https://github.com/PyCQA/flake8
61 | rev: 4.0.1
62 | hooks:
63 | - id: flake8
64 | name: PEP8
65 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | ## Contributing to YOLOv5 🚀
2 |
3 | We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible, whether it's:
4 |
5 | - Reporting a bug
6 | - Discussing the current state of the code
7 | - Submitting a fix
8 | - Proposing a new feature
9 | - Becoming a maintainer
10 |
11 | YOLOv5 works so well due to our combined community effort, and for every small improvement you contribute you will be
12 | helping push the frontiers of what's possible in AI 😃!
13 |
14 | ## Submitting a Pull Request (PR) 🛠️
15 |
16 | Submitting a PR is easy! This example shows how to submit a PR for updating `requirements.txt` in 4 steps:
17 |
18 | ### 1. Select File to Update
19 |
20 | Select `requirements.txt` to update by clicking on it in GitHub.
21 |
22 |
23 | ### 2. Click 'Edit this file'
24 |
25 | Button is in top-right corner.
26 |
27 |
28 | ### 3. Make Changes
29 |
30 | Change `matplotlib` version from `3.2.2` to `3.3`.
31 |
32 |
33 | ### 4. Preview Changes and Submit PR
34 |
35 | Click on the **Preview changes** tab to verify your updates. At the bottom of the screen select 'Create a **new branch**
36 | for this commit', assign your branch a descriptive name such as `fix/matplotlib_version` and click the green **Propose
37 | changes** button. All done, your PR is now submitted to YOLOv5 for review and approval 😃!
38 |
39 |
40 | ### PR recommendations
41 |
42 | To allow your work to be integrated as seamlessly as possible, we advise you to:
43 |
44 | - ✅ Verify your PR is **up-to-date with upstream/master.** If your PR is behind upstream/master an
45 | automatic [GitHub Actions](https://github.com/ultralytics/yolov5/blob/master/.github/workflows/rebase.yml) merge may
46 | be attempted by writing /rebase in a new comment, or by running the following code, replacing 'feature' with the name
47 | of your local branch:
48 |
49 | ```bash
50 | git remote add upstream https://github.com/ultralytics/yolov5.git
51 | git fetch upstream
52 | # git checkout feature # <--- replace 'feature' with local branch name
53 | git merge upstream/master
54 | git push -u origin -f
55 | ```
56 |
57 | - ✅ Verify all Continuous Integration (CI) **checks are passing**.
58 | - ✅ Reduce changes to the absolute **minimum** required for your bug fix or feature addition. _"It is not daily increase
59 | but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ — Bruce Lee
60 |
61 | ## Submitting a Bug Report 🐛
62 |
63 | If you spot a problem with YOLOv5 please submit a Bug Report!
64 |
65 | For us to start investigating a possible problem we need to be able to reproduce it ourselves first. We've created a few
66 | short guidelines below to help users provide what we need in order to get started.
67 |
68 | When asking a question, people will be better able to provide help if you provide **code** that they can easily
69 | understand and use to **reproduce** the problem. This is referred to by community members as creating
70 | a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example). Your code that reproduces
71 | the problem should be:
72 |
73 | * ✅ **Minimal** – Use as little code as possible that still produces the same problem
74 | * ✅ **Complete** – Provide **all** parts someone else needs to reproduce your problem in the question itself
75 | * ✅ **Reproducible** – Test the code you're about to provide to make sure it reproduces the problem
76 |
77 | In addition to the above requirements, for [Ultralytics](https://ultralytics.com/) to provide assistance your code
78 | should be:
79 |
80 | * ✅ **Current** – Verify that your code is up-to-date with current
81 | GitHub [master](https://github.com/ultralytics/yolov5/tree/master), and if necessary `git pull` or `git clone` a new
82 | copy to ensure your problem has not already been resolved by previous commits.
83 | * ✅ **Unmodified** – Your problem must be reproducible without any modifications to the codebase in this
84 | repository. [Ultralytics](https://ultralytics.com/) does not provide support for custom code ⚠️.
85 |
86 | If you believe your problem meets all of the above criteria, please close this issue and raise a new one using the 🐛 **
87 | Bug Report** [template](https://github.com/ultralytics/yolov5/issues/new/choose) and providing
88 | a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example) to help us better
89 | understand and diagnose your problem.
90 |
91 | ## License
92 |
93 | By contributing, you agree that your contributions will be licensed under
94 | the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/)
95 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # yolov5_attention
2 | YOLOV5 v6.1添加 ECA CA SE CBAM C3SE C3ECA C3CBAM C3CA注意力机制
3 | 哔哩哔哩
4 | https://www.bilibili.com/video/BV1kS4y1c7Bm?spm_id_from=333.999.0.0
5 |
--------------------------------------------------------------------------------
/data/Argoverse.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ by Argo AI
3 | # Example usage: python train.py --data Argoverse.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── Argoverse ← downloads here (31.3 GB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/Argoverse # dataset root dir
12 | train: Argoverse-1.1/images/train/ # train images (relative to 'path') 39384 images
13 | val: Argoverse-1.1/images/val/ # val images (relative to 'path') 15062 images
14 | test: Argoverse-1.1/images/test/ # test images (optional) https://eval.ai/web/challenges/challenge-page/800/overview
15 |
16 | # Classes
17 | nc: 8 # number of classes
18 | names: ['person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck', 'traffic_light', 'stop_sign'] # class names
19 |
20 |
21 | # Download script/URL (optional) ---------------------------------------------------------------------------------------
22 | download: |
23 | import json
24 |
25 | from tqdm.auto import tqdm
26 | from utils.general import download, Path
27 |
28 |
29 | def argoverse2yolo(set):
30 | labels = {}
31 | a = json.load(open(set, "rb"))
32 | for annot in tqdm(a['annotations'], desc=f"Converting {set} to YOLOv5 format..."):
33 | img_id = annot['image_id']
34 | img_name = a['images'][img_id]['name']
35 | img_label_name = img_name[:-3] + "txt"
36 |
37 | cls = annot['category_id'] # instance class id
38 | x_center, y_center, width, height = annot['bbox']
39 | x_center = (x_center + width / 2) / 1920.0 # offset and scale
40 | y_center = (y_center + height / 2) / 1200.0 # offset and scale
41 | width /= 1920.0 # scale
42 | height /= 1200.0 # scale
43 |
44 | img_dir = set.parents[2] / 'Argoverse-1.1' / 'labels' / a['seq_dirs'][a['images'][annot['image_id']]['sid']]
45 | if not img_dir.exists():
46 | img_dir.mkdir(parents=True, exist_ok=True)
47 |
48 | k = str(img_dir / img_label_name)
49 | if k not in labels:
50 | labels[k] = []
51 | labels[k].append(f"{cls} {x_center} {y_center} {width} {height}\n")
52 |
53 | for k in labels:
54 | with open(k, "w") as f:
55 | f.writelines(labels[k])
56 |
57 |
58 | # Download
59 | dir = Path('../datasets/Argoverse') # dataset root dir
60 | urls = ['https://argoverse-hd.s3.us-east-2.amazonaws.com/Argoverse-HD-Full.zip']
61 | download(urls, dir=dir, delete=False)
62 |
63 | # Convert
64 | annotations_dir = 'Argoverse-HD/annotations/'
65 | (dir / 'Argoverse-1.1' / 'tracking').rename(dir / 'Argoverse-1.1' / 'images') # rename 'tracking' to 'images'
66 | for d in "train.json", "val.json":
67 |       argoverse2yolo(dir / annotations_dir / d)  # convert Argoverse annotations to YOLO labels
68 |
--------------------------------------------------------------------------------
/data/GlobalWheat2020.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # Global Wheat 2020 dataset http://www.global-wheat.com/ by University of Saskatchewan
3 | # Example usage: python train.py --data GlobalWheat2020.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── GlobalWheat2020 ← downloads here (7.0 GB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/GlobalWheat2020 # dataset root dir
12 | train: # train images (relative to 'path') 3422 images
13 | - images/arvalis_1
14 | - images/arvalis_2
15 | - images/arvalis_3
16 | - images/ethz_1
17 | - images/rres_1
18 | - images/inrae_1
19 | - images/usask_1
20 | val: # val images (relative to 'path') 748 images (WARNING: train set contains ethz_1)
21 | - images/ethz_1
22 | test: # test images (optional) 1276 images
23 | - images/utokyo_1
24 | - images/utokyo_2
25 | - images/nau_1
26 | - images/uq_1
27 |
28 | # Classes
29 | nc: 1 # number of classes
30 | names: ['wheat_head'] # class names
31 |
32 |
33 | # Download script/URL (optional) ---------------------------------------------------------------------------------------
34 | download: |
35 | from utils.general import download, Path
36 |
37 |
38 | # Download
39 | dir = Path(yaml['path']) # dataset root dir
40 | urls = ['https://zenodo.org/record/4298502/files/global-wheat-codalab-official.zip',
41 | 'https://github.com/ultralytics/yolov5/releases/download/v1.0/GlobalWheat2020_labels.zip']
42 | download(urls, dir=dir)
43 |
44 | # Make Directories
45 | for p in 'annotations', 'images', 'labels':
46 | (dir / p).mkdir(parents=True, exist_ok=True)
47 |
48 | # Move
49 | for p in 'arvalis_1', 'arvalis_2', 'arvalis_3', 'ethz_1', 'rres_1', 'inrae_1', 'usask_1', \
50 | 'utokyo_1', 'utokyo_2', 'nau_1', 'uq_1':
51 | (dir / p).rename(dir / 'images' / p) # move to /images
52 | f = (dir / p).with_suffix('.json') # json file
53 | if f.exists():
54 | f.rename((dir / 'annotations' / p).with_suffix('.json')) # move to /annotations
55 |
--------------------------------------------------------------------------------
/data/SKU-110K.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 by Trax Retail
3 | # Example usage: python train.py --data SKU-110K.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── SKU-110K ← downloads here (13.6 GB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/SKU-110K # dataset root dir
12 | train: train.txt # train images (relative to 'path') 8219 images
13 | val: val.txt # val images (relative to 'path') 588 images
14 | test: test.txt # test images (optional) 2936 images
15 |
16 | # Classes
17 | nc: 1 # number of classes
18 | names: ['object'] # class names
19 |
20 |
21 | # Download script/URL (optional) ---------------------------------------------------------------------------------------
22 | download: |
23 | import shutil
24 | from tqdm.auto import tqdm
25 | from utils.general import np, pd, Path, download, xyxy2xywh
26 |
27 |
28 | # Download
29 | dir = Path(yaml['path']) # dataset root dir
30 | parent = Path(dir.parent) # download dir
31 | urls = ['http://trax-geometry.s3.amazonaws.com/cvpr_challenge/SKU110K_fixed.tar.gz']
32 | download(urls, dir=parent, delete=False)
33 |
34 | # Rename directories
35 | if dir.exists():
36 | shutil.rmtree(dir)
37 | (parent / 'SKU110K_fixed').rename(dir) # rename dir
38 | (dir / 'labels').mkdir(parents=True, exist_ok=True) # create labels dir
39 |
40 | # Convert labels
41 | names = 'image', 'x1', 'y1', 'x2', 'y2', 'class', 'image_width', 'image_height' # column names
42 | for d in 'annotations_train.csv', 'annotations_val.csv', 'annotations_test.csv':
43 | x = pd.read_csv(dir / 'annotations' / d, names=names).values # annotations
44 | images, unique_images = x[:, 0], np.unique(x[:, 0])
45 | with open((dir / d).with_suffix('.txt').__str__().replace('annotations_', ''), 'w') as f:
46 | f.writelines(f'./images/{s}\n' for s in unique_images)
47 | for im in tqdm(unique_images, desc=f'Converting {dir / d}'):
48 | cls = 0 # single-class dataset
49 | with open((dir / 'labels' / im).with_suffix('.txt'), 'a') as f:
50 | for r in x[images == im]:
51 | w, h = r[6], r[7] # image width, height
52 | xywh = xyxy2xywh(np.array([[r[1] / w, r[2] / h, r[3] / w, r[4] / h]]))[0] # instance
53 | f.write(f"{cls} {xywh[0]:.5f} {xywh[1]:.5f} {xywh[2]:.5f} {xywh[3]:.5f}\n") # write label
54 |
--------------------------------------------------------------------------------
/data/VOC.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC by University of Oxford
3 | # Example usage: python train.py --data VOC.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── VOC ← downloads here (2.8 GB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/VOC
12 | train: # train images (relative to 'path') 16551 images
13 | - images/train2012
14 | - images/train2007
15 | - images/val2012
16 | - images/val2007
17 | val: # val images (relative to 'path') 4952 images
18 | - images/test2007
19 | test: # test images (optional)
20 | - images/test2007
21 |
22 | # Classes
23 | nc: 20 # number of classes
24 | names: ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog',
25 | 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'] # class names
26 |
27 |
28 | # Download script/URL (optional) ---------------------------------------------------------------------------------------
29 | download: |
30 | import xml.etree.ElementTree as ET
31 |
32 | from tqdm.auto import tqdm
33 | from utils.general import download, Path
34 |
35 |
36 | def convert_label(path, lb_path, year, image_id):
37 | def convert_box(size, box):
38 | dw, dh = 1. / size[0], 1. / size[1]
39 | x, y, w, h = (box[0] + box[1]) / 2.0 - 1, (box[2] + box[3]) / 2.0 - 1, box[1] - box[0], box[3] - box[2]
40 | return x * dw, y * dh, w * dw, h * dh
41 |
42 | in_file = open(path / f'VOC{year}/Annotations/{image_id}.xml')
43 | out_file = open(lb_path, 'w')
44 | tree = ET.parse(in_file)
45 | root = tree.getroot()
46 | size = root.find('size')
47 | w = int(size.find('width').text)
48 | h = int(size.find('height').text)
49 |
50 | for obj in root.iter('object'):
51 | cls = obj.find('name').text
52 | if cls in yaml['names'] and not int(obj.find('difficult').text) == 1:
53 | xmlbox = obj.find('bndbox')
54 | bb = convert_box((w, h), [float(xmlbox.find(x).text) for x in ('xmin', 'xmax', 'ymin', 'ymax')])
55 | cls_id = yaml['names'].index(cls) # class id
56 | out_file.write(" ".join([str(a) for a in (cls_id, *bb)]) + '\n')
57 |
58 |
59 | # Download
60 | dir = Path(yaml['path']) # dataset root dir
61 | url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/'
62 | urls = [url + 'VOCtrainval_06-Nov-2007.zip', # 446MB, 5012 images
63 | url + 'VOCtest_06-Nov-2007.zip', # 438MB, 4953 images
64 | url + 'VOCtrainval_11-May-2012.zip'] # 1.95GB, 17126 images
65 | download(urls, dir=dir / 'images', delete=False, curl=True, threads=3)
66 |
67 | # Convert
68 | path = dir / f'images/VOCdevkit'
69 | for year, image_set in ('2012', 'train'), ('2012', 'val'), ('2007', 'train'), ('2007', 'val'), ('2007', 'test'):
70 | imgs_path = dir / 'images' / f'{image_set}{year}'
71 | lbs_path = dir / 'labels' / f'{image_set}{year}'
72 | imgs_path.mkdir(exist_ok=True, parents=True)
73 | lbs_path.mkdir(exist_ok=True, parents=True)
74 |
75 | with open(path / f'VOC{year}/ImageSets/Main/{image_set}.txt') as f:
76 | image_ids = f.read().strip().split()
77 | for id in tqdm(image_ids, desc=f'{image_set}{year}'):
78 | f = path / f'VOC{year}/JPEGImages/{id}.jpg' # old img path
79 | lb_path = (lbs_path / f.name).with_suffix('.txt') # new label path
80 | f.rename(imgs_path / f.name) # move image
81 | convert_label(path, lb_path, year, id) # convert labels to YOLO format
82 |
--------------------------------------------------------------------------------
/data/VisDrone.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset by Tianjin University
3 | # Example usage: python train.py --data VisDrone.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── VisDrone ← downloads here (2.3 GB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/VisDrone # dataset root dir
12 | train: VisDrone2019-DET-train/images # train images (relative to 'path') 6471 images
13 | val: VisDrone2019-DET-val/images # val images (relative to 'path') 548 images
14 | test: VisDrone2019-DET-test-dev/images # test images (optional) 1610 images
15 |
16 | # Classes
17 | nc: 10 # number of classes
18 | names: ['pedestrian', 'people', 'bicycle', 'car', 'van', 'truck', 'tricycle', 'awning-tricycle', 'bus', 'motor']
19 |
20 |
21 | # Download script/URL (optional) ---------------------------------------------------------------------------------------
22 | download: |
23 | from utils.general import download, os, Path
24 |
25 | def visdrone2yolo(dir):
26 | from PIL import Image
27 | from tqdm.auto import tqdm
28 |
29 | def convert_box(size, box):
30 | # Convert VisDrone box to YOLO xywh box
31 | dw = 1. / size[0]
32 | dh = 1. / size[1]
33 | return (box[0] + box[2] / 2) * dw, (box[1] + box[3] / 2) * dh, box[2] * dw, box[3] * dh
34 |
35 | (dir / 'labels').mkdir(parents=True, exist_ok=True) # make labels directory
36 | pbar = tqdm((dir / 'annotations').glob('*.txt'), desc=f'Converting {dir}')
37 | for f in pbar:
38 | img_size = Image.open((dir / 'images' / f.name).with_suffix('.jpg')).size
39 | lines = []
40 | with open(f, 'r') as file: # read annotation.txt
41 | for row in [x.split(',') for x in file.read().strip().splitlines()]:
42 | if row[4] == '0': # VisDrone 'ignored regions' class 0
43 | continue
44 | cls = int(row[5]) - 1
45 | box = convert_box(img_size, tuple(map(int, row[:4])))
46 | lines.append(f"{cls} {' '.join(f'{x:.6f}' for x in box)}\n")
47 | with open(str(f).replace(os.sep + 'annotations' + os.sep, os.sep + 'labels' + os.sep), 'w') as fl:
48 | fl.writelines(lines) # write label.txt
49 |
50 |
51 | # Download
52 | dir = Path(yaml['path']) # dataset root dir
53 | urls = ['https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-train.zip',
54 | 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-val.zip',
55 | 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-dev.zip',
56 | 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-challenge.zip']
57 | download(urls, dir=dir, curl=True, threads=4)
58 |
59 | # Convert
60 | for d in 'VisDrone2019-DET-train', 'VisDrone2019-DET-val', 'VisDrone2019-DET-test-dev':
61 | visdrone2yolo(dir / d) # convert VisDrone annotations to YOLO labels
62 |
--------------------------------------------------------------------------------
/data/coco.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # COCO 2017 dataset http://cocodataset.org by Microsoft
3 | # Example usage: python train.py --data coco.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── coco ← downloads here (20.1 GB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/coco # dataset root dir
12 | train: train2017.txt # train images (relative to 'path') 118287 images
13 | val: val2017.txt # val images (relative to 'path') 5000 images
14 | test: test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794
15 |
16 | # Classes
17 | nc: 80 # number of classes
18 | names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
19 | 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
20 | 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
21 | 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
22 | 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
23 | 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
24 | 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
25 | 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
26 | 'hair drier', 'toothbrush'] # class names
27 |
28 |
29 | # Download script/URL (optional)
30 | download: |
31 | from utils.general import download, Path
32 |
33 |
34 | # Download labels
35 | segments = False # segment or box labels
36 | dir = Path(yaml['path']) # dataset root dir
37 | url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/'
38 | urls = [url + ('coco2017labels-segments.zip' if segments else 'coco2017labels.zip')] # labels
39 | download(urls, dir=dir.parent)
40 |
41 | # Download data
42 | urls = ['http://images.cocodataset.org/zips/train2017.zip', # 19G, 118k images
43 | 'http://images.cocodataset.org/zips/val2017.zip', # 1G, 5k images
44 | 'http://images.cocodataset.org/zips/test2017.zip'] # 7G, 41k images (optional)
45 | download(urls, dir=dir / 'images', threads=3)
46 |
--------------------------------------------------------------------------------
/data/coco128.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics
3 | # Example usage: python train.py --data coco128.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── coco128 ← downloads here (7 MB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/coco128 # dataset root dir
12 | train: images/train2017 # train images (relative to 'path') 128 images
13 | val: images/train2017 # val images (relative to 'path') 128 images
14 | test: # test images (optional)
15 |
16 | # Classes
17 | nc: 80 # number of classes
18 | names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
19 | 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
20 | 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
21 | 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
22 | 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
23 | 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
24 | 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
25 | 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
26 | 'hair drier', 'toothbrush'] # class names
27 |
28 |
29 | # Download script/URL (optional)
30 | download: https://ultralytics.com/assets/coco128.zip
31 |
--------------------------------------------------------------------------------
/data/hyps/hyp.Objects365.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # Hyperparameters for Objects365 training
3 | # python train.py --weights yolov5m.pt --data Objects365.yaml --evolve
4 | # See Hyperparameter Evolution tutorial for details https://github.com/ultralytics/yolov5#tutorials
5 |
6 | lr0: 0.00258
7 | lrf: 0.17
8 | momentum: 0.779
9 | weight_decay: 0.00058
10 | warmup_epochs: 1.33
11 | warmup_momentum: 0.86
12 | warmup_bias_lr: 0.0711
13 | box: 0.0539
14 | cls: 0.299
15 | cls_pw: 0.825
16 | obj: 0.632
17 | obj_pw: 1.0
18 | iou_t: 0.2
19 | anchor_t: 3.44
20 | anchors: 3.2
21 | fl_gamma: 0.0
22 | hsv_h: 0.0188
23 | hsv_s: 0.704
24 | hsv_v: 0.36
25 | degrees: 0.0
26 | translate: 0.0902
27 | scale: 0.491
28 | shear: 0.0
29 | perspective: 0.0
30 | flipud: 0.0
31 | fliplr: 0.5
32 | mosaic: 1.0
33 | mixup: 0.0
34 | copy_paste: 0.0
35 |
--------------------------------------------------------------------------------
/data/hyps/hyp.VOC.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # Hyperparameters for VOC training
3 | # python train.py --batch 128 --weights yolov5m6.pt --data VOC.yaml --epochs 50 --img 512 --hyp hyp.scratch-med.yaml --evolve
4 | # See Hyperparameter Evolution tutorial for details https://github.com/ultralytics/yolov5#tutorials
5 |
6 | # YOLOv5 Hyperparameter Evolution Results
7 | # Best generation: 467
8 | # Last generation: 996
9 | # metrics/precision, metrics/recall, metrics/mAP_0.5, metrics/mAP_0.5:0.95, val/box_loss, val/obj_loss, val/cls_loss
10 | # 0.87729, 0.85125, 0.91286, 0.72664, 0.0076739, 0.0042529, 0.0013865
11 |
12 | lr0: 0.00334
13 | lrf: 0.15135
14 | momentum: 0.74832
15 | weight_decay: 0.00025
16 | warmup_epochs: 3.3835
17 | warmup_momentum: 0.59462
18 | warmup_bias_lr: 0.18657
19 | box: 0.02
20 | cls: 0.21638
21 | cls_pw: 0.5
22 | obj: 0.51728
23 | obj_pw: 0.67198
24 | iou_t: 0.2
25 | anchor_t: 3.3744
26 | fl_gamma: 0.0
27 | hsv_h: 0.01041
28 | hsv_s: 0.54703
29 | hsv_v: 0.27739
30 | degrees: 0.0
31 | translate: 0.04591
32 | scale: 0.75544
33 | shear: 0.0
34 | perspective: 0.0
35 | flipud: 0.0
36 | fliplr: 0.5
37 | mosaic: 0.85834
38 | mixup: 0.04266
39 | copy_paste: 0.0
40 | anchors: 3.412
41 |
--------------------------------------------------------------------------------
/data/hyps/hyp.scratch-high.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # Hyperparameters for high-augmentation COCO training from scratch
3 | # python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300
4 | # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials
5 |
6 | lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3)
7 | lrf: 0.1 # final OneCycleLR learning rate (lr0 * lrf)
8 | momentum: 0.937 # SGD momentum/Adam beta1
9 | weight_decay: 0.0005 # optimizer weight decay 5e-4
10 | warmup_epochs: 3.0 # warmup epochs (fractions ok)
11 | warmup_momentum: 0.8 # warmup initial momentum
12 | warmup_bias_lr: 0.1 # warmup initial bias lr
13 | box: 0.05 # box loss gain
14 | cls: 0.3 # cls loss gain
15 | cls_pw: 1.0 # cls BCELoss positive_weight
16 | obj: 0.7 # obj loss gain (scale with pixels)
17 | obj_pw: 1.0 # obj BCELoss positive_weight
18 | iou_t: 0.20 # IoU training threshold
19 | anchor_t: 4.0 # anchor-multiple threshold
20 | # anchors: 3 # anchors per output layer (0 to ignore)
21 | fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)
22 | hsv_h: 0.015 # image HSV-Hue augmentation (fraction)
23 | hsv_s: 0.7 # image HSV-Saturation augmentation (fraction)
24 | hsv_v: 0.4 # image HSV-Value augmentation (fraction)
25 | degrees: 0.0 # image rotation (+/- deg)
26 | translate: 0.1 # image translation (+/- fraction)
27 | scale: 0.9 # image scale (+/- gain)
28 | shear: 0.0 # image shear (+/- deg)
29 | perspective: 0.0 # image perspective (+/- fraction), range 0-0.001
30 | flipud: 0.0 # image flip up-down (probability)
31 | fliplr: 0.5 # image flip left-right (probability)
32 | mosaic: 1.0 # image mosaic (probability)
33 | mixup: 0.1 # image mixup (probability)
34 | copy_paste: 0.1 # segment copy-paste (probability)
35 |
--------------------------------------------------------------------------------
/data/hyps/hyp.scratch-low.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # Hyperparameters for low-augmentation COCO training from scratch
3 | # python train.py --batch 64 --cfg yolov5n6.yaml --weights '' --data coco.yaml --img 640 --epochs 300 --linear
4 | # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials
5 |
6 | lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3)
7 | lrf: 0.01 # final OneCycleLR learning rate (lr0 * lrf)
8 | momentum: 0.937 # SGD momentum/Adam beta1
9 | weight_decay: 0.0005 # optimizer weight decay 5e-4
10 | warmup_epochs: 3.0 # warmup epochs (fractions ok)
11 | warmup_momentum: 0.8 # warmup initial momentum
12 | warmup_bias_lr: 0.1 # warmup initial bias lr
13 | box: 0.05 # box loss gain
14 | cls: 0.5 # cls loss gain
15 | cls_pw: 1.0 # cls BCELoss positive_weight
16 | obj: 1.0 # obj loss gain (scale with pixels)
17 | obj_pw: 1.0 # obj BCELoss positive_weight
18 | iou_t: 0.20 # IoU training threshold
19 | anchor_t: 4.0 # anchor-multiple threshold
20 | # anchors: 3 # anchors per output layer (0 to ignore)
21 | fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)
22 | hsv_h: 0.015 # image HSV-Hue augmentation (fraction)
23 | hsv_s: 0.7 # image HSV-Saturation augmentation (fraction)
24 | hsv_v: 0.4 # image HSV-Value augmentation (fraction)
25 | degrees: 0.0 # image rotation (+/- deg)
26 | translate: 0.1 # image translation (+/- fraction)
27 | scale: 0.5 # image scale (+/- gain)
28 | shear: 0.0 # image shear (+/- deg)
29 | perspective: 0.0 # image perspective (+/- fraction), range 0-0.001
30 | flipud: 0.0 # image flip up-down (probability)
31 | fliplr: 0.5 # image flip left-right (probability)
32 | mosaic: 1.0 # image mosaic (probability)
33 | mixup: 0.0 # image mixup (probability)
34 | copy_paste: 0.0 # segment copy-paste (probability)
35 |
--------------------------------------------------------------------------------
/data/hyps/hyp.scratch-med.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # Hyperparameters for medium-augmentation COCO training from scratch
3 | # python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300
4 | # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials
5 |
6 | lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3)
7 | lrf: 0.1 # final OneCycleLR learning rate (lr0 * lrf)
8 | momentum: 0.937 # SGD momentum/Adam beta1
9 | weight_decay: 0.0005 # optimizer weight decay 5e-4
10 | warmup_epochs: 3.0 # warmup epochs (fractions ok)
11 | warmup_momentum: 0.8 # warmup initial momentum
12 | warmup_bias_lr: 0.1 # warmup initial bias lr
13 | box: 0.05 # box loss gain
14 | cls: 0.3 # cls loss gain
15 | cls_pw: 1.0 # cls BCELoss positive_weight
16 | obj: 0.7 # obj loss gain (scale with pixels)
17 | obj_pw: 1.0 # obj BCELoss positive_weight
18 | iou_t: 0.20 # IoU training threshold
19 | anchor_t: 4.0 # anchor-multiple threshold
20 | # anchors: 3 # anchors per output layer (0 to ignore)
21 | fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)
22 | hsv_h: 0.015 # image HSV-Hue augmentation (fraction)
23 | hsv_s: 0.7 # image HSV-Saturation augmentation (fraction)
24 | hsv_v: 0.4 # image HSV-Value augmentation (fraction)
25 | degrees: 0.0 # image rotation (+/- deg)
26 | translate: 0.1 # image translation (+/- fraction)
27 | scale: 0.9 # image scale (+/- gain)
28 | shear: 0.0 # image shear (+/- deg)
29 | perspective: 0.0 # image perspective (+/- fraction), range 0-0.001
30 | flipud: 0.0 # image flip up-down (probability)
31 | fliplr: 0.5 # image flip left-right (probability)
32 | mosaic: 1.0 # image mosaic (probability)
33 | mixup: 0.1 # image mixup (probability)
34 | copy_paste: 0.0 # segment copy-paste (probability)
35 |
--------------------------------------------------------------------------------
/data/images/bus.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mohenghui/yolov5_attention/b3b22930ab6e839a49d2c3a244826375130b93cb/data/images/bus.jpg
--------------------------------------------------------------------------------
/data/images/zidane.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mohenghui/yolov5_attention/b3b22930ab6e839a49d2c3a244826375130b93cb/data/images/zidane.jpg
--------------------------------------------------------------------------------
/data/my_person.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # Custom single-class person-detection dataset (VOC XML annotations converted to YOLO labels)
3 | # Example usage: python train.py --data my_person.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── mydata ← dataset root
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../yolov5/datasets/mydata # dataset root dir
12 | train: train.txt # train images (relative to 'path')
13 | val: val.txt # val images (relative to 'path')
14 | test: test.txt # test images (relative to 'path'); optional
15 |
16 | # Classes
17 | nc: 1 # number of classes
18 | names: ['person'] # class names
19 |
20 |
--------------------------------------------------------------------------------
/data/scripts/download_weights.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
3 | # Download latest models from https://github.com/ultralytics/yolov5/releases
4 | # Example usage: bash path/to/download_weights.sh
5 | # parent
6 | # └── yolov5
7 | # ├── yolov5s.pt ← downloads here
8 | # ├── yolov5m.pt
9 | # └── ...
10 |
11 | python - <= cls >= 0, f'incorrect class index {cls}'
74 |
75 | # Write YOLO label
76 | if id not in shapes:
77 | shapes[id] = Image.open(file).size
78 | box = xyxy2xywhn(box[None].astype(np.float), w=shapes[id][0], h=shapes[id][1], clip=True)
79 | with open((labels / id).with_suffix('.txt'), 'a') as f:
80 | f.write(f"{cls} {' '.join(f'{x:.6f}' for x in box[0])}\n") # write label.txt
81 | except Exception as e:
82 | print(f'WARNING: skipping one label for {file}: {e}')
83 |
84 |
85 | # Download manually from https://challenge.xviewdataset.org
86 | dir = Path(yaml['path']) # dataset root dir
87 | # urls = ['https://d307kc0mrhucc3.cloudfront.net/train_labels.zip', # train labels
88 | # 'https://d307kc0mrhucc3.cloudfront.net/train_images.zip', # 15G, 847 train images
89 | # 'https://d307kc0mrhucc3.cloudfront.net/val_images.zip'] # 5G, 282 val images (no labels)
90 | # download(urls, dir=dir, delete=False)
91 |
92 | # Convert labels
93 | convert_labels(dir / 'xView_train.geojson')
94 |
95 | # Move images
96 | images = Path(dir / 'images')
97 | images.mkdir(parents=True, exist_ok=True)
98 | Path(dir / 'train_images').rename(dir / 'images' / 'train')
99 | Path(dir / 'val_images').rename(dir / 'images' / 'val')
100 |
101 | # Split
102 | autosplit(dir / 'images' / 'train')
103 |
--------------------------------------------------------------------------------
/datasets/mydata/Annotations/1066405,1b8000ef60354f.xml:
--------------------------------------------------------------------------------
1 |
2 | D:/youtube_video/data/need/images
3 | 1066405,1b8000ef60354f.jpg
4 | D:\youtube_video\data\yolov5D:/youtube_video/data/need/images\1066405,1b8000ef60354f.jpg
5 |
6 | Unknown
7 |
8 |
9 | 1024
10 | 683
11 | 3
12 |
13 | 0
14 |
15 | person
16 | Unspecified
17 | 0
18 | 0
19 |
20 | 698
21 | 419
22 | 723
23 | 490
24 |
25 |
26 |
27 | person
28 | Unspecified
29 | 0
30 | 0
31 |
32 | 724
33 | 418
34 | 746
35 | 469
36 |
37 |
38 |
39 | person
40 | Unspecified
41 | 0
42 | 0
43 |
44 | 528
45 | 448
46 | 580
47 | 514
48 |
49 |
50 |
51 | person
52 | Unspecified
53 | 0
54 | 0
55 |
56 | 639
57 | 422
58 | 654
59 | 462
60 |
61 |
62 |
63 | person
64 | Unspecified
65 | 0
66 | 0
67 |
68 | 596
69 | 426
70 | 614
71 | 462
72 |
73 |
74 |
75 | person
76 | Unspecified
77 | 0
78 | 0
79 |
80 | 96
81 | 442
82 | 116
83 | 468
84 |
85 |
86 |
87 | person
88 | Unspecified
89 | 0
90 | 0
91 |
92 | 729
93 | 208
94 | 750
95 | 266
96 |
97 |
98 |
99 | person
100 | Unspecified
101 | 0
102 | 0
103 |
104 | 658
105 | 427
106 | 678
107 | 479
108 |
109 |
110 |
111 | person
112 | Unspecified
113 | 0
114 | 0
115 |
116 | 675
117 | 426
118 | 693
119 | 482
120 |
121 |
122 |
123 | person
124 | Unspecified
125 | 0
126 | 0
127 |
128 | 687
129 | 422
130 | 713
131 | 495
132 |
133 |
134 |
135 | person
136 | Unspecified
137 | 0
138 | 0
139 |
140 | 400
141 | 478
142 | 442
143 | 584
144 |
145 |
146 |
147 | person
148 | Unspecified
149 | 0
150 | 0
151 |
152 | 124
153 | 460
154 | 199
155 | 561
156 |
157 |
158 |
159 | person
160 | Unspecified
161 | 0
162 | 0
163 |
164 | 0
165 | 468
166 | 43
167 | 557
168 |
169 |
170 |
171 | person
172 | Unspecified
173 | 0
174 | 0
175 |
176 | 321
177 | 466
178 | 365
179 | 556
180 |
181 |
182 |
183 | person
184 | Unspecified
185 | 0
186 | 0
187 |
188 | 755
189 | 420
190 | 810
191 | 532
192 |
193 |
194 |
195 | person
196 | Unspecified
197 | 0
198 | 0
199 |
200 | 967
201 | 221
202 | 1022
203 | 369
204 |
205 |
206 |
207 | person
208 | Unspecified
209 | 0
210 | 0
211 |
212 | 85
213 | 456
214 | 152
215 | 565
216 |
217 |
218 |
219 | person
220 | Unspecified
221 | 0
222 | 0
223 |
224 | 198
225 | 460
226 | 258
227 | 560
228 |
229 |
230 |
231 | person
232 | Unspecified
233 | 0
234 | 0
235 |
236 | 428
237 | 417
238 | 486
239 | 596
240 |
241 |
242 |
--------------------------------------------------------------------------------
/datasets/mydata/Annotations/1066405,2a72f000f214d26a.xml:
--------------------------------------------------------------------------------
1 |
2 | D:/youtube_video/data/need/images
3 | 1066405,2a72f000f214d26a.jpg
4 | D:\youtube_video\data\yolov5D:/youtube_video/data/need/images\1066405,2a72f000f214d26a.jpg
5 |
6 | Unknown
7 |
8 |
9 | 980
10 | 653
11 | 3
12 |
13 | 0
14 |
15 | person
16 | Unspecified
17 | 0
18 | 0
19 |
20 | 738
21 | 441
22 | 753
23 | 522
24 |
25 |
26 |
27 | person
28 | Unspecified
29 | 0
30 | 0
31 |
32 | 117
33 | 472
34 | 127
35 | 493
36 |
37 |
38 |
39 | person
40 | Unspecified
41 | 0
42 | 0
43 |
44 | 949
45 | 447
46 | 959
47 | 469
48 |
49 |
50 |
51 | person
52 | Unspecified
53 | 0
54 | 0
55 |
56 | 854
57 | 447
58 | 867
59 | 472
60 |
61 |
62 |
63 | person
64 | Unspecified
65 | 0
66 | 0
67 |
68 | 505
69 | 458
70 | 518
71 | 484
72 |
73 |
74 |
75 | person
76 | Unspecified
77 | 0
78 | 0
79 |
80 | 193
81 | 469
82 | 203
83 | 491
84 |
85 |
86 |
87 | person
88 | Unspecified
89 | 0
90 | 0
91 |
92 | 595
93 | 451
94 | 611
95 | 480
96 |
97 |
98 |
99 | person
100 | Unspecified
101 | 0
102 | 0
103 |
104 | 69
105 | 476
106 | 102
107 | 527
108 |
109 |
110 |
111 | person
112 | Unspecified
113 | 0
114 | 0
115 |
116 | 403
117 | 454
118 | 415
119 | 490
120 |
121 |
122 |
123 | person
124 | Unspecified
125 | 0
126 | 0
127 |
128 | 870
129 | 442
130 | 887
131 | 472
132 |
133 |
134 |
135 | person
136 | Unspecified
137 | 0
138 | 0
139 |
140 | 490
141 | 451
142 | 500
143 | 485
144 |
145 |
146 |
147 | person
148 | Unspecified
149 | 0
150 | 0
151 |
152 | 756
153 | 427
154 | 785
155 | 518
156 |
157 |
158 |
159 | person
160 | Unspecified
161 | 0
162 | 0
163 |
164 | 292
165 | 461
166 | 324
167 | 511
168 |
169 |
170 |
171 | person
172 | Unspecified
173 | 0
174 | 0
175 |
176 | 675
177 | 437
178 | 703
179 | 517
180 |
181 |
182 |
183 | person
184 | Unspecified
185 | 0
186 | 0
187 |
188 | 346
189 | 436
190 | 373
191 | 505
192 |
193 |
194 |
--------------------------------------------------------------------------------
/datasets/mydata/Annotations/1066405,2ac2400079a6d80f.xml:
--------------------------------------------------------------------------------
1 |
2 | D:/youtube_video/data/need/images
3 | 1066405,2ac2400079a6d80f.jpg
4 | D:\youtube_video\data\yolov5D:/youtube_video/data/need/images\1066405,2ac2400079a6d80f.jpg
5 |
6 | Unknown
7 |
8 |
9 | 974
10 | 493
11 | 3
12 |
13 | 0
14 |
15 | person
16 | Unspecified
17 | 0
18 | 0
19 |
20 | 324
21 | 248
22 | 379
23 | 432
24 |
25 |
26 |
27 | person
28 | Unspecified
29 | 0
30 | 0
31 |
32 | 853
33 | 212
34 | 912
35 | 343
36 |
37 |
38 |
39 | person
40 | Unspecified
41 | 0
42 | 0
43 |
44 | 255
45 | 205
46 | 376
47 | 434
48 |
49 |
50 |
51 | person
52 | Unspecified
53 | 0
54 | 0
55 |
56 | 812
57 | 212
58 | 850
59 | 313
60 |
61 |
62 |
63 | person
64 | Unspecified
65 | 0
66 | 0
67 |
68 | 668
69 | 331
70 | 730
71 | 419
72 |
73 |
74 |
75 | person
76 | Unspecified
77 | 0
78 | 0
79 |
80 | 867
81 | 312
82 | 959
83 | 418
84 |
85 |
86 |
87 | person
88 | Unspecified
89 | 0
90 | 0
91 |
92 | 246
93 | 184
94 | 314
95 | 440
96 |
97 |
98 |
99 | person
100 | Unspecified
101 | 0
102 | 0
103 |
104 | 94
105 | 290
106 | 146
107 | 433
108 |
109 |
110 |
111 | person
112 | Unspecified
113 | 0
114 | 0
115 |
116 | 161
117 | 263
118 | 209
119 | 440
120 |
121 |
122 |
123 | person
124 | Unspecified
125 | 0
126 | 0
127 |
128 | 855
129 | 210
130 | 912
131 | 274
132 |
133 |
134 |
135 | person
136 | Unspecified
137 | 0
138 | 0
139 |
140 | 134
141 | 270
142 | 174
143 | 435
144 |
145 |
146 |
147 | person
148 | Unspecified
149 | 0
150 | 0
151 |
152 | 197
153 | 268
154 | 246
155 | 433
156 |
157 |
158 |
159 | person
160 | Unspecified
161 | 0
162 | 0
163 |
164 | 541
165 | 45
166 | 673
167 | 491
168 |
169 |
170 |
171 | person
172 | Unspecified
173 | 0
174 | 0
175 |
176 | 829
177 | 342
178 | 880
179 | 418
180 |
181 |
182 |
183 | person
184 | Unspecified
185 | 0
186 | 0
187 |
188 | 0
189 | 223
190 | 45
191 | 444
192 |
193 |
194 |
195 | person
196 | Unspecified
197 | 0
198 | 0
199 |
200 | 38
201 | 212
202 | 99
203 | 445
204 |
205 |
206 |
207 | person
208 | Unspecified
209 | 0
210 | 0
211 |
212 | 900
213 | 225
214 | 969
215 | 347
216 |
217 |
218 |
219 | person
220 | Unspecified
221 | 0
222 | 0
223 |
224 | 380
225 | 66
226 | 519
227 | 492
228 |
229 |
230 |
231 | person
232 | Unspecified
233 | 0
234 | 0
235 |
236 | 638
237 | 24
238 | 824
239 | 493
240 |
241 |
242 |
--------------------------------------------------------------------------------
/datasets/mydata/Annotations/1066405,2bf6e00075455d3c.xml:
--------------------------------------------------------------------------------
1 |
2 | D:/youtube_video/data/need/images
3 | 1066405,2bf6e00075455d3c.jpg
4 | D:\youtube_video\data\yolov5D:/youtube_video/data/need/images\1066405,2bf6e00075455d3c.jpg
5 |
6 | Unknown
7 |
8 |
9 | 800
10 | 533
11 | 3
12 |
13 | 0
14 |
15 | person
16 | Unspecified
17 | 0
18 | 0
19 |
20 | 474
21 | 232
22 | 506
23 | 308
24 |
25 |
26 |
27 | person
28 | Unspecified
29 | 0
30 | 0
31 |
32 | 444
33 | 231
34 | 478
35 | 326
36 |
37 |
38 |
39 | person
40 | Unspecified
41 | 0
42 | 0
43 |
44 | 409
45 | 259
46 | 437
47 | 340
48 |
49 |
50 |
51 | person
52 | Unspecified
53 | 0
54 | 0
55 |
56 | 686
57 | 243
58 | 742
59 | 350
60 |
61 |
62 |
63 | person
64 | Unspecified
65 | 0
66 | 0
67 |
68 | 268
69 | 218
70 | 303
71 | 297
72 |
73 |
74 |
75 | person
76 | Unspecified
77 | 0
78 | 0
79 |
80 | 17
81 | 212
82 | 57
83 | 269
84 |
85 |
86 |
87 | person
88 | Unspecified
89 | 0
90 | 0
91 |
92 | 231
93 | 223
94 | 270
95 | 295
96 |
97 |
98 |
99 | person
100 | Unspecified
101 | 0
102 | 0
103 |
104 | 352
105 | 218
106 | 395
107 | 292
108 |
109 |
110 |
111 | person
112 | Unspecified
113 | 0
114 | 0
115 |
116 | 424
117 | 316
118 | 507
119 | 481
120 |
121 |
122 |
123 | person
124 | Unspecified
125 | 0
126 | 0
127 |
128 | 336
129 | 306
130 | 441
131 | 494
132 |
133 |
134 |
135 | person
136 | Unspecified
137 | 0
138 | 0
139 |
140 | 482
141 | 306
142 | 560
143 | 486
144 |
145 |
146 |
147 | person
148 | Unspecified
149 | 0
150 | 0
151 |
152 | 48
153 | 332
154 | 111
155 | 446
156 |
157 |
158 |
159 | person
160 | Unspecified
161 | 0
162 | 0
163 |
164 | 65
165 | 209
166 | 115
167 | 269
168 |
169 |
170 |
171 | person
172 | Unspecified
173 | 0
174 | 0
175 |
176 | 529
177 | 310
178 | 645
179 | 494
180 |
181 |
182 |
183 | person
184 | Unspecified
185 | 0
186 | 0
187 |
188 | 0
189 | 319
190 | 54
191 | 445
192 |
193 |
194 |
195 | person
196 | Unspecified
197 | 0
198 | 0
199 |
200 | 249
201 | 299
202 | 396
203 | 501
204 |
205 |
206 |
207 | person
208 | Unspecified
209 | 0
210 | 0
211 |
212 | 146
213 | 287
214 | 307
215 | 506
216 |
217 |
218 |
219 | person
220 | Unspecified
221 | 0
222 | 0
223 |
224 | 615
225 | 304
226 | 767
227 | 514
228 |
229 |
230 |
231 | person
232 | Unspecified
233 | 0
234 | 0
235 |
236 | 97
237 | 287
238 | 205
239 | 495
240 |
241 |
242 |
--------------------------------------------------------------------------------
/datasets/mydata/Annotations/1066405,2bfbf000c47880b7.xml:
--------------------------------------------------------------------------------
1 |
2 | D:/youtube_video/data/need/images
3 | 1066405,2bfbf000c47880b7.jpg
4 | D:\youtube_video\data\yolov5D:/youtube_video/data/need/images\1066405,2bfbf000c47880b7.jpg
5 |
6 | Unknown
7 |
8 |
9 | 1300
10 | 956
11 | 3
12 |
13 | 0
14 |
15 | person
16 | Unspecified
17 | 0
18 | 0
19 |
20 | 905
21 | 36
22 | 964
23 | 225
24 |
25 |
26 |
27 | person
28 | Unspecified
29 | 0
30 | 0
31 |
32 | 368
33 | 226
34 | 425
35 | 377
36 |
37 |
38 |
39 | person
40 | Unspecified
41 | 0
42 | 0
43 |
44 | 1001
45 | 67
46 | 1076
47 | 226
48 |
49 |
50 |
51 | person
52 | Unspecified
53 | 0
54 | 0
55 |
56 | 722
57 | 42
58 | 772
59 | 161
60 |
61 |
62 |
63 | person
64 | Unspecified
65 | 0
66 | 0
67 |
68 | 227
69 | 151
70 | 303
71 | 412
72 |
73 |
74 |
75 | person
76 | Unspecified
77 | 0
78 | 0
79 |
80 | 1067
81 | 51
82 | 1114
83 | 128
84 |
85 |
86 |
87 | person
88 | Unspecified
89 | 0
90 | 0
91 |
92 | 545
93 | 98
94 | 631
95 | 348
96 |
97 |
98 |
99 | person
100 | Unspecified
101 | 0
102 | 0
103 |
104 | 0
105 | 148
106 | 59
107 | 459
108 |
109 |
110 |
111 | person
112 | Unspecified
113 | 0
114 | 0
115 |
116 | 736
117 | 66
118 | 839
119 | 412
120 |
121 |
122 |
123 | person
124 | Unspecified
125 | 0
126 | 0
127 |
128 | 939
129 | 103
130 | 1020
131 | 376
132 |
133 |
134 |
135 | person
136 | Unspecified
137 | 0
138 | 0
139 |
140 | 278
141 | 150
142 | 347
143 | 405
144 |
145 |
146 |
147 | person
148 | Unspecified
149 | 0
150 | 0
151 |
152 | 311
153 | 146
154 | 373
155 | 398
156 |
157 |
158 |
159 | person
160 | Unspecified
161 | 0
162 | 0
163 |
164 | 102
165 | 174
166 | 193
167 | 437
168 |
169 |
170 |
171 | person
172 | Unspecified
173 | 0
174 | 0
175 |
176 | 26
177 | 138
178 | 118
179 | 454
180 |
181 |
182 |
183 | person
184 | Unspecified
185 | 0
186 | 0
187 |
188 | 658
189 | 97
190 | 787
191 | 465
192 |
193 |
194 |
195 | person
196 | Unspecified
197 | 0
198 | 0
199 |
200 | 459
201 | 112
202 | 546
203 | 360
204 |
205 |
206 |
207 | person
208 | Unspecified
209 | 0
210 | 0
211 |
212 | 459
213 | 395
214 | 825
215 | 774
216 |
217 |
218 |
219 | person
220 | Unspecified
221 | 0
222 | 0
223 |
224 | 156
225 | 140
226 | 264
227 | 447
228 |
229 |
230 |
231 | person
232 | Unspecified
233 | 0
234 | 0
235 |
236 | 832
237 | 30
238 | 945
239 | 363
240 |
241 |
242 |
243 | person
244 | Unspecified
245 | 0
246 | 0
247 |
248 | 1169
249 | 0
250 | 1299
251 | 580
252 |
253 |
254 |
255 | person
256 | Unspecified
257 | 0
258 | 0
259 |
260 | 156
261 | 352
262 | 471
263 | 707
264 |
265 |
266 |
267 | person
268 | Unspecified
269 | 0
270 | 0
271 |
272 | 741
273 | 111
274 | 1250
275 | 865
276 |
277 |
278 |
--------------------------------------------------------------------------------
/datasets/mydata/Annotations/1066405,2c8c7000530eb0e7.xml:
--------------------------------------------------------------------------------
1 |
2 | D:/youtube_video/data/need/images
3 | 1066405,2c8c7000530eb0e7.jpg
4 | D:\youtube_video\data\yolov5D:/youtube_video/data/need/images\1066405,2c8c7000530eb0e7.jpg
5 |
6 | Unknown
7 |
8 |
9 | 640
10 | 480
11 | 3
12 |
13 | 0
14 |
15 | person
16 | Unspecified
17 | 0
18 | 0
19 |
20 | 549
21 | 318
22 | 619
23 | 414
24 |
25 |
26 |
27 | person
28 | Unspecified
29 | 0
30 | 0
31 |
32 | 586
33 | 221
34 | 612
35 | 283
36 |
37 |
38 |
39 | person
40 | Unspecified
41 | 0
42 | 0
43 |
44 | 580
45 | 270
46 | 636
47 | 348
48 |
49 |
50 |
51 | person
52 | Unspecified
53 | 0
54 | 0
55 |
56 | 510
57 | 213
58 | 535
59 | 287
60 |
61 |
62 |
63 | person
64 | Unspecified
65 | 0
66 | 0
67 |
68 | 596
69 | 304
70 | 640
71 | 480
72 |
73 |
74 |
75 | person
76 | Unspecified
77 | 0
78 | 0
79 |
80 | 437
81 | 318
82 | 538
83 | 454
84 |
85 |
86 |
87 | person
88 | Unspecified
89 | 0
90 | 0
91 |
92 | 520
93 | 257
94 | 566
95 | 321
96 |
97 |
98 |
99 | person
100 | Unspecified
101 | 0
102 | 0
103 |
104 | 437
105 | 285
106 | 490
107 | 383
108 |
109 |
110 |
111 | person
112 | Unspecified
113 | 0
114 | 0
115 |
116 | 90
117 | 248
118 | 115
119 | 370
120 |
121 |
122 |
123 | person
124 | Unspecified
125 | 0
126 | 0
127 |
128 | 151
129 | 239
130 | 182
131 | 349
132 |
133 |
134 |
135 | person
136 | Unspecified
137 | 0
138 | 0
139 |
140 | 107
141 | 257
142 | 155
143 | 357
144 |
145 |
146 |
147 | person
148 | Unspecified
149 | 0
150 | 0
151 |
152 | 1
153 | 229
154 | 103
155 | 478
156 |
157 |
158 |
159 | person
160 | Unspecified
161 | 0
162 | 0
163 |
164 | 190
165 | 202
166 | 274
167 | 458
168 |
169 |
170 |
171 | person
172 | Unspecified
173 | 0
174 | 0
175 |
176 | 266
177 | 204
178 | 444
179 | 480
180 |
181 |
182 |
--------------------------------------------------------------------------------
/datasets/mydata/Annotations/1066405,2cdca0006185e7eb.xml:
--------------------------------------------------------------------------------
1 |
2 | D:/youtube_video/data/need/images
3 | 1066405,2cdca0006185e7eb.jpg
4 | D:\youtube_video\data\yolov5D:/youtube_video/data/need/images\1066405,2cdca0006185e7eb.jpg
5 |
6 | Unknown
7 |
8 |
9 | 1632
10 | 385
11 | 3
12 |
13 | 0
14 |
15 | person
16 | Unspecified
17 | 0
18 | 0
19 |
20 | 1295
21 | 298
22 | 1344
23 | 383
24 |
25 |
26 |
27 | person
28 | Unspecified
29 | 0
30 | 0
31 |
32 | 834
33 | 291
34 | 895
35 | 383
36 |
37 |
38 |
39 | person
40 | Unspecified
41 | 0
42 | 0
43 |
44 | 1247
45 | 263
46 | 1306
47 | 385
48 |
49 |
50 |
51 | person
52 | Unspecified
53 | 0
54 | 0
55 |
56 | 601
57 | 246
58 | 674
59 | 381
60 |
61 |
62 |
63 | person
64 | Unspecified
65 | 0
66 | 0
67 |
68 | 1395
69 | 281
70 | 1479
71 | 385
72 |
73 |
74 |
--------------------------------------------------------------------------------
/datasets/mydata/Annotations/1066405,2d2c6000adf6f6f4.xml:
--------------------------------------------------------------------------------
1 |
2 | D:/youtube_video/data/need/images
3 | 1066405,2d2c6000adf6f6f4.jpg
4 | D:\youtube_video\data\yolov5D:/youtube_video/data/need/images\1066405,2d2c6000adf6f6f4.jpg
5 |
6 | Unknown
7 |
8 |
9 | 1600
10 | 1063
11 | 3
12 |
13 | 0
14 |
15 | person
16 | Unspecified
17 | 0
18 | 0
19 |
20 | 422
21 | 552
22 | 479
23 | 616
24 |
25 |
26 |
27 | person
28 | Unspecified
29 | 0
30 | 0
31 |
32 | 1
33 | 475
34 | 21
35 | 569
36 |
37 |
38 |
39 | person
40 | Unspecified
41 | 0
42 | 0
43 |
44 | 1180
45 | 524
46 | 1246
47 | 652
48 |
49 |
50 |
51 | person
52 | Unspecified
53 | 0
54 | 0
55 |
56 | 38
57 | 533
58 | 92
59 | 642
60 |
61 |
62 |
63 | person
64 | Unspecified
65 | 0
66 | 0
67 |
68 | 1127
69 | 544
70 | 1177
71 | 629
72 |
73 |
74 |
75 | person
76 | Unspecified
77 | 0
78 | 0
79 |
80 | 592
81 | 498
82 | 640
83 | 605
84 |
85 |
86 |
87 | person
88 | Unspecified
89 | 0
90 | 0
91 |
92 | 637
93 | 496
94 | 695
95 | 622
96 |
97 |
98 |
99 | person
100 | Unspecified
101 | 0
102 | 0
103 |
104 | 1478
105 | 531
106 | 1540
107 | 606
108 |
109 |
110 |
111 | person
112 | Unspecified
113 | 0
114 | 0
115 |
116 | 471
117 | 528
118 | 595
119 | 659
120 |
121 |
122 |
123 | person
124 | Unspecified
125 | 0
126 | 0
127 |
128 | 927
129 | 552
130 | 1026
131 | 671
132 |
133 |
134 |
135 | person
136 | Unspecified
137 | 0
138 | 0
139 |
140 | 1205
141 | 420
142 | 1595
143 | 873
144 |
145 |
146 |
147 | person
148 | Unspecified
149 | 0
150 | 0
151 |
152 | 2
153 | 451
154 | 509
155 | 912
156 |
157 |
158 |
159 | person
160 | Unspecified
161 | 0
162 | 0
163 |
164 | 24
165 | 605
166 | 720
167 | 1001
168 |
169 |
170 |
171 | person
172 | Unspecified
173 | 0
174 | 0
175 |
176 | 989
177 | 507
178 | 1542
179 | 1025
180 |
181 |
182 |
183 | person
184 | Unspecified
185 | 0
186 | 0
187 |
188 | 558
189 | 280
190 | 1030
191 | 924
192 |
193 |
194 |
--------------------------------------------------------------------------------
/datasets/mydata/Annotations/1066405,2d6f2000fec9dcab.xml:
--------------------------------------------------------------------------------
1 |
2 | D:/youtube_video/data/need/images
3 | 1066405,2d6f2000fec9dcab.jpg
4 | D:\youtube_video\data\yolov5D:/youtube_video/data/need/images\1066405,2d6f2000fec9dcab.jpg
5 |
6 | Unknown
7 |
8 |
9 | 847
10 | 450
11 | 3
12 |
13 | 0
14 |
15 | person
16 | Unspecified
17 | 0
18 | 0
19 |
20 | 499
21 | 152
22 | 542
23 | 203
24 |
25 |
26 |
27 | person
28 | Unspecified
29 | 0
30 | 0
31 |
32 | 567
33 | 146
34 | 605
35 | 209
36 |
37 |
38 |
39 | person
40 | Unspecified
41 | 0
42 | 0
43 |
44 | 83
45 | 138
46 | 134
47 | 292
48 |
49 |
50 |
51 | person
52 | Unspecified
53 | 0
54 | 0
55 |
56 | 673
57 | 161
58 | 723
59 | 204
60 |
61 |
62 |
63 | person
64 | Unspecified
65 | 0
66 | 0
67 |
68 | 368
69 | 156
70 | 405
71 | 288
72 |
73 |
74 |
75 | person
76 | Unspecified
77 | 0
78 | 0
79 |
80 | 810
81 | 163
82 | 847
83 | 359
84 |
85 |
86 |
87 | person
88 | Unspecified
89 | 0
90 | 0
91 |
92 | 0
93 | 270
94 | 42
95 | 321
96 |
97 |
98 |
99 | person
100 | Unspecified
101 | 0
102 | 0
103 |
104 | 611
105 | 165
106 | 662
107 | 212
108 |
109 |
110 |
111 | person
112 | Unspecified
113 | 0
114 | 0
115 |
116 | 90
117 | 148
118 | 230
119 | 350
120 |
121 |
122 |
123 | person
124 | Unspecified
125 | 0
126 | 0
127 |
128 | 0
129 | 319
130 | 101
131 | 447
132 |
133 |
134 |
135 | person
136 | Unspecified
137 | 0
138 | 0
139 |
140 | 103
141 | 331
142 | 300
143 | 448
144 |
145 |
146 |
147 | person
148 | Unspecified
149 | 0
150 | 0
151 |
152 | 365
153 | 161
154 | 502
155 | 390
156 |
157 |
158 |
159 | person
160 | Unspecified
161 | 0
162 | 0
163 |
164 | 235
165 | 158
166 | 340
167 | 361
168 |
169 |
170 |
171 | person
172 | Unspecified
173 | 0
174 | 0
175 |
176 | 32
177 | 141
178 | 92
179 | 325
180 |
181 |
182 |
183 | person
184 | Unspecified
185 | 0
186 | 0
187 |
188 | 746
189 | 157
190 | 843
191 | 439
192 |
193 |
194 |
--------------------------------------------------------------------------------
/datasets/mydata/images/1066405,1b8000ef60354f.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mohenghui/yolov5_attention/b3b22930ab6e839a49d2c3a244826375130b93cb/datasets/mydata/images/1066405,1b8000ef60354f.jpg
--------------------------------------------------------------------------------
/datasets/mydata/images/1066405,2a72f000f214d26a.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mohenghui/yolov5_attention/b3b22930ab6e839a49d2c3a244826375130b93cb/datasets/mydata/images/1066405,2a72f000f214d26a.jpg
--------------------------------------------------------------------------------
/datasets/mydata/images/1066405,2ac2400079a6d80f.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mohenghui/yolov5_attention/b3b22930ab6e839a49d2c3a244826375130b93cb/datasets/mydata/images/1066405,2ac2400079a6d80f.jpg
--------------------------------------------------------------------------------
/datasets/mydata/images/1066405,2b6000ffe20c07.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mohenghui/yolov5_attention/b3b22930ab6e839a49d2c3a244826375130b93cb/datasets/mydata/images/1066405,2b6000ffe20c07.jpg
--------------------------------------------------------------------------------
/datasets/mydata/images/1066405,2bf6e00075455d3c.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mohenghui/yolov5_attention/b3b22930ab6e839a49d2c3a244826375130b93cb/datasets/mydata/images/1066405,2bf6e00075455d3c.jpg
--------------------------------------------------------------------------------
/datasets/mydata/images/1066405,2bfbf000c47880b7.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mohenghui/yolov5_attention/b3b22930ab6e839a49d2c3a244826375130b93cb/datasets/mydata/images/1066405,2bfbf000c47880b7.jpg
--------------------------------------------------------------------------------
/datasets/mydata/images/1066405,2c8c7000530eb0e7.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mohenghui/yolov5_attention/b3b22930ab6e839a49d2c3a244826375130b93cb/datasets/mydata/images/1066405,2c8c7000530eb0e7.jpg
--------------------------------------------------------------------------------
/datasets/mydata/images/1066405,2cdca0006185e7eb.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mohenghui/yolov5_attention/b3b22930ab6e839a49d2c3a244826375130b93cb/datasets/mydata/images/1066405,2cdca0006185e7eb.jpg
--------------------------------------------------------------------------------
/datasets/mydata/images/1066405,2d2c6000adf6f6f4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mohenghui/yolov5_attention/b3b22930ab6e839a49d2c3a244826375130b93cb/datasets/mydata/images/1066405,2d2c6000adf6f6f4.jpg
--------------------------------------------------------------------------------
/datasets/mydata/images/1066405,2d6f2000fec9dcab.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mohenghui/yolov5_attention/b3b22930ab6e839a49d2c3a244826375130b93cb/datasets/mydata/images/1066405,2d6f2000fec9dcab.jpg
--------------------------------------------------------------------------------
/datasets/mydata/labels/1066405,1b8000ef60354f.txt:
--------------------------------------------------------------------------------
1 | 0 0.69384765625 0.6654465592972182 0.0244140625 0.10395314787701318
2 | 0 0.7177734375 0.6493411420204979 0.021484375 0.0746705710102489
3 | 0 0.541015625 0.7042459736456809 0.05078125 0.09663250366032211
4 | 0 0.63134765625 0.6471449487554906 0.0146484375 0.05856515373352855
5 | 0 0.5908203125 0.6500732064421669 0.017578125 0.0527086383601757
6 | 0 0.103515625 0.6661786237188873 0.01953125 0.03806734992679356
7 | 0 0.72216796875 0.3469985358711567 0.0205078125 0.0849194729136164
8 | 0 0.65234375 0.6632503660322109 0.01953125 0.07613469985358712
9 | 0 0.66796875 0.664714494875549 0.017578125 0.08199121522693997
10 | 0 0.68359375 0.671303074670571 0.025390625 0.10688140556368961
11 | 0 0.4111328125 0.7774524158125915 0.041015625 0.15519765739385066
12 | 0 0.15771484375 0.7474377745241582 0.0732421875 0.1478770131771596
13 | 0 0.02099609375 0.7503660322108345 0.0419921875 0.13030746705710103
14 | 0 0.3349609375 0.7481698389458272 0.04296875 0.13177159590043924
15 | 0 0.76416015625 0.6969253294289898 0.0537109375 0.16398243045387995
16 | 0 0.97119140625 0.43191800878477304 0.0537109375 0.21669106881405564
17 | 0 0.11572265625 0.7474377745241582 0.0654296875 0.1595900439238653
18 | 0 0.22265625 0.746705710102489 0.05859375 0.14641288433382138
19 | 0 0.4462890625 0.7415812591508053 0.056640625 0.26207906295754024
20 |
--------------------------------------------------------------------------------
/datasets/mydata/labels/1066405,2a72f000f214d26a.txt:
--------------------------------------------------------------------------------
1 | 0 0.7607142857142858 0.7373660030627872 0.015306122448979593 0.12404287901990813
2 | 0 0.12448979591836735 0.7388973966309342 0.010204081632653062 0.03215926493108729
3 | 0 0.9734693877551022 0.7013782542113324 0.010204081632653062 0.033690658499234305
4 | 0 0.878061224489796 0.7036753445635529 0.01326530612244898 0.03828483920367535
5 | 0 0.5219387755102042 0.7212863705972435 0.01326530612244898 0.03981623277182236
6 | 0 0.20204081632653062 0.7350689127105666 0.010204081632653062 0.033690658499234305
7 | 0 0.6153061224489796 0.712863705972435 0.0163265306122449 0.0444104134762634
8 | 0 0.08724489795918368 0.7679938744257274 0.0336734693877551 0.0781010719754977
9 | 0 0.41734693877551027 0.7228177641653906 0.012244897959183675 0.0551301684532925
10 | 0 0.8964285714285715 0.6998468606431854 0.017346938775510204 0.045941807044410414
11 | 0 0.5051020408163266 0.7166921898928025 0.010204081632653062 0.05206738131699847
12 | 0 0.7862244897959184 0.723583460949464 0.02959183673469388 0.13935681470137826
13 | 0 0.31428571428571433 0.7442572741194488 0.0326530612244898 0.0765696784073507
14 | 0 0.703061224489796 0.7304747320061256 0.028571428571428574 0.1225114854517611
15 | 0 0.36683673469387756 0.72052067381317 0.02755102040816327 0.10566615620214395
16 |
--------------------------------------------------------------------------------
/datasets/mydata/labels/1066405,2ac2400079a6d80f.txt:
--------------------------------------------------------------------------------
1 | 0 0.36088295687885014 0.6896551724137931 0.056468172484599594 0.37322515212981744
2 | 0 0.9060574948665298 0.5628803245436106 0.060574948665297744 0.2657200811359026
3 | 0 0.3239219712525667 0.6480730223123732 0.1242299794661191 0.4645030425963489
4 | 0 0.8531827515400411 0.5324543610547667 0.039014373716632446 0.20486815415821502
5 | 0 0.7176591375770021 0.7606490872210954 0.06365503080082136 0.17849898580121704
6 | 0 0.9373716632443532 0.7403651115618661 0.0944558521560575 0.2150101419878296
7 | 0 0.2874743326488706 0.6328600405679513 0.06981519507186859 0.5192697768762677
8 | 0 0.12320328542094457 0.7332657200811359 0.053388090349075976 0.29006085192697767
9 | 0 0.18993839835728954 0.7129817444219066 0.049281314168377825 0.359026369168357
10 | 0 0.9070841889117044 0.4908722109533469 0.058521560574948665 0.12981744421906694
11 | 0 0.15811088295687886 0.7150101419878296 0.04106776180698152 0.33468559837728196
12 | 0 0.22741273100616016 0.7109533468559838 0.050308008213552365 0.33468559837728196
13 | 0 0.6232032854209446 0.5436105476673427 0.13552361396303902 0.9046653144016227
14 | 0 0.8773100616016427 0.7707910750507099 0.052361396303901436 0.15415821501014199
15 | 0 0.023100616016427107 0.6764705882352942 0.046201232032854214 0.4482758620689655
16 | 0 0.07032854209445585 0.6663286004056795 0.06262833675564682 0.4726166328600406
17 | 0 0.9594455852156057 0.5801217038539553 0.07084188911704313 0.24746450304259635
18 | 0 0.46149897330595485 0.565922920892495 0.14271047227926079 0.8640973630831643
19 | 0 0.7505133470225873 0.524340770791075 0.19096509240246407 0.9513184584178499
20 |
--------------------------------------------------------------------------------
/datasets/mydata/labels/1066405,2b6000ffe20c07.txt:
--------------------------------------------------------------------------------
1 | 0 0.6089696071163825 0.7028199566160521 0.07042253521126761 0.18004338394793926
2 | 0 0.5363232023721275 0.6513015184381779 0.05114899925871015 0.0683297180043384
3 | 0 0.8884358784284655 0.7174620390455532 0.04966641957005189 0.1052060737527115
4 | 0 0.9688658265381764 0.6540130151843818 0.026686434395848776 0.07158351409978309
5 | 0 0.5541141586360266 0.631236442516269 0.02001482579688658 0.04338394793926247
6 | 0 0.9091919940696812 0.678416485900217 0.04521868050407709 0.10086767895878525
7 | 0 0.2309117865085248 0.6160520607375272 0.0363232023721275 0.06290672451193059
8 | 0 0.810229799851742 0.6767895878524945 0.05485544848035582 0.1279826464208243
9 | 0 0.3906597479614529 0.5813449023861171 0.032616753150481834 0.060737527114967466
10 | 0 0.49036323202372123 0.6578091106290672 0.04373610081541883 0.10086767895878525
11 | 0 0.6100815418828762 0.6556399132321041 0.06375092661230541 0.08351409978308026
12 | 0 0.41363973313565605 0.7478308026030369 0.072646404744255 0.2418655097613883
13 | 0 0.293180133432172 0.6355748373101953 0.04966641957005189 0.08676789587852494
14 | 0 0.9421793921423276 0.6789587852494577 0.03558191252779837 0.09544468546637744
15 | 0 0.3346923647146034 0.5797180043383948 0.04225352112676056 0.07917570498915402
16 | 0 0.18050407709414382 0.6523861171366595 0.07042253521126761 0.12906724511930587
17 | 0 0.2453669384729429 0.6437093275488069 0.051890289103039285 0.10086767895878525
18 | 0 0.37175685693106003 0.6572668112798264 0.048183839881393624 0.1193058568329718
19 | 0 0.7016308376575241 0.6344902386117137 0.03039288361749444 0.06724511930585683
20 | 0 0.3361749444032617 0.6377440347071583 0.03780578206078577 0.07809110629067245
21 | 0 0.04595997034840622 0.5629067245119306 0.031134173461823574 0.11062906724511931
22 | 0 0.7357301704966641 0.6426247288503254 0.040770941438102296 0.09869848156182214
23 | 0 0.4570051890289103 0.7960954446854664 0.08080059303187546 0.28199566160520606
24 |
--------------------------------------------------------------------------------
/datasets/mydata/labels/1066405,2bf6e00075455d3c.txt:
--------------------------------------------------------------------------------
1 | 0 0.6125 0.5065666041275797 0.04 0.1425891181988743
2 | 0 0.57625 0.5225140712945591 0.0425 0.17823639774859287
3 | 0 0.52875 0.5619136960600375 0.035 0.15196998123827393
4 | 0 0.8925000000000001 0.5562851782363978 0.07 0.20075046904315197
5 | 0 0.356875 0.4831144465290807 0.043750000000000004 0.14821763602251406
6 | 0 0.04625 0.45121951219512196 0.05 0.10694183864915573
7 | 0 0.313125 0.48592870544090055 0.04875 0.1350844277673546
8 | 0 0.466875 0.47842401500938087 0.05375 0.13883677298311445
9 | 0 0.581875 0.7476547842401501 0.10375000000000001 0.30956848030018763
10 | 0 0.48562500000000003 0.7504690431519699 0.13125 0.3527204502814259
11 | 0 0.65125 0.7429643527204502 0.0975 0.33771106941838647
12 | 0 0.099375 0.7298311444652908 0.07875 0.21388367729831145
13 | 0 0.1125 0.44840525328330205 0.0625 0.1125703564727955
14 | 0 0.73375 0.7542213883677298 0.145 0.3452157598499062
15 | 0 0.03375 0.7166979362101313 0.0675 0.23639774859287055
16 | 0 0.403125 0.7504690431519699 0.18375 0.3789868667917448
17 | 0 0.283125 0.7439024390243902 0.20125 0.41088180112570355
18 | 0 0.86375 0.7673545966228893 0.19 0.39399624765478425
19 | 0 0.18875 0.7335834896810507 0.135 0.3902439024390244
20 |
--------------------------------------------------------------------------------
/datasets/mydata/labels/1066405,2bfbf000c47880b7.txt:
--------------------------------------------------------------------------------
1 | 0 0.7188461538461538 0.1365062761506276 0.045384615384615384 0.19769874476987445
2 | 0 0.305 0.3153765690376569 0.04384615384615385 0.15794979079497906
3 | 0 0.7988461538461539 0.15324267782426776 0.057692307692307696 0.16631799163179914
4 | 0 0.5746153846153846 0.1061715481171548 0.038461538461538464 0.12447698744769874
5 | 0 0.20384615384615384 0.29445606694560666 0.05846153846153846 0.2730125523012552
6 | 0 0.8388461538461538 0.09361924686192467 0.036153846153846154 0.0805439330543933
7 | 0 0.4523076923076923 0.23326359832635982 0.06615384615384616 0.2615062761506276
8 | 0 0.022692307692307692 0.31746861924686187 0.045384615384615384 0.3253138075313807
9 | 0 0.6057692307692307 0.24999999999999997 0.07923076923076923 0.3619246861924686
10 | 0 0.7534615384615385 0.25052301255230125 0.06230769230769231 0.2855648535564853
11 | 0 0.2403846153846154 0.2902719665271966 0.05307692307692308 0.26673640167364016
12 | 0 0.2630769230769231 0.2845188284518828 0.047692307692307694 0.2635983263598326
13 | 0 0.11346153846153846 0.3195606694560669 0.07 0.2751046025104602
14 | 0 0.055384615384615386 0.30962343096234307 0.07076923076923076 0.3305439330543933
15 | 0 0.5557692307692308 0.2939330543933054 0.09923076923076923 0.3849372384937238
16 | 0 0.38653846153846155 0.24686192468619245 0.06692307692307692 0.25941422594142255
17 | 0 0.4938461538461538 0.6114016736401673 0.2815384615384615 0.39644351464435146
18 | 0 0.16153846153846155 0.30700836820083677 0.08307692307692308 0.3211297071129707
19 | 0 0.6834615384615385 0.20554393305439328 0.08692307692307692 0.34832635983263593
20 | 0 0.9492307692307692 0.303347280334728 0.1 0.606694560669456
21 | 0 0.24115384615384616 0.5538702928870293 0.2423076923076923 0.3713389121338912
22 | 0 0.7657692307692308 0.5104602510460251 0.39153846153846156 0.7887029288702928
23 |
--------------------------------------------------------------------------------
/datasets/mydata/labels/1066405,2c8c7000530eb0e7.txt:
--------------------------------------------------------------------------------
1 | 0 0.9125000000000001 0.7625 0.109375 0.2
2 | 0 0.9359375000000001 0.525 0.040625 0.12916666666666665
3 | 0 0.9500000000000001 0.64375 0.08750000000000001 0.1625
4 | 0 0.81640625 0.5208333333333334 0.0390625 0.15416666666666667
5 | 0 0.9656250000000001 0.8166666666666667 0.06875 0.36666666666666664
6 | 0 0.76171875 0.8041666666666667 0.15781250000000002 0.2833333333333333
7 | 0 0.8484375000000001 0.6020833333333333 0.07187500000000001 0.13333333333333333
8 | 0 0.72421875 0.6958333333333333 0.08281250000000001 0.20416666666666666
9 | 0 0.16015625 0.64375 0.0390625 0.25416666666666665
10 | 0 0.26015625000000003 0.6125 0.0484375 0.22916666666666666
11 | 0 0.20468750000000002 0.6395833333333333 0.07500000000000001 0.20833333333333334
12 | 0 0.08125 0.7364583333333333 0.15937500000000002 0.51875
13 | 0 0.36250000000000004 0.6875 0.13125 0.5333333333333333
14 | 0 0.5546875 0.7125 0.278125 0.575
15 |
--------------------------------------------------------------------------------
/datasets/mydata/labels/1066405,2cdca0006185e7eb.txt:
--------------------------------------------------------------------------------
1 | 0 0.8085171568627451 0.8844155844155844 0.03002450980392157 0.22077922077922077
2 | 0 0.5297181372549019 0.8753246753246753 0.037377450980392156 0.23896103896103896
3 | 0 0.7821691176470588 0.8415584415584415 0.03615196078431372 0.3168831168831169
4 | 0 0.390625 0.8142857142857143 0.044730392156862746 0.35064935064935066
5 | 0 0.8805147058823529 0.8649350649350649 0.051470588235294115 0.2701298701298701
6 |
--------------------------------------------------------------------------------
/datasets/mydata/labels/1066405,2d2c6000adf6f6f4.txt:
--------------------------------------------------------------------------------
1 | 0 0.2815625 0.5493885230479775 0.035625000000000004 0.060206961429915336
2 | 0 0.006875 0.49106302916274697 0.0125 0.08842897460018814
3 | 0 0.758125 0.5531514581373471 0.04125 0.12041392285983067
4 | 0 0.040625 0.5526810912511759 0.03375 0.10253998118532455
5 | 0 0.72 0.5517403574788335 0.03125 0.0799623706491063
6 | 0 0.385 0.5188146754468486 0.03 0.1006585136406397
7 | 0 0.41625 0.5258701787394168 0.03625 0.11853245531514582
8 | 0 0.943125 0.5348071495766699 0.03875 0.07055503292568203
9 | 0 0.333125 0.5583254938852305 0.0775 0.12323612417685795
10 | 0 0.6103125 0.5752587017873941 0.061875 0.11194731890874883
11 | 0 0.875 0.6081843838193791 0.24375 0.4261523988711195
12 | 0 0.1596875 0.6411100658513641 0.316875 0.4336782690498589
13 | 0 0.2325 0.7554092191909689 0.435 0.37253057384760113
14 | 0 0.7909375000000001 0.7206020696142992 0.345625 0.48730009407337727
15 | 0 0.49625 0.5663217309501412 0.295 0.605832549388523
16 |
--------------------------------------------------------------------------------
/datasets/mydata/labels/1066405,2d6f2000fec9dcab.txt:
--------------------------------------------------------------------------------
1 | 0 0.614521841794569 0.39444444444444443 0.050767414403778036 0.11333333333333333
2 | 0 0.69185360094451 0.39444444444444443 0.0448642266824085 0.14
3 | 0 0.128099173553719 0.4777777777777778 0.0602125147579693 0.3422222222222222
4 | 0 0.8240850059031877 0.40555555555555556 0.05903187721369539 0.09555555555555556
5 | 0 0.45631641086186536 0.49333333333333335 0.04368358913813459 0.29333333333333333
6 | 0 0.9781582054309327 0.58 0.04368358913813459 0.43555555555555553
7 | 0 0.024793388429752063 0.6566666666666666 0.04958677685950413 0.11333333333333333
8 | 0 0.7514757969303424 0.41888888888888887 0.0602125147579693 0.10444444444444445
9 | 0 0.18890200708382526 0.5533333333333333 0.1652892561983471 0.4488888888888889
10 | 0 0.05962219598583235 0.8511111111111112 0.1192443919716647 0.28444444444444444
11 | 0 0.23789846517119242 0.8655555555555555 0.23258559622195984 0.26
12 | 0 0.5118063754427391 0.6122222222222222 0.16174734356552536 0.5088888888888888
13 | 0 0.3394332939787485 0.5766666666666667 0.12396694214876032 0.45111111111111113
14 | 0 0.07319952774498228 0.5177777777777778 0.07083825265643447 0.4088888888888889
15 | 0 0.9380165289256198 0.6622222222222223 0.11452184179456906 0.6266666666666667
16 |
--------------------------------------------------------------------------------
/datasets/mydata/test.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mohenghui/yolov5_attention/b3b22930ab6e839a49d2c3a244826375130b93cb/datasets/mydata/test.txt
--------------------------------------------------------------------------------
/datasets/mydata/train.cache:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mohenghui/yolov5_attention/b3b22930ab6e839a49d2c3a244826375130b93cb/datasets/mydata/train.cache
--------------------------------------------------------------------------------
/datasets/mydata/train.txt:
--------------------------------------------------------------------------------
1 | datasets/mydata/images/1066405,1b8000ef60354f.jpg
2 | datasets/mydata/images/1066405,2a72f000f214d26a.jpg
3 | datasets/mydata/images/1066405,2ac2400079a6d80f.jpg
4 | datasets/mydata/images/1066405,2b6000ffe20c07.jpg
5 | datasets/mydata/images/1066405,2bf6e00075455d3c.jpg
6 | datasets/mydata/images/1066405,2c8c7000530eb0e7.jpg
7 | datasets/mydata/images/1066405,2cdca0006185e7eb.jpg
8 | datasets/mydata/images/1066405,2d2c6000adf6f6f4.jpg
9 | datasets/mydata/images/1066405,2d6f2000fec9dcab.jpg
10 |
--------------------------------------------------------------------------------
/datasets/mydata/val.cache:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mohenghui/yolov5_attention/b3b22930ab6e839a49d2c3a244826375130b93cb/datasets/mydata/val.cache
--------------------------------------------------------------------------------
/datasets/mydata/val.txt:
--------------------------------------------------------------------------------
1 | datasets/mydata/images/1066405,2bfbf000c47880b7.jpg
2 |
--------------------------------------------------------------------------------
/models/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mohenghui/yolov5_attention/b3b22930ab6e839a49d2c3a244826375130b93cb/models/__init__.py
--------------------------------------------------------------------------------
/models/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mohenghui/yolov5_attention/b3b22930ab6e839a49d2c3a244826375130b93cb/models/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/models/__pycache__/common.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mohenghui/yolov5_attention/b3b22930ab6e839a49d2c3a244826375130b93cb/models/__pycache__/common.cpython-37.pyc
--------------------------------------------------------------------------------
/models/__pycache__/experimental.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mohenghui/yolov5_attention/b3b22930ab6e839a49d2c3a244826375130b93cb/models/__pycache__/experimental.cpython-37.pyc
--------------------------------------------------------------------------------
/models/__pycache__/yolo.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mohenghui/yolov5_attention/b3b22930ab6e839a49d2c3a244826375130b93cb/models/__pycache__/yolo.cpython-37.pyc
--------------------------------------------------------------------------------
/models/experimental.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | """
3 | Experimental modules
4 | """
5 | import math
6 |
7 | import numpy as np
8 | import torch
9 | import torch.nn as nn
10 |
11 | from models.common import Conv
12 | from utils.downloads import attempt_download
13 |
14 |
class CrossConv(nn.Module):
    """Cross Convolution Downsample.

    Factorizes a k x k convolution into a 1 x k conv followed by a k x 1
    conv (cheaper than the full kernel), with an optional residual add.
    """

    def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):
        # c1: input channels, c2: output channels, k: kernel size,
        # s: stride, g: groups (second conv), e: hidden-channel expansion,
        # shortcut: add the input back when channel counts match
        super().__init__()
        hidden = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, hidden, (1, k), (1, s))
        self.cv2 = Conv(hidden, c2, (k, 1), (s, 1), g=g)
        self.add = shortcut and c1 == c2

    def forward(self, x):
        out = self.cv2(self.cv1(x))
        return x + out if self.add else out
28 |
class Sum(nn.Module):
    """Weighted sum of 2 or more layers (https://arxiv.org/abs/1911.09070).

    With ``weight=False`` the inputs are summed directly; with
    ``weight=True`` each input after the first is scaled by a learnable
    weight squashed into (0, 2) via a sigmoid.
    """

    def __init__(self, n, weight=False):  # n: number of inputs
        super().__init__()
        self.weight = weight  # apply weights boolean
        self.iter = range(n - 1)  # iter object
        if weight:
            # learnable per-input weights, initialized at descending negatives
            self.w = nn.Parameter(-torch.arange(1.0, n) / 2, requires_grad=True)

    def forward(self, x):
        total = x[0]  # first input contributes unweighted
        if self.weight:
            scaled = torch.sigmoid(self.w) * 2  # map weights into (0, 2)
            for idx in self.iter:
                total = total + x[idx + 1] * scaled[idx]
        else:
            for idx in self.iter:
                total = total + x[idx + 1]
        return total
49 |
class MixConv2d(nn.Module):
    """Mixed Depth-wise Conv (https://arxiv.org/abs/1907.09595).

    Runs several convolutions with different kernel sizes in parallel over
    the same input and concatenates their outputs channel-wise, followed by
    BatchNorm and SiLU.
    """

    def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):
        # c1/c2: in/out channels; k: tuple of kernel sizes; s: stride;
        # equal_ch: True -> split c2 equally across kernels,
        #           False -> equalize weight.numel() across kernels
        super().__init__()
        n = len(k)  # number of parallel convolutions
        if equal_ch:  # equal channels per group
            # assign each of the c2 output channels to one of the n kernels
            idx = torch.linspace(0, n - 1E-6, c2).floor()  # c2 indices
            splits = [(idx == g).sum() for g in range(n)]  # intermediate channels
        else:  # equal parameter count per group
            # least-squares solve for channel counts such that every kernel
            # group carries (approximately) the same number of weights
            b = [c2] + [0] * n
            a = np.eye(n + 1, n, k=-1)
            a -= np.roll(a, 1, axis=1)
            a *= np.array(k) ** 2
            a[0] = 1
            splits = np.linalg.lstsq(a, b, rcond=None)[0].round()  # solve ax = b

        self.m = nn.ModuleList([
            nn.Conv2d(c1, int(ch), kernel, s, kernel // 2,
                      groups=math.gcd(c1, int(ch)), bias=False)
            for kernel, ch in zip(k, splits)])
        self.bn = nn.BatchNorm2d(c2)
        self.act = nn.SiLU()

    def forward(self, x):
        return self.act(self.bn(torch.cat([conv(x) for conv in self.m], 1)))
74 |
class Ensemble(nn.ModuleList):
    """Ensemble of models.

    Calls every member model on the same input and concatenates their
    inference outputs along dim 1 (NMS-style ensembling).
    """

    def __init__(self):
        super().__init__()

    def forward(self, x, augment=False, profile=False, visualize=False):
        # each member returns (inference_output, train_output); keep [0]
        outputs = [module(x, augment, profile, visualize)[0] for module in self]
        # y = torch.stack(outputs).max(0)[0]  # max ensemble
        # y = torch.stack(outputs).mean(0)  # mean ensemble
        y = torch.cat(outputs, 1)  # nms ensemble
        return y, None  # inference, train output
89 |
def attempt_load(weights, map_location=None, inplace=True, fuse=True):
    """Load a model (or an Ensemble of models) from checkpoint path(s).

    Args:
        weights: a single checkpoint path or a list of paths; each is passed
            through attempt_download before torch.load.
        map_location: forwarded to torch.load (device remapping).
        inplace: value written to ``m.inplace`` on activation/Detect/Model
            modules for torch 1.7.0 compatibility.
        fuse: if True, call ``.fuse()`` on each loaded model before eval.

    Returns:
        The single model when one checkpoint was given, otherwise an
        Ensemble carrying names/nc/yaml from the first model and the max
        stride across members.
    """
    from models.yolo import Detect, Model

    # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
    model = Ensemble()
    for w in weights if isinstance(weights, list) else [weights]:
        ckpt = torch.load(attempt_download(w), map_location=map_location)  # load
        # prefer the EMA weights when the checkpoint carries them
        ckpt = (ckpt.get('ema') or ckpt['model']).float()  # FP32 model
        model.append(ckpt.fuse().eval() if fuse else ckpt.eval())  # fused or un-fused model in eval mode

    # Compatibility updates (patch attributes expected by newer torch / newer layer definitions)
    for m in model.modules():
        t = type(m)
        if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model):
            m.inplace = inplace  # torch 1.7.0 compatibility
        if t is Detect:
            if not isinstance(m.anchor_grid, list):  # new Detect Layer compatibility
                # replace tensor anchor_grid with a per-layer list of placeholders
                delattr(m, 'anchor_grid')
                setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl)
        elif t is Conv:
            m._non_persistent_buffers_set = set()  # torch 1.6.0 compatibility
        elif t is nn.Upsample and not hasattr(m, 'recompute_scale_factor'):
            m.recompute_scale_factor = None  # torch 1.11.0 compatibility

    if len(model) == 1:
        return model[-1]  # return model
    else:
        print(f'Ensemble created with {weights}\n')
        # expose metadata of the first member on the ensemble itself
        for k in 'names', 'nc', 'yaml':
            setattr(model, k, getattr(model[0], k))
        model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride  # max stride
        assert all(model[0].nc == m.nc for m in model), f'Models have different class counts: {[m.nc for m in model]}'
        return model  # return ensemble
--------------------------------------------------------------------------------
/models/hub/anchors.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # Default anchors for COCO data
3 |
4 |
5 | # P5 -------------------------------------------------------------------------------------------------------------------
6 | # P5-640:
7 | anchors_p5_640:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 |
13 | # P6 -------------------------------------------------------------------------------------------------------------------
14 | # P6-640: thr=0.25: 0.9964 BPR, 5.54 anchors past thr, n=12, img_size=640, metric_all=0.281/0.716-mean/best, past_thr=0.469-mean: 9,11, 21,19, 17,41, 43,32, 39,70, 86,64, 65,131, 134,130, 120,265, 282,180, 247,354, 512,387
15 | anchors_p6_640:
16 | - [9,11, 21,19, 17,41] # P3/8
17 | - [43,32, 39,70, 86,64] # P4/16
18 | - [65,131, 134,130, 120,265] # P5/32
19 | - [282,180, 247,354, 512,387] # P6/64
20 |
21 | # P6-1280: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1280, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 19,27, 44,40, 38,94, 96,68, 86,152, 180,137, 140,301, 303,264, 238,542, 436,615, 739,380, 925,792
22 | anchors_p6_1280:
23 | - [19,27, 44,40, 38,94] # P3/8
24 | - [96,68, 86,152, 180,137] # P4/16
25 | - [140,301, 303,264, 238,542] # P5/32
26 | - [436,615, 739,380, 925,792] # P6/64
27 |
28 | # P6-1920: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1920, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 28,41, 67,59, 57,141, 144,103, 129,227, 270,205, 209,452, 455,396, 358,812, 653,922, 1109,570, 1387,1187
29 | anchors_p6_1920:
30 | - [28,41, 67,59, 57,141] # P3/8
31 | - [144,103, 129,227, 270,205] # P4/16
32 | - [209,452, 455,396, 358,812] # P5/32
33 | - [653,922, 1109,570, 1387,1187] # P6/64
34 |
35 |
36 | # P7 -------------------------------------------------------------------------------------------------------------------
37 | # P7-640: thr=0.25: 0.9962 BPR, 6.76 anchors past thr, n=15, img_size=640, metric_all=0.275/0.733-mean/best, past_thr=0.466-mean: 11,11, 13,30, 29,20, 30,46, 61,38, 39,92, 78,80, 146,66, 79,163, 149,150, 321,143, 157,303, 257,402, 359,290, 524,372
38 | anchors_p7_640:
39 | - [11,11, 13,30, 29,20] # P3/8
40 | - [30,46, 61,38, 39,92] # P4/16
41 | - [78,80, 146,66, 79,163] # P5/32
42 | - [149,150, 321,143, 157,303] # P6/64
43 | - [257,402, 359,290, 524,372] # P7/128
44 |
45 | # P7-1280: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1280, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 19,22, 54,36, 32,77, 70,83, 138,71, 75,173, 165,159, 148,334, 375,151, 334,317, 251,626, 499,474, 750,326, 534,814, 1079,818
46 | anchors_p7_1280:
47 | - [19,22, 54,36, 32,77] # P3/8
48 | - [70,83, 138,71, 75,173] # P4/16
49 | - [165,159, 148,334, 375,151] # P5/32
50 | - [334,317, 251,626, 499,474] # P6/64
51 | - [750,326, 534,814, 1079,818] # P7/128
52 |
53 | # P7-1920: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1920, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 29,34, 81,55, 47,115, 105,124, 207,107, 113,259, 247,238, 222,500, 563,227, 501,476, 376,939, 749,711, 1126,489, 801,1222, 1618,1227
54 | anchors_p7_1920:
55 | - [29,34, 81,55, 47,115] # P3/8
56 | - [105,124, 207,107, 113,259] # P4/16
57 | - [247,238, 222,500, 563,227] # P5/32
58 | - [501,476, 376,939, 749,711] # P6/64
59 | - [1126,489, 801,1222, 1618,1227] # P7/128
60 |
--------------------------------------------------------------------------------
/models/hub/yolov3-spp.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # darknet53 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [32, 3, 1]], # 0
16 | [-1, 1, Conv, [64, 3, 2]], # 1-P1/2
17 | [-1, 1, Bottleneck, [64]],
18 | [-1, 1, Conv, [128, 3, 2]], # 3-P2/4
19 | [-1, 2, Bottleneck, [128]],
20 | [-1, 1, Conv, [256, 3, 2]], # 5-P3/8
21 | [-1, 8, Bottleneck, [256]],
22 | [-1, 1, Conv, [512, 3, 2]], # 7-P4/16
23 | [-1, 8, Bottleneck, [512]],
24 | [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32
25 | [-1, 4, Bottleneck, [1024]], # 10
26 | ]
27 |
28 | # YOLOv3-SPP head
29 | head:
30 | [[-1, 1, Bottleneck, [1024, False]],
31 | [-1, 1, SPP, [512, [5, 9, 13]]],
32 | [-1, 1, Conv, [1024, 3, 1]],
33 | [-1, 1, Conv, [512, 1, 1]],
34 | [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large)
35 |
36 | [-2, 1, Conv, [256, 1, 1]],
37 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
38 | [[-1, 8], 1, Concat, [1]], # cat backbone P4
39 | [-1, 1, Bottleneck, [512, False]],
40 | [-1, 1, Bottleneck, [512, False]],
41 | [-1, 1, Conv, [256, 1, 1]],
42 | [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium)
43 |
44 | [-2, 1, Conv, [128, 1, 1]],
45 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
46 | [[-1, 6], 1, Concat, [1]], # cat backbone P3
47 | [-1, 1, Bottleneck, [256, False]],
48 | [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small)
49 |
50 | [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
51 | ]
52 |
--------------------------------------------------------------------------------
/models/hub/yolov3-tiny.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 | anchors:
8 | - [10,14, 23,27, 37,58] # P4/16
9 | - [81,82, 135,169, 344,319] # P5/32
10 |
11 | # YOLOv3-tiny backbone
12 | backbone:
13 | # [from, number, module, args]
14 | [[-1, 1, Conv, [16, 3, 1]], # 0
15 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 1-P1/2
16 | [-1, 1, Conv, [32, 3, 1]],
17 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 3-P2/4
18 | [-1, 1, Conv, [64, 3, 1]],
19 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 5-P3/8
20 | [-1, 1, Conv, [128, 3, 1]],
21 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 7-P4/16
22 | [-1, 1, Conv, [256, 3, 1]],
23 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 9-P5/32
24 | [-1, 1, Conv, [512, 3, 1]],
25 | [-1, 1, nn.ZeroPad2d, [[0, 1, 0, 1]]], # 11
26 | [-1, 1, nn.MaxPool2d, [2, 1, 0]], # 12
27 | ]
28 |
29 | # YOLOv3-tiny head
30 | head:
31 | [[-1, 1, Conv, [1024, 3, 1]],
32 | [-1, 1, Conv, [256, 1, 1]],
33 | [-1, 1, Conv, [512, 3, 1]], # 15 (P5/32-large)
34 |
35 | [-2, 1, Conv, [128, 1, 1]],
36 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
37 | [[-1, 8], 1, Concat, [1]], # cat backbone P4
38 | [-1, 1, Conv, [256, 3, 1]], # 19 (P4/16-medium)
39 |
40 | [[19, 15], 1, Detect, [nc, anchors]], # Detect(P4, P5)
41 | ]
42 |
--------------------------------------------------------------------------------
/models/hub/yolov3.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # darknet53 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [32, 3, 1]], # 0
16 | [-1, 1, Conv, [64, 3, 2]], # 1-P1/2
17 | [-1, 1, Bottleneck, [64]],
18 | [-1, 1, Conv, [128, 3, 2]], # 3-P2/4
19 | [-1, 2, Bottleneck, [128]],
20 | [-1, 1, Conv, [256, 3, 2]], # 5-P3/8
21 | [-1, 8, Bottleneck, [256]],
22 | [-1, 1, Conv, [512, 3, 2]], # 7-P4/16
23 | [-1, 8, Bottleneck, [512]],
24 | [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32
25 | [-1, 4, Bottleneck, [1024]], # 10
26 | ]
27 |
28 | # YOLOv3 head
29 | head:
30 | [[-1, 1, Bottleneck, [1024, False]],
31 | [-1, 1, Conv, [512, 1, 1]],
32 | [-1, 1, Conv, [1024, 3, 1]],
33 | [-1, 1, Conv, [512, 1, 1]],
34 | [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large)
35 |
36 | [-2, 1, Conv, [256, 1, 1]],
37 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
38 | [[-1, 8], 1, Concat, [1]], # cat backbone P4
39 | [-1, 1, Bottleneck, [512, False]],
40 | [-1, 1, Bottleneck, [512, False]],
41 | [-1, 1, Conv, [256, 1, 1]],
42 | [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium)
43 |
44 | [-2, 1, Conv, [128, 1, 1]],
45 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
46 | [[-1, 6], 1, Concat, [1]], # cat backbone P3
47 | [-1, 1, Bottleneck, [256, False]],
48 | [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small)
49 |
50 | [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
51 | ]
52 |
--------------------------------------------------------------------------------
/models/hub/yolov5-bifpn.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3, [1024]],
24 | [-1, 1, SPPF, [1024, 5]], # 9
25 | ]
26 |
27 | # YOLOv5 v6.0 BiFPN head
28 | head:
29 | [[-1, 1, Conv, [512, 1, 1]],
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
32 | [-1, 3, C3, [512, False]], # 13
33 |
34 | [-1, 1, Conv, [256, 1, 1]],
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
38 |
39 | [-1, 1, Conv, [256, 3, 2]],
40 | [[-1, 14, 6], 1, Concat, [1]], # cat P4 <--- BiFPN change
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
42 |
43 | [-1, 1, Conv, [512, 3, 2]],
44 | [[-1, 10], 1, Concat, [1]], # cat head P5
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
46 |
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
48 | ]
49 |
--------------------------------------------------------------------------------
/models/hub/yolov5-fpn.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3, [1024]],
24 | [-1, 1, SPPF, [1024, 5]], # 9
25 | ]
26 |
27 | # YOLOv5 v6.0 FPN head
28 | head:
29 | [[-1, 3, C3, [1024, False]], # 10 (P5/32-large)
30 |
31 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
32 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
33 | [-1, 1, Conv, [512, 1, 1]],
34 | [-1, 3, C3, [512, False]], # 14 (P4/16-medium)
35 |
36 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
37 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
38 | [-1, 1, Conv, [256, 1, 1]],
39 | [-1, 3, C3, [256, False]], # 18 (P3/8-small)
40 |
41 | [[18, 14, 10], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
42 | ]
43 |
--------------------------------------------------------------------------------
/models/hub/yolov5-p2.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 | anchors: 3 # AutoAnchor evolves 3 anchors per P output layer
8 |
9 | # YOLOv5 v6.0 backbone
10 | backbone:
11 | # [from, number, module, args]
12 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
13 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
14 | [-1, 3, C3, [128]],
15 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
16 | [-1, 6, C3, [256]],
17 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
18 | [-1, 9, C3, [512]],
19 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
20 | [-1, 3, C3, [1024]],
21 | [-1, 1, SPPF, [1024, 5]], # 9
22 | ]
23 |
24 | # YOLOv5 v6.0 head with (P2, P3, P4, P5) outputs
25 | head:
26 | [[-1, 1, Conv, [512, 1, 1]],
27 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
28 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
29 | [-1, 3, C3, [512, False]], # 13
30 |
31 | [-1, 1, Conv, [256, 1, 1]],
32 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
33 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
34 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
35 |
36 | [-1, 1, Conv, [128, 1, 1]],
37 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
38 | [[-1, 2], 1, Concat, [1]], # cat backbone P2
39 | [-1, 1, C3, [128, False]], # 21 (P2/4-xsmall)
40 |
41 | [-1, 1, Conv, [128, 3, 2]],
42 | [[-1, 18], 1, Concat, [1]], # cat head P3
43 | [-1, 3, C3, [256, False]], # 24 (P3/8-small)
44 |
45 | [-1, 1, Conv, [256, 3, 2]],
46 | [[-1, 14], 1, Concat, [1]], # cat head P4
47 | [-1, 3, C3, [512, False]], # 27 (P4/16-medium)
48 |
49 | [-1, 1, Conv, [512, 3, 2]],
50 | [[-1, 10], 1, Concat, [1]], # cat head P5
51 | [-1, 3, C3, [1024, False]], # 30 (P5/32-large)
52 |
53 | [[21, 24, 27, 30], 1, Detect, [nc, anchors]], # Detect(P2, P3, P4, P5)
54 | ]
55 |
--------------------------------------------------------------------------------
/models/hub/yolov5-p34.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.33 # model depth multiple
6 | width_multiple: 0.50 # layer channel multiple
7 | anchors: 3 # AutoAnchor evolves 3 anchors per P output layer
8 |
9 | # YOLOv5 v6.0 backbone
10 | backbone:
11 | # [from, number, module, args]
12 | [ [ -1, 1, Conv, [ 64, 6, 2, 2 ] ], # 0-P1/2
13 | [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4
14 | [ -1, 3, C3, [ 128 ] ],
15 | [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8
16 | [ -1, 6, C3, [ 256 ] ],
17 | [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16
18 | [ -1, 9, C3, [ 512 ] ],
19 | [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 7-P5/32
20 | [ -1, 3, C3, [ 1024 ] ],
21 | [ -1, 1, SPPF, [ 1024, 5 ] ], # 9
22 | ]
23 |
24 | # YOLOv5 v6.0 head with (P3, P4) outputs
25 | head:
26 | [ [ -1, 1, Conv, [ 512, 1, 1 ] ],
27 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
28 | [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4
29 | [ -1, 3, C3, [ 512, False ] ], # 13
30 |
31 | [ -1, 1, Conv, [ 256, 1, 1 ] ],
32 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
33 | [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3
34 | [ -1, 3, C3, [ 256, False ] ], # 17 (P3/8-small)
35 |
36 | [ -1, 1, Conv, [ 256, 3, 2 ] ],
37 | [ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P4
38 | [ -1, 3, C3, [ 512, False ] ], # 20 (P4/16-medium)
39 |
40 | [ [ 17, 20 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4)
41 | ]
42 |
--------------------------------------------------------------------------------
/models/hub/yolov5-p6.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 | anchors: 3 # AutoAnchor evolves 3 anchors per P output layer
8 |
9 | # YOLOv5 v6.0 backbone
10 | backbone:
11 | # [from, number, module, args]
12 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
13 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
14 | [-1, 3, C3, [128]],
15 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
16 | [-1, 6, C3, [256]],
17 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
18 | [-1, 9, C3, [512]],
19 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32
20 | [-1, 3, C3, [768]],
21 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64
22 | [-1, 3, C3, [1024]],
23 | [-1, 1, SPPF, [1024, 5]], # 11
24 | ]
25 |
26 | # YOLOv5 v6.0 head with (P3, P4, P5, P6) outputs
27 | head:
28 | [[-1, 1, Conv, [768, 1, 1]],
29 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
30 | [[-1, 8], 1, Concat, [1]], # cat backbone P5
31 | [-1, 3, C3, [768, False]], # 15
32 |
33 | [-1, 1, Conv, [512, 1, 1]],
34 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
35 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
36 | [-1, 3, C3, [512, False]], # 19
37 |
38 | [-1, 1, Conv, [256, 1, 1]],
39 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
40 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
41 | [-1, 3, C3, [256, False]], # 23 (P3/8-small)
42 |
43 | [-1, 1, Conv, [256, 3, 2]],
44 | [[-1, 20], 1, Concat, [1]], # cat head P4
45 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium)
46 |
47 | [-1, 1, Conv, [512, 3, 2]],
48 | [[-1, 16], 1, Concat, [1]], # cat head P5
49 | [-1, 3, C3, [768, False]], # 29 (P5/32-large)
50 |
51 | [-1, 1, Conv, [768, 3, 2]],
52 | [[-1, 12], 1, Concat, [1]], # cat head P6
53 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge)
54 |
55 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6)
56 | ]
57 |
--------------------------------------------------------------------------------
/models/hub/yolov5-p7.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 | anchors: 3 # AutoAnchor evolves 3 anchors per P output layer
8 |
9 | # YOLOv5 v6.0 backbone
10 | backbone:
11 | # [from, number, module, args]
12 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
13 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
14 | [-1, 3, C3, [128]],
15 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
16 | [-1, 6, C3, [256]],
17 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
18 | [-1, 9, C3, [512]],
19 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32
20 | [-1, 3, C3, [768]],
21 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64
22 | [-1, 3, C3, [1024]],
23 | [-1, 1, Conv, [1280, 3, 2]], # 11-P7/128
24 | [-1, 3, C3, [1280]],
25 | [-1, 1, SPPF, [1280, 5]], # 13
26 | ]
27 |
28 | # YOLOv5 v6.0 head with (P3, P4, P5, P6, P7) outputs
29 | head:
30 | [[-1, 1, Conv, [1024, 1, 1]],
31 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
32 | [[-1, 10], 1, Concat, [1]], # cat backbone P6
33 | [-1, 3, C3, [1024, False]], # 17
34 |
35 | [-1, 1, Conv, [768, 1, 1]],
36 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
37 | [[-1, 8], 1, Concat, [1]], # cat backbone P5
38 | [-1, 3, C3, [768, False]], # 21
39 |
40 | [-1, 1, Conv, [512, 1, 1]],
41 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
42 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
43 | [-1, 3, C3, [512, False]], # 25
44 |
45 | [-1, 1, Conv, [256, 1, 1]],
46 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
47 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
48 | [-1, 3, C3, [256, False]], # 29 (P3/8-small)
49 |
50 | [-1, 1, Conv, [256, 3, 2]],
51 | [[-1, 26], 1, Concat, [1]], # cat head P4
52 | [-1, 3, C3, [512, False]], # 32 (P4/16-medium)
53 |
54 | [-1, 1, Conv, [512, 3, 2]],
55 | [[-1, 22], 1, Concat, [1]], # cat head P5
56 | [-1, 3, C3, [768, False]], # 35 (P5/32-large)
57 |
58 | [-1, 1, Conv, [768, 3, 2]],
59 | [[-1, 18], 1, Concat, [1]], # cat head P6
60 | [-1, 3, C3, [1024, False]], # 38 (P6/64-xlarge)
61 |
62 | [-1, 1, Conv, [1024, 3, 2]],
63 | [[-1, 14], 1, Concat, [1]], # cat head P7
64 | [-1, 3, C3, [1280, False]], # 41 (P7/128-xxlarge)
65 |
66 | [[29, 32, 35, 38, 41], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6, P7)
67 | ]
68 |
--------------------------------------------------------------------------------
/models/hub/yolov5-panet.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3, [1024]],
24 | [-1, 1, SPPF, [1024, 5]], # 9
25 | ]
26 |
27 | # YOLOv5 v6.0 PANet head
28 | head:
29 | [[-1, 1, Conv, [512, 1, 1]],
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
32 | [-1, 3, C3, [512, False]], # 13
33 |
34 | [-1, 1, Conv, [256, 1, 1]],
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
38 |
39 | [-1, 1, Conv, [256, 3, 2]],
40 | [[-1, 14], 1, Concat, [1]], # cat head P4
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
42 |
43 | [-1, 1, Conv, [512, 3, 2]],
44 | [[-1, 10], 1, Concat, [1]], # cat head P5
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
46 |
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
48 | ]
49 |
--------------------------------------------------------------------------------
/models/hub/yolov5l6.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 | anchors:
8 | - [19,27, 44,40, 38,94] # P3/8
9 | - [96,68, 86,152, 180,137] # P4/16
10 | - [140,301, 303,264, 238,542] # P5/32
11 | - [436,615, 739,380, 925,792] # P6/64
12 |
13 | # YOLOv5 v6.0 backbone
14 | backbone:
15 | # [from, number, module, args]
16 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
18 | [-1, 3, C3, [128]],
19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
20 | [-1, 6, C3, [256]],
21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
22 | [-1, 9, C3, [512]],
23 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32
24 | [-1, 3, C3, [768]],
25 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64
26 | [-1, 3, C3, [1024]],
27 | [-1, 1, SPPF, [1024, 5]], # 11
28 | ]
29 |
30 | # YOLOv5 v6.0 head
31 | head:
32 | [[-1, 1, Conv, [768, 1, 1]],
33 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
34 | [[-1, 8], 1, Concat, [1]], # cat backbone P5
35 | [-1, 3, C3, [768, False]], # 15
36 |
37 | [-1, 1, Conv, [512, 1, 1]],
38 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
39 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
40 | [-1, 3, C3, [512, False]], # 19
41 |
42 | [-1, 1, Conv, [256, 1, 1]],
43 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
44 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
45 | [-1, 3, C3, [256, False]], # 23 (P3/8-small)
46 |
47 | [-1, 1, Conv, [256, 3, 2]],
48 | [[-1, 20], 1, Concat, [1]], # cat head P4
49 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium)
50 |
51 | [-1, 1, Conv, [512, 3, 2]],
52 | [[-1, 16], 1, Concat, [1]], # cat head P5
53 | [-1, 3, C3, [768, False]], # 29 (P5/32-large)
54 |
55 | [-1, 1, Conv, [768, 3, 2]],
56 | [[-1, 12], 1, Concat, [1]], # cat head P6
57 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge)
58 |
59 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6)
60 | ]
61 |
--------------------------------------------------------------------------------
/models/hub/yolov5m6.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.67 # model depth multiple
6 | width_multiple: 0.75 # layer channel multiple
7 | anchors:
8 | - [19,27, 44,40, 38,94] # P3/8
9 | - [96,68, 86,152, 180,137] # P4/16
10 | - [140,301, 303,264, 238,542] # P5/32
11 | - [436,615, 739,380, 925,792] # P6/64
12 |
13 | # YOLOv5 v6.0 backbone
14 | backbone:
15 | # [from, number, module, args]
16 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
18 | [-1, 3, C3, [128]],
19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
20 | [-1, 6, C3, [256]],
21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
22 | [-1, 9, C3, [512]],
23 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32
24 | [-1, 3, C3, [768]],
25 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64
26 | [-1, 3, C3, [1024]],
27 | [-1, 1, SPPF, [1024, 5]], # 11
28 | ]
29 |
30 | # YOLOv5 v6.0 head
31 | head:
32 | [[-1, 1, Conv, [768, 1, 1]],
33 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
34 | [[-1, 8], 1, Concat, [1]], # cat backbone P5
35 | [-1, 3, C3, [768, False]], # 15
36 |
37 | [-1, 1, Conv, [512, 1, 1]],
38 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
39 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
40 | [-1, 3, C3, [512, False]], # 19
41 |
42 | [-1, 1, Conv, [256, 1, 1]],
43 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
44 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
45 | [-1, 3, C3, [256, False]], # 23 (P3/8-small)
46 |
47 | [-1, 1, Conv, [256, 3, 2]],
48 | [[-1, 20], 1, Concat, [1]], # cat head P4
49 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium)
50 |
51 | [-1, 1, Conv, [512, 3, 2]],
52 | [[-1, 16], 1, Concat, [1]], # cat head P5
53 | [-1, 3, C3, [768, False]], # 29 (P5/32-large)
54 |
55 | [-1, 1, Conv, [768, 3, 2]],
56 | [[-1, 12], 1, Concat, [1]], # cat head P6
57 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge)
58 |
59 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6)
60 | ]
61 |
--------------------------------------------------------------------------------
/models/hub/yolov5n6.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.33 # model depth multiple
6 | width_multiple: 0.25 # layer channel multiple
7 | anchors:
8 | - [19,27, 44,40, 38,94] # P3/8
9 | - [96,68, 86,152, 180,137] # P4/16
10 | - [140,301, 303,264, 238,542] # P5/32
11 | - [436,615, 739,380, 925,792] # P6/64
12 |
13 | # YOLOv5 v6.0 backbone
14 | backbone:
15 | # [from, number, module, args]
16 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
18 | [-1, 3, C3, [128]],
19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
20 | [-1, 6, C3, [256]],
21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
22 | [-1, 9, C3, [512]],
23 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32
24 | [-1, 3, C3, [768]],
25 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64
26 | [-1, 3, C3, [1024]],
27 | [-1, 1, SPPF, [1024, 5]], # 11
28 | ]
29 |
30 | # YOLOv5 v6.0 head
31 | head:
32 | [[-1, 1, Conv, [768, 1, 1]],
33 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
34 | [[-1, 8], 1, Concat, [1]], # cat backbone P5
35 | [-1, 3, C3, [768, False]], # 15
36 |
37 | [-1, 1, Conv, [512, 1, 1]],
38 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
39 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
40 | [-1, 3, C3, [512, False]], # 19
41 |
42 | [-1, 1, Conv, [256, 1, 1]],
43 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
44 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
45 | [-1, 3, C3, [256, False]], # 23 (P3/8-small)
46 |
47 | [-1, 1, Conv, [256, 3, 2]],
48 | [[-1, 20], 1, Concat, [1]], # cat head P4
49 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium)
50 |
51 | [-1, 1, Conv, [512, 3, 2]],
52 | [[-1, 16], 1, Concat, [1]], # cat head P5
53 | [-1, 3, C3, [768, False]], # 29 (P5/32-large)
54 |
55 | [-1, 1, Conv, [768, 3, 2]],
56 | [[-1, 12], 1, Concat, [1]], # cat head P6
57 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge)
58 |
59 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6)
60 | ]
61 |
--------------------------------------------------------------------------------
/models/hub/yolov5s-ghost.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.33 # model depth multiple
6 | width_multiple: 0.50 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, GhostConv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3Ghost, [128]],
18 | [-1, 1, GhostConv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3Ghost, [256]],
20 | [-1, 1, GhostConv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3Ghost, [512]],
22 | [-1, 1, GhostConv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3Ghost, [1024]],
24 | [-1, 1, SPPF, [1024, 5]], # 9
25 | ]
26 |
27 | # YOLOv5 v6.0 head
28 | head:
29 | [[-1, 1, GhostConv, [512, 1, 1]],
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
32 | [-1, 3, C3Ghost, [512, False]], # 13
33 |
34 | [-1, 1, GhostConv, [256, 1, 1]],
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
37 | [-1, 3, C3Ghost, [256, False]], # 17 (P3/8-small)
38 |
39 | [-1, 1, GhostConv, [256, 3, 2]],
40 | [[-1, 14], 1, Concat, [1]], # cat head P4
41 | [-1, 3, C3Ghost, [512, False]], # 20 (P4/16-medium)
42 |
43 | [-1, 1, GhostConv, [512, 3, 2]],
44 | [[-1, 10], 1, Concat, [1]], # cat head P5
45 | [-1, 3, C3Ghost, [1024, False]], # 23 (P5/32-large)
46 |
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
48 | ]
49 |
--------------------------------------------------------------------------------
/models/hub/yolov5s-transformer.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.33 # model depth multiple
6 | width_multiple: 0.50 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3TR, [1024]], # 8 <--- C3TR() Transformer module
24 | [-1, 1, SPPF, [1024, 5]], # 9
25 | ]
26 |
27 | # YOLOv5 v6.0 head
28 | head:
29 | [[-1, 1, Conv, [512, 1, 1]],
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
32 | [-1, 3, C3, [512, False]], # 13
33 |
34 | [-1, 1, Conv, [256, 1, 1]],
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
38 |
39 | [-1, 1, Conv, [256, 3, 2]],
40 | [[-1, 14], 1, Concat, [1]], # cat head P4
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
42 |
43 | [-1, 1, Conv, [512, 3, 2]],
44 | [[-1, 10], 1, Concat, [1]], # cat head P5
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
46 |
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
48 | ]
49 |
--------------------------------------------------------------------------------
/models/hub/yolov5s6.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.33 # model depth multiple
6 | width_multiple: 0.50 # layer channel multiple
7 | anchors:
8 | - [19,27, 44,40, 38,94] # P3/8
9 | - [96,68, 86,152, 180,137] # P4/16
10 | - [140,301, 303,264, 238,542] # P5/32
11 | - [436,615, 739,380, 925,792] # P6/64
12 |
13 | # YOLOv5 v6.0 backbone
14 | backbone:
15 | # [from, number, module, args]
16 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
18 | [-1, 3, C3, [128]],
19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
20 | [-1, 6, C3, [256]],
21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
22 | [-1, 9, C3, [512]],
23 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32
24 | [-1, 3, C3, [768]],
25 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64
26 | [-1, 3, C3, [1024]],
27 | [-1, 1, SPPF, [1024, 5]], # 11
28 | ]
29 |
30 | # YOLOv5 v6.0 head
31 | head:
32 | [[-1, 1, Conv, [768, 1, 1]],
33 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
34 | [[-1, 8], 1, Concat, [1]], # cat backbone P5
35 | [-1, 3, C3, [768, False]], # 15
36 |
37 | [-1, 1, Conv, [512, 1, 1]],
38 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
39 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
40 | [-1, 3, C3, [512, False]], # 19
41 |
42 | [-1, 1, Conv, [256, 1, 1]],
43 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
44 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
45 | [-1, 3, C3, [256, False]], # 23 (P3/8-small)
46 |
47 | [-1, 1, Conv, [256, 3, 2]],
48 | [[-1, 20], 1, Concat, [1]], # cat head P4
49 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium)
50 |
51 | [-1, 1, Conv, [512, 3, 2]],
52 | [[-1, 16], 1, Concat, [1]], # cat head P5
53 | [-1, 3, C3, [768, False]], # 29 (P5/32-large)
54 |
55 | [-1, 1, Conv, [768, 3, 2]],
56 | [[-1, 12], 1, Concat, [1]], # cat head P6
57 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge)
58 |
59 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6)
60 | ]
61 |
--------------------------------------------------------------------------------
/models/hub/yolov5x6.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.33 # model depth multiple
6 | width_multiple: 1.25 # layer channel multiple
7 | anchors:
8 | - [19,27, 44,40, 38,94] # P3/8
9 | - [96,68, 86,152, 180,137] # P4/16
10 | - [140,301, 303,264, 238,542] # P5/32
11 | - [436,615, 739,380, 925,792] # P6/64
12 |
13 | # YOLOv5 v6.0 backbone
14 | backbone:
15 | # [from, number, module, args]
16 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
18 | [-1, 3, C3, [128]],
19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
20 | [-1, 6, C3, [256]],
21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
22 | [-1, 9, C3, [512]],
23 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32
24 | [-1, 3, C3, [768]],
25 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64
26 | [-1, 3, C3, [1024]],
27 | [-1, 1, SPPF, [1024, 5]], # 11
28 | ]
29 |
30 | # YOLOv5 v6.0 head
31 | head:
32 | [[-1, 1, Conv, [768, 1, 1]],
33 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
34 | [[-1, 8], 1, Concat, [1]], # cat backbone P5
35 | [-1, 3, C3, [768, False]], # 15
36 |
37 | [-1, 1, Conv, [512, 1, 1]],
38 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
39 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
40 | [-1, 3, C3, [512, False]], # 19
41 |
42 | [-1, 1, Conv, [256, 1, 1]],
43 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
44 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
45 | [-1, 3, C3, [256, False]], # 23 (P3/8-small)
46 |
47 | [-1, 1, Conv, [256, 3, 2]],
48 | [[-1, 20], 1, Concat, [1]], # cat head P4
49 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium)
50 |
51 | [-1, 1, Conv, [512, 3, 2]],
52 | [[-1, 16], 1, Concat, [1]], # cat head P5
53 | [-1, 3, C3, [768, False]], # 29 (P5/32-large)
54 |
55 | [-1, 1, Conv, [768, 3, 2]],
56 | [[-1, 12], 1, Concat, [1]], # cat head P6
57 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge)
58 |
59 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6)
60 | ]
61 |
--------------------------------------------------------------------------------
/models/yolov5l.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3, [1024]],
24 | [-1, 1, SPPF, [1024, 5]], # 9
25 | ]
26 |
27 | # YOLOv5 v6.0 head
28 | head:
29 | [[-1, 1, Conv, [512, 1, 1]],
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
32 | [-1, 3, C3, [512, False]], # 13
33 |
34 | [-1, 1, Conv, [256, 1, 1]],
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
38 |
39 | [-1, 1, Conv, [256, 3, 2]],
40 | [[-1, 14], 1, Concat, [1]], # cat head P4
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
42 |
43 | [-1, 1, Conv, [512, 3, 2]],
44 | [[-1, 10], 1, Concat, [1]], # cat head P5
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
46 |
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
48 | ]
49 |
--------------------------------------------------------------------------------
/models/yolov5m.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.67 # model depth multiple
6 | width_multiple: 0.75 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3, [1024]],
24 | [-1, 1, SPPF, [1024, 5]], # 9
25 | ]
26 |
27 | # YOLOv5 v6.0 head
28 | head:
29 | [[-1, 1, Conv, [512, 1, 1]],
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
32 | [-1, 3, C3, [512, False]], # 13
33 |
34 | [-1, 1, Conv, [256, 1, 1]],
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
38 |
39 | [-1, 1, Conv, [256, 3, 2]],
40 | [[-1, 14], 1, Concat, [1]], # cat head P4
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
42 |
43 | [-1, 1, Conv, [512, 3, 2]],
44 | [[-1, 10], 1, Concat, [1]], # cat head P5
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
46 |
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
48 | ]
49 |
--------------------------------------------------------------------------------
/models/yolov5m_C3CA.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.67 # model depth multiple
6 | width_multiple: 0.75 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3CA, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3CA, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3CA, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3CA, [1024]],
24 | [-1, 1, SPPF, [1024, 5]], # 9
25 | ]
26 |
27 | # YOLOv5 v6.0 head
28 | head:
29 | [[-1, 1, Conv, [512, 1, 1]],
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
32 | [-1, 3, C3, [512, False]], # 13
33 |
34 | [-1, 1, Conv, [256, 1, 1]],
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
38 |
39 | [-1, 1, Conv, [256, 3, 2]],
40 | [[-1, 14], 1, Concat, [1]], # cat head P4
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
42 |
43 | [-1, 1, Conv, [512, 3, 2]],
44 | [[-1, 10], 1, Concat, [1]], # cat head P5
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
46 |
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
48 | ]
49 |
--------------------------------------------------------------------------------
/models/yolov5m_C3CBAM.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.67 # model depth multiple
6 | width_multiple: 0.75 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3CBAM, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3CBAM, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3CBAM, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3CBAM, [1024]],
24 | [-1, 1, SPPF, [1024, 5]], # 9
25 | ]
26 |
27 | # YOLOv5 v6.0 head
28 | head:
29 | [[-1, 1, Conv, [512, 1, 1]],
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
32 | [-1, 3, C3, [512, False]], # 13
33 |
34 | [-1, 1, Conv, [256, 1, 1]],
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
38 |
39 | [-1, 1, Conv, [256, 3, 2]],
40 | [[-1, 14], 1, Concat, [1]], # cat head P4
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
42 |
43 | [-1, 1, Conv, [512, 3, 2]],
44 | [[-1, 10], 1, Concat, [1]], # cat head P5
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
46 |
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
48 | ]
49 |
--------------------------------------------------------------------------------
/models/yolov5m_C3ECA.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.67 # model depth multiple
6 | width_multiple: 0.75 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3ECA, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3ECA, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3ECA, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3ECA, [1024]],
24 | [-1, 1, SPPF, [1024, 5]], # 9
25 | ]
26 |
27 | # YOLOv5 v6.0 head
28 | head:
29 | [[-1, 1, Conv, [512, 1, 1]],
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
32 | [-1, 3, C3, [512, False]], # 13
33 |
34 | [-1, 1, Conv, [256, 1, 1]],
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
38 |
39 | [-1, 1, Conv, [256, 3, 2]],
40 | [[-1, 14], 1, Concat, [1]], # cat head P4
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
42 |
43 | [-1, 1, Conv, [512, 3, 2]],
44 | [[-1, 10], 1, Concat, [1]], # cat head P5
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
46 |
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
48 | ]
49 |
--------------------------------------------------------------------------------
/models/yolov5m_C3SE.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.67 # model depth multiple
6 | width_multiple: 0.75 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3SE, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3SE, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3SE, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3SE, [1024]],
24 | [-1, 1, SPPF, [1024, 5]], # 9
25 | ]
26 |
27 | # YOLOv5 v6.0 head
28 | head:
29 | [[-1, 1, Conv, [512, 1, 1]],
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
32 | [-1, 3, C3, [512, False]], # 13
33 |
34 | [-1, 1, Conv, [256, 1, 1]],
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
38 |
39 | [-1, 1, Conv, [256, 3, 2]],
40 | [[-1, 14], 1, Concat, [1]], # cat head P4
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
42 |
43 | [-1, 1, Conv, [512, 3, 2]],
44 | [[-1, 10], 1, Concat, [1]], # cat head P5
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
46 |
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
48 | ]
49 |
--------------------------------------------------------------------------------
/models/yolov5m_CBAM.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.67 # model depth multiple
6 | width_multiple: 0.75 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3, [1024]],
24 | [-1, 1, CBAM, [1024]],
25 | [-1, 1, SPPF, [1024, 5]], # 9
26 | ]
27 |
28 | # YOLOv5 v6.0 head
29 | head:
30 | [[-1, 1, Conv, [512, 1, 1]],
31 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
32 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
33 | [-1, 3, C3, [512, False]], # 13
34 |
35 | [-1, 1, Conv, [256, 1, 1]],
36 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
37 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
38 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
39 |
40 | [-1, 1, Conv, [256, 3, 2]],
41 | [[-1, 14], 1, Concat, [1]], # cat head P4
42 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
43 |
44 | [-1, 1, Conv, [512, 3, 2]],
45 | [[-1, 10], 1, Concat, [1]], # cat head P5
46 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
47 |
48 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
49 | ]
50 |
--------------------------------------------------------------------------------
/models/yolov5m_CoordAtt.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.67 # model depth multiple
6 | width_multiple: 0.75 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3, [1024]],
24 | [-1, 1, CoordAtt, [1024]],
25 | [-1, 1, SPPF, [1024, 5]], # 9
26 | ]
27 |
28 | # YOLOv5 v6.0 head
29 | head:
30 | [[-1, 1, Conv, [512, 1, 1]],
31 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
32 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
33 | [-1, 3, C3, [512, False]], # 13
34 |
35 | [-1, 1, Conv, [256, 1, 1]],
36 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
37 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
38 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
39 |
40 | [-1, 1, Conv, [256, 3, 2]],
41 | [[-1, 14], 1, Concat, [1]], # cat head P4
42 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
43 |
44 | [-1, 1, Conv, [512, 3, 2]],
45 | [[-1, 10], 1, Concat, [1]], # cat head P5
46 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
47 |
48 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
49 | ]
50 |
--------------------------------------------------------------------------------
/models/yolov5m_ECA.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.67 # model depth multiple
6 | width_multiple: 0.75 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3, [1024]],
24 | [-1, 1, ECA, [1024]],
25 | [-1, 1, SPPF, [1024, 5]], # 9
26 | ]
27 |
28 | # YOLOv5 v6.0 head
29 | head:
30 | [[-1, 1, Conv, [512, 1, 1]],
31 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
32 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
33 | [-1, 3, C3, [512, False]], # 13
34 |
35 | [-1, 1, Conv, [256, 1, 1]],
36 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
37 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
38 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
39 |
40 | [-1, 1, Conv, [256, 3, 2]],
41 | [[-1, 14], 1, Concat, [1]], # cat head P4
42 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
43 |
44 | [-1, 1, Conv, [512, 3, 2]],
45 | [[-1, 10], 1, Concat, [1]], # cat head P5
46 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
47 |
48 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
49 | ]
50 |
--------------------------------------------------------------------------------
/models/yolov5m_SE.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.67 # model depth multiple
6 | width_multiple: 0.75 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3, [1024]],
24 | [-1, 1, SE, [1024]],
25 | [-1, 1, SPPF, [1024, 5]], # 9
26 | ]
27 |
28 | # YOLOv5 v6.0 head
29 | head:
30 | [[-1, 1, Conv, [512, 1, 1]],
31 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
32 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
33 | [-1, 3, C3, [512, False]], # 13
34 |
35 | [-1, 1, Conv, [256, 1, 1]],
36 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
37 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
38 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
39 |
40 | [-1, 1, Conv, [256, 3, 2]],
41 | [[-1, 14], 1, Concat, [1]], # cat head P4
42 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
43 |
44 | [-1, 1, Conv, [512, 3, 2]],
45 | [[-1, 10], 1, Concat, [1]], # cat head P5
46 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
47 |
48 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
49 | ]
50 |
--------------------------------------------------------------------------------
/models/yolov5n.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.33 # model depth multiple
6 | width_multiple: 0.25 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3, [1024]],
24 | [-1, 1, SPPF, [1024, 5]], # 9
25 | ]
26 |
27 | # YOLOv5 v6.0 head
28 | head:
29 | [[-1, 1, Conv, [512, 1, 1]],
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
32 | [-1, 3, C3, [512, False]], # 13
33 |
34 | [-1, 1, Conv, [256, 1, 1]],
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
38 |
39 | [-1, 1, Conv, [256, 3, 2]],
40 | [[-1, 14], 1, Concat, [1]], # cat head P4
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
42 |
43 | [-1, 1, Conv, [512, 3, 2]],
44 | [[-1, 10], 1, Concat, [1]], # cat head P5
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
46 |
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
48 | ]
49 |
--------------------------------------------------------------------------------
/models/yolov5s.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.33 # model depth multiple
6 | width_multiple: 0.50 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3, [1024]],
24 | [-1, 1, SPPF, [1024, 5]], # 9
25 | ]
26 |
27 | # YOLOv5 v6.0 head
28 | head:
29 | [[-1, 1, Conv, [512, 1, 1]],
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
32 | [-1, 3, C3, [512, False]], # 13
33 |
34 | [-1, 1, Conv, [256, 1, 1]],
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
38 |
39 | [-1, 1, Conv, [256, 3, 2]],
40 | [[-1, 14], 1, Concat, [1]], # cat head P4
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
42 |
43 | [-1, 1, Conv, [512, 3, 2]],
44 | [[-1, 10], 1, Concat, [1]], # cat head P5
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
46 |
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
48 | ]
49 |
--------------------------------------------------------------------------------
/models/yolov5x.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.33 # model depth multiple
6 | width_multiple: 1.25 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3, [1024]],
24 | [-1, 1, SPPF, [1024, 5]], # 9
25 | ]
26 |
27 | # YOLOv5 v6.0 head
28 | head:
29 | [[-1, 1, Conv, [512, 1, 1]],
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
32 | [-1, 3, C3, [512, False]], # 13
33 |
34 | [-1, 1, Conv, [256, 1, 1]],
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
38 |
39 | [-1, 1, Conv, [256, 3, 2]],
40 | [[-1, 14], 1, Concat, [1]], # cat head P4
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
42 |
43 | [-1, 1, Conv, [512, 3, 2]],
44 | [[-1, 10], 1, Concat, [1]], # cat head P5
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
46 |
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
48 | ]
49 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | # pip install -r requirements.txt
2 |
3 | # Base ----------------------------------------
4 | matplotlib>=3.2.2
5 | numpy>=1.18.5
6 | opencv-python>=4.1.2
7 | Pillow>=7.1.2
8 | PyYAML>=5.3.1
9 | requests>=2.23.0
10 | scipy>=1.4.1
11 | torch>=1.7.0
12 | torchvision>=0.8.1
13 | tqdm>=4.41.0
14 |
15 | # Logging -------------------------------------
16 | tensorboard>=2.4.1
17 | # wandb
18 |
19 | # Plotting ------------------------------------
20 | pandas>=1.1.4
21 | seaborn>=0.11.0
22 |
23 | # Export --------------------------------------
24 | # coremltools>=4.1 # CoreML export
25 | # onnx>=1.9.0 # ONNX export
26 | # onnx-simplifier>=0.3.6 # ONNX simplifier
27 | # scikit-learn==0.19.2 # CoreML quantization
28 | # tensorflow>=2.4.1 # TFLite export
29 | # tensorflowjs>=3.9.0 # TF.js export
30 | # openvino-dev # OpenVINO export
31 |
32 | # Extras --------------------------------------
33 | # albumentations>=1.0.3
34 | # Cython # for pycocotools https://github.com/cocodataset/cocoapi/issues/172
35 | # pycocotools>=2.0 # COCO mAP
36 | # roboflow
37 | thop # FLOPs computation
38 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
# Project-wide configuration file, can be used for package metadata and other tool configurations
2 | # Example usage: global configuration for PEP8 (via flake8) setting or default pytest arguments
3 | # Local usage: pip install pre-commit, pre-commit run --all-files
4 |
5 | [metadata]
6 | license_file = LICENSE
7 | description_file = README.md
8 |
9 |
10 | [tool:pytest]
11 | norecursedirs =
12 | .git
13 | dist
14 | build
15 | addopts =
16 | --doctest-modules
17 | --durations=25
18 | --color=yes
19 |
20 |
21 | [flake8]
22 | max-line-length = 120
23 | exclude = .tox,*.egg,build,temp
24 | select = E,W,F
25 | doctests = True
26 | verbose = 2
27 | # https://pep8.readthedocs.io/en/latest/intro.html#error-codes
28 | format = pylint
29 | # see: https://www.flake8rules.com/
30 | ignore =
31 | E731 # Do not assign a lambda expression, use a def
32 | F405 # name may be undefined, or defined from star imports: module
33 | E402 # module level import not at top of file
34 | F401 # module imported but unused
    W504  # line break after binary operator
    E127  # continuation line over-indented for visual indent
    E231  # missing whitespace after ',', ';', or ':'
    E501  # line too long
    F403  # 'from module import *' used; unable to detect undefined names
41 |
42 |
43 | [isort]
44 | # https://pycqa.github.io/isort/docs/configuration/options.html
45 | line_length = 120
46 | # see: https://pycqa.github.io/isort/docs/configuration/multi_line_output_modes.html
47 | multi_line_output = 0
48 |
49 |
50 | [yapf]
51 | based_on_style = pep8
52 | spaces_before_comment = 2
53 | COLUMN_LIMIT = 120
54 | COALESCE_BRACKETS = True
55 | SPACES_AROUND_POWER_OPERATOR = True
56 | SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET = False
57 | SPLIT_BEFORE_CLOSING_BRACKET = False
58 | SPLIT_BEFORE_FIRST_ARGUMENT = False
59 | # EACH_DICT_ENTRY_ON_SEPARATE_LINE = False
60 |
--------------------------------------------------------------------------------
/split.py:
--------------------------------------------------------------------------------
# coding:utf-8
"""Split a dataset's file list into train/val/test txt files."""

import os
import random
import argparse

parser = argparse.ArgumentParser()
# Prefix prepended to every name written to the split files (empty = bare stems).
path = ''
# Directory whose files are enumerated. NOTE: despite the flag name, the
# default points at the images directory, not the XML Annotations directory.
parser.add_argument('--xml_path', default='datasets/mydata/images/', type=str, help='input xml label path')
# Output directory for the generated train/val/test txt files.
parser.add_argument('--txt_path', default='datasets/mydata/', type=str, help='output txt label path')
opt = parser.parse_args()

trainval_percent = 1.0  # fraction of files used for train+val (remainder would be test)
train_percent = 0.9     # fraction of trainval used for training (rest goes to val)
xmlfilepath = opt.xml_path
txtsavepath = opt.txt_path
total_xml = os.listdir(xmlfilepath)
if not os.path.exists(txtsavepath):
    os.makedirs(txtsavepath)

num = len(total_xml)
list_index = range(num)
tv = int(num * trainval_percent)
tr = int(tv * train_percent)
trainval = random.sample(list_index, tv)
# set() gives O(1) membership tests in the loop below instead of O(n) list scans
train = set(random.sample(trainval, tr))

# Context managers guarantee the files are closed even if a write fails.
# test.txt is intentionally left empty (trainval_percent == 1.0).
with open(txtsavepath + '/test.txt', 'w') as file_test, \
        open(txtsavepath + '/train.txt', 'w') as file_train, \
        open(txtsavepath + '/val.txt', 'w') as file_val:
    for i in list_index:
        name = total_xml[i][:-4] + '\n'  # strip the 4-char extension (.jpg/.xml)
        if i in train:
            file_train.write(path + name)
        else:
            file_val.write(path + name)
--------------------------------------------------------------------------------
/utils/__init__.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | """
3 | utils/initialization
4 | """
5 |
6 |
def notebook_init(verbose=True):
    """Verify notebook software/hardware setup and print a one-line environment summary."""
    print('Checking setup...')

    import os
    import shutil

    from utils.general import check_requirements, emojis, is_colab
    from utils.torch_utils import select_device  # imports

    check_requirements(('psutil', 'IPython'))
    import psutil
    from IPython import display  # to display images and clear console output

    if is_colab():
        # Colab ships a sample_data folder that is never needed for training
        shutil.rmtree('/content/sample_data', ignore_errors=True)  # remove colab /sample_data directory

    if verbose:
        # Gather CPU / RAM / disk statistics for the summary line
        gib = 1 << 30  # bytes per GiB (1024 ** 3)
        total_ram = psutil.virtual_memory().total
        disk_total, _, disk_free = shutil.disk_usage("/")
        display.clear_output()
        info = (f'({os.cpu_count()} CPUs, {total_ram / gib:.1f} GB RAM, '
                f'{(disk_total - disk_free) / gib:.1f}/{disk_total / gib:.1f} GB disk)')
    else:
        info = ''

    select_device(newline=False)
    print(emojis(f'Setup complete ✅ {info}'))
    return display
37 |
--------------------------------------------------------------------------------
/utils/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mohenghui/yolov5_attention/b3b22930ab6e839a49d2c3a244826375130b93cb/utils/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/augmentations.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mohenghui/yolov5_attention/b3b22930ab6e839a49d2c3a244826375130b93cb/utils/__pycache__/augmentations.cpython-37.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/autoanchor.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mohenghui/yolov5_attention/b3b22930ab6e839a49d2c3a244826375130b93cb/utils/__pycache__/autoanchor.cpython-37.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/autobatch.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mohenghui/yolov5_attention/b3b22930ab6e839a49d2c3a244826375130b93cb/utils/__pycache__/autobatch.cpython-37.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/callbacks.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mohenghui/yolov5_attention/b3b22930ab6e839a49d2c3a244826375130b93cb/utils/__pycache__/callbacks.cpython-37.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/datasets.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mohenghui/yolov5_attention/b3b22930ab6e839a49d2c3a244826375130b93cb/utils/__pycache__/datasets.cpython-37.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/downloads.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mohenghui/yolov5_attention/b3b22930ab6e839a49d2c3a244826375130b93cb/utils/__pycache__/downloads.cpython-37.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/general.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mohenghui/yolov5_attention/b3b22930ab6e839a49d2c3a244826375130b93cb/utils/__pycache__/general.cpython-37.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/loss.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mohenghui/yolov5_attention/b3b22930ab6e839a49d2c3a244826375130b93cb/utils/__pycache__/loss.cpython-37.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/metrics.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mohenghui/yolov5_attention/b3b22930ab6e839a49d2c3a244826375130b93cb/utils/__pycache__/metrics.cpython-37.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/plots.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mohenghui/yolov5_attention/b3b22930ab6e839a49d2c3a244826375130b93cb/utils/__pycache__/plots.cpython-37.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/torch_utils.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mohenghui/yolov5_attention/b3b22930ab6e839a49d2c3a244826375130b93cb/utils/__pycache__/torch_utils.cpython-37.pyc
--------------------------------------------------------------------------------
/utils/activations.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | """
3 | Activation functions
4 | """
5 |
6 | import torch
7 | import torch.nn as nn
8 | import torch.nn.functional as F
9 |
10 |
class SiLU(nn.Module):
    # Export-friendly SiLU (swish) activation: x * sigmoid(x)
    # https://arxiv.org/pdf/1606.08415.pdf
    @staticmethod
    def forward(x):
        return x.mul(torch.sigmoid(x))
16 |
17 |
class Hardswish(nn.Module):
    # Export-friendly Hard-SiLU (hard-swish) activation
    @staticmethod
    def forward(x):
        # F.hardsigmoid(x) == hardtanh(x + 3, 0, 6) / 6, but the latter form also
        # exports cleanly to TorchScript, CoreML and ONNX
        hard_sigmoid = F.hardtanh(x + 3, 0.0, 6.0) / 6.0
        return x * hard_sigmoid
24 |
25 |
class Mish(nn.Module):
    # Mish activation: x * tanh(softplus(x)) — https://github.com/digantamisra98/Mish
    @staticmethod
    def forward(x):
        softplus_x = F.softplus(x)
        return x * torch.tanh(softplus_x)
31 |
32 |
class MemoryEfficientMish(nn.Module):
    # Mish activation memory-efficient
    # Custom autograd Function: saves only the input x and recomputes the
    # intermediates during backward(), trading compute for memory.
    class F(torch.autograd.Function):

        @staticmethod
        def forward(ctx, x):
            # NOTE: inside these methods the bare name `F` resolves to the
            # module-level `torch.nn.functional` import (class-body names are
            # not visible in method scope), not this inner class.
            ctx.save_for_backward(x)
            return x.mul(torch.tanh(F.softplus(x)))  # x * tanh(ln(1 + exp(x)))

        @staticmethod
        def backward(ctx, grad_output):
            # Analytic gradient: d/dx [x * tanh(softplus(x))] = fx + x * sx * (1 - fx^2)
            # where sx = sigmoid(x) and fx = tanh(softplus(x))
            x = ctx.saved_tensors[0]
            sx = torch.sigmoid(x)
            fx = F.softplus(x).tanh()
            return grad_output * (fx + x * sx * (1 - fx * fx))

    def forward(self, x):
        # Dispatch through the custom autograd Function
        return self.F.apply(x)
51 |
52 |
class FReLU(nn.Module):
    # FReLU activation https://arxiv.org/abs/2007.11824
    # Funnel activation: max(x, T(x)), where T is a depthwise conv + BN spatial condition
    def __init__(self, c1, k=3):  # ch_in, kernel
        super().__init__()
        # padding = k // 2 keeps spatial dims for any odd k; the original
        # hard-coded padding=1, which is only correct for the default k=3
        self.conv = nn.Conv2d(c1, c1, k, 1, k // 2, groups=c1, bias=False)
        self.bn = nn.BatchNorm2d(c1)

    def forward(self, x):
        # Element-wise max of the input and its spatially-conditioned counterpart
        return torch.max(x, self.bn(self.conv(x)))
62 |
63 |
class AconC(nn.Module):
    r""" ACON activation (activate or not)
    AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter
    according to "Activate or Not: Learning Customized Activation" .
    """

    def __init__(self, c1):
        super().__init__()
        # Per-channel learnable parameters, broadcast over (N, C, H, W)
        self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
        self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
        self.beta = nn.Parameter(torch.ones(1, c1, 1, 1))

    def forward(self, x):
        # (p1 - p2) * x acts as the "switchable" part of the activation
        switched = (self.p1 - self.p2) * x
        return switched * torch.sigmoid(self.beta * switched) + self.p2 * x
79 |
80 |
class MetaAconC(nn.Module):
    r""" ACON activation (activate or not)
    MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network
    according to "Activate or Not: Learning Customized Activation" .
    """

    def __init__(self, c1, k=1, s=1, r=16):  # ch_in, kernel, stride, r
        super().__init__()
        c2 = max(r, c1 // r)  # bottleneck width of the beta-generating network
        self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
        self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
        self.fc1 = nn.Conv2d(c1, c2, k, s, bias=True)
        self.fc2 = nn.Conv2d(c2, c1, k, s, bias=True)
        # self.bn1 = nn.BatchNorm2d(c2)
        # self.bn2 = nn.BatchNorm2d(c1)

    def forward(self, x):
        # Global average pool over the spatial dims. The original used the
        # non-standard `keepdims=` numpy-compat alias; `keepdim=` is the
        # documented torch keyword and works on all supported versions.
        y = x.mean(dim=(2, 3), keepdim=True)
        # batch-size 1 bug/instabilities https://github.com/ultralytics/yolov5/issues/2891
        # beta = torch.sigmoid(self.bn2(self.fc2(self.bn1(self.fc1(y)))))  # bug/unstable
        beta = torch.sigmoid(self.fc2(self.fc1(y)))  # bug patch BN layers removed
        dpx = (self.p1 - self.p2) * x
        return dpx * torch.sigmoid(beta * dpx) + self.p2 * x
104 |
--------------------------------------------------------------------------------
/utils/autobatch.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | """
3 | Auto-batch utils
4 | """
5 |
6 | from copy import deepcopy
7 |
8 | import numpy as np
9 | import torch
10 | from torch.cuda import amp
11 |
12 | from utils.general import LOGGER, colorstr
13 | from utils.torch_utils import profile
14 |
15 |
def check_train_batch_size(model, imgsz=640):
    # Estimate the optimal YOLOv5 training batch size for `model` at image size `imgsz`.
    model_copy = deepcopy(model).train()  # profile a training-mode copy; leave the caller's model untouched
    with amp.autocast():
        return autobatch(model_copy, imgsz)  # compute optimal batch size
20 |
21 |
def autobatch(model, imgsz=640, fraction=0.9, batch_size=16):
    # Automatically estimate best batch size to use `fraction` of available CUDA memory
    # Usage:
    #     import torch
    #     from utils.autobatch import autobatch
    #     model = torch.hub.load('ultralytics/yolov5', 'yolov5s', autoshape=False)
    #     print(autobatch(model))
    #
    # Args:
    #     model: nn.Module already placed on its target device
    #     imgsz: square inference image size (pixels)
    #     fraction: target fraction of total CUDA memory to fill
    #     batch_size: fallback batch size (returned for CPU devices or on profiling failure)

    prefix = colorstr('AutoBatch: ')
    LOGGER.info(f'{prefix}Computing optimal batch size for --imgsz {imgsz}')
    device = next(model.parameters()).device  # get model device
    if device.type == 'cpu':
        LOGGER.info(f'{prefix}CUDA not detected, using default CPU batch-size {batch_size}')
        return batch_size

    gb = 1 << 30  # bytes to GiB (1024 ** 3)
    d = str(device).upper()  # 'CUDA:0'
    properties = torch.cuda.get_device_properties(device)  # device properties
    t = properties.total_memory / gb  # total (GiB)
    r = torch.cuda.memory_reserved(device) / gb  # reserved (GiB)
    a = torch.cuda.memory_allocated(device) / gb  # allocated (GiB)
    f = t - (r + a)  # free inside reserved (GiB)
    LOGGER.info(f'{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free')

    batch_sizes = [1, 2, 4, 8, 16]
    try:
        img = [torch.zeros(b, 3, imgsz, imgsz) for b in batch_sizes]
        y = profile(img, model, n=3, device=device)
    except Exception as e:
        # BUG FIX: previously execution fell through here with `y` undefined,
        # raising NameError below; fall back to the default batch size instead.
        LOGGER.warning(f'{prefix}{e}, using default batch-size {batch_size}')
        return batch_size

    y = [x[2] for x in y if x]  # memory usage is element [2] of each profile result
    batch_sizes = batch_sizes[:len(y)]  # keep only batch sizes that profiled successfully
    p = np.polyfit(batch_sizes, y, deg=1)  # first-degree fit: memory ≈ p[0]*batch + p[1]
    b = int((f * fraction - p[1]) / p[0])  # solve for the batch size hitting the memory target
    LOGGER.info(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%)')
    return b
59 |
--------------------------------------------------------------------------------
/utils/aws/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mohenghui/yolov5_attention/b3b22930ab6e839a49d2c3a244826375130b93cb/utils/aws/__init__.py
--------------------------------------------------------------------------------
/utils/aws/mime.sh:
--------------------------------------------------------------------------------
1 | # AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/
2 | # This script will run on every instance restart, not only on first start
3 | # --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA ---
4 |
5 | Content-Type: multipart/mixed; boundary="//"
6 | MIME-Version: 1.0
7 |
8 | --//
9 | Content-Type: text/cloud-config; charset="us-ascii"
10 | MIME-Version: 1.0
11 | Content-Transfer-Encoding: 7bit
12 | Content-Disposition: attachment; filename="cloud-config.txt"
13 |
14 | #cloud-config
15 | cloud_final_modules:
16 | - [scripts-user, always]
17 |
18 | --//
19 | Content-Type: text/x-shellscript; charset="us-ascii"
20 | MIME-Version: 1.0
21 | Content-Transfer-Encoding: 7bit
22 | Content-Disposition: attachment; filename="userdata.txt"
23 |
24 | #!/bin/bash
25 | # --- paste contents of userdata.sh here ---
26 | --//
27 |
--------------------------------------------------------------------------------
/utils/aws/resume.py:
--------------------------------------------------------------------------------
1 | # Resume all interrupted trainings in yolov5/ dir including DDP trainings
2 | # Usage: $ python utils/aws/resume.py
3 |
4 | import os
5 | import sys
6 | from pathlib import Path
7 |
8 | import torch
9 | import yaml
10 |
FILE = Path(__file__).resolve()
ROOT = FILE.parents[2] # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT)) # add ROOT to PATH

port = 0 # --master_port
path = Path('').resolve()
# Scan the working directory for interrupted runs and relaunch each one
for last in path.rglob('*/**/last.pt'):
    ckpt = torch.load(last)
    if ckpt['optimizer'] is None:
        # no optimizer state in the checkpoint -> treated as a finished run, skip it
        continue

    # Load opt.yaml
    with open(last.parent.parent / 'opt.yaml', errors='ignore') as f:
        opt = yaml.safe_load(f)

    # Get device count
    d = opt['device'].split(',') # devices
    nd = len(d) # number of devices
    ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1) # distributed data parallel

    if ddp: # multi-GPU
        port += 1  # unique --master_port per DDP launch so runs don't collide
        cmd = f'python -m torch.distributed.run --nproc_per_node {nd} --master_port {port} train.py --resume {last}'
    else: # single-GPU
        cmd = f'python train.py --resume {last}'

    cmd += ' > /dev/null 2>&1 &' # redirect output to dev/null and run in daemon thread
    print(cmd)
    os.system(cmd)
41 |
--------------------------------------------------------------------------------
/utils/aws/userdata.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# AWS EC2 instance startup script https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html
# This script will run only once on first instance start (for a re-start script see mime.sh)
# /home/ubuntu (ubuntu) or /home/ec2-user (amazon-linux) is working dir
# Use >300 GB SSD

# NOTE(review): relative path — user-data scripts run from /, so this resolves to /home/ubuntu; confirm
cd home/ubuntu
if [ ! -d yolov5 ]; then
  echo "Running first-time script." # install dependencies, download COCO, pull Docker
  git clone https://github.com/ultralytics/yolov5 -b master && sudo chmod -R 777 yolov5
  cd yolov5
  # The next three tasks run in parallel (trailing &) and are joined by `wait`
  bash data/scripts/get_coco.sh && echo "COCO done." &
  sudo docker pull ultralytics/yolov5:latest && echo "Docker done." &
  python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." &
  wait && echo "All tasks done." # finish background tasks
else
  echo "Running re-start script." # resume interrupted runs
  i=0
  list=$(sudo docker ps -qa) # container list i.e. $'one\ntwo\nthree\nfour'
  while IFS= read -r id; do
    ((i++))
    echo "restarting container $i: $id"
    sudo docker start $id
    # sudo docker exec -it $id python train.py --resume # single-GPU
    sudo docker exec -d $id python utils/aws/resume.py # multi-scenario
  done <<<"$list"
fi
28 |
--------------------------------------------------------------------------------
/utils/benchmarks.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | """
3 | Run YOLOv5 benchmarks on all supported export formats
4 |
5 | Format | `export.py --include` | Model
6 | --- | --- | ---
7 | PyTorch | - | yolov5s.pt
8 | TorchScript | `torchscript` | yolov5s.torchscript
9 | ONNX | `onnx` | yolov5s.onnx
10 | OpenVINO | `openvino` | yolov5s_openvino_model/
11 | TensorRT | `engine` | yolov5s.engine
12 | CoreML | `coreml` | yolov5s.mlmodel
13 | TensorFlow SavedModel | `saved_model` | yolov5s_saved_model/
14 | TensorFlow GraphDef | `pb` | yolov5s.pb
15 | TensorFlow Lite | `tflite` | yolov5s.tflite
16 | TensorFlow Edge TPU | `edgetpu` | yolov5s_edgetpu.tflite
17 | TensorFlow.js | `tfjs` | yolov5s_web_model/
18 |
19 | Requirements:
20 | $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu # CPU
21 | $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow # GPU
22 | $ pip install -U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com # TensorRT
23 |
24 | Usage:
25 | $ python utils/benchmarks.py --weights yolov5s.pt --img 640
26 | """
27 |
28 | import argparse
29 | import sys
30 | import time
31 | from pathlib import Path
32 |
33 | import pandas as pd
34 |
35 | FILE = Path(__file__).resolve()
36 | ROOT = FILE.parents[1] # YOLOv5 root directory
37 | if str(ROOT) not in sys.path:
38 | sys.path.append(str(ROOT)) # add ROOT to PATH
39 | # ROOT = ROOT.relative_to(Path.cwd()) # relative
40 |
41 | import export
42 | import val
43 | from utils import notebook_init
44 | from utils.general import LOGGER, print_args
45 | from utils.torch_utils import select_device
46 |
47 |
def run(
        weights=ROOT / 'yolov5s.pt',  # weights path
        imgsz=640,  # inference size (pixels)
        batch_size=1,  # batch size
        data=ROOT / 'data/coco128.yaml',  # dataset.yaml path
        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        half=False,  # use FP16 half-precision inference
        test=False,  # test exports only (unused here; kept for signature parity with test())
):
    # Export the model to every supported format and benchmark mAP / inference speed of each.
    y, t = [], time.time()
    formats = export.export_formats()
    device = select_device(device)
    for i, (name, f, suffix, gpu) in formats.iterrows():  # index, (name, file, suffix, gpu-capable)
        try:
            assert i != 9, 'Edge TPU not supported'
            assert i != 10, 'TF.js not supported'
            if device.type != 'cpu':
                assert gpu, f'{name} inference not supported on GPU'

            # Export
            if f == '-':
                w = weights  # PyTorch format needs no export
            else:
                w = export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1]  # all others
            assert suffix in str(w), 'export failed'

            # Validate
            result = val.run(data, w, batch_size, imgsz, plots=False, device=device, task='benchmark', half=half)
            metrics = result[0]  # metrics (mp, mr, map50, map, *losses(box, obj, cls))
            speeds = result[2]  # times (preprocess, inference, postprocess)
            y.append([name, round(metrics[3], 4), round(speeds[1], 2)])  # mAP, t_inference
        except Exception as e:
            LOGGER.warning(f'WARNING: Benchmark failure for {name}: {e}')
            y.append([name, None, None])  # mAP, t_inference

    # Print results
    LOGGER.info('\n')
    parse_opt()  # re-print the parsed arguments alongside the results
    notebook_init()  # print system info
    # BUG FIX: this previously read `... if map else ...`, testing the *builtin* `map`,
    # which is always truthy — the else branch was dead code and has been removed.
    py = pd.DataFrame(y, columns=['Format', 'mAP@0.5:0.95', 'Inference time (ms)'])
    LOGGER.info(f'\nBenchmarks complete ({time.time() - t:.2f}s)')
    LOGGER.info(str(py))
    return py
91 |
92 |
def test(
        weights=ROOT / 'yolov5s.pt',  # weights path
        imgsz=640,  # inference size (pixels)
        batch_size=1,  # batch size
        data=ROOT / 'data/coco128.yaml',  # dataset.yaml path
        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        half=False,  # use FP16 half-precision inference
        test=False,  # test exports only
):
    # Attempt an export to each supported format, recording only pass/fail per format.
    results, start = [], time.time()
    formats = export.export_formats()
    device = select_device(device)
    for i, (name, f, suffix, gpu) in formats.iterrows():  # index, (name, file, suffix, gpu-capable)
        try:
            if f == '-':
                w = weights  # native PyTorch weights need no export
            else:
                w = export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1]  # weights
            assert suffix in str(w), 'export failed'
            results.append([name, True])
        except Exception:
            results.append([name, False])  # mAP, t_inference

    # Print results
    LOGGER.info('\n')
    parse_opt()
    notebook_init()  # print system info
    py = pd.DataFrame(results, columns=['Format', 'Export'])
    LOGGER.info(f'\nExports complete ({time.time() - start:.2f}s)')
    LOGGER.info(str(py))
    return py
122 |
123 |
def parse_opt():
    # Parse benchmarks.py command-line options and echo them via print_args.
    parser = argparse.ArgumentParser()
    add = parser.add_argument  # local alias keeps the option table compact
    add('--weights', type=str, default=ROOT / 'yolov5s.pt', help='weights path')
    add('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)')
    add('--batch-size', type=int, default=1, help='batch size')
    add('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path')
    add('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    add('--half', action='store_true', help='use FP16 half-precision inference')
    add('--test', action='store_true', help='test exports only')
    opt = parser.parse_args()
    print_args(vars(opt))
    return opt
136 |
137 |
def main(opt):
    # Dispatch to export-only testing or full benchmarking based on the --test flag.
    runner = test if opt.test else run
    runner(**vars(opt))
140 |
141 |
if __name__ == "__main__":
    # CLI entry point: parse arguments, then run benchmarks (or export tests with --test)
    opt = parse_opt()
    main(opt)
145 |
--------------------------------------------------------------------------------
/utils/callbacks.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | """
3 | Callback utils
4 | """
5 |
6 |
class Callbacks:
    """
    Handles all registered callbacks for YOLOv5 Hooks
    """
    # NOTE: the docstrings in this class previously opened with `""""` (four quotes),
    # which made the summary line start with a stray `"` character; fixed to `"""`.

    def __init__(self):
        # Define the available callbacks: hook name -> list of {'name', 'callback'} actions
        self._callbacks = {
            'on_pretrain_routine_start': [],
            'on_pretrain_routine_end': [],
            'on_train_start': [],
            'on_train_epoch_start': [],
            'on_train_batch_start': [],
            'optimizer_step': [],
            'on_before_zero_grad': [],
            'on_train_batch_end': [],
            'on_train_epoch_end': [],
            'on_val_start': [],
            'on_val_batch_start': [],
            'on_val_image_end': [],
            'on_val_batch_end': [],
            'on_val_end': [],
            'on_fit_epoch_end': [],  # fit = train + val
            'on_model_save': [],
            'on_train_end': [],
            'on_params_update': [],
            'teardown': [],}
        self.stop_training = False  # set True to interrupt training

    def register_action(self, hook, name='', callback=None):
        """
        Register a new action to a callback hook

        Args:
            hook: The callback hook name to register the action to
            name: The name of the action for later reference
            callback: The callback to fire
        """
        assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}"
        assert callable(callback), f"callback '{callback}' is not callable"
        self._callbacks[hook].append({'name': name, 'callback': callback})

    def get_registered_actions(self, hook=None):
        """
        Returns all the registered actions by callback hook

        Args:
            hook: The name of the hook to check, defaults to all
        """
        return self._callbacks[hook] if hook else self._callbacks

    def run(self, hook, *args, **kwargs):
        """
        Loop through the registered actions and fire all callbacks

        Args:
            hook: The name of the hook to check, defaults to all
            args: Arguments to receive from YOLOv5
            kwargs: Keyword Arguments to receive from YOLOv5
        """

        assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}"

        for action in self._callbacks[hook]:  # renamed from misleading 'logger'
            action['callback'](*args, **kwargs)
72 |
--------------------------------------------------------------------------------
/utils/docker/.dockerignore:
--------------------------------------------------------------------------------
1 | # Repo-specific DockerIgnore -------------------------------------------------------------------------------------------
2 | #.git
3 | .cache
4 | .idea
5 | runs
6 | output
7 | coco
8 | storage.googleapis.com
9 |
10 | data/samples/*
11 | **/results*.csv
12 | *.jpg
13 |
14 | # Neural Network weights -----------------------------------------------------------------------------------------------
15 | **/*.pt
16 | **/*.pth
17 | **/*.onnx
18 | **/*.engine
19 | **/*.mlmodel
20 | **/*.torchscript
21 | **/*.torchscript.pt
22 | **/*.tflite
23 | **/*.h5
24 | **/*.pb
25 | *_saved_model/
26 | *_web_model/
27 | *_openvino_model/
28 |
29 | # Below Copied From .gitignore -----------------------------------------------------------------------------------------
31 |
32 |
33 | # GitHub Python GitIgnore ----------------------------------------------------------------------------------------------
34 | # Byte-compiled / optimized / DLL files
35 | __pycache__/
36 | *.py[cod]
37 | *$py.class
38 |
39 | # C extensions
40 | *.so
41 |
42 | # Distribution / packaging
43 | .Python
44 | env/
45 | build/
46 | develop-eggs/
47 | dist/
48 | downloads/
49 | eggs/
50 | .eggs/
51 | lib/
52 | lib64/
53 | parts/
54 | sdist/
55 | var/
56 | wheels/
57 | *.egg-info/
58 | wandb/
59 | .installed.cfg
60 | *.egg
61 |
62 | # PyInstaller
63 | # Usually these files are written by a python script from a template
64 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
65 | *.manifest
66 | *.spec
67 |
68 | # Installer logs
69 | pip-log.txt
70 | pip-delete-this-directory.txt
71 |
72 | # Unit test / coverage reports
73 | htmlcov/
74 | .tox/
75 | .coverage
76 | .coverage.*
77 | .cache
78 | nosetests.xml
79 | coverage.xml
80 | *.cover
81 | .hypothesis/
82 |
83 | # Translations
84 | *.mo
85 | *.pot
86 |
87 | # Django stuff:
88 | *.log
89 | local_settings.py
90 |
91 | # Flask stuff:
92 | instance/
93 | .webassets-cache
94 |
95 | # Scrapy stuff:
96 | .scrapy
97 |
98 | # Sphinx documentation
99 | docs/_build/
100 |
101 | # PyBuilder
102 | target/
103 |
104 | # Jupyter Notebook
105 | .ipynb_checkpoints
106 |
107 | # pyenv
108 | .python-version
109 |
110 | # celery beat schedule file
111 | celerybeat-schedule
112 |
113 | # SageMath parsed files
114 | *.sage.py
115 |
116 | # dotenv
117 | .env
118 |
119 | # virtualenv
120 | .venv*
121 | venv*/
122 | ENV*/
123 |
124 | # Spyder project settings
125 | .spyderproject
126 | .spyproject
127 |
128 | # Rope project settings
129 | .ropeproject
130 |
131 | # mkdocs documentation
132 | /site
133 |
134 | # mypy
135 | .mypy_cache/
136 |
137 |
138 | # https://github.com/github/gitignore/blob/master/Global/macOS.gitignore -----------------------------------------------
139 |
140 | # General
141 | .DS_Store
142 | .AppleDouble
143 | .LSOverride
144 |
145 | # Icon must end with two \r
146 | Icon
147 | Icon?
148 |
149 | # Thumbnails
150 | ._*
151 |
152 | # Files that might appear in the root of a volume
153 | .DocumentRevisions-V100
154 | .fseventsd
155 | .Spotlight-V100
156 | .TemporaryItems
157 | .Trashes
158 | .VolumeIcon.icns
159 | .com.apple.timemachine.donotpresent
160 |
161 | # Directories potentially created on remote AFP share
162 | .AppleDB
163 | .AppleDesktop
164 | Network Trash Folder
165 | Temporary Items
166 | .apdisk
167 |
168 |
169 | # https://github.com/github/gitignore/blob/master/Global/JetBrains.gitignore
170 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
171 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
172 |
173 | # User-specific stuff:
174 | .idea/*
175 | .idea/**/workspace.xml
176 | .idea/**/tasks.xml
177 | .idea/dictionaries
178 | .html # Bokeh Plots
179 | .pg # TensorFlow Frozen Graphs
180 | .avi # videos
181 |
182 | # Sensitive or high-churn files:
183 | .idea/**/dataSources/
184 | .idea/**/dataSources.ids
185 | .idea/**/dataSources.local.xml
186 | .idea/**/sqlDataSources.xml
187 | .idea/**/dynamic.xml
188 | .idea/**/uiDesigner.xml
189 |
190 | # Gradle:
191 | .idea/**/gradle.xml
192 | .idea/**/libraries
193 |
194 | # CMake
195 | cmake-build-debug/
196 | cmake-build-release/
197 |
198 | # Mongo Explorer plugin:
199 | .idea/**/mongoSettings.xml
200 |
201 | ## File-based project format:
202 | *.iws
203 |
204 | ## Plugin-specific files:
205 |
206 | # IntelliJ
207 | out/
208 |
209 | # mpeltonen/sbt-idea plugin
210 | .idea_modules/
211 |
212 | # JIRA plugin
213 | atlassian-ide-plugin.xml
214 |
215 | # Cursive Clojure plugin
216 | .idea/replstate.xml
217 |
218 | # Crashlytics plugin (for Android Studio and IntelliJ)
219 | com_crashlytics_export_strings.xml
220 | crashlytics.properties
221 | crashlytics-build.properties
222 | fabric.properties
223 |
--------------------------------------------------------------------------------
/utils/docker/Dockerfile:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Start FROM Nvidia PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch
4 | FROM nvcr.io/nvidia/pytorch:21.10-py3
5 |
6 | # Install linux packages
7 | RUN apt update && apt install -y zip htop screen libgl1-mesa-glx
8 |
9 | # Install python dependencies
10 | COPY requirements.txt .
11 | RUN python -m pip install --upgrade pip
12 | RUN pip uninstall -y torch torchvision torchtext
13 | RUN pip install --no-cache -r requirements.txt albumentations wandb gsutil notebook \
14 | torch==1.11.0+cu113 torchvision==0.12.0+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html
15 | # RUN pip install --no-cache -U torch torchvision
16 |
17 | # Create working directory
18 | RUN mkdir -p /usr/src/app
19 | WORKDIR /usr/src/app
20 |
21 | # Copy contents
22 | COPY . /usr/src/app
23 | RUN git clone https://github.com/ultralytics/yolov5 /usr/src/yolov5
24 |
25 | # Downloads to user config dir
26 | ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/
27 |
28 | # Set environment variables
29 | ENV OMP_NUM_THREADS=8
30 |
31 |
32 | # Usage Examples -------------------------------------------------------------------------------------------------------
33 |
34 | # Build and Push
35 | # t=ultralytics/yolov5:latest && sudo docker build -t $t . && sudo docker push $t
36 |
37 | # Pull and Run
38 | # t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all $t
39 |
40 | # Pull and Run with local directory access
41 | # t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all -v "$(pwd)"/datasets:/usr/src/datasets $t
42 |
43 | # Kill all
44 | # sudo docker kill $(sudo docker ps -q)
45 |
46 | # Kill all image-based
47 | # sudo docker kill $(sudo docker ps -qa --filter ancestor=ultralytics/yolov5:latest)
48 |
49 | # Bash into running container
50 | # sudo docker exec -it 5a9b5863d93d bash
51 |
52 | # Bash into stopped container
53 | # id=$(sudo docker ps -qa) && sudo docker start $id && sudo docker exec -it $id bash
54 |
55 | # Clean up
56 | # docker system prune -a --volumes
57 |
58 | # Update Ubuntu drivers
59 | # https://www.maketecheasier.com/install-nvidia-drivers-ubuntu/
60 |
61 | # DDP test
62 | # python -m torch.distributed.run --nproc_per_node 2 --master_port 1 train.py --epochs 3
63 |
64 | # GCP VM from Image
65 | # docker.io/ultralytics/yolov5:latest
66 |
--------------------------------------------------------------------------------
/utils/docker/Dockerfile-cpu:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Start FROM Ubuntu image https://hub.docker.com/_/ubuntu
4 | FROM ubuntu:latest
5 |
6 | # Install linux packages
7 | RUN apt update
8 | RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt install -y tzdata
9 | RUN apt install -y python3-pip git zip curl htop screen libgl1-mesa-glx libglib2.0-0
10 | RUN alias python=python3
11 |
12 | # Install python dependencies
13 | COPY requirements.txt .
14 | RUN python3 -m pip install --upgrade pip
15 | RUN pip install --no-cache -r requirements.txt albumentations gsutil notebook \
16 | coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu tensorflowjs \
17 | torch==1.11.0+cpu torchvision==0.12.0+cpu -f https://download.pytorch.org/whl/cpu/torch_stable.html
18 |
19 | # Create working directory
20 | RUN mkdir -p /usr/src/app
21 | WORKDIR /usr/src/app
22 |
23 | # Copy contents
24 | COPY . /usr/src/app
25 | RUN git clone https://github.com/ultralytics/yolov5 /usr/src/yolov5
26 |
27 | # Downloads to user config dir
28 | ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/
29 |
30 |
31 | # Usage Examples -------------------------------------------------------------------------------------------------------
32 |
33 | # Build and Push
34 | # t=ultralytics/yolov5:latest-cpu && sudo docker build -t $t . && sudo docker push $t
35 |
36 | # Pull and Run
37 | # t=ultralytics/yolov5:latest-cpu && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t
38 |
--------------------------------------------------------------------------------
/utils/flask_rest_api/README.md:
--------------------------------------------------------------------------------
1 | # Flask REST API
2 |
3 | [REST](https://en.wikipedia.org/wiki/Representational_state_transfer) [API](https://en.wikipedia.org/wiki/API)s are
4 | commonly used to expose Machine Learning (ML) models to other services. This folder contains an example REST API
5 | created using Flask to expose the YOLOv5s model from [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/).
6 |
7 | ## Requirements
8 |
9 | [Flask](https://palletsprojects.com/p/flask/) is required. Install with:
10 |
11 | ```shell
12 | $ pip install Flask
13 | ```
14 |
15 | ## Run
16 |
17 | After Flask installation run:
18 |
19 | ```shell
20 | $ python3 restapi.py --port 5000
21 | ```
22 |
23 | Then use [curl](https://curl.se/) to perform a request:
24 |
25 | ```shell
26 | $ curl -X POST -F image=@zidane.jpg 'http://localhost:5000/v1/object-detection/yolov5s'
27 | ```
28 |
29 | The model inference results are returned as a JSON response:
30 |
31 | ```json
32 | [
33 | {
34 | "class": 0,
35 | "confidence": 0.8900438547,
36 | "height": 0.9318675399,
37 | "name": "person",
38 | "width": 0.3264600933,
39 | "xcenter": 0.7438579798,
40 | "ycenter": 0.5207948685
41 | },
42 | {
43 | "class": 0,
44 | "confidence": 0.8440024257,
45 | "height": 0.7155083418,
46 | "name": "person",
47 | "width": 0.6546785235,
48 | "xcenter": 0.427829951,
49 | "ycenter": 0.6334488392
50 | },
51 | {
52 | "class": 27,
53 | "confidence": 0.3771208823,
54 | "height": 0.3902671337,
55 | "name": "tie",
56 | "width": 0.0696444362,
57 | "xcenter": 0.3675483763,
58 | "ycenter": 0.7991207838
59 | },
60 | {
61 | "class": 27,
62 | "confidence": 0.3527112305,
63 | "height": 0.1540903747,
64 | "name": "tie",
65 | "width": 0.0336618312,
66 | "xcenter": 0.7814827561,
67 | "ycenter": 0.5065554976
68 | }
69 | ]
70 | ```
71 |
72 | An example python script to perform inference using [requests](https://docs.python-requests.org/en/master/) is given
73 | in `example_request.py`.
74 |
--------------------------------------------------------------------------------
/utils/flask_rest_api/example_request.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | """
3 | Perform test request
4 | """
5 |
6 | import pprint
7 |
8 | import requests
9 |
DETECTION_URL = "http://localhost:5000/v1/object-detection/yolov5s"  # endpoint served by restapi.py
IMAGE = "zidane.jpg"  # local test image to send

# Read image
with open(IMAGE, "rb") as f:
    image_data = f.read()

# POST the raw bytes as the "image" form field and parse the JSON detection list
response = requests.post(DETECTION_URL, files={"image": image_data}).json()

pprint.pprint(response)
20 |
--------------------------------------------------------------------------------
/utils/flask_rest_api/restapi.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | """
3 | Run a Flask REST API exposing a YOLOv5s model
4 | """
5 |
6 | import argparse
7 | import io
8 |
9 | import torch
10 | from flask import Flask, request
11 | from PIL import Image
12 |
13 | app = Flask(__name__)
14 |
15 | DETECTION_URL = "/v1/object-detection/yolov5s"
16 |
17 |
@app.route(DETECTION_URL, methods=["POST"])
def predict():
    # Run YOLOv5s inference on an uploaded image and return detections as JSON records.
    # NOTE(review): returns None (an HTTP 500 in Flask) when no "image" file field is
    # present — consider returning an explicit 400 error.
    if not request.method == "POST":
        return

    if request.files.get("image"):
        image_file = request.files["image"]
        image_bytes = image_file.read()

        img = Image.open(io.BytesIO(image_bytes))

        # `model` is the module-level hub model loaded in the __main__ block
        results = model(img, size=640)  # reduce size=320 for faster inference
        return results.pandas().xyxy[0].to_json(orient="records")
31 |
32 |
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Flask API exposing YOLOv5 model")
    parser.add_argument("--port", default=5000, type=int, help="port number")
    opt = parser.parse_args()

    # Fix known issue urllib.error.HTTPError 403: rate limit exceeded https://github.com/ultralytics/yolov5/pull/7210
    torch.hub._validate_not_a_forked_repo = lambda a, b, c: True

    # Load the pretrained model once at startup; predict() reads this module-level global
    model = torch.hub.load("ultralytics/yolov5", "yolov5s", force_reload=True)  # force_reload to recache
    app.run(host="0.0.0.0", port=opt.port)  # debug=True causes Restarting with stat
43 |
--------------------------------------------------------------------------------
/utils/google_app_engine/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM gcr.io/google-appengine/python
2 |
3 | # Create a virtualenv for dependencies. This isolates these packages from
4 | # system-level packages.
5 | # Use -p python3 or -p python3.7 to select python version. Default is version 2.
6 | RUN virtualenv /env -p python3
7 |
8 | # Setting these environment variables are the same as running
9 | # source /env/bin/activate.
10 | ENV VIRTUAL_ENV /env
11 | ENV PATH /env/bin:$PATH
12 |
13 | RUN apt-get update && apt-get install -y python-opencv
14 |
15 | # Copy the application's requirements.txt and run pip to install all
16 | # dependencies into the virtualenv.
17 | ADD requirements.txt /app/requirements.txt
18 | RUN pip install -r /app/requirements.txt
19 |
20 | # Add the application source code.
21 | ADD . /app
22 |
23 | # Run a WSGI server to serve the application. gunicorn must be declared as
24 | # a dependency in requirements.txt.
25 | CMD gunicorn -b :$PORT main:app
26 |
--------------------------------------------------------------------------------
/utils/google_app_engine/additional_requirements.txt:
--------------------------------------------------------------------------------
1 | # add these requirements in your app on top of the existing ones
2 | pip==21.1
3 | Flask==1.0.2
4 | gunicorn==19.9.0
5 |
--------------------------------------------------------------------------------
/utils/google_app_engine/app.yaml:
--------------------------------------------------------------------------------
1 | runtime: custom
2 | env: flex
3 |
4 | service: yolov5app
5 |
6 | liveness_check:
7 | initial_delay_sec: 600
8 |
9 | manual_scaling:
10 | instances: 1
11 | resources:
12 | cpu: 1
13 | memory_gb: 4
14 | disk_size_gb: 20
15 |
--------------------------------------------------------------------------------
/utils/loggers/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mohenghui/yolov5_attention/b3b22930ab6e839a49d2c3a244826375130b93cb/utils/loggers/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/utils/loggers/wandb/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mohenghui/yolov5_attention/b3b22930ab6e839a49d2c3a244826375130b93cb/utils/loggers/wandb/__init__.py
--------------------------------------------------------------------------------
/utils/loggers/wandb/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mohenghui/yolov5_attention/b3b22930ab6e839a49d2c3a244826375130b93cb/utils/loggers/wandb/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/utils/loggers/wandb/__pycache__/wandb_utils.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mohenghui/yolov5_attention/b3b22930ab6e839a49d2c3a244826375130b93cb/utils/loggers/wandb/__pycache__/wandb_utils.cpython-37.pyc
--------------------------------------------------------------------------------
/utils/loggers/wandb/log_dataset.py:
--------------------------------------------------------------------------------
1 | import argparse
2 |
3 | from wandb_utils import WandbLogger
4 |
5 | from utils.general import LOGGER
6 |
7 | WANDB_ARTIFACT_PREFIX = 'wandb-artifact://'
8 |
9 |
def create_dataset_artifact(opt):
    """Upload the dataset described by *opt* as a W&B artifact.

    Instantiating WandbLogger with job_type='Dataset Creation' performs the
    upload as a side effect; nothing is returned.
    """
    wandb_logger = WandbLogger(opt, None, job_type='Dataset Creation')  # TODO: return value unused
    if wandb_logger.wandb:
        return
    # wandb is not installed/available, so the upload could not happen.
    LOGGER.info("install wandb using `pip install wandb` to log the dataset")
14 |
15 |
if __name__ == '__main__':
    # CLI entry point: parse W&B dataset-upload options and run the job.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path')
    arg_parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset')
    arg_parser.add_argument('--project', type=str, default='YOLOv5', help='name of W&B Project')
    arg_parser.add_argument('--entity', default=None, help='W&B entity')
    arg_parser.add_argument('--name', type=str, default='log dataset', help='name of W&B run')

    parsed = arg_parser.parse_args()
    # A dataset-upload job is never a resumed training run.
    parsed.resume = False  # Explicitly disallow resume check for dataset upload job

    create_dataset_artifact(parsed)
28 |
--------------------------------------------------------------------------------
/utils/loggers/wandb/sweep.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from pathlib import Path
3 |
4 | import wandb
5 |
6 | FILE = Path(__file__).resolve()
7 | ROOT = FILE.parents[3] # YOLOv5 root directory
8 | if str(ROOT) not in sys.path:
9 | sys.path.append(str(ROOT)) # add ROOT to PATH
10 |
11 | from train import parse_opt, train
12 | from utils.callbacks import Callbacks
13 | from utils.general import increment_path
14 | from utils.torch_utils import select_device
15 |
16 |
def sweep():
    """Run one W&B sweep trial.

    Pulls the hyperparameter set chosen by the sweep agent from wandb.config,
    patches the relevant fields into the training options, then launches
    train(). Called by the agent via sweep.yaml's `program:` entry.
    """
    wandb.init()
    # Get hyp dict from sweep agent. Copy because train() modifies parameters which confused wandb.
    hyp_dict = vars(wandb.config).get("_items").copy()

    # Workaround: get necessary opt args
    opt = parse_opt(known=True)
    opt.batch_size = hyp_dict.get("batch_size")
    opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve))
    opt.epochs = hyp_dict.get("epochs")
    opt.nosave = True  # sweeps only need the final checkpoint
    # Fix: the original assigned opt.data twice (once from hyp_dict, then again
    # via str()); do both in a single step.
    opt.data = str(hyp_dict.get("data"))
    # train() expects plain-string paths for the remaining path-like options.
    opt.weights = str(opt.weights)
    opt.cfg = str(opt.cfg)
    opt.hyp = str(opt.hyp)
    opt.project = str(opt.project)
    device = select_device(opt.device, batch_size=opt.batch_size)

    # train
    train(hyp_dict, opt, device, callbacks=Callbacks())
38 |
39 |
if __name__ == "__main__":
    # Entry point invoked by the W&B sweep agent (see sweep.yaml `program:`).
    sweep()
42 |
--------------------------------------------------------------------------------
/utils/loggers/wandb/sweep.yaml:
--------------------------------------------------------------------------------
1 | # Hyperparameters for training
2 | # To set a range for a parameter,
3 | # provide min and max values as:
4 | # parameter:
5 | #
6 | # min: scalar
7 | # max: scalar
8 | # OR
9 | #
10 | # Set a specific list of search space-
11 | # parameter:
12 | # values: [scalar1, scalar2, scalar3...]
13 | #
14 | # You can use grid, Bayesian, or hyperopt search strategies
15 | # For more info on configuring sweeps visit - https://docs.wandb.ai/guides/sweeps/configuration
16 |
17 | program: utils/loggers/wandb/sweep.py
18 | method: random
19 | metric:
20 | name: metrics/mAP_0.5
21 | goal: maximize
22 |
23 | parameters:
24 | # hyperparameters: set either min, max range or values list
25 | data:
26 | value: "data/coco128.yaml"
27 | batch_size:
28 | values: [64]
29 | epochs:
30 | values: [10]
31 |
32 | lr0:
33 | distribution: uniform
34 | min: 1e-5
35 | max: 1e-1
36 | lrf:
37 | distribution: uniform
38 | min: 0.01
39 | max: 1.0
40 | momentum:
41 | distribution: uniform
42 | min: 0.6
43 | max: 0.98
44 | weight_decay:
45 | distribution: uniform
46 | min: 0.0
47 | max: 0.001
48 | warmup_epochs:
49 | distribution: uniform
50 | min: 0.0
51 | max: 5.0
52 | warmup_momentum:
53 | distribution: uniform
54 | min: 0.0
55 | max: 0.95
56 | warmup_bias_lr:
57 | distribution: uniform
58 | min: 0.0
59 | max: 0.2
60 | box:
61 | distribution: uniform
62 | min: 0.02
63 | max: 0.2
64 | cls:
65 | distribution: uniform
66 | min: 0.2
67 | max: 4.0
68 | cls_pw:
69 | distribution: uniform
70 | min: 0.5
71 | max: 2.0
72 | obj:
73 | distribution: uniform
74 | min: 0.2
75 | max: 4.0
76 | obj_pw:
77 | distribution: uniform
78 | min: 0.5
79 | max: 2.0
80 | iou_t:
81 | distribution: uniform
82 | min: 0.1
83 | max: 0.7
84 | anchor_t:
85 | distribution: uniform
86 | min: 2.0
87 | max: 8.0
88 | fl_gamma:
89 | distribution: uniform
90 | min: 0.0
91 | max: 4.0
92 | hsv_h:
93 | distribution: uniform
94 | min: 0.0
95 | max: 0.1
96 | hsv_s:
97 | distribution: uniform
98 | min: 0.0
99 | max: 0.9
100 | hsv_v:
101 | distribution: uniform
102 | min: 0.0
103 | max: 0.9
104 | degrees:
105 | distribution: uniform
106 | min: 0.0
107 | max: 45.0
108 | translate:
109 | distribution: uniform
110 | min: 0.0
111 | max: 0.9
112 | scale:
113 | distribution: uniform
114 | min: 0.0
115 | max: 0.9
116 | shear:
117 | distribution: uniform
118 | min: 0.0
119 | max: 10.0
120 | perspective:
121 | distribution: uniform
122 | min: 0.0
123 | max: 0.001
124 | flipud:
125 | distribution: uniform
126 | min: 0.0
127 | max: 1.0
128 | fliplr:
129 | distribution: uniform
130 | min: 0.0
131 | max: 1.0
132 | mosaic:
133 | distribution: uniform
134 | min: 0.0
135 | max: 1.0
136 | mixup:
137 | distribution: uniform
138 | min: 0.0
139 | max: 1.0
140 | copy_paste:
141 | distribution: uniform
142 | min: 0.0
143 | max: 1.0
144 |
--------------------------------------------------------------------------------
/voc_label.py:
--------------------------------------------------------------------------------
1 | import xml.etree.ElementTree as ET
2 | import pickle
3 | import os
4 | import os.path
5 | from os import listdir, getcwd
6 | from os.path import join
sets = ['train', 'test','val']  # dataset splits to convert (datasets/mydata/<split>.txt)
classes = ['person']  # class names; the index in this list becomes the YOLO class id
def convert(size, box):
    """Convert a VOC box (xmin, xmax, ymin, ymax) to normalized YOLO format.

    `size` is the image (width, height); the returned tuple is
    (x_center, y_center, width, height), each scaled into [0, 1].
    """
    scale_w = 1. / size[0]
    scale_h = 1. / size[1]
    x_center = (box[0] + box[1]) / 2.0 * scale_w
    y_center = (box[2] + box[3]) / 2.0 * scale_h
    width = (box[1] - box[0]) * scale_w
    height = (box[3] - box[2]) * scale_h
    return (x_center, y_center, width, height)
def convert_annotation(image_id):
    """Convert one VOC XML annotation into a YOLO label file.

    Reads datasets/mydata/Annotations/<image_id>.xml and writes
    datasets/mydata/labels/<image_id>.txt containing one
    "cls_id x_center y_center width height" line per kept object.
    Objects whose class is not in `classes`, or marked difficult, are
    skipped. Does nothing if the XML file does not exist.
    """
    xml_path = 'datasets/mydata/Annotations/%s.xml' % (image_id)
    if not os.path.isfile(xml_path):
        return
    # Context managers guarantee both handles are closed (the original
    # leaked them).
    with open(xml_path) as in_file, \
            open('datasets/mydata/labels/%s.txt' % (image_id), 'w') as out_file:
        tree = ET.parse(in_file)
        root = tree.getroot()
        size = root.find('size')
        w = int(size.find('width').text)
        h = int(size.find('height').text)
        for obj in root.iter('object'):
            # Robustness fix: a missing <difficult> tag previously raised
            # AttributeError; treat absence as "not difficult".
            difficult_node = obj.find('difficult')
            difficult = difficult_node.text if difficult_node is not None else '0'
            cls = obj.find('name').text
            if cls not in classes or int(difficult) == 1:
                continue
            cls_id = classes.index(cls)
            xmlbox = obj.find('bndbox')
            b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text),
                 float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text))
            bb = convert((w, h), b)
            out_file.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')
# Script driver: for each split, rewrite datasets/mydata/<split>.txt from a
# whitespace-separated list of image ids into a list of image paths, and
# generate the YOLO label file for each id.
wd = getcwd()
print(wd)
# Hoisted out of the loop (it is loop-invariant); exist_ok avoids the
# separate os.path.exists() check.
os.makedirs('datasets/mydata/labels/', exist_ok=True)
for image_set in sets:
    list_path = 'datasets/mydata/%s.txt' % (image_set)
    # Read all ids first — the original leaked this handle via open().read() —
    # then reopen the same path for writing the image-path list.
    with open(list_path) as id_file:
        image_ids = id_file.read().strip().split()
    with open(list_path, 'w') as list_file:
        for image_id in image_ids:
            list_file.write('datasets/mydata/images/%s.jpg\n' % (image_id))
            convert_annotation(image_id)
--------------------------------------------------------------------------------