├── .gitignore
├── .idea
├── .gitignore
├── inspectionProfiles
│ └── profiles_settings.xml
├── misc.xml
├── modules.xml
├── other.xml
├── vcs.xml
└── win10_yolov5_deepsort_counting.iml
├── Arial.ttf
├── README.md
├── cover.png
├── deep_sort
├── configs
│ └── deep_sort.yaml
├── deep_sort
│ ├── README.md
│ ├── __init__.py
│ ├── deep
│ │ ├── __init__.py
│ │ ├── checkpoint
│ │ │ ├── .gitkeep
│ │ │ └── ckpt.t7
│ │ ├── evaluate.py
│ │ ├── feature_extractor.py
│ │ ├── model.py
│ │ ├── original_model.py
│ │ ├── test.py
│ │ ├── train.jpg
│ │ └── train.py
│ ├── deep_sort.py
│ └── sort
│ │ ├── __init__.py
│ │ ├── detection.py
│ │ ├── iou_matching.py
│ │ ├── kalman_filter.py
│ │ ├── linear_assignment.py
│ │ ├── nn_matching.py
│ │ ├── preprocessing.py
│ │ ├── track.py
│ │ └── tracker.py
└── utils
│ ├── __init__.py
│ ├── asserts.py
│ ├── draw.py
│ ├── evaluation.py
│ ├── io.py
│ ├── json_logger.py
│ ├── log.py
│ ├── parser.py
│ └── tools.py
├── detector.py
├── main.py
├── models
├── __init__.py
├── common.py
├── experimental.py
├── hub
│ ├── anchors.yaml
│ ├── yolov3-spp.yaml
│ ├── yolov3-tiny.yaml
│ ├── yolov3.yaml
│ ├── yolov5-bifpn.yaml
│ ├── yolov5-fpn.yaml
│ ├── yolov5-p2.yaml
│ ├── yolov5-p34.yaml
│ ├── yolov5-p6.yaml
│ ├── yolov5-p7.yaml
│ ├── yolov5-panet.yaml
│ ├── yolov5l6.yaml
│ ├── yolov5m6.yaml
│ ├── yolov5n6.yaml
│ ├── yolov5s-ghost.yaml
│ ├── yolov5s-transformer.yaml
│ ├── yolov5s6.yaml
│ └── yolov5x6.yaml
├── tf.py
├── yolo.py
├── yolov5l.yaml
├── yolov5m.yaml
├── yolov5n.yaml
├── yolov5s.yaml
└── yolov5x.yaml
├── requirements.txt
├── tracker.py
├── utils
├── __init__.py
├── activations.py
├── augmentations.py
├── autoanchor.py
├── autobatch.py
├── aws
│ ├── __init__.py
│ ├── mime.sh
│ ├── resume.py
│ └── userdata.sh
├── benchmarks.py
├── callbacks.py
├── datasets.py
├── downloads.py
├── flask_rest_api
│ ├── README.md
│ ├── example_request.py
│ └── restapi.py
├── general.py
├── google_app_engine
│ ├── Dockerfile
│ ├── additional_requirements.txt
│ └── app.yaml
├── loggers
│ ├── __init__.py
│ └── wandb
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── log_dataset.py
│ │ ├── sweep.py
│ │ ├── sweep.yaml
│ │ └── wandb_utils.py
├── loss.py
├── metrics.py
├── plots.py
└── torch_utils.py
├── video
└── test.mp4
└── weights
└── yolov5m.pt
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 |
84 | # pyenv
85 | .python-version
86 |
87 | # pipenv
88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
91 | # install all needed dependencies.
92 | #Pipfile.lock
93 |
94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95 | __pypackages__/
96 |
97 | # Celery stuff
98 | celerybeat-schedule
99 | celerybeat.pid
100 |
101 | # SageMath parsed files
102 | *.sage.py
103 |
104 | # Environments
105 | .env
106 | .venv
107 | env/
108 | venv/
109 | ENV/
110 | env.bak/
111 | venv.bak/
112 |
113 | # Spyder project settings
114 | .spyderproject
115 | .spyproject
116 |
117 | # Rope project settings
118 | .ropeproject
119 |
120 | # mkdocs documentation
121 | /site
122 |
123 | # mypy
124 | .mypy_cache/
125 | .dmypy.json
126 | dmypy.json
127 |
128 | # Pyre type checker
129 | .pyre/
130 |
--------------------------------------------------------------------------------
/.idea/.gitignore:
--------------------------------------------------------------------------------
1 | # Default ignored files
2 | /shelf/
3 | /workspace.xml
4 | # Editor-based HTTP Client requests
5 | /httpRequests/
6 | # Datasource local storage ignored files
7 | /dataSources/
8 | /dataSources.local.xml
9 |
--------------------------------------------------------------------------------
/.idea/inspectionProfiles/profiles_settings.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/.idea/misc.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/.idea/other.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/.idea/win10_yolov5_deepsort_counting.iml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
--------------------------------------------------------------------------------
/Arial.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dyh/win10_yolov5_deepsort_counting/adb686ee839e89177990c27f5da35bfae7ab4b9b/Arial.ttf
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # win10版本 yolov5 deepsort 行人 车辆 跟踪 检测 计数
2 |
3 | ## 应B站上同学们要求在 win10 运行
4 |
5 | - 更新到 python 3.9.10,请不要安装更高版本。
6 | - 更新到 yolov5 v6.1版。使用的权重文件可以在此下载:https://github.com/ultralytics/yolov5/releases/tag/v6.1
7 | - 更新到 CUDA 11.3+
8 | - 建议保留 Arial.ttf 文件,或在首次运行时由 yolov5 自动下载。
9 |
10 |
11 | ## 功能
12 | - 实现了 出/入 分别计数。
13 | - 显示检测类别。
14 | - 默认是 南/北 方向检测,若要检测不同位置和方向,可在 main.py 文件第13行和21行,修改2个polygon的点。
15 | - 默认检测类别:行人、自行车、小汽车、摩托车、公交车、卡车。
16 | - 检测类别可在 detector.py 文件第60行修改。
17 |
18 |
19 | ### 视频
20 |
21 | bilibili
22 |
23 | [](https://www.bilibili.com/video/BV13Z4y1C7Dt/ "bilibili")
24 |
25 |
26 | ## 运行环境
27 |
28 | - python 3.9.10,pip 22.0.3+
29 | - pytorch 1.10.2+
30 | - pip3 install -r requirements.txt
31 |
32 |
33 | ## 如何运行
34 |
35 | 0. 确保正确安装 python 和 CUDA
36 |
37 | ```
38 | D:\> python -V
39 |
40 | D:\> nvidia-smi
41 |
42 | D:\> nvcc -V
43 | ```
44 |
45 | 1. 下载代码
46 |
47 | ```
48 | D:\> git clone https://github.com/dyh/win10_yolov5_deepsort_counting.git
49 | ```
50 |
51 | > 因此repo包含weights和mp4文件,若 git clone 速度慢,可直接下载zip文件:https://github.com/dyh/win10_yolov5_deepsort_counting/archive/refs/heads/main.zip
52 |
53 | 2. 进入目录
54 |
55 | ```
56 | D:\> cd win10_yolov5_deepsort_counting
57 | ```
58 |
59 | 3. 创建 python 虚拟环境
60 |
61 | ```
62 | D:\win10_yolov5_deepsort_counting> python -m venv venv
63 | ```
64 |
65 | 4. 激活虚拟环境
66 |
67 | ```
68 | D:\win10_yolov5_deepsort_counting> venv\Scripts\activate
69 | ```
70 |
71 | 5. 升级pip
72 |
73 | ```
74 | (venv) D:\win10_yolov5_deepsort_counting> python -m pip install --upgrade pip
75 | ```
76 |
77 | 6. 安装pytorch
78 |
79 | > 根据你的操作系统、虚拟环境以及CUDA版本,在 https://pytorch.org/get-started/locally/ 找到对应的安装命令。我的环境是 win10、pip、CUDA 11.6。
80 |
81 | ```
82 | (venv) D:\win10_yolov5_deepsort_counting> pip3 install torch==1.10.2+cu113 torchvision==0.11.3+cu113 torchaudio===0.10.2+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html
83 | ```
84 |
85 | 7. 安装软件包
86 |
87 | ```
88 | (venv) D:\win10_yolov5_deepsort_counting> pip3 install -r requirements.txt
89 | ```
90 |
91 | 8. 在 main.py 文件中第66行,设置要检测的视频文件路径,默认为 './video/test.mp4'
92 |
93 | > 140MB的测试视频可以在这里下载:https://pan.baidu.com/s/1qHNGGpX1QD6zHyNTqWvg1w 提取码: 8ufq
94 |
95 | ```
96 | capture = cv2.VideoCapture(r'video\test.mp4')
97 | ```
98 |
99 | 9. 运行程序
100 |
101 | ```
102 | (venv) D:\win10_yolov5_deepsort_counting> python main.py
103 | ```
104 |
105 |
106 | ## 使用框架
107 |
108 | - https://github.com/Sharpiless/Yolov5-deepsort-inference
109 | - https://github.com/ultralytics/yolov5/
110 | - https://github.com/ZQPei/deep_sort_pytorch
111 |
--------------------------------------------------------------------------------
/cover.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dyh/win10_yolov5_deepsort_counting/adb686ee839e89177990c27f5da35bfae7ab4b9b/cover.png
--------------------------------------------------------------------------------
/deep_sort/configs/deep_sort.yaml:
--------------------------------------------------------------------------------
1 | DEEPSORT:
2 | REID_CKPT: "deep_sort/deep_sort/deep/checkpoint/ckpt.t7"
3 | MAX_DIST: 0.2
4 | MIN_CONFIDENCE: 0.3
5 | NMS_MAX_OVERLAP: 0.5
6 | MAX_IOU_DISTANCE: 0.7
7 | MAX_AGE: 70
8 | N_INIT: 3
9 | NN_BUDGET: 100
10 |
11 |
--------------------------------------------------------------------------------
/deep_sort/deep_sort/README.md:
--------------------------------------------------------------------------------
1 | # Deep Sort
2 |
3 | This is the implementation of deep sort with pytorch.
--------------------------------------------------------------------------------
/deep_sort/deep_sort/__init__.py:
--------------------------------------------------------------------------------
1 | from .deep_sort import DeepSort
2 |
3 |
4 | __all__ = ['DeepSort', 'build_tracker']
5 |
6 |
def build_tracker(cfg, use_cuda):
    """Construct a DeepSort tracker from a parsed config object.

    Args:
        cfg: config namespace exposing a DEEPSORT section (e.g. loaded
            from deep_sort/configs/deep_sort.yaml).
        use_cuda: forwarded to the appearance-feature extractor.

    Returns:
        A ready-to-use DeepSort instance.
    """
    ds = cfg.DEEPSORT
    return DeepSort(
        ds.REID_CKPT,
        max_dist=ds.MAX_DIST,
        min_confidence=ds.MIN_CONFIDENCE,
        nms_max_overlap=ds.NMS_MAX_OVERLAP,
        max_iou_distance=ds.MAX_IOU_DISTANCE,
        max_age=ds.MAX_AGE,
        n_init=ds.N_INIT,
        nn_budget=ds.NN_BUDGET,
        use_cuda=use_cuda,
    )
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
--------------------------------------------------------------------------------
/deep_sort/deep_sort/deep/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dyh/win10_yolov5_deepsort_counting/adb686ee839e89177990c27f5da35bfae7ab4b9b/deep_sort/deep_sort/deep/__init__.py
--------------------------------------------------------------------------------
/deep_sort/deep_sort/deep/checkpoint/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dyh/win10_yolov5_deepsort_counting/adb686ee839e89177990c27f5da35bfae7ab4b9b/deep_sort/deep_sort/deep/checkpoint/.gitkeep
--------------------------------------------------------------------------------
/deep_sort/deep_sort/deep/checkpoint/ckpt.t7:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dyh/win10_yolov5_deepsort_counting/adb686ee839e89177990c27f5da35bfae7ab4b9b/deep_sort/deep_sort/deep/checkpoint/ckpt.t7
--------------------------------------------------------------------------------
/deep_sort/deep_sort/deep/evaluate.py:
--------------------------------------------------------------------------------
"""Evaluate saved re-ID features: top-1 accuracy of query-vs-gallery matching."""
import torch

# features.pth is produced by test.py: query/gallery features and labels.
features = torch.load("features.pth")
qf, ql = features["qf"], features["ql"]
gf, gl = features["gf"], features["gl"]

# Inner-product similarity (the reid head emits L2-normalised vectors),
# then take the single best gallery match per query row.
scores = qf.mm(gf.t())
res = scores.topk(5, dim=1)[1][:, 0]
top1correct = gl[res].eq(ql).sum().item()

print("Acc top1:{:.3f}".format(top1correct / ql.size(0)))
14 |
15 |
16 |
--------------------------------------------------------------------------------
/deep_sort/deep_sort/deep/feature_extractor.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torchvision.transforms as transforms
3 | import numpy as np
4 | import cv2
5 | import logging
6 |
7 | from .model import Net
8 |
class Extractor(object):
    """Turn image crops into appearance embeddings with the small re-ID Net.

    Loads weights from `model_path` and runs inference on CUDA when
    available (and requested), otherwise on CPU.
    """

    def __init__(self, model_path, use_cuda=True):
        self.net = Net(reid=True)
        self.device = "cuda" if torch.cuda.is_available() and use_cuda else "cpu"
        state_dict = torch.load(model_path, map_location=lambda storage, loc: storage)['net_dict']
        self.net.load_state_dict(state_dict)
        logger = logging.getLogger("root.tracker")
        logger.info("Loading weights from {}... Done!".format(model_path))
        self.net.to(self.device)
        # Network input resolution as (width, height) for cv2.resize.
        self.size = (64, 128)
        self.norm = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ])

    def _preprocess(self, im_crops):
        """Scale crops to [0, 1], resize, normalise, and stack into a batch."""
        def _resize(im, size):
            return cv2.resize(im.astype(np.float32) / 255., size)

        tensors = [self.norm(_resize(crop, self.size)).unsqueeze(0) for crop in im_crops]
        return torch.cat(tensors, dim=0).float()

    def __call__(self, im_crops):
        """Return an (N, feat_dim) numpy array of embeddings for the crops."""
        batch = self._preprocess(im_crops)
        with torch.no_grad():
            batch = batch.to(self.device)
            features = self.net(batch)
        return features.cpu().numpy()
48 |
49 |
if __name__ == '__main__':
    # Smoke test: extract a feature for one BGR->RGB image.
    img = cv2.imread("demo.jpg")[:, :, (2, 1, 0)]
    extr = Extractor("checkpoint/ckpt.t7")
    # BUG FIX: Extractor expects a *list* of crops; passing the bare
    # ndarray made _preprocess iterate over image rows instead of crops.
    feature = extr([img])
    print(feature.shape)
55 |
56 |
--------------------------------------------------------------------------------
/deep_sort/deep_sort/deep/model.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 |
class BasicBlock(nn.Module):
    """ResNet-style residual block with optional 2x spatial downsampling.

    When `is_downsample` is set (or the channel counts differ) a 1x1
    projection shortcut aligns the identity path with the residual path.
    """

    def __init__(self, c_in, c_out, is_downsample=False):
        super(BasicBlock, self).__init__()
        self.is_downsample = is_downsample
        stride = 2 if is_downsample else 1
        self.conv1 = nn.Conv2d(c_in, c_out, 3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(c_out)
        self.relu = nn.ReLU(True)
        self.conv2 = nn.Conv2d(c_out, c_out, 3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(c_out)
        if is_downsample:
            # Strided 1x1 conv shrinks the identity path to match.
            self.downsample = nn.Sequential(
                nn.Conv2d(c_in, c_out, 1, stride=2, bias=False),
                nn.BatchNorm2d(c_out),
            )
        elif c_in != c_out:
            # Channel-only projection; also forces the shortcut branch on.
            self.downsample = nn.Sequential(
                nn.Conv2d(c_in, c_out, 1, stride=1, bias=False),
                nn.BatchNorm2d(c_out),
            )
            self.is_downsample = True

    def forward(self, x):
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        if self.is_downsample:
            x = self.downsample(x)
        return F.relu(x.add(out), True)
38 |
def make_layers(c_in, c_out, repeat_times, is_downsample=False):
    """Stack `repeat_times` BasicBlocks; only the first may downsample."""
    blocks = [
        BasicBlock(c_in if i == 0 else c_out,
                   c_out,
                   is_downsample=is_downsample if i == 0 else False)
        for i in range(repeat_times)
    ]
    return nn.Sequential(*blocks)
47 |
class Net(nn.Module):
    """Small ResNet-like re-ID backbone for 128x64 (HxW) person crops.

    reid=False: returns class logits; reid=True: returns L2-normalised
    512-d embeddings suitable for cosine matching.
    """

    def __init__(self, num_classes=751, reid=False):
        super(Net, self).__init__()
        # Input: 3 x 128 x 64
        self.conv = nn.Sequential(
            nn.Conv2d(3, 64, 3, stride=1, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(3, 2, padding=1),
        )
        self.layer1 = make_layers(64, 64, 2, False)   # 64 x 64 x 32
        self.layer2 = make_layers(64, 128, 2, True)   # 128 x 32 x 16
        self.layer3 = make_layers(128, 256, 2, True)  # 256 x 16 x 8
        self.layer4 = make_layers(256, 512, 2, True)  # 512 x 8 x 4
        self.avgpool = nn.AvgPool2d((8, 4), 1)        # 512 x 1 x 1
        self.reid = reid
        self.classifier = nn.Sequential(
            nn.Linear(512, 256),
            nn.BatchNorm1d(256),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(256, num_classes),
        )

    def forward(self, x):
        x = self.conv(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)  # flatten to (B, 512)
        if self.reid:
            # Unit-norm embedding for cosine similarity.
            return x.div(x.norm(p=2, dim=1, keepdim=True))
        return self.classifier(x)
96 |
97 |
if __name__ == '__main__':
    # Smoke test: forward a dummy batch and report the output shape.
    # (Drops the hard dependency on the ipdb debugger, which is a
    # dev-only third-party package and crashes when not installed.)
    net = Net()
    x = torch.randn(4, 3, 128, 64)
    y = net(x)
    print(y.shape)
103 |
104 |
105 |
--------------------------------------------------------------------------------
/deep_sort/deep_sort/deep/original_model.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 |
class BasicBlock(nn.Module):
    """ResNet-style residual block with optional 2x spatial downsampling.

    When `is_downsample` is set (or the channel counts differ) a 1x1
    projection shortcut aligns the identity path with the residual path.
    """

    def __init__(self, c_in, c_out, is_downsample=False):
        super(BasicBlock, self).__init__()
        self.is_downsample = is_downsample
        stride = 2 if is_downsample else 1
        self.conv1 = nn.Conv2d(c_in, c_out, 3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(c_out)
        self.relu = nn.ReLU(True)
        self.conv2 = nn.Conv2d(c_out, c_out, 3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(c_out)
        if is_downsample:
            # Strided 1x1 conv shrinks the identity path to match.
            self.downsample = nn.Sequential(
                nn.Conv2d(c_in, c_out, 1, stride=2, bias=False),
                nn.BatchNorm2d(c_out),
            )
        elif c_in != c_out:
            # Channel-only projection; also forces the shortcut branch on.
            self.downsample = nn.Sequential(
                nn.Conv2d(c_in, c_out, 1, stride=1, bias=False),
                nn.BatchNorm2d(c_out),
            )
            self.is_downsample = True

    def forward(self, x):
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        if self.is_downsample:
            x = self.downsample(x)
        return F.relu(x.add(out), True)
38 |
def make_layers(c_in, c_out, repeat_times, is_downsample=False):
    """Stack `repeat_times` BasicBlocks; only the first may downsample."""
    blocks = [
        BasicBlock(c_in if i == 0 else c_out,
                   c_out,
                   is_downsample=is_downsample if i == 0 else False)
        for i in range(repeat_times)
    ]
    return nn.Sequential(*blocks)
47 |
class Net(nn.Module):
    """Original deep-SORT appearance network (ELU variant, 128-d output).

    reid=False: returns class logits; reid=True: returns an L2-normalised
    128-d embedding computed via dropout + linear only (the dense head's
    BN/ELU tail is skipped on the embedding path).
    """

    def __init__(self, num_classes=625, reid=False):
        super(Net, self).__init__()
        # Input: 3 x 128 x 64
        self.conv = nn.Sequential(
            nn.Conv2d(3, 32, 3, stride=1, padding=1),
            nn.BatchNorm2d(32),
            nn.ELU(inplace=True),
            nn.Conv2d(32, 32, 3, stride=1, padding=1),
            nn.BatchNorm2d(32),
            nn.ELU(inplace=True),
            nn.MaxPool2d(3, 2, padding=1),
        )
        self.layer1 = make_layers(32, 32, 2, False)   # 32 x 64 x 32
        self.layer2 = make_layers(32, 64, 2, True)    # 64 x 32 x 16
        self.layer3 = make_layers(64, 128, 2, True)   # 128 x 16 x 8
        self.dense = nn.Sequential(
            nn.Dropout(p=0.6),
            nn.Linear(128 * 16 * 8, 128),
            nn.BatchNorm1d(128),
            nn.ELU(inplace=True),
        )
        self.reid = reid
        # NOTE(review): batch_norm is never used in forward — kept for
        # checkpoint state-dict compatibility.
        self.batch_norm = nn.BatchNorm1d(128)
        self.classifier = nn.Sequential(
            nn.Linear(128, num_classes),
        )

    def forward(self, x):
        x = self.conv(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = x.view(x.size(0), -1)  # flatten to (B, 128*16*8)
        if self.reid:
            # Embedding path: dropout + linear only, then L2 normalisation.
            x = self.dense[0](x)
            x = self.dense[1](x)
            return x.div(x.norm(p=2, dim=1, keepdim=True))
        x = self.dense(x)
        return self.classifier(x)
98 |
99 |
if __name__ == '__main__':
    # Smoke test: forward a dummy batch and report the output shape.
    # (Drops the hard dependency on the ipdb debugger, which is a
    # dev-only third-party package and crashes when not installed.)
    net = Net(reid=True)
    x = torch.randn(4, 3, 128, 64)
    y = net(x)
    print(y.shape)
105 |
106 |
107 |
--------------------------------------------------------------------------------
/deep_sort/deep_sort/deep/test.py:
--------------------------------------------------------------------------------
"""Extract query and gallery re-ID features from Market1501-style folders
and save them to features.pth (consumed by evaluate.py)."""
import argparse
import os

import torch
import torch.backends.cudnn as cudnn
import torchvision

from model import Net

parser = argparse.ArgumentParser(description="Train on market1501")
parser.add_argument("--data-dir", default='data', type=str)
parser.add_argument("--no-cuda", action="store_true")
parser.add_argument("--gpu-id", default=0, type=int)
args = parser.parse_args()

# device selection: GPU only when present and not explicitly disabled
use_gpu = torch.cuda.is_available() and not args.no_cuda
device = "cuda:{}".format(args.gpu_id) if use_gpu else "cpu"
if use_gpu:
    cudnn.benchmark = True

# data loaders over the query/ and gallery/ image folders
root = args.data_dir
transform = torchvision.transforms.Compose([
    torchvision.transforms.Resize((128, 64)),
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])


def _make_loader(subdir):
    """Deterministic (unshuffled) loader over one ImageFolder split."""
    dataset = torchvision.datasets.ImageFolder(os.path.join(root, subdir), transform=transform)
    return torch.utils.data.DataLoader(dataset, batch_size=64, shuffle=False)


queryloader = _make_loader("query")
galleryloader = _make_loader("gallery")

# net definition: embedding head with weights from the training checkpoint
net = Net(reid=True)
assert os.path.isfile("./checkpoint/ckpt.t7"), "Error: no checkpoint file found!"
print('Loading from checkpoint/ckpt.t7')
checkpoint = torch.load("./checkpoint/ckpt.t7")
net.load_state_dict(checkpoint['net_dict'], strict=False)
net.eval()
net.to(device)


def _extract(loader):
    """Run the net over a loader; return (features, labels) CPU tensors."""
    feats = torch.tensor([]).float()
    labs = torch.tensor([]).long()
    for inputs, labels in loader:
        inputs = inputs.to(device)
        feats = torch.cat((feats, net(inputs).cpu()), dim=0)
        labs = torch.cat((labs, labels))
    return feats, labs


with torch.no_grad():
    query_features, query_labels = _extract(queryloader)
    gallery_features, gallery_labels = _extract(galleryloader)

# NOTE(review): shifts gallery labels by 2 — presumably to line gallery
# class indices up with query ones (extra folders in gallery/); confirm
# against the dataset layout before changing.
gallery_labels -= 2

# save features for evaluate.py
features = {
    "qf": query_features,
    "ql": query_labels,
    "gf": gallery_features,
    "gl": gallery_labels,
}
torch.save(features, "features.pth")
--------------------------------------------------------------------------------
/deep_sort/deep_sort/deep/train.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dyh/win10_yolov5_deepsort_counting/adb686ee839e89177990c27f5da35bfae7ab4b9b/deep_sort/deep_sort/deep/train.jpg
--------------------------------------------------------------------------------
/deep_sort/deep_sort/deep/train.py:
--------------------------------------------------------------------------------
"""Train the small re-ID Net (see model.py) on Market1501-style data."""
import argparse
import os
import time

import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.backends.cudnn as cudnn
import torchvision

from model import Net

parser = argparse.ArgumentParser(description="Train on market1501")
parser.add_argument("--data-dir", default='data', type=str)
parser.add_argument("--no-cuda", action="store_true")
parser.add_argument("--gpu-id", default=0, type=int)
parser.add_argument("--lr", default=0.1, type=float)
parser.add_argument("--interval", '-i', default=20, type=int)
parser.add_argument('--resume', '-r', action='store_true')
args = parser.parse_args()

# device
device = "cuda:{}".format(args.gpu_id) if torch.cuda.is_available() and not args.no_cuda else "cpu"
if torch.cuda.is_available() and not args.no_cuda:
    cudnn.benchmark = True

# data loading
root = args.data_dir
train_dir = os.path.join(root, "train")
test_dir = os.path.join(root, "test")
transform_train = torchvision.transforms.Compose([
    torchvision.transforms.RandomCrop((128, 64), padding=4),
    torchvision.transforms.RandomHorizontalFlip(),
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
transform_test = torchvision.transforms.Compose([
    torchvision.transforms.Resize((128, 64)),
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
trainloader = torch.utils.data.DataLoader(
    torchvision.datasets.ImageFolder(train_dir, transform=transform_train),
    batch_size=64, shuffle=True
)
testloader = torch.utils.data.DataLoader(
    torchvision.datasets.ImageFolder(test_dir, transform=transform_test),
    batch_size=64, shuffle=True
)
num_classes = max(len(trainloader.dataset.classes), len(testloader.dataset.classes))

# net definition
start_epoch = 0
# BUG FIX: best_acc was initialised to 0. *after* the resume branch, which
# clobbered the resumed checkpoint['acc'] and let the next test() overwrite
# the checkpoint even with a worse accuracy. Initialise it before resuming.
best_acc = 0.
net = Net(num_classes=num_classes)
if args.resume:
    assert os.path.isfile("./checkpoint/ckpt.t7"), "Error: no checkpoint file found!"
    print('Loading from checkpoint/ckpt.t7')
    checkpoint = torch.load("./checkpoint/ckpt.t7")
    net_dict = checkpoint['net_dict']
    net.load_state_dict(net_dict)
    best_acc = checkpoint['acc']
    start_epoch = checkpoint['epoch']
net.to(device)

# loss and optimizer
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(net.parameters(), args.lr, momentum=0.9, weight_decay=5e-4)
70 |
# train function for each epoch
def train(epoch):
    """Run one training epoch; return (mean epoch loss, error rate)."""
    print("\nEpoch : %d" % (epoch + 1))
    net.train()
    interval = args.interval
    running_loss = 0.   # loss accumulated since the last log line
    epoch_loss = 0.     # loss accumulated over the whole epoch
    correct, total = 0, 0
    start = time.time()
    for idx, (inputs, labels) in enumerate(trainloader):
        # forward
        inputs, labels = inputs.to(device), labels.to(device)
        outputs = net(inputs)
        loss = criterion(outputs, labels)

        # backward
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # accumulate running stats
        running_loss += loss.item()
        epoch_loss += loss.item()
        correct += outputs.max(dim=1)[1].eq(labels).sum().item()
        total += labels.size(0)

        # periodic progress log
        if (idx + 1) % interval == 0:
            end = time.time()
            print("[progress:{:.1f}%]time:{:.2f}s Loss:{:.5f} Correct:{}/{} Acc:{:.3f}%".format(
                100. * (idx + 1) / len(trainloader), end - start, running_loss / interval, correct, total, 100. * correct / total
            ))
            running_loss = 0.
            start = time.time()

    return epoch_loss / len(trainloader), 1. - correct / total
108 |
def test(epoch):
    """Evaluate on the test loader; checkpoint when accuracy improves.

    Returns (mean test loss, error rate).
    """
    global best_acc
    net.eval()
    test_loss = 0.
    correct, total = 0, 0
    start = time.time()
    with torch.no_grad():
        for idx, (inputs, labels) in enumerate(testloader):
            inputs, labels = inputs.to(device), labels.to(device)
            outputs = net(inputs)
            test_loss += criterion(outputs, labels).item()
            correct += outputs.max(dim=1)[1].eq(labels).sum().item()
            total += labels.size(0)

        print("Testing ...")
        end = time.time()
        print("[progress:{:.1f}%]time:{:.2f}s Loss:{:.5f} Correct:{}/{} Acc:{:.3f}%".format(
            100. * (idx + 1) / len(testloader), end - start, test_loss / len(testloader), correct, total, 100. * correct / total
        ))

    # saving checkpoint
    acc = 100. * correct / total
    if acc > best_acc:
        best_acc = acc
        print("Saving parameters to checkpoint/ckpt.t7")
        checkpoint = {
            'net_dict': net.state_dict(),
            'acc': acc,
            'epoch': epoch,
        }
        if not os.path.isdir('checkpoint'):
            os.mkdir('checkpoint')
        torch.save(checkpoint, './checkpoint/ckpt.t7')

    return test_loss / len(testloader), 1. - correct / total
147 |
# plot figure: shared state for the loss/error curves saved to train.jpg
x_epoch = []
record = {'train_loss': [], 'train_err': [], 'test_loss': [], 'test_err': []}
fig = plt.figure()
ax0 = fig.add_subplot(121, title="loss")
ax1 = fig.add_subplot(122, title="top1err")


def draw_curve(epoch, train_loss, train_err, test_loss, test_err):
    """Append this epoch's stats and refresh train.jpg with both curves."""
    global record
    record['train_loss'].append(train_loss)
    record['train_err'].append(train_err)
    record['test_loss'].append(test_loss)
    record['test_err'].append(test_err)
    x_epoch.append(epoch)

    # ax0 shows losses, ax1 shows top-1 error rates
    for axis, tr_key, te_key in ((ax0, 'train_loss', 'test_loss'),
                                 (ax1, 'train_err', 'test_err')):
        axis.plot(x_epoch, record[tr_key], 'bo-', label='train')
        axis.plot(x_epoch, record[te_key], 'ro-', label='val')
    if epoch == 0:
        ax0.legend()
        ax1.legend()
    fig.savefig("train.jpg")
170 |
# lr decay
def lr_decay():
    """Multiply each param group's learning rate by 0.1, logging the new value."""
    global optimizer
    for group in optimizer.param_groups:
        group['lr'] *= 0.1
        lr = group['lr']
        print("Learning rate adjusted to {}".format(lr))
178 |
def main():
    """Train for 40 epochs from start_epoch, decaying the LR every 20."""
    for epoch in range(start_epoch, start_epoch + 40):
        epoch_train_loss, epoch_train_err = train(epoch)
        epoch_test_loss, epoch_test_err = test(epoch)
        draw_curve(epoch, epoch_train_loss, epoch_train_err,
                   epoch_test_loss, epoch_test_err)
        if (epoch + 1) % 20 == 0:
            lr_decay()


if __name__ == '__main__':
    main()
190 |
--------------------------------------------------------------------------------
/deep_sort/deep_sort/deep_sort.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 |
4 | from .deep.feature_extractor import Extractor
5 | from .sort.nn_matching import NearestNeighborDistanceMetric
6 | from .sort.preprocessing import non_max_suppression
7 | from .sort.detection import Detection
8 | from .sort.tracker import Tracker
9 |
10 |
11 | __all__ = ['DeepSort']
12 |
13 |
class DeepSort(object):
    """DeepSORT tracker: appearance features + Kalman-filter association.

    Per frame, detections (center-format boxes with confidences) are
    confidence-filtered, deduplicated with NMS, embedded by a CNN feature
    extractor, and associated to tracks with a cosine nearest-neighbor
    metric plus IOU gating.

    Parameters
    ----------
    model_path : str
        Checkpoint path for the appearance feature extractor.
    max_dist : float
        Maximum cosine distance for an appearance match.
    min_confidence : float
        Detections at or below this confidence are discarded.
    nms_max_overlap : float
        Boxes overlapping more than this are suppressed by NMS.
    max_iou_distance : float
        IOU gating threshold used during association.
    max_age : int
        Frames a track may go unmatched before deletion.
    n_init : int
        Consecutive hits required before a track is confirmed.
    nn_budget : int
        Maximum stored appearance samples per track identity.
    use_cuda : bool
        Run the feature extractor on GPU when True.
    """

    def __init__(self, model_path, max_dist=0.2, min_confidence=0.3, nms_max_overlap=1.0, max_iou_distance=0.7, max_age=70, n_init=3, nn_budget=100, use_cuda=True):
        self.min_confidence = min_confidence
        self.nms_max_overlap = nms_max_overlap

        self.extractor = Extractor(model_path, use_cuda=use_cuda)

        # BUG FIX: `nn_budget` was previously overwritten with a hard-coded
        # 100 here, silently ignoring the constructor argument. Honor the
        # parameter (default value unchanged, so callers see no difference).
        metric = NearestNeighborDistanceMetric("cosine", max_dist, nn_budget)
        self.tracker = Tracker(metric, max_iou_distance=max_iou_distance, max_age=max_age, n_init=n_init)

    def update(self, bbox_xywh, confidences, ori_img):
        """Advance the tracker one frame.

        Parameters
        ----------
        bbox_xywh : ndarray | torch.Tensor
            Nx4 boxes as (center x, center y, width, height).
        confidences : array_like
            Per-box detector confidence scores.
        ori_img : ndarray
            Full frame image (H x W x C) the boxes refer to.

        Returns
        -------
        ndarray | list
            Mx5 int array of (x1, y1, x2, y2, track_id) for confirmed,
            recently-updated tracks; an empty list when there are none
            (preserves the original return contract).
        """
        self.height, self.width = ori_img.shape[:2]
        # generate detections
        features = self._get_features(bbox_xywh, ori_img)
        bbox_tlwh = self._xywh_to_tlwh(bbox_xywh)
        detections = [Detection(bbox_tlwh[i], conf, features[i]) for i, conf in enumerate(confidences) if conf > self.min_confidence]

        # run non-maximum suppression on the surviving detections
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        indices = non_max_suppression(boxes, self.nms_max_overlap, scores)
        detections = [detections[i] for i in indices]

        # update tracker
        self.tracker.predict()
        self.tracker.update(detections)

        # output bbox identities
        outputs = []
        for track in self.tracker.tracks:
            # Only report confirmed tracks that matched in the current frame.
            if not track.is_confirmed() or track.time_since_update > 1:
                continue
            box = track.to_tlwh()
            x1, y1, x2, y2 = self._tlwh_to_xyxy(box)
            track_id = track.track_id
            # BUG FIX: np.int was removed in NumPy 1.24; builtin int is the
            # documented equivalent.
            outputs.append(np.array([x1, y1, x2, y2, track_id], dtype=int))
        if len(outputs) > 0:
            outputs = np.stack(outputs, axis=0)
        return outputs

    @staticmethod
    def _xywh_to_tlwh(bbox_xywh):
        """Convert Nx4 boxes from (center x, center y, w, h) to
        (top-left x, top-left y, w, h).

        Accepts ndarray, torch.Tensor, or any array-like (lists previously
        raised NameError; they are now converted via np.asarray).
        """
        if isinstance(bbox_xywh, torch.Tensor):
            bbox_tlwh = bbox_xywh.clone()
        else:
            bbox_xywh = np.asarray(bbox_xywh)
            bbox_tlwh = bbox_xywh.copy()
        bbox_tlwh[:, 0] = bbox_xywh[:, 0] - bbox_xywh[:, 2] / 2.
        bbox_tlwh[:, 1] = bbox_xywh[:, 1] - bbox_xywh[:, 3] / 2.
        return bbox_tlwh

    def _xywh_to_xyxy(self, bbox_xywh):
        """Convert one (center x, center y, w, h) box to (x1, y1, x2, y2),
        clipped to the current frame bounds."""
        x, y, w, h = bbox_xywh
        x1 = max(int(x - w / 2), 0)
        x2 = min(int(x + w / 2), self.width - 1)
        y1 = max(int(y - h / 2), 0)
        y2 = min(int(y + h / 2), self.height - 1)
        return x1, y1, x2, y2

    def _tlwh_to_xyxy(self, bbox_tlwh):
        """Convert one (top-left x, top-left y, w, h) box to (x1, y1, x2, y2),
        clipped to the current frame bounds."""
        x, y, w, h = bbox_tlwh
        x1 = max(int(x), 0)
        x2 = min(int(x + w), self.width - 1)
        y1 = max(int(y), 0)
        y2 = min(int(y + h), self.height - 1)
        return x1, y1, x2, y2

    def _xyxy_to_tlwh(self, bbox_xyxy):
        """Convert one (x1, y1, x2, y2) box to (top-left x, top-left y, w, h)."""
        x1, y1, x2, y2 = bbox_xyxy

        t = x1
        l = y1
        w = int(x2 - x1)
        h = int(y2 - y1)
        return t, l, w, h

    def _get_features(self, bbox_xywh, ori_img):
        """Crop each box from `ori_img` and run the appearance extractor.

        Returns an empty ndarray when there are no boxes, so downstream
        indexing stays safe.
        """
        im_crops = []
        for box in bbox_xywh:
            x1, y1, x2, y2 = self._xywh_to_xyxy(box)
            im = ori_img[y1:y2, x1:x2]
            im_crops.append(im)
        if im_crops:
            features = self.extractor(im_crops)
        else:
            features = np.array([])
        return features
114 |
115 |
116 |
--------------------------------------------------------------------------------
/deep_sort/deep_sort/sort/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dyh/win10_yolov5_deepsort_counting/adb686ee839e89177990c27f5da35bfae7ab4b9b/deep_sort/deep_sort/sort/__init__.py
--------------------------------------------------------------------------------
/deep_sort/deep_sort/sort/detection.py:
--------------------------------------------------------------------------------
1 | # vim: expandtab:ts=4:sw=4
2 | import numpy as np
3 |
4 |
class Detection(object):
    """
    This class represents a bounding box detection in a single image.

    Parameters
    ----------
    tlwh : array_like
        Bounding box in format `(x, y, w, h)` (top-left corner, size).
    confidence : float
        Detector confidence score.
    feature : array_like
        A feature vector that describes the object contained in this image.

    Attributes
    ----------
    tlwh : ndarray
        Bounding box in format `(top left x, top left y, width, height)`.
    confidence : float
        Detector confidence score.
    feature : ndarray
        A feature vector that describes the object contained in this image.

    """

    def __init__(self, tlwh, confidence, feature):
        # BUG FIX: np.float was removed in NumPy 1.24; builtin float gives
        # the same float64 dtype.
        self.tlwh = np.asarray(tlwh, dtype=float)
        self.confidence = float(confidence)
        self.feature = np.asarray(feature, dtype=np.float32)

    def to_tlbr(self):
        """Convert bounding box to format `(min x, min y, max x, max y)`, i.e.,
        `(top left, bottom right)`.
        """
        ret = self.tlwh.copy()
        ret[2:] += ret[:2]
        return ret

    def to_xyah(self):
        """Convert bounding box to format `(center x, center y, aspect ratio,
        height)`, where the aspect ratio is `width / height`.
        """
        ret = self.tlwh.copy()
        ret[:2] += ret[2:] / 2
        ret[2] /= ret[3]
        return ret
50 |
--------------------------------------------------------------------------------
/deep_sort/deep_sort/sort/iou_matching.py:
--------------------------------------------------------------------------------
1 | # vim: expandtab:ts=4:sw=4
2 | from __future__ import absolute_import
3 | import numpy as np
4 | from . import linear_assignment
5 |
6 |
def iou(bbox, candidates):
    """Compute intersection over union between `bbox` and each candidate.

    Parameters
    ----------
    bbox : ndarray
        A single bounding box in `(top left x, top left y, width, height)`
        format.
    candidates : ndarray
        Matrix of candidate bounding boxes (one per row), same format
        as `bbox`.

    Returns
    -------
    ndarray
        IoU in [0, 1] for each candidate; a higher value means a larger
        fraction of `bbox` is covered by that candidate.

    """
    box_tl = bbox[:2]
    box_br = bbox[:2] + bbox[2:]
    cand_tl = candidates[:, :2]
    cand_br = candidates[:, :2] + candidates[:, 2:]

    # Broadcast the single box against every candidate row.
    inter_tl = np.maximum(box_tl, cand_tl)
    inter_br = np.minimum(box_br, cand_br)
    inter_wh = np.maximum(0., inter_br - inter_tl)

    inter_area = inter_wh.prod(axis=1)
    box_area = bbox[2:].prod()
    cand_area = candidates[:, 2:].prod(axis=1)
    return inter_area / (box_area + cand_area - inter_area)
40 |
41 |
def iou_cost(tracks, detections, track_indices=None,
             detection_indices=None):
    """An intersection over union distance metric.

    Parameters
    ----------
    tracks : List[deep_sort.track.Track]
        A list of tracks.
    detections : List[deep_sort.detection.Detection]
        A list of detections.
    track_indices : Optional[List[int]]
        Indices of tracks to be matched. Defaults to all `tracks`.
    detection_indices : Optional[List[int]]
        Indices of detections to be matched. Defaults to all `detections`.

    Returns
    -------
    ndarray
        Cost matrix of shape len(track_indices) x len(detection_indices)
        where entry (i, j) is
        `1 - iou(tracks[track_indices[i]], detections[detection_indices[j]])`.

    """
    if track_indices is None:
        track_indices = np.arange(len(tracks))
    if detection_indices is None:
        detection_indices = np.arange(len(detections))

    cost_matrix = np.zeros((len(track_indices), len(detection_indices)))
    for row, t_idx in enumerate(track_indices):
        track = tracks[t_idx]
        # Tracks that missed the last frame cannot be IOU-matched: their
        # predicted box is unreliable, so gate the whole row.
        if track.time_since_update > 1:
            cost_matrix[row, :] = linear_assignment.INFTY_COST
            continue
        candidates = np.asarray(
            [detections[i].tlwh for i in detection_indices])
        cost_matrix[row, :] = 1. - iou(track.to_tlwh(), candidates)
    return cost_matrix
82 |
--------------------------------------------------------------------------------
/deep_sort/deep_sort/sort/kalman_filter.py:
--------------------------------------------------------------------------------
1 | # vim: expandtab:ts=4:sw=4
2 | import numpy as np
3 | import scipy.linalg
4 |
5 |
6 | """
7 | Table for the 0.95 quantile of the chi-square distribution with N degrees of
8 | freedom (contains values for N=1, ..., 9). Taken from MATLAB/Octave's chi2inv
9 | function and used as Mahalanobis gating threshold.
10 | """
11 | chi2inv95 = {
12 | 1: 3.8415,
13 | 2: 5.9915,
14 | 3: 7.8147,
15 | 4: 9.4877,
16 | 5: 11.070,
17 | 6: 12.592,
18 | 7: 14.067,
19 | 8: 15.507,
20 | 9: 16.919}
21 |
22 |
class KalmanFilter(object):
    """
    A simple Kalman filter for tracking bounding boxes in image space.

    The 8-dimensional state space

        x, y, a, h, vx, vy, va, vh

    contains the bounding box center position (x, y), aspect ratio a, height h,
    and their respective velocities.

    Object motion follows a constant velocity model. The bounding box location
    (x, y, a, h) is taken as direct observation of the state space (linear
    observation model).

    """

    def __init__(self, ):
        ndim, dt = 4, 1.

        # Create Kalman filter model matrices.
        # _motion_mat is the constant-velocity transition F: identity with
        # dt on the position->velocity off-diagonal block.
        self._motion_mat = np.eye(2 * ndim, 2 * ndim)
        for i in range(ndim):
            self._motion_mat[i, ndim + i] = dt
        # _update_mat is the observation matrix H: picks (x, y, a, h) out
        # of the 8-dim state.
        self._update_mat = np.eye(ndim, 2 * ndim)

        # Motion and observation uncertainty are chosen relative to the current
        # state estimate. These weights control the amount of uncertainty in
        # the model. This is a bit hacky.
        self._std_weight_position = 1. / 20
        self._std_weight_velocity = 1. / 160

    def initiate(self, measurement):
        """Create track from unassociated measurement.

        Parameters
        ----------
        measurement : ndarray
            Bounding box coordinates (x, y, a, h) with center position (x, y),
            aspect ratio a, and height h.

        Returns
        -------
        (ndarray, ndarray)
            Returns the mean vector (8 dimensional) and covariance matrix (8x8
            dimensional) of the new track. Unobserved velocities are initialized
            to 0 mean.

        """
        mean_pos = measurement
        mean_vel = np.zeros_like(mean_pos)
        mean = np.r_[mean_pos, mean_vel]

        # Standard deviations scale with the box height (measurement[3]);
        # the aspect-ratio terms use small fixed constants.
        std = [
            2 * self._std_weight_position * measurement[3],
            2 * self._std_weight_position * measurement[3],
            1e-2,
            2 * self._std_weight_position * measurement[3],
            10 * self._std_weight_velocity * measurement[3],
            10 * self._std_weight_velocity * measurement[3],
            1e-5,
            10 * self._std_weight_velocity * measurement[3]]
        covariance = np.diag(np.square(std))
        return mean, covariance

    def predict(self, mean, covariance):
        """Run Kalman filter prediction step.

        Parameters
        ----------
        mean : ndarray
            The 8 dimensional mean vector of the object state at the previous
            time step.
        covariance : ndarray
            The 8x8 dimensional covariance matrix of the object state at the
            previous time step.

        Returns
        -------
        (ndarray, ndarray)
            Returns the mean vector and covariance matrix of the predicted
            state. Unobserved velocities are initialized to 0 mean.

        """
        # Process noise, again height-relative (mean[3] is h).
        std_pos = [
            self._std_weight_position * mean[3],
            self._std_weight_position * mean[3],
            1e-2,
            self._std_weight_position * mean[3]]
        std_vel = [
            self._std_weight_velocity * mean[3],
            self._std_weight_velocity * mean[3],
            1e-5,
            self._std_weight_velocity * mean[3]]
        motion_cov = np.diag(np.square(np.r_[std_pos, std_vel]))

        # x' = F x;  P' = F P F^T + Q
        mean = np.dot(self._motion_mat, mean)
        covariance = np.linalg.multi_dot((
            self._motion_mat, covariance, self._motion_mat.T)) + motion_cov

        return mean, covariance

    def project(self, mean, covariance):
        """Project state distribution to measurement space.

        Parameters
        ----------
        mean : ndarray
            The state's mean vector (8 dimensional array).
        covariance : ndarray
            The state's covariance matrix (8x8 dimensional).

        Returns
        -------
        (ndarray, ndarray)
            Returns the projected mean and covariance matrix of the given state
            estimate.

        """
        # Measurement noise R, height-relative like the process noise.
        std = [
            self._std_weight_position * mean[3],
            self._std_weight_position * mean[3],
            1e-1,
            self._std_weight_position * mean[3]]
        innovation_cov = np.diag(np.square(std))

        # z = H x;  S = H P H^T + R
        mean = np.dot(self._update_mat, mean)
        covariance = np.linalg.multi_dot((
            self._update_mat, covariance, self._update_mat.T))
        return mean, covariance + innovation_cov

    def update(self, mean, covariance, measurement):
        """Run Kalman filter correction step.

        Parameters
        ----------
        mean : ndarray
            The predicted state's mean vector (8 dimensional).
        covariance : ndarray
            The state's covariance matrix (8x8 dimensional).
        measurement : ndarray
            The 4 dimensional measurement vector (x, y, a, h), where (x, y)
            is the center position, a the aspect ratio, and h the height of the
            bounding box.

        Returns
        -------
        (ndarray, ndarray)
            Returns the measurement-corrected state distribution.

        """
        projected_mean, projected_cov = self.project(mean, covariance)

        # Kalman gain K = P H^T S^-1, computed via Cholesky factorization of
        # the projected covariance S instead of an explicit inverse.
        chol_factor, lower = scipy.linalg.cho_factor(
            projected_cov, lower=True, check_finite=False)
        kalman_gain = scipy.linalg.cho_solve(
            (chol_factor, lower), np.dot(covariance, self._update_mat.T).T,
            check_finite=False).T
        innovation = measurement - projected_mean

        # x = x' + K y;  P = P' - K S K^T
        new_mean = mean + np.dot(innovation, kalman_gain.T)
        new_covariance = covariance - np.linalg.multi_dot((
            kalman_gain, projected_cov, kalman_gain.T))
        return new_mean, new_covariance

    def gating_distance(self, mean, covariance, measurements,
                        only_position=False):
        """Compute gating distance between state distribution and measurements.

        A suitable distance threshold can be obtained from `chi2inv95`. If
        `only_position` is False, the chi-square distribution has 4 degrees of
        freedom, otherwise 2.

        Parameters
        ----------
        mean : ndarray
            Mean vector over the state distribution (8 dimensional).
        covariance : ndarray
            Covariance of the state distribution (8x8 dimensional).
        measurements : ndarray
            An Nx4 dimensional matrix of N measurements, each in
            format (x, y, a, h) where (x, y) is the bounding box center
            position, a the aspect ratio, and h the height.
        only_position : Optional[bool]
            If True, distance computation is done with respect to the bounding
            box center position only.

        Returns
        -------
        ndarray
            Returns an array of length N, where the i-th element contains the
            squared Mahalanobis distance between (mean, covariance) and
            `measurements[i]`.

        """
        mean, covariance = self.project(mean, covariance)
        if only_position:
            mean, covariance = mean[:2], covariance[:2, :2]
            measurements = measurements[:, :2]

        # Squared Mahalanobis distance d^T S^-1 d via a triangular solve
        # against the Cholesky factor of S (avoids inverting S).
        cholesky_factor = np.linalg.cholesky(covariance)
        d = measurements - mean
        z = scipy.linalg.solve_triangular(
            cholesky_factor, d.T, lower=True, check_finite=False,
            overwrite_b=True)
        squared_maha = np.sum(z * z, axis=0)
        return squared_maha
230 |
--------------------------------------------------------------------------------
/deep_sort/deep_sort/sort/linear_assignment.py:
--------------------------------------------------------------------------------
1 | # vim: expandtab:ts=4:sw=4
2 | from __future__ import absolute_import
3 | import numpy as np
4 | # from sklearn.utils.linear_assignment_ import linear_assignment
5 | from scipy.optimize import linear_sum_assignment as linear_assignment
6 | from . import kalman_filter
7 |
8 |
9 | INFTY_COST = 1e+5
10 |
11 |
def min_cost_matching(
        distance_metric, max_distance, tracks, detections, track_indices=None,
        detection_indices=None):
    """Solve linear assignment problem.

    Parameters
    ----------
    distance_metric : Callable[List[Track], List[Detection], List[int], List[int]) -> ndarray
        Given tracks, detections, and N track / M detection indices, returns
        the NxM cost matrix whose (i, j) entry is the association cost between
        the i-th given track and the j-th given detection.
    max_distance : float
        Gating threshold; associations costlier than this are disregarded.
    tracks : List[track.Track]
        Predicted tracks at the current time step.
    detections : List[detection.Detection]
        Detections at the current time step.
    track_indices : List[int]
        Maps cost-matrix rows to entries in `tracks`.
    detection_indices : List[int]
        Maps cost-matrix columns to entries in `detections`.

    Returns
    -------
    (List[(int, int)], List[int], List[int])
        Matched (track, detection) index pairs, unmatched track indices,
        and unmatched detection indices.

    """
    if track_indices is None:
        track_indices = np.arange(len(tracks))
    if detection_indices is None:
        detection_indices = np.arange(len(detections))

    if len(detection_indices) == 0 or len(track_indices) == 0:
        return [], track_indices, detection_indices  # Nothing to match.

    cost_matrix = distance_metric(
        tracks, detections, track_indices, detection_indices)
    # Clamp gated entries just above the threshold so the solver may still
    # assign them; such pairs are filtered out again below.
    cost_matrix[cost_matrix > max_distance] = max_distance + 1e-5

    rows, cols = linear_assignment(cost_matrix)

    # Rows/columns the solver left unassigned.
    unmatched_detections = [
        detection_indices[c] for c in range(len(detection_indices))
        if c not in cols]
    unmatched_tracks = [
        track_indices[r] for r in range(len(track_indices))
        if r not in rows]

    matches = []
    for r, c in zip(rows, cols):
        t_idx = track_indices[r]
        d_idx = detection_indices[c]
        if cost_matrix[r, c] > max_distance:
            # Assigned by the solver but beyond the gate: treat as unmatched.
            unmatched_tracks.append(t_idx)
            unmatched_detections.append(d_idx)
        else:
            matches.append((t_idx, d_idx))
    return matches, unmatched_tracks, unmatched_detections
78 |
79 |
def matching_cascade(
        distance_metric, max_distance, cascade_depth, tracks, detections,
        track_indices=None, detection_indices=None):
    """Run matching cascade.

    Matches tracks level by level in order of increasing time since their
    last update, so recently-updated tracks get first pick of detections.

    Parameters
    ----------
    distance_metric : Callable[List[Track], List[Detection], List[int], List[int]) -> ndarray
        Given tracks, detections, and N track / M detection indices, returns
        the NxM cost matrix of association costs.
    max_distance : float
        Gating threshold; associations costlier than this are disregarded.
    cascade_depth: int
        The cascade depth, should be set to the maximum track age.
    tracks : List[track.Track]
        Predicted tracks at the current time step.
    detections : List[detection.Detection]
        Detections at the current time step.
    track_indices : Optional[List[int]]
        Track indices eligible for matching. Defaults to all tracks.
    detection_indices : Optional[List[int]]
        Detection indices eligible for matching. Defaults to all detections.

    Returns
    -------
    (List[(int, int)], List[int], List[int])
        Matched (track, detection) index pairs, unmatched track indices,
        and unmatched detection indices.

    """
    if track_indices is None:
        track_indices = list(range(len(tracks)))
    if detection_indices is None:
        detection_indices = list(range(len(detections)))

    matches = []
    unmatched_detections = detection_indices
    for level in range(cascade_depth):
        if not unmatched_detections:  # No detections left
            break

        # Only tracks last updated exactly `level + 1` frames ago compete
        # at this level.
        tracks_at_level = [
            k for k in track_indices
            if tracks[k].time_since_update == 1 + level
        ]
        if not tracks_at_level:  # Nothing to match at this level
            continue

        level_matches, _, unmatched_detections = min_cost_matching(
            distance_metric, max_distance, tracks, detections,
            tracks_at_level, unmatched_detections)
        matches.extend(level_matches)

    unmatched_tracks = list(set(track_indices) - set(k for k, _ in matches))
    return matches, unmatched_tracks, unmatched_detections
144 |
145 |
def gate_cost_matrix(
        kf, cost_matrix, tracks, detections, track_indices, detection_indices,
        gated_cost=INFTY_COST, only_position=False):
    """Invalidate infeasible entries in cost matrix based on the state
    distributions obtained by Kalman filtering.

    Parameters
    ----------
    kf : The Kalman filter.
    cost_matrix : ndarray
        NxM cost matrix where entry (i, j) is the association cost between
        `tracks[track_indices[i]]` and `detections[detection_indices[j]]`.
    tracks : List[track.Track]
        Predicted tracks at the current time step.
    detections : List[detection.Detection]
        Detections at the current time step.
    track_indices : List[int]
        Maps cost-matrix rows to entries in `tracks`.
    detection_indices : List[int]
        Maps cost-matrix columns to entries in `detections`.
    gated_cost : Optional[float]
        Value written into infeasible entries. Defaults to a very large value.
    only_position : Optional[bool]
        If True, gate on the (x, y) position only. Defaults to False.

    Returns
    -------
    ndarray
        The modified cost matrix (edited in place and returned).

    """
    # 4 measurement dims (x, y, a, h), or 2 when gating on position only.
    gating_dim = 2 if only_position else 4
    threshold = kalman_filter.chi2inv95[gating_dim]
    measurements = np.asarray(
        [detections[i].to_xyah() for i in detection_indices])
    for row, t_idx in enumerate(track_indices):
        track = tracks[t_idx]
        distances = kf.gating_distance(
            track.mean, track.covariance, measurements, only_position)
        # Any detection beyond the chi-square gate is marked infeasible.
        cost_matrix[row, distances > threshold] = gated_cost
    return cost_matrix
193 |
--------------------------------------------------------------------------------
/deep_sort/deep_sort/sort/nn_matching.py:
--------------------------------------------------------------------------------
1 | # vim: expandtab:ts=4:sw=4
2 | import numpy as np
3 |
4 |
def _pdist(a, b):
    """Compute pair-wise squared distance between points in `a` and `b`.

    Parameters
    ----------
    a : array_like
        An NxM matrix of N samples of dimensionality M.
    b : array_like
        An LxM matrix of L samples of dimensionality M.

    Returns
    -------
    ndarray
        NxL matrix where element (i, j) is the squared Euclidean distance
        between `a[i]` and `b[j]`.

    """
    a = np.asarray(a)
    b = np.asarray(b)
    if len(a) == 0 or len(b) == 0:
        return np.zeros((len(a), len(b)))
    # ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b, computed via broadcasting.
    sq_a = np.square(a).sum(axis=1)
    sq_b = np.square(b).sum(axis=1)
    dist = sq_a[:, None] + sq_b[None, :] - 2. * np.dot(a, b.T)
    # Guard against tiny negative values from floating-point cancellation.
    return np.clip(dist, 0., None)
29 |
30 |
def _cosine_distance(a, b, data_is_normalized=False):
    """Compute pair-wise cosine distance between points in `a` and `b`.

    Parameters
    ----------
    a : array_like
        An NxM matrix of N samples of dimensionality M.
    b : array_like
        An LxM matrix of L samples of dimensionality M.
    data_is_normalized : Optional[bool]
        If True, rows of `a` and `b` are assumed to be unit vectors.
        Otherwise they are explicitly normalized to length 1.

    Returns
    -------
    ndarray
        NxL matrix where element (i, j) is the cosine distance between
        `a[i]` and `b[j]`.

    """
    if not data_is_normalized:
        a = np.asarray(a)
        a = a / np.linalg.norm(a, axis=1, keepdims=True)
        b = np.asarray(b)
        b = b / np.linalg.norm(b, axis=1, keepdims=True)
    # Cosine distance = 1 - cosine similarity of the (unit) row vectors.
    return 1. - np.dot(a, b.T)
55 |
56 |
def _nn_euclidean_distance(x, y):
    """ Helper function for nearest neighbor distance metric (Euclidean).

    Parameters
    ----------
    x : ndarray
        A matrix of N row-vectors (sample points).
    y : ndarray
        A matrix of M row-vectors (query points).

    Returns
    -------
    ndarray
        A vector of length M that contains for each entry in `y` the
        smallest Euclidean distance to a sample in `x`.

    """
    # Column-wise minimum over all samples; clamp at zero for safety.
    return np.maximum(0.0, _pdist(x, y).min(axis=0))
76 |
77 |
def _nn_cosine_distance(x, y):
    """ Helper function for nearest neighbor distance metric (cosine).

    Parameters
    ----------
    x : ndarray
        A matrix of N row-vectors (sample points).
    y : ndarray
        A matrix of M row-vectors (query points).

    Returns
    -------
    ndarray
        A vector of length M that contains for each entry in `y` the
        smallest cosine distance to a sample in `x`.

    """
    # Column-wise minimum over all samples in `x`.
    return _cosine_distance(x, y).min(axis=0)
97 |
98 |
class NearestNeighborDistanceMetric(object):
    """
    A nearest neighbor distance metric that, for each target, returns
    the closest distance to any sample that has been observed so far.

    Parameters
    ----------
    metric : str
        Either "euclidean" or "cosine".
    matching_threshold: float
        The matching threshold. Samples with larger distance are considered an
        invalid match.
    budget : Optional[int]
        If not None, keep at most this many samples per class, dropping the
        oldest when the budget is exceeded.

    Attributes
    ----------
    samples : Dict[int -> List[ndarray]]
        Maps each target identity to its observed feature samples.

    """

    def __init__(self, metric, matching_threshold, budget=None):
        if metric == "cosine":
            self._metric = _nn_cosine_distance
        elif metric == "euclidean":
            self._metric = _nn_euclidean_distance
        else:
            raise ValueError(
                "Invalid metric; must be either 'euclidean' or 'cosine'")
        self.matching_threshold = matching_threshold
        self.budget = budget
        self.samples = {}

    def partial_fit(self, features, targets, active_targets):
        """Update the distance metric with new data.

        Parameters
        ----------
        features : ndarray
            An NxM matrix of N features of dimensionality M.
        targets : ndarray
            An integer array of associated target identities.
        active_targets : List[int]
            Targets currently present in the scene; samples for any other
            target are dropped.

        """
        for feature, target in zip(features, targets):
            bucket = self.samples.setdefault(target, [])
            bucket.append(feature)
            if self.budget is not None:
                # Keep only the most recent `budget` samples.
                self.samples[target] = bucket[-self.budget:]
        self.samples = {k: self.samples[k] for k in active_targets}

    def distance(self, features, targets):
        """Compute distance between features and targets.

        Parameters
        ----------
        features : ndarray
            An NxM matrix of N features of dimensionality M.
        targets : List[int]
            Targets to match the given `features` against.

        Returns
        -------
        ndarray
            Cost matrix of shape len(targets) x len(features) where element
            (i, j) is the closest distance between `targets[i]` and
            `features[j]`.

        """
        cost_matrix = np.zeros((len(targets), len(features)))
        for row, target in enumerate(targets):
            cost_matrix[row, :] = self._metric(self.samples[target], features)
        return cost_matrix
178 |
--------------------------------------------------------------------------------
/deep_sort/deep_sort/sort/preprocessing.py:
--------------------------------------------------------------------------------
1 | # vim: expandtab:ts=4:sw=4
2 | import numpy as np
3 | import cv2
4 |
5 |
def non_max_suppression(boxes, max_bbox_overlap, scores=None):
    """Suppress overlapping detections.

    Original code from [1]_ has been adapted to include confidence score.

    .. [1] http://www.pyimagesearch.com/2015/02/16/
           faster-non-maximum-suppression-python/

    Examples
    --------

        >>> boxes = [d.roi for d in detections]
        >>> scores = [d.confidence for d in detections]
        >>> indices = non_max_suppression(boxes, max_bbox_overlap, scores)
        >>> detections = [detections[i] for i in indices]

    Parameters
    ----------
    boxes : ndarray
        Array of ROIs (x, y, width, height).
    max_bbox_overlap : float
        ROIs that overlap more than this values are suppressed.
    scores : Optional[array_like]
        Detector confidence score.

    Returns
    -------
    List[int]
        Returns indices of detections that have survived non-maxima suppression.

    """
    if len(boxes) == 0:
        return []

    # BUG FIX: np.float was removed in NumPy 1.24; builtin float gives the
    # same float64 dtype.
    boxes = boxes.astype(float)
    pick = []

    # Convert (x, y, w, h) to corner coordinates.
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2] + boxes[:, 0]
    y2 = boxes[:, 3] + boxes[:, 1]

    area = (x2 - x1 + 1) * (y2 - y1 + 1)
    # Process highest-score (or bottom-most, without scores) boxes last so
    # they are picked first.
    if scores is not None:
        idxs = np.argsort(scores)
    else:
        idxs = np.argsort(y2)

    while len(idxs) > 0:
        last = len(idxs) - 1
        i = idxs[last]
        pick.append(i)

        # Intersection of the picked box with all remaining candidates.
        xx1 = np.maximum(x1[i], x1[idxs[:last]])
        yy1 = np.maximum(y1[i], y1[idxs[:last]])
        xx2 = np.minimum(x2[i], x2[idxs[:last]])
        yy2 = np.minimum(y2[i], y2[idxs[:last]])

        w = np.maximum(0, xx2 - xx1 + 1)
        h = np.maximum(0, yy2 - yy1 + 1)

        # Overlap relative to each candidate's own area.
        overlap = (w * h) / area[idxs[:last]]

        # Drop the picked box and every candidate it suppresses.
        idxs = np.delete(
            idxs, np.concatenate(
                ([last], np.where(overlap > max_bbox_overlap)[0])))

    return pick
74 |
--------------------------------------------------------------------------------
/deep_sort/deep_sort/sort/track.py:
--------------------------------------------------------------------------------
1 | # vim: expandtab:ts=4:sw=4
2 |
3 |
class TrackState:
    """
    Enumeration type for the single target track state. Newly created tracks are
    classified as `tentative` until enough evidence has been collected. Then,
    the track state is changed to `confirmed`. Tracks that are no longer alive
    are classified as `deleted` to mark them for removal from the set of active
    tracks.

    """

    Tentative = 1  # newly created; awaiting enough consecutive detections
    Confirmed = 2  # enough evidence collected; track is actively reported
    Deleted = 3  # marked for removal from the set of active tracks
17 |
18 |
class Track:
    """
    A single tracked target with state space `(x, y, a, h)` plus velocities,
    where `(x, y)` is the bounding-box centre, `a` the aspect ratio and `h`
    the height.

    Parameters
    ----------
    mean : ndarray
        Mean vector of the initial state distribution.
    covariance : ndarray
        Covariance matrix of the initial state distribution.
    track_id : int
        A unique track identifier.
    n_init : int
        Number of consecutive detections required before the track is
        confirmed. A miss within the first `n_init` frames deletes the track.
    max_age : int
        Maximum number of consecutive misses before the track is deleted.
    feature : Optional[ndarray]
        Appearance feature of the originating detection; cached when given.

    Attributes
    ----------
    mean : ndarray
        Mean vector of the current state distribution.
    covariance : ndarray
        Covariance matrix of the current state distribution.
    track_id : int
        A unique track identifier.
    hits : int
        Total number of measurement updates.
    age : int
        Total number of frames since first occurrence.
    time_since_update : int
        Frames elapsed since the last measurement update.
    state : TrackState
        The current track state.
    features : List[ndarray]
        Cached appearance features, one appended per measurement update.

    """

    def __init__(self, mean, covariance, track_id, n_init, max_age,
                 feature=None):
        self.mean = mean
        self.covariance = covariance
        self.track_id = track_id
        self.hits = 1
        self.age = 1
        self.time_since_update = 0

        self.state = TrackState.Tentative
        # Seed the feature cache with the originating detection, if any.
        self.features = [] if feature is None else [feature]

        self._n_init = n_init
        self._max_age = max_age

    def to_tlwh(self):
        """Return the current position as `(top-left x, top-left y, w, h)`.

        Returns
        -------
        ndarray
            The bounding box.

        """
        box = self.mean[:4].copy()
        box[2] = box[2] * box[3]       # width = aspect ratio * height
        box[0] = box[0] - box[2] / 2.  # centre x -> top-left x
        box[1] = box[1] - box[3] / 2.  # centre y -> top-left y
        return box

    def to_tlbr(self):
        """Return the current position as `(min x, min y, max x, max y)`.

        Returns
        -------
        ndarray
            The bounding box.

        """
        box = self.to_tlwh()
        box[2] = box[0] + box[2]
        box[3] = box[1] + box[3]
        return box

    def predict(self, kf):
        """Propagate the state distribution one time step with a Kalman
        prediction step.

        Parameters
        ----------
        kf : kalman_filter.KalmanFilter
            The Kalman filter.

        """
        self.mean, self.covariance = kf.predict(self.mean, self.covariance)
        self.age += 1
        self.time_since_update += 1

    def update(self, kf, detection):
        """Run the Kalman measurement update and refresh the feature cache.

        Parameters
        ----------
        kf : kalman_filter.KalmanFilter
            The Kalman filter.
        detection : Detection
            The associated detection.

        """
        self.mean, self.covariance = kf.update(
            self.mean, self.covariance, detection.to_xyah())
        self.features.append(detection.feature)

        self.hits += 1
        self.time_since_update = 0
        # Promote a tentative track once it has enough supporting hits.
        if self.hits >= self._n_init and self.state == TrackState.Tentative:
            self.state = TrackState.Confirmed

    def mark_missed(self):
        """Mark this track as missed (no association at the current step)."""
        # A tentative track dies immediately; an established one only after
        # exceeding its maximum allowed age without a measurement update.
        if self.state == TrackState.Tentative or \
                self.time_since_update > self._max_age:
            self.state = TrackState.Deleted

    def is_tentative(self):
        """Return True while the track is still unconfirmed."""
        return self.state == TrackState.Tentative

    def is_confirmed(self):
        """Return True if the track has been confirmed."""
        return self.state == TrackState.Confirmed

    def is_deleted(self):
        """Return True if the track is dead and should be removed."""
        return self.state == TrackState.Deleted
167 |
--------------------------------------------------------------------------------
/deep_sort/deep_sort/sort/tracker.py:
--------------------------------------------------------------------------------
1 | # vim: expandtab:ts=4:sw=4
2 | from __future__ import absolute_import
3 | import numpy as np
4 | from . import kalman_filter
5 | from . import linear_assignment
6 | from . import iou_matching
7 | from .track import Track
8 |
9 |
class Tracker:
    """
    This is the multi-target tracker.

    Parameters
    ----------
    metric : nn_matching.NearestNeighborDistanceMetric
        A distance metric for measurement-to-track association.
    max_age : int
        Maximum number of missed misses before a track is deleted.
    n_init : int
        Number of consecutive detections before the track is confirmed. The
        track state is set to `Deleted` if a miss occurs within the first
        `n_init` frames.

    Attributes
    ----------
    metric : nn_matching.NearestNeighborDistanceMetric
        The distance metric used for measurement to track association.
    max_age : int
        Maximum number of missed misses before a track is deleted.
    n_init : int
        Number of frames that a track remains in initialization phase.
    kf : kalman_filter.KalmanFilter
        A Kalman filter to filter target trajectories in image space.
    tracks : List[Track]
        The list of active tracks at the current time step.

    """

    def __init__(self, metric, max_iou_distance=0.7, max_age=70, n_init=3):
        # Appearance metric drives the matching cascade; IOU is the fallback.
        self.metric = metric
        self.max_iou_distance = max_iou_distance
        self.max_age = max_age
        self.n_init = n_init

        self.kf = kalman_filter.KalmanFilter()
        self.tracks = []
        self._next_id = 1  # id assigned to the next newly initiated track

    def predict(self):
        """Propagate track state distributions one time step forward.

        This function should be called once every time step, before `update`.
        """
        for track in self.tracks:
            track.predict(self.kf)

    def update(self, detections):
        """Perform measurement update and track management.

        Parameters
        ----------
        detections : List[deep_sort.detection.Detection]
            A list of detections at the current time step.

        """
        # Run matching cascade.
        matches, unmatched_tracks, unmatched_detections = \
            self._match(detections)

        # Update track set: matched tracks get a Kalman update, unmatched
        # tracks are aged (and possibly deleted), unmatched detections
        # spawn new tentative tracks.
        for track_idx, detection_idx in matches:
            self.tracks[track_idx].update(
                self.kf, detections[detection_idx])
        for track_idx in unmatched_tracks:
            self.tracks[track_idx].mark_missed()
        for detection_idx in unmatched_detections:
            self._initiate_track(detections[detection_idx])
        self.tracks = [t for t in self.tracks if not t.is_deleted()]

        # Update distance metric with the features accumulated on confirmed
        # tracks; each track's per-frame feature cache is consumed here.
        active_targets = [t.track_id for t in self.tracks if t.is_confirmed()]
        features, targets = [], []
        for track in self.tracks:
            if not track.is_confirmed():
                continue
            features += track.features
            targets += [track.track_id for _ in track.features]
            track.features = []
        self.metric.partial_fit(
            np.asarray(features), np.asarray(targets), active_targets)

    def _match(self, detections):
        """Associate detections with tracks.

        Confirmed tracks are matched by appearance in a cascade; remaining
        tracks (plus unconfirmed ones) fall back to IOU matching. Returns
        (matches, unmatched_tracks, unmatched_detections).
        """

        def gated_metric(tracks, dets, track_indices, detection_indices):
            # Appearance cost matrix, gated by the Kalman state so that
            # implausible (distant) associations are made infeasible.
            features = np.array([dets[i].feature for i in detection_indices])
            targets = np.array([tracks[i].track_id for i in track_indices])
            cost_matrix = self.metric.distance(features, targets)
            cost_matrix = linear_assignment.gate_cost_matrix(
                self.kf, cost_matrix, tracks, dets, track_indices,
                detection_indices)

            return cost_matrix

        # Split track set into confirmed and unconfirmed tracks.
        confirmed_tracks = [
            i for i, t in enumerate(self.tracks) if t.is_confirmed()]
        unconfirmed_tracks = [
            i for i, t in enumerate(self.tracks) if not t.is_confirmed()]

        # Associate confirmed tracks using appearance features.
        matches_a, unmatched_tracks_a, unmatched_detections = \
            linear_assignment.matching_cascade(
                gated_metric, self.metric.matching_threshold, self.max_age,
                self.tracks, detections, confirmed_tracks)

        # Associate remaining tracks together with unconfirmed tracks using IOU.
        # Only tracks missed for exactly one frame take part in IOU matching.
        iou_track_candidates = unconfirmed_tracks + [
            k for k in unmatched_tracks_a if
            self.tracks[k].time_since_update == 1]
        unmatched_tracks_a = [
            k for k in unmatched_tracks_a if
            self.tracks[k].time_since_update != 1]
        matches_b, unmatched_tracks_b, unmatched_detections = \
            linear_assignment.min_cost_matching(
                iou_matching.iou_cost, self.max_iou_distance, self.tracks,
                detections, iou_track_candidates, unmatched_detections)

        matches = matches_a + matches_b
        unmatched_tracks = list(set(unmatched_tracks_a + unmatched_tracks_b))
        return matches, unmatched_tracks, unmatched_detections

    def _initiate_track(self, detection):
        """Create a new tentative track from an unmatched detection."""
        mean, covariance = self.kf.initiate(detection.to_xyah())
        self.tracks.append(Track(
            mean, covariance, self._next_id, self.n_init, self.max_age,
            detection.feature))
        self._next_id += 1
139 |
--------------------------------------------------------------------------------
/deep_sort/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dyh/win10_yolov5_deepsort_counting/adb686ee839e89177990c27f5da35bfae7ab4b9b/deep_sort/utils/__init__.py
--------------------------------------------------------------------------------
/deep_sort/utils/asserts.py:
--------------------------------------------------------------------------------
1 | from os import environ
2 |
3 |
def assert_in(file, files_to_check):
    """Return True if *file* is in *files_to_check*, else raise AssertionError."""
    if file in files_to_check:
        return True
    raise AssertionError("{} does not exist in the list".format(str(file)))
8 |
9 |
def assert_in_env(check_list: list):
    """Verify every name in *check_list* is a defined environment variable."""
    env_keys = environ.keys()  # live view of the current environment
    for name in check_list:
        assert_in(name, env_keys)
    return True
14 |
--------------------------------------------------------------------------------
/deep_sort/utils/draw.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import cv2
3 |
# Large constants used to spread label ids across the colour space.
palette = (2 ** 11 - 1, 2 ** 15 - 1, 2 ** 20 - 1)


def compute_color_for_labels(label):
    """
    Map a class/track label to a fixed, deterministic BGR colour tuple.
    """
    return tuple(int((p * (label ** 2 - label + 1)) % 255) for p in palette)
13 |
14 |
def draw_boxes(img, bbox, identities=None, offset=(0, 0)):
    """Draw one rectangle plus a track-id label for every box in *bbox*.

    Parameters
    ----------
    img : ndarray
        BGR image, drawn on in place.
    bbox : iterable
        Boxes as (x1, y1, x2, y2) sequences.
    identities : Optional[sequence]
        Track id per box; 0 is used when not provided.
    offset : tuple
        (dx, dy) added to every box corner.

    Returns
    -------
    ndarray
        The same image object, with boxes and labels drawn.
    """
    for idx, box in enumerate(bbox):
        # Fix: use distinct names instead of shadowing the builtin `id`
        # and re-using the outer loop variable inside the comprehension.
        x1, y1, x2, y2 = (int(coord) for coord in box)
        x1 += offset[0]
        x2 += offset[0]
        y1 += offset[1]
        y2 += offset[1]
        # box text and bar
        track_id = int(identities[idx]) if identities is not None else 0
        color = compute_color_for_labels(track_id)
        label = str(track_id)
        t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 2, 2)[0]
        cv2.rectangle(img, (x1, y1), (x2, y2), color, 3)
        # Filled bar behind the label text for readability.
        cv2.rectangle(img, (x1, y1), (x1 + t_size[0] + 3, y1 + t_size[1] + 4), color, -1)
        cv2.putText(img, label, (x1, y1 + t_size[1] + 4),
                    cv2.FONT_HERSHEY_PLAIN, 2, [255, 255, 255], 2)
    return img
31 |
32 |
33 |
if __name__ == '__main__':
    # Print the colour assigned to each of the first 82 label ids.
    for label in range(82):
        print(compute_color_for_labels(label))
37 |
--------------------------------------------------------------------------------
/deep_sort/utils/evaluation.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 | import copy
4 | import motmetrics as mm
5 | mm.lap.default_solver = 'lap'
6 | from utils.io import read_results, unzip_objs
7 |
8 |
class Evaluator(object):
    """Accumulates MOT metrics for one sequence using `motmetrics`.

    Ground truth is read from `<data_root>/<seq_name>/gt/gt.txt`
    (MOTChallenge layout); tracker output is fed in frame by frame and
    CLEAR-MOT / identity metrics can then be summarised.
    """

    def __init__(self, data_root, seq_name, data_type):
        self.data_root = data_root
        self.seq_name = seq_name
        self.data_type = data_type  # only 'mot' is supported (asserted below)

        self.load_annotations()
        self.reset_accumulator()

    def load_annotations(self):
        """Load ground-truth and ignore-region annotations for the sequence."""
        assert self.data_type == 'mot'

        gt_filename = os.path.join(self.data_root, self.seq_name, 'gt', 'gt.txt')
        self.gt_frame_dict = read_results(gt_filename, self.data_type, is_gt=True)
        self.gt_ignore_frame_dict = read_results(gt_filename, self.data_type, is_ignore=True)

    def reset_accumulator(self):
        # Fresh accumulator; auto_id numbers the frames as events arrive.
        self.acc = mm.MOTAccumulator(auto_id=True)

    def eval_frame(self, frame_id, trk_tlwhs, trk_ids, rtn_events=False):
        """Feed one frame of tracker output into the accumulator.

        Tracker boxes matched to ignore regions are dropped before the
        distance matrix against the ground truth is computed.
        """
        # results
        trk_tlwhs = np.copy(trk_tlwhs)
        trk_ids = np.copy(trk_ids)

        # gts
        gt_objs = self.gt_frame_dict.get(frame_id, [])
        gt_tlwhs, gt_ids = unzip_objs(gt_objs)[:2]

        # ignore boxes
        ignore_objs = self.gt_ignore_frame_dict.get(frame_id, [])
        ignore_tlwhs = unzip_objs(ignore_objs)[0]


        # remove ignored results
        keep = np.ones(len(trk_tlwhs), dtype=bool)
        iou_distance = mm.distances.iou_matrix(ignore_tlwhs, trk_tlwhs, max_iou=0.5)
        if len(iou_distance) > 0:
            # Assign tracker boxes to ignore regions; NaN distances mean
            # "no valid overlap", so those assignments are discarded.
            match_is, match_js = mm.lap.linear_sum_assignment(iou_distance)
            match_is, match_js = map(lambda a: np.asarray(a, dtype=int), [match_is, match_js])
            match_ious = iou_distance[match_is, match_js]

            match_js = np.asarray(match_js, dtype=int)
            match_js = match_js[np.logical_not(np.isnan(match_ious))]
            keep[match_js] = False
            trk_tlwhs = trk_tlwhs[keep]
            trk_ids = trk_ids[keep]

        # get distance matrix
        iou_distance = mm.distances.iou_matrix(gt_tlwhs, trk_tlwhs, max_iou=0.5)

        # acc
        self.acc.update(gt_ids, trk_ids, iou_distance)

        if rtn_events and iou_distance.size > 0 and hasattr(self.acc, 'last_mot_events'):
            events = self.acc.last_mot_events  # only supported by https://github.com/longcw/py-motmetrics
        else:
            events = None
        return events

    def eval_file(self, filename):
        """Evaluate a whole result file and return the filled accumulator."""
        self.reset_accumulator()

        result_frame_dict = read_results(filename, self.data_type, is_gt=False)
        # Union of GT and result frames so missed frames still count.
        frames = sorted(list(set(self.gt_frame_dict.keys()) | set(result_frame_dict.keys())))
        for frame_id in frames:
            trk_objs = result_frame_dict.get(frame_id, [])
            trk_tlwhs, trk_ids = unzip_objs(trk_objs)[:2]
            self.eval_frame(frame_id, trk_tlwhs, trk_ids, rtn_events=False)

        return self.acc

    @staticmethod
    def get_summary(accs, names, metrics=('mota', 'num_switches', 'idp', 'idr', 'idf1', 'precision', 'recall')):
        """Compute a metrics summary table over several accumulators."""
        names = copy.deepcopy(names)
        if metrics is None:
            metrics = mm.metrics.motchallenge_metrics
        metrics = copy.deepcopy(metrics)

        mh = mm.metrics.create()
        summary = mh.compute_many(
            accs,
            metrics=metrics,
            names=names,
            generate_overall=True
        )

        return summary

    @staticmethod
    def save_summary(summary, filename):
        """Write a summary table to an Excel file."""
        import pandas as pd
        writer = pd.ExcelWriter(filename)
        summary.to_excel(writer)
        # NOTE(review): ExcelWriter.save() was removed in pandas >= 2.0
        # (use writer.close() there) — confirm the pinned pandas version.
        writer.save()
104 |
--------------------------------------------------------------------------------
/deep_sort/utils/io.py:
--------------------------------------------------------------------------------
1 | import os
2 | from typing import Dict
3 | import numpy as np
4 |
5 | # from utils.log import get_logger
6 |
7 |
def write_results(filename, results, data_type):
    """Write tracking results to *filename* in MOT or KITTI text format.

    *results* is an iterable of (frame_id, tlwhs, track_ids) triples;
    entries with a negative track id are skipped. Raises ValueError for an
    unknown *data_type*.
    """
    if data_type == 'mot':
        save_format = '{frame},{id},{x1},{y1},{w},{h},-1,-1,-1,-1\n'
    elif data_type == 'kitti':
        save_format = '{frame} {id} pedestrian 0 0 -10 {x1} {y1} {x2} {y2} -10 -10 -10 -1000 -1000 -1000 -10\n'
    else:
        raise ValueError(data_type)

    lines = []
    for frame_id, tlwhs, track_ids in results:
        if data_type == 'kitti':
            frame_id -= 1  # KITTI frames are 0-based
        for tlwh, track_id in zip(tlwhs, track_ids):
            if track_id < 0:
                continue
            x1, y1, w, h = tlwh
            lines.append(save_format.format(
                frame=frame_id, id=track_id, x1=x1, y1=y1,
                x2=x1 + w, y2=y1 + h, w=w, h=h))

    with open(filename, 'w') as f:
        f.writelines(lines)
27 |
28 |
29 | # def write_results(filename, results_dict: Dict, data_type: str):
30 | # if not filename:
31 | # return
32 | # path = os.path.dirname(filename)
33 | # if not os.path.exists(path):
34 | # os.makedirs(path)
35 |
36 | # if data_type in ('mot', 'mcmot', 'lab'):
37 | # save_format = '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n'
38 | # elif data_type == 'kitti':
39 | # save_format = '{frame} {id} pedestrian -1 -1 -10 {x1} {y1} {x2} {y2} -1 -1 -1 -1000 -1000 -1000 -10 {score}\n'
40 | # else:
41 | # raise ValueError(data_type)
42 |
43 | # with open(filename, 'w') as f:
44 | # for frame_id, frame_data in results_dict.items():
45 | # if data_type == 'kitti':
46 | # frame_id -= 1
47 | # for tlwh, track_id in frame_data:
48 | # if track_id < 0:
49 | # continue
50 | # x1, y1, w, h = tlwh
51 | # x2, y2 = x1 + w, y1 + h
52 | # line = save_format.format(frame=frame_id, id=track_id, x1=x1, y1=y1, x2=x2, y2=y2, w=w, h=h, score=1.0)
53 | # f.write(line)
54 | # logger.info('Save results to {}'.format(filename))
55 |
56 |
def read_results(filename, data_type: str, is_gt=False, is_ignore=False):
    """Dispatch to the reader matching *data_type* ('mot' or 'lab')."""
    if data_type not in ('mot', 'lab'):
        raise ValueError('Unknown data type: {}'.format(data_type))
    return read_mot_results(filename, is_gt, is_ignore)
64 |
65 |
66 | """
67 | labels={'ped', ... % 1
68 | 'person_on_vhcl', ... % 2
69 | 'car', ... % 3
70 | 'bicycle', ... % 4
71 | 'mbike', ... % 5
72 | 'non_mot_vhcl', ... % 6
73 | 'static_person', ... % 7
74 | 'distractor', ... % 8
75 | 'occluder', ... % 9
76 | 'occluder_on_grnd', ... %10
77 | 'occluder_full', ... % 11
78 | 'reflection', ... % 12
79 | 'crowd' ... % 13
80 | };
81 | """
82 |
83 |
def read_mot_results(filename, is_gt, is_ignore):
    """Parse a MOTChallenge-style CSV file into a per-frame dictionary.

    Returns {frame_id: [(tlwh, target_id, score), ...]}. For MOT16/MOT17
    ground truth, only pedestrian boxes with a non-zero mark are kept; in
    ignore mode only distractor-type or low-visibility boxes are returned.
    A missing file yields an empty dict.
    """
    valid_labels = {1}
    ignore_labels = {2, 7, 8, 12}
    frame_dict = dict()
    if not os.path.isfile(filename):
        return frame_dict

    is_mot16_17 = 'MOT16-' in filename or 'MOT17-' in filename
    with open(filename, 'r') as f:
        for line in f:
            fields = line.split(',')
            if len(fields) < 7:
                continue
            frame_id = int(fields[0])
            if frame_id < 1:
                continue
            frame_dict.setdefault(frame_id, list())

            if is_gt:
                if is_mot16_17:
                    label = int(float(fields[7]))
                    mark = int(float(fields[6]))
                    if mark == 0 or label not in valid_labels:
                        continue
                score = 1
            elif is_ignore:
                if not is_mot16_17:
                    continue
                label = int(float(fields[7]))
                vis_ratio = float(fields[8])
                if label not in ignore_labels and vis_ratio >= 0:
                    continue
                score = 1
            else:
                score = float(fields[6])

            box = tuple(map(float, fields[2:6]))
            target_id = int(fields[1])

            frame_dict[frame_id].append((box, target_id, score))

    return frame_dict
124 |
125 |
def unzip_objs(objs):
    """Split (tlwh, id, score) triples into three parallel collections.

    The boxes are returned as a float ndarray of shape (n, 4); ids and
    scores keep whatever sequence type the unzip produces.
    """
    if objs:
        tlwhs, ids, scores = zip(*objs)
    else:
        tlwhs, ids, scores = [], [], []
    boxes = np.asarray(tlwhs, dtype=float).reshape(-1, 4)

    return boxes, ids, scores
--------------------------------------------------------------------------------
/deep_sort/utils/log.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 |
def get_logger(name='root'):
    """Return an INFO-level console logger for *name*.

    Fix: the original attached a new StreamHandler on every call, so calling
    `get_logger` twice with the same name duplicated every log line. The
    handler is now only installed when the logger has none yet, making the
    function safe to call repeatedly.
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)

    if not logger.handlers:
        formatter = logging.Formatter(
            # fmt='%(asctime)s [%(levelname)s]: %(filename)s(%(funcName)s:%(lineno)s) >> %(message)s')
            fmt='%(asctime)s [%(levelname)s]: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')

        handler = logging.StreamHandler()
        handler.setFormatter(formatter)
        logger.addHandler(handler)

    return logger
16 |
17 |
18 |
--------------------------------------------------------------------------------
/deep_sort/utils/parser.py:
--------------------------------------------------------------------------------
1 | import os
2 | import yaml
3 | from easydict import EasyDict as edict
4 |
5 |
class YamlParser(edict):
    """
    EasyDict-backed configuration object that can be seeded from a dict
    and/or a YAML file, and later merged with more settings.
    """

    def __init__(self, cfg_dict=None, config_file=None):
        # Start from the supplied dict, or an empty mapping.
        base = {} if cfg_dict is None else cfg_dict

        if config_file is not None:
            assert (os.path.isfile(config_file))
            with open(config_file, 'r') as fo:
                base.update(yaml.safe_load(fo.read()))

        super(YamlParser, self).__init__(base)

    def merge_from_file(self, config_file):
        """Overlay this config with the keys loaded from another YAML file."""
        with open(config_file, 'r') as fo:
            loaded = yaml.safe_load(fo.read())
        self.update(loaded)

    def merge_from_dict(self, config_dict):
        """Overlay this config with the keys of *config_dict*."""
        self.update(config_dict)
28 |
29 |
def get_config(config_file=None):
    """Build a YamlParser, optionally pre-loaded from *config_file*."""
    return YamlParser(config_file=config_file)
32 |
33 |
if __name__ == "__main__":
    # Manual smoke test: load a detector config, overlay the deep-sort
    # config, then drop into a debugger to inspect the merged result.
    cfg = YamlParser(config_file="../configs/yolov3.yaml")
    cfg.merge_from_file("../configs/deep_sort.yaml")

    import ipdb

    ipdb.set_trace()
41 |
--------------------------------------------------------------------------------
/deep_sort/utils/tools.py:
--------------------------------------------------------------------------------
1 | from functools import wraps
2 | from time import time
3 |
4 |
def is_video(ext: str):
    """
    Return True when *ext* ends with one of the recognised
    video-file extensions.

    Args:
        ext: file name or extension to test.

    Returns:
        bool
    """
    allowed_exts = ('.mp4', '.webm', '.ogg', '.avi', '.wmv', '.mkv', '.3gp')
    # str.endswith accepts a tuple of suffixes, replacing the any() loop.
    return ext.endswith(allowed_exts)
19 |
20 |
def tik_tok(func):
    """
    Decorator that prints the wall-clock duration and FPS of each call,
    while passing the wrapped function's return value through unchanged.

    Fix: the original divided by the elapsed time unconditionally, so a
    call faster than the clock resolution (elapsed == 0.0, common on
    Windows' coarse `time.time`) raised ZeroDivisionError. Such calls now
    report infinite FPS instead of crashing.

    Args:
        func: callable to instrument.

    Returns:
        The wrapped callable.
    """
    @wraps(func)
    def _time_it(*args, **kwargs):
        start = time()
        try:
            return func(*args, **kwargs)
        finally:
            elapsed = time() - start
            fps = 1 / elapsed if elapsed > 0 else float('inf')
            print("time: {:.03f}s, fps: {:.03f}".format(elapsed, fps))

    return _time_it
40 |
--------------------------------------------------------------------------------
/detector.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import numpy as np
3 |
4 | from models.experimental import attempt_load
5 | from utils.datasets import letterbox
6 | from utils.general import non_max_suppression, scale_coords
7 | from utils.torch_utils import select_device
8 |
9 |
class Detector:
    """YOLOv5-based object detector for single BGR images.

    Loads `./weights/yolov5m.pt` once at construction and returns boxes for
    a fixed set of traffic-related classes in original-image coordinates.
    """

    def __init__(self):
        # Inference settings: letterbox target size and confidence threshold.
        self.img_size = 640
        self.threshold = 0.3
        self.stride = 1  # NOTE(review): not used anywhere in this class — confirm before removing.

        self.weights = './weights/yolov5m.pt'

        # Prefer the first CUDA device when available, else fall back to CPU.
        self.device = '0' if torch.cuda.is_available() else 'cpu'
        self.device = select_device(self.device)
        model = attempt_load(self.weights, map_location=self.device)
        model.to(self.device).eval()
        # NOTE(review): .half() is applied unconditionally; half precision on
        # CPU is typically unsupported or very slow — confirm CPU inference.
        model.half()

        self.m = model
        # DataParallel-wrapped models expose class names under `.module`.
        self.names = model.module.names if hasattr(
            model, 'module') else model.names

    def preprocess(self, img):
        """Letterbox, BGR->RGB, HWC->CHW, half precision, scale to [0, 1].

        Returns
        -------
        tuple
            (original image copy, batched model-ready tensor).
        """

        img0 = img.copy()
        img = letterbox(img, new_shape=self.img_size)[0]
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR -> RGB, HWC -> CHW
        img = np.ascontiguousarray(img)
        img = torch.from_numpy(img).to(self.device)
        img = img.half()
        img /= 255.0  # 0-255 -> 0.0-1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)  # add batch dimension

        return img0, img

    def detect(self, im):
        """Detect traffic-related objects in one BGR image.

        Returns
        -------
        list
            (x1, y1, x2, y2, label, conf) tuples in original image
            coordinates, keeping only person/bicycle/car/motorcycle/bus/truck.
        """

        im0, img = self.preprocess(im)

        pred = self.m(img, augment=False)[0]
        pred = pred.float()
        pred = non_max_suppression(pred, self.threshold, 0.4)  # conf and IOU thresholds

        boxes = []
        for det in pred:

            if det is not None and len(det):
                # Map boxes from letterboxed space back to the original image.
                det[:, :4] = scale_coords(
                    img.shape[2:], det[:, :4], im0.shape).round()

                for *x, conf, cls_id in det:
                    lbl = self.names[int(cls_id)]
                    if lbl not in ['person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck']:
                        continue
                    pass
                    x1, y1 = int(x[0]), int(x[1])
                    x2, y2 = int(x[2]), int(x[3])
                    boxes.append(
                        (x1, y1, x2, y2, lbl, conf))

        return boxes
69 |
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | import tracker
4 | from detector import Detector
5 | from cv2 import cv2
6 |
if __name__ == '__main__':

    # Build a polygon mask matching the video size (1920x1080); it is used
    # for the line-crossing computation below.
    mask_image_temp = np.zeros((1080, 1920), dtype=np.uint8)

    # First crossing polygon (blue), filled with value 1.
    list_pts_blue = [[204, 305], [227, 431], [605, 522], [1101, 464], [1900, 601],
                     [1902, 495], [1125, 379], [604, 437], [299, 375], [267, 289]]
    ndarray_pts_blue = np.array(list_pts_blue, np.int32)
    polygon_blue_value_1 = cv2.fillPoly(mask_image_temp, [ndarray_pts_blue], color=1)
    polygon_blue_value_1 = polygon_blue_value_1[:, :, np.newaxis]

    # Second crossing polygon (yellow), filled with value 2.
    mask_image_temp = np.zeros((1080, 1920), dtype=np.uint8)
    list_pts_yellow = [[181, 305], [207, 442], [603, 544], [1107, 485], [1898, 625],
                       [1893, 701], [1101, 568], [594, 637], [118, 483], [109, 303]]
    ndarray_pts_yellow = np.array(list_pts_yellow, np.int32)
    polygon_yellow_value_2 = cv2.fillPoly(mask_image_temp, [ndarray_pts_yellow], color=2)
    polygon_yellow_value_2 = polygon_yellow_value_2[:, :, np.newaxis]

    # Combined crossing-detection mask containing both polygons
    # (values 0, 1, 2), used for the crossing computation.
    polygon_mask_blue_and_yellow = polygon_blue_value_1 + polygon_yellow_value_2

    # Shrink from 1920x1080 to 960x540 to match the processed frames.
    polygon_mask_blue_and_yellow = cv2.resize(polygon_mask_blue_and_yellow, (960, 540))

    # Blue colour plate (b, g, r) and the blue polygon overlay image.
    blue_color_plate = [255, 0, 0]
    blue_image = np.array(polygon_blue_value_1 * blue_color_plate, np.uint8)

    # Yellow colour plate and the yellow polygon overlay image.
    yellow_color_plate = [0, 255, 255]
    yellow_image = np.array(polygon_yellow_value_2 * yellow_color_plate, np.uint8)

    # Colour overlay (value range 0-255), shrunk to 960x540 as well.
    color_polygons_image = blue_image + yellow_image
    color_polygons_image = cv2.resize(color_polygons_image, (960, 540))

    # Track ids currently overlapping the blue polygon.
    list_overlapping_blue_polygon = []

    # Track ids currently overlapping the yellow polygon.
    list_overlapping_yellow_polygon = []

    # Counter of objects entering (down) and leaving (up).
    down_count = 0
    up_count = 0

    font_draw_number = cv2.FONT_HERSHEY_SIMPLEX
    draw_text_position = (int(960 * 0.01), int(540 * 0.05))

    # Initialise the YOLOv5 detector.
    detector = Detector()

    # Open the video.
    capture = cv2.VideoCapture(r'video\test.mp4')
    # capture = cv2.VideoCapture('TownCentreXVID.avi')

    while True:
        # Read the next frame; stop at the end of the stream.
        _, im = capture.read()
        if im is None:
            break

        # Shrink 1920x1080 -> 960x540.
        im = cv2.resize(im, (960, 540))

        list_bboxs = []
        bboxes = detector.detect(im)

        if len(bboxes) > 0:
            # Frame contains detections: track them and draw the boxes.
            list_bboxs = tracker.update(bboxes, im)
            output_image_frame = tracker.draw_bboxes(im, list_bboxs, line_thickness=None)
        else:
            # No detections in this frame.
            output_image_frame = im

        # Overlay the coloured crossing polygons on the output image.
        output_image_frame = cv2.add(output_image_frame, color_polygons_image)

        if len(list_bboxs) > 0:
            # ---------------------- line-crossing check ----------------------
            for item_bbox in list_bboxs:
                x1, y1, x2, y2, label, track_id = item_bbox

                # Crossing-detection point: (x1, y) with y shifted 60% of
                # the box height below y1.
                y = int(y1 + ((y2 - y1) * 0.6))
                x = x1

                if polygon_mask_blue_and_yellow[y, x] == 1:
                    # The point is inside the blue polygon.
                    if track_id not in list_overlapping_blue_polygon:
                        list_overlapping_blue_polygon.append(track_id)

                    # Already seen in the yellow polygon -> the object is
                    # moving in the "up" (leaving) direction.
                    if track_id in list_overlapping_yellow_polygon:
                        up_count += 1
                        print(
                            f'类别: {label} | id: {track_id} | 上行撞线 | 上行撞线总数: {up_count} | 上行id列表: {list_overlapping_yellow_polygon}')
                        # Drop the id from the yellow list once counted.
                        list_overlapping_yellow_polygon.remove(track_id)

                elif polygon_mask_blue_and_yellow[y, x] == 2:
                    # The point is inside the yellow polygon.
                    if track_id not in list_overlapping_yellow_polygon:
                        list_overlapping_yellow_polygon.append(track_id)

                    # Already seen in the blue polygon -> the object is
                    # moving in the "down" (entering) direction.
                    if track_id in list_overlapping_blue_polygon:
                        down_count += 1
                        print(
                            f'类别: {label} | id: {track_id} | 下行撞线 | 下行撞线总数: {down_count} | 下行id列表: {list_overlapping_blue_polygon}')
                        # Drop the id from the blue list once counted.
                        list_overlapping_blue_polygon.remove(track_id)

            # ---------------------- purge stale ids ----------------------
            list_overlapping_all = list_overlapping_yellow_polygon + list_overlapping_blue_polygon
            for id1 in list_overlapping_all:
                is_found = any(bbox_id == id1 for *_, bbox_id in list_bboxs)
                if not is_found:
                    # Id no longer present in the current detections: drop it.
                    if id1 in list_overlapping_yellow_polygon:
                        list_overlapping_yellow_polygon.remove(id1)
                    if id1 in list_overlapping_blue_polygon:
                        list_overlapping_blue_polygon.remove(id1)
            list_overlapping_all.clear()

            # Clear the per-frame bbox list.
            list_bboxs.clear()
        else:
            # No bboxes at all in the frame: reset both overlap lists.
            list_overlapping_blue_polygon.clear()
            list_overlapping_yellow_polygon.clear()

        # Draw the running counters.
        text_draw = 'DOWN: ' + str(down_count) + \
                    ' , UP: ' + str(up_count)
        output_image_frame = cv2.putText(img=output_image_frame, text=text_draw,
                                         org=draw_text_position,
                                         fontFace=font_draw_number,
                                         fontScale=1, color=(255, 255, 255), thickness=2)

        cv2.imshow('demo', output_image_frame)
        cv2.waitKey(1)

    capture.release()
    cv2.destroyAllWindows()
210 |
--------------------------------------------------------------------------------
/models/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dyh/win10_yolov5_deepsort_counting/adb686ee839e89177990c27f5da35bfae7ab4b9b/models/__init__.py
--------------------------------------------------------------------------------
/models/experimental.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | """
3 | Experimental modules
4 | """
5 | import math
6 |
7 | import numpy as np
8 | import torch
9 | import torch.nn as nn
10 |
11 | from models.common import Conv
12 | from utils.downloads import attempt_download
13 |
14 |
class CrossConv(nn.Module):
    # Cross Convolution Downsample
    def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):
        # ch_in, ch_out, kernel, stride, groups, expansion, shortcut
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        # Decompose the k x k conv into a 1xk followed by a kx1 conv.
        self.cv1 = Conv(c1, c_, (1, k), (1, s))
        self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g)
        self.add = shortcut and c1 == c2

    def forward(self, x):
        out = self.cv2(self.cv1(x))
        # Residual connection only when requested and shapes match.
        return x + out if self.add else out
27 |
28 |
class Sum(nn.Module):
    # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070
    def __init__(self, n, weight=False):  # n: number of inputs
        super().__init__()
        self.weight = weight  # whether learned per-input weights are applied
        self.iter = range(n - 1)
        if weight:
            # Learnable weights, initialised to -1/2, -1, ... before sigmoid.
            self.w = nn.Parameter(-torch.arange(1.0, n) / 2, requires_grad=True)

    def forward(self, x):
        # Collect the (optionally weighted) remaining inputs, then fold
        # them onto the first one.
        if self.weight:
            w = torch.sigmoid(self.w) * 2
            terms = [x[i + 1] * w[i] for i in self.iter]
        else:
            terms = [x[i + 1] for i in self.iter]
        total = x[0]
        for t in terms:
            total = total + t
        return total
48 |
49 |
class MixConv2d(nn.Module):
    # Mixed Depth-wise Conv https://arxiv.org/abs/1907.09595
    # Runs several parallel convolutions with different kernel sizes and
    # concatenates their outputs to form c2 channels.
    def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):  # ch_in, ch_out, kernel, stride, ch_strategy
        super().__init__()
        n = len(k)  # number of parallel convolutions
        if equal_ch:  # split c2 output channels as evenly as possible across groups
            grp = torch.linspace(0, n - 1E-6, c2).floor()  # group index per output channel
            channels = [(grp == g).sum() for g in range(n)]  # channels per group
        else:  # choose channels so each group has (approximately) equal weight.numel()
            b = [c2] + [0] * n
            a = np.eye(n + 1, n, k=-1)
            a -= np.roll(a, 1, axis=1)
            a *= np.array(k) ** 2
            a[0] = 1
            channels = np.linalg.lstsq(a, b, rcond=None)[0].round()  # solve ax = b

        self.m = nn.ModuleList(
            [nn.Conv2d(c1, int(ch), ks, s, ks // 2, groups=math.gcd(c1, int(ch)), bias=False)
             for ks, ch in zip(k, channels)])
        self.bn = nn.BatchNorm2d(c2)
        self.act = nn.SiLU()

    def forward(self, x):
        branches = [conv(x) for conv in self.m]
        return self.act(self.bn(torch.cat(branches, 1)))
73 |
74 |
class Ensemble(nn.ModuleList):
    # Ensemble of models: runs every member and concatenates their detections
    # along dim 1 (NMS ensemble). Alternative reductions (max/mean over members)
    # are possible but not used here.
    def __init__(self):
        super().__init__()

    def forward(self, x, augment=False, profile=False, visualize=False):
        # each member returns (inference_out, train_out); keep only inference_out
        outputs = [m(x, augment, profile, visualize)[0] for m in self]
        merged = torch.cat(outputs, 1)  # nms ensemble
        return merged, None  # inference, train output
88 |
89 |
def attempt_load(weights, map_location=None, inplace=True, fuse=True):
    """Load one checkpoint or an ensemble of checkpoints.

    weights may be a single path or a list of paths; each file is downloaded
    if missing, then loaded. Returns a single model when one weight is given,
    otherwise an Ensemble of all loaded models.
    """
    from models.yolo import Detect, Model

    model = Ensemble()
    weight_list = weights if isinstance(weights, list) else [weights]
    for w in weight_list:
        ckpt = torch.load(attempt_download(w), map_location=map_location)  # load checkpoint dict
        loaded = ckpt['ema' if ckpt.get('ema') else 'model'].float()  # prefer EMA weights, FP32
        model.append(loaded.fuse().eval() if fuse else loaded.eval())  # optionally fuse conv+bn layers

    # Compatibility updates for models saved by older code/torch versions
    for m in model.modules():
        t = type(m)
        if t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model]:
            m.inplace = inplace  # pytorch 1.7.0 compatibility
            if t is Detect and not isinstance(m.anchor_grid, list):  # new Detect layer compatibility
                delattr(m, 'anchor_grid')
                setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl)
        elif t is Conv:
            m._non_persistent_buffers_set = set()  # pytorch 1.6.0 compatibility

    if len(model) == 1:
        return model[-1]  # single model
    print(f'Ensemble created with {weights}\n')
    for k in ['names']:
        setattr(model, k, getattr(model[-1], k))  # expose class names on the ensemble
    model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride  # max stride
    return model  # return ensemble
121 |
--------------------------------------------------------------------------------
/models/hub/anchors.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # Default anchors for COCO data
3 |
4 |
5 | # P5 -------------------------------------------------------------------------------------------------------------------
6 | # P5-640:
7 | anchors_p5_640:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 |
13 | # P6 -------------------------------------------------------------------------------------------------------------------
14 | # P6-640: thr=0.25: 0.9964 BPR, 5.54 anchors past thr, n=12, img_size=640, metric_all=0.281/0.716-mean/best, past_thr=0.469-mean: 9,11, 21,19, 17,41, 43,32, 39,70, 86,64, 65,131, 134,130, 120,265, 282,180, 247,354, 512,387
15 | anchors_p6_640:
16 | - [9,11, 21,19, 17,41] # P3/8
17 | - [43,32, 39,70, 86,64] # P4/16
18 | - [65,131, 134,130, 120,265] # P5/32
19 | - [282,180, 247,354, 512,387] # P6/64
20 |
21 | # P6-1280: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1280, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 19,27, 44,40, 38,94, 96,68, 86,152, 180,137, 140,301, 303,264, 238,542, 436,615, 739,380, 925,792
22 | anchors_p6_1280:
23 | - [19,27, 44,40, 38,94] # P3/8
24 | - [96,68, 86,152, 180,137] # P4/16
25 | - [140,301, 303,264, 238,542] # P5/32
26 | - [436,615, 739,380, 925,792] # P6/64
27 |
28 | # P6-1920: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1920, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 28,41, 67,59, 57,141, 144,103, 129,227, 270,205, 209,452, 455,396, 358,812, 653,922, 1109,570, 1387,1187
29 | anchors_p6_1920:
30 | - [28,41, 67,59, 57,141] # P3/8
31 | - [144,103, 129,227, 270,205] # P4/16
32 | - [209,452, 455,396, 358,812] # P5/32
33 | - [653,922, 1109,570, 1387,1187] # P6/64
34 |
35 |
36 | # P7 -------------------------------------------------------------------------------------------------------------------
37 | # P7-640: thr=0.25: 0.9962 BPR, 6.76 anchors past thr, n=15, img_size=640, metric_all=0.275/0.733-mean/best, past_thr=0.466-mean: 11,11, 13,30, 29,20, 30,46, 61,38, 39,92, 78,80, 146,66, 79,163, 149,150, 321,143, 157,303, 257,402, 359,290, 524,372
38 | anchors_p7_640:
39 | - [11,11, 13,30, 29,20] # P3/8
40 | - [30,46, 61,38, 39,92] # P4/16
41 | - [78,80, 146,66, 79,163] # P5/32
42 | - [149,150, 321,143, 157,303] # P6/64
43 | - [257,402, 359,290, 524,372] # P7/128
44 |
45 | # P7-1280: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1280, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 19,22, 54,36, 32,77, 70,83, 138,71, 75,173, 165,159, 148,334, 375,151, 334,317, 251,626, 499,474, 750,326, 534,814, 1079,818
46 | anchors_p7_1280:
47 | - [19,22, 54,36, 32,77] # P3/8
48 | - [70,83, 138,71, 75,173] # P4/16
49 | - [165,159, 148,334, 375,151] # P5/32
50 | - [334,317, 251,626, 499,474] # P6/64
51 | - [750,326, 534,814, 1079,818] # P7/128
52 |
53 | # P7-1920: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1920, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 29,34, 81,55, 47,115, 105,124, 207,107, 113,259, 247,238, 222,500, 563,227, 501,476, 376,939, 749,711, 1126,489, 801,1222, 1618,1227
54 | anchors_p7_1920:
55 | - [29,34, 81,55, 47,115] # P3/8
56 | - [105,124, 207,107, 113,259] # P4/16
57 | - [247,238, 222,500, 563,227] # P5/32
58 | - [501,476, 376,939, 749,711] # P6/64
59 | - [1126,489, 801,1222, 1618,1227] # P7/128
60 |
--------------------------------------------------------------------------------
/models/hub/yolov3-spp.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # darknet53 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [32, 3, 1]], # 0
16 | [-1, 1, Conv, [64, 3, 2]], # 1-P1/2
17 | [-1, 1, Bottleneck, [64]],
18 | [-1, 1, Conv, [128, 3, 2]], # 3-P2/4
19 | [-1, 2, Bottleneck, [128]],
20 | [-1, 1, Conv, [256, 3, 2]], # 5-P3/8
21 | [-1, 8, Bottleneck, [256]],
22 | [-1, 1, Conv, [512, 3, 2]], # 7-P4/16
23 | [-1, 8, Bottleneck, [512]],
24 | [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32
25 | [-1, 4, Bottleneck, [1024]], # 10
26 | ]
27 |
28 | # YOLOv3-SPP head
29 | head:
30 | [[-1, 1, Bottleneck, [1024, False]],
31 | [-1, 1, SPP, [512, [5, 9, 13]]],
32 | [-1, 1, Conv, [1024, 3, 1]],
33 | [-1, 1, Conv, [512, 1, 1]],
34 | [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large)
35 |
36 | [-2, 1, Conv, [256, 1, 1]],
37 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
38 | [[-1, 8], 1, Concat, [1]], # cat backbone P4
39 | [-1, 1, Bottleneck, [512, False]],
40 | [-1, 1, Bottleneck, [512, False]],
41 | [-1, 1, Conv, [256, 1, 1]],
42 | [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium)
43 |
44 | [-2, 1, Conv, [128, 1, 1]],
45 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
46 | [[-1, 6], 1, Concat, [1]], # cat backbone P3
47 | [-1, 1, Bottleneck, [256, False]],
48 | [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small)
49 |
50 | [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
51 | ]
52 |
--------------------------------------------------------------------------------
/models/hub/yolov3-tiny.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 | anchors:
8 | - [10,14, 23,27, 37,58] # P4/16
9 | - [81,82, 135,169, 344,319] # P5/32
10 |
11 | # YOLOv3-tiny backbone
12 | backbone:
13 | # [from, number, module, args]
14 | [[-1, 1, Conv, [16, 3, 1]], # 0
15 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 1-P1/2
16 | [-1, 1, Conv, [32, 3, 1]],
17 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 3-P2/4
18 | [-1, 1, Conv, [64, 3, 1]],
19 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 5-P3/8
20 | [-1, 1, Conv, [128, 3, 1]],
21 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 7-P4/16
22 | [-1, 1, Conv, [256, 3, 1]],
23 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 9-P5/32
24 | [-1, 1, Conv, [512, 3, 1]],
25 | [-1, 1, nn.ZeroPad2d, [[0, 1, 0, 1]]], # 11
26 | [-1, 1, nn.MaxPool2d, [2, 1, 0]], # 12
27 | ]
28 |
29 | # YOLOv3-tiny head
30 | head:
31 | [[-1, 1, Conv, [1024, 3, 1]],
32 | [-1, 1, Conv, [256, 1, 1]],
33 | [-1, 1, Conv, [512, 3, 1]], # 15 (P5/32-large)
34 |
35 | [-2, 1, Conv, [128, 1, 1]],
36 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
37 | [[-1, 8], 1, Concat, [1]], # cat backbone P4
38 | [-1, 1, Conv, [256, 3, 1]], # 19 (P4/16-medium)
39 |
40 | [[19, 15], 1, Detect, [nc, anchors]], # Detect(P4, P5)
41 | ]
42 |
--------------------------------------------------------------------------------
/models/hub/yolov3.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # darknet53 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [32, 3, 1]], # 0
16 | [-1, 1, Conv, [64, 3, 2]], # 1-P1/2
17 | [-1, 1, Bottleneck, [64]],
18 | [-1, 1, Conv, [128, 3, 2]], # 3-P2/4
19 | [-1, 2, Bottleneck, [128]],
20 | [-1, 1, Conv, [256, 3, 2]], # 5-P3/8
21 | [-1, 8, Bottleneck, [256]],
22 | [-1, 1, Conv, [512, 3, 2]], # 7-P4/16
23 | [-1, 8, Bottleneck, [512]],
24 | [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32
25 | [-1, 4, Bottleneck, [1024]], # 10
26 | ]
27 |
28 | # YOLOv3 head
29 | head:
30 | [[-1, 1, Bottleneck, [1024, False]],
31 | [-1, 1, Conv, [512, 1, 1]],
32 | [-1, 1, Conv, [1024, 3, 1]],
33 | [-1, 1, Conv, [512, 1, 1]],
34 | [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large)
35 |
36 | [-2, 1, Conv, [256, 1, 1]],
37 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
38 | [[-1, 8], 1, Concat, [1]], # cat backbone P4
39 | [-1, 1, Bottleneck, [512, False]],
40 | [-1, 1, Bottleneck, [512, False]],
41 | [-1, 1, Conv, [256, 1, 1]],
42 | [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium)
43 |
44 | [-2, 1, Conv, [128, 1, 1]],
45 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
46 | [[-1, 6], 1, Concat, [1]], # cat backbone P3
47 | [-1, 1, Bottleneck, [256, False]],
48 | [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small)
49 |
50 | [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
51 | ]
52 |
--------------------------------------------------------------------------------
/models/hub/yolov5-bifpn.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3, [1024]],
24 | [-1, 1, SPPF, [1024, 5]], # 9
25 | ]
26 |
27 | # YOLOv5 v6.0 BiFPN head
28 | head:
29 | [[-1, 1, Conv, [512, 1, 1]],
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
32 | [-1, 3, C3, [512, False]], # 13
33 |
34 | [-1, 1, Conv, [256, 1, 1]],
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
38 |
39 | [-1, 1, Conv, [256, 3, 2]],
40 | [[-1, 14, 6], 1, Concat, [1]], # cat P4 <--- BiFPN change
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
42 |
43 | [-1, 1, Conv, [512, 3, 2]],
44 | [[-1, 10], 1, Concat, [1]], # cat head P5
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
46 |
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
48 | ]
49 |
--------------------------------------------------------------------------------
/models/hub/yolov5-fpn.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3, [1024]],
24 | [-1, 1, SPPF, [1024, 5]], # 9
25 | ]
26 |
27 | # YOLOv5 v6.0 FPN head
28 | head:
29 | [[-1, 3, C3, [1024, False]], # 10 (P5/32-large)
30 |
31 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
32 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
33 | [-1, 1, Conv, [512, 1, 1]],
34 | [-1, 3, C3, [512, False]], # 14 (P4/16-medium)
35 |
36 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
37 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
38 | [-1, 1, Conv, [256, 1, 1]],
39 | [-1, 3, C3, [256, False]], # 18 (P3/8-small)
40 |
41 | [[18, 14, 10], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
42 | ]
43 |
--------------------------------------------------------------------------------
/models/hub/yolov5-p2.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 | anchors: 3 # AutoAnchor evolves 3 anchors per P output layer
8 |
9 | # YOLOv5 v6.0 backbone
10 | backbone:
11 | # [from, number, module, args]
12 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
13 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
14 | [-1, 3, C3, [128]],
15 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
16 | [-1, 6, C3, [256]],
17 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
18 | [-1, 9, C3, [512]],
19 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
20 | [-1, 3, C3, [1024]],
21 | [-1, 1, SPPF, [1024, 5]], # 9
22 | ]
23 |
24 | # YOLOv5 v6.0 head with (P2, P3, P4, P5) outputs
25 | head:
26 | [[-1, 1, Conv, [512, 1, 1]],
27 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
28 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
29 | [-1, 3, C3, [512, False]], # 13
30 |
31 | [-1, 1, Conv, [256, 1, 1]],
32 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
33 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
34 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
35 |
36 | [-1, 1, Conv, [128, 1, 1]],
37 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
38 | [[-1, 2], 1, Concat, [1]], # cat backbone P2
39 | [-1, 1, C3, [128, False]], # 21 (P2/4-xsmall)
40 |
41 | [-1, 1, Conv, [128, 3, 2]],
42 | [[-1, 18], 1, Concat, [1]], # cat head P3
43 | [-1, 3, C3, [256, False]], # 24 (P3/8-small)
44 |
45 | [-1, 1, Conv, [256, 3, 2]],
46 | [[-1, 14], 1, Concat, [1]], # cat head P4
47 | [-1, 3, C3, [512, False]], # 27 (P4/16-medium)
48 |
49 | [-1, 1, Conv, [512, 3, 2]],
50 | [[-1, 10], 1, Concat, [1]], # cat head P5
51 | [-1, 3, C3, [1024, False]], # 30 (P5/32-large)
52 |
53 | [[21, 24, 27, 30], 1, Detect, [nc, anchors]], # Detect(P2, P3, P4, P5)
54 | ]
55 |
--------------------------------------------------------------------------------
/models/hub/yolov5-p34.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.33 # model depth multiple
6 | width_multiple: 0.50 # layer channel multiple
7 | anchors: 3 # AutoAnchor evolves 3 anchors per P output layer
8 |
9 | # YOLOv5 v6.0 backbone
10 | backbone:
11 | # [from, number, module, args]
12 | [ [ -1, 1, Conv, [ 64, 6, 2, 2 ] ], # 0-P1/2
13 | [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4
14 | [ -1, 3, C3, [ 128 ] ],
15 | [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8
16 | [ -1, 6, C3, [ 256 ] ],
17 | [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16
18 | [ -1, 9, C3, [ 512 ] ],
19 | [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 7-P5/32
20 | [ -1, 3, C3, [ 1024 ] ],
21 | [ -1, 1, SPPF, [ 1024, 5 ] ], # 9
22 | ]
23 |
24 | # YOLOv5 v6.0 head with (P3, P4) outputs
25 | head:
26 | [ [ -1, 1, Conv, [ 512, 1, 1 ] ],
27 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
28 | [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4
29 | [ -1, 3, C3, [ 512, False ] ], # 13
30 |
31 | [ -1, 1, Conv, [ 256, 1, 1 ] ],
32 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
33 | [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3
34 | [ -1, 3, C3, [ 256, False ] ], # 17 (P3/8-small)
35 |
36 | [ -1, 1, Conv, [ 256, 3, 2 ] ],
37 | [ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P4
38 | [ -1, 3, C3, [ 512, False ] ], # 20 (P4/16-medium)
39 |
40 | [ [ 17, 20 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4)
41 | ]
42 |
--------------------------------------------------------------------------------
/models/hub/yolov5-p6.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 | anchors: 3 # AutoAnchor evolves 3 anchors per P output layer
8 |
9 | # YOLOv5 v6.0 backbone
10 | backbone:
11 | # [from, number, module, args]
12 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
13 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
14 | [-1, 3, C3, [128]],
15 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
16 | [-1, 6, C3, [256]],
17 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
18 | [-1, 9, C3, [512]],
19 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32
20 | [-1, 3, C3, [768]],
21 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64
22 | [-1, 3, C3, [1024]],
23 | [-1, 1, SPPF, [1024, 5]], # 11
24 | ]
25 |
26 | # YOLOv5 v6.0 head with (P3, P4, P5, P6) outputs
27 | head:
28 | [[-1, 1, Conv, [768, 1, 1]],
29 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
30 | [[-1, 8], 1, Concat, [1]], # cat backbone P5
31 | [-1, 3, C3, [768, False]], # 15
32 |
33 | [-1, 1, Conv, [512, 1, 1]],
34 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
35 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
36 | [-1, 3, C3, [512, False]], # 19
37 |
38 | [-1, 1, Conv, [256, 1, 1]],
39 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
40 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
41 | [-1, 3, C3, [256, False]], # 23 (P3/8-small)
42 |
43 | [-1, 1, Conv, [256, 3, 2]],
44 | [[-1, 20], 1, Concat, [1]], # cat head P4
45 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium)
46 |
47 | [-1, 1, Conv, [512, 3, 2]],
48 | [[-1, 16], 1, Concat, [1]], # cat head P5
49 | [-1, 3, C3, [768, False]], # 29 (P5/32-large)
50 |
51 | [-1, 1, Conv, [768, 3, 2]],
52 | [[-1, 12], 1, Concat, [1]], # cat head P6
53 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge)
54 |
55 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6)
56 | ]
57 |
--------------------------------------------------------------------------------
/models/hub/yolov5-p7.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 | anchors: 3 # AutoAnchor evolves 3 anchors per P output layer
8 |
9 | # YOLOv5 v6.0 backbone
10 | backbone:
11 | # [from, number, module, args]
12 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
13 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
14 | [-1, 3, C3, [128]],
15 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
16 | [-1, 6, C3, [256]],
17 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
18 | [-1, 9, C3, [512]],
19 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32
20 | [-1, 3, C3, [768]],
21 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64
22 | [-1, 3, C3, [1024]],
23 | [-1, 1, Conv, [1280, 3, 2]], # 11-P7/128
24 | [-1, 3, C3, [1280]],
25 | [-1, 1, SPPF, [1280, 5]], # 13
26 | ]
27 |
28 | # YOLOv5 v6.0 head with (P3, P4, P5, P6, P7) outputs
29 | head:
30 | [[-1, 1, Conv, [1024, 1, 1]],
31 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
32 | [[-1, 10], 1, Concat, [1]], # cat backbone P6
33 | [-1, 3, C3, [1024, False]], # 17
34 |
35 | [-1, 1, Conv, [768, 1, 1]],
36 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
37 | [[-1, 8], 1, Concat, [1]], # cat backbone P5
38 | [-1, 3, C3, [768, False]], # 21
39 |
40 | [-1, 1, Conv, [512, 1, 1]],
41 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
42 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
43 | [-1, 3, C3, [512, False]], # 25
44 |
45 | [-1, 1, Conv, [256, 1, 1]],
46 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
47 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
48 | [-1, 3, C3, [256, False]], # 29 (P3/8-small)
49 |
50 | [-1, 1, Conv, [256, 3, 2]],
51 | [[-1, 26], 1, Concat, [1]], # cat head P4
52 | [-1, 3, C3, [512, False]], # 32 (P4/16-medium)
53 |
54 | [-1, 1, Conv, [512, 3, 2]],
55 | [[-1, 22], 1, Concat, [1]], # cat head P5
56 | [-1, 3, C3, [768, False]], # 35 (P5/32-large)
57 |
58 | [-1, 1, Conv, [768, 3, 2]],
59 | [[-1, 18], 1, Concat, [1]], # cat head P6
60 | [-1, 3, C3, [1024, False]], # 38 (P6/64-xlarge)
61 |
62 | [-1, 1, Conv, [1024, 3, 2]],
63 | [[-1, 14], 1, Concat, [1]], # cat head P7
64 | [-1, 3, C3, [1280, False]], # 41 (P7/128-xxlarge)
65 |
66 | [[29, 32, 35, 38, 41], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6, P7)
67 | ]
68 |
--------------------------------------------------------------------------------
/models/hub/yolov5-panet.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3, [1024]],
24 | [-1, 1, SPPF, [1024, 5]], # 9
25 | ]
26 |
27 | # YOLOv5 v6.0 PANet head
28 | head:
29 | [[-1, 1, Conv, [512, 1, 1]],
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
32 | [-1, 3, C3, [512, False]], # 13
33 |
34 | [-1, 1, Conv, [256, 1, 1]],
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
38 |
39 | [-1, 1, Conv, [256, 3, 2]],
40 | [[-1, 14], 1, Concat, [1]], # cat head P4
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
42 |
43 | [-1, 1, Conv, [512, 3, 2]],
44 | [[-1, 10], 1, Concat, [1]], # cat head P5
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
46 |
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
48 | ]
49 |
--------------------------------------------------------------------------------
/models/hub/yolov5l6.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 | anchors:
8 | - [19,27, 44,40, 38,94] # P3/8
9 | - [96,68, 86,152, 180,137] # P4/16
10 | - [140,301, 303,264, 238,542] # P5/32
11 | - [436,615, 739,380, 925,792] # P6/64
12 |
13 | # YOLOv5 v6.0 backbone
14 | backbone:
15 | # [from, number, module, args]
16 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
18 | [-1, 3, C3, [128]],
19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
20 | [-1, 6, C3, [256]],
21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
22 | [-1, 9, C3, [512]],
23 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32
24 | [-1, 3, C3, [768]],
25 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64
26 | [-1, 3, C3, [1024]],
27 | [-1, 1, SPPF, [1024, 5]], # 11
28 | ]
29 |
30 | # YOLOv5 v6.0 head
31 | head:
32 | [[-1, 1, Conv, [768, 1, 1]],
33 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
34 | [[-1, 8], 1, Concat, [1]], # cat backbone P5
35 | [-1, 3, C3, [768, False]], # 15
36 |
37 | [-1, 1, Conv, [512, 1, 1]],
38 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
39 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
40 | [-1, 3, C3, [512, False]], # 19
41 |
42 | [-1, 1, Conv, [256, 1, 1]],
43 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
44 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
45 | [-1, 3, C3, [256, False]], # 23 (P3/8-small)
46 |
47 | [-1, 1, Conv, [256, 3, 2]],
48 | [[-1, 20], 1, Concat, [1]], # cat head P4
49 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium)
50 |
51 | [-1, 1, Conv, [512, 3, 2]],
52 | [[-1, 16], 1, Concat, [1]], # cat head P5
53 | [-1, 3, C3, [768, False]], # 29 (P5/32-large)
54 |
55 | [-1, 1, Conv, [768, 3, 2]],
56 | [[-1, 12], 1, Concat, [1]], # cat head P6
57 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge)
58 |
59 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6)
60 | ]
61 |
--------------------------------------------------------------------------------
/models/hub/yolov5m6.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.67 # model depth multiple
6 | width_multiple: 0.75 # layer channel multiple
7 | anchors:
8 | - [19,27, 44,40, 38,94] # P3/8
9 | - [96,68, 86,152, 180,137] # P4/16
10 | - [140,301, 303,264, 238,542] # P5/32
11 | - [436,615, 739,380, 925,792] # P6/64
12 |
13 | # YOLOv5 v6.0 backbone
14 | backbone:
15 | # [from, number, module, args]
16 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
18 | [-1, 3, C3, [128]],
19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
20 | [-1, 6, C3, [256]],
21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
22 | [-1, 9, C3, [512]],
23 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32
24 | [-1, 3, C3, [768]],
25 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64
26 | [-1, 3, C3, [1024]],
27 | [-1, 1, SPPF, [1024, 5]], # 11
28 | ]
29 |
30 | # YOLOv5 v6.0 head
31 | head:
32 | [[-1, 1, Conv, [768, 1, 1]],
33 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
34 | [[-1, 8], 1, Concat, [1]], # cat backbone P5
35 | [-1, 3, C3, [768, False]], # 15
36 |
37 | [-1, 1, Conv, [512, 1, 1]],
38 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
39 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
40 | [-1, 3, C3, [512, False]], # 19
41 |
42 | [-1, 1, Conv, [256, 1, 1]],
43 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
44 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
45 | [-1, 3, C3, [256, False]], # 23 (P3/8-small)
46 |
47 | [-1, 1, Conv, [256, 3, 2]],
48 | [[-1, 20], 1, Concat, [1]], # cat head P4
49 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium)
50 |
51 | [-1, 1, Conv, [512, 3, 2]],
52 | [[-1, 16], 1, Concat, [1]], # cat head P5
53 | [-1, 3, C3, [768, False]], # 29 (P5/32-large)
54 |
55 | [-1, 1, Conv, [768, 3, 2]],
56 | [[-1, 12], 1, Concat, [1]], # cat head P6
57 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge)
58 |
59 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6)
60 | ]
61 |
--------------------------------------------------------------------------------
/models/hub/yolov5n6.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.33 # model depth multiple
6 | width_multiple: 0.25 # layer channel multiple
7 | anchors:
8 | - [19,27, 44,40, 38,94] # P3/8
9 | - [96,68, 86,152, 180,137] # P4/16
10 | - [140,301, 303,264, 238,542] # P5/32
11 | - [436,615, 739,380, 925,792] # P6/64
12 |
13 | # YOLOv5 v6.0 backbone
14 | backbone:
15 | # [from, number, module, args]
16 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
18 | [-1, 3, C3, [128]],
19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
20 | [-1, 6, C3, [256]],
21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
22 | [-1, 9, C3, [512]],
23 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32
24 | [-1, 3, C3, [768]],
25 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64
26 | [-1, 3, C3, [1024]],
27 | [-1, 1, SPPF, [1024, 5]], # 11
28 | ]
29 |
30 | # YOLOv5 v6.0 head
31 | head:
32 | [[-1, 1, Conv, [768, 1, 1]],
33 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
34 | [[-1, 8], 1, Concat, [1]], # cat backbone P5
35 | [-1, 3, C3, [768, False]], # 15
36 |
37 | [-1, 1, Conv, [512, 1, 1]],
38 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
39 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
40 | [-1, 3, C3, [512, False]], # 19
41 |
42 | [-1, 1, Conv, [256, 1, 1]],
43 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
44 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
45 | [-1, 3, C3, [256, False]], # 23 (P3/8-small)
46 |
47 | [-1, 1, Conv, [256, 3, 2]],
48 | [[-1, 20], 1, Concat, [1]], # cat head P4
49 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium)
50 |
51 | [-1, 1, Conv, [512, 3, 2]],
52 | [[-1, 16], 1, Concat, [1]], # cat head P5
53 | [-1, 3, C3, [768, False]], # 29 (P5/32-large)
54 |
55 | [-1, 1, Conv, [768, 3, 2]],
56 | [[-1, 12], 1, Concat, [1]], # cat head P6
57 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge)
58 |
59 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6)
60 | ]
61 |
--------------------------------------------------------------------------------
/models/hub/yolov5s-ghost.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.33 # model depth multiple
6 | width_multiple: 0.50 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, GhostConv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3Ghost, [128]],
18 | [-1, 1, GhostConv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3Ghost, [256]],
20 | [-1, 1, GhostConv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3Ghost, [512]],
22 | [-1, 1, GhostConv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3Ghost, [1024]],
24 | [-1, 1, SPPF, [1024, 5]], # 9
25 | ]
26 |
27 | # YOLOv5 v6.0 head
28 | head:
29 | [[-1, 1, GhostConv, [512, 1, 1]],
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
32 | [-1, 3, C3Ghost, [512, False]], # 13
33 |
34 | [-1, 1, GhostConv, [256, 1, 1]],
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
37 | [-1, 3, C3Ghost, [256, False]], # 17 (P3/8-small)
38 |
39 | [-1, 1, GhostConv, [256, 3, 2]],
40 | [[-1, 14], 1, Concat, [1]], # cat head P4
41 | [-1, 3, C3Ghost, [512, False]], # 20 (P4/16-medium)
42 |
43 | [-1, 1, GhostConv, [512, 3, 2]],
44 | [[-1, 10], 1, Concat, [1]], # cat head P5
45 | [-1, 3, C3Ghost, [1024, False]], # 23 (P5/32-large)
46 |
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
48 | ]
49 |
--------------------------------------------------------------------------------
/models/hub/yolov5s-transformer.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.33 # model depth multiple
6 | width_multiple: 0.50 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
   [-1, 3, C3TR, [1024]], # 8 <--- C3TR() Transformer module
24 | [-1, 1, SPPF, [1024, 5]], # 9
25 | ]
26 |
27 | # YOLOv5 v6.0 head
28 | head:
29 | [[-1, 1, Conv, [512, 1, 1]],
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
32 | [-1, 3, C3, [512, False]], # 13
33 |
34 | [-1, 1, Conv, [256, 1, 1]],
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
38 |
39 | [-1, 1, Conv, [256, 3, 2]],
40 | [[-1, 14], 1, Concat, [1]], # cat head P4
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
42 |
43 | [-1, 1, Conv, [512, 3, 2]],
44 | [[-1, 10], 1, Concat, [1]], # cat head P5
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
46 |
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
48 | ]
49 |
--------------------------------------------------------------------------------
/models/hub/yolov5s6.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.33 # model depth multiple
6 | width_multiple: 0.50 # layer channel multiple
7 | anchors:
8 | - [19,27, 44,40, 38,94] # P3/8
9 | - [96,68, 86,152, 180,137] # P4/16
10 | - [140,301, 303,264, 238,542] # P5/32
11 | - [436,615, 739,380, 925,792] # P6/64
12 |
13 | # YOLOv5 v6.0 backbone
14 | backbone:
15 | # [from, number, module, args]
16 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
18 | [-1, 3, C3, [128]],
19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
20 | [-1, 6, C3, [256]],
21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
22 | [-1, 9, C3, [512]],
23 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32
24 | [-1, 3, C3, [768]],
25 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64
26 | [-1, 3, C3, [1024]],
27 | [-1, 1, SPPF, [1024, 5]], # 11
28 | ]
29 |
30 | # YOLOv5 v6.0 head
31 | head:
32 | [[-1, 1, Conv, [768, 1, 1]],
33 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
34 | [[-1, 8], 1, Concat, [1]], # cat backbone P5
35 | [-1, 3, C3, [768, False]], # 15
36 |
37 | [-1, 1, Conv, [512, 1, 1]],
38 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
39 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
40 | [-1, 3, C3, [512, False]], # 19
41 |
42 | [-1, 1, Conv, [256, 1, 1]],
43 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
44 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
45 | [-1, 3, C3, [256, False]], # 23 (P3/8-small)
46 |
47 | [-1, 1, Conv, [256, 3, 2]],
48 | [[-1, 20], 1, Concat, [1]], # cat head P4
49 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium)
50 |
51 | [-1, 1, Conv, [512, 3, 2]],
52 | [[-1, 16], 1, Concat, [1]], # cat head P5
53 | [-1, 3, C3, [768, False]], # 29 (P5/32-large)
54 |
55 | [-1, 1, Conv, [768, 3, 2]],
56 | [[-1, 12], 1, Concat, [1]], # cat head P6
57 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge)
58 |
59 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6)
60 | ]
61 |
--------------------------------------------------------------------------------
/models/hub/yolov5x6.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.33 # model depth multiple
6 | width_multiple: 1.25 # layer channel multiple
7 | anchors:
8 | - [19,27, 44,40, 38,94] # P3/8
9 | - [96,68, 86,152, 180,137] # P4/16
10 | - [140,301, 303,264, 238,542] # P5/32
11 | - [436,615, 739,380, 925,792] # P6/64
12 |
13 | # YOLOv5 v6.0 backbone
14 | backbone:
15 | # [from, number, module, args]
16 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
18 | [-1, 3, C3, [128]],
19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
20 | [-1, 6, C3, [256]],
21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
22 | [-1, 9, C3, [512]],
23 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32
24 | [-1, 3, C3, [768]],
25 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64
26 | [-1, 3, C3, [1024]],
27 | [-1, 1, SPPF, [1024, 5]], # 11
28 | ]
29 |
30 | # YOLOv5 v6.0 head
31 | head:
32 | [[-1, 1, Conv, [768, 1, 1]],
33 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
34 | [[-1, 8], 1, Concat, [1]], # cat backbone P5
35 | [-1, 3, C3, [768, False]], # 15
36 |
37 | [-1, 1, Conv, [512, 1, 1]],
38 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
39 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
40 | [-1, 3, C3, [512, False]], # 19
41 |
42 | [-1, 1, Conv, [256, 1, 1]],
43 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
44 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
45 | [-1, 3, C3, [256, False]], # 23 (P3/8-small)
46 |
47 | [-1, 1, Conv, [256, 3, 2]],
48 | [[-1, 20], 1, Concat, [1]], # cat head P4
49 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium)
50 |
51 | [-1, 1, Conv, [512, 3, 2]],
52 | [[-1, 16], 1, Concat, [1]], # cat head P5
53 | [-1, 3, C3, [768, False]], # 29 (P5/32-large)
54 |
55 | [-1, 1, Conv, [768, 3, 2]],
56 | [[-1, 12], 1, Concat, [1]], # cat head P6
57 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge)
58 |
59 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6)
60 | ]
61 |
--------------------------------------------------------------------------------
/models/yolov5l.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3, [1024]],
24 | [-1, 1, SPPF, [1024, 5]], # 9
25 | ]
26 |
27 | # YOLOv5 v6.0 head
28 | head:
29 | [[-1, 1, Conv, [512, 1, 1]],
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
32 | [-1, 3, C3, [512, False]], # 13
33 |
34 | [-1, 1, Conv, [256, 1, 1]],
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
38 |
39 | [-1, 1, Conv, [256, 3, 2]],
40 | [[-1, 14], 1, Concat, [1]], # cat head P4
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
42 |
43 | [-1, 1, Conv, [512, 3, 2]],
44 | [[-1, 10], 1, Concat, [1]], # cat head P5
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
46 |
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
48 | ]
49 |
--------------------------------------------------------------------------------
/models/yolov5m.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.67 # model depth multiple
6 | width_multiple: 0.75 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3, [1024]],
24 | [-1, 1, SPPF, [1024, 5]], # 9
25 | ]
26 |
27 | # YOLOv5 v6.0 head
28 | head:
29 | [[-1, 1, Conv, [512, 1, 1]],
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
32 | [-1, 3, C3, [512, False]], # 13
33 |
34 | [-1, 1, Conv, [256, 1, 1]],
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
38 |
39 | [-1, 1, Conv, [256, 3, 2]],
40 | [[-1, 14], 1, Concat, [1]], # cat head P4
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
42 |
43 | [-1, 1, Conv, [512, 3, 2]],
44 | [[-1, 10], 1, Concat, [1]], # cat head P5
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
46 |
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
48 | ]
49 |
--------------------------------------------------------------------------------
/models/yolov5n.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.33 # model depth multiple
6 | width_multiple: 0.25 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3, [1024]],
24 | [-1, 1, SPPF, [1024, 5]], # 9
25 | ]
26 |
27 | # YOLOv5 v6.0 head
28 | head:
29 | [[-1, 1, Conv, [512, 1, 1]],
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
32 | [-1, 3, C3, [512, False]], # 13
33 |
34 | [-1, 1, Conv, [256, 1, 1]],
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
38 |
39 | [-1, 1, Conv, [256, 3, 2]],
40 | [[-1, 14], 1, Concat, [1]], # cat head P4
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
42 |
43 | [-1, 1, Conv, [512, 3, 2]],
44 | [[-1, 10], 1, Concat, [1]], # cat head P5
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
46 |
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
48 | ]
49 |
--------------------------------------------------------------------------------
/models/yolov5s.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.33 # model depth multiple
6 | width_multiple: 0.50 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3, [1024]],
24 | [-1, 1, SPPF, [1024, 5]], # 9
25 | ]
26 |
27 | # YOLOv5 v6.0 head
28 | head:
29 | [[-1, 1, Conv, [512, 1, 1]],
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
32 | [-1, 3, C3, [512, False]], # 13
33 |
34 | [-1, 1, Conv, [256, 1, 1]],
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
38 |
39 | [-1, 1, Conv, [256, 3, 2]],
40 | [[-1, 14], 1, Concat, [1]], # cat head P4
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
42 |
43 | [-1, 1, Conv, [512, 3, 2]],
44 | [[-1, 10], 1, Concat, [1]], # cat head P5
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
46 |
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
48 | ]
49 |
--------------------------------------------------------------------------------
/models/yolov5x.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.33 # model depth multiple
6 | width_multiple: 1.25 # layer channel multiple
7 | anchors:
8 | - [10,13, 16,30, 33,23] # P3/8
9 | - [30,61, 62,45, 59,119] # P4/16
10 | - [116,90, 156,198, 373,326] # P5/32
11 |
12 | # YOLOv5 v6.0 backbone
13 | backbone:
14 | # [from, number, module, args]
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17 | [-1, 3, C3, [128]],
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19 | [-1, 6, C3, [256]],
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21 | [-1, 9, C3, [512]],
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23 | [-1, 3, C3, [1024]],
24 | [-1, 1, SPPF, [1024, 5]], # 9
25 | ]
26 |
27 | # YOLOv5 v6.0 head
28 | head:
29 | [[-1, 1, Conv, [512, 1, 1]],
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
32 | [-1, 3, C3, [512, False]], # 13
33 |
34 | [-1, 1, Conv, [256, 1, 1]],
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
38 |
39 | [-1, 1, Conv, [256, 3, 2]],
40 | [[-1, 14], 1, Concat, [1]], # cat head P4
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
42 |
43 | [-1, 1, Conv, [512, 3, 2]],
44 | [[-1, 10], 1, Concat, [1]], # cat head P5
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
46 |
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
48 | ]
49 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | # pip install -r requirements.txt
2 |
3 | # Base ----------------------------------------
4 | matplotlib>=3.2.2
5 | numpy>=1.18.5
6 | opencv-python>=4.1.2
7 | Pillow>=7.1.2
8 | PyYAML>=5.3.1
9 | requests>=2.23.0
10 | scipy>=1.4.1
11 | torch>=1.7.0
12 | torchvision>=0.8.1
13 | tqdm>=4.41.0
14 |
15 | # Logging -------------------------------------
16 | tensorboard>=2.4.1
17 | # wandb
18 |
19 | # Plotting ------------------------------------
20 | pandas>=1.1.4
21 | seaborn>=0.11.0
22 |
23 | # Export --------------------------------------
24 | # coremltools>=4.1 # CoreML export
25 | # onnx>=1.9.0 # ONNX export
26 | # onnx-simplifier>=0.3.6 # ONNX simplifier
27 | # scikit-learn==0.19.2 # CoreML quantization
28 | # tensorflow>=2.4.1 # TFLite export
29 | # tensorflowjs>=3.9.0 # TF.js export
30 | # openvino-dev # OpenVINO export
31 |
32 | # Extras --------------------------------------
33 | # albumentations>=1.0.3
34 | # Cython # for pycocotools https://github.com/cocodataset/cocoapi/issues/172
35 | # pycocotools>=2.0 # COCO mAP
36 | # roboflow
37 | thop # FLOPs computation
38 |
39 | easydict
40 |
--------------------------------------------------------------------------------
/tracker.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import torch
3 | import numpy as np
4 |
5 | from deep_sort.utils.parser import get_config
6 | from deep_sort.deep_sort import DeepSort
7 |
8 | cfg = get_config()
9 | cfg.merge_from_file("./deep_sort/configs/deep_sort.yaml")
10 | deepsort = DeepSort(cfg.DEEPSORT.REID_CKPT,
11 | max_dist=cfg.DEEPSORT.MAX_DIST, min_confidence=cfg.DEEPSORT.MIN_CONFIDENCE,
12 | nms_max_overlap=cfg.DEEPSORT.NMS_MAX_OVERLAP, max_iou_distance=cfg.DEEPSORT.MAX_IOU_DISTANCE,
13 | max_age=cfg.DEEPSORT.MAX_AGE, n_init=cfg.DEEPSORT.N_INIT, nn_budget=cfg.DEEPSORT.NN_BUDGET,
14 | use_cuda=True)
15 |
16 |
17 | def draw_bboxes(image, bboxes, line_thickness):
18 | line_thickness = line_thickness or round(
19 | 0.002 * (image.shape[0] + image.shape[1]) * 0.5) + 1
20 |
21 | list_pts = []
22 | point_radius = 4
23 |
24 | for (x1, y1, x2, y2, cls_id, pos_id) in bboxes:
25 | color = (0, 255, 0)
26 |
27 | # 撞线的点
28 | check_point_x = x1
29 | check_point_y = int(y1 + ((y2 - y1) * 0.6))
30 |
31 | c1, c2 = (x1, y1), (x2, y2)
32 | cv2.rectangle(image, c1, c2, color, thickness=line_thickness, lineType=cv2.LINE_AA)
33 |
34 | font_thickness = max(line_thickness - 1, 1)
35 | t_size = cv2.getTextSize(cls_id, 0, fontScale=line_thickness / 3, thickness=font_thickness)[0]
36 | c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
37 | cv2.rectangle(image, c1, c2, color, -1, cv2.LINE_AA) # filled
38 | cv2.putText(image, '{} ID-{}'.format(cls_id, pos_id), (c1[0], c1[1] - 2), 0, line_thickness / 3,
39 | [225, 255, 255], thickness=font_thickness, lineType=cv2.LINE_AA)
40 |
41 | list_pts.append([check_point_x - point_radius, check_point_y - point_radius])
42 | list_pts.append([check_point_x - point_radius, check_point_y + point_radius])
43 | list_pts.append([check_point_x + point_radius, check_point_y + point_radius])
44 | list_pts.append([check_point_x + point_radius, check_point_y - point_radius])
45 |
46 | ndarray_pts = np.array(list_pts, np.int32)
47 |
48 | cv2.fillPoly(image, [ndarray_pts], color=(0, 0, 255))
49 |
50 | list_pts.clear()
51 |
52 | return image
53 |
54 |
55 | def update(bboxes, image):
56 | bbox_xywh = []
57 | confs = []
58 | bboxes2draw = []
59 |
60 | if len(bboxes) > 0:
61 | for x1, y1, x2, y2, lbl, conf in bboxes:
62 | obj = [
63 | int((x1 + x2) * 0.5), int((y1 + y2) * 0.5),
64 | x2 - x1, y2 - y1
65 | ]
66 | bbox_xywh.append(obj)
67 | confs.append(conf)
68 |
69 | xywhs = torch.Tensor(bbox_xywh)
70 | confss = torch.Tensor(confs)
71 |
72 | outputs = deepsort.update(xywhs, confss, image)
73 |
74 | for x1, y1, x2, y2, track_id in list(outputs):
75 | # x1, y1, x2, y2, track_id = value
76 | center_x = (x1 + x2) * 0.5
77 | center_y = (y1 + y2) * 0.5
78 |
79 | label = search_label(center_x=center_x, center_y=center_y,
80 | bboxes_xyxy=bboxes, max_dist_threshold=20.0)
81 |
82 | bboxes2draw.append((x1, y1, x2, y2, label, track_id))
83 | pass
84 | pass
85 |
86 | return bboxes2draw
87 |
88 |
89 | def search_label(center_x, center_y, bboxes_xyxy, max_dist_threshold):
90 | """
91 | 在 yolov5 的 bbox 中搜索中心点最接近的label
92 | :param center_x:
93 | :param center_y:
94 | :param bboxes_xyxy:
95 | :param max_dist_threshold:
96 | :return: 字符串
97 | """
98 | label = ''
99 | # min_label = ''
100 | min_dist = -1.0
101 |
102 | for x1, y1, x2, y2, lbl, conf in bboxes_xyxy:
103 | center_x2 = (x1 + x2) * 0.5
104 | center_y2 = (y1 + y2) * 0.5
105 |
106 | # 横纵距离都小于 max_dist
107 | min_x = abs(center_x2 - center_x)
108 | min_y = abs(center_y2 - center_y)
109 |
110 | if min_x < max_dist_threshold and min_y < max_dist_threshold:
111 | # 距离阈值,判断是否在允许误差范围内
112 | # 取 x, y 方向上的距离平均值
113 | avg_dist = (min_x + min_y) * 0.5
114 | if min_dist == -1.0:
115 | # 第一次赋值
116 | min_dist = avg_dist
117 | # 赋值label
118 | label = lbl
119 | pass
120 | else:
121 | # 若不是第一次,则距离小的优先
122 | if avg_dist < min_dist:
123 | min_dist = avg_dist
124 | # label
125 | label = lbl
126 | pass
127 | pass
128 | pass
129 |
130 | return label
131 |
--------------------------------------------------------------------------------
/utils/__init__.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | """
3 | utils/initialization
4 | """
5 |
6 |
7 | def notebook_init(verbose=True):
8 | # Check system software and hardware
9 | print('Checking setup...')
10 |
11 | import os
12 | import shutil
13 |
14 | from utils.general import check_requirements, emojis, is_colab
15 | from utils.torch_utils import select_device # imports
16 |
17 | check_requirements(('psutil', 'IPython'))
18 | import psutil
19 | from IPython import display # to display images and clear console output
20 |
21 | if is_colab():
22 | shutil.rmtree('/content/sample_data', ignore_errors=True) # remove colab /sample_data directory
23 |
24 | if verbose:
25 | # System info
26 | # gb = 1 / 1000 ** 3 # bytes to GB
27 | gib = 1 / 1024 ** 3 # bytes to GiB
28 | ram = psutil.virtual_memory().total
29 | total, used, free = shutil.disk_usage("/")
30 | display.clear_output()
31 | s = f'({os.cpu_count()} CPUs, {ram * gib:.1f} GB RAM, {(total - free) * gib:.1f}/{total * gib:.1f} GB disk)'
32 | else:
33 | s = ''
34 |
35 | select_device(newline=False)
36 | print(emojis(f'Setup complete ✅ {s}'))
37 | return display
38 |
--------------------------------------------------------------------------------
/utils/activations.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | """
3 | Activation functions
4 | """
5 |
6 | import torch
7 | import torch.nn as nn
8 | import torch.nn.functional as F
9 |
10 |
11 | # SiLU https://arxiv.org/pdf/1606.08415.pdf ----------------------------------------------------------------------------
12 | class SiLU(nn.Module): # export-friendly version of nn.SiLU()
13 | @staticmethod
14 | def forward(x):
15 | return x * torch.sigmoid(x)
16 |
17 |
18 | class Hardswish(nn.Module): # export-friendly version of nn.Hardswish()
19 | @staticmethod
20 | def forward(x):
21 | # return x * F.hardsigmoid(x) # for TorchScript and CoreML
22 | return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0 # for TorchScript, CoreML and ONNX
23 |
24 |
25 | # Mish https://github.com/digantamisra98/Mish --------------------------------------------------------------------------
26 | class Mish(nn.Module):
27 | @staticmethod
28 | def forward(x):
29 | return x * F.softplus(x).tanh()
30 |
31 |
32 | class MemoryEfficientMish(nn.Module):
33 | class F(torch.autograd.Function):
34 | @staticmethod
35 | def forward(ctx, x):
36 | ctx.save_for_backward(x)
37 | return x.mul(torch.tanh(F.softplus(x))) # x * tanh(ln(1 + exp(x)))
38 |
39 | @staticmethod
40 | def backward(ctx, grad_output):
41 | x = ctx.saved_tensors[0]
42 | sx = torch.sigmoid(x)
43 | fx = F.softplus(x).tanh()
44 | return grad_output * (fx + x * sx * (1 - fx * fx))
45 |
46 | def forward(self, x):
47 | return self.F.apply(x)
48 |
49 |
50 | # FReLU https://arxiv.org/abs/2007.11824 -------------------------------------------------------------------------------
51 | class FReLU(nn.Module):
52 | def __init__(self, c1, k=3): # ch_in, kernel
53 | super().__init__()
54 | self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False)
55 | self.bn = nn.BatchNorm2d(c1)
56 |
57 | def forward(self, x):
58 | return torch.max(x, self.bn(self.conv(x)))
59 |
60 |
61 | # ACON https://arxiv.org/pdf/2009.04759.pdf ----------------------------------------------------------------------------
62 | class AconC(nn.Module):
63 | r""" ACON activation (activate or not).
64 | AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter
65 | according to "Activate or Not: Learning Customized Activation" .
66 | """
67 |
68 | def __init__(self, c1):
69 | super().__init__()
70 | self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
71 | self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
72 | self.beta = nn.Parameter(torch.ones(1, c1, 1, 1))
73 |
74 | def forward(self, x):
75 | dpx = (self.p1 - self.p2) * x
76 | return dpx * torch.sigmoid(self.beta * dpx) + self.p2 * x
77 |
78 |
79 | class MetaAconC(nn.Module):
80 | r""" ACON activation (activate or not).
81 | MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network
82 | according to "Activate or Not: Learning Customized Activation" .
83 | """
84 |
85 | def __init__(self, c1, k=1, s=1, r=16): # ch_in, kernel, stride, r
86 | super().__init__()
87 | c2 = max(r, c1 // r)
88 | self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
89 | self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
90 | self.fc1 = nn.Conv2d(c1, c2, k, s, bias=True)
91 | self.fc2 = nn.Conv2d(c2, c1, k, s, bias=True)
92 | # self.bn1 = nn.BatchNorm2d(c2)
93 | # self.bn2 = nn.BatchNorm2d(c1)
94 |
95 | def forward(self, x):
96 | y = x.mean(dim=2, keepdims=True).mean(dim=3, keepdims=True)
97 | # batch-size 1 bug/instabilities https://github.com/ultralytics/yolov5/issues/2891
98 | # beta = torch.sigmoid(self.bn2(self.fc2(self.bn1(self.fc1(y))))) # bug/unstable
99 | beta = torch.sigmoid(self.fc2(self.fc1(y))) # bug patch BN layers removed
100 | dpx = (self.p1 - self.p2) * x
101 | return dpx * torch.sigmoid(beta * dpx) + self.p2 * x
102 |
--------------------------------------------------------------------------------
/utils/autoanchor.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | """
3 | AutoAnchor utils
4 | """
5 |
6 | import random
7 |
8 | import numpy as np
9 | import torch
10 | import yaml
11 | from tqdm import tqdm
12 |
13 | from utils.general import LOGGER, colorstr, emojis
14 |
15 | PREFIX = colorstr('AutoAnchor: ')
16 |
17 |
18 | def check_anchor_order(m):
19 | # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary
20 | a = m.anchors.prod(-1).view(-1) # anchor area
21 | da = a[-1] - a[0] # delta a
22 | ds = m.stride[-1] - m.stride[0] # delta s
23 | if da.sign() != ds.sign(): # same order
24 | LOGGER.info(f'{PREFIX}Reversing anchor order')
25 | m.anchors[:] = m.anchors.flip(0)
26 |
27 |
28 | def check_anchors(dataset, model, thr=4.0, imgsz=640):
29 | # Check anchor fit to data, recompute if necessary
30 | m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect()
31 | shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True)
32 | scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1)) # augment scale
33 | wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float() # wh
34 |
35 | def metric(k): # compute metric
36 | r = wh[:, None] / k[None]
37 | x = torch.min(r, 1 / r).min(2)[0] # ratio metric
38 | best = x.max(1)[0] # best_x
39 | aat = (x > 1 / thr).float().sum(1).mean() # anchors above threshold
40 | bpr = (best > 1 / thr).float().mean() # best possible recall
41 | return bpr, aat
42 |
43 | anchors = m.anchors.clone() * m.stride.to(m.anchors.device).view(-1, 1, 1) # current anchors
44 | bpr, aat = metric(anchors.cpu().view(-1, 2))
45 | s = f'\n{PREFIX}{aat:.2f} anchors/target, {bpr:.3f} Best Possible Recall (BPR). '
46 | if bpr > 0.98: # threshold to recompute
47 | LOGGER.info(emojis(f'{s}Current anchors are a good fit to dataset ✅'))
48 | else:
49 | LOGGER.info(emojis(f'{s}Anchors are a poor fit to dataset ⚠️, attempting to improve...'))
50 | na = m.anchors.numel() // 2 # number of anchors
51 | try:
52 | anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False)
53 | except Exception as e:
54 | LOGGER.info(f'{PREFIX}ERROR: {e}')
55 | new_bpr = metric(anchors)[0]
56 | if new_bpr > bpr: # replace anchors
57 | anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors)
58 | m.anchors[:] = anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1) # loss
59 | check_anchor_order(m)
60 | LOGGER.info(f'{PREFIX}New anchors saved to model. Update model *.yaml to use these anchors in the future.')
61 | else:
62 | LOGGER.info(f'{PREFIX}Original anchors better than new anchors. Proceeding with original anchors.')
63 |
64 |
65 | def kmean_anchors(dataset='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True):
66 | """ Creates kmeans-evolved anchors from training dataset
67 |
68 | Arguments:
69 | dataset: path to data.yaml, or a loaded dataset
70 | n: number of anchors
71 | img_size: image size used for training
72 | thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0
73 | gen: generations to evolve anchors using genetic algorithm
74 | verbose: print all results
75 |
76 | Return:
77 | k: kmeans evolved anchors
78 |
79 | Usage:
80 | from utils.autoanchor import *; _ = kmean_anchors()
81 | """
82 | from scipy.cluster.vq import kmeans
83 |
84 | npr = np.random
85 | thr = 1 / thr
86 |
87 | def metric(k, wh): # compute metrics
88 | r = wh[:, None] / k[None]
89 | x = torch.min(r, 1 / r).min(2)[0] # ratio metric
90 | # x = wh_iou(wh, torch.tensor(k)) # iou metric
91 | return x, x.max(1)[0] # x, best_x
92 |
93 | def anchor_fitness(k): # mutation fitness
94 | _, best = metric(torch.tensor(k, dtype=torch.float32), wh)
95 | return (best * (best > thr).float()).mean() # fitness
96 |
97 | def print_results(k, verbose=True):
98 | k = k[np.argsort(k.prod(1))] # sort small to large
99 | x, best = metric(k, wh0)
100 | bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n # best possible recall, anch > thr
101 | s = f'{PREFIX}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr\n' \
102 | f'{PREFIX}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, ' \
103 | f'past_thr={x[x > thr].mean():.3f}-mean: '
104 | for i, x in enumerate(k):
105 | s += '%i,%i, ' % (round(x[0]), round(x[1]))
106 | if verbose:
107 | LOGGER.info(s[:-2])
108 | return k
109 |
110 | if isinstance(dataset, str): # *.yaml file
111 | with open(dataset, errors='ignore') as f:
112 | data_dict = yaml.safe_load(f) # model dict
113 | from utils.datasets import LoadImagesAndLabels
114 | dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True)
115 |
116 | # Get label wh
117 | shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True)
118 | wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)]) # wh
119 |
120 | # Filter
121 | i = (wh0 < 3.0).any(1).sum()
122 | if i:
123 | LOGGER.info(f'{PREFIX}WARNING: Extremely small objects found. {i} of {len(wh0)} labels are < 3 pixels in size.')
124 | wh = wh0[(wh0 >= 2.0).any(1)] # filter > 2 pixels
125 | # wh = wh * (npr.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1
126 |
127 | # Kmeans calculation
128 | LOGGER.info(f'{PREFIX}Running kmeans for {n} anchors on {len(wh)} points...')
129 | s = wh.std(0) # sigmas for whitening
130 | k = kmeans(wh / s, n, iter=30)[0] * s # points
131 | if len(k) != n: # kmeans may return fewer points than requested if wh is insufficient or too similar
132 | LOGGER.warning(f'{PREFIX}WARNING: scipy.cluster.vq.kmeans returned only {len(k)} of {n} requested points')
133 | k = np.sort(npr.rand(n * 2)).reshape(n, 2) * img_size # random init
134 | wh = torch.tensor(wh, dtype=torch.float32) # filtered
135 | wh0 = torch.tensor(wh0, dtype=torch.float32) # unfiltered
136 | k = print_results(k, verbose=False)
137 |
138 | # Plot
139 | # k, d = [None] * 20, [None] * 20
140 | # for i in tqdm(range(1, 21)):
141 | # k[i-1], d[i-1] = kmeans(wh / s, i) # points, mean distance
142 | # fig, ax = plt.subplots(1, 2, figsize=(14, 7), tight_layout=True)
143 | # ax = ax.ravel()
144 | # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.')
145 | # fig, ax = plt.subplots(1, 2, figsize=(14, 7)) # plot wh
146 | # ax[0].hist(wh[wh[:, 0]<100, 0],400)
147 | # ax[1].hist(wh[wh[:, 1]<100, 1],400)
148 | # fig.savefig('wh.png', dpi=200)
149 |
150 | # Evolve
151 | f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1 # fitness, generations, mutation prob, sigma
152 | pbar = tqdm(range(gen), desc=f'{PREFIX}Evolving anchors with Genetic Algorithm:') # progress bar
153 | for _ in pbar:
154 | v = np.ones(sh)
155 | while (v == 1).all(): # mutate until a change occurs (prevent duplicates)
156 | v = ((npr.random(sh) < mp) * random.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0)
157 | kg = (k.copy() * v).clip(min=2.0)
158 | fg = anchor_fitness(kg)
159 | if fg > f:
160 | f, k = fg, kg.copy()
161 | pbar.desc = f'{PREFIX}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}'
162 | if verbose:
163 | print_results(k, verbose)
164 |
165 | return print_results(k)
166 |
--------------------------------------------------------------------------------
/utils/autobatch.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | """
3 | Auto-batch utils
4 | """
5 |
6 | from copy import deepcopy
7 |
8 | import numpy as np
9 | import torch
10 | from torch.cuda import amp
11 |
12 | from utils.general import LOGGER, colorstr
13 | from utils.torch_utils import profile
14 |
15 |
def check_train_batch_size(model, imgsz=640):
    """Return an estimated optimal training batch size for `model` at image size `imgsz`.

    Profiles a deep copy of the model in train mode (inside an AMP autocast
    context) so the caller's model state is left untouched.
    """
    with amp.autocast():
        model_copy = deepcopy(model).train()
        return autobatch(model_copy, imgsz)
20 |
21 |
def autobatch(model, imgsz=640, fraction=0.9, batch_size=16):
    """Automatically estimate the largest batch size that keeps CUDA memory
    usage under `fraction` of total device memory.

    Args:
        model: YOLOv5 model already placed on its target device.
        imgsz: training image size (pixels).
        fraction: target fraction of total CUDA memory to occupy.
        batch_size: fallback batch size (CPU device, or profiling failure).

    Returns:
        int: estimated optimal batch size, or `batch_size` as a fallback.

    Usage:
        import torch
        from utils.autobatch import autobatch
        model = torch.hub.load('ultralytics/yolov5', 'yolov5s', autoshape=False)
        print(autobatch(model))
    """
    prefix = colorstr('AutoBatch: ')
    LOGGER.info(f'{prefix}Computing optimal batch size for --imgsz {imgsz}')
    device = next(model.parameters()).device  # get model device
    if device.type == 'cpu':
        LOGGER.info(f'{prefix}CUDA not detected, using default CPU batch-size {batch_size}')
        return batch_size

    # Inspect current CUDA memory state
    d = str(device).upper()  # 'CUDA:0'
    properties = torch.cuda.get_device_properties(device)  # device properties
    t = properties.total_memory / 1024 ** 3  # total (GiB)
    r = torch.cuda.memory_reserved(device) / 1024 ** 3  # reserved (GiB)
    a = torch.cuda.memory_allocated(device) / 1024 ** 3  # allocated (GiB)
    f = t - (r + a)  # free inside reserved
    LOGGER.info(f'{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free')

    # Profile memory usage at several candidate batch sizes
    batch_sizes = [1, 2, 4, 8, 16]
    results = []
    try:
        img = [torch.zeros(b, 3, imgsz, imgsz) for b in batch_sizes]
        results = profile(img, model, n=3, device=device)
    except Exception as e:
        LOGGER.warning(f'{prefix}{e}')

    # BUGFIX: previously `y` was referenced even when profile() raised before
    # assigning it, producing a NameError. If no measurements are available,
    # fall back to the default batch size instead of crashing.
    y = [x[2] for x in results if x]  # memory column [2]
    if not y:
        LOGGER.warning(f'{prefix}profiling failed, using default batch-size {batch_size}')
        return batch_size
    batch_sizes = batch_sizes[:len(y)]
    p = np.polyfit(batch_sizes, y, deg=1)  # first-degree fit: mem ≈ p[0]*batch + p[1]
    b = int((f * fraction - p[1]) / p[0])  # solve for batch size at the target memory budget
    LOGGER.info(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%)')
    return b
58 |
--------------------------------------------------------------------------------
/utils/aws/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dyh/win10_yolov5_deepsort_counting/adb686ee839e89177990c27f5da35bfae7ab4b9b/utils/aws/__init__.py
--------------------------------------------------------------------------------
/utils/aws/mime.sh:
--------------------------------------------------------------------------------
1 | # AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/
2 | # This script will run on every instance restart, not only on first start
3 | # --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA ---
4 |
5 | Content-Type: multipart/mixed; boundary="//"
6 | MIME-Version: 1.0
7 |
8 | --//
9 | Content-Type: text/cloud-config; charset="us-ascii"
10 | MIME-Version: 1.0
11 | Content-Transfer-Encoding: 7bit
12 | Content-Disposition: attachment; filename="cloud-config.txt"
13 |
14 | #cloud-config
15 | cloud_final_modules:
16 | - [scripts-user, always]
17 |
18 | --//
19 | Content-Type: text/x-shellscript; charset="us-ascii"
20 | MIME-Version: 1.0
21 | Content-Transfer-Encoding: 7bit
22 | Content-Disposition: attachment; filename="userdata.txt"
23 |
24 | #!/bin/bash
25 | # --- paste contents of userdata.sh here ---
26 | --//
27 |
--------------------------------------------------------------------------------
/utils/aws/resume.py:
--------------------------------------------------------------------------------
1 | # Resume all interrupted trainings in yolov5/ dir including DDP trainings
2 | # Usage: $ python utils/aws/resume.py
3 |
4 | import os
5 | import sys
6 | from pathlib import Path
7 |
8 | import torch
9 | import yaml
10 |
FILE = Path(__file__).resolve()
ROOT = FILE.parents[2]  # YOLOv5 root directory (two levels above utils/aws/)
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH so repo-local modules resolve when run as a script
15 |
# Scan the current directory tree for last.pt checkpoints and re-launch
# training for every run that was interrupted (optimizer still present).
port = 0  # --master_port, incremented per DDP run so concurrent runs don't collide
path = Path('').resolve()
for last in path.rglob('*/**/last.pt'):
    # BUGFIX: map_location='cpu' so checkpoints saved on GPU can be inspected
    # on CPU-only hosts and don't allocate GPU memory just for this check.
    ckpt = torch.load(last, map_location='cpu')
    if ckpt['optimizer'] is None:  # completed runs strip the optimizer; nothing to resume
        continue

    # Load opt.yaml to recover the original training configuration
    with open(last.parent.parent / 'opt.yaml', errors='ignore') as f:
        opt = yaml.safe_load(f)

    # Get device count
    d = opt['device'].split(',')  # devices
    nd = len(d)  # number of devices
    ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1)  # distributed data parallel

    if ddp:  # multi-GPU
        port += 1
        cmd = f'python -m torch.distributed.run --nproc_per_node {nd} --master_port {port} train.py --resume {last}'
    else:  # single-GPU
        cmd = f'python train.py --resume {last}'

    cmd += ' > /dev/null 2>&1 &'  # redirect output to dev/null and run in daemon thread
    print(cmd)
    os.system(cmd)
41 |
--------------------------------------------------------------------------------
/utils/aws/userdata.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# AWS EC2 instance startup script https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html
# This script will run only once on first instance start (for a re-start script see mime.sh)
# /home/ubuntu (ubuntu) or /home/ec2-user (amazon-linux) is working dir
# Use >300 GB SSD

# NOTE(review): relative path — user-data scripts normally run from /, so this
# resolves to /home/ubuntu; confirm if the launch environment differs.
cd home/ubuntu
if [ ! -d yolov5 ]; then
  echo "Running first-time script." # install dependencies, download COCO, pull Docker
  git clone https://github.com/ultralytics/yolov5 -b master && sudo chmod -R 777 yolov5
  cd yolov5
  # The three setup tasks below run in parallel (&) and are joined by `wait`
  bash data/scripts/get_coco.sh && echo "COCO done." &
  sudo docker pull ultralytics/yolov5:latest && echo "Docker done." &
  python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." &
  wait && echo "All tasks done." # finish background tasks
else
  echo "Running re-start script." # resume interrupted runs
  i=0
  list=$(sudo docker ps -qa) # container list i.e. $'one\ntwo\nthree\nfour'
  while IFS= read -r id; do
    ((i++))
    echo "restarting container $i: $id"
    sudo docker start $id
    # sudo docker exec -it $id python train.py --resume # single-GPU
    sudo docker exec -d $id python utils/aws/resume.py # multi-scenario
  done <<<"$list"
fi
28 |
--------------------------------------------------------------------------------
/utils/benchmarks.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | """
3 | Run YOLOv5 benchmarks on all supported export formats
4 |
5 | Format | `export.py --include` | Model
6 | --- | --- | ---
7 | PyTorch | - | yolov5s.pt
8 | TorchScript | `torchscript` | yolov5s.torchscript
9 | ONNX | `onnx` | yolov5s.onnx
10 | OpenVINO | `openvino` | yolov5s_openvino_model/
11 | TensorRT | `engine` | yolov5s.engine
12 | CoreML | `coreml` | yolov5s.mlmodel
13 | TensorFlow SavedModel | `saved_model` | yolov5s_saved_model/
14 | TensorFlow GraphDef | `pb` | yolov5s.pb
15 | TensorFlow Lite | `tflite` | yolov5s.tflite
16 | TensorFlow Edge TPU | `edgetpu` | yolov5s_edgetpu.tflite
17 | TensorFlow.js | `tfjs` | yolov5s_web_model/
18 |
19 | Requirements:
20 | $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu # CPU
21 | $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow # GPU
22 |
23 | Usage:
24 | $ python utils/benchmarks.py --weights yolov5s.pt --img 640
25 | """
26 |
27 | import argparse
28 | import sys
29 | import time
30 | from pathlib import Path
31 |
32 | import pandas as pd
33 |
FILE = Path(__file__).resolve()
ROOT = FILE.parents[1]  # YOLOv5 root directory (parent of utils/)
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH so `import export` / `import val` below resolve
# ROOT = ROOT.relative_to(Path.cwd())  # relative
39 |
40 | import export
41 | import val
42 | from utils import notebook_init
43 | from utils.general import LOGGER, print_args
44 |
45 |
def run(weights=ROOT / 'yolov5s.pt',  # weights path
        imgsz=640,  # inference size (pixels)
        batch_size=1,  # batch size
        data=ROOT / 'data/coco128.yaml',  # dataset.yaml path
        ):
    """Benchmark `weights` across every supported export format.

    For each format: export the model (the PyTorch weights are used as-is),
    validate on `data`, and record mAP@0.5:0.95 and inference time. A failing
    format is logged and recorded as (name, None, None) so one bad export does
    not abort the whole benchmark.

    Returns:
        pandas.DataFrame with columns ['Format', 'mAP@0.5:0.95', 'Inference time (ms)'].
    """
    y, t = [], time.time()
    formats = export.export_formats()
    for i, (name, f, suffix) in formats.iterrows():  # index, (name, file, suffix)
        try:
            w = weights if f == '-' else export.run(weights=weights, imgsz=[imgsz], include=[f], device='cpu')[-1]
            assert suffix in str(w), 'export failed'
            result = val.run(data, w, batch_size, imgsz=imgsz, plots=False, device='cpu', task='benchmark')
            metrics = result[0]  # metrics (mp, mr, map50, map, *losses(box, obj, cls))
            speeds = result[2]  # times (preprocess, inference, postprocess)
            y.append([name, metrics[3], speeds[1]])  # mAP, t_inference
        except Exception as e:
            LOGGER.warning(f'WARNING: Benchmark failure for {name}: {e}')
            y.append([name, None, None])  # mAP, t_inference

    # Print results
    LOGGER.info('\n')
    # BUGFIX: removed a stray parse_opt() call here — it re-parsed sys.argv on
    # every programmatic run() invocation and could SystemExit on unrelated
    # command-line arguments.
    notebook_init()  # print system info
    py = pd.DataFrame(y, columns=['Format', 'mAP@0.5:0.95', 'Inference time (ms)'])
    LOGGER.info(f'\nBenchmarks complete ({time.time() - t:.2f}s)')
    LOGGER.info(str(py))
    return py
73 |
74 |
def parse_opt():
    """Parse benchmark command-line options and return the argparse namespace."""
    parser = argparse.ArgumentParser()
    add = parser.add_argument
    add('--weights', type=str, default=ROOT / 'yolov5s.pt', help='weights path')
    add('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)')
    add('--batch-size', type=int, default=1, help='batch size')
    add('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path')
    opt = parser.parse_args()
    print_args(FILE.stem, opt)
    return opt
84 |
85 |
def main(opt):
    """Entry point: unpack the parsed options and forward them to run()."""
    kwargs = vars(opt)
    run(**kwargs)
88 |
89 |
if __name__ == "__main__":
    # CLI entry: parse arguments, then run the full benchmark sweep
    opt = parse_opt()
    main(opt)
93 |
--------------------------------------------------------------------------------
/utils/callbacks.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | """
3 | Callback utils
4 | """
5 |
6 |
class Callbacks:
    """Handles all registered callbacks for YOLOv5 Hooks."""
    # BUGFIX: the class and get_registered_actions docstrings previously opened
    # with four quotes (`""""`), leaving a stray leading quote in the docstring.

    def __init__(self):
        # Available callback hooks; each maps to a list of registered actions
        # of the form {'name': str, 'callback': callable}.
        self._callbacks = {
            'on_pretrain_routine_start': [],
            'on_pretrain_routine_end': [],

            'on_train_start': [],
            'on_train_epoch_start': [],
            'on_train_batch_start': [],
            'optimizer_step': [],
            'on_before_zero_grad': [],
            'on_train_batch_end': [],
            'on_train_epoch_end': [],

            'on_val_start': [],
            'on_val_batch_start': [],
            'on_val_image_end': [],
            'on_val_batch_end': [],
            'on_val_end': [],

            'on_fit_epoch_end': [],  # fit = train + val
            'on_model_save': [],
            'on_train_end': [],
            'on_params_update': [],
            'teardown': [],
        }
        self.stop_training = False  # set True to interrupt training

    def register_action(self, hook, name='', callback=None):
        """
        Register a new action to a callback hook

        Args:
            hook: The callback hook name to register the action to
            name: The name of the action for later reference
            callback: The callback to fire
        """
        assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}"
        assert callable(callback), f"callback '{callback}' is not callable"
        self._callbacks[hook].append({'name': name, 'callback': callback})

    def get_registered_actions(self, hook=None):
        """
        Returns all the registered actions by callback hook

        Args:
            hook: The name of the hook to check, defaults to all
        """
        if hook:
            return self._callbacks[hook]
        else:
            return self._callbacks

    def run(self, hook, *args, **kwargs):
        """
        Loop through the registered actions and fire all callbacks

        Args:
            hook: The name of the hook to fire
            args: Arguments to pass through from YOLOv5
            kwargs: Keyword arguments to pass through from YOLOv5
        """
        assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}"

        for action in self._callbacks[hook]:
            action['callback'](*args, **kwargs)
79 |
--------------------------------------------------------------------------------
/utils/downloads.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | """
3 | Download utils
4 | """
5 |
6 | import os
7 | import platform
8 | import subprocess
9 | import time
10 | import urllib
11 | from pathlib import Path
12 | from zipfile import ZipFile
13 |
14 | import requests
15 | import torch
16 |
17 |
def gsutil_getsize(url=''):
    """Return the size in bytes of a gs:// URL via `gsutil du` (0 if empty output).

    https://cloud.google.com/storage/docs/gsutil/commands/du
    """
    s = subprocess.check_output(f'gsutil du {url}', shell=True).decode('utf-8')
    # BUGFIX: parse the leading byte count with int() instead of eval() —
    # eval() on external command output is unsafe and unnecessary here.
    return int(s.split(' ')[0]) if len(s) else 0  # bytes
22 |
23 |
def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''):
    """Download `url` to `file`, falling back to `url2` via curl on failure.

    Downloads smaller than `min_bytes` are treated as incomplete and removed.
    """
    file = Path(file)
    assert_msg = f"Downloaded file '{file}' does not exist or size is < min_bytes={min_bytes}"
    try:
        # Primary attempt through the torch hub downloader
        print(f'Downloading {url} to {file}...')
        torch.hub.download_url_to_file(url, str(file))
        assert file.exists() and file.stat().st_size > min_bytes, assert_msg
    except Exception as e:
        # Secondary attempt via curl: retries 3x and resumes partial downloads
        file.unlink(missing_ok=True)
        print(f'ERROR: {e}\nRe-attempting {url2 or url} to {file}...')
        os.system(f"curl -L '{url2 or url}' -o '{file}' --retry 3 -C -")
    finally:
        incomplete = not file.exists() or file.stat().st_size < min_bytes
        if incomplete:
            file.unlink(missing_ok=True)  # remove what little was downloaded
            print(f"ERROR: {assert_msg}\n{error_msg}")
        print('')
41 |
42 |
def attempt_download(file, repo='ultralytics/yolov5'):  # from utils.downloads import *; attempt_download()
    """Ensure `file` exists locally, downloading it if necessary.

    A URL-shaped `file` is downloaded directly; otherwise the bare file name is
    looked up among the GitHub release assets of `repo`. Returns str(file).
    """
    # Attempt file download if does not exist
    file = Path(str(file).strip().replace("'", ''))

    if not file.exists():
        # URL specified
        name = Path(urllib.parse.unquote(str(file))).name  # decode '%2F' to '/' etc.
        if str(file).startswith(('http:/', 'https:/')):  # download
            url = str(file).replace(':/', '://')  # Pathlib turns :// -> :/
            file = name.split('?')[0]  # parse authentication https://url.com/file.txt?auth...
            if Path(file).is_file():
                print(f'Found {url} locally at {file}')  # file already exists
            else:
                safe_download(file=file, url=url, min_bytes=1E5)
            return file

        # GitHub assets
        file.parent.mkdir(parents=True, exist_ok=True)  # make parent dir (if required)
        try:
            # Query the latest release for its asset names and tag
            response = requests.get(f'https://api.github.com/repos/{repo}/releases/latest').json()  # github api
            assets = [x['name'] for x in response['assets']]  # release assets, i.e. ['yolov5s.pt', 'yolov5m.pt', ...]
            tag = response['tag_name']  # i.e. 'v1.0'
        except Exception:  # fallback plan: hard-coded asset list; tag from local git, else a pinned release
            assets = ['yolov5n.pt', 'yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt',
                      'yolov5n6.pt', 'yolov5s6.pt', 'yolov5m6.pt', 'yolov5l6.pt', 'yolov5x6.pt']
            try:
                tag = subprocess.check_output('git tag', shell=True, stderr=subprocess.STDOUT).decode().split()[-1]
            except Exception:
                tag = 'v6.0'  # current release

        if name in assets:
            safe_download(file,
                          url=f'https://github.com/{repo}/releases/download/{tag}/{name}',
                          # url2=f'https://storage.googleapis.com/{repo}/ckpt/{name}',  # backup url (optional)
                          min_bytes=1E5,
                          error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/')

    return str(file)
81 |
82 |
def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip'):
    """Download a Google Drive file by `id` to `file`, unzipping .zip archives.

    Returns the curl exit code (0 on success, non-zero on failure).
    Usage: from yolov5.utils.downloads import *; gdrive_download()
    """
    t = time.time()
    file = Path(file)
    cookie = Path('cookie')  # gdrive cookie
    print(f'Downloading https://drive.google.com/uc?export=download&id={id} as {file}... ', end='')
    file.unlink(missing_ok=True)  # remove existing file
    cookie.unlink(missing_ok=True)  # remove existing cookie

    # Attempt file download. The first curl stores Drive's cookie so large
    # files (which require a confirm token) can be fetched on a second request.
    out = "NUL" if platform.system() == "Windows" else "/dev/null"
    os.system(f'curl -c ./cookie -s -L "drive.google.com/uc?export=download&id={id}" > {out}')
    if os.path.exists('cookie'):  # large file: re-request with the confirm token read from the cookie
        s = f'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm={get_token()}&id={id}" -o {file}'
    else:  # small file
        s = f'curl -s -L -o {file} "drive.google.com/uc?export=download&id={id}"'
    r = os.system(s)  # execute, capture return
    cookie.unlink(missing_ok=True)  # remove existing cookie

    # Error check
    if r != 0:
        file.unlink(missing_ok=True)  # remove partial
        print('Download error ')  # raise Exception('Download error')
        return r

    # Unzip if archive
    if file.suffix == '.zip':
        print('unzipping... ', end='')
        ZipFile(file).extractall(path=file.parent)  # unzip
        file.unlink()  # remove zip

    print(f'Done ({time.time() - t:.1f}s)')
    return r
116 |
117 |
def get_token(cookie="./cookie"):
    """Return the Drive download-confirmation token found in a cookie file, or ''.

    The token is the last whitespace-separated field of the first line that
    contains the substring "download".
    """
    with open(cookie) as fh:
        matches = (line.split()[-1] for line in fh if "download" in line)
        return next(matches, "")
124 |
125 | # Google utils: https://cloud.google.com/storage/docs/reference/libraries ----------------------------------------------
126 | #
127 | #
128 | # def upload_blob(bucket_name, source_file_name, destination_blob_name):
129 | # # Uploads a file to a bucket
130 | # # https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python
131 | #
132 | # storage_client = storage.Client()
133 | # bucket = storage_client.get_bucket(bucket_name)
134 | # blob = bucket.blob(destination_blob_name)
135 | #
136 | # blob.upload_from_filename(source_file_name)
137 | #
138 | # print('File {} uploaded to {}.'.format(
139 | # source_file_name,
140 | # destination_blob_name))
141 | #
142 | #
143 | # def download_blob(bucket_name, source_blob_name, destination_file_name):
144 | # # Uploads a blob from a bucket
145 | # storage_client = storage.Client()
146 | # bucket = storage_client.get_bucket(bucket_name)
147 | # blob = bucket.blob(source_blob_name)
148 | #
149 | # blob.download_to_filename(destination_file_name)
150 | #
151 | # print('Blob {} downloaded to {}.'.format(
152 | # source_blob_name,
153 | # destination_file_name))
154 |
--------------------------------------------------------------------------------
/utils/flask_rest_api/README.md:
--------------------------------------------------------------------------------
1 | # Flask REST API
2 |
3 | [REST](https://en.wikipedia.org/wiki/Representational_state_transfer) [API](https://en.wikipedia.org/wiki/API)s are
4 | commonly used to expose Machine Learning (ML) models to other services. This folder contains an example REST API
5 | created using Flask to expose the YOLOv5s model from [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/).
6 |
7 | ## Requirements
8 |
9 | [Flask](https://palletsprojects.com/p/flask/) is required. Install with:
10 |
11 | ```shell
12 | $ pip install Flask
13 | ```
14 |
15 | ## Run
16 |
17 | After Flask installation run:
18 |
19 | ```shell
20 | $ python3 restapi.py --port 5000
21 | ```
22 |
23 | Then use [curl](https://curl.se/) to perform a request:
24 |
25 | ```shell
26 | $ curl -X POST -F image=@zidane.jpg 'http://localhost:5000/v1/object-detection/yolov5s'
27 | ```
28 |
29 | The model inference results are returned as a JSON response:
30 |
31 | ```json
32 | [
33 | {
34 | "class": 0,
35 | "confidence": 0.8900438547,
36 | "height": 0.9318675399,
37 | "name": "person",
38 | "width": 0.3264600933,
39 | "xcenter": 0.7438579798,
40 | "ycenter": 0.5207948685
41 | },
42 | {
43 | "class": 0,
44 | "confidence": 0.8440024257,
45 | "height": 0.7155083418,
46 | "name": "person",
47 | "width": 0.6546785235,
48 | "xcenter": 0.427829951,
49 | "ycenter": 0.6334488392
50 | },
51 | {
52 | "class": 27,
53 | "confidence": 0.3771208823,
54 | "height": 0.3902671337,
55 | "name": "tie",
56 | "width": 0.0696444362,
57 | "xcenter": 0.3675483763,
58 | "ycenter": 0.7991207838
59 | },
60 | {
61 | "class": 27,
62 | "confidence": 0.3527112305,
63 | "height": 0.1540903747,
64 | "name": "tie",
65 | "width": 0.0336618312,
66 | "xcenter": 0.7814827561,
67 | "ycenter": 0.5065554976
68 | }
69 | ]
70 | ```
71 |
An example Python script to perform inference using [requests](https://docs.python-requests.org/en/master/) is given
in `example_request.py`.
74 |
--------------------------------------------------------------------------------
/utils/flask_rest_api/example_request.py:
--------------------------------------------------------------------------------
1 | """Perform test request"""
2 | import pprint
3 |
4 | import requests
5 |
DETECTION_URL = "http://localhost:5000/v1/object-detection/yolov5s"
TEST_IMAGE = "zidane.jpg"

# BUGFIX/idiom: read the image inside a context manager so the file handle is
# closed promptly instead of relying on garbage collection.
with open(TEST_IMAGE, "rb") as f:
    image_data = f.read()

# POST the raw image bytes and pretty-print the JSON detection records
response = requests.post(DETECTION_URL, files={"image": image_data}).json()

pprint.pprint(response)
14 |
--------------------------------------------------------------------------------
/utils/flask_rest_api/restapi.py:
--------------------------------------------------------------------------------
1 | """
2 | Run a rest API exposing the yolov5s object detection model
3 | """
4 | import argparse
5 | import io
6 |
7 | import torch
8 | from flask import Flask, request
9 | from PIL import Image
10 |
11 | app = Flask(__name__)
12 |
13 | DETECTION_URL = "/v1/object-detection/yolov5s"
14 |
15 |
@app.route(DETECTION_URL, methods=["POST"])
def predict():
    """Handle a POSTed 'image' file and return YOLOv5s detections as JSON records."""
    if request.method != "POST":
        return

    upload = request.files.get("image")
    if not upload:
        return

    img = Image.open(io.BytesIO(upload.read()))
    results = model(img, size=640)  # reduce size=320 for faster inference
    return results.pandas().xyxy[0].to_json(orient="records")
29 |
30 |
if __name__ == "__main__":
    # CLI: --port selects the listen port (default 5000)
    parser = argparse.ArgumentParser(description="Flask API exposing YOLOv5 model")
    parser.add_argument("--port", default=5000, type=int, help="port number")
    args = parser.parse_args()

    # Load the model once at startup; the module-level `model` is read by predict() above
    model = torch.hub.load("ultralytics/yolov5", "yolov5s", force_reload=True)  # force_reload to recache
    app.run(host="0.0.0.0", port=args.port)  # debug=True causes Restarting with stat
38 |
--------------------------------------------------------------------------------
/utils/google_app_engine/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM gcr.io/google-appengine/python
2 |
3 | # Create a virtualenv for dependencies. This isolates these packages from
4 | # system-level packages.
5 | # Use -p python3 or -p python3.7 to select python version. Default is version 2.
6 | RUN virtualenv /env -p python3
7 |
8 | # Setting these environment variables are the same as running
9 | # source /env/bin/activate.
10 | ENV VIRTUAL_ENV /env
11 | ENV PATH /env/bin:$PATH
12 |
13 | RUN apt-get update && apt-get install -y python-opencv
14 |
15 | # Copy the application's requirements.txt and run pip to install all
16 | # dependencies into the virtualenv.
17 | ADD requirements.txt /app/requirements.txt
18 | RUN pip install -r /app/requirements.txt
19 |
20 | # Add the application source code.
21 | ADD . /app
22 |
23 | # Run a WSGI server to serve the application. gunicorn must be declared as
24 | # a dependency in requirements.txt.
25 | CMD gunicorn -b :$PORT main:app
26 |
--------------------------------------------------------------------------------
/utils/google_app_engine/additional_requirements.txt:
--------------------------------------------------------------------------------
1 | # add these requirements in your app on top of the existing ones
2 | pip==21.1
3 | Flask==1.0.2
4 | gunicorn==19.9.0
5 |
--------------------------------------------------------------------------------
/utils/google_app_engine/app.yaml:
--------------------------------------------------------------------------------
1 | runtime: custom
2 | env: flex
3 |
4 | service: yolov5app
5 |
6 | liveness_check:
7 | initial_delay_sec: 600
8 |
9 | manual_scaling:
10 | instances: 1
11 | resources:
12 | cpu: 1
13 | memory_gb: 4
14 | disk_size_gb: 20
15 |
--------------------------------------------------------------------------------
/utils/loggers/__init__.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | """
3 | Logging utils
4 | """
5 |
6 | import os
7 | import warnings
8 | from threading import Thread
9 |
10 | import pkg_resources as pkg
11 | import torch
12 | from torch.utils.tensorboard import SummaryWriter
13 |
14 | from utils.general import colorstr, emojis
15 | from utils.loggers.wandb.wandb_utils import WandbLogger
16 | from utils.plots import plot_images, plot_results
17 | from utils.torch_utils import de_parallel
18 |
LOGGERS = ('csv', 'tb', 'wandb')  # text-file, TensorBoard, Weights & Biases
RANK = int(os.getenv('RANK', -1))  # DDP process rank; -1 means not running distributed

try:
    import wandb

    assert hasattr(wandb, '__version__')  # verify package import not local dir
    # Only attempt a login on the main process (RANK 0 or -1) with a recent wandb
    if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.2') and RANK in [0, -1]:
        try:
            wandb_login_success = wandb.login(timeout=30)
        except wandb.errors.UsageError:  # known non-TTY terminal issue
            wandb_login_success = False
        if not wandb_login_success:
            wandb = None
except (ImportError, AssertionError):
    # wandb unavailable or shadowed -> disable W&B logging throughout this module
    wandb = None
35 |
36 |
37 | class Loggers():
38 | # YOLOv5 Loggers class
    def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, include=LOGGERS):
        """Set up the requested loggers ('csv' always; 'tb'/'wandb' per `include`)."""
        self.save_dir = save_dir  # run directory; also used as the TensorBoard log dir
        self.weights = weights  # checkpoint path, read below to recover a resumed W&B run id
        self.hyp = hyp
        self.opt = opt
        self.logger = logger  # for printing results to console
        self.include = include
        # Column order shared by the csv results file and the scalar loggers
        self.keys = ['train/box_loss', 'train/obj_loss', 'train/cls_loss',  # train loss
                     'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',  # metrics
                     'val/box_loss', 'val/obj_loss', 'val/cls_loss',  # val loss
                     'x/lr0', 'x/lr1', 'x/lr2']  # params
        self.best_keys = ['best/epoch', 'best/precision', 'best/recall', 'best/mAP_0.5', 'best/mAP_0.5:0.95',]
        for k in LOGGERS:
            setattr(self, k, None)  # init empty logger dictionary
        self.csv = True  # always log to csv

        # Message: suggest installing wandb when it is not available
        if not wandb:
            prefix = colorstr('Weights & Biases: ')
            s = f"{prefix}run 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs (RECOMMENDED)"
            print(emojis(s))

        # TensorBoard (skipped while evolving hyperparameters)
        s = self.save_dir
        if 'tb' in self.include and not self.opt.evolve:
            prefix = colorstr('TensorBoard: ')
            self.logger.info(f"{prefix}Start with 'tensorboard --logdir {s.parent}', view at http://localhost:6006/")
            self.tb = SummaryWriter(str(s))

        # W&B: recover the previous run id from the checkpoint when resuming
        if wandb and 'wandb' in self.include:
            wandb_artifact_resume = isinstance(self.opt.resume, str) and self.opt.resume.startswith('wandb-artifact://')
            run_id = torch.load(self.weights).get('wandb_id') if self.opt.resume and not wandb_artifact_resume else None
            self.opt.hyp = self.hyp  # add hyperparameters
            self.wandb = WandbLogger(self.opt, run_id)
        else:
            self.wandb = None
76 |
77 | def on_pretrain_routine_end(self):
78 | # Callback runs on pre-train routine end
79 | paths = self.save_dir.glob('*labels*.jpg') # training labels
80 | if self.wandb:
81 | self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]})
82 |
    def on_train_batch_end(self, ni, model, imgs, targets, paths, plots, sync_bn):
        """Callback: train batch finished -- log model graph and sample batch mosaics.

        Args:
            ni: number of integrated batches since training started.
            plots: master switch for plot generation.
            sync_bn: True skips the TensorBoard graph trace (known --sync-bn issue).
        """
        if plots:
            if ni == 0:
                # Trace the model graph once, on the very first batch
                if not sync_bn:  # tb.add_graph() --sync known issue https://github.com/ultralytics/yolov5/issues/3754
                    with warnings.catch_warnings():
                        warnings.simplefilter('ignore')  # suppress jit trace warning
                        self.tb.add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), [])
            if ni < 3:
                # Plot the first 3 train batches in a background thread so training isn't blocked
                f = self.save_dir / f'train_batch{ni}.jpg'  # filename
                Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start()
            if self.wandb and ni == 10:
                files = sorted(self.save_dir.glob('train*.jpg'))
                self.wandb.log({'Mosaics': [wandb.Image(str(f), caption=f.name) for f in files if f.exists()]})
97 |
98 | def on_train_epoch_end(self, epoch):
99 | # Callback runs on train epoch end
100 | if self.wandb:
101 | self.wandb.current_epoch = epoch + 1
102 |
103 | def on_val_image_end(self, pred, predn, path, names, im):
104 | # Callback runs on val image end
105 | if self.wandb:
106 | self.wandb.val_one_image(pred, predn, path, names, im)
107 |
108 | def on_val_end(self):
109 | # Callback runs on val end
110 | if self.wandb:
111 | files = sorted(self.save_dir.glob('val*.jpg'))
112 | self.wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in files]})
113 |
114 | def on_fit_epoch_end(self, vals, epoch, best_fitness, fi):
115 | # Callback runs at the end of each fit (train+val) epoch
116 | x = {k: v for k, v in zip(self.keys, vals)} # dict
117 | if self.csv:
118 | file = self.save_dir / 'results.csv'
119 | n = len(x) + 1 # number of cols
120 | s = '' if file.exists() else (('%20s,' * n % tuple(['epoch'] + self.keys)).rstrip(',') + '\n') # add header
121 | with open(file, 'a') as f:
122 | f.write(s + ('%20.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n')
123 |
124 | if self.tb:
125 | for k, v in x.items():
126 | self.tb.add_scalar(k, v, epoch)
127 |
128 | if self.wandb:
129 | if best_fitness == fi:
130 | best_results = [epoch] + vals[3:7]
131 | for i, name in enumerate(self.best_keys):
132 | self.wandb.wandb_run.summary[name] = best_results[i] # log best results in the summary
133 | self.wandb.log(x)
134 | self.wandb.end_epoch(best_result=best_fitness == fi)
135 |
136 | def on_model_save(self, last, epoch, final_epoch, best_fitness, fi):
137 | # Callback runs on model save event
138 | if self.wandb:
139 | if ((epoch + 1) % self.opt.save_period == 0 and not final_epoch) and self.opt.save_period != -1:
140 | self.wandb.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi)
141 |
142 | def on_train_end(self, last, best, plots, epoch, results):
143 | # Callback runs on training end
144 | if plots:
145 | plot_results(file=self.save_dir / 'results.csv') # save results.png
146 | files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))]
147 | files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()] # filter
148 |
149 | if self.tb:
150 | import cv2
151 | for f in files:
152 | self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC')
153 |
154 | if self.wandb:
155 | self.wandb.log({k: v for k, v in zip(self.keys[3:10], results)}) # log best.pt val results
156 | self.wandb.log({"Results": [wandb.Image(str(f), caption=f.name) for f in files]})
157 | # Calling wandb.log. TODO: Refactor this into WandbLogger.log_model
158 | if not self.opt.evolve:
159 | wandb.log_artifact(str(best if best.exists() else last), type='model',
160 | name='run_' + self.wandb.wandb_run.id + '_model',
161 | aliases=['latest', 'best', 'stripped'])
162 | self.wandb.finish_run()
163 |
164 | def on_params_update(self, params):
165 | # Update hyperparams or configs of the experiment
166 | # params: A dict containing {param: value} pairs
167 | if self.wandb:
168 | self.wandb.wandb_run.config.update(params, allow_val_change=True)
169 |
--------------------------------------------------------------------------------
/utils/loggers/wandb/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dyh/win10_yolov5_deepsort_counting/adb686ee839e89177990c27f5da35bfae7ab4b9b/utils/loggers/wandb/__init__.py
--------------------------------------------------------------------------------
/utils/loggers/wandb/log_dataset.py:
--------------------------------------------------------------------------------
1 | import argparse
2 |
3 | from wandb_utils import WandbLogger
4 |
5 | from utils.general import LOGGER
6 |
7 | WANDB_ARTIFACT_PREFIX = 'wandb-artifact://'
8 |
9 |
def create_dataset_artifact(opt):
    """Upload the dataset described by *opt* as a W&B artifact (the logger performs the upload)."""
    dataset_logger = WandbLogger(opt, None, job_type='Dataset Creation')  # TODO: return value unused
    if not dataset_logger.wandb:
        LOGGER.info("install wandb using `pip install wandb` to log the dataset")
14 |
15 |
if __name__ == '__main__':
    # Stand-alone usage: parse the dataset/W&B options and upload the dataset as an artifact.
    parser = argparse.ArgumentParser()
    parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path')
    parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset')
    parser.add_argument('--project', type=str, default='YOLOv5', help='name of W&B Project')
    parser.add_argument('--entity', default=None, help='W&B entity')
    parser.add_argument('--name', type=str, default='log dataset', help='name of W&B run')

    opt = parser.parse_args()
    opt.resume = False  # Explicitly disallow resume check for dataset upload job

    create_dataset_artifact(opt)
28 |
--------------------------------------------------------------------------------
/utils/loggers/wandb/sweep.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from pathlib import Path
3 |
4 | import wandb
5 |
# Make the repository root importable so `train` and `utils` resolve when run as a script
FILE = Path(__file__).resolve()
ROOT = FILE.parents[3]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
10 |
11 | from train import parse_opt, train
12 | from utils.callbacks import Callbacks
13 | from utils.general import increment_path
14 | from utils.torch_utils import select_device
15 |
16 |
def sweep():
    """Run one W&B sweep trial: pull hyperparameters from the sweep agent and train with them."""
    wandb.init()
    hyp_dict = vars(wandb.config).get("_items")  # hyperparameters sampled by the sweep agent

    # Workaround: rebuild the minimal opt namespace the trainer expects
    opt = parse_opt(known=True)
    opt.batch_size = hyp_dict.get("batch_size")
    opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve))
    opt.epochs = hyp_dict.get("epochs")
    opt.nosave = True
    opt.data = hyp_dict.get("data")
    # train() expects these as plain strings, not Path objects
    for attr in ('weights', 'cfg', 'data', 'hyp', 'project'):
        setattr(opt, attr, str(getattr(opt, attr)))
    device = select_device(opt.device, batch_size=opt.batch_size)

    # train
    train(hyp_dict, opt, device, callbacks=Callbacks())
38 |
39 |
if __name__ == "__main__":
    # Entry point invoked by the W&B sweep agent (see the `program:` key in sweep.yaml)
    sweep()
42 |
--------------------------------------------------------------------------------
/utils/loggers/wandb/sweep.yaml:
--------------------------------------------------------------------------------
# Hyperparameters for training
#
# To search over a range, provide min and max values:
#   parameter:
#     min: scalar
#     max: scalar
#
# Or set an explicit list of values to search:
#   parameter:
#     values: [scalar1, scalar2, scalar3...]
#
# Supported search strategies are grid, random and bayes.
# For more info on configuring sweeps visit - https://docs.wandb.ai/guides/sweeps/configuration
16 |
17 | program: utils/loggers/wandb/sweep.py
18 | method: random
19 | metric:
20 | name: metrics/mAP_0.5
21 | goal: maximize
22 |
23 | parameters:
24 | # hyperparameters: set either min, max range or values list
25 | data:
26 | value: "data/coco128.yaml"
27 | batch_size:
28 | values: [64]
29 | epochs:
30 | values: [10]
31 |
32 | lr0:
33 | distribution: uniform
34 | min: 1e-5
35 | max: 1e-1
36 | lrf:
37 | distribution: uniform
38 | min: 0.01
39 | max: 1.0
40 | momentum:
41 | distribution: uniform
42 | min: 0.6
43 | max: 0.98
44 | weight_decay:
45 | distribution: uniform
46 | min: 0.0
47 | max: 0.001
48 | warmup_epochs:
49 | distribution: uniform
50 | min: 0.0
51 | max: 5.0
52 | warmup_momentum:
53 | distribution: uniform
54 | min: 0.0
55 | max: 0.95
56 | warmup_bias_lr:
57 | distribution: uniform
58 | min: 0.0
59 | max: 0.2
60 | box:
61 | distribution: uniform
62 | min: 0.02
63 | max: 0.2
64 | cls:
65 | distribution: uniform
66 | min: 0.2
67 | max: 4.0
68 | cls_pw:
69 | distribution: uniform
70 | min: 0.5
71 | max: 2.0
72 | obj:
73 | distribution: uniform
74 | min: 0.2
75 | max: 4.0
76 | obj_pw:
77 | distribution: uniform
78 | min: 0.5
79 | max: 2.0
80 | iou_t:
81 | distribution: uniform
82 | min: 0.1
83 | max: 0.7
84 | anchor_t:
85 | distribution: uniform
86 | min: 2.0
87 | max: 8.0
88 | fl_gamma:
89 | distribution: uniform
90 | min: 0.0
91 | max: 0.1
92 | hsv_h:
93 | distribution: uniform
94 | min: 0.0
95 | max: 0.1
96 | hsv_s:
97 | distribution: uniform
98 | min: 0.0
99 | max: 0.9
100 | hsv_v:
101 | distribution: uniform
102 | min: 0.0
103 | max: 0.9
104 | degrees:
105 | distribution: uniform
106 | min: 0.0
107 | max: 45.0
108 | translate:
109 | distribution: uniform
110 | min: 0.0
111 | max: 0.9
112 | scale:
113 | distribution: uniform
114 | min: 0.0
115 | max: 0.9
116 | shear:
117 | distribution: uniform
118 | min: 0.0
119 | max: 10.0
120 | perspective:
121 | distribution: uniform
122 | min: 0.0
123 | max: 0.001
124 | flipud:
125 | distribution: uniform
126 | min: 0.0
127 | max: 1.0
128 | fliplr:
129 | distribution: uniform
130 | min: 0.0
131 | max: 1.0
132 | mosaic:
133 | distribution: uniform
134 | min: 0.0
135 | max: 1.0
136 | mixup:
137 | distribution: uniform
138 | min: 0.0
139 | max: 1.0
140 | copy_paste:
141 | distribution: uniform
142 | min: 0.0
143 | max: 1.0
144 |
--------------------------------------------------------------------------------
/utils/loss.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | """
3 | Loss functions
4 | """
5 |
6 | import torch
7 | import torch.nn as nn
8 |
9 | from utils.metrics import bbox_iou
10 | from utils.torch_utils import de_parallel
11 |
12 |
def smooth_BCE(eps=0.1):  # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441
    """Return (positive, negative) BCE targets for label smoothing of strength *eps*."""
    half = 0.5 * eps
    return 1.0 - half, half
16 |
17 |
class BCEBlurWithLogitsLoss(nn.Module):
    """BCEWithLogitsLoss variant that down-weights examples that look like missing labels."""

    def __init__(self, alpha=0.05):
        super().__init__()
        self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none')  # must be nn.BCEWithLogitsLoss()
        self.alpha = alpha

    def forward(self, pred, true):
        raw = self.loss_fcn(pred, true)
        prob = pred.sigmoid()  # prob from logits
        dx = prob - true  # large when the model is confident but the label is 0 (possible missing label)
        # dx = (prob - true).abs()  # would reduce missing-label and false-label effects
        weight = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4))
        return (raw * weight).mean()
33 |
34 |
class FocalLoss(nn.Module):
    """Wrap an existing BCE-with-logits criterion with focal-loss modulation,
    e.g. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)."""

    def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
        super().__init__()
        self.loss_fcn = loss_fcn  # must be nn.BCEWithLogitsLoss()
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = loss_fcn.reduction
        self.loss_fcn.reduction = 'none'  # apply FL element-wise, reduce afterwards

    def forward(self, pred, true):
        loss = self.loss_fcn(pred, true)
        # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py
        prob = torch.sigmoid(pred)  # prob from logits
        p_t = true * prob + (1 - true) * (1 - prob)
        alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
        modulating = (1.0 - p_t) ** self.gamma
        loss = loss * (alpha_factor * modulating)

        if self.reduction == 'mean':
            return loss.mean()
        if self.reduction == 'sum':
            return loss.sum()
        return loss  # 'none'
63 |
64 |
class QFocalLoss(nn.Module):
    """Wrap an existing BCE-with-logits criterion with quality focal-loss modulation,
    e.g. criteria = QFocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)."""

    def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
        super().__init__()
        self.loss_fcn = loss_fcn  # must be nn.BCEWithLogitsLoss()
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = loss_fcn.reduction
        self.loss_fcn.reduction = 'none'  # apply FL element-wise, reduce afterwards

    def forward(self, pred, true):
        loss = self.loss_fcn(pred, true)

        prob = torch.sigmoid(pred)  # prob from logits
        alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
        modulating = torch.abs(true - prob) ** self.gamma
        loss = loss * (alpha_factor * modulating)

        if self.reduction == 'mean':
            return loss.mean()
        if self.reduction == 'sum':
            return loss.sum()
        return loss  # 'none'
89 |
90 |
class ComputeLoss:
    """Compute the YOLOv5 training loss: box regression (CIoU), objectness and classification BCE."""

    def __init__(self, model, autobalance=False):
        # When True, targets are written to tobj in ascending-IoU order so overlapping
        # targets keep the highest IoU; disabled by default.
        self.sort_obj_iou = False
        device = next(model.parameters()).device  # get model device
        h = model.hyp  # hyperparameters

        # Define criteria
        BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))
        BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))

        # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
        self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0))  # positive, negative BCE targets

        # Focal loss (enabled only when hyp 'fl_gamma' > 0)
        g = h['fl_gamma']  # focal loss gamma
        if g > 0:
            BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)

        det = de_parallel(model).model[-1]  # Detect() module
        # Per-layer objectness loss weights, keyed on the number of detection layers
        self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, 0.02])  # P3-P7
        self.ssi = list(det.stride).index(16) if autobalance else 0  # stride 16 index
        self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance
        # Mirror Detect() attributes: anchors count, class count, layer count, anchor tensors
        for k in 'na', 'nc', 'nl', 'anchors':
            setattr(self, k, getattr(det, k))

    def __call__(self, p, targets):  # predictions, targets, model
        """Return (total_loss * batch_size, detached tensor [lbox, lobj, lcls]).

        p: list of per-layer prediction tensors; targets: (n, 6) rows of (image, class, x, y, w, h).
        """
        device = targets.device
        lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)
        tcls, tbox, indices, anchors = self.build_targets(p, targets)  # targets

        # Losses
        for i, pi in enumerate(p):  # layer index, layer predictions
            b, a, gj, gi = indices[i]  # image, anchor, gridy, gridx
            tobj = torch.zeros_like(pi[..., 0], device=device)  # target obj

            n = b.shape[0]  # number of targets
            if n:
                ps = pi[b, a, gj, gi]  # prediction subset corresponding to targets

                # Regression: decode predicted xywh in grid units (same decoding as Detect inference)
                pxy = ps[:, :2].sigmoid() * 2 - 0.5
                pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
                pbox = torch.cat((pxy, pwh), 1)  # predicted box
                iou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True)  # iou(prediction, target)
                lbox += (1.0 - iou).mean()  # iou loss

                # Objectness: target objectness is the (clamped, detached) IoU, blended by self.gr
                score_iou = iou.detach().clamp(0).type(tobj.dtype)
                if self.sort_obj_iou:
                    sort_id = torch.argsort(score_iou)
                    b, a, gj, gi, score_iou = b[sort_id], a[sort_id], gj[sort_id], gi[sort_id], score_iou[sort_id]
                tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * score_iou  # iou ratio

                # Classification
                if self.nc > 1:  # cls loss (only if multiple classes)
                    t = torch.full_like(ps[:, 5:], self.cn, device=device)  # targets
                    t[range(n), tcls[i]] = self.cp
                    lcls += self.BCEcls(ps[:, 5:], t)  # BCE

                # Append targets to text file
                # with open('targets.txt', 'a') as file:
                #     [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]

            obji = self.BCEobj(pi[..., 4], tobj)
            lobj += obji * self.balance[i]  # obj loss
            if self.autobalance:
                # Exponential moving average of per-layer balance toward 1/obji
                self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item()

        if self.autobalance:
            # Renormalize so the stride-16 layer keeps weight 1.0
            self.balance = [x / self.balance[self.ssi] for x in self.balance]
        lbox *= self.hyp['box']
        lobj *= self.hyp['obj']
        lcls *= self.hyp['cls']
        bs = tobj.shape[0]  # batch size

        return (lbox + lobj + lcls) * bs, torch.cat((lbox, lobj, lcls)).detach()

    def build_targets(self, p, targets):
        # Build targets for compute_loss(), input targets(image,class,x,y,w,h)
        # Returns per-layer lists: target classes, boxes (grid offsets + wh),
        # gather indices (image, anchor, gridy, gridx), and matched anchors.
        na, nt = self.na, targets.shape[0]  # number of anchors, targets
        tcls, tbox, indices, anch = [], [], [], []
        gain = torch.ones(7, device=targets.device)  # normalized to gridspace gain
        ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt)  # same as .repeat_interleave(nt)
        targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2)  # append anchor indices

        g = 0.5  # bias
        off = torch.tensor([[0, 0],
                            [1, 0], [0, 1], [-1, 0], [0, -1],  # j,k,l,m
                            # [1, 1], [1, -1], [-1, 1], [-1, -1],  # jk,jm,lk,lm
                            ], device=targets.device).float() * g  # offsets

        for i in range(self.nl):
            anchors = self.anchors[i]
            gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]]  # xyxy gain

            # Match targets to anchors: keep pairs whose wh ratio is within hyp 'anchor_t'
            t = targets * gain
            if nt:
                # Matches
                r = t[:, :, 4:6] / anchors[:, None]  # wh ratio
                j = torch.max(r, 1 / r).max(2)[0] < self.hyp['anchor_t']  # compare
                # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t']  # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
                t = t[j]  # filter

                # Offsets: each target may also be assigned to up to two neighbouring cells
                gxy = t[:, 2:4]  # grid xy
                gxi = gain[[2, 3]] - gxy  # inverse
                j, k = ((gxy % 1 < g) & (gxy > 1)).T
                l, m = ((gxi % 1 < g) & (gxi > 1)).T
                j = torch.stack((torch.ones_like(j), j, k, l, m))
                t = t.repeat((5, 1, 1))[j]
                offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]
            else:
                t = targets[0]
                offsets = 0

            # Define
            b, c = t[:, :2].long().T  # image, class
            gxy = t[:, 2:4]  # grid xy
            gwh = t[:, 4:6]  # grid wh
            gij = (gxy - offsets).long()
            gi, gj = gij.T  # grid xy indices

            # Append
            a = t[:, 6].long()  # anchor indices
            indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1)))  # image, anchor, grid indices
            tbox.append(torch.cat((gxy - gij, gwh), 1))  # box
            anch.append(anchors[a])  # anchors
            tcls.append(c)  # class

        return tcls, tbox, indices, anch
223 |
--------------------------------------------------------------------------------
/video/test.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dyh/win10_yolov5_deepsort_counting/adb686ee839e89177990c27f5da35bfae7ab4b9b/video/test.mp4
--------------------------------------------------------------------------------
/weights/yolov5m.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dyh/win10_yolov5_deepsort_counting/adb686ee839e89177990c27f5da35bfae7ab4b9b/weights/yolov5m.pt
--------------------------------------------------------------------------------