├── README.md
├── data
│   ├── image
│   │   ├── 1.jpg
│   │   ├── 10.jpg
│   │   ├── 2.jpg
│   │   ├── 3.jpg
│   │   ├── 4.jpg
│   │   ├── 5.jpg
│   │   ├── 6.jpg
│   │   ├── 7.jpg
│   │   ├── 8.jpg
│   │   └── 9.jpg
│   └── video
│       └── test.mp4
├── detect_lprnet.py
├── detect_yolov5.py
├── mainwindow.py
├── mainwindow.qrc
├── mainwindow.ui
├── mainwindow_rc.py
├── mainwindow_ui.py
├── models
│   ├── LPRNet.py
│   ├── common.py
│   ├── experimental.py
│   └── yolo.py
├── my_env.txt
├── requirements.txt
├── utils
│   ├── augmentations.py
│   ├── autoanchor.py
│   ├── datasets.py
│   ├── downloads.py
│   ├── general.py
│   ├── metrics.py
│   ├── plots.py
│   └── torch_utils.py
├── weights
│   ├── Final_LPRNet_model.pth
│   └── yolov5_best.pt
└── windowico.png

/README.md:
--------------------------------------------------------------------------------
 1 | # Yolov5-6.1_LPRNet_PySide6__vehicle_license_plate_recognition
 2 | A vehicle license plate recognition system built on Yolov5_6.1, LPRNet, and PySide6
 3 | ## Overview
 4 | Only common blue and green license plates are supported.
 5 | ## Environment
 6 | PyTorch version: this project uses 1.8; a version greater than 1.7 is required, and testing shows that versions that are too new raise errors.
 7 | The [requirements.txt](requirements.txt) in this project comes from Yolov5_6.1.
 8 | My own environment is listed in [my_env.txt](my_env.txt); it can serve as a reference if you run into package version problems.
 9 | 
10 | ## Demo
11 | 
12 | 
13 | https://github.com/Hamletlx/Yolov5_61_LPRNet_PySide__vehicle_license_plate_recognition/assets/123156312/0b46308d-f5e6-43a5-9596-f411b696eb03
14 | 
15 | 
--------------------------------------------------------------------------------
/data/image/1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hamletlx/Yolov5_61_LPRNet_PySide__vehicle_license_plate_recognition/b48a4c2f343aa1821d8e7bc9b4a43fddb2d63d4a/data/image/1.jpg
--------------------------------------------------------------------------------
/data/image/10.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hamletlx/Yolov5_61_LPRNet_PySide__vehicle_license_plate_recognition/b48a4c2f343aa1821d8e7bc9b4a43fddb2d63d4a/data/image/10.jpg
--------------------------------------------------------------------------------
/data/image/2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hamletlx/Yolov5_61_LPRNet_PySide__vehicle_license_plate_recognition/b48a4c2f343aa1821d8e7bc9b4a43fddb2d63d4a/data/image/2.jpg
--------------------------------------------------------------------------------
/data/image/3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hamletlx/Yolov5_61_LPRNet_PySide__vehicle_license_plate_recognition/b48a4c2f343aa1821d8e7bc9b4a43fddb2d63d4a/data/image/3.jpg
--------------------------------------------------------------------------------
/data/image/4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hamletlx/Yolov5_61_LPRNet_PySide__vehicle_license_plate_recognition/b48a4c2f343aa1821d8e7bc9b4a43fddb2d63d4a/data/image/4.jpg
--------------------------------------------------------------------------------
/data/image/5.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hamletlx/Yolov5_61_LPRNet_PySide__vehicle_license_plate_recognition/b48a4c2f343aa1821d8e7bc9b4a43fddb2d63d4a/data/image/5.jpg
--------------------------------------------------------------------------------
/data/image/6.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Hamletlx/Yolov5_61_LPRNet_PySide__vehicle_license_plate_recognition/b48a4c2f343aa1821d8e7bc9b4a43fddb2d63d4a/data/image/6.jpg -------------------------------------------------------------------------------- /data/image/7.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Hamletlx/Yolov5_61_LPRNet_PySide__vehicle_license_plate_recognition/b48a4c2f343aa1821d8e7bc9b4a43fddb2d63d4a/data/image/7.jpg -------------------------------------------------------------------------------- /data/image/8.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Hamletlx/Yolov5_61_LPRNet_PySide__vehicle_license_plate_recognition/b48a4c2f343aa1821d8e7bc9b4a43fddb2d63d4a/data/image/8.jpg -------------------------------------------------------------------------------- /data/image/9.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Hamletlx/Yolov5_61_LPRNet_PySide__vehicle_license_plate_recognition/b48a4c2f343aa1821d8e7bc9b4a43fddb2d63d4a/data/image/9.jpg -------------------------------------------------------------------------------- /data/video/test.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Hamletlx/Yolov5_61_LPRNet_PySide__vehicle_license_plate_recognition/b48a4c2f343aa1821d8e7bc9b4a43fddb2d63d4a/data/video/test.mp4 -------------------------------------------------------------------------------- /detect_lprnet.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import cv2 3 | import numpy as np 4 | 5 | from models.LPRNet import build_lprnet, CHARS 6 | 7 | 8 | 9 | def transform(img, device): 10 | img = cv2.resize(img, (94, 24)) 11 | img = img.astype('float32') 12 | img -= 127.5 13 | img *= 0.0078125 14 | img = np.transpose(img, (2, 0, 1)) 15 | img = np.expand_dims(img, axis=0) 16 | img = torch.from_numpy(img) 17 | img = img.to(device) 18 | return img 19 | 20 | 21 | def check_license_plate(plate_number): 22 | if len(plate_number) != 7 and len(plate_number) != 8: 23 | return False 24 | if plate_number[0] not in CHARS[:31]: 25 | return False 26 | if plate_number[1] not in CHARS[41:67]: 27 | return False 28 | for char in plate_number[2:]: 29 | if char not in CHARS[31:67]: 30 | return False 31 | return True 32 | 33 | 34 | def detect_lprnet(images, model, device): 35 | # model = build_lprnet(lpr_max_len=8, phase=False, class_num=len(CHARS), dropout_rate=0.0) 36 | # device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") 37 | # model.to(device) 38 | # model.load_state_dict(torch.load("weights/Final_LPRNet_model.pth")) 39 | 40 | resultlist=[] 41 | for img in images: 42 | img = transform(img, device) 43 | 44 | prebs = model(img) 45 | prebs = prebs.cpu().detach().numpy() 46 | 47 | result='' 48 | for i in range(prebs.shape[0]): 49 | preb = prebs[i, :, :] 50 | preb_label = list() 51 | for j in range(preb.shape[1]): 52 | preb_label.append(np.argmax(preb[:, j], axis=0)) 53 | no_repeat_blank_label = list() 54 | pre_c = preb_label[0] 55 | if pre_c != len(CHARS) - 1: 56 | no_repeat_blank_label.append(pre_c) 57 | for c in preb_label: # dropout repeate label and blank label 58 | if (pre_c == c) or (c == len(CHARS) - 1): 59 | if c == len(CHARS) - 1: 60 | pre_c = c 61 | continue 62 | result += CHARS[c] 63 | pre_c = c 64 | resultlist.append(result) 65 | 
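        # Note: the loops above perform a greedy CTC-style decode of the LPRNet output —
        # for each time step (column of the output) the argmax class is taken, consecutive
        # repeats are collapsed, and the blank symbol (the last entry in CHARS, '-') is
        # dropped before the remaining characters are concatenated into the plate string.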
return resultlist 66 | 67 | 68 | 69 | if __name__ == '__main__': 70 | images = [] 71 | img1 = cv2.imdecode(np.fromfile("C:/Users/hamlet/Desktop/123/沪AD58333.jpg", dtype=np.uint8), 1) 72 | img2 = cv2.imdecode(np.fromfile("C:/Users/hamlet/Desktop/123/沪ADE6010.jpg", dtype=np.uint8), 1) 73 | img3 = cv2.imdecode(np.fromfile("C:/Users/hamlet/Desktop/123/苏CD05935.jpg", dtype=np.uint8), 1) 74 | img4 = cv2.imdecode(np.fromfile("C:/Users/hamlet/Desktop/123/皖AD10222.jpg", dtype=np.uint8), 1) 75 | images.append(img1) 76 | images.append(img2) 77 | images.append(img3) 78 | images.append(img4) 79 | resultlist = detect_lprnet(images) 80 | 81 | for result in resultlist: 82 | print(result) -------------------------------------------------------------------------------- /detect_yolov5.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import cv2 3 | import numpy as np 4 | from models.experimental import attempt_load 5 | from utils.general import non_max_suppression 6 | 7 | def transform(img, device): 8 | # Convert 9 | img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 10 | img = np.ascontiguousarray(img) 11 | img = torch.from_numpy(img).to(device) 12 | img=img.float() 13 | img/=255.0 14 | if img.ndimension() == 3: 15 | img = img.unsqueeze(0) 16 | return img 17 | 18 | def detect_yolov5(image, model, device): 19 | # device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") 20 | # model = attempt_load('weights/yolov5_best.pt', map_location=device) 21 | 22 | image = cv2.resize(image, (640, 640)) 23 | new_image = transform(image, device) 24 | pred = model(new_image)[0] 25 | pred = non_max_suppression(pred, 0.8, 0.5)[0] 26 | pred = pred.cpu().tolist() 27 | 28 | result1 = image.copy() 29 | result2 = [] 30 | result3 = [] 31 | for i in pred: 32 | x1,y1,x2,y2 =map(int,i[:4]) 33 | p, c=i[4], i[5] 34 | classs='Other' 35 | if -0.01 0: 325 | self.ui.tableWidget.removeRow(0) 326 | for idex, item in enumerate(self.table_widget_items): 327 | self.ui.tableWidget.insertRow(idex) 328 | for i in range(len(item)): 329 | self.ui.tableWidget.setItem(idex, i, QTableWidgetItem(item[i])) 330 | self.ui.tableWidget.scrollToBottom() 331 | 332 | 333 | 334 | if __name__ == "__main__": 335 | app = QApplication(sys.argv) 336 | app.setStyleSheet("QMessageBox QPushButton[text = 'OK']{qproperty-text: '好的';}") 337 | window = MainWindow() 338 | window.show() 339 | sys.exit(app.exec()) 340 | -------------------------------------------------------------------------------- /mainwindow.qrc: -------------------------------------------------------------------------------- 1 | 2 | 3 | windowico.png 4 | 5 | 6 | -------------------------------------------------------------------------------- /mainwindow.ui: -------------------------------------------------------------------------------- 1 | 2 | 3 | MainWindow 4 | 5 | 6 | 7 | 0 8 | 0 9 | 1306 10 | 930 11 | 12 | 13 | 14 | 15 | 1306 16 | 930 17 | 18 | 19 | 20 | 21 | 1306 22 | 930 23 | 24 | 25 | 26 | 27 | Consolas 28 | 10 29 | 30 | 31 | 32 | false 33 | 34 | 35 | 车牌识别Demo 36 | 37 | 38 | 39 | :/Image/windowico.png:/Image/windowico.png 40 | 41 | 42 | QLabel#source, #division{ 43 | color: #CBCBCB; 44 | border: 2px solid #3194D1; 45 | background: #FFFFFF 46 | } 47 | 48 | QListWidget#listWidget{ 49 | border: 2px solid #3194D1; 50 | background: #FFFFFF 51 | } 52 | 53 | QListWidget::item:hover#listWidget{ 54 | border: 2px solid #006363 55 | } 56 | 57 | QListWidget::item:selected#listWidget{ 58 | color:black; 59 | background:#60D6A7 
60 | } 61 | 62 | QTableWidget#tableWidget{ 63 | border: 2px solid #3194D1; 64 | } 65 | 66 | 67 | 68 | 69 | 1306 70 | 900 71 | 72 | 73 | 74 | 75 | 1306 76 | 900 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | Consolas 87 | 20 88 | true 89 | 90 | 91 | 92 | 原图 93 | 94 | 95 | Qt::AlignCenter 96 | 97 | 98 | 99 | 100 | 101 | 102 | 103 | Consolas 104 | 20 105 | true 106 | 107 | 108 | 109 | 车牌检测图 110 | 111 | 112 | Qt::AlignCenter 113 | 114 | 115 | 116 | 117 | 118 | 119 | 120 | 121 | 122 | 123 | 124 | 640 125 | 640 126 | 127 | 128 | 129 | 130 | 640 131 | 640 132 | 133 | 134 | 135 | 136 | Consolas 137 | 20 138 | true 139 | 140 | 141 | 142 | 143 | 144 | 145 | SOURCE_IMAGE 146 | 147 | 148 | Qt::AlignCenter 149 | 150 | 151 | 152 | 153 | 154 | 155 | 156 | 640 157 | 640 158 | 159 | 160 | 161 | 162 | 640 163 | 640 164 | 165 | 166 | 167 | 168 | Consolas 169 | 20 170 | true 171 | 172 | 173 | 174 | DIVISION_IMAGE 175 | 176 | 177 | Qt::AlignCenter 178 | 179 | 180 | 181 | 182 | 183 | 184 | 185 | 186 | 187 | Consolas 188 | 12 189 | 190 | 191 | 192 | false 193 | 194 | 195 | 0 196 | 197 | 198 | 199 | 图片名称 200 | 201 | 202 | 203 | Consolas 204 | 10 205 | true 206 | 207 | 208 | 209 | 210 | 211 | 录入时间 212 | 213 | 214 | 215 | Consolas 216 | 10 217 | true 218 | 219 | 220 | 221 | 222 | 223 | 车牌号 224 | 225 | 226 | 227 | Consolas 228 | 10 229 | true 230 | 231 | 232 | 233 | 234 | 235 | 车牌类型 236 | 237 | 238 | 239 | Consolas 240 | 10 241 | true 242 | 243 | 244 | 245 | 246 | 247 | 248 | 249 | 250 | 251 | 252 | 0 253 | 0 254 | 1306 255 | 24 256 | 257 | 258 | 259 | 260 | Consolas 261 | 11 262 | true 263 | 264 | 265 | 266 | 267 | 268 | Consolas 269 | 10 270 | false 271 | 272 | 273 | 274 | 功能 275 | 276 | 277 | 278 | 279 | 280 | 281 | 282 | 283 | 284 | 285 | 286 | 287 | 288 | 289 | 290 | 291 | 292 | 293 | 关于 294 | 295 | 296 | 297 | 298 | 299 | 300 | 301 | 302 | 选择文件 303 | 304 | 305 | 306 | 307 | 308 | 309 | 310 | 车牌定位 311 | 312 | 313 | 314 | 315 | 号码提取 316 | 317 | 318 | 319 | 320 | 去重 321 | 322 | 323 | 324 | 325 | 导出 326 | 327 | 328 | 329 | 330 | 选择图片 331 | 332 | 333 | 334 | 335 | 信息 336 | 337 | 338 | 339 | 340 | 选择文件夹 341 | 342 | 343 | 344 | 345 | 清空 346 | 347 | 348 | 349 | 350 | 选择摄像头 351 | 352 | 353 | 354 | 355 | 关闭摄像头 356 | 357 | 358 | 359 | 360 | 选择视频 361 | 362 | 363 | 364 | 365 | 366 | 367 | 368 | 369 | -------------------------------------------------------------------------------- /mainwindow_rc.py: -------------------------------------------------------------------------------- 1 | # Resource object code (Python 3) 2 | # Created by: object code 3 | # Created by: The Resource Compiler for Qt version 6.5.2 4 | # WARNING! All changes made in this file will be lost! 
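# Note: this module is generated by the Qt resource compiler from mainwindow.qrc; it
# embeds windowico.png as a byte string and registers it with Qt at import time via
# qInitResources(). Regenerate it with `pyside6-rcc mainwindow.qrc -o mainwindow_rc.py`
# rather than editing it by hand.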
5 | 6 | from PySide6 import QtCore 7 | 8 | qt_resource_data = b"\ 9 | \x00\x00\x13\x0a\ 10 | \x89\ 11 | PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\ 12 | \x00\x00\xc8\x00\x00\x00\xc8\x08\x06\x00\x00\x00\xadX\xae\x9e\ 13 | \x00\x00\x00\x01sRGB\x00\xae\xce\x1c\xe9\x00\x00\x12\ 14 | \xc4IDATx^\xed\x9d}\xac\x15\xc5\x19\xc6\x1f\ 15 | \xa3M\x95\xd0\xc3WE0\x17\x04\x91b!iQ!\ 16 | A4\x01J\xfd\x8a&\xc5\xa4\xf5\xb3D\xa8\xb6\x7f\xd4\ 17 | Zk\x8a\xd5\xc64\x17\xf8\xc3\xa2\x16Qi\xfc\xa7Z\ 18 | \xb0H\xd5\xb4\x0dFb\x95R\x85\x1b\x08\x98\x08\xc54\ 19 | \xb5\xe1\xa3\x0a\x0a\xa2\x85*\x85\xda\x844&4\xcf\xf1\ 20 | l{8wwgv\xcf\x9c=3\xbb\xcf$7(\ 21 | \xcc\xce\xbc\xf3\xbc\xf3\xdb\x99wv\xee\xcc)P\x92\x02\ 22 | R Q\x81S\xa4\x8d\x14\x90\x02\xc9\x0at\x02\x901\ 23 | \x00\xa2\x1f\xd6|N\x97\x1d\xf0\x14\x80}]\xb6\xa1,\ 24 | \xd5\xcf\x04\xb0\xa1\x8b\x8d\xa1\x1f#_F\xff\xdd\xd7\xf8\ 25 | \xbb\x8e\xf8\xd8\x05 \x84\x81\xc2\xcd\x000\xaf\x8b\xe2%\ 26 | U=\x0b\xc0F\x0f\xed\x0a\xd1\xa4n\x03\x92\xa6\x19}\ 27 | \xcc\x1f\x02\xe3\xcc\xdf\xed\x00B\x18z\x1b\xa3\x85\xcf\xce\ 28 | \x16 \xee\xbc\xe33 \xcd\xad$ \x8b\x5c\x8c,y\ 29 | \x00\x09\x05\x8cH0\x01R=@\xa2\x16G\xa0\xe4\x1e\ 30 | Q\xb2\x00\xc2\xb7\x07G\x0c\xfe\x19R\x12 \xee\xbc\x15\ 31 | \xca\x08\xd2\xda\xe2\x85\x00r\xc5\xa2\xb6\x80\x10\x0cV\x12\ 32 | b\x12 \xee\xbc\x16* T\x80\xa3\xc8\xfc\xac\x0b6\ 33 | 6\x80p\xd5\x22\xb4Q\xa3\xb9K\x08\x10\x01\xd2\xac\xc0\ 34 | \xd8,\x90\x98\x00\x09\x1d\x0e\x0a#@\xdc\x01\xc2\x15K\ 35 | \xce&\xba\x95X?S\xeb\x9fY\xed\xb1\x86$\x0d\x90\ 36 | \xbcp\xacl,\xb55\xafYgm\x80\xcb\xfc\x1dY\ 37 | \x1fwi\xa0\xca\xca\xad@\xf4y\x81\x7ff\x99\xe5\xb0\ 38 | O\xf0\xc5i\xec\x1bI\x80d\x8d9\x08E\xb4\xac\x96\ 39 | \xbb\xb5zP\x0a\xb4\xa1\x00\x01\xb9\xa5\x01J4\xc2\xa4\ 40 | \x15g\x15\x93\xc4\x01\xc2\xc2\xf7Z\x1aJ0\x18\xf8(\ 41 | I\x01_\x14 (+,\xbf\xcfq\xe1\x89/\xf6\xc4\ 42 | \x14\x07\x08\xe10\x11\xc8\xa1\x89`\xe4^_\xf6EM\ 43 | \xd9QZ\x05\x08\x89\xcd\xce\x8e\xd4x\xa4\x15\x10\x9b\xa9\ 44 | \x15\xe1`\xa1JR\xc0w\x05l\xfas\xea(\xd2\x0a\ 45 | \x88i\xf4\x10\x1c\xbew\x09\xd9\xd7\xaa\x80\xcdH\x928\ 46 | \x8a4\x03b\x13{h\xc9T\x1d04\x05\xd8\xaf\xb9\ 47 | \x22\x9b\x1660\x5c`<\xdd/5\x03b\x1a\x8e\x8c\ 48 | \x01Mh\xca\xc9\xde\xca(`\xea\xdb\x89\x8bM\xcd\x80\ 49 | \x98\xa6W\xd6\x1fW*#\xbb\x1a\x1a\x92\x02i\xfd;\ 50 | 1th\x06\xe4DJk\x15{\x84\xd4\x15dk\x9c\ 51 | \x02\xa6X$\xf6\x9b`\xf4\x97\xa6\xf8C\xd3+u\xba\ 52 | \xd0\x150\x01\x12;C\x12 \xa1\xbb]\xf6\xdb*\xc0\ 53 | o\x22\x84$)\xc5.@E\x80\x98\xb61k\xf5\xca\ 54 | \xd6\x0d\xca\xe7\xab\x02\x02\xc4W\xcf\xc8./\x14\x10 \ 55 | ^\xb8AF\xf8\xaa\x80\x00\xf1\xd53\xb2\xcb\x0b\x05\x04\ 56 | \x88\x17n\x90\x11\xbe* @|\xf5\x8c\xec\xf2B\x01\ 57 | \x01\xe2\x85\x1bd\x84\xaf\x0a\x08\x10_=#\xbb\xbcP\ 58 | @\x80x\xe1\x06\x19\xe1\xab\x02\x02\xc4W\xcf\xc8./\ 59 | \x14\x10 ^\xb8AF\xf8\xaa\x80\x00\xf1\xd53\xb2\xcb\ 60 | \x0b\x05\x04\x88\x17n\x90\x11\xbe* @|\xf5\x8c\xec\ 61 | \xf2B\x01\x01\xe2\x85\x1bd\x84\xaf\x0a\x08\x10_=#\ 62 | \xbb\xbcP@\x80x\xe1\x06\x19\xe1\xab\x02\x02\xc4W\xcf\ 63 | \xc8./\x14\x10 \x8e\xdc\x10]J\xea\xa8\xb8\xa0\x8b\ 64 | \x89N\xe87\x9e\x82\x1e@+\x05H\x9bN2\x9d\x9d\ 65 | \xd4f\xf1A?\xce3\x98y\x85Y\xec\xe1j\x81\xb4\ 66 | L\x80\xe4t\x14G\x0c\xfe2\x7f\x96\xfb%rV\x15\ 67 | \xfcc!_s!@rt?\x9bc)s\x14[\ 68 | \xeaGr_\x88\xd9eU\x04H\x0e\x07\xe4\xbdE+\ 69 | GU\xa5z$\xc4s\xd2\x04H\xc6.\xa8\x98#\xa3\ 70 | `-\xd9C;\x8aV\x80d\xf4w\xecY\xadc\xc6\ 71 | \x8cAoo7\xef\xa9\xcc\xd8\x8a\x0ef\xef\xeb\xeb\xc3\ 72 | \xc6\x8d\x1b\xb1o_\xec\x22Vh\xa3\x88\x00\xc9\xd0W\ 73 | \x12\x8fZ\xdd\xb0a\x03f\xceT\xbc\x1ei\xb9h\xd1\ 74 | \x22,\x5cH\x16bSH\xa3H[\x80\x98\xae\xf7-\ 75 | 
\xdb\x05\x9d\xb1\xd3+v\x04\x8d\x1e\xfdA\x18;vl\ 76 | \xd2(\x92x\xafF\x86\x97U\x91Y\xd3\xee\x08\x89\x1d\ 77 | &M\xf7\xa4\x17i|\x91u\xc5N\xafN\x9cH;\ 78 | \xe0\xbeH\xf3\xfc\xaak\xe5\xca\x95\x98??\xf6\xae\xd6\ 79 | \xd2_\xe2ZE@b\xa7W\x8c=\xf6\xee\xb5\xbd\xdc\ 80 | \xd7\xaf\x0e\xdcik\x18\x83p\x14\x89I\xfc\x80\xc8s\ 81 | \x9bK\x9b\xaa\x08\x88\xa6W9\xbas\xca4+\xa48\ 82 | $s\xcb\x05HC2\x05\xe7\xe9}\x87S,N\xb5\ 83 | bRhqH&H\xaa\x08Hl\xa0\xa1\xf8#\xbd\ 84 | \xdfT5\x0e\xa9\x1a \x8a?2\xbd?O\xce\x9c2\ 85 | \xcd*m?*m\xc3\x12\xfa\x81\xe2\x8f\xce\x00R\xda\ 86 | 8\xa4j\x80\xc4\xee\xbdR\xfcaGM\xcaG\xc3\xd2\ 87 | \xc6!U\x03\xa4_\xfc\xa1\xe5];8\x98\x8b\xdbN\ 88 | f\xcd\x8a]\xd5-\xedro\x95\x00\x89\x8d?\xb8\xad\ 89 | \x84#\x88\x92\x9d\x02U[\xee\xad\x12 \x8a?\xec\x18\ 90 | H\xcd\x95\xb2\xdc[\xca\x8b^\xab\x04Hl\xfc\xc1\xaf\ 91 | \xe7\x9cf)\xd9)\x90\xb2\xdc\x1b\xda\xee^\xab\x06W\ 92 | \x05\x10-\xefZu\x07s\xa6\xaa\xc5!U\x01$v\ 93 | \xab\xb3v\xef\x9a\x81\x88\xcbQ\xa58\xa4*\x80(\xfe\ 94 | \xc8\xc7B\xecSU\xdavR\x15@\xb4\xbd\xc4! \ 95 | U\xdavR\x05@\x14\x7f8\x84#*\xea\x94Sb\ 96 | \xbbN\xe9\xbe\x87T\x01\x10M\xaf:\x00HU\xe2\x90\ 97 | *\x00\xc2C\xe1\x18\xa4\x9f\x94\xb4\xbd\xa4=j\xaa\x12\ 98 | \x87T\x01\x10\xc5\x1f\xed\xb1\x10\xfbt\xcaro\xa9~\ 99 | \x0d\xb7\xec\x80(\xfe\xe8\x00\x1cQ\x91U\xd8\xfe^v\ 100 | @\x14\x7ft\x10\x10n\x5c\xe4H\x12\x93J\xb3\xfd\xbd\ 101 | \xec\x80h{{\x07\x01\xa9\xc2\xf6\xf7\xb2\x02\xf2y\x00\ 102 | \x9f\x01p\xb0\xb5\x7fh{\xbb;bR\xe2\x90-\x00\ 103 | \xbe\x0e\xe0?\x00>tWc\xf1%\x95\x05\x90\xf1\x00\ 104 | \xe6\x02\xb8\x1c\xc0\xe4Z\xad\xf6\xd9\x11g\x8d\xc0\xee=\ 105 | \xbb\xfb):o\xde<\xacX\xc1\x85-%\x17\x0a\xc4\ 106 | \xc5!\xa7\x9dv\x1a&|a\x02\xde\xfc\xeb\x9b\xac\xe2\ 107 | \xdf\x00^\x07\xf0\x02\x80'\x00\xfc\xcbE\xbdE\x95\x11\ 108 | : \x1c%\x96\x02\xb8\x83\x82\xd5j\xb5\xfa\xcf\xb1c\ 109 | \xc7\xea?qI\xfb\xaf\xdcv\xad\xa48d\xe0\xc0\x81\ 110 | \x18\xf8\xe0\x83\ 146 | \xfa\x0f;\xfa\xbd\xf7\xde\x9b\xc9\xba\x97^z\x09W^\ 147 | ye\xa6g|\xcfv\xa5|6\xe9~\ 206 | \x90|\xba%=\xb5f\xd2\xc4Ist\x81\xa7[Q\ 207 | \xbbYZ\xe3\x86)]\xa0\xe3\xc8\x09\xcfM\x9a8\xe9\ 208 | :\x01\xe2HM\x0f\x8ai\x00r\x0b\x80_y`N\ 209 | ?\x13\x82\x9ab\x01\xf8Y\xadV\xfb!o%\xd24\ 210 | \xcb\xc7\xee\x94\xcd\xa6\xa6[n\xbf\x02`C\xb6\xa7\x8b\ 211 | \xc9\x1d\x1a 7\x02\xf8\xb5\xeeI/\xa6st\xb2\x96\ 212 | \xa6{\xd2Y\xcd`\x00G;Y_\xde\xb2C\x03\xe4\ 213 | s\x00\x8e\xd4j\xb5SG\xf5\x8c\x82\xa6Zy\xdd\xde\ 214 | \xfd\xe7\xb8\xc3\x9a\x9b\x1c\x01\xfc\x1e\xc0\xd5\xdd\xb7(\xde\ 215 | \x82\xd0\x00a+\x1e\x05\xf0}\xfeGc\xfe\xea\xab\xb6\ 216 | \xb2+A\x81\x96\x19\xc0\xd7\x00\xbc\xe0\xabX!\x022\ 217 | \x0c\xc0_\x00\x8c \xbc\x12Z\xd7B\xfb\xda\xbd\xfa\ 218 | \xdb\xd5\xf2R\xfb\x0d\x80\xeb|\xb6>D@\xa8'\x0f\ 219 | \xc8]\x1b\x09\xab\x98\xc4\xe7.\xf6\x7f\xdbZ\xe0\xf8\x1b\ 220 | \x00\x1e\xb6\xf5\xa1\xcf\xd6\x87\x0a\x085\xbd\x09\xc0j\xfe\ 221 | \x07\x03>\xc5$\xfev\xb3\x18\xff\x10\x0e\xbe\xe4v\xf9\ 222 | k\xf5\xa7\x96\x85\x0c\x08\xed\x9f\x01\xe01\x00_\x8aV\ 223 | E\x06\xd5\x06)x\xf7\xa4\xd7E`\x1c=v\xb4y\ 224 | Y\xfe9\x00\xdf\x05`\x7fcO\x17\xdb\x13: \x91\ 225 | t\xdf\x030\x8fWMD\xa0\xf0\x1f\x08\xcb\xfe\x03\xfb\ 226 | \x15\xa3\x14\xd8\xc1\x9a\xa1h\x8d\x0fG\x8e\x1c\xb9\xfd\xfd\ 227 | \xf7\xdf\xf7\xf7\xd7.ct*\x0b Q\xd3x\x10\x13\ 228 | ?8\x8d\x8b\xa6^t\x18A\xe1[,k:\xf5\xd4\ 229 | S\x0f\x0d\x1f>\xfc\x97Y\x9f+S\xfe\xe3\xc7\x8f\x7f\ 230 | \xeb\xc8\x91#\xc6\x0b\x19\xa3\xf8\x22m\xd1d\xc2\x84\x09\ 231 | /\xef\xda\xb5+\xf9\xf0c\x0f\x85+\x1b \x94x3\ 232 | \x80\xf4S\xd0\xec\x1d\xc1{\x93\xa7\xdbg/_\xces\ 233 | \xcf=w\xc7\xdbo\xbf\x9d\xef`\xde\x169\x04\x88\x1f\ 234 | 
\xfd\xe3O\x00\xcc\x97R\xd8\xd9*@\x04\x88]O\x09\ 235 | (\x97\x00q\xe8,\x8d \x0e\xc5\xf4\xa4\xa8?\x00\xb8\ 236 | \xcc\x91-\xaf\x02\x08\xf7\xf0+\x07\x22\x0c\x1d:t\xeb\ 237 | '\x9f|2\xad\xdd\x8f\xb1\xfcVu\xe8\xf0\xa1\xdf\x1e\ 238 | >|\xf8\x1b\x0e\xcc*\xac\x882\xc6 ?\x05\x90\xed\ 239 | \xf2\xf3d\xb9yk\xe5\x8f\x0a\xf3\x86\x9f\x15\xad\x984\ 240 | q\xd2\xbcv\xf7\xbd5>\xe6\xf2\xd7jW\xfa\xd9\xcc\ 241 | x\xab\xca\x08\xc8L\x87[\xa7\xbf\x0a\xe0\x95\x90\x1c\xda\ 242 | \x01[{\x01,lg\xdf\x1bW\x12\x1b#\x10\xefy\ 243 | \xdb\xd7\x01\x1b;Vd\x19\x01\xa1X\x07\x01\x98/>\ 244 | O\x97\xf5\xef\xdc\xef\xd51\xe5\xc3)\x98\xc7\xb9o\xe8\ 245 | \xe9\xe9\x19\x93g\xdf[t\xba\xfe\x81\x03\x076\x02\x98\ 246 | \x15N\xb3?\xb5\xb4\xac\x800n\xf8c\x9b\xce\xb8\x02\ 247 | \x00\xe3\x19%\xa0>*s\x9a\xc4d;\xddj\x82\x83\ 248 | \x8f\x057z\x94\x19\x10\xb6\x8d\x90,\xce\xf1\x1d\x83#\ 249 | \xc7\x5c\x00\xed\x9d\xea\x5c>\xac\xea#\x09o\x95h\xe9\ 250 | \xf8\xfdZ\xda\xf2\xcbP\x1c9\x18{\x045\xb5\x8a\x1a\ 251 | U\xd6\x11\xa4\xd9i|\xfb]\x0e\xe0B\x00\x03b\xfa\ 252 | -\xff\xee\x1f\x00v4F\x0c/\x7f\xf5\xd3\x13\xde\x08\ 253 | I\xfd\xe7\xcc3\xcf\xbcz\xe8\xd0\xa1\x03i\xd7\x90\xc1\ 254 | C\x06\x1e\xf9\xe7\x91\xfai\xd8\xa3G\x8f\x1e\xb1~\xfd\ 255 | z\xde/A \xfax\xbb\xb6'\xb6\xe72\xa3\x0a\x80\ 256 | \xe4\x12F\x0fI\x81\xb2O\xb1\xe4a)\xd0\xb6\x02\x1a\ 257 | A\xda\x96P\x05\x94Y\x01\x01Rf\xef\xaamm+\ 258 | @\xda\x96P\x05\x94Y\x01\x01Rf\xef\xaamm\ 259 | + @\xda\x96P\x05\x94Y\x01\x01Rf\xef\xaam\ 260 | m+ @\xda\x96P\x05\x04\xa4\x00?\x1a'%~\ 261 | \xd8\xec\xf7\xb5?\x02\x84_GW\xa4<\xfcTh\xdb\ 262 | \x94\x03r\x9aL-F\x01\x1e\xea\x91\xd6\xc7\xb9\x91\xb2\ 263 | \xdfW\xff\x08\x10\xd3\x16\xf1\xe0\xf6\xf1\x17\xa3\xb9j\x09\ 264 | H\x01\x01\x12\x90\xb3dj\xf1\x0a\x08\x90\xe25W\x8d\ 265 | \x01) @\x02r\x96L-^\x01\x01R\xbc\xe6\xaa\ 266 | 1 \x05\x04H@\xce\x92\xa9\xc5+ @\x8a\xd7\x5c\ 267 | 5\x06\xa4\x80\x00\x09\xc8Y2\xb5x\x05\x04H\xf1\x9a\ 268 | \xab\xc6\x80\x14\x10 \x019K\xa6\x16\xaf\x80\x00)^\ 269 | s\xd5\x18\x90\x02\x02$ g\xc9\xd4\xe2\x15\x10 \xc5\ 270 | k\xae\x1a\x03R@\x80\x04\xe4,\x99Z\xbc\x02\x02\xa4\ 271 | x\xcdUc@\x0a\x08\x90\x80\x9c%S\x8bW@\x80\ 272 | \x14\xaf\xb9j\x0cH\x01\x01\x12\x90\xb3dj\xf1\x0a\x08\ 273 | \x90\xe25W\x8d\x01) @\x02r\x96L-^\x01\ 274 | \x01R\xbc\xe6\xaa1 \x05\x04H@\xce\x92\xa9\xc5+\ 275 | @\x8a\xd7\x5c5\x06\xa4\x80\x00\x09\xc8Y2\xb5x\ 276 | \x05\x04H\xf1\x9a\xab\xc6\x80\x14\x10 \x019K\xa6\x16\ 277 | \xaf\x80\x00)^s\xd5\x18\x90\x02\xbd\x00\x16\xa6\xd8\x1b\ 278 | {Nu\xf3_\x9eHyxe\xe3*\xdf\x80\xf4\x90\ 279 | \xa9R\xe0$\x05x./G\x91\xa4d\x04do\xe3\ 280 | \x8a\xdf\xb8\x02x\xa8/\x0f\xf7U\x92\x02!*\xc0\xc3\ 281 | \xd9\xd9\xbf\x93\x12Ou\x1f\x1b\xf7\x8f\xcd\xd4\xa4\x01\xc2\ 282 | gY@\x90\x97\xc1\x87\xe8Q\xd9\xecT\x01\xd3\xf4*\ 283 | q\x00h\x06\xc4T\x08\xe7o\x8b\x9c\x9a\xad\xc2\xa4@\ 284 | \xe7\x150\x8d\x1e\xb4 \xf1\xf6\x82f@LW \xa4\ 285 | \x16\xd4\xf9v\xaa\x06)\x90K\x81\x0d\x00L\x17\xe7\xc4\ 286 | N\xafX[k`b\x9afi\xaa\x95\xcbGz\xa8\ 287 | K\x0a\x98\x02s\x9a\x95:3j\x05\xc44\xcd\x8a\xda\ 288 | \xa9x\xa4K\x1eW\xb5\xd6\x0a\x98F\x8e\xa8\xa0\xd4k\ 289 | \x08\xe3\xfe\xd1f\x14\xd1t\xcb\xdaO\xcaX\xb0\x02\x9c\ 290 | N\xf1E\x9f6\xad\x8aL2\xc6\xd5q\x800\xa8!\ 291 | }\xfc\xd3\x94\x18\xfd3\xc0\xd1\xea\x96I)\xfd{\xa7\ 292 | \x15 \x10\xb7\x18\xbeu4\xdb\x90\xb8\xb4\xdb\x9c)i\ 293 | x\xb1\x9djEe\x11\x14\xaep\xc5\xde\x14\xdaie\ 294 | T~e\x15\xe0\x87\xbfs\x1a\xa3\x85\xcd\x88\x11\x09\xc5\ 295 | ~\xca\xefz\xc6\x17{\xda\xfc++$\xcd\xb0D\xb7\ 296 | \x85\xbeccD\x87\xdd+h\xdd\x09l\xba\x0d\xd9]\ 297 | M\xc9%E3\x1b\x9b\x19N\x5c)\xec\x0f\x9c\xf5\xf4\ 298 | 
\xbb\xd16.\xb3\xe9\x9e\xf4\xbc\x90\x14!\x94m\x1d\xb1\ 299 | \xd7\xfb\xda>\xac|')`\xf3)\xc0w\xc92\xf5\ 300 | \x07\x13 l\xac\xcdR\x99\xcf\xa2d\x12\xc4\xe7\x86x\ 301 | `[\xc8\x80d\x1a9\x22\xadm\x00a^\xce\xf58\ 302 | \x9a\xe4\x1d\xd6\xba\xe9[\x01\xe2N\xfdP\x011\xaeV\ 303 | %Id\x0b\x08\x9f\x8f\xe6\x9fY\x82!w\xae\xc9_\ 304 | \x92\x00\xc9\xaf]\xeb\x93\xa1\x01\x12-\x1eY\xc5\x1by\ 305 | b\x90\xb8g\xb2\xac3\xbbsM\xfe\x92\x04H~\xed\ 306 | B\x05\xa4m0\xb2N\xb1\xd2@\xe1\xc8\xe2\xf3\xd4K\ 307 | \x80T\x03\x10\xc6\x18\x04\xe3)\xdb\x15*\x1bY\xb2L\ 308 | \xb1\xd2\xca\xe3\xa82\xa3\x11\xab\xf8\x06\x8b\x00\xb1\xe9\x09\ 309 | vy\xe8g.\xdat3E\xdf.\xa2?\xfb\x1a@\ 310 | \x18\xbfi\xe41\xda\x15 \xadu7\x8f*\xdd\x06\x86\ 311 | o\x95\x8e\x88\x97Gp=\x13\x96\x02\x9d\x02$,\x15\ 312 | d\xad\x14HP@\x80\xa8kH\x81\x14\x05\x04\x88\xba\ 313 | \x87\x14\x10 \xea\x03R \x9f\x02\x1aA\xf2\xe9\xa6\xa7\ 314 | *\xa2\xc0\x7f\x01\xbaDs#\x97\xfa\x10{\x00\x00\x00\ 315 | \x00IEND\xaeB`\x82\ 316 | " 317 | 318 | qt_resource_name = b"\ 319 | \x00\x05\ 320 | \x00P7\xd5\ 321 | \x00I\ 322 | \x00m\x00a\x00g\x00e\ 323 | \x00\x0d\ 324 | \x03\xab;\x07\ 325 | \x00w\ 326 | \x00i\x00n\x00d\x00o\x00w\x00i\x00c\x00o\x00.\x00p\x00n\x00g\ 327 | " 328 | 329 | qt_resource_struct = b"\ 330 | \x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\ 331 | \x00\x00\x00\x00\x00\x00\x00\x00\ 332 | \x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\ 333 | \x00\x00\x00\x00\x00\x00\x00\x00\ 334 | \x00\x00\x00\x10\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\ 335 | \x00\x00\x01\x89\xf9\xfc\x82\xb5\ 336 | " 337 | 338 | def qInitResources(): 339 | QtCore.qRegisterResourceData(0x03, qt_resource_struct, qt_resource_name, qt_resource_data) 340 | 341 | def qCleanupResources(): 342 | QtCore.qUnregisterResourceData(0x03, qt_resource_struct, qt_resource_name, qt_resource_data) 343 | 344 | qInitResources() 345 | -------------------------------------------------------------------------------- /mainwindow_ui.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | ################################################################################ 4 | ## Form generated from reading UI file 'mainwindow.ui' 5 | ## 6 | ## Created by: Qt User Interface Compiler version 6.5.2 7 | ## 8 | ## WARNING! All changes made in this file will be lost when recompiling UI file! 
9 | ################################################################################ 10 | 11 | from PySide6.QtCore import (QCoreApplication, QDate, QDateTime, QLocale, 12 | QMetaObject, QObject, QPoint, QRect, 13 | QSize, QTime, QUrl, Qt) 14 | from PySide6.QtGui import (QAction, QBrush, QColor, QConicalGradient, 15 | QCursor, QFont, QFontDatabase, QGradient, 16 | QIcon, QImage, QKeySequence, QLinearGradient, 17 | QPainter, QPalette, QPixmap, QRadialGradient, 18 | QTransform) 19 | from PySide6.QtWidgets import (QApplication, QHBoxLayout, QHeaderView, QLabel, 20 | QMainWindow, QMenu, QMenuBar, QSizePolicy, 21 | QTableWidget, QTableWidgetItem, QVBoxLayout, QWidget) 22 | import mainwindow_rc 23 | 24 | class Ui_MainWindow(object): 25 | def setupUi(self, MainWindow): 26 | if not MainWindow.objectName(): 27 | MainWindow.setObjectName(u"MainWindow") 28 | MainWindow.resize(1306, 930) 29 | MainWindow.setMinimumSize(QSize(1306, 930)) 30 | MainWindow.setMaximumSize(QSize(1306, 930)) 31 | font = QFont() 32 | font.setFamilies([u"Consolas"]) 33 | font.setPointSize(10) 34 | MainWindow.setFont(font) 35 | MainWindow.setMouseTracking(False) 36 | icon = QIcon() 37 | icon.addFile(u":/Image/windowico.png", QSize(), QIcon.Normal, QIcon.Off) 38 | MainWindow.setWindowIcon(icon) 39 | MainWindow.setStyleSheet(u"QLabel#source, #division{\n" 40 | " color: #CBCBCB;\n" 41 | " border: 2px solid #3194D1;\n" 42 | " background: #FFFFFF\n" 43 | "}\n" 44 | "\n" 45 | "QListWidget#listWidget{\n" 46 | " border: 2px solid #3194D1;\n" 47 | " background: #FFFFFF\n" 48 | "}\n" 49 | "\n" 50 | "QListWidget::item:hover#listWidget{\n" 51 | " border: 2px solid #006363\n" 52 | "}\n" 53 | "\n" 54 | "QListWidget::item:selected#listWidget{\n" 55 | " color:black;\n" 56 | " background:#60D6A7\n" 57 | "}\n" 58 | "\n" 59 | "QTableWidget#tableWidget{\n" 60 | " border: 2px solid #3194D1;\n" 61 | "}") 62 | self.file = QAction(MainWindow) 63 | self.file.setObjectName(u"file") 64 | font1 = QFont() 65 | self.file.setFont(font1) 66 | self.location = QAction(MainWindow) 67 | self.location.setObjectName(u"location") 68 | self.number = QAction(MainWindow) 69 | self.number.setObjectName(u"number") 70 | self.repetition = QAction(MainWindow) 71 | self.repetition.setObjectName(u"repetition") 72 | self.output = QAction(MainWindow) 73 | self.output.setObjectName(u"output") 74 | self.select = QAction(MainWindow) 75 | self.select.setObjectName(u"select") 76 | self.information = QAction(MainWindow) 77 | self.information.setObjectName(u"information") 78 | self.selects = QAction(MainWindow) 79 | self.selects.setObjectName(u"selects") 80 | self.clear = QAction(MainWindow) 81 | self.clear.setObjectName(u"clear") 82 | self.select_camera = QAction(MainWindow) 83 | self.select_camera.setObjectName(u"select_camera") 84 | self.close_camera = QAction(MainWindow) 85 | self.close_camera.setObjectName(u"close_camera") 86 | self.select_video = QAction(MainWindow) 87 | self.select_video.setObjectName(u"select_video") 88 | self.centralwidget = QWidget(MainWindow) 89 | self.centralwidget.setObjectName(u"centralwidget") 90 | self.centralwidget.setMinimumSize(QSize(1306, 900)) 91 | self.centralwidget.setMaximumSize(QSize(1306, 900)) 92 | self.verticalLayout = QVBoxLayout(self.centralwidget) 93 | self.verticalLayout.setObjectName(u"verticalLayout") 94 | self.horizontalLayout = QHBoxLayout() 95 | self.horizontalLayout.setObjectName(u"horizontalLayout") 96 | self.label_1 = QLabel(self.centralwidget) 97 | self.label_1.setObjectName(u"label_1") 98 | font2 = QFont() 99 | 
font2.setFamilies([u"Consolas"]) 100 | font2.setPointSize(20) 101 | font2.setBold(True) 102 | self.label_1.setFont(font2) 103 | self.label_1.setAlignment(Qt.AlignCenter) 104 | 105 | self.horizontalLayout.addWidget(self.label_1) 106 | 107 | self.label_2 = QLabel(self.centralwidget) 108 | self.label_2.setObjectName(u"label_2") 109 | self.label_2.setFont(font2) 110 | self.label_2.setAlignment(Qt.AlignCenter) 111 | 112 | self.horizontalLayout.addWidget(self.label_2) 113 | 114 | 115 | self.verticalLayout.addLayout(self.horizontalLayout) 116 | 117 | self.horizontalLayout_2 = QHBoxLayout() 118 | self.horizontalLayout_2.setObjectName(u"horizontalLayout_2") 119 | self.source = QLabel(self.centralwidget) 120 | self.source.setObjectName(u"source") 121 | self.source.setMinimumSize(QSize(640, 640)) 122 | self.source.setMaximumSize(QSize(640, 640)) 123 | self.source.setFont(font2) 124 | self.source.setStyleSheet(u"") 125 | self.source.setAlignment(Qt.AlignCenter) 126 | 127 | self.horizontalLayout_2.addWidget(self.source) 128 | 129 | self.division = QLabel(self.centralwidget) 130 | self.division.setObjectName(u"division") 131 | self.division.setMinimumSize(QSize(640, 640)) 132 | self.division.setMaximumSize(QSize(640, 640)) 133 | self.division.setFont(font2) 134 | self.division.setAlignment(Qt.AlignCenter) 135 | 136 | self.horizontalLayout_2.addWidget(self.division) 137 | 138 | 139 | self.verticalLayout.addLayout(self.horizontalLayout_2) 140 | 141 | self.tableWidget = QTableWidget(self.centralwidget) 142 | if (self.tableWidget.columnCount() < 4): 143 | self.tableWidget.setColumnCount(4) 144 | font3 = QFont() 145 | font3.setFamilies([u"Consolas"]) 146 | font3.setPointSize(10) 147 | font3.setBold(True) 148 | __qtablewidgetitem = QTableWidgetItem() 149 | __qtablewidgetitem.setFont(font3); 150 | self.tableWidget.setHorizontalHeaderItem(0, __qtablewidgetitem) 151 | __qtablewidgetitem1 = QTableWidgetItem() 152 | __qtablewidgetitem1.setFont(font3); 153 | self.tableWidget.setHorizontalHeaderItem(1, __qtablewidgetitem1) 154 | __qtablewidgetitem2 = QTableWidgetItem() 155 | __qtablewidgetitem2.setFont(font3); 156 | self.tableWidget.setHorizontalHeaderItem(2, __qtablewidgetitem2) 157 | __qtablewidgetitem3 = QTableWidgetItem() 158 | __qtablewidgetitem3.setFont(font3); 159 | self.tableWidget.setHorizontalHeaderItem(3, __qtablewidgetitem3) 160 | self.tableWidget.setObjectName(u"tableWidget") 161 | font4 = QFont() 162 | font4.setFamilies([u"Consolas"]) 163 | font4.setPointSize(12) 164 | self.tableWidget.setFont(font4) 165 | self.tableWidget.setAutoFillBackground(False) 166 | self.tableWidget.setRowCount(0) 167 | 168 | self.verticalLayout.addWidget(self.tableWidget) 169 | 170 | MainWindow.setCentralWidget(self.centralwidget) 171 | self.menubar = QMenuBar(MainWindow) 172 | self.menubar.setObjectName(u"menubar") 173 | self.menubar.setGeometry(QRect(0, 0, 1306, 24)) 174 | font5 = QFont() 175 | font5.setFamilies([u"Consolas"]) 176 | font5.setPointSize(11) 177 | font5.setBold(True) 178 | self.menubar.setFont(font5) 179 | self.menu = QMenu(self.menubar) 180 | self.menu.setObjectName(u"menu") 181 | font6 = QFont() 182 | font6.setFamilies([u"Consolas"]) 183 | font6.setPointSize(10) 184 | font6.setBold(False) 185 | self.menu.setFont(font6) 186 | self.menu_2 = QMenu(self.menubar) 187 | self.menu_2.setObjectName(u"menu_2") 188 | MainWindow.setMenuBar(self.menubar) 189 | 190 | self.menubar.addAction(self.menu.menuAction()) 191 | self.menubar.addAction(self.menu_2.menuAction()) 192 | self.menu.addAction(self.select) 193 | 
self.menu.addAction(self.location) 194 | self.menu.addAction(self.number) 195 | self.menu.addSeparator() 196 | self.menu.addAction(self.selects) 197 | self.menu.addSeparator() 198 | self.menu.addAction(self.select_video) 199 | self.menu.addSeparator() 200 | self.menu.addAction(self.select_camera) 201 | self.menu.addAction(self.close_camera) 202 | self.menu.addSeparator() 203 | self.menu.addAction(self.clear) 204 | self.menu.addAction(self.repetition) 205 | self.menu.addAction(self.output) 206 | self.menu_2.addAction(self.information) 207 | 208 | self.retranslateUi(MainWindow) 209 | 210 | QMetaObject.connectSlotsByName(MainWindow) 211 | # setupUi 212 | 213 | def retranslateUi(self, MainWindow): 214 | MainWindow.setWindowTitle(QCoreApplication.translate("MainWindow", u"\u8f66\u724c\u8bc6\u522bDemo", None)) 215 | self.file.setText(QCoreApplication.translate("MainWindow", u"\u9009\u62e9\u6587\u4ef6", None)) 216 | self.location.setText(QCoreApplication.translate("MainWindow", u"\u8f66\u724c\u5b9a\u4f4d", None)) 217 | self.number.setText(QCoreApplication.translate("MainWindow", u"\u53f7\u7801\u63d0\u53d6", None)) 218 | self.repetition.setText(QCoreApplication.translate("MainWindow", u"\u53bb\u91cd", None)) 219 | self.output.setText(QCoreApplication.translate("MainWindow", u"\u5bfc\u51fa", None)) 220 | self.select.setText(QCoreApplication.translate("MainWindow", u"\u9009\u62e9\u56fe\u7247", None)) 221 | self.information.setText(QCoreApplication.translate("MainWindow", u"\u4fe1\u606f", None)) 222 | self.selects.setText(QCoreApplication.translate("MainWindow", u"\u9009\u62e9\u6587\u4ef6\u5939", None)) 223 | self.clear.setText(QCoreApplication.translate("MainWindow", u"\u6e05\u7a7a", None)) 224 | self.select_camera.setText(QCoreApplication.translate("MainWindow", u"\u9009\u62e9\u6444\u50cf\u5934", None)) 225 | self.close_camera.setText(QCoreApplication.translate("MainWindow", u"\u5173\u95ed\u6444\u50cf\u5934", None)) 226 | self.select_video.setText(QCoreApplication.translate("MainWindow", u"\u9009\u62e9\u89c6\u9891", None)) 227 | self.label_1.setText(QCoreApplication.translate("MainWindow", u"\u539f\u56fe", None)) 228 | self.label_2.setText(QCoreApplication.translate("MainWindow", u"\u8f66\u724c\u68c0\u6d4b\u56fe", None)) 229 | self.source.setText(QCoreApplication.translate("MainWindow", u"SOURCE_IMAGE", None)) 230 | self.division.setText(QCoreApplication.translate("MainWindow", u"DIVISION_IMAGE", None)) 231 | ___qtablewidgetitem = self.tableWidget.horizontalHeaderItem(0) 232 | ___qtablewidgetitem.setText(QCoreApplication.translate("MainWindow", u"\u56fe\u7247\u540d\u79f0", None)); 233 | ___qtablewidgetitem1 = self.tableWidget.horizontalHeaderItem(1) 234 | ___qtablewidgetitem1.setText(QCoreApplication.translate("MainWindow", u"\u5f55\u5165\u65f6\u95f4", None)); 235 | ___qtablewidgetitem2 = self.tableWidget.horizontalHeaderItem(2) 236 | ___qtablewidgetitem2.setText(QCoreApplication.translate("MainWindow", u"\u8f66\u724c\u53f7", None)); 237 | ___qtablewidgetitem3 = self.tableWidget.horizontalHeaderItem(3) 238 | ___qtablewidgetitem3.setText(QCoreApplication.translate("MainWindow", u"\u8f66\u724c\u7c7b\u578b", None)); 239 | self.menu.setTitle(QCoreApplication.translate("MainWindow", u"\u529f\u80fd", None)) 240 | self.menu_2.setTitle(QCoreApplication.translate("MainWindow", u"\u5173\u4e8e", None)) 241 | # retranslateUi 242 | 243 | -------------------------------------------------------------------------------- /models/LPRNet.py: 
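# --- Usage sketch (added for illustration; not a file in this repository) ---
# A minimal example of how the recognition pieces fit together: build the LPRNet defined
# in models/LPRNet.py below, load the shipped weights, and decode an already-cropped plate
# image with detect_lprnet() from detect_lprnet.py. The path "plate_crop.jpg" is
# hypothetical; in the application the crop would come from the YOLOv5 plate detector.
import cv2
import torch

from detect_lprnet import detect_lprnet
from models.LPRNet import CHARS, build_lprnet

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
lprnet = build_lprnet(lpr_max_len=8, phase=False, class_num=len(CHARS), dropout_rate=0.0)
lprnet.load_state_dict(torch.load("weights/Final_LPRNet_model.pth", map_location=device))
lprnet.to(device)

plate_crop = cv2.imread("plate_crop.jpg")           # hypothetical cropped plate region (BGR)
print(detect_lprnet([plate_crop], lprnet, device))  # -> list with one decoded plate string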
-------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import torch 3 | 4 | CHARS = ['京', '沪', '津', '渝', '冀', '晋', '蒙', '辽', '吉', '黑', 5 | '苏', '浙', '皖', '闽', '赣', '鲁', '豫', '鄂', '湘', '粤', 6 | '桂', '琼', '川', '贵', '云', '藏', '陕', '甘', '青', '宁', 7 | '新', 8 | '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 9 | 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 10 | 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 11 | 'W', 'X', 'Y', 'Z', 'I', 'O', '-' 12 | ] 13 | 14 | class small_basic_block(nn.Module): 15 | def __init__(self, ch_in, ch_out): 16 | super(small_basic_block, self).__init__() 17 | self.block = nn.Sequential( 18 | nn.Conv2d(ch_in, ch_out // 4, kernel_size=1), 19 | nn.ReLU(), 20 | nn.Conv2d(ch_out // 4, ch_out // 4, kernel_size=(3, 1), padding=(1, 0)), 21 | nn.ReLU(), 22 | nn.Conv2d(ch_out // 4, ch_out // 4, kernel_size=(1, 3), padding=(0, 1)), 23 | nn.ReLU(), 24 | nn.Conv2d(ch_out // 4, ch_out, kernel_size=1), 25 | ) 26 | def forward(self, x): 27 | return self.block(x) 28 | 29 | class LPRNet(nn.Module): 30 | def __init__(self, lpr_max_len, phase, class_num, dropout_rate): 31 | super(LPRNet, self).__init__() 32 | self.phase = phase 33 | self.lpr_max_len = lpr_max_len 34 | self.class_num = class_num 35 | self.backbone = nn.Sequential( 36 | nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3, stride=1), # 0 37 | nn.BatchNorm2d(num_features=64), 38 | nn.ReLU(), # 2 39 | nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 1, 1)), 40 | small_basic_block(ch_in=64, ch_out=128), # *** 4 *** 41 | nn.BatchNorm2d(num_features=128), 42 | nn.ReLU(), # 6 43 | nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(2, 1, 2)), 44 | small_basic_block(ch_in=64, ch_out=256), # 8 45 | nn.BatchNorm2d(num_features=256), 46 | nn.ReLU(), # 10 47 | small_basic_block(ch_in=256, ch_out=256), # *** 11 *** 48 | nn.BatchNorm2d(num_features=256), # 12 49 | nn.ReLU(), 50 | nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(4, 1, 2)), # 14 51 | nn.Dropout(dropout_rate), 52 | nn.Conv2d(in_channels=64, out_channels=256, kernel_size=(1, 4), stride=1), # 16 53 | nn.BatchNorm2d(num_features=256), 54 | nn.ReLU(), # 18 55 | nn.Dropout(dropout_rate), 56 | nn.Conv2d(in_channels=256, out_channels=class_num, kernel_size=(13, 1), stride=1), # 20 57 | nn.BatchNorm2d(num_features=class_num), 58 | nn.ReLU(), # *** 22 *** 59 | ) 60 | self.container = nn.Sequential( 61 | nn.Conv2d(in_channels=448+self.class_num, out_channels=self.class_num, kernel_size=(1, 1), stride=(1, 1)), 62 | # nn.BatchNorm2d(num_features=self.class_num), 63 | # nn.ReLU(), 64 | # nn.Conv2d(in_channels=self.class_num, out_channels=self.lpr_max_len+1, kernel_size=3, stride=2), 65 | # nn.ReLU(), 66 | ) 67 | 68 | def forward(self, x): 69 | keep_features = list() 70 | for i, layer in enumerate(self.backbone.children()): 71 | x = layer(x) 72 | if i in [2, 6, 13, 22]: # [2, 4, 8, 11, 22] 73 | keep_features.append(x) 74 | 75 | global_context = list() 76 | for i, f in enumerate(keep_features): 77 | if i in [0, 1]: 78 | f = nn.AvgPool2d(kernel_size=5, stride=5)(f) 79 | if i in [2]: 80 | f = nn.AvgPool2d(kernel_size=(4, 10), stride=(4, 2))(f) 81 | f_pow = torch.pow(f, 2) 82 | f_mean = torch.mean(f_pow) 83 | f = torch.div(f, f_mean) 84 | global_context.append(f) 85 | 86 | x = torch.cat(global_context, 1) 87 | x = self.container(x) 88 | logits = torch.mean(x, dim=2) 89 | 90 | return logits 91 | 92 | def build_lprnet(lpr_max_len=8, phase=False, class_num=68, dropout_rate=0.0): 93 | 94 | Net = LPRNet(lpr_max_len, phase, class_num, 
dropout_rate) 95 | 96 | if phase == "train": 97 | return Net.train() 98 | else: 99 | return Net.eval() 100 | -------------------------------------------------------------------------------- /models/common.py: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license 2 | """ 3 | Common modules 4 | """ 5 | 6 | import json 7 | import math 8 | import platform 9 | import warnings 10 | from collections import OrderedDict, namedtuple 11 | from copy import copy 12 | from pathlib import Path 13 | 14 | import cv2 15 | import numpy as np 16 | import pandas as pd 17 | import requests 18 | import torch 19 | import torch.nn as nn 20 | import yaml 21 | from PIL import Image 22 | from torch.cuda import amp 23 | 24 | from utils.datasets import exif_transpose, letterbox 25 | from utils.general import (LOGGER, check_requirements, check_suffix, check_version, colorstr, increment_path, 26 | make_divisible, non_max_suppression, scale_coords, xywh2xyxy, xyxy2xywh) 27 | from utils.plots import Annotator, colors, save_one_box 28 | from utils.torch_utils import copy_attr, time_sync 29 | 30 | 31 | def autopad(k, p=None): # kernel, padding 32 | # Pad to 'same' 33 | if p is None: 34 | p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad 35 | return p 36 | 37 | 38 | class Conv(nn.Module): 39 | # Standard convolution 40 | def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups 41 | super().__init__() 42 | self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False) 43 | self.bn = nn.BatchNorm2d(c2) 44 | self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity()) 45 | 46 | def forward(self, x): 47 | return self.act(self.bn(self.conv(x))) 48 | 49 | def forward_fuse(self, x): 50 | return self.act(self.conv(x)) 51 | 52 | 53 | class DWConv(Conv): 54 | # Depth-wise convolution class 55 | def __init__(self, c1, c2, k=1, s=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups 56 | super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), act=act) 57 | 58 | 59 | class TransformerLayer(nn.Module): 60 | # Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance) 61 | def __init__(self, c, num_heads): 62 | super().__init__() 63 | self.q = nn.Linear(c, c, bias=False) 64 | self.k = nn.Linear(c, c, bias=False) 65 | self.v = nn.Linear(c, c, bias=False) 66 | self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads) 67 | self.fc1 = nn.Linear(c, c, bias=False) 68 | self.fc2 = nn.Linear(c, c, bias=False) 69 | 70 | def forward(self, x): 71 | x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x 72 | x = self.fc2(self.fc1(x)) + x 73 | return x 74 | 75 | 76 | class TransformerBlock(nn.Module): 77 | # Vision Transformer https://arxiv.org/abs/2010.11929 78 | def __init__(self, c1, c2, num_heads, num_layers): 79 | super().__init__() 80 | self.conv = None 81 | if c1 != c2: 82 | self.conv = Conv(c1, c2) 83 | self.linear = nn.Linear(c2, c2) # learnable position embedding 84 | self.tr = nn.Sequential(*(TransformerLayer(c2, num_heads) for _ in range(num_layers))) 85 | self.c2 = c2 86 | 87 | def forward(self, x): 88 | if self.conv is not None: 89 | x = self.conv(x) 90 | b, _, w, h = x.shape 91 | p = x.flatten(2).permute(2, 0, 1) 92 | return self.tr(p + self.linear(p)).permute(1, 2, 0).reshape(b, self.c2, w, h) 93 | 94 | 95 | class Bottleneck(nn.Module): 96 | # Standard bottleneck 97 | def __init__(self, c1, c2, 
shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion 98 | super().__init__() 99 | c_ = int(c2 * e) # hidden channels 100 | self.cv1 = Conv(c1, c_, 1, 1) 101 | self.cv2 = Conv(c_, c2, 3, 1, g=g) 102 | self.add = shortcut and c1 == c2 103 | 104 | def forward(self, x): 105 | return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x)) 106 | 107 | 108 | class BottleneckCSP(nn.Module): 109 | # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks 110 | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion 111 | super().__init__() 112 | c_ = int(c2 * e) # hidden channels 113 | self.cv1 = Conv(c1, c_, 1, 1) 114 | self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False) 115 | self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False) 116 | self.cv4 = Conv(2 * c_, c2, 1, 1) 117 | self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3) 118 | self.act = nn.SiLU() 119 | self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n))) 120 | 121 | def forward(self, x): 122 | y1 = self.cv3(self.m(self.cv1(x))) 123 | y2 = self.cv2(x) 124 | return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1)))) 125 | 126 | 127 | class C3(nn.Module): 128 | # CSP Bottleneck with 3 convolutions 129 | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion 130 | super().__init__() 131 | c_ = int(c2 * e) # hidden channels 132 | self.cv1 = Conv(c1, c_, 1, 1) 133 | self.cv2 = Conv(c1, c_, 1, 1) 134 | self.cv3 = Conv(2 * c_, c2, 1) # act=FReLU(c2) 135 | self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n))) 136 | # self.m = nn.Sequential(*[CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)]) 137 | 138 | def forward(self, x): 139 | return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1)) 140 | 141 | 142 | class C3TR(C3): 143 | # C3 module with TransformerBlock() 144 | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): 145 | super().__init__(c1, c2, n, shortcut, g, e) 146 | c_ = int(c2 * e) 147 | self.m = TransformerBlock(c_, c_, 4, n) 148 | 149 | 150 | class C3SPP(C3): 151 | # C3 module with SPP() 152 | def __init__(self, c1, c2, k=(5, 9, 13), n=1, shortcut=True, g=1, e=0.5): 153 | super().__init__(c1, c2, n, shortcut, g, e) 154 | c_ = int(c2 * e) 155 | self.m = SPP(c_, c_, k) 156 | 157 | 158 | class C3Ghost(C3): 159 | # C3 module with GhostBottleneck() 160 | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): 161 | super().__init__(c1, c2, n, shortcut, g, e) 162 | c_ = int(c2 * e) # hidden channels 163 | self.m = nn.Sequential(*(GhostBottleneck(c_, c_) for _ in range(n))) 164 | 165 | 166 | class SPP(nn.Module): 167 | # Spatial Pyramid Pooling (SPP) layer https://arxiv.org/abs/1406.4729 168 | def __init__(self, c1, c2, k=(5, 9, 13)): 169 | super().__init__() 170 | c_ = c1 // 2 # hidden channels 171 | self.cv1 = Conv(c1, c_, 1, 1) 172 | self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1) 173 | self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k]) 174 | 175 | def forward(self, x): 176 | x = self.cv1(x) 177 | with warnings.catch_warnings(): 178 | warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning 179 | return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1)) 180 | 181 | 182 | class SPPF(nn.Module): 183 | # Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher 184 | def __init__(self, c1, c2, k=5): # equivalent to SPP(k=(5, 
9, 13)) 185 | super().__init__() 186 | c_ = c1 // 2 # hidden channels 187 | self.cv1 = Conv(c1, c_, 1, 1) 188 | self.cv2 = Conv(c_ * 4, c2, 1, 1) 189 | self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2) 190 | 191 | def forward(self, x): 192 | x = self.cv1(x) 193 | with warnings.catch_warnings(): 194 | warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning 195 | y1 = self.m(x) 196 | y2 = self.m(y1) 197 | return self.cv2(torch.cat([x, y1, y2, self.m(y2)], 1)) 198 | 199 | 200 | class Focus(nn.Module): 201 | # Focus wh information into c-space 202 | def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups 203 | super().__init__() 204 | self.conv = Conv(c1 * 4, c2, k, s, p, g, act) 205 | # self.contract = Contract(gain=2) 206 | 207 | def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2) 208 | return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1)) 209 | # return self.conv(self.contract(x)) 210 | 211 | 212 | class GhostConv(nn.Module): 213 | # Ghost Convolution https://github.com/huawei-noah/ghostnet 214 | def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups 215 | super().__init__() 216 | c_ = c2 // 2 # hidden channels 217 | self.cv1 = Conv(c1, c_, k, s, None, g, act) 218 | self.cv2 = Conv(c_, c_, 5, 1, None, c_, act) 219 | 220 | def forward(self, x): 221 | y = self.cv1(x) 222 | return torch.cat([y, self.cv2(y)], 1) 223 | 224 | 225 | class GhostBottleneck(nn.Module): 226 | # Ghost Bottleneck https://github.com/huawei-noah/ghostnet 227 | def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride 228 | super().__init__() 229 | c_ = c2 // 2 230 | self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1), # pw 231 | DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw 232 | GhostConv(c_, c2, 1, 1, act=False)) # pw-linear 233 | self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False), 234 | Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity() 235 | 236 | def forward(self, x): 237 | return self.conv(x) + self.shortcut(x) 238 | 239 | 240 | class Contract(nn.Module): 241 | # Contract width-height into channels, i.e. x(1,64,80,80) to x(1,256,40,40) 242 | def __init__(self, gain=2): 243 | super().__init__() 244 | self.gain = gain 245 | 246 | def forward(self, x): 247 | b, c, h, w = x.size() # assert (h / s == 0) and (W / s == 0), 'Indivisible gain' 248 | s = self.gain 249 | x = x.view(b, c, h // s, s, w // s, s) # x(1,64,40,2,40,2) 250 | x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # x(1,2,2,64,40,40) 251 | return x.view(b, c * s * s, h // s, w // s) # x(1,256,40,40) 252 | 253 | 254 | class Expand(nn.Module): 255 | # Expand channels into width-height, i.e. 
x(1,64,80,80) to x(1,16,160,160) 256 | def __init__(self, gain=2): 257 | super().__init__() 258 | self.gain = gain 259 | 260 | def forward(self, x): 261 | b, c, h, w = x.size() # assert C / s ** 2 == 0, 'Indivisible gain' 262 | s = self.gain 263 | x = x.view(b, s, s, c // s ** 2, h, w) # x(1,2,2,16,80,80) 264 | x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # x(1,16,80,2,80,2) 265 | return x.view(b, c // s ** 2, h * s, w * s) # x(1,16,160,160) 266 | 267 | 268 | class Concat(nn.Module): 269 | # Concatenate a list of tensors along dimension 270 | def __init__(self, dimension=1): 271 | super().__init__() 272 | self.d = dimension 273 | 274 | def forward(self, x): 275 | return torch.cat(x, self.d) 276 | 277 | 278 | class DetectMultiBackend(nn.Module): 279 | # YOLOv5 MultiBackend class for python inference on various backends 280 | def __init__(self, weights='yolov5s.pt', device=None, dnn=False, data=None): 281 | # Usage: 282 | # PyTorch: weights = *.pt 283 | # TorchScript: *.torchscript 284 | # ONNX Runtime: *.onnx 285 | # ONNX OpenCV DNN: *.onnx with --dnn 286 | # OpenVINO: *.xml 287 | # CoreML: *.mlmodel 288 | # TensorRT: *.engine 289 | # TensorFlow SavedModel: *_saved_model 290 | # TensorFlow GraphDef: *.pb 291 | # TensorFlow Lite: *.tflite 292 | # TensorFlow Edge TPU: *_edgetpu.tflite 293 | from models.experimental import attempt_download, attempt_load # scoped to avoid circular import 294 | 295 | super().__init__() 296 | w = str(weights[0] if isinstance(weights, list) else weights) 297 | pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs = self.model_type(w) # get backend 298 | stride, names = 64, [f'class{i}' for i in range(1000)] # assign defaults 299 | w = attempt_download(w) # download if not local 300 | if data: # data.yaml path (optional) 301 | with open(data, errors='ignore') as f: 302 | names = yaml.safe_load(f)['names'] # class names 303 | 304 | if pt: # PyTorch 305 | model = attempt_load(weights if isinstance(weights, list) else w, map_location=device) 306 | stride = max(int(model.stride.max()), 32) # model stride 307 | names = model.module.names if hasattr(model, 'module') else model.names # get class names 308 | self.model = model # explicitly assign for to(), cpu(), cuda(), half() 309 | elif jit: # TorchScript 310 | LOGGER.info(f'Loading {w} for TorchScript inference...') 311 | extra_files = {'config.txt': ''} # model metadata 312 | model = torch.jit.load(w, _extra_files=extra_files) 313 | if extra_files['config.txt']: 314 | d = json.loads(extra_files['config.txt']) # extra_files dict 315 | stride, names = int(d['stride']), d['names'] 316 | elif dnn: # ONNX OpenCV DNN 317 | LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...') 318 | check_requirements(('opencv-python>=4.5.4',)) 319 | net = cv2.dnn.readNetFromONNX(w) 320 | elif onnx: # ONNX Runtime 321 | LOGGER.info(f'Loading {w} for ONNX Runtime inference...') 322 | cuda = torch.cuda.is_available() 323 | check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime')) 324 | import onnxruntime 325 | providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider'] 326 | session = onnxruntime.InferenceSession(w, providers=providers) 327 | elif xml: # OpenVINO 328 | LOGGER.info(f'Loading {w} for OpenVINO inference...') 329 | check_requirements(('openvino-dev',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/ 330 | import openvino.inference_engine as ie 331 | core = ie.IECore() 332 | if not Path(w).is_file(): # if not *.xml 333 | w = 
next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir 334 | network = core.read_network(model=w, weights=Path(w).with_suffix('.bin')) # *.xml, *.bin paths 335 | executable_network = core.load_network(network, device_name='CPU', num_requests=1) 336 | elif engine: # TensorRT 337 | LOGGER.info(f'Loading {w} for TensorRT inference...') 338 | import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download 339 | check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0 340 | Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr')) 341 | logger = trt.Logger(trt.Logger.INFO) 342 | with open(w, 'rb') as f, trt.Runtime(logger) as runtime: 343 | model = runtime.deserialize_cuda_engine(f.read()) 344 | bindings = OrderedDict() 345 | for index in range(model.num_bindings): 346 | name = model.get_binding_name(index) 347 | dtype = trt.nptype(model.get_binding_dtype(index)) 348 | shape = tuple(model.get_binding_shape(index)) 349 | data = torch.from_numpy(np.empty(shape, dtype=np.dtype(dtype))).to(device) 350 | bindings[name] = Binding(name, dtype, shape, data, int(data.data_ptr())) 351 | binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items()) 352 | context = model.create_execution_context() 353 | batch_size = bindings['images'].shape[0] 354 | elif coreml: # CoreML 355 | LOGGER.info(f'Loading {w} for CoreML inference...') 356 | import coremltools as ct 357 | model = ct.models.MLModel(w) 358 | else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU) 359 | if saved_model: # SavedModel 360 | LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...') 361 | import tensorflow as tf 362 | keras = False # assume TF1 saved_model 363 | model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w) 364 | elif pb: # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt 365 | LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...') 366 | import tensorflow as tf 367 | 368 | def wrap_frozen_graph(gd, inputs, outputs): 369 | x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), []) # wrapped 370 | ge = x.graph.as_graph_element 371 | return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs)) 372 | 373 | gd = tf.Graph().as_graph_def() # graph_def 374 | gd.ParseFromString(open(w, 'rb').read()) 375 | frozen_func = wrap_frozen_graph(gd, inputs="x:0", outputs="Identity:0") 376 | elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python 377 | try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu 378 | from tflite_runtime.interpreter import Interpreter, load_delegate 379 | except ImportError: 380 | import tensorflow as tf 381 | Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate, 382 | if edgetpu: # Edge TPU https://coral.ai/software/#edgetpu-runtime 383 | LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...') 384 | delegate = {'Linux': 'libedgetpu.so.1', 385 | 'Darwin': 'libedgetpu.1.dylib', 386 | 'Windows': 'edgetpu.dll'}[platform.system()] 387 | interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)]) 388 | else: # Lite 389 | LOGGER.info(f'Loading {w} for TensorFlow Lite inference...') 390 | interpreter = Interpreter(model_path=w) # load TFLite model 391 | interpreter.allocate_tensors() # allocate 392 | input_details = interpreter.get_input_details() # inputs 393 | 
output_details = interpreter.get_output_details() # outputs 394 | elif tfjs: 395 | raise Exception('ERROR: YOLOv5 TF.js inference is not supported') 396 | self.__dict__.update(locals()) # assign all variables to self 397 | 398 | def forward(self, im, augment=False, visualize=False, val=False): 399 | # YOLOv5 MultiBackend inference 400 | b, ch, h, w = im.shape # batch, channel, height, width 401 | if self.pt or self.jit: # PyTorch 402 | y = self.model(im) if self.jit else self.model(im, augment=augment, visualize=visualize) 403 | return y if val else y[0] 404 | elif self.dnn: # ONNX OpenCV DNN 405 | im = im.cpu().numpy() # torch to numpy 406 | self.net.setInput(im) 407 | y = self.net.forward() 408 | elif self.onnx: # ONNX Runtime 409 | im = im.cpu().numpy() # torch to numpy 410 | y = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im})[0] 411 | elif self.xml: # OpenVINO 412 | im = im.cpu().numpy() # FP32 413 | desc = self.ie.TensorDesc(precision='FP32', dims=im.shape, layout='NCHW') # Tensor Description 414 | request = self.executable_network.requests[0] # inference request 415 | request.set_blob(blob_name='images', blob=self.ie.Blob(desc, im)) # name=next(iter(request.input_blobs)) 416 | request.infer() 417 | y = request.output_blobs['output'].buffer # name=next(iter(request.output_blobs)) 418 | elif self.engine: # TensorRT 419 | assert im.shape == self.bindings['images'].shape, (im.shape, self.bindings['images'].shape) 420 | self.binding_addrs['images'] = int(im.data_ptr()) 421 | self.context.execute_v2(list(self.binding_addrs.values())) 422 | y = self.bindings['output'].data 423 | elif self.coreml: # CoreML 424 | im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) 425 | im = Image.fromarray((im[0] * 255).astype('uint8')) 426 | # im = im.resize((192, 320), Image.ANTIALIAS) 427 | y = self.model.predict({'image': im}) # coordinates are xywh normalized 428 | if 'confidence' in y: 429 | box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels 430 | conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float) 431 | y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1) 432 | else: 433 | k = 'var_' + str(sorted(int(k.replace('var_', '')) for k in y)[-1]) # output key 434 | y = y[k] # output 435 | else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU) 436 | im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) 437 | if self.saved_model: # SavedModel 438 | y = (self.model(im, training=False) if self.keras else self.model(im)[0]).numpy() 439 | elif self.pb: # GraphDef 440 | y = self.frozen_func(x=self.tf.constant(im)).numpy() 441 | else: # Lite or Edge TPU 442 | input, output = self.input_details[0], self.output_details[0] 443 | int8 = input['dtype'] == np.uint8 # is TFLite quantized uint8 model 444 | if int8: 445 | scale, zero_point = input['quantization'] 446 | im = (im / scale + zero_point).astype(np.uint8) # de-scale 447 | self.interpreter.set_tensor(input['index'], im) 448 | self.interpreter.invoke() 449 | y = self.interpreter.get_tensor(output['index']) 450 | if int8: 451 | scale, zero_point = output['quantization'] 452 | y = (y.astype(np.float32) - zero_point) * scale # re-scale 453 | y[..., :4] *= [w, h, w, h] # xywh normalized to pixels 454 | 455 | y = torch.tensor(y) if isinstance(y, np.ndarray) else y 456 | return (y, []) if val else y 457 | 458 | def warmup(self, imgsz=(1, 3, 640, 640), half=False): 459 | # Warmup model by running 
inference once 460 | if self.pt or self.jit or self.onnx or self.engine: # warmup types 461 | if isinstance(self.device, torch.device) and self.device.type != 'cpu': # only warmup GPU models 462 | im = torch.zeros(*imgsz).to(self.device).type(torch.half if half else torch.float) # input image 463 | self.forward(im) # warmup 464 | 465 | @staticmethod 466 | def model_type(p='path/to/model.pt'): 467 | # Return model type from model path, i.e. path='path/to/model.onnx' -> type=onnx 468 | from export import export_formats 469 | suffixes = list(export_formats().Suffix) + ['.xml'] # export suffixes 470 | check_suffix(p, suffixes) # checks 471 | p = Path(p).name # eliminate trailing separators 472 | pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, xml2 = (s in p for s in suffixes) 473 | xml |= xml2 # *_openvino_model or *.xml 474 | tflite &= not edgetpu # *.tflite 475 | return pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs 476 | 477 | 478 | class AutoShape(nn.Module): 479 | # YOLOv5 input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS 480 | conf = 0.25 # NMS confidence threshold 481 | iou = 0.45 # NMS IoU threshold 482 | agnostic = False # NMS class-agnostic 483 | multi_label = False # NMS multiple labels per box 484 | classes = None # (optional list) filter by class, i.e. = [0, 15, 16] for COCO persons, cats and dogs 485 | max_det = 1000 # maximum number of detections per image 486 | amp = False # Automatic Mixed Precision (AMP) inference 487 | 488 | def __init__(self, model): 489 | super().__init__() 490 | LOGGER.info('Adding AutoShape... ') 491 | copy_attr(self, model, include=('yaml', 'nc', 'hyp', 'names', 'stride', 'abc'), exclude=()) # copy attributes 492 | self.dmb = isinstance(model, DetectMultiBackend) # DetectMultiBackend() instance 493 | self.pt = not self.dmb or model.pt # PyTorch model 494 | self.model = model.eval() 495 | 496 | def _apply(self, fn): 497 | # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers 498 | self = super()._apply(fn) 499 | if self.pt: 500 | m = self.model.model.model[-1] if self.dmb else self.model.model[-1] # Detect() 501 | m.stride = fn(m.stride) 502 | m.grid = list(map(fn, m.grid)) 503 | if isinstance(m.anchor_grid, list): 504 | m.anchor_grid = list(map(fn, m.anchor_grid)) 505 | return self 506 | 507 | @torch.no_grad() 508 | def forward(self, imgs, size=640, augment=False, profile=False): 509 | # Inference from various sources. For height=640, width=1280, RGB images example inputs are: 510 | # file: imgs = 'data/images/zidane.jpg' # str or PosixPath 511 | # URI: = 'https://ultralytics.com/images/zidane.jpg' 512 | # OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3) 513 | # PIL: = Image.open('image.jpg') or ImageGrab.grab() # HWC x(640,1280,3) 514 | # numpy: = np.zeros((640,1280,3)) # HWC 515 | # torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values) 516 | # multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] 
# list of images 517 | 518 | t = [time_sync()] 519 | p = next(self.model.parameters()) if self.pt else torch.zeros(1) # for device and type 520 | autocast = self.amp and (p.device.type != 'cpu') # Automatic Mixed Precision (AMP) inference 521 | if isinstance(imgs, torch.Tensor): # torch 522 | with amp.autocast(enabled=autocast): 523 | return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference 524 | 525 | # Pre-process 526 | n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs]) # number of images, list of images 527 | shape0, shape1, files = [], [], [] # image and inference shapes, filenames 528 | for i, im in enumerate(imgs): 529 | f = f'image{i}' # filename 530 | if isinstance(im, (str, Path)): # filename or uri 531 | im, f = Image.open(requests.get(im, stream=True).raw if str(im).startswith('http') else im), im 532 | im = np.asarray(exif_transpose(im)) 533 | elif isinstance(im, Image.Image): # PIL Image 534 | im, f = np.asarray(exif_transpose(im)), getattr(im, 'filename', f) or f 535 | files.append(Path(f).with_suffix('.jpg').name) 536 | if im.shape[0] < 5: # image in CHW 537 | im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1) 538 | im = im[..., :3] if im.ndim == 3 else np.tile(im[..., None], 3) # enforce 3ch input 539 | s = im.shape[:2] # HWC 540 | shape0.append(s) # image shape 541 | g = (size / max(s)) # gain 542 | shape1.append([y * g for y in s]) 543 | imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update 544 | shape1 = [make_divisible(x, self.stride) for x in np.stack(shape1, 0).max(0)] # inference shape 545 | x = [letterbox(im, new_shape=shape1 if self.pt else size, auto=False)[0] for im in imgs] # pad 546 | x = np.stack(x, 0) if n > 1 else x[0][None] # stack 547 | x = np.ascontiguousarray(x.transpose((0, 3, 1, 2))) # BHWC to BCHW 548 | x = torch.from_numpy(x).to(p.device).type_as(p) / 255 # uint8 to fp16/32 549 | t.append(time_sync()) 550 | 551 | with amp.autocast(enabled=autocast): 552 | # Inference 553 | y = self.model(x, augment, profile) # forward 554 | t.append(time_sync()) 555 | 556 | # Post-process 557 | y = non_max_suppression(y if self.dmb else y[0], self.conf, iou_thres=self.iou, classes=self.classes, 558 | agnostic=self.agnostic, multi_label=self.multi_label, max_det=self.max_det) # NMS 559 | for i in range(n): 560 | scale_coords(shape1, y[i][:, :4], shape0[i]) 561 | 562 | t.append(time_sync()) 563 | return Detections(imgs, y, files, t, self.names, x.shape) 564 | 565 | 566 | class Detections: 567 | # YOLOv5 detections class for inference results 568 | def __init__(self, imgs, pred, files, times=(0, 0, 0, 0), names=None, shape=None): 569 | super().__init__() 570 | d = pred[0].device # device 571 | gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1, 1], device=d) for im in imgs] # normalizations 572 | self.imgs = imgs # list of images as numpy arrays 573 | self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls) 574 | self.names = names # class names 575 | self.files = files # image filenames 576 | self.times = times # profiling times 577 | self.xyxy = pred # xyxy pixels 578 | self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels 579 | self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized 580 | self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized 581 | self.n = len(self.pred) # number of images (batch size) 582 | self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) # timestamps (ms) 583 | self.s = shape # inference BCHW shape 
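# ------------------------------------------------------------------------------
# Usage sketch: one way the AutoShape wrapper and this Detections container can
# be exercised with the files shipped in this repository. The paths below
# ('weights/yolov5_best.pt', 'data/image/1.jpg') are illustrative placeholders
# taken from the project tree, and this is a minimal sketch of the results API
# defined in this class, not a fixed inference pipeline.
#
#   import cv2
#   import torch
#   from models.common import AutoShape, DetectMultiBackend
#
#   device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
#   backend = DetectMultiBackend('weights/yolov5_best.pt', device=device)  # PyTorch *.pt backend
#   model = AutoShape(backend)  # adds letterbox pre-processing, inference and NMS
#
#   im = cv2.imread('data/image/1.jpg')[:, :, ::-1]  # HWC BGR to RGB, as in forward() above
#   results = model(im, size=640)  # returns a Detections instance
#   results.print()  # per-image summary string
#   df = results.pandas().xyxy[0]  # columns: xmin, ymin, xmax, ymax, confidence, class, name
#   crops = results.crop(save=False)  # list of dicts with 'box', 'conf', 'cls', 'label', 'im'
# ------------------------------------------------------------------------------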
584 | 585 | def display(self, pprint=False, show=False, save=False, crop=False, render=False, save_dir=Path('')): 586 | crops = [] 587 | for i, (im, pred) in enumerate(zip(self.imgs, self.pred)): 588 | s = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' # string 589 | if pred.shape[0]: 590 | for c in pred[:, -1].unique(): 591 | n = (pred[:, -1] == c).sum() # detections per class 592 | s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string 593 | if show or save or render or crop: 594 | annotator = Annotator(im, example=str(self.names)) 595 | for *box, conf, cls in reversed(pred): # xyxy, confidence, class 596 | label = f'{self.names[int(cls)]} {conf:.2f}' 597 | if crop: 598 | file = save_dir / 'crops' / self.names[int(cls)] / self.files[i] if save else None 599 | crops.append({'box': box, 'conf': conf, 'cls': cls, 'label': label, 600 | 'im': save_one_box(box, im, file=file, save=save)}) 601 | else: # all others 602 | annotator.box_label(box, label, color=colors(cls)) 603 | im = annotator.im 604 | else: 605 | s += '(no detections)' 606 | 607 | im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np 608 | if pprint: 609 | LOGGER.info(s.rstrip(', ')) 610 | if show: 611 | im.show(self.files[i]) # show 612 | if save: 613 | f = self.files[i] 614 | im.save(save_dir / f) # save 615 | if i == self.n - 1: 616 | LOGGER.info(f"Saved {self.n} image{'s' * (self.n > 1)} to {colorstr('bold', save_dir)}") 617 | if render: 618 | self.imgs[i] = np.asarray(im) 619 | if crop: 620 | if save: 621 | LOGGER.info(f'Saved results to {save_dir}\n') 622 | return crops 623 | 624 | def print(self): 625 | self.display(pprint=True) # print results 626 | LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' % 627 | self.t) 628 | 629 | def show(self): 630 | self.display(show=True) # show results 631 | 632 | def save(self, save_dir='runs/detect/exp'): 633 | save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) # increment save_dir 634 | self.display(save=True, save_dir=save_dir) # save results 635 | 636 | def crop(self, save=True, save_dir='runs/detect/exp'): 637 | save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) if save else None 638 | return self.display(crop=True, save=save, save_dir=save_dir) # crop results 639 | 640 | def render(self): 641 | self.display(render=True) # render results 642 | return self.imgs 643 | 644 | def pandas(self): 645 | # return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0]) 646 | new = copy(self) # return copy 647 | ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name' # xyxy columns 648 | cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name' # xywh columns 649 | for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]): 650 | a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)] # update 651 | setattr(new, k, [pd.DataFrame(x, columns=c) for x in a]) 652 | return new 653 | 654 | def tolist(self): 655 | # return a list of Detections objects, i.e. 
'for result in results.tolist():' 656 | r = range(self.n) # iterable 657 | x = [Detections([self.imgs[i]], [self.pred[i]], [self.files[i]], self.times, self.names, self.s) for i in r] 658 | # for d in x: 659 | # for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']: 660 | # setattr(d, k, getattr(d, k)[0]) # pop out of list 661 | return x 662 | 663 | def __len__(self): 664 | return self.n 665 | 666 | 667 | class Classify(nn.Module): 668 | # Classification head, i.e. x(b,c1,20,20) to x(b,c2) 669 | def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups 670 | super().__init__() 671 | self.aap = nn.AdaptiveAvgPool2d(1) # to x(b,c1,1,1) 672 | self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g) # to x(b,c2,1,1) 673 | self.flat = nn.Flatten() 674 | 675 | def forward(self, x): 676 | z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else [x])], 1) # cat if list 677 | return self.flat(self.conv(z)) # flatten to x(b,c2) 678 | -------------------------------------------------------------------------------- /models/experimental.py: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license 2 | """ 3 | Experimental modules 4 | """ 5 | import math 6 | 7 | import numpy as np 8 | import torch 9 | import torch.nn as nn 10 | 11 | from models.common import Conv 12 | from utils.downloads import attempt_download 13 | 14 | 15 | class CrossConv(nn.Module): 16 | # Cross Convolution Downsample 17 | def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False): 18 | # ch_in, ch_out, kernel, stride, groups, expansion, shortcut 19 | super().__init__() 20 | c_ = int(c2 * e) # hidden channels 21 | self.cv1 = Conv(c1, c_, (1, k), (1, s)) 22 | self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g) 23 | self.add = shortcut and c1 == c2 24 | 25 | def forward(self, x): 26 | return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x)) 27 | 28 | 29 | class Sum(nn.Module): 30 | # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070 31 | def __init__(self, n, weight=False): # n: number of inputs 32 | super().__init__() 33 | self.weight = weight # apply weights boolean 34 | self.iter = range(n - 1) # iter object 35 | if weight: 36 | self.w = nn.Parameter(-torch.arange(1.0, n) / 2, requires_grad=True) # layer weights 37 | 38 | def forward(self, x): 39 | y = x[0] # no weight 40 | if self.weight: 41 | w = torch.sigmoid(self.w) * 2 42 | for i in self.iter: 43 | y = y + x[i + 1] * w[i] 44 | else: 45 | for i in self.iter: 46 | y = y + x[i + 1] 47 | return y 48 | 49 | 50 | class MixConv2d(nn.Module): 51 | # Mixed Depth-wise Conv https://arxiv.org/abs/1907.09595 52 | def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): # ch_in, ch_out, kernel, stride, ch_strategy 53 | super().__init__() 54 | n = len(k) # number of convolutions 55 | if equal_ch: # equal c_ per group 56 | i = torch.linspace(0, n - 1E-6, c2).floor() # c2 indices 57 | c_ = [(i == g).sum() for g in range(n)] # intermediate channels 58 | else: # equal weight.numel() per group 59 | b = [c2] + [0] * n 60 | a = np.eye(n + 1, n, k=-1) 61 | a -= np.roll(a, 1, axis=1) 62 | a *= np.array(k) ** 2 63 | a[0] = 1 64 | c_ = np.linalg.lstsq(a, b, rcond=None)[0].round() # solve for equal weight indices, ax = b 65 | 66 | self.m = nn.ModuleList( 67 | [nn.Conv2d(c1, int(c_), k, s, k // 2, groups=math.gcd(c1, int(c_)), bias=False) for k, c_ in zip(k, c_)]) 68 | self.bn = nn.BatchNorm2d(c2) 69 | self.act = nn.SiLU() 70 | 71 | def 
forward(self, x): 72 | return self.act(self.bn(torch.cat([m(x) for m in self.m], 1))) 73 | 74 | 75 | class Ensemble(nn.ModuleList): 76 | # Ensemble of models 77 | def __init__(self): 78 | super().__init__() 79 | 80 | def forward(self, x, augment=False, profile=False, visualize=False): 81 | y = [] 82 | for module in self: 83 | y.append(module(x, augment, profile, visualize)[0]) 84 | # y = torch.stack(y).max(0)[0] # max ensemble 85 | # y = torch.stack(y).mean(0) # mean ensemble 86 | y = torch.cat(y, 1) # nms ensemble 87 | return y, None # inference, train output 88 | 89 | 90 | def attempt_load(weights, map_location=None, inplace=True, fuse=True): 91 | from models.yolo import Detect, Model 92 | 93 | # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a 94 | model = Ensemble() 95 | for w in weights if isinstance(weights, list) else [weights]: 96 | ckpt = torch.load(attempt_download(w), map_location=map_location) # load 97 | if fuse: 98 | model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()) # FP32 model 99 | else: 100 | model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().eval()) # without layer fuse 101 | 102 | # Compatibility updates 103 | for m in model.modules(): 104 | if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model]: 105 | m.inplace = inplace # pytorch 1.7.0 compatibility 106 | if type(m) is Detect: 107 | if not isinstance(m.anchor_grid, list): # new Detect Layer compatibility 108 | delattr(m, 'anchor_grid') 109 | setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl) 110 | elif type(m) is Conv: 111 | m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility 112 | 113 | if len(model) == 1: 114 | return model[-1] # return model 115 | else: 116 | print(f'Ensemble created with {weights}\n') 117 | for k in ['names']: 118 | setattr(model, k, getattr(model[-1], k)) 119 | model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride # max stride 120 | return model # return ensemble 121 | -------------------------------------------------------------------------------- /models/yolo.py: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license 2 | """ 3 | YOLO-specific modules 4 | 5 | Usage: 6 | $ python path/to/models/yolo.py --cfg yolov5s.yaml 7 | """ 8 | 9 | import argparse 10 | import sys 11 | from copy import deepcopy 12 | from pathlib import Path 13 | 14 | FILE = Path(__file__).resolve() 15 | ROOT = FILE.parents[1] # YOLOv5 root directory 16 | if str(ROOT) not in sys.path: 17 | sys.path.append(str(ROOT)) # add ROOT to PATH 18 | # ROOT = ROOT.relative_to(Path.cwd()) # relative 19 | 20 | from models.common import * 21 | from models.experimental import * 22 | from utils.autoanchor import check_anchor_order 23 | from utils.general import LOGGER, check_version, check_yaml, make_divisible, print_args 24 | from utils.plots import feature_visualization 25 | from utils.torch_utils import fuse_conv_and_bn, initialize_weights, model_info, scale_img, select_device, time_sync 26 | 27 | try: 28 | import thop # for FLOPs computation 29 | except ImportError: 30 | thop = None 31 | 32 | 33 | class Detect(nn.Module): 34 | stride = None # strides computed during build 35 | onnx_dynamic = False # ONNX export parameter 36 | 37 | def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer 38 | super().__init__() 39 | self.nc = nc # number of classes 40 | self.no = nc + 5 # number of 
outputs per anchor 41 | self.nl = len(anchors) # number of detection layers 42 | self.na = len(anchors[0]) // 2 # number of anchors 43 | self.grid = [torch.zeros(1)] * self.nl # init grid 44 | self.anchor_grid = [torch.zeros(1)] * self.nl # init anchor grid 45 | self.register_buffer('anchors', torch.tensor(anchors).float().view(self.nl, -1, 2)) # shape(nl,na,2) 46 | self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv 47 | self.inplace = inplace # use in-place ops (e.g. slice assignment) 48 | 49 | def forward(self, x): 50 | z = [] # inference output 51 | for i in range(self.nl): 52 | x[i] = self.m[i](x[i]) # conv 53 | bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) 54 | x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() 55 | 56 | if not self.training: # inference 57 | if self.onnx_dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]: 58 | self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i) 59 | 60 | y = x[i].sigmoid() 61 | if self.inplace: 62 | y[..., 0:2] = (y[..., 0:2] * 2 - 0.5 + self.grid[i]) * self.stride[i] # xy 63 | y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh 64 | else: # for YOLOv5 on AWS Inferentia https://github.com/ultralytics/yolov5/pull/2953 65 | xy = (y[..., 0:2] * 2 - 0.5 + self.grid[i]) * self.stride[i] # xy 66 | wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh 67 | y = torch.cat((xy, wh, y[..., 4:]), -1) 68 | z.append(y.view(bs, -1, self.no)) 69 | 70 | return x if self.training else (torch.cat(z, 1), x) 71 | 72 | def _make_grid(self, nx=20, ny=20, i=0): 73 | d = self.anchors[i].device 74 | if check_version(torch.__version__, '1.10.0'): # torch>=1.10.0 meshgrid workaround for torch>=0.7 compatibility 75 | yv, xv = torch.meshgrid([torch.arange(ny, device=d), torch.arange(nx, device=d)], indexing='ij') 76 | else: 77 | yv, xv = torch.meshgrid([torch.arange(ny, device=d), torch.arange(nx, device=d)]) 78 | grid = torch.stack((xv, yv), 2).expand((1, self.na, ny, nx, 2)).float() 79 | anchor_grid = (self.anchors[i].clone() * self.stride[i]) \ 80 | .view((1, self.na, 1, 1, 2)).expand((1, self.na, ny, nx, 2)).float() 81 | return grid, anchor_grid 82 | 83 | 84 | class Model(nn.Module): 85 | def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes 86 | super().__init__() 87 | if isinstance(cfg, dict): 88 | self.yaml = cfg # model dict 89 | else: # is *.yaml 90 | import yaml # for torch hub 91 | self.yaml_file = Path(cfg).name 92 | with open(cfg, encoding='ascii', errors='ignore') as f: 93 | self.yaml = yaml.safe_load(f) # model dict 94 | 95 | # Define model 96 | ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels 97 | if nc and nc != self.yaml['nc']: 98 | LOGGER.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}") 99 | self.yaml['nc'] = nc # override yaml value 100 | if anchors: 101 | LOGGER.info(f'Overriding model.yaml anchors with anchors={anchors}') 102 | self.yaml['anchors'] = round(anchors) # override yaml value 103 | self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist 104 | self.names = [str(i) for i in range(self.yaml['nc'])] # default names 105 | self.inplace = self.yaml.get('inplace', True) 106 | 107 | # Build strides, anchors 108 | m = self.model[-1] # Detect() 109 | if isinstance(m, Detect): 110 | s = 256 # 2x min stride 111 | m.inplace = self.inplace 112 | m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # 
forward 113 | m.anchors /= m.stride.view(-1, 1, 1) 114 | check_anchor_order(m) 115 | self.stride = m.stride 116 | self._initialize_biases() # only run once 117 | 118 | # Init weights, biases 119 | initialize_weights(self) 120 | self.info() 121 | LOGGER.info('') 122 | 123 | def forward(self, x, augment=False, profile=False, visualize=False): 124 | if augment: 125 | return self._forward_augment(x) # augmented inference, None 126 | return self._forward_once(x, profile, visualize) # single-scale inference, train 127 | 128 | def _forward_augment(self, x): 129 | img_size = x.shape[-2:] # height, width 130 | s = [1, 0.83, 0.67] # scales 131 | f = [None, 3, None] # flips (2-ud, 3-lr) 132 | y = [] # outputs 133 | for si, fi in zip(s, f): 134 | xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max())) 135 | yi = self._forward_once(xi)[0] # forward 136 | # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save 137 | yi = self._descale_pred(yi, fi, si, img_size) 138 | y.append(yi) 139 | y = self._clip_augmented(y) # clip augmented tails 140 | return torch.cat(y, 1), None # augmented inference, train 141 | 142 | def _forward_once(self, x, profile=False, visualize=False): 143 | y, dt = [], [] # outputs 144 | for m in self.model: 145 | if m.f != -1: # if not from previous layer 146 | x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers 147 | if profile: 148 | self._profile_one_layer(m, x, dt) 149 | x = m(x) # run 150 | y.append(x if m.i in self.save else None) # save output 151 | if visualize: 152 | feature_visualization(x, m.type, m.i, save_dir=visualize) 153 | return x 154 | 155 | def _descale_pred(self, p, flips, scale, img_size): 156 | # de-scale predictions following augmented inference (inverse operation) 157 | if self.inplace: 158 | p[..., :4] /= scale # de-scale 159 | if flips == 2: 160 | p[..., 1] = img_size[0] - p[..., 1] # de-flip ud 161 | elif flips == 3: 162 | p[..., 0] = img_size[1] - p[..., 0] # de-flip lr 163 | else: 164 | x, y, wh = p[..., 0:1] / scale, p[..., 1:2] / scale, p[..., 2:4] / scale # de-scale 165 | if flips == 2: 166 | y = img_size[0] - y # de-flip ud 167 | elif flips == 3: 168 | x = img_size[1] - x # de-flip lr 169 | p = torch.cat((x, y, wh, p[..., 4:]), -1) 170 | return p 171 | 172 | def _clip_augmented(self, y): 173 | # Clip YOLOv5 augmented inference tails 174 | nl = self.model[-1].nl # number of detection layers (P3-P5) 175 | g = sum(4 ** x for x in range(nl)) # grid points 176 | e = 1 # exclude layer count 177 | i = (y[0].shape[1] // g) * sum(4 ** x for x in range(e)) # indices 178 | y[0] = y[0][:, :-i] # large 179 | i = (y[-1].shape[1] // g) * sum(4 ** (nl - 1 - x) for x in range(e)) # indices 180 | y[-1] = y[-1][:, i:] # small 181 | return y 182 | 183 | def _profile_one_layer(self, m, x, dt): 184 | c = isinstance(m, Detect) # is final layer, copy input as inplace fix 185 | o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPs 186 | t = time_sync() 187 | for _ in range(10): 188 | m(x.copy() if c else x) 189 | dt.append((time_sync() - t) * 100) 190 | if m == self.model[0]: 191 | LOGGER.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} {'module'}") 192 | LOGGER.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}') 193 | if c: 194 | LOGGER.info(f"{sum(dt):10.2f} {'-':>10s} {'-':>10s} Total") 195 | 196 | def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency 197 | # 
https://arxiv.org/abs/1708.02002 section 3.3 198 | # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. 199 | m = self.model[-1] # Detect() module 200 | for mi, s in zip(m.m, m.stride): # from 201 | b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) 202 | b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) 203 | b.data[:, 5:] += math.log(0.6 / (m.nc - 0.999999)) if cf is None else torch.log(cf / cf.sum()) # cls 204 | mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) 205 | 206 | def _print_biases(self): 207 | m = self.model[-1] # Detect() module 208 | for mi in m.m: # from 209 | b = mi.bias.detach().view(m.na, -1).T # conv.bias(255) to (3,85) 210 | LOGGER.info( 211 | ('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean())) 212 | 213 | # def _print_weights(self): 214 | # for m in self.model.modules(): 215 | # if type(m) is Bottleneck: 216 | # LOGGER.info('%10.3g' % (m.w.detach().sigmoid() * 2)) # shortcut weights 217 | 218 | def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers 219 | LOGGER.info('Fusing layers... ') 220 | for m in self.model.modules(): 221 | if isinstance(m, (Conv, DWConv)) and hasattr(m, 'bn'): 222 | m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv 223 | delattr(m, 'bn') # remove batchnorm 224 | m.forward = m.forward_fuse # update forward 225 | self.info() 226 | return self 227 | 228 | def info(self, verbose=False, img_size=640): # print model information 229 | model_info(self, verbose, img_size) 230 | 231 | def _apply(self, fn): 232 | # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers 233 | self = super()._apply(fn) 234 | m = self.model[-1] # Detect() 235 | if isinstance(m, Detect): 236 | m.stride = fn(m.stride) 237 | m.grid = list(map(fn, m.grid)) 238 | if isinstance(m.anchor_grid, list): 239 | m.anchor_grid = list(map(fn, m.anchor_grid)) 240 | return self 241 | 242 | 243 | def parse_model(d, ch): # model_dict, input_channels(3) 244 | LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}") 245 | anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'] 246 | na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors 247 | no = na * (nc + 5) # number of outputs = anchors * (classes + 5) 248 | 249 | layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out 250 | for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args 251 | m = eval(m) if isinstance(m, str) else m # eval strings 252 | for j, a in enumerate(args): 253 | try: 254 | args[j] = eval(a) if isinstance(a, str) else a # eval strings 255 | except NameError: 256 | pass 257 | 258 | n = n_ = max(round(n * gd), 1) if n > 1 else n # depth gain 259 | if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, MixConv2d, Focus, CrossConv, 260 | BottleneckCSP, C3, C3TR, C3SPP, C3Ghost]: 261 | c1, c2 = ch[f], args[0] 262 | if c2 != no: # if not output 263 | c2 = make_divisible(c2 * gw, 8) 264 | 265 | args = [c1, c2, *args[1:]] 266 | if m in [BottleneckCSP, C3, C3TR, C3Ghost]: 267 | args.insert(2, n) # number of repeats 268 | n = 1 269 | elif m is nn.BatchNorm2d: 270 | args = [ch[f]] 271 | elif m is Concat: 272 | c2 = sum(ch[x] for x in f) 273 | elif m is Detect: 274 | args.append([ch[x] for x in f]) 275 | if isinstance(args[1], int): # number of anchors 276 | args[1] = [list(range(args[1] * 
2))] * len(f) 277 | elif m is Contract: 278 | c2 = ch[f] * args[0] ** 2 279 | elif m is Expand: 280 | c2 = ch[f] // args[0] ** 2 281 | else: 282 | c2 = ch[f] 283 | 284 | m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module 285 | t = str(m)[8:-2].replace('__main__.', '') # module type 286 | np = sum(x.numel() for x in m_.parameters()) # number params 287 | m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params 288 | LOGGER.info(f'{i:>3}{str(f):>18}{n_:>3}{np:10.0f} {t:<40}{str(args):<30}') # print 289 | save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist 290 | layers.append(m_) 291 | if i == 0: 292 | ch = [] 293 | ch.append(c2) 294 | return nn.Sequential(*layers), sorted(save) 295 | 296 | 297 | if __name__ == '__main__': 298 | parser = argparse.ArgumentParser() 299 | parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml') 300 | parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') 301 | parser.add_argument('--profile', action='store_true', help='profile model speed') 302 | parser.add_argument('--test', action='store_true', help='test all yolo*.yaml') 303 | opt = parser.parse_args() 304 | opt.cfg = check_yaml(opt.cfg) # check YAML 305 | print_args(FILE.stem, opt) 306 | device = select_device(opt.device) 307 | 308 | # Create model 309 | model = Model(opt.cfg).to(device) 310 | model.train() 311 | 312 | # Profile 313 | if opt.profile: 314 | img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 640, 640).to(device) 315 | y = model(img, profile=True) 316 | 317 | # Test all models 318 | if opt.test: 319 | for cfg in Path(ROOT / 'models').rglob('yolo*.yaml'): 320 | try: 321 | _ = Model(cfg) 322 | except Exception as e: 323 | print(f'Error in {cfg}: {e}') 324 | 325 | # Tensorboard (not working https://github.com/ultralytics/yolov5/issues/2898) 326 | # from torch.utils.tensorboard import SummaryWriter 327 | # tb_writer = SummaryWriter('.') 328 | # LOGGER.info("Run 'tensorboard --logdir=models' to view tensorboard at http://localhost:6006/") 329 | # tb_writer.add_graph(torch.jit.trace(model, img, strict=False), []) # add model graph 330 | -------------------------------------------------------------------------------- /my_env.txt: -------------------------------------------------------------------------------- 1 | Package Version 2 | ------------------------- -------------------- 3 | absl-py 1.4.0 4 | altgraph 0.17.3 5 | cachetools 5.3.1 6 | certifi 2023.7.22 7 | charset-normalizer 3.2.0 8 | colorama 0.4.6 9 | contourpy 1.1.0 10 | cycler 0.11.0 11 | EasyProcess 1.1 12 | entrypoint2 1.1 13 | fonttools 4.41.1 14 | google-auth 2.22.0 15 | google-auth-oauthlib 1.0.0 16 | grpcio 1.56.2 17 | idna 3.4 18 | imageio 2.31.1 19 | importlib-metadata 6.8.0 20 | importlib-resources 6.0.0 21 | imutils 0.5.4 22 | Jinja2 3.1.2 23 | kiwisolver 1.4.4 24 | lazy_loader 0.3 25 | Markdown 3.4.4 26 | MarkupSafe 2.1.3 27 | matplotlib 3.7.2 28 | MouseInfo 0.1.3 29 | mss 9.0.1 30 | networkx 3.1 31 | numpy 1.25.2 32 | oauthlib 3.2.2 33 | opencv-python 4.8.0.74 34 | packaging 23.1 35 | pandas 2.0.3 36 | pefile 2023.2.7 37 | Pillow 9.3.0 38 | pip 23.2.1 39 | protobuf 4.23.4 40 | pyasn1 0.5.0 41 | pyasn1-modules 0.3.0 42 | PyAutoGUI 0.9.54 43 | PyGetWindow 0.0.9 44 | pyinstaller 5.13.0 45 | pyinstaller-hooks-contrib 2023.6 46 | PyMsgBox 1.0.9 47 | pyparsing 3.0.9 48 | pyperclip 1.8.2 49 | PyRect 0.2.0 50 | pyscreenshot 3.1 51 | PyScreeze 0.1.29 52 | PySide6 
6.5.2 53 | PySide6-Addons 6.5.2 54 | PySide6-Essentials 6.5.2 55 | python-dateutil 2.8.2 56 | pytweening 1.0.7 57 | pytz 2023.3 58 | PyWavelets 1.4.1 59 | pywin32-ctypes 0.2.2 60 | PyYAML 6.0.1 61 | qt-material 2.14 62 | requests 2.31.0 63 | requests-oauthlib 1.3.1 64 | rsa 4.9 65 | scikit-image 0.21.0 66 | scipy 1.11.1 67 | screeninfo 0.8.1 68 | seaborn 0.12.2 69 | setuptools 68.0.0 70 | shiboken6 6.5.2 71 | six 1.16.0 72 | tensorboard 2.13.0 73 | tensorboard-data-server 0.7.1 74 | thop 0.1.1.post2209072238 75 | tifffile 2023.7.18 76 | torch 1.8.2+cu111 77 | torchaudio 0.8.2 78 | torchvision 0.9.2+cu111 79 | tqdm 4.65.0 80 | typing_extensions 4.7.1 81 | tzdata 2023.3 82 | urllib3 1.26.16 83 | Werkzeug 2.3.6 84 | wheel 0.38.4 85 | zipp 3.16.2 -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # pip install -r requirements.txt 2 | 3 | # Base ---------------------------------------- 4 | matplotlib>=3.2.2 5 | numpy>=1.18.5 6 | opencv-python>=4.1.2 7 | Pillow>=7.1.2 8 | PyYAML>=5.3.1 9 | requests>=2.23.0 10 | scipy>=1.4.1 11 | torch>=1.7.0 12 | torchvision>=0.8.1 13 | tqdm>=4.41.0 14 | 15 | # Logging ------------------------------------- 16 | tensorboard>=2.4.1 17 | # wandb 18 | 19 | # Plotting ------------------------------------ 20 | pandas>=1.1.4 21 | seaborn>=0.11.0 22 | 23 | # Export -------------------------------------- 24 | # coremltools>=4.1 # CoreML export 25 | # onnx>=1.9.0 # ONNX export 26 | # onnx-simplifier>=0.3.6 # ONNX simplifier 27 | # scikit-learn==0.19.2 # CoreML quantization 28 | # tensorflow>=2.4.1 # TFLite export 29 | # tensorflowjs>=3.9.0 # TF.js export 30 | # openvino-dev # OpenVINO export 31 | 32 | # Extras -------------------------------------- 33 | # albumentations>=1.0.3 34 | # Cython # for pycocotools https://github.com/cocodataset/cocoapi/issues/172 35 | # pycocotools>=2.0 # COCO mAP 36 | # roboflow 37 | thop # FLOPs computation 38 | -------------------------------------------------------------------------------- /utils/augmentations.py: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license 2 | """ 3 | Image augmentation functions 4 | """ 5 | 6 | import math 7 | import random 8 | 9 | import cv2 10 | import numpy as np 11 | 12 | from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box 13 | from utils.metrics import bbox_ioa 14 | 15 | 16 | class Albumentations: 17 | # YOLOv5 Albumentations class (optional, only used if package is installed) 18 | def __init__(self): 19 | self.transform = None 20 | try: 21 | import albumentations as A 22 | check_version(A.__version__, '1.0.3', hard=True) # version requirement 23 | 24 | self.transform = A.Compose([ 25 | A.Blur(p=0.01), 26 | A.MedianBlur(p=0.01), 27 | A.ToGray(p=0.01), 28 | A.CLAHE(p=0.01), 29 | A.RandomBrightnessContrast(p=0.0), 30 | A.RandomGamma(p=0.0), 31 | A.ImageCompression(quality_lower=75, p=0.0)], 32 | bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels'])) 33 | 34 | LOGGER.info(colorstr('albumentations: ') + ', '.join(f'{x}' for x in self.transform.transforms if x.p)) 35 | except ImportError: # package not installed, skip 36 | pass 37 | except Exception as e: 38 | LOGGER.info(colorstr('albumentations: ') + f'{e}') 39 | 40 | def __call__(self, im, labels, p=1.0): 41 | if self.transform and random.random() < p: 42 | new = self.transform(image=im, 
bboxes=labels[:, 1:], class_labels=labels[:, 0]) # transformed 43 | im, labels = new['image'], np.array([[c, *b] for c, b in zip(new['class_labels'], new['bboxes'])]) 44 | return im, labels 45 | 46 | 47 | def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5): 48 | # HSV color-space augmentation 49 | if hgain or sgain or vgain: 50 | r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains 51 | hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV)) 52 | dtype = im.dtype # uint8 53 | 54 | x = np.arange(0, 256, dtype=r.dtype) 55 | lut_hue = ((x * r[0]) % 180).astype(dtype) 56 | lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) 57 | lut_val = np.clip(x * r[2], 0, 255).astype(dtype) 58 | 59 | im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))) 60 | cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=im) # no return needed 61 | 62 | 63 | def hist_equalize(im, clahe=True, bgr=False): 64 | # Equalize histogram on BGR image 'im' with im.shape(n,m,3) and range 0-255 65 | yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV) 66 | if clahe: 67 | c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)) 68 | yuv[:, :, 0] = c.apply(yuv[:, :, 0]) 69 | else: 70 | yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram 71 | return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB 72 | 73 | 74 | def replicate(im, labels): 75 | # Replicate labels 76 | h, w = im.shape[:2] 77 | boxes = labels[:, 1:].astype(int) 78 | x1, y1, x2, y2 = boxes.T 79 | s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels) 80 | for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices 81 | x1b, y1b, x2b, y2b = boxes[i] 82 | bh, bw = y2b - y1b, x2b - x1b 83 | yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y 84 | x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh] 85 | im[y1a:y2a, x1a:x2a] = im[y1b:y2b, x1b:x2b] # im4[ymin:ymax, xmin:xmax] 86 | labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0) 87 | 88 | return im, labels 89 | 90 | 91 | def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32): 92 | # Resize and pad image while meeting stride-multiple constraints 93 | shape = im.shape[:2] # current shape [height, width] 94 | if isinstance(new_shape, int): 95 | new_shape = (new_shape, new_shape) 96 | 97 | # Scale ratio (new / old) 98 | r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) 99 | if not scaleup: # only scale down, do not scale up (for better val mAP) 100 | r = min(r, 1.0) 101 | 102 | # Compute padding 103 | ratio = r, r # width, height ratios 104 | new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) 105 | dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding 106 | if auto: # minimum rectangle 107 | dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding 108 | elif scaleFill: # stretch 109 | dw, dh = 0.0, 0.0 110 | new_unpad = (new_shape[1], new_shape[0]) 111 | ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios 112 | 113 | dw /= 2 # divide padding into 2 sides 114 | dh /= 2 115 | 116 | if shape[::-1] != new_unpad: # resize 117 | im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR) 118 | top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) 119 | left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) 120 | im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, 
value=color) # add border 121 | return im, ratio, (dw, dh) 122 | 123 | 124 | def random_perspective(im, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, 125 | border=(0, 0)): 126 | # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(0.1, 0.1), scale=(0.9, 1.1), shear=(-10, 10)) 127 | # targets = [cls, xyxy] 128 | 129 | height = im.shape[0] + border[0] * 2 # shape(h,w,c) 130 | width = im.shape[1] + border[1] * 2 131 | 132 | # Center 133 | C = np.eye(3) 134 | C[0, 2] = -im.shape[1] / 2 # x translation (pixels) 135 | C[1, 2] = -im.shape[0] / 2 # y translation (pixels) 136 | 137 | # Perspective 138 | P = np.eye(3) 139 | P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y) 140 | P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x) 141 | 142 | # Rotation and Scale 143 | R = np.eye(3) 144 | a = random.uniform(-degrees, degrees) 145 | # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations 146 | s = random.uniform(1 - scale, 1 + scale) 147 | # s = 2 ** random.uniform(-scale, scale) 148 | R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) 149 | 150 | # Shear 151 | S = np.eye(3) 152 | S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) 153 | S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) 154 | 155 | # Translation 156 | T = np.eye(3) 157 | T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels) 158 | T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels) 159 | 160 | # Combined rotation matrix 161 | M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT 162 | if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed 163 | if perspective: 164 | im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114)) 165 | else: # affine 166 | im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) 167 | 168 | # Visualize 169 | # import matplotlib.pyplot as plt 170 | # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() 171 | # ax[0].imshow(im[:, :, ::-1]) # base 172 | # ax[1].imshow(im2[:, :, ::-1]) # warped 173 | 174 | # Transform label coordinates 175 | n = len(targets) 176 | if n: 177 | use_segments = any(x.any() for x in segments) 178 | new = np.zeros((n, 4)) 179 | if use_segments: # warp segments 180 | segments = resample_segments(segments) # upsample 181 | for i, segment in enumerate(segments): 182 | xy = np.ones((len(segment), 3)) 183 | xy[:, :2] = segment 184 | xy = xy @ M.T # transform 185 | xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine 186 | 187 | # clip 188 | new[i] = segment2box(xy, width, height) 189 | 190 | else: # warp boxes 191 | xy = np.ones((n * 4, 3)) 192 | xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1 193 | xy = xy @ M.T # transform 194 | xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine 195 | 196 | # create new boxes 197 | x = xy[:, [0, 2, 4, 6]] 198 | y = xy[:, [1, 3, 5, 7]] 199 | new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T 200 | 201 | # clip 202 | new[:, [0, 2]] = new[:, [0, 2]].clip(0, width) 203 | new[:, [1, 3]] = new[:, [1, 3]].clip(0, height) 204 | 205 | # filter candidates 206 | i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, 
area_thr=0.01 if use_segments else 0.10) 207 | targets = targets[i] 208 | targets[:, 1:5] = new[i] 209 | 210 | return im, targets 211 | 212 | 213 | def copy_paste(im, labels, segments, p=0.5): 214 | # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy) 215 | n = len(segments) 216 | if p and n: 217 | h, w, c = im.shape # height, width, channels 218 | im_new = np.zeros(im.shape, np.uint8) 219 | for j in random.sample(range(n), k=round(p * n)): 220 | l, s = labels[j], segments[j] 221 | box = w - l[3], l[2], w - l[1], l[4] 222 | ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area 223 | if (ioa < 0.30).all(): # allow 30% obscuration of existing labels 224 | labels = np.concatenate((labels, [[l[0], *box]]), 0) 225 | segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1)) 226 | cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED) 227 | 228 | result = cv2.bitwise_and(src1=im, src2=im_new) 229 | result = cv2.flip(result, 1) # augment segments (flip left-right) 230 | i = result > 0 # pixels to replace 231 | # i[:, :] = result.max(2).reshape(h, w, 1) # act over ch 232 | im[i] = result[i] # cv2.imwrite('debug.jpg', im) # debug 233 | 234 | return im, labels, segments 235 | 236 | 237 | def cutout(im, labels, p=0.5): 238 | # Applies image cutout augmentation https://arxiv.org/abs/1708.04552 239 | if random.random() < p: 240 | h, w = im.shape[:2] 241 | scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction 242 | for s in scales: 243 | mask_h = random.randint(1, int(h * s)) # create random masks 244 | mask_w = random.randint(1, int(w * s)) 245 | 246 | # box 247 | xmin = max(0, random.randint(0, w) - mask_w // 2) 248 | ymin = max(0, random.randint(0, h) - mask_h // 2) 249 | xmax = min(w, xmin + mask_w) 250 | ymax = min(h, ymin + mask_h) 251 | 252 | # apply random color mask 253 | im[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)] 254 | 255 | # return unobscured labels 256 | if len(labels) and s > 0.03: 257 | box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32) 258 | ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area 259 | labels = labels[ioa < 0.60] # remove >60% obscured labels 260 | 261 | return labels 262 | 263 | 264 | def mixup(im, labels, im2, labels2): 265 | # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf 266 | r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 267 | im = (im * r + im2 * (1 - r)).astype(np.uint8) 268 | labels = np.concatenate((labels, labels2), 0) 269 | return im, labels 270 | 271 | 272 | def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n) 273 | # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio 274 | w1, h1 = box1[2] - box1[0], box1[3] - box1[1] 275 | w2, h2 = box2[2] - box2[0], box2[3] - box2[1] 276 | ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio 277 | return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates 278 | -------------------------------------------------------------------------------- /utils/autoanchor.py: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license 2 | """ 3 | AutoAnchor utils 4 | """ 5 | 6 | import random 7 | 8 | import numpy as np 9 | import torch 10 | import yaml 11 | from tqdm import tqdm 12 | 13 | 
from utils.general import LOGGER, colorstr, emojis 14 | 15 | PREFIX = colorstr('AutoAnchor: ') 16 | 17 | 18 | def check_anchor_order(m): 19 | # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary 20 | a = m.anchors.prod(-1).view(-1) # anchor area 21 | da = a[-1] - a[0] # delta a 22 | ds = m.stride[-1] - m.stride[0] # delta s 23 | if da.sign() != ds.sign(): # same order 24 | LOGGER.info(f'{PREFIX}Reversing anchor order') 25 | m.anchors[:] = m.anchors.flip(0) 26 | 27 | 28 | def check_anchors(dataset, model, thr=4.0, imgsz=640): 29 | # Check anchor fit to data, recompute if necessary 30 | m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect() 31 | shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True) 32 | scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1)) # augment scale 33 | wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float() # wh 34 | 35 | def metric(k): # compute metric 36 | r = wh[:, None] / k[None] 37 | x = torch.min(r, 1 / r).min(2)[0] # ratio metric 38 | best = x.max(1)[0] # best_x 39 | aat = (x > 1 / thr).float().sum(1).mean() # anchors above threshold 40 | bpr = (best > 1 / thr).float().mean() # best possible recall 41 | return bpr, aat 42 | 43 | anchors = m.anchors.clone() * m.stride.to(m.anchors.device).view(-1, 1, 1) # current anchors 44 | bpr, aat = metric(anchors.cpu().view(-1, 2)) 45 | s = f'\n{PREFIX}{aat:.2f} anchors/target, {bpr:.3f} Best Possible Recall (BPR). ' 46 | if bpr > 0.98: # threshold to recompute 47 | LOGGER.info(emojis(f'{s}Current anchors are a good fit to dataset ✅')) 48 | else: 49 | LOGGER.info(emojis(f'{s}Anchors are a poor fit to dataset ⚠️, attempting to improve...')) 50 | na = m.anchors.numel() // 2 # number of anchors 51 | try: 52 | anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False) 53 | except Exception as e: 54 | LOGGER.info(f'{PREFIX}ERROR: {e}') 55 | new_bpr = metric(anchors)[0] 56 | if new_bpr > bpr: # replace anchors 57 | anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors) 58 | m.anchors[:] = anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1) # loss 59 | check_anchor_order(m) 60 | LOGGER.info(f'{PREFIX}New anchors saved to model. Update model *.yaml to use these anchors in the future.') 61 | else: 62 | LOGGER.info(f'{PREFIX}Original anchors better than new anchors. 
Proceeding with original anchors.') 63 | 64 | 65 | def kmean_anchors(dataset='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True): 66 | """ Creates kmeans-evolved anchors from training dataset 67 | 68 | Arguments: 69 | dataset: path to data.yaml, or a loaded dataset 70 | n: number of anchors 71 | img_size: image size used for training 72 | thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0 73 | gen: generations to evolve anchors using genetic algorithm 74 | verbose: print all results 75 | 76 | Return: 77 | k: kmeans evolved anchors 78 | 79 | Usage: 80 | from utils.autoanchor import *; _ = kmean_anchors() 81 | """ 82 | from scipy.cluster.vq import kmeans 83 | 84 | npr = np.random 85 | thr = 1 / thr 86 | 87 | def metric(k, wh): # compute metrics 88 | r = wh[:, None] / k[None] 89 | x = torch.min(r, 1 / r).min(2)[0] # ratio metric 90 | # x = wh_iou(wh, torch.tensor(k)) # iou metric 91 | return x, x.max(1)[0] # x, best_x 92 | 93 | def anchor_fitness(k): # mutation fitness 94 | _, best = metric(torch.tensor(k, dtype=torch.float32), wh) 95 | return (best * (best > thr).float()).mean() # fitness 96 | 97 | def print_results(k, verbose=True): 98 | k = k[np.argsort(k.prod(1))] # sort small to large 99 | x, best = metric(k, wh0) 100 | bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n # best possible recall, anch > thr 101 | s = f'{PREFIX}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr\n' \ 102 | f'{PREFIX}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, ' \ 103 | f'past_thr={x[x > thr].mean():.3f}-mean: ' 104 | for i, x in enumerate(k): 105 | s += '%i,%i, ' % (round(x[0]), round(x[1])) 106 | if verbose: 107 | LOGGER.info(s[:-2]) 108 | return k 109 | 110 | if isinstance(dataset, str): # *.yaml file 111 | with open(dataset, errors='ignore') as f: 112 | data_dict = yaml.safe_load(f) # model dict 113 | from utils.datasets import LoadImagesAndLabels 114 | dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True) 115 | 116 | # Get label wh 117 | shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True) 118 | wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)]) # wh 119 | 120 | # Filter 121 | i = (wh0 < 3.0).any(1).sum() 122 | if i: 123 | LOGGER.info(f'{PREFIX}WARNING: Extremely small objects found. 
{i} of {len(wh0)} labels are < 3 pixels in size.') 124 | wh = wh0[(wh0 >= 2.0).any(1)] # filter > 2 pixels 125 | # wh = wh * (npr.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1 126 | 127 | # Kmeans calculation 128 | LOGGER.info(f'{PREFIX}Running kmeans for {n} anchors on {len(wh)} points...') 129 | s = wh.std(0) # sigmas for whitening 130 | k = kmeans(wh / s, n, iter=30)[0] * s # points 131 | if len(k) != n: # kmeans may return fewer points than requested if wh is insufficient or too similar 132 | LOGGER.warning(f'{PREFIX}WARNING: scipy.cluster.vq.kmeans returned only {len(k)} of {n} requested points') 133 | k = np.sort(npr.rand(n * 2)).reshape(n, 2) * img_size # random init 134 | wh = torch.tensor(wh, dtype=torch.float32) # filtered 135 | wh0 = torch.tensor(wh0, dtype=torch.float32) # unfiltered 136 | k = print_results(k, verbose=False) 137 | 138 | # Plot 139 | # k, d = [None] * 20, [None] * 20 140 | # for i in tqdm(range(1, 21)): 141 | # k[i-1], d[i-1] = kmeans(wh / s, i) # points, mean distance 142 | # fig, ax = plt.subplots(1, 2, figsize=(14, 7), tight_layout=True) 143 | # ax = ax.ravel() 144 | # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.') 145 | # fig, ax = plt.subplots(1, 2, figsize=(14, 7)) # plot wh 146 | # ax[0].hist(wh[wh[:, 0]<100, 0],400) 147 | # ax[1].hist(wh[wh[:, 1]<100, 1],400) 148 | # fig.savefig('wh.png', dpi=200) 149 | 150 | # Evolve 151 | f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1 # fitness, generations, mutation prob, sigma 152 | pbar = tqdm(range(gen), desc=f'{PREFIX}Evolving anchors with Genetic Algorithm:') # progress bar 153 | for _ in pbar: 154 | v = np.ones(sh) 155 | while (v == 1).all(): # mutate until a change occurs (prevent duplicates) 156 | v = ((npr.random(sh) < mp) * random.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0) 157 | kg = (k.copy() * v).clip(min=2.0) 158 | fg = anchor_fitness(kg) 159 | if fg > f: 160 | f, k = fg, kg.copy() 161 | pbar.desc = f'{PREFIX}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}' 162 | if verbose: 163 | print_results(k, verbose) 164 | 165 | return print_results(k) 166 | -------------------------------------------------------------------------------- /utils/downloads.py: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license 2 | """ 3 | Download utils 4 | """ 5 | 6 | import os 7 | import platform 8 | import subprocess 9 | import time 10 | import urllib 11 | from pathlib import Path 12 | from zipfile import ZipFile 13 | 14 | import requests 15 | import torch 16 | 17 | 18 | def gsutil_getsize(url=''): 19 | # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du 20 | s = subprocess.check_output(f'gsutil du {url}', shell=True).decode('utf-8') 21 | return eval(s.split(' ')[0]) if len(s) else 0 # bytes 22 | 23 | 24 | def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): 25 | # Attempts to download file from url or url2, checks and removes incomplete downloads < min_bytes 26 | file = Path(file) 27 | assert_msg = f"Downloaded file '{file}' does not exist or size is < min_bytes={min_bytes}" 28 | try: # url1 29 | print(f'Downloading {url} to {file}...') 30 | torch.hub.download_url_to_file(url, str(file)) 31 | assert file.exists() and file.stat().st_size > min_bytes, assert_msg # check 32 | except Exception as e: # url2 33 | file.unlink(missing_ok=True) # remove partial downloads 34 | print(f'ERROR: {e}\nRe-attempting {url2 or url} to {file}...') 35 | os.system(f"curl -L 
'{url2 or url}' -o '{file}' --retry 3 -C -") # curl download, retry and resume on fail 36 | finally: 37 | if not file.exists() or file.stat().st_size < min_bytes: # check 38 | file.unlink(missing_ok=True) # remove partial downloads 39 | print(f"ERROR: {assert_msg}\n{error_msg}") 40 | print('') 41 | 42 | 43 | def attempt_download(file, repo='ultralytics/yolov5'): # from utils.downloads import *; attempt_download() 44 | # Attempt file download if does not exist 45 | file = Path(str(file).strip().replace("'", '')) 46 | 47 | if not file.exists(): 48 | # URL specified 49 | name = Path(urllib.parse.unquote(str(file))).name # decode '%2F' to '/' etc. 50 | if str(file).startswith(('http:/', 'https:/')): # download 51 | url = str(file).replace(':/', '://') # Pathlib turns :// -> :/ 52 | file = name.split('?')[0] # parse authentication https://url.com/file.txt?auth... 53 | if Path(file).is_file(): 54 | print(f'Found {url} locally at {file}') # file already exists 55 | else: 56 | safe_download(file=file, url=url, min_bytes=1E5) 57 | return file 58 | 59 | # GitHub assets 60 | file.parent.mkdir(parents=True, exist_ok=True) # make parent dir (if required) 61 | try: 62 | response = requests.get(f'https://api.github.com/repos/{repo}/releases/latest').json() # github api 63 | assets = [x['name'] for x in response['assets']] # release assets, i.e. ['yolov5s.pt', 'yolov5m.pt', ...] 64 | tag = response['tag_name'] # i.e. 'v1.0' 65 | except Exception: # fallback plan 66 | assets = ['yolov5n.pt', 'yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt', 67 | 'yolov5n6.pt', 'yolov5s6.pt', 'yolov5m6.pt', 'yolov5l6.pt', 'yolov5x6.pt'] 68 | try: 69 | tag = subprocess.check_output('git tag', shell=True, stderr=subprocess.STDOUT).decode().split()[-1] 70 | except Exception: 71 | tag = 'v6.0' # current release 72 | 73 | if name in assets: 74 | safe_download(file, 75 | url=f'https://github.com/{repo}/releases/download/{tag}/{name}', 76 | # url2=f'https://storage.googleapis.com/{repo}/ckpt/{name}', # backup url (optional) 77 | min_bytes=1E5, 78 | error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/') 79 | 80 | return str(file) 81 | 82 | 83 | def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip'): 84 | # Downloads a file from Google Drive. from yolov5.utils.downloads import *; gdrive_download() 85 | t = time.time() 86 | file = Path(file) 87 | cookie = Path('cookie') # gdrive cookie 88 | print(f'Downloading https://drive.google.com/uc?export=download&id={id} as {file}... ', end='') 89 | file.unlink(missing_ok=True) # remove existing file 90 | cookie.unlink(missing_ok=True) # remove existing cookie 91 | 92 | # Attempt file download 93 | out = "NUL" if platform.system() == "Windows" else "/dev/null" 94 | os.system(f'curl -c ./cookie -s -L "drive.google.com/uc?export=download&id={id}" > {out}') 95 | if os.path.exists('cookie'): # large file 96 | s = f'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm={get_token()}&id={id}" -o {file}' 97 | else: # small file 98 | s = f'curl -s -L -o {file} "drive.google.com/uc?export=download&id={id}"' 99 | r = os.system(s) # execute, capture return 100 | cookie.unlink(missing_ok=True) # remove existing cookie 101 | 102 | # Error check 103 | if r != 0: 104 | file.unlink(missing_ok=True) # remove partial 105 | print('Download error ') # raise Exception('Download error') 106 | return r 107 | 108 | # Unzip if archive 109 | if file.suffix == '.zip': 110 | print('unzipping... 
', end='') 111 | ZipFile(file).extractall(path=file.parent) # unzip 112 | file.unlink() # remove zip 113 | 114 | print(f'Done ({time.time() - t:.1f}s)') 115 | return r 116 | 117 | 118 | def get_token(cookie="./cookie"): 119 | with open(cookie) as f: 120 | for line in f: 121 | if "download" in line: 122 | return line.split()[-1] 123 | return "" 124 | 125 | # Google utils: https://cloud.google.com/storage/docs/reference/libraries ---------------------------------------------- 126 | # 127 | # 128 | # def upload_blob(bucket_name, source_file_name, destination_blob_name): 129 | # # Uploads a file to a bucket 130 | # # https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python 131 | # 132 | # storage_client = storage.Client() 133 | # bucket = storage_client.get_bucket(bucket_name) 134 | # blob = bucket.blob(destination_blob_name) 135 | # 136 | # blob.upload_from_filename(source_file_name) 137 | # 138 | # print('File {} uploaded to {}.'.format( 139 | # source_file_name, 140 | # destination_blob_name)) 141 | # 142 | # 143 | # def download_blob(bucket_name, source_blob_name, destination_file_name): 144 | # # Uploads a blob from a bucket 145 | # storage_client = storage.Client() 146 | # bucket = storage_client.get_bucket(bucket_name) 147 | # blob = bucket.blob(source_blob_name) 148 | # 149 | # blob.download_to_filename(destination_file_name) 150 | # 151 | # print('Blob {} downloaded to {}.'.format( 152 | # source_blob_name, 153 | # destination_file_name)) 154 | -------------------------------------------------------------------------------- /utils/metrics.py: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license 2 | """ 3 | Model validation metrics 4 | """ 5 | 6 | import math 7 | import warnings 8 | from pathlib import Path 9 | 10 | import matplotlib.pyplot as plt 11 | import numpy as np 12 | import torch 13 | 14 | 15 | def fitness(x): 16 | # Model fitness as a weighted combination of metrics 17 | w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95] 18 | return (x[:, :4] * w).sum(1) 19 | 20 | 21 | def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=(), eps=1e-16): 22 | """ Compute the average precision, given the recall and precision curves. 23 | Source: https://github.com/rafaelpadilla/Object-Detection-Metrics. 24 | # Arguments 25 | tp: True positives (nparray, nx1 or nx10). 26 | conf: Objectness value from 0-1 (nparray). 27 | pred_cls: Predicted object classes (nparray). 28 | target_cls: True object classes (nparray). 29 | plot: Plot precision-recall curve at mAP@0.5 30 | save_dir: Plot save directory 31 | # Returns 32 | The average precision as computed in py-faster-rcnn. 
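        (concretely: per-class tp, fp, precision, recall and F1 evaluated at the best mean-F1 confidence, the per-class AP array with one column per IoU threshold, and the sorted unique class indices)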
33 | """ 34 | 35 | # Sort by objectness 36 | i = np.argsort(-conf) 37 | tp, conf, pred_cls = tp[i], conf[i], pred_cls[i] 38 | 39 | # Find unique classes 40 | unique_classes, nt = np.unique(target_cls, return_counts=True) 41 | nc = unique_classes.shape[0] # number of classes, number of detections 42 | 43 | # Create Precision-Recall curve and compute AP for each class 44 | px, py = np.linspace(0, 1, 1000), [] # for plotting 45 | ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000)) 46 | for ci, c in enumerate(unique_classes): 47 | i = pred_cls == c 48 | n_l = nt[ci] # number of labels 49 | n_p = i.sum() # number of predictions 50 | 51 | if n_p == 0 or n_l == 0: 52 | continue 53 | else: 54 | # Accumulate FPs and TPs 55 | fpc = (1 - tp[i]).cumsum(0) 56 | tpc = tp[i].cumsum(0) 57 | 58 | # Recall 59 | recall = tpc / (n_l + eps) # recall curve 60 | r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases 61 | 62 | # Precision 63 | precision = tpc / (tpc + fpc) # precision curve 64 | p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1) # p at pr_score 65 | 66 | # AP from recall-precision curve 67 | for j in range(tp.shape[1]): 68 | ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j]) 69 | if plot and j == 0: 70 | py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5 71 | 72 | # Compute F1 (harmonic mean of precision and recall) 73 | f1 = 2 * p * r / (p + r + eps) 74 | names = [v for k, v in names.items() if k in unique_classes] # list: only classes that have data 75 | names = {i: v for i, v in enumerate(names)} # to dict 76 | if plot: 77 | plot_pr_curve(px, py, ap, Path(save_dir) / 'PR_curve.png', names) 78 | plot_mc_curve(px, f1, Path(save_dir) / 'F1_curve.png', names, ylabel='F1') 79 | plot_mc_curve(px, p, Path(save_dir) / 'P_curve.png', names, ylabel='Precision') 80 | plot_mc_curve(px, r, Path(save_dir) / 'R_curve.png', names, ylabel='Recall') 81 | 82 | i = f1.mean(0).argmax() # max F1 index 83 | p, r, f1 = p[:, i], r[:, i], f1[:, i] 84 | tp = (r * nt).round() # true positives 85 | fp = (tp / (p + eps) - tp).round() # false positives 86 | return tp, fp, p, r, f1, ap, unique_classes.astype('int32') 87 | 88 | 89 | def compute_ap(recall, precision): 90 | """ Compute the average precision, given the recall and precision curves 91 | # Arguments 92 | recall: The recall curve (list) 93 | precision: The precision curve (list) 94 | # Returns 95 | Average precision, precision curve, recall curve 96 | """ 97 | 98 | # Append sentinel values to beginning and end 99 | mrec = np.concatenate(([0.0], recall, [1.0])) 100 | mpre = np.concatenate(([1.0], precision, [0.0])) 101 | 102 | # Compute the precision envelope 103 | mpre = np.flip(np.maximum.accumulate(np.flip(mpre))) 104 | 105 | # Integrate area under curve 106 | method = 'interp' # methods: 'continuous', 'interp' 107 | if method == 'interp': 108 | x = np.linspace(0, 1, 101) # 101-point interp (COCO) 109 | ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate 110 | else: # 'continuous' 111 | i = np.where(mrec[1:] != mrec[:-1])[0] # points where x axis (recall) changes 112 | ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) # area under curve 113 | 114 | return ap, mpre, mrec 115 | 116 | 117 | class ConfusionMatrix: 118 | # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix 119 | def __init__(self, nc, conf=0.25, iou_thres=0.45): 120 | self.matrix = np.zeros((nc + 1, nc + 1)) 121 | self.nc = nc # number of classes 122 | self.conf = conf 
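        # conf is the minimum detection confidence kept when updating the matrix; iou_thres below is the minimum IoU for a detection to be matched to a ground-truth box in process_batch()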
123 | self.iou_thres = iou_thres 124 | 125 | def process_batch(self, detections, labels): 126 | """ 127 | Return intersection-over-union (Jaccard index) of boxes. 128 | Both sets of boxes are expected to be in (x1, y1, x2, y2) format. 129 | Arguments: 130 | detections (Array[N, 6]), x1, y1, x2, y2, conf, class 131 | labels (Array[M, 5]), class, x1, y1, x2, y2 132 | Returns: 133 | None, updates confusion matrix accordingly 134 | """ 135 | detections = detections[detections[:, 4] > self.conf] 136 | gt_classes = labels[:, 0].int() 137 | detection_classes = detections[:, 5].int() 138 | iou = box_iou(labels[:, 1:], detections[:, :4]) 139 | 140 | x = torch.where(iou > self.iou_thres) 141 | if x[0].shape[0]: 142 | matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() 143 | if x[0].shape[0] > 1: 144 | matches = matches[matches[:, 2].argsort()[::-1]] 145 | matches = matches[np.unique(matches[:, 1], return_index=True)[1]] 146 | matches = matches[matches[:, 2].argsort()[::-1]] 147 | matches = matches[np.unique(matches[:, 0], return_index=True)[1]] 148 | else: 149 | matches = np.zeros((0, 3)) 150 | 151 | n = matches.shape[0] > 0 152 | m0, m1, _ = matches.transpose().astype(np.int16) 153 | for i, gc in enumerate(gt_classes): 154 | j = m0 == i 155 | if n and sum(j) == 1: 156 | self.matrix[detection_classes[m1[j]], gc] += 1 # correct 157 | else: 158 | self.matrix[self.nc, gc] += 1 # background FP 159 | 160 | if n: 161 | for i, dc in enumerate(detection_classes): 162 | if not any(m1 == i): 163 | self.matrix[dc, self.nc] += 1 # background FN 164 | 165 | def matrix(self): 166 | return self.matrix 167 | 168 | def tp_fp(self): 169 | tp = self.matrix.diagonal() # true positives 170 | fp = self.matrix.sum(1) - tp # false positives 171 | # fn = self.matrix.sum(0) - tp # false negatives (missed detections) 172 | return tp[:-1], fp[:-1] # remove background class 173 | 174 | def plot(self, normalize=True, save_dir='', names=()): 175 | try: 176 | import seaborn as sn 177 | 178 | array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1E-9) if normalize else 1) # normalize columns 179 | array[array < 0.005] = np.nan # don't annotate (would appear as 0.00) 180 | 181 | fig = plt.figure(figsize=(12, 9), tight_layout=True) 182 | nc, nn = self.nc, len(names) # number of classes, names 183 | sn.set(font_scale=1.0 if nc < 50 else 0.8) # for label size 184 | labels = (0 < nn < 99) and (nn == nc) # apply names to ticklabels 185 | with warnings.catch_warnings(): 186 | warnings.simplefilter('ignore') # suppress empty matrix RuntimeWarning: All-NaN slice encountered 187 | sn.heatmap(array, annot=nc < 30, annot_kws={"size": 8}, cmap='Blues', fmt='.2f', square=True, vmin=0.0, 188 | xticklabels=names + ['background FP'] if labels else "auto", 189 | yticklabels=names + ['background FN'] if labels else "auto").set_facecolor((1, 1, 1)) 190 | fig.axes[0].set_xlabel('True') 191 | fig.axes[0].set_ylabel('Predicted') 192 | fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250) 193 | plt.close() 194 | except Exception as e: 195 | print(f'WARNING: ConfusionMatrix plot failure: {e}') 196 | 197 | def print(self): 198 | for i in range(self.nc + 1): 199 | print(' '.join(map(str, self.matrix[i]))) 200 | 201 | 202 | def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7): 203 | # Returns the IoU of box1 to box2. 
box1 is 4, box2 is nx4 204 | box2 = box2.T 205 | 206 | # Get the coordinates of bounding boxes 207 | if x1y1x2y2: # x1, y1, x2, y2 = box1 208 | b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] 209 | b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] 210 | else: # transform from xywh to xyxy 211 | b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2 212 | b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2 213 | b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2 214 | b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2 215 | 216 | # Intersection area 217 | inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \ 218 | (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0) 219 | 220 | # Union Area 221 | w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps 222 | w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps 223 | union = w1 * h1 + w2 * h2 - inter + eps 224 | 225 | iou = inter / union 226 | if CIoU or DIoU or GIoU: 227 | cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width 228 | ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height 229 | if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 230 | c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared 231 | rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + 232 | (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center distance squared 233 | if CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 234 | v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) 235 | with torch.no_grad(): 236 | alpha = v / (v - iou + (1 + eps)) 237 | return iou - (rho2 / c2 + v * alpha) # CIoU 238 | return iou - rho2 / c2 # DIoU 239 | c_area = cw * ch + eps # convex area 240 | return iou - (c_area - union) / c_area # GIoU https://arxiv.org/pdf/1902.09630.pdf 241 | return iou # IoU 242 | 243 | 244 | def box_iou(box1, box2): 245 | # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py 246 | """ 247 | Return intersection-over-union (Jaccard index) of boxes. 248 | Both sets of boxes are expected to be in (x1, y1, x2, y2) format. 249 | Arguments: 250 | box1 (Tensor[N, 4]) 251 | box2 (Tensor[M, 4]) 252 | Returns: 253 | iou (Tensor[N, M]): the NxM matrix containing the pairwise 254 | IoU values for every element in boxes1 and boxes2 255 | """ 256 | 257 | def box_area(box): 258 | # box = 4xn 259 | return (box[2] - box[0]) * (box[3] - box[1]) 260 | 261 | area1 = box_area(box1.T) 262 | area2 = box_area(box2.T) 263 | 264 | # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2) 265 | inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2) 266 | return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter) 267 | 268 | 269 | def bbox_ioa(box1, box2, eps=1E-7): 270 | """ Returns the intersection over box2 area given box1, box2. 
Boxes are x1y1x2y2 271 | box1: np.array of shape(4) 272 | box2: np.array of shape(nx4) 273 | returns: np.array of shape(n) 274 | """ 275 | 276 | box2 = box2.transpose() 277 | 278 | # Get the coordinates of bounding boxes 279 | b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] 280 | b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] 281 | 282 | # Intersection area 283 | inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \ 284 | (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0) 285 | 286 | # box2 area 287 | box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + eps 288 | 289 | # Intersection over box2 area 290 | return inter_area / box2_area 291 | 292 | 293 | def wh_iou(wh1, wh2): 294 | # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2 295 | wh1 = wh1[:, None] # [N,1,2] 296 | wh2 = wh2[None] # [1,M,2] 297 | inter = torch.min(wh1, wh2).prod(2) # [N,M] 298 | return inter / (wh1.prod(2) + wh2.prod(2) - inter) # iou = inter / (area1 + area2 - inter) 299 | 300 | 301 | # Plots ---------------------------------------------------------------------------------------------------------------- 302 | 303 | def plot_pr_curve(px, py, ap, save_dir='pr_curve.png', names=()): 304 | # Precision-recall curve 305 | fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) 306 | py = np.stack(py, axis=1) 307 | 308 | if 0 < len(names) < 21: # display per-class legend if < 21 classes 309 | for i, y in enumerate(py.T): 310 | ax.plot(px, y, linewidth=1, label=f'{names[i]} {ap[i, 0]:.3f}') # plot(recall, precision) 311 | else: 312 | ax.plot(px, py, linewidth=1, color='grey') # plot(recall, precision) 313 | 314 | ax.plot(px, py.mean(1), linewidth=3, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean()) 315 | ax.set_xlabel('Recall') 316 | ax.set_ylabel('Precision') 317 | ax.set_xlim(0, 1) 318 | ax.set_ylim(0, 1) 319 | plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left") 320 | fig.savefig(Path(save_dir), dpi=250) 321 | plt.close() 322 | 323 | 324 | def plot_mc_curve(px, py, save_dir='mc_curve.png', names=(), xlabel='Confidence', ylabel='Metric'): 325 | # Metric-confidence curve 326 | fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) 327 | 328 | if 0 < len(names) < 21: # display per-class legend if < 21 classes 329 | for i, y in enumerate(py): 330 | ax.plot(px, y, linewidth=1, label=f'{names[i]}') # plot(confidence, metric) 331 | else: 332 | ax.plot(px, py.T, linewidth=1, color='grey') # plot(confidence, metric) 333 | 334 | y = py.mean(0) 335 | ax.plot(px, y, linewidth=3, color='blue', label=f'all classes {y.max():.2f} at {px[y.argmax()]:.3f}') 336 | ax.set_xlabel(xlabel) 337 | ax.set_ylabel(ylabel) 338 | ax.set_xlim(0, 1) 339 | ax.set_ylim(0, 1) 340 | plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left") 341 | fig.savefig(Path(save_dir), dpi=250) 342 | plt.close() 343 | -------------------------------------------------------------------------------- /utils/plots.py: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license 2 | """ 3 | Plotting utils 4 | """ 5 | 6 | import math 7 | import os 8 | from copy import copy 9 | from pathlib import Path 10 | 11 | import cv2 12 | import matplotlib 13 | import matplotlib.pyplot as plt 14 | import numpy as np 15 | import pandas as pd 16 | import seaborn as sn 17 | import torch 18 | from PIL import Image, ImageDraw, ImageFont 19 | 20 | from utils.general import (CONFIG_DIR, FONT, LOGGER, Timeout, check_font, 
check_requirements, clip_coords, 21 | increment_path, is_ascii, is_chinese, try_except, xywh2xyxy, xyxy2xywh) 22 | from utils.metrics import fitness 23 | 24 | # Settings 25 | RANK = int(os.getenv('RANK', -1)) 26 | matplotlib.rc('font', **{'size': 11}) 27 | matplotlib.use('Agg') # for writing to files only 28 | 29 | 30 | class Colors: 31 | # Ultralytics color palette https://ultralytics.com/ 32 | def __init__(self): 33 | # hex = matplotlib.colors.TABLEAU_COLORS.values() 34 | hex = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB', 35 | '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7') 36 | self.palette = [self.hex2rgb('#' + c) for c in hex] 37 | self.n = len(self.palette) 38 | 39 | def __call__(self, i, bgr=False): 40 | c = self.palette[int(i) % self.n] 41 | return (c[2], c[1], c[0]) if bgr else c 42 | 43 | @staticmethod 44 | def hex2rgb(h): # rgb order (PIL) 45 | return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4)) 46 | 47 | 48 | colors = Colors() # create instance for 'from utils.plots import colors' 49 | 50 | 51 | def check_pil_font(font=FONT, size=10): 52 | # Return a PIL TrueType Font, downloading to CONFIG_DIR if necessary 53 | font = Path(font) 54 | font = font if font.exists() else (CONFIG_DIR / font.name) 55 | try: 56 | return ImageFont.truetype(str(font) if font.exists() else font.name, size) 57 | except Exception: # download if missing 58 | check_font(font) 59 | try: 60 | return ImageFont.truetype(str(font), size) 61 | except TypeError: 62 | check_requirements('Pillow>=8.4.0') # known issue https://github.com/ultralytics/yolov5/issues/5374 63 | 64 | 65 | class Annotator: 66 | if RANK in (-1, 0): 67 | check_pil_font() # download TTF if necessary 68 | 69 | # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations 70 | def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'): 71 | assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.' 
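        # Note: the line below switches to PIL drawing whenever the example text is non-ASCII or Chinese (e.g. the province character on Chinese plates), since cv2.putText cannot render such glyphs; plain ASCII labels keep the cv2 path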
72 | self.pil = pil or not is_ascii(example) or is_chinese(example) 73 | if self.pil: # use PIL 74 | self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) 75 | self.draw = ImageDraw.Draw(self.im) 76 | self.font = check_pil_font(font='Arial.Unicode.ttf' if is_chinese(example) else font, 77 | size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12)) 78 | else: # use cv2 79 | self.im = im 80 | self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width 81 | 82 | def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)): 83 | # Add one xyxy box to image with label 84 | if self.pil or not is_ascii(label): 85 | self.draw.rectangle(box, width=self.lw, outline=color) # box 86 | if label: 87 | w, h = self.font.getsize(label) # text width, height 88 | outside = box[1] - h >= 0 # label fits outside box 89 | self.draw.rectangle((box[0], 90 | box[1] - h if outside else box[1], 91 | box[0] + w + 1, 92 | box[1] + 1 if outside else box[1] + h + 1), fill=color) 93 | # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls') # for PIL>8.0 94 | self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font) 95 | else: # cv2 96 | p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3])) 97 | cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA) 98 | if label: 99 | tf = max(self.lw - 1, 1) # font thickness 100 | w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0] # text width, height 101 | outside = p1[1] - h - 3 >= 0 # label fits outside box 102 | p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3 103 | cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA) # filled 104 | cv2.putText(self.im, label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2), 0, self.lw / 3, txt_color, 105 | thickness=tf, lineType=cv2.LINE_AA) 106 | 107 | def rectangle(self, xy, fill=None, outline=None, width=1): 108 | # Add rectangle to image (PIL-only) 109 | self.draw.rectangle(xy, fill, outline, width) 110 | 111 | def text(self, xy, text, txt_color=(255, 255, 255)): 112 | # Add text to image (PIL-only) 113 | w, h = self.font.getsize(text) # text width, height 114 | self.draw.text((xy[0], xy[1] - h + 1), text, fill=txt_color, font=self.font) 115 | 116 | def result(self): 117 | # Return annotated image as array 118 | return np.asarray(self.im) 119 | 120 | 121 | def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')): 122 | """ 123 | x: Features to be visualized 124 | module_type: Module type 125 | stage: Module stage within model 126 | n: Maximum number of feature maps to plot 127 | save_dir: Directory to save results 128 | """ 129 | if 'Detect' not in module_type: 130 | batch, channels, height, width = x.shape # batch, channels, height, width 131 | if height > 1 and width > 1: 132 | f = save_dir / f"stage{stage}_{module_type.split('.')[-1]}_features.png" # filename 133 | 134 | blocks = torch.chunk(x[0].cpu(), channels, dim=0) # select batch index 0, block by channels 135 | n = min(n, channels) # number of plots 136 | fig, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True) # 8 rows x n/8 cols 137 | ax = ax.ravel() 138 | plt.subplots_adjust(wspace=0.05, hspace=0.05) 139 | for i in range(n): 140 | ax[i].imshow(blocks[i].squeeze()) # cmap='gray' 141 | ax[i].axis('off') 142 | 143 | LOGGER.info(f'Saving {f}... 
({n}/{channels})') 144 | plt.savefig(f, dpi=300, bbox_inches='tight') 145 | plt.close() 146 | np.save(str(f.with_suffix('.npy')), x[0].cpu().numpy()) # npy save 147 | 148 | 149 | def hist2d(x, y, n=100): 150 | # 2d histogram used in labels.png and evolve.png 151 | xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n) 152 | hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges)) 153 | xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1) 154 | yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1) 155 | return np.log(hist[xidx, yidx]) 156 | 157 | 158 | def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5): 159 | from scipy.signal import butter, filtfilt 160 | 161 | # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy 162 | def butter_lowpass(cutoff, fs, order): 163 | nyq = 0.5 * fs 164 | normal_cutoff = cutoff / nyq 165 | return butter(order, normal_cutoff, btype='low', analog=False) 166 | 167 | b, a = butter_lowpass(cutoff, fs, order=order) 168 | return filtfilt(b, a, data) # forward-backward filter 169 | 170 | 171 | def output_to_target(output): 172 | # Convert model output to target format [batch_id, class_id, x, y, w, h, conf] 173 | targets = [] 174 | for i, o in enumerate(output): 175 | for *box, conf, cls in o.cpu().numpy(): 176 | targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf]) 177 | return np.array(targets) 178 | 179 | 180 | def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=1920, max_subplots=16): 181 | # Plot image grid with labels 182 | if isinstance(images, torch.Tensor): 183 | images = images.cpu().float().numpy() 184 | if isinstance(targets, torch.Tensor): 185 | targets = targets.cpu().numpy() 186 | if np.max(images[0]) <= 1: 187 | images *= 255 # de-normalise (optional) 188 | bs, _, h, w = images.shape # batch size, _, height, width 189 | bs = min(bs, max_subplots) # limit plot images 190 | ns = np.ceil(bs ** 0.5) # number of subplots (square) 191 | 192 | # Build Image 193 | mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init 194 | for i, im in enumerate(images): 195 | if i == max_subplots: # if last batch has fewer images than we expect 196 | break 197 | x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin 198 | im = im.transpose(1, 2, 0) 199 | mosaic[y:y + h, x:x + w, :] = im 200 | 201 | # Resize (optional) 202 | scale = max_size / ns / max(h, w) 203 | if scale < 1: 204 | h = math.ceil(scale * h) 205 | w = math.ceil(scale * w) 206 | mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h))) 207 | 208 | # Annotate 209 | fs = int((h + w) * ns * 0.01) # font size 210 | annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names) 211 | for i in range(i + 1): 212 | x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin 213 | annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders 214 | if paths: 215 | annotator.text((x + 5, y + 5 + h), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames 216 | if len(targets) > 0: 217 | ti = targets[targets[:, 0] == i] # image targets 218 | boxes = xywh2xyxy(ti[:, 2:6]).T 219 | classes = ti[:, 1].astype('int') 220 | labels = ti.shape[1] == 6 # labels if no conf column 221 | conf = None if labels else ti[:, 6] # check for confidence presence (label vs pred) 222 | 223 | if boxes.shape[1]: 224 | if boxes.max() <= 1.01: # if normalized with tolerance 0.01 225 | boxes[[0, 2]] 
*= w # scale to pixels 226 | boxes[[1, 3]] *= h 227 | elif scale < 1: # absolute coords need scale if image scales 228 | boxes *= scale 229 | boxes[[0, 2]] += x 230 | boxes[[1, 3]] += y 231 | for j, box in enumerate(boxes.T.tolist()): 232 | cls = classes[j] 233 | color = colors(cls) 234 | cls = names[cls] if names else cls 235 | if labels or conf[j] > 0.25: # 0.25 conf thresh 236 | label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}' 237 | annotator.box_label(box, label, color=color) 238 | annotator.im.save(fname) # save 239 | 240 | 241 | def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''): 242 | # Plot LR simulating training for full epochs 243 | optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals 244 | y = [] 245 | for _ in range(epochs): 246 | scheduler.step() 247 | y.append(optimizer.param_groups[0]['lr']) 248 | plt.plot(y, '.-', label='LR') 249 | plt.xlabel('epoch') 250 | plt.ylabel('LR') 251 | plt.grid() 252 | plt.xlim(0, epochs) 253 | plt.ylim(0) 254 | plt.savefig(Path(save_dir) / 'LR.png', dpi=200) 255 | plt.close() 256 | 257 | 258 | def plot_val_txt(): # from utils.plots import *; plot_val() 259 | # Plot val.txt histograms 260 | x = np.loadtxt('val.txt', dtype=np.float32) 261 | box = xyxy2xywh(x[:, :4]) 262 | cx, cy = box[:, 0], box[:, 1] 263 | 264 | fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True) 265 | ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0) 266 | ax.set_aspect('equal') 267 | plt.savefig('hist2d.png', dpi=300) 268 | 269 | fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True) 270 | ax[0].hist(cx, bins=600) 271 | ax[1].hist(cy, bins=600) 272 | plt.savefig('hist1d.png', dpi=200) 273 | 274 | 275 | def plot_targets_txt(): # from utils.plots import *; plot_targets_txt() 276 | # Plot targets.txt histograms 277 | x = np.loadtxt('targets.txt', dtype=np.float32).T 278 | s = ['x targets', 'y targets', 'width targets', 'height targets'] 279 | fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True) 280 | ax = ax.ravel() 281 | for i in range(4): 282 | ax[i].hist(x[i], bins=100, label=f'{x[i].mean():.3g} +/- {x[i].std():.3g}') 283 | ax[i].legend() 284 | ax[i].set_title(s[i]) 285 | plt.savefig('targets.jpg', dpi=200) 286 | 287 | 288 | def plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_val_study() 289 | # Plot file=study.txt generated by val.py (or plot all study*.txt in dir) 290 | save_dir = Path(file).parent if file else Path(dir) 291 | plot2 = False # plot additional results 292 | if plot2: 293 | ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)[1].ravel() 294 | 295 | fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True) 296 | # for f in [save_dir / f'study_coco_{x}.txt' for x in ['yolov5n6', 'yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]: 297 | for f in sorted(save_dir.glob('study*.txt')): 298 | y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T 299 | x = np.arange(y.shape[1]) if x is None else np.array(x) 300 | if plot2: 301 | s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_preprocess (ms/img)', 't_inference (ms/img)', 't_NMS (ms/img)'] 302 | for i in range(7): 303 | ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8) 304 | ax[i].set_title(s[i]) 305 | 306 | j = y[3].argmax() + 1 307 | ax2.plot(y[5, 1:j], y[3, 1:j] * 1E2, '.-', linewidth=2, markersize=8, 308 | label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO')) 309 | 310 | ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5], 311 | 
'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet') 312 | 313 | ax2.grid(alpha=0.2) 314 | ax2.set_yticks(np.arange(20, 60, 5)) 315 | ax2.set_xlim(0, 57) 316 | ax2.set_ylim(25, 55) 317 | ax2.set_xlabel('GPU Speed (ms/img)') 318 | ax2.set_ylabel('COCO AP val') 319 | ax2.legend(loc='lower right') 320 | f = save_dir / 'study.png' 321 | print(f'Saving {f}...') 322 | plt.savefig(f, dpi=300) 323 | 324 | 325 | @try_except # known issue https://github.com/ultralytics/yolov5/issues/5395 326 | @Timeout(30) # known issue https://github.com/ultralytics/yolov5/issues/5611 327 | def plot_labels(labels, names=(), save_dir=Path('')): 328 | # plot dataset labels 329 | LOGGER.info(f"Plotting labels to {save_dir / 'labels.jpg'}... ") 330 | c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes 331 | nc = int(c.max() + 1) # number of classes 332 | x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height']) 333 | 334 | # seaborn correlogram 335 | sn.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9)) 336 | plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200) 337 | plt.close() 338 | 339 | # matplotlib labels 340 | matplotlib.use('svg') # faster 341 | ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel() 342 | y = ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8) 343 | try: # color histogram bars by class 344 | [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)] # known issue #3195 345 | except Exception: 346 | pass 347 | ax[0].set_ylabel('instances') 348 | if 0 < len(names) < 30: 349 | ax[0].set_xticks(range(len(names))) 350 | ax[0].set_xticklabels(names, rotation=90, fontsize=10) 351 | else: 352 | ax[0].set_xlabel('classes') 353 | sn.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9) 354 | sn.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9) 355 | 356 | # rectangles 357 | labels[:, 1:3] = 0.5 # center 358 | labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000 359 | img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255) 360 | for cls, *box in labels[:1000]: 361 | ImageDraw.Draw(img).rectangle(box, width=1, outline=colors(cls)) # plot 362 | ax[1].imshow(img) 363 | ax[1].axis('off') 364 | 365 | for a in [0, 1, 2, 3]: 366 | for s in ['top', 'right', 'left', 'bottom']: 367 | ax[a].spines[s].set_visible(False) 368 | 369 | plt.savefig(save_dir / 'labels.jpg', dpi=200) 370 | matplotlib.use('Agg') 371 | plt.close() 372 | 373 | 374 | def plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *; plot_evolve() 375 | # Plot evolve.csv hyp evolution results 376 | evolve_csv = Path(evolve_csv) 377 | data = pd.read_csv(evolve_csv) 378 | keys = [x.strip() for x in data.columns] 379 | x = data.values 380 | f = fitness(x) 381 | j = np.argmax(f) # max fitness index 382 | plt.figure(figsize=(10, 12), tight_layout=True) 383 | matplotlib.rc('font', **{'size': 8}) 384 | print(f'Best results from row {j} of {evolve_csv}:') 385 | for i, k in enumerate(keys[7:]): 386 | v = x[:, 7 + i] 387 | mu = v[j] # best single result 388 | plt.subplot(6, 5, i + 1) 389 | plt.scatter(v, f, c=hist2d(v, f, 20), cmap='viridis', alpha=.8, edgecolors='none') 390 | plt.plot(mu, f.max(), 'k+', markersize=15) 391 | plt.title(f'{k} = {mu:.3g}', fontdict={'size': 9}) # limit to 40 characters 392 | if i % 5 != 0: 393 | plt.yticks([]) 394 | print(f'{k:>15}: {mu:.3g}') 395 | f = evolve_csv.with_suffix('.png') # filename 396 | plt.savefig(f, dpi=200) 397 | plt.close() 398 | 
print(f'Saved {f}') 399 | 400 | 401 | def plot_results(file='path/to/results.csv', dir=''): 402 | # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv') 403 | save_dir = Path(file).parent if file else Path(dir) 404 | fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True) 405 | ax = ax.ravel() 406 | files = list(save_dir.glob('results*.csv')) 407 | assert len(files), f'No results.csv files found in {save_dir.resolve()}, nothing to plot.' 408 | for fi, f in enumerate(files): 409 | try: 410 | data = pd.read_csv(f) 411 | s = [x.strip() for x in data.columns] 412 | x = data.values[:, 0] 413 | for i, j in enumerate([1, 2, 3, 4, 5, 8, 9, 10, 6, 7]): 414 | y = data.values[:, j] 415 | # y[y == 0] = np.nan # don't show zero values 416 | ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=8) 417 | ax[i].set_title(s[j], fontsize=12) 418 | # if j in [8, 9, 10]: # share train and val loss y axes 419 | # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) 420 | except Exception as e: 421 | LOGGER.info(f'Warning: Plotting error for {f}: {e}') 422 | ax[1].legend() 423 | fig.savefig(save_dir / 'results.png', dpi=200) 424 | plt.close() 425 | 426 | 427 | def profile_idetection(start=0, stop=0, labels=(), save_dir=''): 428 | # Plot iDetection '*.txt' per-image logs. from utils.plots import *; profile_idetection() 429 | ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel() 430 | s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS'] 431 | files = list(Path(save_dir).glob('frames*.txt')) 432 | for fi, f in enumerate(files): 433 | try: 434 | results = np.loadtxt(f, ndmin=2).T[:, 90:-30] # clip first and last rows 435 | n = results.shape[1] # number of rows 436 | x = np.arange(start, min(stop, n) if stop else n) 437 | results = results[:, x] 438 | t = (results[0] - results[0].min()) # set t0=0s 439 | results[0] = x 440 | for i, a in enumerate(ax): 441 | if i < len(results): 442 | label = labels[fi] if len(labels) else f.stem.replace('frames_', '') 443 | a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5) 444 | a.set_title(s[i]) 445 | a.set_xlabel('time (s)') 446 | # if fi == len(files) - 1: 447 | # a.set_ylim(bottom=0) 448 | for side in ['top', 'right']: 449 | a.spines[side].set_visible(False) 450 | else: 451 | a.remove() 452 | except Exception as e: 453 | print(f'Warning: Plotting error for {f}; {e}') 454 | ax[1].legend() 455 | plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200) 456 | 457 | 458 | def save_one_box(xyxy, im, file='image.jpg', gain=1.02, pad=10, square=False, BGR=False, save=True): 459 | # Save image crop as {file} with crop size multiple {gain} and {pad} pixels. 
Save and/or return crop 460 | xyxy = torch.tensor(xyxy).view(-1, 4) 461 | b = xyxy2xywh(xyxy) # boxes 462 | if square: 463 | b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # attempt rectangle to square 464 | b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad 465 | xyxy = xywh2xyxy(b).long() 466 | clip_coords(xyxy, im.shape) 467 | crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)] 468 | if save: 469 | file.parent.mkdir(parents=True, exist_ok=True) # make directory 470 | cv2.imwrite(str(increment_path(file).with_suffix('.jpg')), crop) 471 | return crop 472 | -------------------------------------------------------------------------------- /utils/torch_utils.py: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license 2 | """ 3 | PyTorch utils 4 | """ 5 | 6 | import datetime 7 | import math 8 | import os 9 | import platform 10 | import subprocess 11 | import time 12 | import warnings 13 | from contextlib import contextmanager 14 | from copy import deepcopy 15 | from pathlib import Path 16 | 17 | import torch 18 | import torch.distributed as dist 19 | import torch.nn as nn 20 | import torch.nn.functional as F 21 | 22 | from utils.general import LOGGER 23 | 24 | try: 25 | import thop # for FLOPs computation 26 | except ImportError: 27 | thop = None 28 | 29 | # Suppress PyTorch warnings 30 | warnings.filterwarnings('ignore', message='User provided device_type of \'cuda\', but CUDA is not available. Disabling') 31 | 32 | 33 | @contextmanager 34 | def torch_distributed_zero_first(local_rank: int): 35 | """ 36 | Decorator to make all processes in distributed training wait for each local_master to do something. 37 | """ 38 | if local_rank not in [-1, 0]: 39 | dist.barrier(device_ids=[local_rank]) 40 | yield 41 | if local_rank == 0: 42 | dist.barrier(device_ids=[0]) 43 | 44 | 45 | def date_modified(path=__file__): 46 | # return human-readable file modification date, i.e. '2021-3-26' 47 | t = datetime.datetime.fromtimestamp(Path(path).stat().st_mtime) 48 | return f'{t.year}-{t.month}-{t.day}' 49 | 50 | 51 | def git_describe(path=Path(__file__).parent): # path must be a directory 52 | # return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe 53 | s = f'git -C {path} describe --tags --long --always' 54 | try: 55 | return subprocess.check_output(s, shell=True, stderr=subprocess.STDOUT).decode()[:-1] 56 | except subprocess.CalledProcessError: 57 | return '' # not a git repository 58 | 59 | 60 | def device_count(): 61 | # Returns number of CUDA devices available. Safe version of torch.cuda.device_count(). Only works on Linux. 
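    # (parses `nvidia-smi -L` to count GPUs; any failure, e.g. a missing NVIDIA driver, yields 0)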
62 | assert platform.system() == 'Linux', 'device_count() function only works on Linux' 63 | try: 64 | cmd = 'nvidia-smi -L | wc -l' 65 | return int(subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1]) 66 | except Exception: 67 | return 0 68 | 69 | 70 | def select_device(device='', batch_size=0, newline=True): 71 | # device = 'cpu' or '0' or '0,1,2,3' 72 | s = f'YOLOv5 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string 73 | device = str(device).strip().lower().replace('cuda:', '') # to string, 'cuda:0' to '0' 74 | cpu = device == 'cpu' 75 | if cpu: 76 | os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False 77 | elif device: # non-cpu device requested 78 | os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable - must be before assert is_available() 79 | assert torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(',', '')), \ 80 | f"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)" 81 | 82 | cuda = not cpu and torch.cuda.is_available() 83 | if cuda: 84 | devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 0,1,6,7 85 | n = len(devices) # device count 86 | if n > 1 and batch_size > 0: # check batch_size is divisible by device_count 87 | assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}' 88 | space = ' ' * (len(s) + 1) 89 | for i, d in enumerate(devices): 90 | p = torch.cuda.get_device_properties(i) 91 | s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2:.0f}MiB)\n" # bytes to MB 92 | else: 93 | s += 'CPU\n' 94 | 95 | if not newline: 96 | s = s.rstrip() 97 | LOGGER.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe 98 | return torch.device('cuda:0' if cuda else 'cpu') 99 | 100 | 101 | def time_sync(): 102 | # pytorch-accurate time 103 | if torch.cuda.is_available(): 104 | torch.cuda.synchronize() 105 | return time.time() 106 | 107 | 108 | def profile(input, ops, n=10, device=None): 109 | # YOLOv5 speed/memory/FLOPs profiler 110 | # 111 | # Usage: 112 | # input = torch.randn(16, 3, 640, 640) 113 | # m1 = lambda x: x * torch.sigmoid(x) 114 | # m2 = nn.SiLU() 115 | # profile(input, [m1, m2], n=100) # profile over 100 iterations 116 | 117 | results = [] 118 | device = device or select_device() 119 | print(f"{'Params':>12s}{'GFLOPs':>12s}{'GPU_mem (GB)':>14s}{'forward (ms)':>14s}{'backward (ms)':>14s}" 120 | f"{'input':>24s}{'output':>24s}") 121 | 122 | for x in input if isinstance(input, list) else [input]: 123 | x = x.to(device) 124 | x.requires_grad = True 125 | for m in ops if isinstance(ops, list) else [ops]: 126 | m = m.to(device) if hasattr(m, 'to') else m # device 127 | m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m 128 | tf, tb, t = 0, 0, [0, 0, 0] # dt forward, backward 129 | try: 130 | flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPs 131 | except Exception: 132 | flops = 0 133 | 134 | try: 135 | for _ in range(n): 136 | t[0] = time_sync() 137 | y = m(x) 138 | t[1] = time_sync() 139 | try: 140 | _ = (sum(yi.sum() for yi in y) if isinstance(y, list) else y).sum().backward() 141 | t[2] = time_sync() 142 | except Exception: # no backward method 143 | # print(e) # for debug 144 | t[2] = float('nan') 145 | tf += (t[1] - t[0]) * 1000 / n # ms per op forward 146 | tb += (t[2] - t[1]) * 1000 / n # 
ms per op backward 147 | mem = torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0 # (GB) 148 | s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list' 149 | s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list' 150 | p = sum(list(x.numel() for x in m.parameters())) if isinstance(m, nn.Module) else 0 # parameters 151 | print(f'{p:12}{flops:12.4g}{mem:>14.3f}{tf:14.4g}{tb:14.4g}{str(s_in):>24s}{str(s_out):>24s}') 152 | results.append([p, flops, mem, tf, tb, s_in, s_out]) 153 | except Exception as e: 154 | print(e) 155 | results.append(None) 156 | torch.cuda.empty_cache() 157 | return results 158 | 159 | 160 | def is_parallel(model): 161 | # Returns True if model is of type DP or DDP 162 | return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel) 163 | 164 | 165 | def de_parallel(model): 166 | # De-parallelize a model: returns single-GPU model if model is of type DP or DDP 167 | return model.module if is_parallel(model) else model 168 | 169 | 170 | def initialize_weights(model): 171 | for m in model.modules(): 172 | t = type(m) 173 | if t is nn.Conv2d: 174 | pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') 175 | elif t is nn.BatchNorm2d: 176 | m.eps = 1e-3 177 | m.momentum = 0.03 178 | elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]: 179 | m.inplace = True 180 | 181 | 182 | def find_modules(model, mclass=nn.Conv2d): 183 | # Finds layer indices matching module class 'mclass' 184 | return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)] 185 | 186 | 187 | def sparsity(model): 188 | # Return global model sparsity 189 | a, b = 0, 0 190 | for p in model.parameters(): 191 | a += p.numel() 192 | b += (p == 0).sum() 193 | return b / a 194 | 195 | 196 | def prune(model, amount=0.3): 197 | # Prune model to requested global sparsity 198 | import torch.nn.utils.prune as prune 199 | print('Pruning model... ', end='') 200 | for name, m in model.named_modules(): 201 | if isinstance(m, nn.Conv2d): 202 | prune.l1_unstructured(m, name='weight', amount=amount) # prune 203 | prune.remove(m, 'weight') # make permanent 204 | print(' %.3g global sparsity' % sparsity(model)) 205 | 206 | 207 | def fuse_conv_and_bn(conv, bn): 208 | # Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/ 209 | fusedconv = nn.Conv2d(conv.in_channels, 210 | conv.out_channels, 211 | kernel_size=conv.kernel_size, 212 | stride=conv.stride, 213 | padding=conv.padding, 214 | groups=conv.groups, 215 | bias=True).requires_grad_(False).to(conv.weight.device) 216 | 217 | # prepare filters 218 | w_conv = conv.weight.clone().view(conv.out_channels, -1) 219 | w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var))) 220 | fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape)) 221 | 222 | # prepare spatial bias 223 | b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias 224 | b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps)) 225 | fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn) 226 | 227 | return fusedconv 228 | 229 | 230 | def model_info(model, verbose=False, img_size=640): 231 | # Model information. img_size may be int or list, i.e. 
img_size=640 or img_size=[640, 320] 232 | n_p = sum(x.numel() for x in model.parameters()) # number parameters 233 | n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients 234 | if verbose: 235 | print(f"{'layer':>5} {'name':>40} {'gradient':>9} {'parameters':>12} {'shape':>20} {'mu':>10} {'sigma':>10}") 236 | for i, (name, p) in enumerate(model.named_parameters()): 237 | name = name.replace('module_list.', '') 238 | print('%5g %40s %9s %12g %20s %10.3g %10.3g' % 239 | (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std())) 240 | 241 | try: # FLOPs 242 | from thop import profile 243 | stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32 244 | img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device) # input 245 | flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2 # stride GFLOPs 246 | img_size = img_size if isinstance(img_size, list) else [img_size, img_size] # expand if int/float 247 | fs = ', %.1f GFLOPs' % (flops * img_size[0] / stride * img_size[1] / stride) # 640x640 GFLOPs 248 | except (ImportError, Exception): 249 | fs = '' 250 | 251 | LOGGER.info(f"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}") 252 | 253 | 254 | def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416) 255 | # scales img(bs,3,y,x) by ratio constrained to gs-multiple 256 | if ratio == 1.0: 257 | return img 258 | else: 259 | h, w = img.shape[2:] 260 | s = (int(h * ratio), int(w * ratio)) # new size 261 | img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize 262 | if not same_shape: # pad/crop img 263 | h, w = (math.ceil(x * ratio / gs) * gs for x in (h, w)) 264 | return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean 265 | 266 | 267 | def copy_attr(a, b, include=(), exclude=()): 268 | # Copy attributes from b to a, options to only include [...] and to exclude [...] 269 | for k, v in b.__dict__.items(): 270 | if (len(include) and k not in include) or k.startswith('_') or k in exclude: 271 | continue 272 | else: 273 | setattr(a, k, v) 274 | 275 | 276 | class EarlyStopping: 277 | # YOLOv5 simple early stopper 278 | def __init__(self, patience=30): 279 | self.best_fitness = 0.0 # i.e. mAP 280 | self.best_epoch = 0 281 | self.patience = patience or float('inf') # epochs to wait after fitness stops improving to stop 282 | self.possible_stop = False # possible stop may occur next epoch 283 | 284 | def __call__(self, epoch, fitness): 285 | if fitness >= self.best_fitness: # >= 0 to allow for early zero-fitness stage of training 286 | self.best_epoch = epoch 287 | self.best_fitness = fitness 288 | delta = epoch - self.best_epoch # epochs without improvement 289 | self.possible_stop = delta >= (self.patience - 1) # possible stop may occur next epoch 290 | stop = delta >= self.patience # stop training if patience exceeded 291 | if stop: 292 | LOGGER.info(f'Stopping training early as no improvement observed in last {self.patience} epochs. ' 293 | f'Best results observed at epoch {self.best_epoch}, best model saved as best.pt.\n' 294 | f'To update EarlyStopping(patience={self.patience}) pass a new patience value, ' 295 | f'i.e. 
`python train.py --patience 300` or use `--patience 0` to disable EarlyStopping.') 296 | return stop 297 | 298 | 299 | class ModelEMA: 300 | """ Updated Exponential Moving Average (EMA) from https://github.com/rwightman/pytorch-image-models 301 | Keeps a moving average of everything in the model state_dict (parameters and buffers) 302 | For EMA details see https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage 303 | """ 304 | 305 | def __init__(self, model, decay=0.9999, updates=0): 306 | # Create EMA 307 | self.ema = deepcopy(de_parallel(model)).eval() # FP32 EMA 308 | # if next(model.parameters()).device.type != 'cpu': 309 | # self.ema.half() # FP16 EMA 310 | self.updates = updates # number of EMA updates 311 | self.decay = lambda x: decay * (1 - math.exp(-x / 2000)) # decay exponential ramp (to help early epochs) 312 | for p in self.ema.parameters(): 313 | p.requires_grad_(False) 314 | 315 | def update(self, model): 316 | # Update EMA parameters 317 | with torch.no_grad(): 318 | self.updates += 1 319 | d = self.decay(self.updates) 320 | 321 | msd = de_parallel(model).state_dict() # model state_dict 322 | for k, v in self.ema.state_dict().items(): 323 | if v.dtype.is_floating_point: 324 | v *= d 325 | v += (1 - d) * msd[k].detach() 326 | 327 | def update_attr(self, model, include=(), exclude=('process_group', 'reducer')): 328 | # Update EMA attributes 329 | copy_attr(self.ema, model, include, exclude) 330 | -------------------------------------------------------------------------------- /weights/Final_LPRNet_model.pth: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Hamletlx/Yolov5_61_LPRNet_PySide__vehicle_license_plate_recognition/b48a4c2f343aa1821d8e7bc9b4a43fddb2d63d4a/weights/Final_LPRNet_model.pth -------------------------------------------------------------------------------- /weights/yolov5_best.pt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Hamletlx/Yolov5_61_LPRNet_PySide__vehicle_license_plate_recognition/b48a4c2f343aa1821d8e7bc9b4a43fddb2d63d4a/weights/yolov5_best.pt -------------------------------------------------------------------------------- /windowico.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Hamletlx/Yolov5_61_LPRNet_PySide__vehicle_license_plate_recognition/b48a4c2f343aa1821d8e7bc9b4a43fddb2d63d4a/windowico.png --------------------------------------------------------------------------------
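Usage note — a minimal sketch, not part of the repository source, of how `Annotator` and `save_one_box` from utils/plots.py above can be combined to draw one detected plate box and return its crop for recognition. The image path points at one of the bundled samples, while the box coordinates, label text and confidence are made-up placeholders rather than real detector output.

import cv2

from utils.plots import Annotator, colors, save_one_box

im = cv2.imread('data/image/1.jpg')               # one of the bundled sample images (BGR)
box = [120.0, 340.0, 420.0, 430.0]                # hypothetical xyxy plate box, placeholder values

# draw the box with an ASCII label (ASCII keeps Annotator on its cv2 drawing path)
annotator = Annotator(im.copy(), line_width=2, example='plate')
annotator.box_label(box, label='plate 0.92', color=colors(0, True))
cv2.imwrite('annotated.jpg', annotator.result())

# save_one_box clips and pads the box; with save=False it just returns the crop (BGR kept for cv2 use)
plate_crop = save_one_box(box, im, save=False, BGR=True)

In the actual pipeline the xyxy box would come from the YOLOv5 model's NMS output rather than being hard-coded, and the resulting crop would be passed on to the LPRNet decoder.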