├── README.md ├── UI_file.py ├── config └── config.ini ├── lib └── yolo │ ├── coco.names │ └── yolov3.cfg ├── main.py ├── opencv_ffmpeg410_64.dll └── png └── demo.png /README.md: -------------------------------------------------------------------------------- 1 | 2 | DEMO 3 | # get started: 4 | 1. PyQt5, 3.3以上的cv2 ,hyperlpr 5 | 2. 暂时不提供车型识别与颜色分类的模型 6 | 3. 下载 https://pjreddie.com/media/files/yolov3.weights ,并保存到yolo 目录下 7 | 8 | # INTRO 9 | ![界面预览](https://github.com/PT123123/Vehicle-recognition-system/blob/master/png/demo.png) 10 | 模型采用opencv DNN模块读取,所以确认你安装了含有DNN模块版本(3.3以上)的cv2 11 | 12 | 1. 车辆定位采用darknet yolov3在coco数据集上的预训练模型 13 | 2. 车牌识别采用开源的hyperlpr: 14 | https://github.com/zeusees/HyperLPR 15 | 3. 视频播放界面基础: 16 | https://github.com/fengtangzheng/pyqt5-opencv-video 17 | 18 | # ~~TODO~~ 19 | ~~连接KNN做颜色识别~~ 20 | -------------------------------------------------------------------------------- /UI_file.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PT123123/Vehicle-recognition-system/c57c83c98df8f3a6cae8eeb32ec85b4748fb6b3c/UI_file.py -------------------------------------------------------------------------------- /config/config.ini: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PT123123/Vehicle-recognition-system/c57c83c98df8f3a6cae8eeb32ec85b4748fb6b3c/config/config.ini -------------------------------------------------------------------------------- /lib/yolo/coco.names: -------------------------------------------------------------------------------- 1 | person 2 | bicycle 3 | car 4 | motorbike 5 | aeroplane 6 | bus 7 | train 8 | truck 9 | boat 10 | traffic light 11 | fire hydrant 12 | stop sign 13 | parking meter 14 | bench 15 | bird 16 | cat 17 | dog 18 | horse 19 | sheep 20 | cow 21 | elephant 22 | bear 23 | zebra 24 | giraffe 25 | backpack 26 | umbrella 27 | handbag 28 | tie 29 | suitcase 30 | frisbee 31 | skis 32 | snowboard 33 | sports ball 34 | kite 35 | baseball bat 36 | baseball glove 37 | skateboard 38 | surfboard 39 | tennis racket 40 | bottle 41 | wine glass 42 | cup 43 | fork 44 | knife 45 | spoon 46 | bowl 47 | banana 48 | apple 49 | sandwich 50 | orange 51 | broccoli 52 | carrot 53 | hot dog 54 | pizza 55 | donut 56 | cake 57 | chair 58 | sofa 59 | pottedplant 60 | bed 61 | diningtable 62 | toilet 63 | tvmonitor 64 | laptop 65 | mouse 66 | remote 67 | keyboard 68 | cell phone 69 | microwave 70 | oven 71 | toaster 72 | sink 73 | refrigerator 74 | book 75 | clock 76 | vase 77 | scissors 78 | teddy bear 79 | hair drier 80 | toothbrush 81 | -------------------------------------------------------------------------------- /lib/yolo/yolov3.cfg: -------------------------------------------------------------------------------- 1 | [net] 2 | # Testing 3 | batch=1 4 | subdivisions=1 5 | # Training 6 | # batch=64 7 | # subdivisions=16 8 | width=416 9 | height=416 10 | channels=3 11 | momentum=0.9 12 | decay=0.0005 13 | angle=0 14 | saturation = 1.5 15 | exposure = 1.5 16 | hue=.1 17 | 18 | learning_rate=0.001 19 | burn_in=1000 20 | max_batches = 500200 21 | policy=steps 22 | steps=400000,450000 23 | scales=.1,.1 24 | 25 | [convolutional] 26 | batch_normalize=1 27 | filters=32 28 | size=3 29 | stride=1 30 | pad=1 31 | activation=leaky 32 | 33 | # Downsample 34 | 35 | [convolutional] 36 | batch_normalize=1 37 | filters=64 38 | size=3 39 | stride=2 40 | pad=1 41 | activation=leaky 42 | 43 | [convolutional] 44 | batch_normalize=1 45 | filters=32 46 | size=1 47 | stride=1 48 | pad=1 49 | activation=leaky 50 | 51 | [convolutional] 52 | batch_normalize=1 53 | filters=64 54 | size=3 55 | stride=1 56 | pad=1 57 | activation=leaky 58 | 59 | [shortcut] 60 | from=-3 61 | activation=linear 62 | 63 | # Downsample 64 | 65 | [convolutional] 66 | batch_normalize=1 67 | filters=128 68 | size=3 69 | stride=2 70 | pad=1 71 | activation=leaky 72 | 73 | [convolutional] 74 | batch_normalize=1 75 | filters=64 76 | size=1 77 | stride=1 78 | pad=1 79 | activation=leaky 80 | 81 | [convolutional] 82 | batch_normalize=1 83 | filters=128 84 | size=3 85 | stride=1 86 | pad=1 87 | activation=leaky 88 | 89 | [shortcut] 90 | from=-3 91 | activation=linear 92 | 93 | [convolutional] 94 | batch_normalize=1 95 | filters=64 96 | size=1 97 | stride=1 98 | pad=1 99 | activation=leaky 100 | 101 | [convolutional] 102 | batch_normalize=1 103 | filters=128 104 | size=3 105 | stride=1 106 | pad=1 107 | activation=leaky 108 | 109 | [shortcut] 110 | from=-3 111 | activation=linear 112 | 113 | # Downsample 114 | 115 | [convolutional] 116 | batch_normalize=1 117 | filters=256 118 | size=3 119 | stride=2 120 | pad=1 121 | activation=leaky 122 | 123 | [convolutional] 124 | batch_normalize=1 125 | filters=128 126 | size=1 127 | stride=1 128 | pad=1 129 | activation=leaky 130 | 131 | [convolutional] 132 | batch_normalize=1 133 | filters=256 134 | size=3 135 | stride=1 136 | pad=1 137 | activation=leaky 138 | 139 | [shortcut] 140 | from=-3 141 | activation=linear 142 | 143 | [convolutional] 144 | batch_normalize=1 145 | filters=128 146 | size=1 147 | stride=1 148 | pad=1 149 | activation=leaky 150 | 151 | [convolutional] 152 | batch_normalize=1 153 | filters=256 154 | size=3 155 | stride=1 156 | pad=1 157 | activation=leaky 158 | 159 | [shortcut] 160 | from=-3 161 | activation=linear 162 | 163 | [convolutional] 164 | batch_normalize=1 165 | filters=128 166 | size=1 167 | stride=1 168 | pad=1 169 | activation=leaky 170 | 171 | [convolutional] 172 | batch_normalize=1 173 | filters=256 174 | size=3 175 | stride=1 176 | pad=1 177 | activation=leaky 178 | 179 | [shortcut] 180 | from=-3 181 | activation=linear 182 | 183 | [convolutional] 184 | batch_normalize=1 185 | filters=128 186 | size=1 187 | stride=1 188 | pad=1 189 | activation=leaky 190 | 191 | [convolutional] 192 | batch_normalize=1 193 | filters=256 194 | size=3 195 | stride=1 196 | pad=1 197 | activation=leaky 198 | 199 | [shortcut] 200 | from=-3 201 | activation=linear 202 | 203 | 204 | [convolutional] 205 | batch_normalize=1 206 | filters=128 207 | size=1 208 | stride=1 209 | pad=1 210 | activation=leaky 211 | 212 | [convolutional] 213 | batch_normalize=1 214 | filters=256 215 | size=3 216 | stride=1 217 | pad=1 218 | activation=leaky 219 | 220 | [shortcut] 221 | from=-3 222 | activation=linear 223 | 224 | [convolutional] 225 | batch_normalize=1 226 | filters=128 227 | size=1 228 | stride=1 229 | pad=1 230 | activation=leaky 231 | 232 | [convolutional] 233 | batch_normalize=1 234 | filters=256 235 | size=3 236 | stride=1 237 | pad=1 238 | activation=leaky 239 | 240 | [shortcut] 241 | from=-3 242 | activation=linear 243 | 244 | [convolutional] 245 | batch_normalize=1 246 | filters=128 247 | size=1 248 | stride=1 249 | pad=1 250 | activation=leaky 251 | 252 | [convolutional] 253 | batch_normalize=1 254 | filters=256 255 | size=3 256 | stride=1 257 | pad=1 258 | activation=leaky 259 | 260 | [shortcut] 261 | from=-3 262 | activation=linear 263 | 264 | [convolutional] 265 | batch_normalize=1 266 | filters=128 267 | size=1 268 | stride=1 269 | pad=1 270 | activation=leaky 271 | 272 | [convolutional] 273 | batch_normalize=1 274 | filters=256 275 | size=3 276 | stride=1 277 | pad=1 278 | activation=leaky 279 | 280 | [shortcut] 281 | from=-3 282 | activation=linear 283 | 284 | # Downsample 285 | 286 | [convolutional] 287 | batch_normalize=1 288 | filters=512 289 | size=3 290 | stride=2 291 | pad=1 292 | activation=leaky 293 | 294 | [convolutional] 295 | batch_normalize=1 296 | filters=256 297 | size=1 298 | stride=1 299 | pad=1 300 | activation=leaky 301 | 302 | [convolutional] 303 | batch_normalize=1 304 | filters=512 305 | size=3 306 | stride=1 307 | pad=1 308 | activation=leaky 309 | 310 | [shortcut] 311 | from=-3 312 | activation=linear 313 | 314 | 315 | [convolutional] 316 | batch_normalize=1 317 | filters=256 318 | size=1 319 | stride=1 320 | pad=1 321 | activation=leaky 322 | 323 | [convolutional] 324 | batch_normalize=1 325 | filters=512 326 | size=3 327 | stride=1 328 | pad=1 329 | activation=leaky 330 | 331 | [shortcut] 332 | from=-3 333 | activation=linear 334 | 335 | 336 | [convolutional] 337 | batch_normalize=1 338 | filters=256 339 | size=1 340 | stride=1 341 | pad=1 342 | activation=leaky 343 | 344 | [convolutional] 345 | batch_normalize=1 346 | filters=512 347 | size=3 348 | stride=1 349 | pad=1 350 | activation=leaky 351 | 352 | [shortcut] 353 | from=-3 354 | activation=linear 355 | 356 | 357 | [convolutional] 358 | batch_normalize=1 359 | filters=256 360 | size=1 361 | stride=1 362 | pad=1 363 | activation=leaky 364 | 365 | [convolutional] 366 | batch_normalize=1 367 | filters=512 368 | size=3 369 | stride=1 370 | pad=1 371 | activation=leaky 372 | 373 | [shortcut] 374 | from=-3 375 | activation=linear 376 | 377 | [convolutional] 378 | batch_normalize=1 379 | filters=256 380 | size=1 381 | stride=1 382 | pad=1 383 | activation=leaky 384 | 385 | [convolutional] 386 | batch_normalize=1 387 | filters=512 388 | size=3 389 | stride=1 390 | pad=1 391 | activation=leaky 392 | 393 | [shortcut] 394 | from=-3 395 | activation=linear 396 | 397 | 398 | [convolutional] 399 | batch_normalize=1 400 | filters=256 401 | size=1 402 | stride=1 403 | pad=1 404 | activation=leaky 405 | 406 | [convolutional] 407 | batch_normalize=1 408 | filters=512 409 | size=3 410 | stride=1 411 | pad=1 412 | activation=leaky 413 | 414 | [shortcut] 415 | from=-3 416 | activation=linear 417 | 418 | 419 | [convolutional] 420 | batch_normalize=1 421 | filters=256 422 | size=1 423 | stride=1 424 | pad=1 425 | activation=leaky 426 | 427 | [convolutional] 428 | batch_normalize=1 429 | filters=512 430 | size=3 431 | stride=1 432 | pad=1 433 | activation=leaky 434 | 435 | [shortcut] 436 | from=-3 437 | activation=linear 438 | 439 | [convolutional] 440 | batch_normalize=1 441 | filters=256 442 | size=1 443 | stride=1 444 | pad=1 445 | activation=leaky 446 | 447 | [convolutional] 448 | batch_normalize=1 449 | filters=512 450 | size=3 451 | stride=1 452 | pad=1 453 | activation=leaky 454 | 455 | [shortcut] 456 | from=-3 457 | activation=linear 458 | 459 | # Downsample 460 | 461 | [convolutional] 462 | batch_normalize=1 463 | filters=1024 464 | size=3 465 | stride=2 466 | pad=1 467 | activation=leaky 468 | 469 | [convolutional] 470 | batch_normalize=1 471 | filters=512 472 | size=1 473 | stride=1 474 | pad=1 475 | activation=leaky 476 | 477 | [convolutional] 478 | batch_normalize=1 479 | filters=1024 480 | size=3 481 | stride=1 482 | pad=1 483 | activation=leaky 484 | 485 | [shortcut] 486 | from=-3 487 | activation=linear 488 | 489 | [convolutional] 490 | batch_normalize=1 491 | filters=512 492 | size=1 493 | stride=1 494 | pad=1 495 | activation=leaky 496 | 497 | [convolutional] 498 | batch_normalize=1 499 | filters=1024 500 | size=3 501 | stride=1 502 | pad=1 503 | activation=leaky 504 | 505 | [shortcut] 506 | from=-3 507 | activation=linear 508 | 509 | [convolutional] 510 | batch_normalize=1 511 | filters=512 512 | size=1 513 | stride=1 514 | pad=1 515 | activation=leaky 516 | 517 | [convolutional] 518 | batch_normalize=1 519 | filters=1024 520 | size=3 521 | stride=1 522 | pad=1 523 | activation=leaky 524 | 525 | [shortcut] 526 | from=-3 527 | activation=linear 528 | 529 | [convolutional] 530 | batch_normalize=1 531 | filters=512 532 | size=1 533 | stride=1 534 | pad=1 535 | activation=leaky 536 | 537 | [convolutional] 538 | batch_normalize=1 539 | filters=1024 540 | size=3 541 | stride=1 542 | pad=1 543 | activation=leaky 544 | 545 | [shortcut] 546 | from=-3 547 | activation=linear 548 | 549 | ###################### 550 | 551 | [convolutional] 552 | batch_normalize=1 553 | filters=512 554 | size=1 555 | stride=1 556 | pad=1 557 | activation=leaky 558 | 559 | [convolutional] 560 | batch_normalize=1 561 | size=3 562 | stride=1 563 | pad=1 564 | filters=1024 565 | activation=leaky 566 | 567 | [convolutional] 568 | batch_normalize=1 569 | filters=512 570 | size=1 571 | stride=1 572 | pad=1 573 | activation=leaky 574 | 575 | [convolutional] 576 | batch_normalize=1 577 | size=3 578 | stride=1 579 | pad=1 580 | filters=1024 581 | activation=leaky 582 | 583 | [convolutional] 584 | batch_normalize=1 585 | filters=512 586 | size=1 587 | stride=1 588 | pad=1 589 | activation=leaky 590 | 591 | [convolutional] 592 | batch_normalize=1 593 | size=3 594 | stride=1 595 | pad=1 596 | filters=1024 597 | activation=leaky 598 | 599 | [convolutional] 600 | size=1 601 | stride=1 602 | pad=1 603 | filters=255 604 | activation=linear 605 | 606 | 607 | [yolo] 608 | mask = 6,7,8 609 | anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326 610 | classes=80 611 | num=9 612 | jitter=.3 613 | ignore_thresh = .5 614 | truth_thresh = 1 615 | random=1 616 | 617 | 618 | [route] 619 | layers = -4 620 | 621 | [convolutional] 622 | batch_normalize=1 623 | filters=256 624 | size=1 625 | stride=1 626 | pad=1 627 | activation=leaky 628 | 629 | [upsample] 630 | stride=2 631 | 632 | [route] 633 | layers = -1, 61 634 | 635 | 636 | 637 | [convolutional] 638 | batch_normalize=1 639 | filters=256 640 | size=1 641 | stride=1 642 | pad=1 643 | activation=leaky 644 | 645 | [convolutional] 646 | batch_normalize=1 647 | size=3 648 | stride=1 649 | pad=1 650 | filters=512 651 | activation=leaky 652 | 653 | [convolutional] 654 | batch_normalize=1 655 | filters=256 656 | size=1 657 | stride=1 658 | pad=1 659 | activation=leaky 660 | 661 | [convolutional] 662 | batch_normalize=1 663 | size=3 664 | stride=1 665 | pad=1 666 | filters=512 667 | activation=leaky 668 | 669 | [convolutional] 670 | batch_normalize=1 671 | filters=256 672 | size=1 673 | stride=1 674 | pad=1 675 | activation=leaky 676 | 677 | [convolutional] 678 | batch_normalize=1 679 | size=3 680 | stride=1 681 | pad=1 682 | filters=512 683 | activation=leaky 684 | 685 | [convolutional] 686 | size=1 687 | stride=1 688 | pad=1 689 | filters=255 690 | activation=linear 691 | 692 | 693 | [yolo] 694 | mask = 3,4,5 695 | anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326 696 | classes=80 697 | num=9 698 | jitter=.3 699 | ignore_thresh = .5 700 | truth_thresh = 1 701 | random=1 702 | 703 | 704 | 705 | [route] 706 | layers = -4 707 | 708 | [convolutional] 709 | batch_normalize=1 710 | filters=128 711 | size=1 712 | stride=1 713 | pad=1 714 | activation=leaky 715 | 716 | [upsample] 717 | stride=2 718 | 719 | [route] 720 | layers = -1, 36 721 | 722 | 723 | 724 | [convolutional] 725 | batch_normalize=1 726 | filters=128 727 | size=1 728 | stride=1 729 | pad=1 730 | activation=leaky 731 | 732 | [convolutional] 733 | batch_normalize=1 734 | size=3 735 | stride=1 736 | pad=1 737 | filters=256 738 | activation=leaky 739 | 740 | [convolutional] 741 | batch_normalize=1 742 | filters=128 743 | size=1 744 | stride=1 745 | pad=1 746 | activation=leaky 747 | 748 | [convolutional] 749 | batch_normalize=1 750 | size=3 751 | stride=1 752 | pad=1 753 | filters=256 754 | activation=leaky 755 | 756 | [convolutional] 757 | batch_normalize=1 758 | filters=128 759 | size=1 760 | stride=1 761 | pad=1 762 | activation=leaky 763 | 764 | [convolutional] 765 | batch_normalize=1 766 | size=3 767 | stride=1 768 | pad=1 769 | filters=256 770 | activation=leaky 771 | 772 | [convolutional] 773 | size=1 774 | stride=1 775 | pad=1 776 | filters=255 777 | activation=linear 778 | 779 | 780 | [yolo] 781 | mask = 0,1,2 782 | anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326 783 | classes=80 784 | num=9 785 | jitter=.3 786 | ignore_thresh = .5 787 | truth_thresh = 1 788 | random=1 789 | 790 | -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | #coding=utf-8 2 | #created by pt,2019-3-18 16:31:56, 3 | import os 4 | import sys 5 | import cv2 6 | from lib.hyperlpr import hyperlpr#this is new plate recognition function package 7 | from PyQt5.QtWidgets import QFileDialog,QMessageBox,QStyle 8 | from PyQt5.QtGui import QImage,QPixmap 9 | from PyQt5.QtCore import QObject,pyqtSignal,QThread,QMutex,QMutexLocker,Qt 10 | from UI_file import * 11 | import numpy as np 12 | import time 13 | import threading 14 | from queue import Queue 15 | import xlwt 16 | import ctypes 17 | CURPATH=os.path.dirname(os.path.realpath(__file__)) 18 | try: 19 | temp = ctypes.windll.LoadLibrary(os.path.join(CURPATH,'opencv_ffmpeg410_64.dll')) 20 | except: 21 | print('opencv') 22 | color_q=Queue()#used to pass the result of color 23 | plate_q=Queue()#used to pass the result of plate recognition 24 | type_q=Queue()#same as above 25 | pinpai_q=Queue() 26 | pinpai_img_q=Queue()# used to pass the cv2-format img between threads 27 | im_q=Queue() 28 | img_car_q=Queue() 29 | to_color_q=Queue() 30 | time_q=Queue() 31 | 32 | even_yolo=threading.Event()# to synchronize between tasks 33 | even_model=threading.Event() 34 | even_color=threading.Event() 35 | even_license=threading.Event() 36 | ########################################################################################## 37 | 38 | def compute_IOU(rec1,rec2): 39 | left_column_max = max(rec1[0],rec2[0]) 40 | right_column_min = min(rec1[2],rec2[2]) 41 | up_row_max = max(rec1[1],rec2[1]) 42 | down_row_min = min(rec1[3],rec2[3]) 43 | #两矩形无相交区域的情况 44 | if left_column_max>=right_column_min or down_row_min<=up_row_max: 45 | return 0 46 | # 两矩形有相交区域的情况 47 | else: 48 | S1 = (rec1[2]-rec1[0])*(rec1[3]-rec1[1]) 49 | S2 = (rec2[2]-rec2[0])*(rec2[3]-rec2[1]) 50 | S_cross = (down_row_min-up_row_max)*(right_column_min-left_column_max) 51 | return S_cross/(S1+S2-S_cross) 52 | 53 | 54 | def transfer_time_format(str): 55 | if "_" in str: 56 | _ = str.split("_") 57 | return _[0]+"时"+_[1]+"分"+_[2]+"秒"+_[3]+"年"+_[4]+"月"+_[5]+"日" 58 | else: 59 | #print(str.replace("年","_").replace("月","_").replace("日","").replace("时","_").replace("分","_").replace("秒","_")) 60 | return str.replace("年","_").replace("月","_").replace("日","").replace("时","_").replace("分","_").replace("秒","_") 61 | #https://www.jianshu.com/p/7b6a80faf33f 62 | class MODEL_thread(threading.Thread): 63 | def __init__(self): 64 | threading.Thread.__init__(self) 65 | #model_thread_running used to stop the thread when needed 66 | self.model_thread_running=True 67 | #self.daemon = True 68 | ''' 69 | self.classes=('background','richan','jipu','benchi','bentian','fengtian','aodi','wuling','baoma','dazhong', 70 | 'qirui','xuefulan','lufeng','futian','mazida','dongfeng','sibalu','biaozhi','jianghuai', 71 | 'xiandai','ouge','tianye','changcheng','changan','zhongtai','qiya','sanling','bieke', 72 | 'xuetielong','changhe','sikeda','haima','biyadi','shuanglong','lingmu','guangqijiao', 73 | 'woerwo','yiweike','zhonghua','feiyate','changanshangyong','mingjue','dongnan','jinbei', 74 | 'baoshijie','lifan','leikesasi','yiqi','luhu','fute','rongwei','leinuo','fudi','kairui', 75 | 'qichen','datong','jiebao','hafu','lianhua','kaidilake','jiangling','huanghai','kelaisilei', 76 | 'baojun','yingfeinidi','hafei','mini') 77 | self.modelPATH=[os.path.join(CURPATH,'lib/model/deploy.prototxt'), 78 | os.path.join(CURPATH,'lib/model/VGG_VOC2007_SSD_300x300_iter_20710.caffemodel')] 79 | self.model=cv2.dnn.readNetFromCaffe(*self.modelPATH) 80 | '''#暂不提供车牌功能,注释掉 81 | def run(self): 82 | global what_pt_want 83 | print("pinpai functionality loaded successfully-------------------------------------") 84 | while self.model_thread_running: 85 | even_model.wait() 86 | confid_result=[] 87 | class_result=[] 88 | time.sleep(1.0)#buffer 89 | if not pinpai_img_q.empty(): 90 | img=pinpai_img_q.get() 91 | else: 92 | nd.settable('ERROR',False,False,False,False) 93 | continue 94 | nd.settable('NOT IMPLEMENTED',False,False,False,False)#added 95 | ''' 96 | height=img.shape[0] 97 | width=img.shape[1] 98 | blob =cv2.dnn.blobFromImage(img,1.0,(300,300),(104,117,123),False,False) 99 | self.model.setInput(blob) 100 | detections=self.model.forward() 101 | 102 | for i in np.arange(0, detections.shape[2]): 103 | # extract the confidence 104 | confidence = detections[0, 0, i, 2] 105 | if confidence > 0.1:#confidence 106 | # extract index of class label 107 | idx = int(detections[0, 0, i, 1]) 108 | box = detections[0, 0, i, 3:7] * np.array([width, height, width, height]) 109 | (startX, startY, endX, endY) = box.astype("int") 110 | class_result.append(self.classes[idx]) 111 | confid_result.append(confidence* 100) 112 | if not len(confid_result) == 0: 113 | location=confid_result.index(max(confid_result)) 114 | nd.settable(class_result[location],False,False,False,False) 115 | else: 116 | nd.settable('NONE',False,False,False,False) 117 | even_model.clear() 118 | ''' 119 | def close(self): 120 | pass 121 | 122 | class YOLO_thread(threading.Thread): 123 | def __init__(self): 124 | threading.Thread.__init__(self) 125 | self.yolo_thread_running=True 126 | #self.daemon = True 127 | weightsPath=os.path.join(CURPATH,'lib/yolo/yolov3.weights') 128 | configPath=os.path.join(CURPATH,'lib/yolo/yolov3.cfg') 129 | labelsPath = os.path.join(CURPATH,'lib/yolo/coco.names') 130 | self.LABELS = open(labelsPath).read().strip().split("\n") 131 | self.net = cv2.dnn.readNetFromDarknet(configPath, weightsPath) 132 | self.BOUNDING = [0.300,0.600,0.300,0.600] 133 | self.IOU=0.1 134 | def run(self): 135 | global what_pt_want 136 | global yoloimg 137 | print('yolo done') 138 | while self.yolo_thread_running: 139 | even_yolo.wait() 140 | #emptify if queue is not empty 141 | if not pinpai_q.empty(): 142 | pinpai_q.get() 143 | if not color_q.empty(): 144 | color_q.get() 145 | if not plate_q.empty(): 146 | plate_q.get() 147 | if not type_q.empty(): 148 | type_q.get() 149 | ############################################################### 150 | confid_result=[] 151 | class_result=[] 152 | classIDs=[] 153 | if im_q.empty(): 154 | time.sleep(0.5) 155 | continue 156 | img = cv2.cvtColor(im_q.get(), cv2.COLOR_RGB2BGR) 157 | (H, W) = img.shape[:2] 158 | ln = self.net.getLayerNames() 159 | ln = [ln[i[0] - 1] for i in self.net.getUnconnectedOutLayers()] 160 | blob = cv2.dnn.blobFromImage(img, 1 / 255.0, (416, 416),swapRB=True, crop=False) 161 | self.net.setInput(blob) 162 | layerOutputs = self.net.forward(ln) 163 | boxes=[] 164 | confidences=[] 165 | for output in layerOutputs: 166 | # 对每个检测进行循环 167 | for detection in output: 168 | scores = detection[5:] 169 | classID = np.argmax(scores) 170 | if self.LABELS[classID] not in ('truck','car','bus','train'):#'motorbike' 171 | continue 172 | confidence = scores[classID] 173 | if confidence > 0.5: 174 | box = detection[0:4] * np.array([W, H, W, H]) 175 | (centerX, centerY, width, height) = box.astype("int") 176 | #边框的左上角 177 | if centerX<0 or centerY<0 or width<0 or height<0: 178 | continue 179 | x = int(centerX - (width / 2)) 180 | y = int(centerY - (height / 2)) 181 | boxes.append([x, y, int(width), int(height)]) 182 | confidences.append(float(confidence)) 183 | classIDs.append(classID) 184 | # 极大值抑制 185 | idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.2,0.3) 186 | if len(idxs) > 0: 187 | for i in idxs.flatten(): 188 | (x, y) = (boxes[i][0], boxes[i][1]) 189 | (w, h) = (boxes[i][2], boxes[i][3]) 190 | #, confidences[i] 191 | type_q.put(self.LABELS[classIDs[i]]) 192 | rec_=[W*self.BOUNDING[0],H*self.BOUNDING[2],W*self.BOUNDING[1],H*self.BOUNDING[3]] 193 | rec_car=[x,y,x+w,y+h] 194 | IOU = compute_IOU(rec_,rec_car) 195 | print(IOU) 196 | expression1=True 197 | expression2=IOU>self.IOU 198 | if expression1: 199 | if expression2: 200 | nd.settable(False,False,self.LABELS[classID],False,False) 201 | img_crop=img[y:y+h,x:x+w] 202 | ################################ 203 | save_time = time.strftime('%H{h}%M{f}%S{s}%Y{y}%m{m}%d{d}').format(y='年', m='月', d='日', h='时', f='分', s='秒') 204 | flag_=cv2.imwrite(os.path.join(CURPATH,'archives/{}.jpg'.format(transfer_time_format(save_time))),img_crop)#save vehicle image by name of time 205 | if not flag_: 206 | print('图片保存失败') 207 | nd.settable(False,False,False,False,save_time) 208 | ######################### 209 | pinpai_img_q.put(img_crop) 210 | to_color_q.put(img_crop)#img_crop is a PIL format image 211 | img_car_q.put(img_crop) 212 | if not even_model.is_set(): 213 | even_model.set() 214 | even_color.set() 215 | even_license.set() 216 | even_yolo.clear() 217 | break#tem 218 | def close(self): 219 | self.yolo_thread_running=False 220 | print('closing yolo session') 221 | 222 | 223 | 224 | class LICENSE_thread(threading.Thread): 225 | def __init__(self): 226 | threading.Thread.__init__(self) 227 | self.license_thread_running=True 228 | #self.daemon = True 229 | def run(self): 230 | PATH=os.path.dirname(hyperlpr.__file__)#CURPATH,'lib/hyperlpr/models') 231 | PATH=os.path.join(PATH,'models') 232 | PR = hyperlpr.LPR(PATH) 233 | def HyperLPR_PlateRecogntion(Input_BGR,minSize=30,charSelectionDeskew=True): 234 | return PR.plateRecognition(Input_BGR,minSize,charSelectionDeskew) 235 | #so called warm up 236 | license_warmup=np.zeros([400,400,3],np.uint8) 237 | HyperLPR_PlateRecogntion(license_warmup) 238 | del license_warmup 239 | while self.license_thread_running: 240 | even_license.wait() 241 | image=img_car_q.get() 242 | tem=HyperLPR_PlateRecogntion(image) 243 | plate="".join('%s' %id for id in tem) 244 | if plate!='': 245 | nd.settable(False,plate.split(',')[0].replace('[',''),False,False,False) 246 | else: 247 | nd.settable(False,'NONE',False,False,False) 248 | def close(self): 249 | self.license_thread_running=False 250 | print('closing license') 251 | 252 | 253 | class COLOR_thread(threading.Thread): 254 | #thread-2 255 | def __init__(self): 256 | threading.Thread.__init__(self) 257 | self.color_thread_running=True 258 | ''' 259 | #self.daemon = True 260 | #modelRecognitionPath = [os.path.join(CURPATH,'lib/color_model/moxing/deploy.prototxt'), 261 | # os.path.join(CURPATH,'lib/color_model/moxing/color.caffemodel')] #color_train_iter_20000.caffemodel"] 262 | #self.modelRecognition = cv2.dnn.readNetFromCaffe(*modelRecognitionPath) 263 | #self.color = ('brown','grey','white','pink','purple','red','green','blue','yellow','black') 264 | '''#颜色模型已去除,暂不提供,请fork后自行DIY 265 | def run(self): 266 | while self.color_thread_running: 267 | even_color.wait() 268 | img_crop=to_color_q.get() 269 | nd.settable(False,False,False,'NOT IMPLEMENTED',False) 270 | try: 271 | blob = cv2.dnn.blobFromImage(cv2.cvtColor(np.asarray(img_crop),cv2.COLOR_RGB2HSV), 1.0, (227, 227),(81.5,48.5,102.4), False, False) 272 | self.modelRecognition.setInput(blob) 273 | a = np.asarray(self.modelRecognition.forward()[0]) 274 | nd.settable(False,False,False,self.color[np.argmax(a)],False) 275 | except: 276 | nd.settable(False,False,False,'failed',False) 277 | def close(self): 278 | self.color_thread_running=False 279 | print('color closed') 280 | 281 | class mywindow(Ui_Dialog): 282 | global what_pt_want 283 | status = 0 284 | video_type = 0 285 | TYPE_VIDEO = 0 286 | TYPE_CAMERA = 1 287 | STATUS_INIT = 0 288 | STATUS_PLAYING = 1 289 | STATUS_PAUSE = 2 290 | CAMID='qq' 291 | BEFORE='0' 292 | global pt_video_counter 293 | def __init__(self): 294 | super(mywindow,self).__init__() 295 | self.setupUi(self) 296 | ############################ 297 | self.yolo_thread=YOLO_thread() 298 | self.license_thread=LICENSE_thread() 299 | self.model_thread=MODEL_thread() 300 | self.color_thread=COLOR_thread() 301 | self.color_thread.start() 302 | self.yolo_thread.start() 303 | self.license_thread.start() 304 | self.model_thread.start() 305 | ########################### 306 | self.pushButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay)) 307 | self.line_counter=0 308 | self.table_dict={} 309 | self.table_dict['time']=Queue() 310 | self.table_dict['pinpai']=Queue() 311 | self.table_dict['color']=Queue() 312 | self.table_dict['plate']=Queue() 313 | self.table_dict['type']=Queue() 314 | self.timer = VideoTimer() 315 | self.timer.timeSignal.signal[str].connect(self.show_video_images)#once timer emit a signal,run show_video_images() 316 | self.INTERVAL = 1 317 | self.VIS_RECGTANGLE = 0 318 | def openimage(self): 319 | global pt_video_counter 320 | self.video_type=self.TYPE_VIDEO 321 | self.reset() 322 | if 'self.playCapture' in locals() or 'self.playCapture' in globals(): 323 | self.playCapture.release() 324 | imgName,imgType= QFileDialog.getOpenFileName(self,"open the image",""," All Files (*);;*.asf;;*.mp4;;*.mpg;;*.avi") 325 | global what_pt_want 326 | what_pt_want=imgName 327 | if 'what_pt_want' in locals() or 'what_pt_want' in globals(): 328 | pass 329 | if what_pt_want == "" or what_pt_want is None: 330 | return 331 | else: 332 | self.pushButton.setEnabled(True) 333 | self.playCapture = cv2.VideoCapture() 334 | pt_video_counter=1 335 | self.playCapture.open(what_pt_want) 336 | fps = self.playCapture.get(cv2.CAP_PROP_FPS)#used to be cv2.CAP_PROP_FPS 337 | if fps ==0: 338 | QMessageBox.warning(self,'error','fps不能为0') 339 | return 340 | else: 341 | self.timer.set_fps(fps) 342 | self.timer.start() 343 | #self.timer.stopped=False 344 | self.pushButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPause)) 345 | self.status = (self.STATUS_PLAYING,self.STATUS_PAUSE,self.STATUS_PLAYING)[self.status] 346 | else: 347 | return 348 | 349 | def settable_function(self,type_signal,result): 350 | self.table_dict[type_signal].put(result) 351 | #print(type_signal,' : ',self.table_dict[type_signal].qsize()) 352 | if not (self.table_dict['time'].empty() or self.table_dict['type'].empty() or self.table_dict['color'].empty() 353 | or self.table_dict['plate'].empty() or self.table_dict['pinpai'].empty()): 354 | pinpai=self.table_dict['pinpai'].get() 355 | 356 | plate=self.table_dict['plate'].get() 357 | time= self.table_dict['time'].get() 358 | type= self.table_dict['type'].get() 359 | color=self.table_dict['color'].get() 360 | if self.BEFORE == plate.strip(): 361 | return#6_13.P.M. 362 | else: 363 | pass 364 | #print(plate.strip()+' '+self.BEFORE) 365 | if not pinpai ==plate: 366 | self.tableWidget.insertRow(self.line_counter) 367 | self.tableWidget.setItem(self.line_counter,0,QTableWidgetItem(time)) 368 | self.tableWidget.setItem(self.line_counter,1,QTableWidgetItem(pinpai)) 369 | self.tableWidget.setItem(self.line_counter,2,QTableWidgetItem(color)) 370 | self.tableWidget.setItem(self.line_counter,3,QTableWidgetItem(type)) 371 | self.tableWidget.setItem(self.line_counter,4,QTableWidgetItem(plate)) 372 | self.line_counter+=1 373 | self.tableWidget.verticalScrollBar().setValue(self.line_counter) 374 | self.BEFORE=plate.strip() 375 | 376 | 377 | def reset(self): 378 | self.timer.stopped=True 379 | if 'self.playCapture' in locals() or 'self.playCapture' in globals(): 380 | self.playCapture.release() 381 | self.status = self.STATUS_INIT 382 | self.pushButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay)) 383 | 384 | def show_video_images(self): 385 | '''it detected a car of interst in wanted region,after inference,run this function''' 386 | #if video is open successfully,read it 387 | if self.playCapture.isOpened(): 388 | success, frame = self.playCapture.read() 389 | #,frame is a ndarray,frame.shape index 0 and 1 stand for height and width 390 | if success: 391 | if self.VIS_RECGTANGLE: 392 | cv2.rectangle(frame,(int(frame.shape[1]*self.yolo_thread.BOUNDING[0]),int(frame.shape[0]*self.yolo_thread.BOUNDING[2])), 393 | (int(frame.shape[1]*self.yolo_thread.BOUNDING[1]),int(frame.shape[0]*self.yolo_thread.BOUNDING[3])),(0,255,0),3) 394 | global pt_video_counter 395 | frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) 396 | height, width = frame.shape[:2] 397 | temp_image = QImage(frame.flatten(), width, height,QImage.Format_RGB888) 398 | temp_pixmap = QPixmap.fromImage(temp_image) 399 | temp_pixmap=temp_pixmap.scaled(self.graphicsView.width(),self.graphicsView.height()) 400 | self.graphicsView.setPixmap(temp_pixmap) 401 | pt_video_counter+=1 402 | if (pt_video_counter%(int)(self.timer.frequent*self.INTERVAL)==0):#INTERVAL 403 | if True:#not even_yolo.is_set(): 404 | if True:#not even_model.is_set(): 405 | im_q.put(frame)#6_16,modified 406 | even_yolo.set() 407 | else: 408 | print("read failed, no frame data") 409 | self.reset() 410 | 411 | else: 412 | print('end') 413 | self.reset()#open file or capturing device error, init again 414 | 415 | def webcamera(self): 416 | self.reset() 417 | global pt_video_counter 418 | if not self.pushButton_3.isEnabled(): 419 | return 420 | else: 421 | self.pushButton.setEnabled(True) 422 | self.playCapture = cv2.VideoCapture(self.CAMID.strip()) 423 | fps = self.playCapture.get(cv2.CAP_PROP_FPS)#used to be cv2.CAP_PROP_FPS 424 | if fps ==0: 425 | QMessageBox.warning(self,'error','fps不能为0') 426 | return 427 | self.timer.set_fps(fps) 428 | self.video_type = self.TYPE_CAMERA 429 | self.pushButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPause)) 430 | pt_video_counter=1 431 | self.timer.start() 432 | self.status = (self.STATUS_PLAYING,self.STATUS_PAUSE,self.STATUS_PLAYING)[self.status] 433 | 434 | def inquiry(self): 435 | global pt_video_counter 436 | if not self.pushButton.isEnabled(): 437 | return 438 | if self.status is self.STATUS_INIT: 439 | pass 440 | elif self.status is self.STATUS_PLAYING: 441 | self.pushButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay)) 442 | self.timer.stopped=True 443 | if self.video_type is self.TYPE_CAMERA: 444 | self.playCapture.release() 445 | elif self.status is self.STATUS_PAUSE: 446 | self.pushButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPause)) 447 | if self.video_type is self.TYPE_CAMERA: 448 | self.webcamera() 449 | else: 450 | self.timer.start() 451 | self.status = (self.STATUS_PLAYING,self.STATUS_PAUSE,self.STATUS_PLAYING)[self.status] 452 | 453 | def closeEvent(self,event): 454 | reply = QMessageBox.question(self,'warning',"u sure u wanna quit?",QMessageBox.Yes | QMessageBox.No,QMessageBox.No) 455 | if reply == QMessageBox.Yes: 456 | for filepath in os.listdir(os.path.join(CURPATH,'archives')): 457 | os.remove(os.path.join(os.path.join(CURPATH,'archives'),filepath)) 458 | guanbudiaoa 459 | event.accept() 460 | self.reset() 461 | self.yolo_thread.close() 462 | self.model_thread.close() 463 | self.color_thread.close() 464 | self.license_thread.close() 465 | self.timer.wait() 466 | nd.wait() 467 | pass 468 | sys.exit(app.exec_()) 469 | else: 470 | event.ignore() 471 | 472 | def display_table(self): 473 | line=self.tableWidget.currentRow() 474 | value=self.tableWidget.item(line, 0).text() 475 | image_file=os.path.join(CURPATH,'archives/{}.jpg'.format(transfer_time_format(value))) 476 | if not os.path.exists(image_file): 477 | QMessageBox.about(self,'error','图片不存在') 478 | return 479 | result_image=QtGui.QPixmap(image_file).scaled(window.graphicsView_frame.width(), window.graphicsView_frame.height()) 480 | window.graphicsView_frame.setPixmap(result_image) 481 | def export_txt(self): 482 | save_path=QFileDialog.getSaveFileName(self,'save file',CURPATH,'txt(*txt)') 483 | save_path=save_path[0] 484 | if not save_path.endswith('.xls'): 485 | save_path = save_path+'.txt' 486 | try: 487 | predix=' 保存时间:'+time.strftime('%H{h}%M{f}%S{s}%Y{y}%m{m}%d{d}').format(y='年', m='月', d='日', h='时', f='分', s='秒') 488 | if not os.path.exists(os.path.join(CURPATH,save_path)): 489 | predix='%s'%('时间')+ '%45s'%('车标')+'%20s'%('颜色')+'%15s'%('车型')+'%25s'%('车牌')+'\n'+predix 490 | with open(os.path.join(CURPATH,save_path),'a+') as f: 491 | f.write(predix+'\n') 492 | for line in range(self.line_counter): 493 | values=[] 494 | values.append('%20s'%(self.tableWidget.item(line, 0).text())) 495 | values.append('%10s'%(self.tableWidget.item(line, 1).text())) 496 | values.append('%10s'%(self.tableWidget.item(line, 2).text())) 497 | values.append('%10s'%(self.tableWidget.item(line, 3).text())) 498 | values.append('%5s'%(self.tableWidget.item(line, 4).text())) 499 | f.write(' '.join(values)+'\n') 500 | QMessageBox.information(self,'Great!','已成功保存为%s'%(save_path)) 501 | except Exception as e: 502 | print(repr(e)) 503 | fname='error.txt' 504 | predix=' 出错时间:'+time.strftime('%H{h}%M{f}%S{s}%Y{y}%m{m}%d{d}').format(y='年', m='月', d='日', h='时', f='分', s='秒') 505 | with open(fname, 'a+') as f: 506 | f.write('\n'+repr(e)) 507 | QMessageBox.warning(self,'save error!!',' save error message to {}'.format(fname)) 508 | def export(self):#先用.xls格式保存结果 509 | save_path=QFileDialog.getSaveFileName(self,'save file',CURPATH,'xls(*xls)') 510 | save_path=save_path[0] 511 | if not save_path: 512 | return 513 | try: 514 | workbook = xlwt.Workbook(encoding = 'utf-8') 515 | worksheet = workbook.add_sheet('My worksheet',cell_overwrite_ok=True) 516 | if not os.path.exists(os.path.join(CURPATH,save_path)): 517 | pass 518 | _=0 519 | for content_ in ['时间','车标','颜色','车型','车牌']: 520 | worksheet.write(0,_,label=content_) 521 | _+=1 522 | for line in range(self.line_counter): 523 | for _ in range(5): 524 | worksheet.write(line+1, _ ,label=self.tableWidget.item(line,_).text()) 525 | if not save_path.endswith('.xls'): 526 | save_path = save_path+'.xls' 527 | workbook.save(save_path) 528 | except Exception as e: 529 | print(repr(e)) 530 | QMessageBox.warning(self,'保存失败',repr(e)) 531 | return 532 | QMessageBox.information(self,'保存成功',' 已保存到 %s'%(str(save_path))) 533 | 534 | class Communicate(QObject): 535 | signal = pyqtSignal(str) 536 | 537 | #https://blog.csdn.net/qq_34710142/article/details/80936986 538 | class VideoTimer(QThread): 539 | def __init__(self, frequent=100): 540 | QThread.__init__(self) 541 | self.stopped = False 542 | self.frequent = frequent 543 | self.timeSignal = Communicate() 544 | self.mutex = QMutex()#the lock between threads 545 | 546 | def run(self):#method run just play the pix of the video one by one 547 | with QMutexLocker(self.mutex): 548 | self.stopped = False 549 | self.run_() 550 | def run_(self): 551 | while True: 552 | if self.stopped: 553 | return 554 | self.timeSignal.signal.emit("1") 555 | time.sleep(1 / self.frequent) 556 | def set_fps(self, fps): 557 | self.frequent = fps 558 | 559 | class Network_daemon(QThread): 560 | '''daemon thread, function haha is used to display brand, license plate number,color and model''' 561 | trigger_table = pyqtSignal(str,str) 562 | def __int__(self): 563 | super(Network_daemon, self).__init__() 564 | def run(self): 565 | while True: 566 | time.sleep(5) 567 | if not pinpai_img_q.empty(): 568 | if not even_model.is_set(): 569 | even_model.set() 570 | return 571 | def settable(self,pinpaistr,platestr,typestr,colorstr,timestr): 572 | #run the inquiry1 573 | if pinpaistr is not False: 574 | pinpai_q.put(pinpaistr) 575 | self.trigger_table.emit('pinpai',pinpai_q.get()) 576 | if platestr is not False: 577 | plate_q.put(platestr) 578 | self.trigger_table.emit('plate',plate_q.get()) 579 | if typestr is not False: 580 | type_q.put(typestr) 581 | self.trigger_table.emit('type',type_q.get()) 582 | if colorstr is not False: 583 | color_q.put(colorstr) 584 | self.trigger_table.emit('color',color_q.get()) 585 | if timestr is not False: 586 | time_q.put(timestr) 587 | self.trigger_table.emit('time',time_q.get()) 588 | 589 | if __name__=='__main__': 590 | app = QtWidgets.QApplication(sys.argv) 591 | global window 592 | window = mywindow() 593 | nd=Network_daemon() 594 | nd.trigger_table.connect(window.settable_function) 595 | nd.start() 596 | window.show() 597 | window.pushButton_2.setEnabled(True) 598 | sys.exit(app.exec_()) 599 | 600 | -------------------------------------------------------------------------------- /opencv_ffmpeg410_64.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PT123123/Vehicle-recognition-system/c57c83c98df8f3a6cae8eeb32ec85b4748fb6b3c/opencv_ffmpeg410_64.dll -------------------------------------------------------------------------------- /png/demo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PT123123/Vehicle-recognition-system/c57c83c98df8f3a6cae8eeb32ec85b4748fb6b3c/png/demo.png --------------------------------------------------------------------------------