├── .gitignore ├── LICENSE ├── MANIFEST.in ├── README.md ├── agentclpr ├── __init__.py ├── infer │ ├── __init__.py │ ├── detector.py │ ├── server.py │ ├── system.py │ └── utility.py └── resources │ ├── char_dicts │ └── clp_dict.txt │ └── pretrained_models │ ├── ch_mul_m_cls.onnx │ ├── ch_mul_v2_c_det.onnx │ ├── clp_det.onnx │ └── clp_v2_c_rec.onnx ├── requirements.txt └── setup.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 
14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 
134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 
193 | You may obtain a copy of the License at
194 | 
195 | http://www.apache.org/licenses/LICENSE-2.0
196 | 
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 | 
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | recursive-include agentclpr/resources/char_dicts *.txt
2 | recursive-include agentclpr/resources/pretrained_models *.onnx
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # AgentCLPR
2 | 
3 | ![GitHub forks](https://img.shields.io/github/forks/AgentMaker/AgentCLPR)
4 | ![GitHub Repo stars](https://img.shields.io/github/stars/AgentMaker/AgentCLPR)
5 | ![Pypi Downloads](https://pepy.tech/badge/agentclpr)
6 | ![GitHub release (latest by date including pre-releases)](https://img.shields.io/github/v/release/AgentMaker/AgentCLPR?include_prereleases)
7 | ![GitHub](https://img.shields.io/github/license/AgentMaker/AgentCLPR)
8 | 
9 | ## Introduction
10 | 
11 | * A Chinese license plate detection and recognition system built on the [ONNXRuntime](https://github.com/microsoft/onnxruntime), [AgentOCR](https://github.com/AgentMaker/AgentOCR) and [License-Plate-Detector](https://github.com/zeusees/License-Plate-Detector) projects.
12 | 
13 | ## Recognition results
14 | 
15 | * Detects and recognizes several types of license plates (single-layer plates are recognized most reliably):
16 | 
17 | * Single-layer plates:
18 | 
19 | ![](https://img-blog.csdnimg.cn/e5801d1a4d394d8ba7b50bed4b0a6b55.png)
20 | 
21 | [[[[373, 282], [69, 284], [73, 188], [377, 185]], ['苏E05EV8', 0.9923506379127502]]]
22 | [[[[393, 278], [318, 279], [318, 257], [393, 255]], ['VA30093', 0.7386096119880676]]]
23 | [[[[[487, 366], [359, 372], [361, 331], [488, 324]], ['皖K66666', 0.9409016370773315]]]]
24 | [[[[304, 500], [198, 498], [199, 467], [305, 468]], ['鲁QF02599', 0.995299220085144]]]
25 | [[[[309, 219], [162, 223], [160, 181], [306, 177]], ['使198476', 0.9938704371452332]]]
26 | [[[[957, 918], [772, 920], [771, 862], [956, 860]], ['陕A06725D', 0.9791222810745239]]]
27 | 
28 | * Double-layer plates:
29 | 
30 | ![](https://ai-studio-static-online.cdn.bcebos.com/4e34243377b8461e90ae4a5b5b63577d2d1434b2811245a1a6ea86a76752f0e6)
31 | 
32 | [[[[399, 298], [256, 301], [256, 232], [400, 230]], ['浙G66666', 0.8870148431461757]]]
33 | [[[[398, 308], [228, 305], [227, 227], [398, 230]], ['陕A00087', 0.9578166644088313]]]
34 | [[[[352, 234], [190, 244], [190, 171], [352, 161]], ['宁A66666', 0.9958433652812175]]]
35 | 
36 | ## Quick start
37 | 
38 | * Quick install
39 | 
40 | ```bash
41 | # Install AgentCLPR
42 | $ pip install agentclpr
43 | 
44 | # Install the ONNXRuntime build that matches your platform
45 | 
46 | # CPU build (recommended for devices without CUDA support, on systems other than Windows 10)
47 | $ pip install onnxruntime
48 | 
49 | # GPU build (recommended for devices with CUDA support)
50 | $ pip install onnxruntime-gpu
51 | 
52 | # DirectML build (recommended for Windows 10 devices; enables GPU acceleration across vendors)
53 | $ pip install onnxruntime-directml
54 | 
55 | # See the official ONNXRuntime website for details on other builds
56 | ```
57 | 
58 | * Basic usage:
59 | 
60 | ```python
61 | # Import the CLPSystem module
62 | from agentclpr import CLPSystem
63 | 
64 | # Initialize the license plate recognition system
65 | clp = CLPSystem()
66 | 
67 | # Run license plate recognition on an image
68 | results = clp('test.jpg')
69 | ```
70 | 
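* Reading the results (a minimal sketch, assuming the output layout shown in the samples above: each entry pairs the four corner points of a detected plate with the recognized text and a confidence score):

```python
# Iterate over the entries returned by CLPSystem
for points, (text, score) in results:
    print('corners:', points)
    print('plate:', text, 'confidence:', score)
```
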
71 | * Server deployment:
72 | 
73 | * Start the AgentCLPR server
74 | 
75 | ```shell
76 | $ agentclpr server
77 | ```
78 | 
79 | * Calling it from Python
80 | 
81 | ```python
82 | import cv2
83 | import json
84 | import base64
85 | import requests
86 | 
87 | # Encode an image as Base64
88 | def cv2_to_base64(image):
89 |     data = cv2.imencode('.jpg', image)[1]
90 |     image_base64 = base64.b64encode(data.tobytes()).decode('UTF-8')
91 |     return image_base64
92 | 
93 | # Read the image
94 | image = cv2.imread('test.jpg')
95 | image_base64 = cv2_to_base64(image)
96 | 
97 | # Build the request payload
98 | data = {
99 |     'image': image_base64
100 | }
101 | 
102 | # Send the request
103 | url = "http://127.0.0.1:5000/ocr"
104 | r = requests.post(url=url, data=json.dumps(data))
105 | 
106 | # Print the prediction results
107 | print(r.json())
108 | ```
109 | 
110 | ## Contact us
111 | Email : [agentmaker@163.com](mailto:agentmaker@163.com)
112 | QQ Group : 1005109853 113 | -------------------------------------------------------------------------------- /agentclpr/__init__.py: -------------------------------------------------------------------------------- 1 | from .infer import CLPSystem 2 | 3 | 4 | def command(): 5 | import argparse 6 | parser = argparse.ArgumentParser() 7 | 8 | # Command mode 9 | parser.add_argument(dest="mode", type=str) 10 | 11 | # Server config 12 | parser.add_argument("--host", type=str, default='127.0.0.1') 13 | parser.add_argument("--port", type=int, default=5000) 14 | 15 | args = parser.parse_known_args()[0] 16 | 17 | if args.mode == 'server': 18 | from gevent.pywsgi import WSGIServer 19 | from .infer.server import clp_server 20 | server = WSGIServer((args.host, args.port), clp_server) 21 | server.serve_forever() 22 | else: 23 | raise ValueError('Please check the mode.') 24 | -------------------------------------------------------------------------------- /agentclpr/infer/__init__.py: -------------------------------------------------------------------------------- 1 | from .detector import CLPDetector 2 | from .system import CLPSystem 3 | -------------------------------------------------------------------------------- /agentclpr/infer/detector.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from math import ceil 4 | from itertools import product 5 | 6 | from .utility import load_onnx, preprocess, gen_scale, decode, decode_landm, nms 7 | 8 | 9 | class CLPDetector: 10 | def __init__( 11 | self, 12 | model_path, 13 | confidence_threshold=0.02, 14 | top_k=1000, 15 | nms_threshold=0.4, 16 | vis_threshold=0.7, 17 | keep_top_k=500, 18 | min_sizes=[[24, 48], [96, 192], [384, 768]], 19 | steps=[8, 16, 32], 20 | clip=False, 21 | variance=[0.1, 0.2], 22 | providers='auto'): 23 | self.session, self.inputs_name, self.outputs_name = load_onnx( 24 | model_path, providers) 25 | self.confidence_threshold = confidence_threshold 26 | self.top_k = top_k 27 | self.nms_threshold = nms_threshold 28 | self.keep_top_k = keep_top_k 29 | self.vis_threshold = vis_threshold 30 | 31 | self.min_sizes = min_sizes 32 | self.steps = steps 33 | self.clip = clip 34 | self.variance = variance 35 | 36 | def __call__(self, image): 37 | h, w, _ = image.shape 38 | image = preprocess(image) 39 | loc, conf, landms = self.session.run( 40 | self.outputs_name, 41 | input_feed={ 42 | self.inputs_name[0]: image 43 | } 44 | ) 45 | priors = self.gen_priors(h, w) 46 | 47 | boxes = decode(loc.squeeze(0), priors, self.variance) 48 | boxes = boxes * gen_scale(h, w) 49 | 50 | landms = decode_landm(landms.squeeze(0), priors, self.variance) 51 | landms = landms * gen_scale(h, w, 4) 52 | 53 | scores = conf.squeeze(0)[:, 1] 54 | 55 | inds = np.where(scores > self.confidence_threshold)[0] 56 | boxes = boxes[inds] 57 | landms = landms[inds] 58 | scores = scores[inds] 59 | 60 | # keep top-K before NMS 61 | order = scores.argsort()[::-1][:self.top_k] 62 | boxes = boxes[order] 63 | landms = landms[order] 64 | scores = scores[order] 65 | 66 | # do NMS 67 | dets = np.hstack((boxes, scores[:, np.newaxis])).astype( 68 | np.float32, copy=False) 69 | keep = nms(dets, self.nms_threshold) 70 | dets = dets[keep, :] 71 | landms = landms[keep] 72 | 73 | # keep top-K faster NMS 74 | dets = dets[:self.keep_top_k, :] 75 | landms = landms[:self.keep_top_k, :] 76 | 77 | dets = np.concatenate((dets, landms), axis=1) 78 | 79 | inds = np.where(dets[:, 4] > self.vis_threshold)[0] 80 | dets = dets[inds] 81 | return 
dets
82 | 
83 |     def gen_priors(self, h, w):
84 |         anchors = []
85 |         self.feature_maps = [
86 |             [ceil(h/step), ceil(w/step)] for step in self.steps]
87 |         for k, f in enumerate(self.feature_maps):
88 |             min_sizes = self.min_sizes[k]
89 |             for i, j in product(range(f[0]), range(f[1])):
90 |                 for min_size in min_sizes:
91 |                     s_kx = min_size / w
92 |                     s_ky = min_size / h
93 |                     dense_cx = [x * self.steps[k] / w
94 |                                 for x in [j + 0.5]]
95 |                     dense_cy = [y * self.steps[k] / h
96 |                                 for y in [i + 0.5]]
97 |                     for cy, cx in product(dense_cy, dense_cx):
98 |                         anchors += [cx, cy, s_kx, s_ky]
99 | 
100 |         # stack the anchors into an array of prior boxes
101 |         output = np.array(anchors).reshape(-1, 4)
102 |         if self.clip:
103 |             output = np.clip(output, 0, 1)
104 |         return output
105 | 
--------------------------------------------------------------------------------
/agentclpr/infer/server.py:
--------------------------------------------------------------------------------
1 | import json
2 | 
3 | from flask import Flask, request
4 | from .system import CLPSystem
5 | from .utility import base64_to_cv2
6 | 
7 | clp = CLPSystem()
8 | clp_server = Flask(__name__)
9 | 
10 | 
11 | @clp_server.route("/ocr", methods=['POST'])
12 | def predict_server():
13 |     if not request.data:
14 |         return json.dumps([], ensure_ascii=False)
15 | 
16 |     request_data = request.data.decode('utf-8')
17 |     request_json = json.loads(request_data)
18 | 
19 |     if 'image' not in request_json:
20 |         return json.dumps([], ensure_ascii=False)
21 | 
22 |     image_base64 = request_json.pop('image')
23 |     image = base64_to_cv2(image_base64)
24 |     results = clp(image, **request_json)
25 | 
26 |     return json.dumps(results, ensure_ascii=False)
27 | 
--------------------------------------------------------------------------------
/agentclpr/infer/system.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import json
3 | import numpy as np
4 | 
5 | from agentocr import OCRSystem
6 | 
7 | from .detector import CLPDetector
8 | from .utility import clp_det_model, det_model, cls_model, rec_model, rec_char_dict_path, base64_to_cv2
9 | 
10 | 
11 | class CLPSystem:
12 |     def __init__(
13 |             self,
14 |             clp_det_model=clp_det_model,
15 |             det_model=det_model,
16 |             cls_model=cls_model,
17 |             rec_model=rec_model,
18 |             rec_char_dict_path=rec_char_dict_path,
19 |             det_db_score_mode='slow',
20 |             det_db_unclip_ratio=1.3,
21 |             **kwarg):
22 |         self.det = CLPDetector(clp_det_model, **kwarg)
23 |         self.ocr = OCRSystem(det_model=det_model,
24 |                              cls_model=cls_model,
25 |                              rec_model=rec_model,
26 |                              rec_char_dict_path=rec_char_dict_path,
27 |                              det_db_score_mode=det_db_score_mode,
28 |                              det_db_unclip_ratio=det_db_unclip_ratio,
29 |                              **kwarg)
30 | 
31 |     def __call__(self, image):
32 |         if isinstance(image, np.ndarray):
33 |             results = self.det_ocr(image)
34 |         elif isinstance(image, str):
35 |             image = cv2.imdecode(np.fromfile(image, dtype=np.uint8), 1)
36 |             results = self.det_ocr(image)
37 |         else:
38 |             raise ValueError('Please check the image format.')
39 |         return results
40 | 
41 |     def det_ocr(self, image):
42 |         results = []
43 |         det_results = self.det(image)
44 |         for det_result in det_results:
45 |             bbox = list(map(int, det_result[:4]))
46 |             points = np.array(
47 |                 list(map(int, det_result[5:13]))
48 |             ).reshape(4, 2).tolist()
49 |             x1, y1, x2, y2 = tuple(bbox)
50 |             w, h = x2-x1+20, y2-y1+20
51 |             img_ocr_infer = np.pad(
52 |                 cv2.resize(image[y1-10:y2 + 11, x1-10:x2 + 11, :], (256, int(256/w*h))),
53 |                 ((100, 100), (100, 100), (0, 0)),
54 |                 mode='constant',
55 |                 constant_values=(255, 255)
56 |             )
57 |             ocr_results = self.ocr(img_ocr_infer)
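            # The OCR output for this cropped plate is merged below: the recognized
            # text fragments are concatenated and their confidences are multiplied
            # into a single plate-level score.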
58 |             text = ''.join([ocr_result[1][0] for ocr_result in ocr_results])
59 |             rec_score = np.prod(
60 |                 [ocr_result[1][1] for ocr_result in ocr_results]
61 |             )
62 |             results.append([points, [text, rec_score]])
63 |         return results
64 | 
--------------------------------------------------------------------------------
/agentclpr/infer/utility.py:
--------------------------------------------------------------------------------
1 | import os
2 | import cv2
3 | import base64
4 | import numpy as np
5 | import onnxruntime as ort
6 | 
7 | 
8 | file_path = os.path.abspath(__file__)
9 | file_dir = os.path.dirname(file_path)
10 | package_dir = os.path.dirname(file_dir)
11 | 
12 | 
13 | clp_det_model = os.path.join(
14 |     package_dir,
15 |     'resources',
16 |     'pretrained_models',
17 |     'clp_det.onnx'
18 | )
19 | 
20 | det_model = os.path.join(
21 |     package_dir,
22 |     'resources',
23 |     'pretrained_models',
24 |     'ch_mul_v2_c_det.onnx'
25 | )
26 | cls_model = os.path.join(
27 |     package_dir,
28 |     'resources',
29 |     'pretrained_models',
30 |     'ch_mul_m_cls.onnx'
31 | )
32 | rec_model = os.path.join(
33 |     package_dir,
34 |     'resources',
35 |     'pretrained_models',
36 |     'clp_v2_c_rec.onnx'
37 | )
38 | rec_char_dict_path = os.path.join(
39 |     package_dir,
40 |     'resources',
41 |     'char_dicts',
42 |     'clp_dict.txt'
43 | )
44 | 
45 | 
46 | def preprocess(img):
47 |     img = img.astype('float32')
48 |     img -= (104, 117, 123)
49 |     img = img.transpose(2, 0, 1)
50 |     img = img[None, ...]
51 |     return img
52 | 
53 | 
54 | def str2providers(providers_str):
55 |     available_providers = ort.get_available_providers()
56 | 
57 |     if providers_str.lower() == 'auto':
58 |         return available_providers
59 | 
60 |     providers_dict = {
61 |         provider.lower(): provider
62 |         for provider in available_providers
63 |     }
64 | 
65 |     provider_strs = [(provider_str + 'ExecutionProvider').lower()
66 |                      for provider_str in providers_str.split(',')]
67 | 
68 |     select_providers = [
69 |         providers_dict[provider_str] for provider_str in provider_strs
70 |         if provider_str in providers_dict.keys()
71 |     ]
72 | 
73 |     if len(select_providers) == 0:
74 |         select_providers = available_providers
75 | 
76 |     return select_providers
77 | 
78 | 
79 | def load_onnx(model_path, providers='auto'):
80 |     providers = str2providers(providers)
81 |     sess_options = ort.SessionOptions()
82 |     if 'DmlExecutionProvider' in providers:
83 |         sess_options.enable_mem_pattern = False
84 |     session = ort.InferenceSession(model_path, sess_options=sess_options, providers=providers)
85 |     inputs_name = [input.name for input in session.get_inputs()]
86 |     outputs_name = [output.name for output in session.get_outputs()]
87 |     return session, inputs_name, outputs_name
88 | 
89 | 
90 | def gen_scale(h, w, num=2):
91 |     return np.array([w, h] * num)
92 | 
93 | 
94 | def nms(dets, thresh):
95 |     """Pure Python NMS baseline."""
96 |     x1 = dets[:, 0]
97 |     y1 = dets[:, 1]
98 |     x2 = dets[:, 2]
99 |     y2 = dets[:, 3]
100 |     scores = dets[:, 4]
101 | 
102 |     areas = (x2 - x1 + 1) * (y2 - y1 + 1)
103 |     order = scores.argsort()[::-1]
104 | 
105 |     keep = []
106 |     while order.size > 0:
107 |         i = order[0]
108 |         keep.append(i)
109 |         xx1 = np.maximum(x1[i], x1[order[1:]])
110 |         yy1 = np.maximum(y1[i], y1[order[1:]])
111 |         xx2 = np.minimum(x2[i], x2[order[1:]])
112 |         yy2 = np.minimum(y2[i], y2[order[1:]])
113 | 
114 |         w = np.maximum(0.0, xx2 - xx1 + 1)
115 |         h = np.maximum(0.0, yy2 - yy1 + 1)
116 |         inter = w * h
117 |         ovr = inter / (areas[i] + areas[order[1:]] - inter)
118 | 
119 |         inds = np.where(ovr <= thresh)[0]
120 |         order = order[inds + 1]
121 | 
122 |     return keep
123 | 
124 | 
125 | def decode(loc, priors, variances):
126 |     """Decode locations from predictions using priors to undo
127 |     the encoding we did for offset regression at train time.
128 |     Args:
129 |         loc (tensor): location predictions for loc layers,
130 |             Shape: [num_priors,4]
131 |         priors (tensor): Prior boxes in center-offset form.
132 |             Shape: [num_priors,4].
133 |         variances: (list[float]) Variances of priorboxes
134 |     Return:
135 |         decoded bounding box predictions
136 |     """
137 | 
138 |     boxes = np.concatenate((
139 |         priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],
140 |         priors[:, 2:] * np.exp(loc[:, 2:] * variances[1])), 1)
141 |     boxes[:, :2] -= boxes[:, 2:] / 2
142 |     boxes[:, 2:] += boxes[:, :2]
143 |     return boxes
144 | 
145 | 
146 | def decode_landm(pre, priors, variances):
147 |     """Decode landm from predictions using priors to undo
148 |     the encoding we did for offset regression at train time.
149 |     Args:
150 |         pre (tensor): landm predictions for loc layers,
151 |             Shape: [num_priors,10]
152 |         priors (tensor): Prior boxes in center-offset form.
153 |             Shape: [num_priors,4].
154 |         variances: (list[float]) Variances of priorboxes
155 |     Return:
156 |         decoded landm predictions
157 |     """
158 |     landms = np.concatenate((priors[:, :2] + pre[:, :2] * variances[0] * priors[:, 2:],
159 |                              priors[:, :2] + pre[:, 2:4] *
160 |                              variances[0] * priors[:, 2:],
161 |                              priors[:, :2] + pre[:, 4:6] *
162 |                              variances[0] * priors[:, 2:],
163 |                              priors[:, :2] + pre[:, 6:8] *
164 |                              variances[0] * priors[:, 2:],
165 |                              ), axis=1)
166 |     return landms
167 | 
168 | 
169 | def base64_to_cv2(b64str):
170 |     data = base64.b64decode(b64str.encode('utf8'))
171 |     data = np.frombuffer(data, np.uint8)
172 |     data = cv2.imdecode(data, cv2.IMREAD_COLOR)
173 |     return data
--------------------------------------------------------------------------------
/agentclpr/resources/char_dicts/clp_dict.txt:
--------------------------------------------------------------------------------
1 | 0
2 | 1
3 | 2
4 | 3
5 | 4
6 | 5
7 | 6
8 | 7
9 | 8
10 | 9
11 | A
12 | B
13 | C
14 | D
15 | E
16 | F
17 | G
18 | H
19 | J
20 | K
21 | L
22 | M
23 | N
24 | P
25 | Q
26 | R
27 | S
28 | T
29 | U
30 | V
31 | W
32 | X
33 | Y
34 | Z
35 | 云
36 | 京
37 | 使
38 | 冀
39 | 吉
40 | 学
41 | 宁
42 | 川
43 | 挂
44 | 新
45 | 晋
46 | 桂
47 | 沪
48 | 津
49 | 浙
50 | 渝
51 | 港
52 | 湘
53 | 澳
54 | 琼
55 | 甘
56 | 皖
57 | 粤
58 | 苏
59 | 蒙
60 | 藏
61 | 警
62 | 豫
63 | 贵
64 | 赣
65 | 辽
66 | 鄂
67 | 闽
68 | 陕
69 | 青
70 | 领
71 | 鲁
72 | 黑
73 | 
--------------------------------------------------------------------------------
/agentclpr/resources/pretrained_models/ch_mul_m_cls.onnx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AgentMaker/AgentCLPR/3dfa6a2390176493965f432be38990fee9f4a931/agentclpr/resources/pretrained_models/ch_mul_m_cls.onnx
--------------------------------------------------------------------------------
/agentclpr/resources/pretrained_models/ch_mul_v2_c_det.onnx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AgentMaker/AgentCLPR/3dfa6a2390176493965f432be38990fee9f4a931/agentclpr/resources/pretrained_models/ch_mul_v2_c_det.onnx
--------------------------------------------------------------------------------
/agentclpr/resources/pretrained_models/clp_det.onnx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AgentMaker/AgentCLPR/3dfa6a2390176493965f432be38990fee9f4a931/agentclpr/resources/pretrained_models/clp_det.onnx -------------------------------------------------------------------------------- /agentclpr/resources/pretrained_models/clp_v2_c_rec.onnx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AgentMaker/AgentCLPR/3dfa6a2390176493965f432be38990fee9f4a931/agentclpr/resources/pretrained_models/clp_v2_c_rec.onnx -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | agentocr 2 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | 3 | 4 | def readme(): 5 | with open('README.md', 'r', encoding="UTF-8") as f: 6 | README = f.read() 7 | return README 8 | 9 | 10 | def requirements(): 11 | with open('requirements.txt', 'r', encoding='UTF-8') as f: 12 | REQUIREMENTS = f.read().split('\n') 13 | return REQUIREMENTS 14 | 15 | 16 | setup(name='agentclpr', 17 | packages=[ 18 | 'agentclpr', 'agentclpr.infer' 19 | ], 20 | include_package_data=True, 21 | entry_points={"console_scripts": ["agentclpr = agentclpr:command"]}, 22 | version='1.1.0', 23 | install_requires=requirements(), 24 | license='Apache License 2.0', 25 | description='An easy-to-use Chinese license plate recognition system.', 26 | url='https://github.com/AgentMaker/AgentCLPR', 27 | author='jm12138', 28 | long_description=readme(), 29 | long_description_content_type='text/markdown') --------------------------------------------------------------------------------