├── LICENSE ├── README.md ├── collection ├── __init__.py ├── __pycache__ │ ├── __init__.cpython-38.pyc │ ├── learner.cpython-38.pyc │ ├── picker.cpython-38.pyc │ └── predictor.cpython-38.pyc ├── learner.py ├── picker.py └── predictor.py ├── config ├── angle.yml ├── global.yml └── wind.yml ├── dataset ├── angle.zip ├── valid.zip └── wind.zip ├── demo_angle.py ├── demo_wind.py ├── tools └── show_split.py └── utils ├── __init__.py ├── __pycache__ ├── __init__.cpython-38.pyc └── split.cpython-38.pyc └── split.py /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 
29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 
61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Simple indefinite length text recognition 2 | ## 基于Opencv的简单不定长文本识别 3 | 4 | > Author: github.com/SummerColdWind 5 | 6 | > 写在前面: 7 | > 8 | > 本项目是一个基于Python-Opencv,针对简单的单行不定长文本的识别。 9 | > 本项目旨在为简单场景下的文字识别提供一个轻量级的解决方案。 10 | > 项目基于简单的矩阵运算来决定输出,因此针对复杂场景下的文字识别, 11 | > 你应该使用深度学习的方法来解决任务。 12 | 13 | --- 14 | 15 | ## 1.准备数据集 16 | 17 | 数据集应具有以下结构: 18 | ``` 19 | ---dir_name 20 | |---image 21 | |---label.txt 22 | ``` 23 | 数据集文件夹本身的名字可以是任意的,其中应该包含储存图片的文件夹**image**和储存标注信息的文本文件**label.txt**。 24 | 标注文件**label.txt**应具有如下格式: 25 | ``` 26 | 1.png -18 27 | 3.png -18 28 | 4.png -19 29 | 5.png -30 30 | 6.png -29 31 | 7.png -28 32 | 8.png -27 33 | 9.png -26 34 | 10.png -26 35 | 11.png -26 36 | ``` 37 | 每一行为一条数据,用 **'\t'** 进行分割,左侧为**文件名**,右侧为图片中的文字。 38 | 39 | ## 2.分割测试 40 | 使用**tools**文件夹下的**show_split.py**进行字符分割测试。 41 | 42 | 你应该在**config**文件夹中复制**global.yml**,然后重命名为自己的名字,进行分割参数调整。 43 | ``` 44 | Split: 45 | enlarge: 1 46 | enhance_contrast: False 47 | threshold: 100 48 | 
adaptedThreshold: False 49 | threshold_bias: 0 50 | min_area: 0 51 | close_morphology: False 52 | kernel_size: (3,3) 53 | ``` 54 | - enlarge: 图片放大倍数 55 | - enhance_contrast: 是否进行直方图均衡以增强对比度 56 | - threshold: 二值化图像阈值 57 | - adaptedThreshold: 是否启用自适应阈值 58 | - threshold_bias: 自适应阈值基础上进行调整的数值 59 | - min_area: 轮廓最小面积,低于此面积的轮廓将不视为一个字符 60 | - close_morphology: 是否进行轮廓闭合操作 61 | - kernel_size: 闭运算卷积核大小 62 | 63 | 然后启动**show_split.py**,输入数据集和配置文件的路径,观察分割效果并调整。 64 | 65 | 66 | ## 3.启动学习 67 | 68 | 导入学习器**Learner**进行模型的学习。 69 | ```python 70 | from opencv_digit_recognize.collection import Learner 71 | 72 | result_path = './models/angle.qmodel' 73 | 74 | learner = Learner() 75 | learner.load(dataset='./dataset/angle', config_path='./config/angle.yml') 76 | learner.learn() 77 | learner.save(result_path) 78 | ``` 79 | 实例化学习器后,指定数据集文件夹,然后使用**learn**方法进行学习,整个过程您无需其他操作。 80 | 81 | 最后使用**save**方法保存识别得到的模型,我们建议您将文件扩展名设置为“**.qmodel**”。 82 | 83 | - 对测试使用的300+张数据集进行学习所耗费的时间约为0.17s。 84 | 85 | >Tips: 86 | > 87 | > 打印learner可以输出所有学习到的字符列表。 88 | > 89 | > 调用learner的show方法可以查看学习到的标准化图片。 90 | 91 | ## 4.进行预测 92 | ```python 93 | from opencv_digit_recognize.collection import Predictor 94 | 95 | result_path = './models/angle.qmodel' 96 | predict_image = './dataset/angle/image/1.png' 97 | 98 | predictor = Predictor() 99 | predictor.load_model(result_path) 100 | result = predictor.predict(predict_image) 101 | print(result) 102 | ``` 103 | 实例化预测器后,使用**load_model**方法导入模型,然后使用**predict**方法进行预测。 104 | 105 | - 对测试使用的300+张数据集进行预测所耗费的平均时间约为0.001s。 106 | -------------------------------------------------------------------------------- /collection/__init__.py: -------------------------------------------------------------------------------- 1 | from opencv_digit_recognize.collection.learner import Learner 2 | from opencv_digit_recognize.collection.predictor import Predictor 3 | -------------------------------------------------------------------------------- /collection/__pycache__/__init__.cpython-38.pyc: 
# --- collection/learner.py ---
import cv2
import numpy as np
import os
import pickle
import yaml

from pathlib import Path
from typing import Union
from collections import defaultdict, namedtuple

from opencv_digit_recognize.utils import split_char


# (image, area) pair describing one learned character template:
# a 100x100 binarized glyph image plus its mean contour area.
CharInfo = namedtuple('CharInfo', 'image area')


class Learner:
    """Learn per-character template images from a labelled dataset.

    Workflow: ``load()`` -> ``learn()`` -> ``save()``.  The resulting
    ``.qmodel`` file bundles the averaged character templates together with
    the split configuration, so a Predictor can reproduce the exact same
    segmentation at inference time.
    """

    def __init__(self):
        self.dataset = None            # dataset directory (contains image/ and label.txt)
        self.config = None             # 'Split' section of the YAML config
        self.labels = None             # list of (filename, label) pairs
        self.data = defaultdict(list)  # char -> list of CharInfo samples
        self.model = {}                # char -> averaged CharInfo

    def __repr__(self):
        # Tab-separated list of every learned character.
        return '\t'.join(self.model.keys())

    def load(self, dataset: Union[Path, str], config_path: Union[Path, str]):
        """Read ``label.txt`` from *dataset* and the 'Split' section from *config_path*.

        :param dataset: dataset directory containing ``image/`` and ``label.txt``
        :param config_path: path to a YAML file with a top-level ``Split`` mapping
        """
        self.dataset = dataset
        # utf-8 for consistency with the other label.txt readers in this
        # project (demo_wind.py, tools/show_split.py).  A list (not a
        # one-shot generator) so learn() can be called more than once.
        with open(os.path.join(dataset, 'label.txt'), 'r', encoding='utf-8') as file:
            self.labels = [tuple(line.strip().split('\t')) for line in file]

        with open(config_path, 'r') as file:
            # The configs contain only plain scalars, so safe_load suffices
            # and avoids FullLoader's wider attack surface.
            config = yaml.safe_load(file)
        self.config = config['Split']

    def learn(self):
        """Segment every labelled image and average the samples per character."""
        for name, label in self.labels:
            image = cv2.imread(os.path.join(self.dataset, 'image', name))
            contours = split_char(image, self.config)
            # Pairs the i-th character of the label with the i-th detected
            # glyph; assumes segmentation found one contour per character.
            for char, info in zip(label, contours):
                self.data[char].append(info)

        for char, samples in self.data.items():
            canvas = np.zeros((100, 100), np.uint64)  # uint64 sum avoids uint8 overflow
            total_area = 0
            count = len(samples)
            for image, area in samples:
                canvas += image
                total_area += area

            # Element-wise mean of all samples, back down to a uint8 image.
            canvas = np.uint8(np.floor_divide(canvas, count))
            self.model[char] = CharInfo(canvas, total_area / count)

    def show(self):
        """Display all learned templates side by side (blocks until a key press)."""
        cv2.namedWindow('show', cv2.WINDOW_AUTOSIZE)
        cv2.imshow('show', np.hstack([value.image for value in self.model.values()]))
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    def save(self, path='./result.qmodel'):
        """Pickle the model together with its split config to *path*."""
        assert path.endswith('.qmodel')  # enforce the project's model extension
        output = {
            'model': self.model,
            'config': self.config,
        }
        with open(path, 'wb') as file:
            pickle.dump(output, file)


# --- collection/picker.py (module header) ---
import cv2
import numpy as np

from collections import defaultdict
from collections.abc import Sequence
from typing import Optional

from opencv_digit_recognize.utils import split_char


class Picker:
    """Collect character contours from labelled images (developer tool).

    NOTE(review): ``split_char()`` requires a config dict (it has no default),
    but the original code called it with no config, which always raised
    ``TypeError``.  An optional *config* parameter is now threaded through
    the constructor and ``detect()``; omitting it reproduces the original
    failure mode, so existing (broken) callers are unaffected.
    """

    def __init__(self, config: dict = None):
        self.image = None
        self.config = config               # default split configuration for detect()
        self.contours = defaultdict(list)  # label -> list of detected contours

    def __repr__(self):
        return '\n'.join(f'{key}: {len(value)}' for key, value in self.contours.items())

    def load(self, image):
        """Accept either an image path or an already-decoded image array."""
        self.image = cv2.imread(image) if isinstance(image, str) else image

    def detect(
        self,
        image_slice: Sequence = None,
        index: int = 0,
        config: dict = None,
    ) -> Optional[np.ndarray]:
        """
        检测轮廓并返回
        :param image_slice: 图像切片 (y1, y2, x1, x2)
        :param index: 如果产生了多个轮廓,选择轮廓的索引,默认为第一个
        :param config: split configuration; falls back to the one given at construction
        :return: 轮廓数组
        """
        if image_slice is not None:
            y1, y2, x1, x2 = image_slice
            image = self.image[y1:y2, x1:x2]
        else:
            image = self.image

        # Bug fix: the original called split_char(image) without the
        # mandatory config argument.
        return split_char(image, config if config is not None else self.config)[index]

    def show(self):
        """Render every stored contour one by one (blocks on each key press)."""
        cv2.namedWindow('show', cv2.WINDOW_AUTOSIZE)
        for key, contours in self.contours.items():
            for cnt in contours:
                print(f'Show contour: {key}')
                x, y, w, h = cv2.boundingRect(cnt)
                canvas = np.zeros((y * 2 + h, x * 2 + w, 3), dtype=np.uint8)
                cv2.drawContours(canvas, [cnt], 0, (0, 0, 255), 1)
                cv2.imshow('show', canvas)
                cv2.waitKey(0)
        cv2.destroyAllWindows()

    def update(self, key, contour):
        """Append *contour* under label *key*."""
        self.contours[key].append(contour)

    def loads(
        self,
        images: Sequence,
        label: Sequence,
        image_slice: Sequence = None,
    ):
        """
        加载数据集,识别并保存轮廓信息
        :param images: 图像或图像路径
        :param label: 标注
        :param image_slice: 图像切片
        :return:
        """
        for i, image in enumerate(images):
            self.load(image)
            self.update(label[i], self.detect(image_slice))


if __name__ == '__main__':
    import os

    picker = Picker()
    sliced = (555, 576, 29, 73)
    image_dir = '../dataset'
    image_paths = [os.path.join(image_dir, name) for name in os.listdir(image_dir) if not name.startswith('-')]
    image_paths += [os.path.join(image_dir, '-1.png')]
    picker.loads(image_paths, '0123456789-', sliced)
    # picker.show()
    print(picker)
# --- collection/predictor.py ---
import numpy as np
import pickle

from opencv_digit_recognize.utils import split_char


class Predictor:
    """Recognize text by template-matching segmented characters against a model.

    Several named models may be loaded at once; ``use()`` switches between them.
    """

    def __init__(self):
        self.active = 'current'  # name of the model currently in use
        self.data_dict = {}      # name -> {'model': ..., 'config': ...}
        self.model = None        # char -> CharInfo of the active model
        self.config = None       # split config of the active model

    def load_model(self, path, name='current'):
        """Load a pickled ``.qmodel`` file and make it the active model."""
        assert path.endswith('.qmodel')
        with open(path, 'rb') as file:
            # NOTE(review): pickle.load is unsafe on untrusted files; only
            # load .qmodel files you produced yourself.
            data = pickle.load(file)
        self.data_dict[name] = data
        self.model, self.config = data['model'], data['config']

    def use(self, name='current'):
        """Switch the active model to a previously loaded one."""
        self.active = name
        self.model = self.data_dict[name]['model']
        self.config = self.data_dict[name]['config']

    def predict(self, image):
        """Segment *image* and return the recognized text as a string."""
        contours = split_char(image, self.config)
        result = []
        for cnt in contours:
            score, predict_char = 0, None
            for key, char_info in self.model.items():
                templ, mean_area = char_info.image, char_info.area
                # Pixel disagreement between template and candidate,
                # in [0, 255 * 100 * 100].
                delta = np.sum(np.where(templ == cnt.image, 0, 255))
                # Area ratio >= 1 penalizing glyph-size mismatch
                # (equivalent to the original truediv/slice trick).
                shape_delta = max(mean_area, cnt.area) / min(mean_area, cnt.area)
                similarity = 1 - (delta * shape_delta) / (255 * 100 * 100)
                if similarity > score:
                    score, predict_char = similarity, key
            result.append(predict_char)

        # Bug fix: the original returned the empty *list* when nothing was
        # segmented; always return a string so callers can compare to labels.
        return ''.join(result)
enhance_contrast: True 4 | threshold: 170 5 | adaptedThreshold: True 6 | threshold_bias: 0 7 | min_area: 100 8 | close_morphology: False 9 | kernel_size: (3,3) 10 | -------------------------------------------------------------------------------- /config/global.yml: -------------------------------------------------------------------------------- 1 | Split: 2 | enlarge: 1 3 | enhance_contrast: False 4 | threshold: 100 5 | adaptedThreshold: False 6 | threshold_bias: 0 7 | min_area: 0 8 | close_morphology: False 9 | kernel_size: (3,3) 10 | -------------------------------------------------------------------------------- /config/wind.yml: -------------------------------------------------------------------------------- 1 | Split: 2 | enlarge: 4 3 | enhance_contrast: False 4 | threshold: 170 5 | adaptedThreshold: False 6 | threshold_bias: 0 7 | min_area: 1000 8 | close_morphology: True 9 | kernel_size: (3,3) 10 | -------------------------------------------------------------------------------- /dataset/angle.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SummerColdWind/Indefinite-length-text-recognition/4bc6c24909fbf3aa07802408171330d39610837a/dataset/angle.zip -------------------------------------------------------------------------------- /dataset/valid.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SummerColdWind/Indefinite-length-text-recognition/4bc6c24909fbf3aa07802408171330d39610837a/dataset/valid.zip -------------------------------------------------------------------------------- /dataset/wind.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SummerColdWind/Indefinite-length-text-recognition/4bc6c24909fbf3aa07802408171330d39610837a/dataset/wind.zip -------------------------------------------------------------------------------- /demo_angle.py: 
# --- demo_angle.py ---
# End-to-end demo: train a template model on the "angle" dataset, then
# measure recognition accuracy and per-image latency on the same labels.
import os
import time
import cv2

from opencv_digit_recognize.collection import Learner, Predictor

if __name__ == '__main__':
    result_path = './models/angle.qmodel'

    # Train and persist the model.
    learner = Learner()
    learner.load(dataset='./dataset/angle', config_path='./config/angle.yml')
    learner.learn()
    learner.save(result_path)

    predictor = Predictor()
    predictor.load_model(result_path)

    with open('./dataset/angle/label.txt', 'r') as file:
        # Lazy (name, label) pairs; note this evaluates on the training set.
        labels = (tuple(line.strip().split('\t')) for line in file.readlines())
        acc, total, used = 0, 0, 0
        for name, label in labels:
            image = cv2.imread(os.path.join('./dataset/angle/image', name))
            start = time.perf_counter()
            predict = predictor.predict(image)
            used += time.perf_counter() - start  # accumulate pure predict() time
            if label == predict:
                acc += 1
            total += 1
        print(f'acc: {(acc / total) * 100:.2f}%, per_used: {(used / total) * 1000:.2f}ms')


# --- demo_wind.py ---
# Same pipeline as demo_angle.py, but trains on "wind" and validates on a
# separate held-out "valid" dataset, printing each misprediction.
import os
import time
import cv2

from opencv_digit_recognize.collection import Learner, Predictor

if __name__ == '__main__':
    result_path = './models/wind.qmodel'

    learner = Learner()
    learner.load(dataset='./dataset/wind', config_path='./config/wind.yml')
    learner.learn()
    print(learner)   # dump the learned character set
    learner.show()   # visually inspect the averaged templates (blocks on key press)
    learner.save(result_path)

    predictor = Predictor()
    predictor.load_model(result_path)

    with open('./dataset/valid/label.txt', 'r', encoding='utf-8') as file:
        labels = (tuple(line.strip().split('\t')) for line in file.readlines())
        acc, total, used = 0, 0, 0
        for name, label in labels:
            image = cv2.imread(os.path.join('./dataset/valid/image', name))
            start = time.perf_counter()
            predict = predictor.predict(image)
            used += time.perf_counter() - start
            if label == predict:
                acc += 1
            else:
                print(label, predict)  # show each misprediction for debugging
            total += 1
        print(f'acc: {(acc / total) * 100:.2f}%, per_used: {(used / total) * 1000:.2f}ms')


# --- tools/show_split.py ---
# Interactive tool for tuning the 'Split' config: shows the segmented
# 100x100 glyphs of every dataset image, one image per key press.
import cv2
import numpy as np
import os
import yaml

from opencv_digit_recognize.utils import split_char

def get_config(config_path):
    # Load a YAML config file and return its 'Split' section.
    with open(config_path, 'r') as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
        config = config['Split']
    return config


if __name__ == '__main__':
    dataset = '../dataset/angle'
    config = '../config/angle.yml'

    with open(os.path.join(dataset, 'label.txt'), 'r', encoding='utf-8') as file:
        labels = (tuple(line.strip().split('\t')) for line in file.readlines())
        acc, total, used = 0, 0, 0
        for name, label in labels:
            image = cv2.imread(os.path.join(dataset, 'image', name))
            # Re-reads the config each iteration so edits take effect live.
            contours = split_char(image, get_config(config))
            cv2.imshow('show_split', np.hstack([cnt.image for cnt in contours]))
            cv2.waitKey(0)
            cv2.destroyAllWindows()


# --- utils/__init__.py ---
from opencv_digit_recognize.utils.split import split_char
# --- utils/split.py ---
import cv2
import numpy as np
from collections import namedtuple
from typing import List

# (image, area): 100x100 binarized glyph image plus its contour area.
CharInfo = namedtuple('CharInfo', 'image area')


def sort_char(contours):
    """Order contours left-to-right by the x coordinate of their bounding box."""
    return sorted(contours, key=lambda cnt: cv2.boundingRect(cnt)[0])


def split_char(
    image,
    config: dict,
) -> List[CharInfo]:
    """Segment a single-line text image into per-character CharInfo entries.

    :param image: BGR image containing one line of text
    :param config: the 'Split' section of a project YAML config
    :return: CharInfo list ordered left to right
    """
    enlarge = config['enlarge']
    thresh = config['threshold']
    is_adapted = config['adaptedThreshold']
    bias = config['threshold_bias']
    min_area = config['min_area']
    is_closed = config['close_morphology']
    kernel = config['kernel_size']
    is_clahe = config['enhance_contrast']

    image = cv2.resize(image, None, fx=enlarge, fy=enlarge)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    if is_clahe:
        # Local histogram equalization to boost contrast before thresholding.
        gray = cv2.createCLAHE(clipLimit=4.0, tileGridSize=(4, 4)).apply(gray)
    # gray = cv2.bilateralFilter(gray, 9, 10, 10)
    if not is_adapted:
        edges = cv2.threshold(gray, thresh, 255, cv2.THRESH_BINARY)[1]
    else:
        # Otsu picks the base threshold; threshold_bias lets the config nudge it.
        thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[0]
        edges = cv2.threshold(gray, thresh + bias, 255, cv2.THRESH_BINARY)[1]
    if is_closed:
        # kernel_size is stored as the literal string "(w,h)" in the YAML configs.
        size = tuple(int(v) for v in kernel.strip('()').split(','))
        edges = cv2.morphologyEx(edges, cv2.MORPH_CLOSE, np.ones(size, np.uint8))

    # Robustness fix: findContours returns (contours, hierarchy) in OpenCV
    # 2.x/4.x but (image, contours, hierarchy) in 3.x; the original [0]
    # silently picked the wrong element under 3.x.
    found = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contours = found[0] if len(found) == 2 else found[1]

    h0, w0 = image.shape[:2]  # loop-invariant: hoisted out of the contour loop
    result = []
    for cnt in sort_char(contours):
        area = cv2.contourArea(cnt)
        x, y, w, h = cv2.boundingRect(cnt)
        # Keep contours that are big enough and at least 10 px away from every
        # border (border-touching blobs are presumably noise or cropped glyphs
        # — TODO confirm against the datasets).
        if area > min_area and x > 10 and y > 10 and (w0 - x - w) > 10 and (h0 - y - h) > 10:
            cv2.drawContours(edges, [cnt], 0, 255, 1)  # thicken glyph outline in place
            glyph = cv2.resize(edges[y:y + h, x:x + w], (100, 100))
            result.append(CharInfo(glyph, area))

    return result