├── .gitignore ├── README.md ├── bert_base ├── __init__.py └── bert │ ├── CONTRIBUTING.md │ ├── LICENSE │ ├── README.md │ ├── __init__.py │ ├── create_pretraining_data.py │ ├── extract_features.py │ ├── modeling.py │ ├── modeling_test.py │ ├── multilingual.md │ ├── optimization.py │ ├── optimization_test.py │ ├── requirements.txt │ ├── run_classifier.py │ ├── run_pretraining.py │ ├── run_squad.py │ ├── sample_text.txt │ ├── tokenization.py │ └── tokenization_test.py ├── data ├── dataset │ ├── dev.txt │ ├── test.txt │ └── train.txt └── sample_files │ ├── 展示预测的示例地址文件.csv │ └── 手工标记好的示例地址.xlsx ├── other ├── pictures │ ├── 切分地址要素层级说明.png │ ├── 单条地址分词效果.png │ └── 打标签示例.png ├── predict_base.py └── preprocessing.py ├── predict.py ├── train.py └── train ├── __init__.py ├── bert_lstm_ner.py ├── conlleval.py ├── helper.py ├── lstm_crf_layer.py ├── models.py └── tf_metrics.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # 地址分词 2 | 网络的发展使得地址数量指数级增长,海量的数据对多个行业都提出了挑战。此项目旨在提供一个基于深度学习的地址分词器,使用监督学习的方式、BERT+BiLSTM+CRF技术,从语义角度对地址进行分词。 3 | 4 | --- 5 | ## 下载与安装 6 | Windows: 7 | - ~~Clone代码(目前github只有代码,模型文件太大,为了方便性,暂时不采取这种方式)~~ 8 | - IDE打开(推荐Pycharm) 9 | - 安装必须的package 10 | 11 | --- 12 | 13 | ## 项目结构 14 | ``` 15 | └── . 16 | ├── bert_base 17 | ├── bert # Google发布的BERT源码 18 | └── chinese_L-12_H-768_A-12 # BERT模型 19 | ├── data 20 | └── dataset # 用于训练的最终数据集 21 | ├── dev.txt 22 | ├── test.txt 23 | └── train.txt 24 | ├── sample_files # 示例代码中用到的文件 25 | ├── other 26 | ├── pictures # README中用到的图片 27 | ├── predict_base.py # 分词的基础代码 28 | └── preprocessing.py # 预处理的代码,主要用于生成用于训练的最终训练集 29 | ├── output # 存放训练的模型、log以及中间文件 30 | ├── train # 网络结构、计算准确率、超参数指定 31 | ├── predict.py # 示例分词文件 32 | └── train.py # 示例训练文件 33 | ``` 34 | --- 35 | 36 | ## 如何运行代码,对地址分词 37 | 目前版本支持对单条地址、excel系列文件(xlsx、csv等)进行分词。根目录下的predict.py主方法中有两段代码,可以根据注释提示运行 38 | - 单条地址分词 39 | - 直接运行根目录下的predict.py,就可以得到结果,结果如下 40 | ![单条地址分词效果](./other/pictures/单条地址分词效果.png) 41 | - 对文件中的所有地址进行分词 42 | - 注释掉根目录下的predict.py中 *预测单个文件代码块* ,打开 *预测整个文件代码块* ,运行predict.py 43 | ## 如何使用自己的数据训练模型 44 | ### 概述 45 | 目前项目版本使用监督学习方法,为了保证标签的准确性,我们挑选了不同省份、不同特色的1000多条手工标注的地址进行模型训练。我们将地址分为如下11个地址要素: 46 | ![地址要素说明](./other/pictures/切分地址要素层级说明.png) 47 | 48 | 其次按照规定的层级对地址进行打标签操作,如下图: 49 | ![打标签示例](./other/pictures/打标签示例.png) 50 | 51 | 有了带标签的数据,结合Google发布的BERT预训练语言模型,就可以构建自己的网络,从而训练自己的模型。 52 | 53 | ### 制作数据集 54 | 深度学习中数据的质量对最终的效果有非常大的影响。除了概述中提到的纯手动打标签的方式。 55 | 56 | 目前比较推荐的方式是:先使用训练好的模型对要训练的数据进行一次分词,人工对得到的结果进行部分检查以及修正,再进行训练。 57 | 原始数据标签文件参考 **data/sample_files/手工标记好的示例地址.xlsx**。 58 | 59 | 最后使用**other/preprocessing.py**中的主方法,生成最终的数据集。作为示例,已经使用**data/sample_files/手工标记好的示例地址.xlsx**生成了最终要使用的数据集,位于**data/dataset**下的dev.txt、test.txt、train.txt。 60 | 该示例中,将所有地址切分成三部分,训练集、测试集以及验证集,占比分别为:60%、20%以及20% 61 | 62 | ### 超参数调整 63 | 训练用到的所有超参数都在 **train/helper.py** 文件中指定,请留意有中文注释的代码,我们挑选了10个常用超参数,可以对其进行修改。 64 | ### 训练 65 | 准备好了数据集、调好了超参数,直接运行**train.py**,模型就会在**train/helper.py**中指定的output_dir中生成。 66 | 67 | 到此,训练结束。 68 | 69 | --- 70 | ## 后记 71 | - 后期代码维护以及升级后,可以直接在[github](https://github.com/SuperMap/address-matching)上拉代码。 72 | 73 | ## 引用 74 | >https://github.com/macanv/BERT-BiLSTM-CRF-NER -------------------------------------------------------------------------------- /bert_base/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | -------------------------------------------------------------------------------- /bert_base/bert/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # How to Contribute 2 | 3 | BERT needs to maintain permanent compatibility with the pre-trained model files, 4 | so we do not plan to make any major changes to this 
library (other than what was 5 | promised in the README). However, we can accept small patches related to 6 | re-factoring and documentation. To submit contributes, there are just a few 7 | small guidelines you need to follow. 8 | 9 | ## Contributor License Agreement 10 | 11 | Contributions to this project must be accompanied by a Contributor License 12 | Agreement. You (or your employer) retain the copyright to your contribution; 13 | this simply gives us permission to use and redistribute your contributions as 14 | part of the project. Head over to to see 15 | your current agreements on file or to sign a new one. 16 | 17 | You generally only need to submit a CLA once, so if you've already submitted one 18 | (even if it was for a different project), you probably don't need to do it 19 | again. 20 | 21 | ## Code reviews 22 | 23 | All submissions, including submissions by project members, require review. We 24 | use GitHub pull requests for this purpose. Consult 25 | [GitHub Help](https://help.github.com/articles/about-pull-requests/) for more 26 | information on using pull requests. 27 | 28 | ## Community Guidelines 29 | 30 | This project follows 31 | [Google's Open Source Community Guidelines](https://opensource.google.com/conduct/). 32 | -------------------------------------------------------------------------------- /bert_base/bert/LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. 
For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 
203 | -------------------------------------------------------------------------------- /bert_base/bert/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SuperMap/address-matching/be9dd02786533db12583d1d3b2b782af647a6bda/bert_base/bert/__init__.py -------------------------------------------------------------------------------- /bert_base/bert/create_pretraining_data.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # Copyright 2018 The Google AI Language Team Authors. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | """Create masked LM/next sentence masked_lm TF examples for BERT.""" 16 | 17 | from __future__ import absolute_import 18 | from __future__ import division 19 | from __future__ import print_function 20 | 21 | import collections 22 | import random 23 | 24 | import tokenization 25 | import tensorflow as tf 26 | 27 | flags = tf.flags 28 | 29 | FLAGS = flags.FLAGS 30 | 31 | flags.DEFINE_string("input_file", None, 32 | "Input raw text file (or comma-separated list of files).") 33 | 34 | flags.DEFINE_string( 35 | "output_file", None, 36 | "Output TF example file (or comma-separated list of files).") 37 | 38 | flags.DEFINE_string("vocab_file", None, 39 | "The vocabulary file that the BERT model was trained on.") 40 | 41 | flags.DEFINE_bool( 42 | "do_lower_case", True, 43 | "Whether to lower case the input text. 
Should be True for uncased " 44 | "models and False for cased models.") 45 | 46 | flags.DEFINE_integer("max_seq_length", 128, "Maximum sequence length.") 47 | 48 | flags.DEFINE_integer("max_predictions_per_seq", 20, 49 | "Maximum number of masked LM predictions per sequence.") 50 | 51 | flags.DEFINE_integer("random_seed", 12345, "Random seed for data generation.") 52 | 53 | flags.DEFINE_integer( 54 | "dupe_factor", 10, 55 | "Number of times to duplicate the input data (with different masks).") 56 | 57 | flags.DEFINE_float("masked_lm_prob", 0.15, "Masked LM probability.") 58 | 59 | flags.DEFINE_float( 60 | "short_seq_prob", 0.1, 61 | "Probability of creating sequences which are shorter than the " 62 | "maximum length.") 63 | 64 | 65 | class TrainingInstance(object): 66 | """A single training instance (sentence pair).""" 67 | 68 | def __init__(self, tokens, segment_ids, masked_lm_positions, masked_lm_labels, 69 | is_random_next): 70 | self.tokens = tokens 71 | self.segment_ids = segment_ids 72 | self.is_random_next = is_random_next 73 | self.masked_lm_positions = masked_lm_positions 74 | self.masked_lm_labels = masked_lm_labels 75 | 76 | def __str__(self): 77 | s = "" 78 | s += "tokens: %s\n" % (" ".join( 79 | [tokenization.printable_text(x) for x in self.tokens])) 80 | s += "segment_ids: %s\n" % (" ".join([str(x) for x in self.segment_ids])) 81 | s += "is_random_next: %s\n" % self.is_random_next 82 | s += "masked_lm_positions: %s\n" % (" ".join( 83 | [str(x) for x in self.masked_lm_positions])) 84 | s += "masked_lm_labels: %s\n" % (" ".join( 85 | [tokenization.printable_text(x) for x in self.masked_lm_labels])) 86 | s += "\n" 87 | return s 88 | 89 | def __repr__(self): 90 | return self.__str__() 91 | 92 | 93 | def write_instance_to_example_files(instances, tokenizer, max_seq_length, 94 | max_predictions_per_seq, output_files): 95 | """Create TF example files from `TrainingInstance`s.""" 96 | writers = [] 97 | for output_file in output_files: 98 | writers.append(tf.python_io.TFRecordWriter(output_file)) 99 | 100 | writer_index = 0 101 | 102 | total_written = 0 103 | for (inst_index, instance) in enumerate(instances): 104 | input_ids = tokenizer.convert_tokens_to_ids(instance.tokens) 105 | input_mask = [1] * len(input_ids) 106 | segment_ids = list(instance.segment_ids) 107 | assert len(input_ids) <= max_seq_length 108 | 109 | while len(input_ids) < max_seq_length: 110 | input_ids.append(0) 111 | input_mask.append(0) 112 | segment_ids.append(0) 113 | 114 | assert len(input_ids) == max_seq_length 115 | assert len(input_mask) == max_seq_length 116 | assert len(segment_ids) == max_seq_length 117 | 118 | masked_lm_positions = list(instance.masked_lm_positions) 119 | masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels) 120 | masked_lm_weights = [1.0] * len(masked_lm_ids) 121 | 122 | while len(masked_lm_positions) < max_predictions_per_seq: 123 | masked_lm_positions.append(0) 124 | masked_lm_ids.append(0) 125 | masked_lm_weights.append(0.0) 126 | 127 | next_sentence_label = 1 if instance.is_random_next else 0 128 | 129 | features = collections.OrderedDict() 130 | features["input_ids"] = create_int_feature(input_ids) 131 | features["input_mask"] = create_int_feature(input_mask) 132 | features["segment_ids"] = create_int_feature(segment_ids) 133 | features["masked_lm_positions"] = create_int_feature(masked_lm_positions) 134 | features["masked_lm_ids"] = create_int_feature(masked_lm_ids) 135 | features["masked_lm_weights"] = create_float_feature(masked_lm_weights) 136 | 
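    # Together with next_sentence_labels below, these fields form one
    # tf.train.Example per instance: the id/mask/segment features are padded to
    # max_seq_length, and the masked-LM features to max_predictions_per_seq.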
features["next_sentence_labels"] = create_int_feature([next_sentence_label]) 137 | 138 | tf_example = tf.train.Example(features=tf.train.Features(feature=features)) 139 | 140 | writers[writer_index].write(tf_example.SerializeToString()) 141 | writer_index = (writer_index + 1) % len(writers) 142 | 143 | total_written += 1 144 | 145 | if inst_index < 20: 146 | tf.logging.info("*** Example ***") 147 | tf.logging.info("tokens: %s" % " ".join( 148 | [tokenization.printable_text(x) for x in instance.tokens])) 149 | 150 | for feature_name in features.keys(): 151 | feature = features[feature_name] 152 | values = [] 153 | if feature.int64_list.value: 154 | values = feature.int64_list.value 155 | elif feature.float_list.value: 156 | values = feature.float_list.value 157 | tf.logging.info( 158 | "%s: %s" % (feature_name, " ".join([str(x) for x in values]))) 159 | 160 | for writer in writers: 161 | writer.close() 162 | 163 | tf.logging.info("Wrote %d total instances", total_written) 164 | 165 | 166 | def create_int_feature(values): 167 | feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values))) 168 | return feature 169 | 170 | 171 | def create_float_feature(values): 172 | feature = tf.train.Feature(float_list=tf.train.FloatList(value=list(values))) 173 | return feature 174 | 175 | 176 | def create_training_instances(input_files, tokenizer, max_seq_length, 177 | dupe_factor, short_seq_prob, masked_lm_prob, 178 | max_predictions_per_seq, rng): 179 | """Create `TrainingInstance`s from raw text.""" 180 | all_documents = [[]] 181 | 182 | # Input file format: 183 | # (1) One sentence per line. These should ideally be actual sentences, not 184 | # entire paragraphs or arbitrary spans of text. (Because we use the 185 | # sentence boundaries for the "next sentence prediction" task). 186 | # (2) Blank lines between documents. Document boundaries are needed so 187 | # that the "next sentence prediction" task doesn't span between documents. 188 | for input_file in input_files: 189 | with tf.gfile.GFile(input_file, "r") as reader: 190 | while True: 191 | line = tokenization.convert_to_unicode(reader.readline()) 192 | if not line: 193 | break 194 | line = line.strip() 195 | 196 | # Empty lines are used as document delimiters 197 | if not line: 198 | all_documents.append([]) 199 | tokens = tokenizer.tokenize(line) 200 | if tokens: 201 | all_documents[-1].append(tokens) 202 | 203 | # Remove empty documents 204 | all_documents = [x for x in all_documents if x] 205 | rng.shuffle(all_documents) 206 | 207 | vocab_words = list(tokenizer.vocab.keys()) 208 | instances = [] 209 | for _ in range(dupe_factor): 210 | for document_index in range(len(all_documents)): 211 | instances.extend( 212 | create_instances_from_document( 213 | all_documents, document_index, max_seq_length, short_seq_prob, 214 | masked_lm_prob, max_predictions_per_seq, vocab_words, rng)) 215 | 216 | rng.shuffle(instances) 217 | return instances 218 | 219 | 220 | def create_instances_from_document( 221 | all_documents, document_index, max_seq_length, short_seq_prob, 222 | masked_lm_prob, max_predictions_per_seq, vocab_words, rng): 223 | """Creates `TrainingInstance`s for a single document.""" 224 | document = all_documents[document_index] 225 | 226 | # Account for [CLS], [SEP], [SEP] 227 | max_num_tokens = max_seq_length - 3 228 | 229 | # We *usually* want to fill up the entire sequence since we are padding 230 | # to `max_seq_length` anyways, so short sequences are generally wasted 231 | # computation. 
However, we *sometimes* 232 | # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter 233 | # sequences to minimize the mismatch between pre-training and fine-tuning. 234 | # The `target_seq_length` is just a rough target however, whereas 235 | # `max_seq_length` is a hard limit. 236 | target_seq_length = max_num_tokens 237 | if rng.random() < short_seq_prob: 238 | target_seq_length = rng.randint(2, max_num_tokens) 239 | 240 | # We DON'T just concatenate all of the tokens from a document into a long 241 | # sequence and choose an arbitrary split point because this would make the 242 | # next sentence prediction task too easy. Instead, we split the input into 243 | # segments "A" and "B" based on the actual "sentences" provided by the user 244 | # input. 245 | instances = [] 246 | current_chunk = [] 247 | current_length = 0 248 | i = 0 249 | while i < len(document): 250 | segment = document[i] 251 | current_chunk.append(segment) 252 | current_length += len(segment) 253 | if i == len(document) - 1 or current_length >= target_seq_length: 254 | if current_chunk: 255 | # `a_end` is how many segments from `current_chunk` go into the `A` 256 | # (first) sentence. 257 | a_end = 1 258 | if len(current_chunk) >= 2: 259 | a_end = rng.randint(1, len(current_chunk) - 1) 260 | 261 | tokens_a = [] 262 | for j in range(a_end): 263 | tokens_a.extend(current_chunk[j]) 264 | 265 | tokens_b = [] 266 | # Random next 267 | is_random_next = False 268 | if len(current_chunk) == 1 or rng.random() < 0.5: 269 | is_random_next = True 270 | target_b_length = target_seq_length - len(tokens_a) 271 | 272 | # This should rarely go for more than one iteration for large 273 | # corpora. However, just to be careful, we try to make sure that 274 | # the random document is not the same as the document 275 | # we're processing. 276 | for _ in range(10): 277 | random_document_index = rng.randint(0, len(all_documents) - 1) 278 | if random_document_index != document_index: 279 | break 280 | 281 | random_document = all_documents[random_document_index] 282 | random_start = rng.randint(0, len(random_document) - 1) 283 | for j in range(random_start, len(random_document)): 284 | tokens_b.extend(random_document[j]) 285 | if len(tokens_b) >= target_b_length: 286 | break 287 | # We didn't actually use these segments so we "put them back" so 288 | # they don't go to waste. 
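          # For example, if current_chunk held 5 segments and a_end == 2, the 3
          # segments after a_end were never consumed, so i is rewound by 3 and the
          # outer loop will read them again on later iterations.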
289 | num_unused_segments = len(current_chunk) - a_end 290 | i -= num_unused_segments 291 | # Actual next 292 | else: 293 | is_random_next = False 294 | for j in range(a_end, len(current_chunk)): 295 | tokens_b.extend(current_chunk[j]) 296 | truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng) 297 | 298 | assert len(tokens_a) >= 1 299 | assert len(tokens_b) >= 1 300 | 301 | tokens = [] 302 | segment_ids = [] 303 | tokens.append("[CLS]") 304 | segment_ids.append(0) 305 | for token in tokens_a: 306 | tokens.append(token) 307 | segment_ids.append(0) 308 | 309 | tokens.append("[SEP]") 310 | segment_ids.append(0) 311 | 312 | for token in tokens_b: 313 | tokens.append(token) 314 | segment_ids.append(1) 315 | tokens.append("[SEP]") 316 | segment_ids.append(1) 317 | 318 | (tokens, masked_lm_positions, 319 | masked_lm_labels) = create_masked_lm_predictions( 320 | tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng) 321 | instance = TrainingInstance( 322 | tokens=tokens, 323 | segment_ids=segment_ids, 324 | is_random_next=is_random_next, 325 | masked_lm_positions=masked_lm_positions, 326 | masked_lm_labels=masked_lm_labels) 327 | instances.append(instance) 328 | current_chunk = [] 329 | current_length = 0 330 | i += 1 331 | 332 | return instances 333 | 334 | 335 | def create_masked_lm_predictions(tokens, masked_lm_prob, 336 | max_predictions_per_seq, vocab_words, rng): 337 | """Creates the predictions for the masked LM objective.""" 338 | 339 | cand_indexes = [] 340 | for (i, token) in enumerate(tokens): 341 | if token == "[CLS]" or token == "[SEP]": 342 | continue 343 | cand_indexes.append(i) 344 | 345 | rng.shuffle(cand_indexes) 346 | 347 | output_tokens = list(tokens) 348 | 349 | masked_lm = collections.namedtuple("masked_lm", ["index", "label"]) # pylint: disable=invalid-name 350 | 351 | num_to_predict = min(max_predictions_per_seq, 352 | max(1, int(round(len(tokens) * masked_lm_prob)))) 353 | 354 | masked_lms = [] 355 | covered_indexes = set() 356 | for index in cand_indexes: 357 | if len(masked_lms) >= num_to_predict: 358 | break 359 | if index in covered_indexes: 360 | continue 361 | covered_indexes.add(index) 362 | 363 | masked_token = None 364 | # 80% of the time, replace with [MASK] 365 | if rng.random() < 0.8: 366 | masked_token = "[MASK]" 367 | else: 368 | # 10% of the time, keep original 369 | if rng.random() < 0.5: 370 | masked_token = tokens[index] 371 | # 10% of the time, replace with random word 372 | else: 373 | masked_token = vocab_words[rng.randint(0, len(vocab_words) - 1)] 374 | 375 | output_tokens[index] = masked_token 376 | 377 | masked_lms.append(masked_lm(index=index, label=tokens[index])) 378 | 379 | masked_lms = sorted(masked_lms, key=lambda x: x.index) 380 | 381 | masked_lm_positions = [] 382 | masked_lm_labels = [] 383 | for p in masked_lms: 384 | masked_lm_positions.append(p.index) 385 | masked_lm_labels.append(p.label) 386 | 387 | return (output_tokens, masked_lm_positions, masked_lm_labels) 388 | 389 | 390 | def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng): 391 | """Truncates a pair of sequences to a maximum sequence length.""" 392 | while True: 393 | total_length = len(tokens_a) + len(tokens_b) 394 | if total_length <= max_num_tokens: 395 | break 396 | 397 | trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b 398 | assert len(trunc_tokens) >= 1 399 | 400 | # We want to sometimes truncate from the front and sometimes from the 401 | # back to add more randomness and avoid biases. 
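    # For example, with max_num_tokens == 8, a 6-token tokens_a and a 5-token
    # tokens_b, three tokens are removed in total, each taken from whichever
    # list is currently longer (ties go to tokens_b), and each removal comes
    # from the front or the back with equal probability.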
402 | if rng.random() < 0.5: 403 | del trunc_tokens[0] 404 | else: 405 | trunc_tokens.pop() 406 | 407 | 408 | def main(_): 409 | tf.logging.set_verbosity(tf.logging.INFO) 410 | 411 | tokenizer = tokenization.FullTokenizer( 412 | vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case) 413 | 414 | input_files = [] 415 | for input_pattern in FLAGS.input_file.split(","): 416 | input_files.extend(tf.gfile.Glob(input_pattern)) 417 | 418 | tf.logging.info("*** Reading from input files ***") 419 | for input_file in input_files: 420 | tf.logging.info(" %s", input_file) 421 | 422 | rng = random.Random(FLAGS.random_seed) 423 | instances = create_training_instances( 424 | input_files, tokenizer, FLAGS.max_seq_length, FLAGS.dupe_factor, 425 | FLAGS.short_seq_prob, FLAGS.masked_lm_prob, FLAGS.max_predictions_per_seq, 426 | rng) 427 | 428 | output_files = FLAGS.output_file.split(",") 429 | tf.logging.info("*** Writing to output files ***") 430 | for output_file in output_files: 431 | tf.logging.info(" %s", output_file) 432 | 433 | write_instance_to_example_files(instances, tokenizer, FLAGS.max_seq_length, 434 | FLAGS.max_predictions_per_seq, output_files) 435 | 436 | 437 | if __name__ == "__main__": 438 | flags.mark_flag_as_required("input_file") 439 | flags.mark_flag_as_required("output_file") 440 | flags.mark_flag_as_required("vocab_file") 441 | tf.app.run() 442 | -------------------------------------------------------------------------------- /bert_base/bert/extract_features.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # Copyright 2018 The Google AI Language Team Authors. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | """Extract pre-computed feature vectors from BERT.""" 16 | 17 | from __future__ import absolute_import 18 | from __future__ import division 19 | from __future__ import print_function 20 | 21 | import codecs 22 | import collections 23 | import json 24 | import re 25 | 26 | from bert_base.bert import modeling 27 | from bert_base.bert import tokenization 28 | import tensorflow as tf 29 | 30 | flags = tf.flags 31 | 32 | FLAGS = flags.FLAGS 33 | 34 | flags.DEFINE_string("input_file", None, "") 35 | 36 | flags.DEFINE_string("output_file", None, "") 37 | 38 | flags.DEFINE_string("layers", "-1,-2,-3,-4", "") 39 | 40 | flags.DEFINE_string( 41 | "bert_config_file", None, 42 | "The config json file corresponding to the pre-trained BERT model. " 43 | "This specifies the model architecture.") 44 | 45 | flags.DEFINE_integer( 46 | "max_seq_length", 128, 47 | "The maximum total input sequence length after WordPiece tokenization. 
" 48 | "Sequences longer than this will be truncated, and sequences shorter " 49 | "than this will be padded.") 50 | 51 | flags.DEFINE_string( 52 | "init_checkpoint", None, 53 | "Initial checkpoint (usually from a pre-trained BERT model).") 54 | 55 | flags.DEFINE_string("vocab_file", None, 56 | "The vocabulary file that the BERT model was trained on.") 57 | 58 | flags.DEFINE_bool( 59 | "do_lower_case", True, 60 | "Whether to lower case the input text. Should be True for uncased " 61 | "models and False for cased models.") 62 | 63 | flags.DEFINE_integer("batch_size", 32, "Batch size for predictions.") 64 | 65 | flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.") 66 | 67 | flags.DEFINE_string("master", None, 68 | "If using a TPU, the address of the master.") 69 | 70 | flags.DEFINE_integer( 71 | "num_tpu_cores", 8, 72 | "Only used if `use_tpu` is True. Total number of TPU cores to use.") 73 | 74 | flags.DEFINE_bool( 75 | "use_one_hot_embeddings", False, 76 | "If True, tf.one_hot will be used for embedding lookups, otherwise " 77 | "tf.nn.embedding_lookup will be used. On TPUs, this should be True " 78 | "since it is much faster.") 79 | 80 | 81 | class InputExample(object): 82 | 83 | def __init__(self, unique_id, text_a, text_b): 84 | self.unique_id = unique_id 85 | self.text_a = text_a 86 | self.text_b = text_b 87 | 88 | 89 | class InputFeatures(object): 90 | """A single set of features of data.""" 91 | 92 | def __init__(self, unique_id, tokens, input_ids, input_mask, input_type_ids): 93 | self.unique_id = unique_id 94 | self.tokens = tokens 95 | self.input_ids = input_ids 96 | self.input_mask = input_mask 97 | self.input_type_ids = input_type_ids 98 | 99 | 100 | def input_fn_builder(features, seq_length): 101 | """Creates an `input_fn` closure to be passed to TPUEstimator.""" 102 | 103 | all_unique_ids = [] 104 | all_input_ids = [] 105 | all_input_mask = [] 106 | all_input_type_ids = [] 107 | 108 | for feature in features: 109 | all_unique_ids.append(feature.unique_id) 110 | all_input_ids.append(feature.input_ids) 111 | all_input_mask.append(feature.input_mask) 112 | all_input_type_ids.append(feature.input_type_ids) 113 | 114 | def input_fn(params): 115 | """The actual input function.""" 116 | batch_size = params["batch_size"] 117 | 118 | num_examples = len(features) 119 | 120 | # This is for demo purposes and does NOT scale to large data sets. We do 121 | # not use Dataset.from_generator() because that uses tf.py_func which is 122 | # not TPU compatible. The right way to load data is with TFRecordReader. 
123 | d = tf.data.Dataset.from_tensor_slices({ 124 | "unique_ids": 125 | tf.constant(all_unique_ids, shape=[num_examples], dtype=tf.int32), 126 | "input_ids": 127 | tf.constant( 128 | all_input_ids, shape=[num_examples, seq_length], 129 | dtype=tf.int32), 130 | "input_mask": 131 | tf.constant( 132 | all_input_mask, 133 | shape=[num_examples, seq_length], 134 | dtype=tf.int32), 135 | "input_type_ids": 136 | tf.constant( 137 | all_input_type_ids, 138 | shape=[num_examples, seq_length], 139 | dtype=tf.int32), 140 | }) 141 | 142 | d = d.batch(batch_size=batch_size, drop_remainder=False) 143 | return d 144 | 145 | return input_fn 146 | 147 | 148 | def model_fn_builder(bert_config, init_checkpoint, layer_indexes, use_tpu, 149 | use_one_hot_embeddings): 150 | """Returns `model_fn` closure for TPUEstimator.""" 151 | 152 | def model_fn(features, labels, mode, params): # pylint: disable=unused-argument 153 | """The `model_fn` for TPUEstimator.""" 154 | 155 | unique_ids = features["unique_ids"] 156 | input_ids = features["input_ids"] 157 | input_mask = features["input_mask"] 158 | input_type_ids = features["input_type_ids"] 159 | 160 | model = modeling.BertModel( 161 | config=bert_config, 162 | is_training=False, 163 | input_ids=input_ids, 164 | input_mask=input_mask, 165 | token_type_ids=input_type_ids, 166 | use_one_hot_embeddings=use_one_hot_embeddings) 167 | 168 | if mode != tf.estimator.ModeKeys.PREDICT: 169 | raise ValueError("Only PREDICT modes are supported: %s" % (mode)) 170 | 171 | tvars = tf.trainable_variables() 172 | scaffold_fn = None 173 | (assignment_map, 174 | initialized_variable_names) = modeling.get_assignment_map_from_checkpoint( 175 | tvars, init_checkpoint) 176 | if use_tpu: 177 | 178 | def tpu_scaffold(): 179 | tf.train.init_from_checkpoint(init_checkpoint, assignment_map) 180 | return tf.train.Scaffold() 181 | 182 | scaffold_fn = tpu_scaffold 183 | else: 184 | tf.train.init_from_checkpoint(init_checkpoint, assignment_map) 185 | 186 | tf.logging.info("**** Trainable Variables ****") 187 | for var in tvars: 188 | init_string = "" 189 | if var.name in initialized_variable_names: 190 | init_string = ", *INIT_FROM_CKPT*" 191 | tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape, 192 | init_string) 193 | 194 | all_layers = model.get_all_encoder_layers() 195 | 196 | predictions = { 197 | "unique_id": unique_ids, 198 | } 199 | 200 | for (i, layer_index) in enumerate(layer_indexes): 201 | predictions["layer_output_%d" % i] = all_layers[layer_index] 202 | 203 | output_spec = tf.contrib.tpu.TPUEstimatorSpec( 204 | mode=mode, predictions=predictions, scaffold_fn=scaffold_fn) 205 | return output_spec 206 | 207 | return model_fn 208 | 209 | 210 | def convert_examples_to_features(examples, seq_length, tokenizer): 211 | """Loads a data file into a list of `InputBatch`s.""" 212 | 213 | features = [] 214 | for (ex_index, example) in enumerate(examples): 215 | tokens_a = tokenizer.tokenize(example.text_a) 216 | 217 | tokens_b = None 218 | if example.text_b: 219 | tokens_b = tokenizer.tokenize(example.text_b) 220 | 221 | if tokens_b: 222 | # Modifies `tokens_a` and `tokens_b` in place so that the total 223 | # length is less than the specified length. 
224 | # Account for [CLS], [SEP], [SEP] with "- 3" 225 | _truncate_seq_pair(tokens_a, tokens_b, seq_length - 3) 226 | else: 227 | # Account for [CLS] and [SEP] with "- 2" 228 | if len(tokens_a) > seq_length - 2: 229 | tokens_a = tokens_a[0:(seq_length - 2)] 230 | 231 | # The convention in BERT is: 232 | # (a) For sequence pairs: 233 | # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] 234 | # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 235 | # (b) For single sequences: 236 | # tokens: [CLS] the dog is hairy . [SEP] 237 | # type_ids: 0 0 0 0 0 0 0 238 | # 239 | # Where "type_ids" are used to indicate whether this is the first 240 | # sequence or the second sequence. The embedding vectors for `type=0` and 241 | # `type=1` were learned during pre-training and are added to the wordpiece 242 | # embedding vector (and position vector). This is not *strictly* necessary 243 | # since the [SEP] token unambiguously separates the sequences, but it makes 244 | # it easier for the model to learn the concept of sequences. 245 | # 246 | # For classification tasks, the first vector (corresponding to [CLS]) is 247 | # used as as the "sentence vector". Note that this only makes sense because 248 | # the entire model is fine-tuned. 249 | tokens = [] 250 | input_type_ids = [] 251 | tokens.append("[CLS]") 252 | input_type_ids.append(0) 253 | for token in tokens_a: 254 | tokens.append(token) 255 | input_type_ids.append(0) 256 | tokens.append("[SEP]") 257 | input_type_ids.append(0) 258 | 259 | if tokens_b: 260 | for token in tokens_b: 261 | tokens.append(token) 262 | input_type_ids.append(1) 263 | tokens.append("[SEP]") 264 | input_type_ids.append(1) 265 | 266 | input_ids = tokenizer.convert_tokens_to_ids(tokens) 267 | 268 | # The mask has 1 for real tokens and 0 for padding tokens. Only real 269 | # tokens are attended to. 270 | input_mask = [1] * len(input_ids) 271 | 272 | # Zero-pad up to the sequence length. 273 | while len(input_ids) < seq_length: 274 | input_ids.append(0) 275 | input_mask.append(0) 276 | input_type_ids.append(0) 277 | 278 | assert len(input_ids) == seq_length 279 | assert len(input_mask) == seq_length 280 | assert len(input_type_ids) == seq_length 281 | 282 | if ex_index < 5: 283 | tf.logging.info("*** Example ***") 284 | tf.logging.info("unique_id: %s" % (example.unique_id)) 285 | tf.logging.info("tokens: %s" % " ".join( 286 | [tokenization.printable_text(x) for x in tokens])) 287 | tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) 288 | tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) 289 | tf.logging.info( 290 | "input_type_ids: %s" % " ".join([str(x) for x in input_type_ids])) 291 | 292 | features.append( 293 | InputFeatures( 294 | unique_id=example.unique_id, 295 | tokens=tokens, 296 | input_ids=input_ids, 297 | input_mask=input_mask, 298 | input_type_ids=input_type_ids)) 299 | return features 300 | 301 | 302 | def _truncate_seq_pair(tokens_a, tokens_b, max_length): 303 | """Truncates a sequence pair in place to the maximum length.""" 304 | 305 | # This is a simple heuristic which will always truncate the longer sequence 306 | # one token at a time. This makes more sense than truncating an equal percent 307 | # of tokens from each, since if one sequence is very short then each token 308 | # that's truncated likely contains more information than a longer sequence. 
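  # For example, with max_length == 8, a 10-token tokens_a and a 3-token
  # tokens_b, every pop comes from tokens_a (it stays the longer sequence
  # throughout), leaving 5 + 3 = 8 tokens.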
309 | while True: 310 | total_length = len(tokens_a) + len(tokens_b) 311 | if total_length <= max_length: 312 | break 313 | if len(tokens_a) > len(tokens_b): 314 | tokens_a.pop() 315 | else: 316 | tokens_b.pop() 317 | 318 | 319 | def read_examples(input_file): 320 | """Read a list of `InputExample`s from an input file.""" 321 | examples = [] 322 | unique_id = 0 323 | with tf.gfile.GFile(input_file, "r") as reader: 324 | while True: 325 | line = tokenization.convert_to_unicode(reader.readline()) 326 | if not line: 327 | break 328 | line = line.strip() 329 | text_a = None 330 | text_b = None 331 | m = re.match(r"^(.*) \|\|\| (.*)$", line) 332 | if m is None: 333 | text_a = line 334 | else: 335 | text_a = m.group(1) 336 | text_b = m.group(2) 337 | examples.append( 338 | InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b)) 339 | unique_id += 1 340 | return examples 341 | 342 | 343 | def main(_): 344 | tf.logging.set_verbosity(tf.logging.INFO) 345 | 346 | layer_indexes = [int(x) for x in FLAGS.layers.split(",")] 347 | 348 | bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file) 349 | 350 | tokenizer = tokenization.FullTokenizer( 351 | vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case) 352 | 353 | is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2 354 | run_config = tf.contrib.tpu.RunConfig( 355 | master=FLAGS.master, 356 | tpu_config=tf.contrib.tpu.TPUConfig( 357 | num_shards=FLAGS.num_tpu_cores, 358 | per_host_input_for_training=is_per_host)) 359 | 360 | examples = read_examples(FLAGS.input_file) 361 | 362 | features = convert_examples_to_features( 363 | examples=examples, seq_length=FLAGS.max_seq_length, tokenizer=tokenizer) 364 | 365 | unique_id_to_feature = {} 366 | for feature in features: 367 | unique_id_to_feature[feature.unique_id] = feature 368 | 369 | model_fn = model_fn_builder( 370 | bert_config=bert_config, 371 | init_checkpoint=FLAGS.init_checkpoint, 372 | layer_indexes=layer_indexes, 373 | use_tpu=FLAGS.use_tpu, 374 | use_one_hot_embeddings=FLAGS.use_one_hot_embeddings) 375 | 376 | # If TPU is not available, this will fall back to normal Estimator on CPU 377 | # or GPU. 
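  # With use_tpu=False the TPUEstimator below runs as an ordinary CPU/GPU
  # Estimator, and the params["batch_size"] seen inside input_fn is taken from
  # the predict_batch_size passed here.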
378 | estimator = tf.contrib.tpu.TPUEstimator( 379 | use_tpu=FLAGS.use_tpu, 380 | model_fn=model_fn, 381 | config=run_config, 382 | predict_batch_size=FLAGS.batch_size) 383 | 384 | input_fn = input_fn_builder( 385 | features=features, seq_length=FLAGS.max_seq_length) 386 | 387 | with codecs.getwriter("utf-8")(tf.gfile.Open(FLAGS.output_file, 388 | "w")) as writer: 389 | for result in estimator.predict(input_fn, yield_single_examples=True): 390 | unique_id = int(result["unique_id"]) 391 | feature = unique_id_to_feature[unique_id] 392 | output_json = collections.OrderedDict() 393 | output_json["linex_index"] = unique_id 394 | all_features = [] 395 | for (i, token) in enumerate(feature.tokens): 396 | all_layers = [] 397 | for (j, layer_index) in enumerate(layer_indexes): 398 | layer_output = result["layer_output_%d" % j] 399 | layers = collections.OrderedDict() 400 | layers["index"] = layer_index 401 | layers["values"] = [ 402 | round(float(x), 6) for x in layer_output[i:(i + 1)].flat 403 | ] 404 | all_layers.append(layers) 405 | features = collections.OrderedDict() 406 | features["token"] = token 407 | features["layers"] = all_layers 408 | all_features.append(features) 409 | output_json["features"] = all_features 410 | writer.write(json.dumps(output_json) + "\n") 411 | 412 | 413 | def convert_lst_to_features(lst_str, seq_length, tokenizer, logger, is_tokenized=False, mask_cls_sep=False): 414 | """Loads a data file into a list of `InputBatch`s.""" 415 | 416 | examples = read_tokenized_examples(lst_str) if is_tokenized else read_line_examples(lst_str) 417 | 418 | _tokenize = lambda x: x if is_tokenized else tokenizer.tokenize(x) 419 | 420 | for (ex_index, example) in enumerate(examples): 421 | tokens_a = _tokenize(example.text_a) 422 | 423 | tokens_b = None 424 | if example.text_b: 425 | tokens_b = _tokenize(example.text_b) 426 | 427 | if tokens_b: 428 | # Modifies `tokens_a` and `tokens_b` in place so that the total 429 | # length is less than the specified length. 430 | # Account for [CLS], [SEP], [SEP] with "- 3" 431 | _truncate_seq_pair(tokens_a, tokens_b, seq_length - 3) 432 | else: 433 | # Account for [CLS] and [SEP] with "- 2" 434 | if len(tokens_a) > seq_length - 2: 435 | tokens_a = tokens_a[0:(seq_length - 2)] 436 | 437 | # The convention in BERT is: 438 | # (a) For sequence pairs: 439 | # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] 440 | # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 441 | # (b) For single sequences: 442 | # tokens: [CLS] the dog is hairy . [SEP] 443 | # type_ids: 0 0 0 0 0 0 0 444 | # 445 | # Where "type_ids" are used to indicate whether this is the first 446 | # sequence or the second sequence. The embedding vectors for `type=0` and 447 | # `type=1` were learned during pre-training and are added to the wordpiece 448 | # embedding vector (and position vector). This is not *strictly* necessary 449 | # since the [SEP] token unambiguously separates the sequences, but it makes 450 | # it easier for the model to learn the concept of sequences. 451 | # 452 | # For classification tasks, the first vector (corresponding to [CLS]) is 453 | # used as as the "sentence vector". Note that this only makes sense because 454 | # the entire model is fine-tuned. 
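        # Note that, unlike convert_examples_to_features above, this variant
        # applies mask_cls_sep: when it is True, the [CLS] and [SEP] positions
        # receive 0 in input_mask, which can be useful when token vectors are
        # later pooled into a sentence embedding.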
455 | tokens = ['[CLS]'] + tokens_a + ['[SEP]'] 456 | input_type_ids = [0] * len(tokens) 457 | input_mask = [int(not mask_cls_sep)] + [1] * len(tokens_a) + [int(not mask_cls_sep)] 458 | 459 | if tokens_b: 460 | tokens += tokens_b + ['[SEP]'] 461 | input_type_ids += [1] * (len(tokens_b) + 1) 462 | input_mask += [1] * len(tokens_b) + [int(not mask_cls_sep)] 463 | 464 | input_ids = tokenizer.convert_tokens_to_ids(tokens) 465 | 466 | # Zero-pad up to the sequence length. more pythonic 467 | pad_len = seq_length - len(input_ids) 468 | input_ids += [0] * pad_len 469 | input_mask += [0] * pad_len 470 | input_type_ids += [0] * pad_len 471 | 472 | assert len(input_ids) == seq_length 473 | assert len(input_mask) == seq_length 474 | assert len(input_type_ids) == seq_length 475 | 476 | # logger.debug('tokens: %s' % ' '.join([tokenization.printable_text(x) for x in tokens])) 477 | # logger.debug('input_ids: %s' % ' '.join([str(x) for x in input_ids])) 478 | # logger.debug('input_mask: %s' % ' '.join([str(x) for x in input_mask])) 479 | # logger.debug('input_type_ids: %s' % ' '.join([str(x) for x in input_type_ids])) 480 | 481 | yield InputFeatures( 482 | unique_id=example.unique_id, 483 | tokens=tokens, 484 | input_ids=input_ids, 485 | input_mask=input_mask, 486 | input_type_ids=input_type_ids) 487 | 488 | 489 | def read_tokenized_examples(lst_strs): 490 | """ 491 | 492 | :param lst_strs: [[]] 每个子元素为一个序列,子元素的每一个元素为这个序列的一个index 493 | :return: 494 | """ 495 | unique_id = 0 496 | # 对lst_list中的数据进行转化为ID 497 | lst_strs = [[tokenization.convert_to_unicode(w) for w in s] for s in lst_strs] 498 | for ss in lst_strs: 499 | text_a = ss 500 | text_b = None 501 | try: 502 | # 这里使用|||对输入的句子进行切分如果存在这个符号,表示输入的是两个句子,即text_a 和text_b, 否则index出错,只会存在test_a 503 | j = ss.index('|||') 504 | text_a = ss[:j] 505 | text_b = ss[(j + 1):] 506 | except ValueError: 507 | pass 508 | yield InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b) 509 | unique_id += 1 510 | 511 | 512 | def read_line_examples(lst_strs): 513 | """Read a list of `InputExample`s from a list of strings.""" 514 | unique_id = 0 515 | for ss in lst_strs: 516 | line = tokenization.convert_to_unicode(ss) 517 | if not line: 518 | continue 519 | line = line.strip() 520 | text_a = None 521 | text_b = None 522 | m = re.match(r"^(.*) \|\|\| (.*)$", line) 523 | if m is None: 524 | text_a = line 525 | else: 526 | text_a = m.group(1) 527 | text_b = m.group(2) 528 | yield InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b) 529 | unique_id += 1 530 | 531 | 532 | if __name__ == "__main__": 533 | flags.mark_flag_as_required("input_file") 534 | flags.mark_flag_as_required("vocab_file") 535 | flags.mark_flag_as_required("bert_config_file") 536 | flags.mark_flag_as_required("init_checkpoint") 537 | flags.mark_flag_as_required("output_file") 538 | tf.app.run() 539 | -------------------------------------------------------------------------------- /bert_base/bert/modeling_test.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # Copyright 2018 The Google AI Language Team Authors. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 
6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | from __future__ import absolute_import 16 | from __future__ import division 17 | from __future__ import print_function 18 | 19 | import collections 20 | import json 21 | import random 22 | import re 23 | 24 | import modeling 25 | import six 26 | import tensorflow as tf 27 | 28 | 29 | class BertModelTest(tf.test.TestCase): 30 | 31 | class BertModelTester(object): 32 | 33 | def __init__(self, 34 | parent, 35 | batch_size=13, 36 | seq_length=7, 37 | is_training=True, 38 | use_input_mask=True, 39 | use_token_type_ids=True, 40 | vocab_size=99, 41 | hidden_size=32, 42 | num_hidden_layers=5, 43 | num_attention_heads=4, 44 | intermediate_size=37, 45 | hidden_act="gelu", 46 | hidden_dropout_prob=0.1, 47 | attention_probs_dropout_prob=0.1, 48 | max_position_embeddings=512, 49 | type_vocab_size=16, 50 | initializer_range=0.02, 51 | scope=None): 52 | self.parent = parent 53 | self.batch_size = batch_size 54 | self.seq_length = seq_length 55 | self.is_training = is_training 56 | self.use_input_mask = use_input_mask 57 | self.use_token_type_ids = use_token_type_ids 58 | self.vocab_size = vocab_size 59 | self.hidden_size = hidden_size 60 | self.num_hidden_layers = num_hidden_layers 61 | self.num_attention_heads = num_attention_heads 62 | self.intermediate_size = intermediate_size 63 | self.hidden_act = hidden_act 64 | self.hidden_dropout_prob = hidden_dropout_prob 65 | self.attention_probs_dropout_prob = attention_probs_dropout_prob 66 | self.max_position_embeddings = max_position_embeddings 67 | self.type_vocab_size = type_vocab_size 68 | self.initializer_range = initializer_range 69 | self.scope = scope 70 | 71 | def create_model(self): 72 | input_ids = BertModelTest.ids_tensor([self.batch_size, self.seq_length], 73 | self.vocab_size) 74 | 75 | input_mask = None 76 | if self.use_input_mask: 77 | input_mask = BertModelTest.ids_tensor( 78 | [self.batch_size, self.seq_length], vocab_size=2) 79 | 80 | token_type_ids = None 81 | if self.use_token_type_ids: 82 | token_type_ids = BertModelTest.ids_tensor( 83 | [self.batch_size, self.seq_length], self.type_vocab_size) 84 | 85 | config = modeling.BertConfig( 86 | vocab_size=self.vocab_size, 87 | hidden_size=self.hidden_size, 88 | num_hidden_layers=self.num_hidden_layers, 89 | num_attention_heads=self.num_attention_heads, 90 | intermediate_size=self.intermediate_size, 91 | hidden_act=self.hidden_act, 92 | hidden_dropout_prob=self.hidden_dropout_prob, 93 | attention_probs_dropout_prob=self.attention_probs_dropout_prob, 94 | max_position_embeddings=self.max_position_embeddings, 95 | type_vocab_size=self.type_vocab_size, 96 | initializer_range=self.initializer_range) 97 | 98 | model = modeling.BertModel( 99 | config=config, 100 | is_training=self.is_training, 101 | input_ids=input_ids, 102 | input_mask=input_mask, 103 | token_type_ids=token_type_ids, 104 | scope=self.scope) 105 | 106 | outputs = { 107 | "embedding_output": model.get_embedding_output(), 108 | "sequence_output": model.get_sequence_output(), 109 | "pooled_output": model.get_pooled_output(), 110 | "all_encoder_layers": model.get_all_encoder_layers(), 111 | } 
112 | return outputs 113 | 114 | def check_output(self, result): 115 | self.parent.assertAllEqual( 116 | result["embedding_output"].shape, 117 | [self.batch_size, self.seq_length, self.hidden_size]) 118 | 119 | self.parent.assertAllEqual( 120 | result["sequence_output"].shape, 121 | [self.batch_size, self.seq_length, self.hidden_size]) 122 | 123 | self.parent.assertAllEqual(result["pooled_output"].shape, 124 | [self.batch_size, self.hidden_size]) 125 | 126 | def test_default(self): 127 | self.run_tester(BertModelTest.BertModelTester(self)) 128 | 129 | def test_config_to_json_string(self): 130 | config = modeling.BertConfig(vocab_size=99, hidden_size=37) 131 | obj = json.loads(config.to_json_string()) 132 | self.assertEqual(obj["vocab_size"], 99) 133 | self.assertEqual(obj["hidden_size"], 37) 134 | 135 | def run_tester(self, tester): 136 | with self.test_session() as sess: 137 | ops = tester.create_model() 138 | init_op = tf.group(tf.global_variables_initializer(), 139 | tf.local_variables_initializer()) 140 | sess.run(init_op) 141 | output_result = sess.run(ops) 142 | tester.check_output(output_result) 143 | 144 | self.assert_all_tensors_reachable(sess, [init_op, ops]) 145 | 146 | @classmethod 147 | def ids_tensor(cls, shape, vocab_size, rng=None, name=None): 148 | """Creates a random int32 tensor of the shape within the vocab size.""" 149 | if rng is None: 150 | rng = random.Random() 151 | 152 | total_dims = 1 153 | for dim in shape: 154 | total_dims *= dim 155 | 156 | values = [] 157 | for _ in range(total_dims): 158 | values.append(rng.randint(0, vocab_size - 1)) 159 | 160 | return tf.constant(value=values, dtype=tf.int32, shape=shape, name=name) 161 | 162 | def assert_all_tensors_reachable(self, sess, outputs): 163 | """Checks that all the tensors in the graph are reachable from outputs.""" 164 | graph = sess.graph 165 | 166 | ignore_strings = [ 167 | "^.*/assert_less_equal/.*$", 168 | "^.*/dilation_rate$", 169 | "^.*/Tensordot/concat$", 170 | "^.*/Tensordot/concat/axis$", 171 | "^testing/.*$", 172 | ] 173 | 174 | ignore_regexes = [re.compile(x) for x in ignore_strings] 175 | 176 | unreachable = self.get_unreachable_ops(graph, outputs) 177 | filtered_unreachable = [] 178 | for x in unreachable: 179 | do_ignore = False 180 | for r in ignore_regexes: 181 | m = r.match(x.name) 182 | if m is not None: 183 | do_ignore = True 184 | if do_ignore: 185 | continue 186 | filtered_unreachable.append(x) 187 | unreachable = filtered_unreachable 188 | 189 | self.assertEqual( 190 | len(unreachable), 0, "The following ops are unreachable: %s" % 191 | (" ".join([x.name for x in unreachable]))) 192 | 193 | @classmethod 194 | def get_unreachable_ops(cls, graph, outputs): 195 | """Finds all of the tensors in graph that are unreachable from outputs.""" 196 | outputs = cls.flatten_recursive(outputs) 197 | output_to_op = collections.defaultdict(list) 198 | op_to_all = collections.defaultdict(list) 199 | assign_out_to_in = collections.defaultdict(list) 200 | 201 | for op in graph.get_operations(): 202 | for x in op.inputs: 203 | op_to_all[op.name].append(x.name) 204 | for y in op.outputs: 205 | output_to_op[y.name].append(op.name) 206 | op_to_all[op.name].append(y.name) 207 | if str(op.type) == "Assign": 208 | for y in op.outputs: 209 | for x in op.inputs: 210 | assign_out_to_in[y.name].append(x.name) 211 | 212 | assign_groups = collections.defaultdict(list) 213 | for out_name in assign_out_to_in.keys(): 214 | name_group = assign_out_to_in[out_name] 215 | for n1 in name_group: 216 | 
assign_groups[n1].append(out_name) 217 | for n2 in name_group: 218 | if n1 != n2: 219 | assign_groups[n1].append(n2) 220 | 221 | seen_tensors = {} 222 | stack = [x.name for x in outputs] 223 | while stack: 224 | name = stack.pop() 225 | if name in seen_tensors: 226 | continue 227 | seen_tensors[name] = True 228 | 229 | if name in output_to_op: 230 | for op_name in output_to_op[name]: 231 | if op_name in op_to_all: 232 | for input_name in op_to_all[op_name]: 233 | if input_name not in stack: 234 | stack.append(input_name) 235 | 236 | expanded_names = [] 237 | if name in assign_groups: 238 | for assign_name in assign_groups[name]: 239 | expanded_names.append(assign_name) 240 | 241 | for expanded_name in expanded_names: 242 | if expanded_name not in stack: 243 | stack.append(expanded_name) 244 | 245 | unreachable_ops = [] 246 | for op in graph.get_operations(): 247 | is_unreachable = False 248 | all_names = [x.name for x in op.inputs] + [x.name for x in op.outputs] 249 | for name in all_names: 250 | if name not in seen_tensors: 251 | is_unreachable = True 252 | if is_unreachable: 253 | unreachable_ops.append(op) 254 | return unreachable_ops 255 | 256 | @classmethod 257 | def flatten_recursive(cls, item): 258 | """Flattens (potentially nested) a tuple/dictionary/list to a list.""" 259 | output = [] 260 | if isinstance(item, list): 261 | output.extend(item) 262 | elif isinstance(item, tuple): 263 | output.extend(list(item)) 264 | elif isinstance(item, dict): 265 | for (_, v) in six.iteritems(item): 266 | output.append(v) 267 | else: 268 | return [item] 269 | 270 | flat_output = [] 271 | for x in output: 272 | flat_output.extend(cls.flatten_recursive(x)) 273 | return flat_output 274 | 275 | 276 | if __name__ == "__main__": 277 | tf.test.main() 278 | -------------------------------------------------------------------------------- /bert_base/bert/multilingual.md: -------------------------------------------------------------------------------- 1 | ## Models 2 | 3 | There are two multilingual models currently available. We do not plan to release 4 | more single-language models, but we may release `BERT-Large` versions of these 5 | two in the future: 6 | 7 | * **[`BERT-Base, Multilingual`](https://storage.googleapis.com/bert_models/2018_11_03/multilingual_L-12_H-768_A-12.zip)**: 8 | 102 languages, 12-layer, 768-hidden, 12-heads, 110M parameters 9 | * **[`BERT-Base, Chinese`](https://storage.googleapis.com/bert_models/2018_11_03/chinese_L-12_H-768_A-12.zip)**: 10 | Chinese Simplified and Traditional, 12-layer, 768-hidden, 12-heads, 110M 11 | parameters 12 | 13 | See the [list of languages](#list-of-languages) that the Multilingual model 14 | supports. The Multilingual model does include Chinese (and English), but if your 15 | fine-tuning data is Chinese-only, then the Chinese model will likely produce 16 | better results. 17 | 18 | ## Results 19 | 20 | To evaluate these systems, we use the 21 | [XNLI dataset](https://github.com/facebookresearch/XNLI) dataset, which is a 22 | version of [MultiNLI](https://www.nyu.edu/projects/bowman/multinli/) where the 23 | dev and test sets have been translated (by humans) into 15 languages. Note that 24 | the training set was *machine* translated (we used the translations provided by 25 | XNLI, not Google NMT). 
For clarity, we only report on 6 languages below: 26 | 27 | 28 | 29 | | System | English | Chinese | Spanish | German | Arabic | Urdu | 30 | | ------------------------------- | -------- | -------- | -------- | -------- | -------- | -------- | 31 | | XNLI Baseline - Translate Train | 73.7 | 67.0 | 68.8 | 66.5 | 65.8 | 56.6 | 32 | | XNLI Baseline - Translate Test | 73.7 | 68.3 | 70.7 | 68.7 | 66.8 | 59.3 | 33 | | BERT -Translate Train | **81.4** | **74.2** | **77.3** | **75.2** | **70.5** | 61.7 | 34 | | BERT - Translate Test | 81.4 | 70.1 | 74.9 | 74.4 | 70.4 | **62.1** | 35 | | BERT - Zero Shot | 81.4 | 63.8 | 74.3 | 70.5 | 62.1 | 58.3 | 36 | 37 | 38 | 39 | The first two rows are baselines from the XNLI paper and the last three rows are 40 | our results with BERT. 41 | 42 | **Translate Train** means that the MultiNLI training set was machine translated 43 | from English into the foreign language. So training and evaluation were both 44 | done in the foreign language. Unfortunately, training was done on 45 | machine-translated data, so it is impossible to quantify how much of the lower 46 | accuracy (compared to English) is due to the quality of the machine translation 47 | vs. the quality of the pre-trained model. 48 | 49 | **Translate Test** means that the XNLI test set was machine translated from the 50 | foreign language into English. So training and evaluation were both done on 51 | English. However, test evaluation was done on machine-translated English, so the 52 | accuracy depends on the quality of the machine translation system. 53 | 54 | **Zero Shot** means that the Multilingual BERT system was fine-tuned on English 55 | MultiNLI, and then evaluated on the foreign language XNLI test. In this case, 56 | machine translation was not involved at all in either the pre-training or 57 | fine-tuning. 58 | 59 | Note that the English result is worse than the 84.2 MultiNLI baseline because 60 | this training used Multilingual BERT rather than English-only BERT. This implies 61 | that for high-resource languages, the Multilingual model is somewhat worse than 62 | a single-language model. However, it is not feasible for us to train and 63 | maintain dozens of single-language model. Therefore, if your goal is to maximize 64 | performance with a language other than English or Chinese, you might find it 65 | beneficial to run pre-training for additional steps starting from our 66 | Multilingual model on data from your language of interest. 67 | 68 | Here is a comparison of training Chinese models with the Multilingual 69 | `BERT-Base` and Chinese-only `BERT-Base`: 70 | 71 | System | Chinese 72 | ----------------------- | ------- 73 | XNLI Baseline | 67.0 74 | BERT Multilingual Model | 74.2 75 | BERT Chinese-only Model | 77.2 76 | 77 | Similar to English, the single-language model does 3% better than the 78 | Multilingual model. 79 | 80 | ## Fine-tuning Example 81 | 82 | The multilingual model does **not** require any special consideration or API 83 | changes. We did update the implementation of `BasicTokenizer` in 84 | `tokenization.py` to support Chinese character tokenization, so please update if 85 | you forked it. However, we did not change the tokenization API. 86 | 87 | To test the new models, we did modify `run_classifier.py` to add support for the 88 | [XNLI dataset](https://github.com/facebookresearch/XNLI). This is a 15-language 89 | version of MultiNLI where the dev/test sets have been human-translated, and the 90 | training set has been machine-translated. 
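
As a quick illustration of the `BasicTokenizer` change described above, the snippet below (a minimal sketch that assumes `tokenization.py` from this repository is importable) shows that CJK characters are split into single-character tokens while Latin-script text is tokenized as before:

```python
import tokenization

tokenizer = tokenization.BasicTokenizer()
print(tokenizer.tokenize(u"ah\u535A\u63A8zz"))
# -> [u"ah", u"\u535A", u"\u63A8", u"zz"]
# Each CJK character becomes its own token, while the surrounding ASCII is
# still whitespace/punctuation tokenized.
```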
91 | 92 | To run the fine-tuning code, please download the 93 | [XNLI dev/test set](https://s3.amazonaws.com/xnli/XNLI-1.0.zip) and the 94 | [XNLI machine-translated training set](https://s3.amazonaws.com/xnli/XNLI-MT-1.0.zip) 95 | and then unpack both .zip files into some directory `$XNLI_DIR`. 96 | 97 | To run fine-tuning on XNLI. The language is hard-coded into `run_classifier.py` 98 | (Chinese by default), so please modify `XnliProcessor` if you want to run on 99 | another language. 100 | 101 | This is a large dataset, so this will training will take a few hours on a GPU 102 | (or about 30 minutes on a Cloud TPU). To run an experiment quickly for 103 | debugging, just set `num_train_epochs` to a small value like `0.1`. 104 | 105 | ```shell 106 | export BERT_BASE_DIR=/path/to/bert/chinese_L-12_H-768_A-12 # or multilingual_L-12_H-768_A-12 107 | export XNLI_DIR=/path/to/xnli 108 | 109 | python run_classifier.py \ 110 | --task_name=XNLI \ 111 | --do_train=true \ 112 | --do_eval=true \ 113 | --data_dir=$XNLI_DIR \ 114 | --vocab_file=$BERT_BASE_DIR/vocab.txt \ 115 | --bert_config_file=$BERT_BASE_DIR/bert_config.json \ 116 | --init_checkpoint=$BERT_BASE_DIR/bert_model.ckpt \ 117 | --max_seq_length=128 \ 118 | --train_batch_size=32 \ 119 | --learning_rate=5e-5 \ 120 | --num_train_epochs=2.0 \ 121 | --output_dir=/tmp/xnli_output/ 122 | ``` 123 | 124 | With the Chinese-only model, the results should look something like this: 125 | 126 | ``` 127 | ***** Eval results ***** 128 | eval_accuracy = 0.774116 129 | eval_loss = 0.83554 130 | global_step = 24543 131 | loss = 0.74603 132 | ``` 133 | 134 | ## Details 135 | 136 | ### Data Source and Sampling 137 | 138 | The languages chosen were the 139 | [top 100 languages with the largest Wikipedias](https://meta.wikimedia.org/wiki/List_of_Wikipedias). 140 | The entire Wikipedia dump for each language (excluding user and talk pages) was 141 | taken as the training data for each language 142 | 143 | However, the size of the Wikipedia for a given language varies greatly, and 144 | therefore low-resource languages may be "under-represented" in terms of the 145 | neural network model (under the assumption that languages are "competing" for 146 | limited model capacity to some extent). 147 | 148 | However, the size of a Wikipedia also correlates with the number of speakers of 149 | a language, and we also don't want to overfit the model by performing thousands 150 | of epochs over a tiny Wikipedia for a particular language. 151 | 152 | To balance these two factors, we performed exponentially smoothed weighting of 153 | the data during pre-training data creation (and WordPiece vocab creation). In 154 | other words, let's say that the probability of a language is *P(L)*, e.g., 155 | *P(English) = 0.21* means that after concatenating all of the Wikipedias 156 | together, 21% of our data is English. We exponentiate each probability by some 157 | factor *S* and then re-normalize, and sample from that distribution. In our case 158 | we use *S=0.7*. So, high-resource languages like English will be under-sampled, 159 | and low-resource languages like Icelandic will be over-sampled. E.g., in the 160 | original distribution English would be sampled 1000x more than Icelandic, but 161 | after smoothing it's only sampled 100x more. 162 | 163 | ### Tokenization 164 | 165 | For tokenization, we use a 110k shared WordPiece vocabulary. The word counts are 166 | weighted the same way as the data, so low-resource languages are upweighted by 167 | some factor. 
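
Concretely, the smoothing step amounts to exponentiating and re-normalizing the raw language probabilities. The sketch below is illustrative only: the function name and the two probabilities are made-up placeholders, not the actual corpus statistics used for pre-training.

```python
# Minimal sketch of the exponentially smoothed sampling described above.
def smooth_sampling_probs(lang_probs, s=0.7):
    """Exponentiate each language probability by `s`, then re-normalize."""
    exp = {lang: p ** s for lang, p in lang_probs.items()}
    total = sum(exp.values())
    return {lang: v / total for lang, v in exp.items()}

raw = {"english": 0.21, "icelandic": 0.00021}   # hypothetical shares, not real data
smoothed = smooth_sampling_probs(raw)
# English is under-sampled and Icelandic over-sampled relative to the raw shares:
# the 1000x ratio between them shrinks to 1000**0.7, roughly 126x.
```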
We intentionally do *not* use any marker to denote the input 168 | language (so that zero-shot training can work). 169 | 170 | Because Chinese does not have whitespace characters, we add spaces around every 171 | character in the 172 | [CJK Unicode range](https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_\(Unicode_block\)) 173 | before applying WordPiece. This means that Chinese is effectively 174 | character-tokenized. Note that the CJK Unicode block only includes 175 | Chinese-origin characters and does *not* include Hangul Korean or 176 | Katakana/Hiragana Japanese, which are tokenized with whitespace+WordPiece like 177 | all other languages. 178 | 179 | For all other languages, we apply the 180 | [same recipe as English](https://github.com/google-research/bert#tokenization): 181 | (a) lower casing+accent removal, (b) punctuation splitting, (c) whitespace 182 | tokenization. We understand that accent markers have substantial meaning in some 183 | languages, but felt that the benefits of reducing the effective vocabulary make 184 | up for this. Generally the strong contextual models of BERT should make up for 185 | any ambiguity introduced by stripping accent markers. 186 | 187 | ### List of Languages 188 | 189 | The multilingual model supports the following languages. These languages were 190 | chosen because they are the top 100 languages with the largest Wikipedias: 191 | 192 | * Afrikaans 193 | * Albanian 194 | * Arabic 195 | * Aragonese 196 | * Armenian 197 | * Asturian 198 | * Azerbaijani 199 | * Bashkir 200 | * Basque 201 | * Bavarian 202 | * Belarusian 203 | * Bengali 204 | * Bishnupriya Manipuri 205 | * Bosnian 206 | * Breton 207 | * Bulgarian 208 | * Burmese 209 | * Catalan 210 | * Cebuano 211 | * Chechen 212 | * Chinese (Simplified) 213 | * Chinese (Traditional) 214 | * Chuvash 215 | * Croatian 216 | * Czech 217 | * Danish 218 | * Dutch 219 | * English 220 | * Estonian 221 | * Finnish 222 | * French 223 | * Galician 224 | * Georgian 225 | * German 226 | * Greek 227 | * Gujarati 228 | * Haitian 229 | * Hebrew 230 | * Hindi 231 | * Hungarian 232 | * Icelandic 233 | * Ido 234 | * Indonesian 235 | * Irish 236 | * Italian 237 | * Japanese 238 | * Javanese 239 | * Kannada 240 | * Kazakh 241 | * Kirghiz 242 | * Korean 243 | * Latin 244 | * Latvian 245 | * Lithuanian 246 | * Lombard 247 | * Low Saxon 248 | * Luxembourgish 249 | * Macedonian 250 | * Malagasy 251 | * Malay 252 | * Malayalam 253 | * Marathi 254 | * Minangkabau 255 | * Nepali 256 | * Newar 257 | * Norwegian (Bokmal) 258 | * Norwegian (Nynorsk) 259 | * Occitan 260 | * Persian (Farsi) 261 | * Piedmontese 262 | * Polish 263 | * Portuguese 264 | * Punjabi 265 | * Romanian 266 | * Russian 267 | * Scots 268 | * Serbian 269 | * Serbo-Croatian 270 | * Sicilian 271 | * Slovak 272 | * Slovenian 273 | * South Azerbaijani 274 | * Spanish 275 | * Sundanese 276 | * Swahili 277 | * Swedish 278 | * Tagalog 279 | * Tajik 280 | * Tamil 281 | * Tatar 282 | * Telugu 283 | * Turkish 284 | * Ukrainian 285 | * Urdu 286 | * Uzbek 287 | * Vietnamese 288 | * Volapük 289 | * Waray-Waray 290 | * Welsh 291 | * West 292 | * Western Punjabi 293 | * Yoruba 294 | 295 | The only language which we had to unfortunately exclude was Thai, since it is 296 | the only language (other than Chinese) that does not use whitespace to delimit 297 | words, and it has too many characters-per-word to use character-based 298 | tokenization. 
Our WordPiece algorithm is quadratic with respect to the size of 299 | the input token so very long character strings do not work with it. 300 | -------------------------------------------------------------------------------- /bert_base/bert/optimization.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # Copyright 2018 The Google AI Language Team Authors. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | """Functions and classes related to optimization (weight updates).""" 16 | 17 | from __future__ import absolute_import 18 | from __future__ import division 19 | from __future__ import print_function 20 | 21 | import re 22 | import tensorflow as tf 23 | 24 | 25 | def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu): 26 | """Creates an optimizer training op.""" 27 | global_step = tf.train.get_or_create_global_step() 28 | 29 | learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32) 30 | 31 | # Implements linear decay of the learning rate. 32 | learning_rate = tf.train.polynomial_decay( 33 | learning_rate, 34 | global_step, 35 | num_train_steps, 36 | end_learning_rate=0.0, 37 | power=1.0, 38 | cycle=False) 39 | 40 | # Implements linear warmup. I.e., if global_step < num_warmup_steps, the 41 | # learning rate will be `global_step/num_warmup_steps * init_lr`. 42 | if num_warmup_steps: 43 | global_steps_int = tf.cast(global_step, tf.int32) 44 | warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32) 45 | 46 | global_steps_float = tf.cast(global_steps_int, tf.float32) 47 | warmup_steps_float = tf.cast(warmup_steps_int, tf.float32) 48 | 49 | warmup_percent_done = global_steps_float / warmup_steps_float 50 | warmup_learning_rate = init_lr * warmup_percent_done 51 | 52 | is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32) 53 | learning_rate = ( 54 | (1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate) 55 | 56 | # It is recommended that you use this optimizer for fine tuning, since this 57 | # is how the model was trained (note that the Adam m/v variables are NOT 58 | # loaded from init_checkpoint.) 59 | optimizer = AdamWeightDecayOptimizer( 60 | learning_rate=learning_rate, 61 | weight_decay_rate=0.01, 62 | beta_1=0.9, 63 | beta_2=0.999, 64 | epsilon=1e-6, 65 | exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"]) 66 | 67 | if use_tpu: 68 | optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer) 69 | 70 | tvars = tf.trainable_variables() 71 | grads = tf.gradients(loss, tvars) 72 | 73 | # This is how the model was pre-trained. 
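# tf.clip_by_global_norm rescales all gradients jointly: when the global norm
# sqrt(sum_i ||grad_i||^2) exceeds clip_norm (1.0 here), every gradient is
# multiplied by clip_norm / global_norm, bounding the step size without
# changing the update direction.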
74 | (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0) 75 | 76 | train_op = optimizer.apply_gradients( 77 | zip(grads, tvars), global_step=global_step) 78 | 79 | new_global_step = global_step + 1 80 | train_op = tf.group(train_op, [global_step.assign(new_global_step)]) 81 | return train_op 82 | 83 | 84 | class AdamWeightDecayOptimizer(tf.train.Optimizer): 85 | """A basic Adam optimizer that includes "correct" L2 weight decay.""" 86 | 87 | def __init__(self, 88 | learning_rate, 89 | weight_decay_rate=0.0, 90 | beta_1=0.9, 91 | beta_2=0.999, 92 | epsilon=1e-6, 93 | exclude_from_weight_decay=None, 94 | name="AdamWeightDecayOptimizer"): 95 | """Constructs a AdamWeightDecayOptimizer.""" 96 | super(AdamWeightDecayOptimizer, self).__init__(False, name) 97 | 98 | self.learning_rate = learning_rate 99 | self.weight_decay_rate = weight_decay_rate 100 | self.beta_1 = beta_1 101 | self.beta_2 = beta_2 102 | self.epsilon = epsilon 103 | self.exclude_from_weight_decay = exclude_from_weight_decay 104 | 105 | def apply_gradients(self, grads_and_vars, global_step=None, name=None): 106 | """See base class.""" 107 | assignments = [] 108 | for (grad, param) in grads_and_vars: 109 | if grad is None or param is None: 110 | continue 111 | 112 | param_name = self._get_variable_name(param.name) 113 | 114 | m = tf.get_variable( 115 | name=param_name + "/adam_m", 116 | shape=param.shape.as_list(), 117 | dtype=tf.float32, 118 | trainable=False, 119 | initializer=tf.zeros_initializer()) 120 | v = tf.get_variable( 121 | name=param_name + "/adam_v", 122 | shape=param.shape.as_list(), 123 | dtype=tf.float32, 124 | trainable=False, 125 | initializer=tf.zeros_initializer()) 126 | 127 | # Standard Adam update. 128 | next_m = ( 129 | tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad)) 130 | next_v = ( 131 | tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2, 132 | tf.square(grad))) 133 | 134 | update = next_m / (tf.sqrt(next_v) + self.epsilon) 135 | 136 | # Just adding the square of the weights to the loss function is *not* 137 | # the correct way of using L2 regularization/weight decay with Adam, 138 | # since that will interact with the m and v parameters in strange ways. 139 | # 140 | # Instead we want ot decay the weights in a manner that doesn't interact 141 | # with the m/v parameters. This is equivalent to adding the square 142 | # of the weights to the loss with plain (non-momentum) SGD. 
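# Concretely, the parameter update assembled below is
#   param <- param - learning_rate * (m / (sqrt(v) + epsilon) + weight_decay_rate * param)
# i.e. decoupled ("AdamW"-style) weight decay applied directly to the update
# rather than to the gradient; note that no beta_1/beta_2 bias correction is
# applied to m and v in this implementation.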
143 | if self._do_use_weight_decay(param_name): 144 | update += self.weight_decay_rate * param 145 | 146 | update_with_lr = self.learning_rate * update 147 | 148 | next_param = param - update_with_lr 149 | 150 | assignments.extend( 151 | [param.assign(next_param), 152 | m.assign(next_m), 153 | v.assign(next_v)]) 154 | return tf.group(*assignments, name=name) 155 | 156 | def _do_use_weight_decay(self, param_name): 157 | """Whether to use L2 weight decay for `param_name`.""" 158 | if not self.weight_decay_rate: 159 | return False 160 | if self.exclude_from_weight_decay: 161 | for r in self.exclude_from_weight_decay: 162 | if re.search(r, param_name) is not None: 163 | return False 164 | return True 165 | 166 | def _get_variable_name(self, param_name): 167 | """Get the variable name from the tensor name.""" 168 | m = re.match("^(.*):\\d+$", param_name) 169 | if m is not None: 170 | param_name = m.group(1) 171 | return param_name 172 | -------------------------------------------------------------------------------- /bert_base/bert/optimization_test.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # Copyright 2018 The Google AI Language Team Authors. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | from __future__ import absolute_import 16 | from __future__ import division 17 | from __future__ import print_function 18 | 19 | import optimization 20 | import tensorflow as tf 21 | 22 | 23 | class OptimizationTest(tf.test.TestCase): 24 | 25 | def test_adam(self): 26 | with self.test_session() as sess: 27 | w = tf.get_variable( 28 | "w", 29 | shape=[3], 30 | initializer=tf.constant_initializer([0.1, -0.2, -0.1])) 31 | x = tf.constant([0.4, 0.2, -0.5]) 32 | loss = tf.reduce_mean(tf.square(x - w)) 33 | tvars = tf.trainable_variables() 34 | grads = tf.gradients(loss, tvars) 35 | global_step = tf.train.get_or_create_global_step() 36 | optimizer = optimization.AdamWeightDecayOptimizer(learning_rate=0.2) 37 | train_op = optimizer.apply_gradients(zip(grads, tvars), global_step) 38 | init_op = tf.group(tf.global_variables_initializer(), 39 | tf.local_variables_initializer()) 40 | sess.run(init_op) 41 | for _ in range(100): 42 | sess.run(train_op) 43 | w_np = sess.run(w) 44 | self.assertAllClose(w_np.flat, [0.4, 0.2, -0.5], rtol=1e-2, atol=1e-2) 45 | 46 | 47 | if __name__ == "__main__": 48 | tf.test.main() 49 | -------------------------------------------------------------------------------- /bert_base/bert/requirements.txt: -------------------------------------------------------------------------------- 1 | tensorflow >= 1.11.0 # CPU Version of TensorFlow. 2 | # tensorflow-gpu >= 1.11.0 # GPU version of TensorFlow. 3 | -------------------------------------------------------------------------------- /bert_base/bert/run_pretraining.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # Copyright 2018 The Google AI Language Team Authors. 
3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | """Run masked LM/next sentence masked_lm pre-training for BERT.""" 16 | 17 | from __future__ import absolute_import 18 | from __future__ import division 19 | from __future__ import print_function 20 | 21 | import os 22 | import modeling 23 | import optimization 24 | import tensorflow as tf 25 | 26 | flags = tf.flags 27 | 28 | FLAGS = flags.FLAGS 29 | 30 | ## Required parameters 31 | flags.DEFINE_string( 32 | "bert_config_file", None, 33 | "The config json file corresponding to the pre-trained BERT model. " 34 | "This specifies the model architecture.") 35 | 36 | flags.DEFINE_string( 37 | "input_file", None, 38 | "Input TF example files (can be a glob or comma separated).") 39 | 40 | flags.DEFINE_string( 41 | "output_dir", None, 42 | "The output directory where the model checkpoints will be written.") 43 | 44 | ## Other parameters 45 | flags.DEFINE_string( 46 | "init_checkpoint", None, 47 | "Initial checkpoint (usually from a pre-trained BERT model).") 48 | 49 | flags.DEFINE_integer( 50 | "max_seq_length", 128, 51 | "The maximum total input sequence length after WordPiece tokenization. " 52 | "Sequences longer than this will be truncated, and sequences shorter " 53 | "than this will be padded. Must match data generation.") 54 | 55 | flags.DEFINE_integer( 56 | "max_predictions_per_seq", 20, 57 | "Maximum number of masked LM predictions per sequence. " 58 | "Must match data generation.") 59 | 60 | flags.DEFINE_bool("do_train", False, "Whether to run training.") 61 | 62 | flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.") 63 | 64 | flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.") 65 | 66 | flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.") 67 | 68 | flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.") 69 | 70 | flags.DEFINE_integer("num_train_steps", 100000, "Number of training steps.") 71 | 72 | flags.DEFINE_integer("num_warmup_steps", 10000, "Number of warmup steps.") 73 | 74 | flags.DEFINE_integer("save_checkpoints_steps", 1000, 75 | "How often to save the model checkpoint.") 76 | 77 | flags.DEFINE_integer("iterations_per_loop", 1000, 78 | "How many steps to make in each estimator call.") 79 | 80 | flags.DEFINE_integer("max_eval_steps", 100, "Maximum number of eval steps.") 81 | 82 | flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.") 83 | 84 | tf.flags.DEFINE_string( 85 | "tpu_name", None, 86 | "The Cloud TPU to use for training. This should be either the name " 87 | "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 " 88 | "url.") 89 | 90 | tf.flags.DEFINE_string( 91 | "tpu_zone", None, 92 | "[Optional] GCE zone where the Cloud TPU is located in. 
If not " 93 | "specified, we will attempt to automatically detect the GCE project from " 94 | "metadata.") 95 | 96 | tf.flags.DEFINE_string( 97 | "gcp_project", None, 98 | "[Optional] Project name for the Cloud TPU-enabled project. If not " 99 | "specified, we will attempt to automatically detect the GCE project from " 100 | "metadata.") 101 | 102 | tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.") 103 | 104 | flags.DEFINE_integer( 105 | "num_tpu_cores", 8, 106 | "Only used if `use_tpu` is True. Total number of TPU cores to use.") 107 | 108 | 109 | def model_fn_builder(bert_config, init_checkpoint, learning_rate, 110 | num_train_steps, num_warmup_steps, use_tpu, 111 | use_one_hot_embeddings): 112 | """Returns `model_fn` closure for TPUEstimator.""" 113 | 114 | def model_fn(features, labels, mode, params): # pylint: disable=unused-argument 115 | """The `model_fn` for TPUEstimator.""" 116 | 117 | tf.logging.info("*** Features ***") 118 | for name in sorted(features.keys()): 119 | tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) 120 | 121 | input_ids = features["input_ids"] 122 | input_mask = features["input_mask"] 123 | segment_ids = features["segment_ids"] 124 | masked_lm_positions = features["masked_lm_positions"] 125 | masked_lm_ids = features["masked_lm_ids"] 126 | masked_lm_weights = features["masked_lm_weights"] 127 | next_sentence_labels = features["next_sentence_labels"] 128 | 129 | is_training = (mode == tf.estimator.ModeKeys.TRAIN) 130 | 131 | model = modeling.BertModel( 132 | config=bert_config, 133 | is_training=is_training, 134 | input_ids=input_ids, 135 | input_mask=input_mask, 136 | token_type_ids=segment_ids, 137 | use_one_hot_embeddings=use_one_hot_embeddings) 138 | 139 | (masked_lm_loss, 140 | masked_lm_example_loss, masked_lm_log_probs) = get_masked_lm_output( 141 | bert_config, model.get_sequence_output(), model.get_embedding_table(), 142 | masked_lm_positions, masked_lm_ids, masked_lm_weights) 143 | 144 | (next_sentence_loss, next_sentence_example_loss, 145 | next_sentence_log_probs) = get_next_sentence_output( 146 | bert_config, model.get_pooled_output(), next_sentence_labels) 147 | 148 | total_loss = masked_lm_loss + next_sentence_loss 149 | 150 | tvars = tf.trainable_variables() 151 | 152 | initialized_variable_names = {} 153 | scaffold_fn = None 154 | if init_checkpoint: 155 | (assignment_map, initialized_variable_names 156 | ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) 157 | if use_tpu: 158 | 159 | def tpu_scaffold(): 160 | tf.train.init_from_checkpoint(init_checkpoint, assignment_map) 161 | return tf.train.Scaffold() 162 | 163 | scaffold_fn = tpu_scaffold 164 | else: 165 | tf.train.init_from_checkpoint(init_checkpoint, assignment_map) 166 | 167 | tf.logging.info("**** Trainable Variables ****") 168 | for var in tvars: 169 | init_string = "" 170 | if var.name in initialized_variable_names: 171 | init_string = ", *INIT_FROM_CKPT*" 172 | tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape, 173 | init_string) 174 | 175 | output_spec = None 176 | if mode == tf.estimator.ModeKeys.TRAIN: 177 | train_op = optimization.create_optimizer( 178 | total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu) 179 | 180 | output_spec = tf.contrib.tpu.TPUEstimatorSpec( 181 | mode=mode, 182 | loss=total_loss, 183 | train_op=train_op, 184 | scaffold_fn=scaffold_fn) 185 | elif mode == tf.estimator.ModeKeys.EVAL: 186 | 187 | def metric_fn(masked_lm_example_loss, masked_lm_log_probs, 
masked_lm_ids, 188 | masked_lm_weights, next_sentence_example_loss, 189 | next_sentence_log_probs, next_sentence_labels): 190 | """Computes the loss and accuracy of the model.""" 191 | masked_lm_log_probs = tf.reshape(masked_lm_log_probs, 192 | [-1, masked_lm_log_probs.shape[-1]]) 193 | masked_lm_predictions = tf.argmax( 194 | masked_lm_log_probs, axis=-1, output_type=tf.int32) 195 | masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1]) 196 | masked_lm_ids = tf.reshape(masked_lm_ids, [-1]) 197 | masked_lm_weights = tf.reshape(masked_lm_weights, [-1]) 198 | masked_lm_accuracy = tf.metrics.accuracy( 199 | labels=masked_lm_ids, 200 | predictions=masked_lm_predictions, 201 | weights=masked_lm_weights) 202 | masked_lm_mean_loss = tf.metrics.mean( 203 | values=masked_lm_example_loss, weights=masked_lm_weights) 204 | 205 | next_sentence_log_probs = tf.reshape( 206 | next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]]) 207 | next_sentence_predictions = tf.argmax( 208 | next_sentence_log_probs, axis=-1, output_type=tf.int32) 209 | next_sentence_labels = tf.reshape(next_sentence_labels, [-1]) 210 | next_sentence_accuracy = tf.metrics.accuracy( 211 | labels=next_sentence_labels, predictions=next_sentence_predictions) 212 | next_sentence_mean_loss = tf.metrics.mean( 213 | values=next_sentence_example_loss) 214 | 215 | return { 216 | "masked_lm_accuracy": masked_lm_accuracy, 217 | "masked_lm_loss": masked_lm_mean_loss, 218 | "next_sentence_accuracy": next_sentence_accuracy, 219 | "next_sentence_loss": next_sentence_mean_loss, 220 | } 221 | 222 | eval_metrics = (metric_fn, [ 223 | masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids, 224 | masked_lm_weights, next_sentence_example_loss, 225 | next_sentence_log_probs, next_sentence_labels 226 | ]) 227 | output_spec = tf.contrib.tpu.TPUEstimatorSpec( 228 | mode=mode, 229 | loss=total_loss, 230 | eval_metrics=eval_metrics, 231 | scaffold_fn=scaffold_fn) 232 | else: 233 | raise ValueError("Only TRAIN and EVAL modes are supported: %s" % (mode)) 234 | 235 | return output_spec 236 | 237 | return model_fn 238 | 239 | 240 | def get_masked_lm_output(bert_config, input_tensor, output_weights, positions, 241 | label_ids, label_weights): 242 | """Get loss and log probs for the masked LM.""" 243 | input_tensor = gather_indexes(input_tensor, positions) 244 | 245 | with tf.variable_scope("cls/predictions"): 246 | # We apply one more non-linear transformation before the output layer. 247 | # This matrix is not used after pre-training. 248 | with tf.variable_scope("transform"): 249 | input_tensor = tf.layers.dense( 250 | input_tensor, 251 | units=bert_config.hidden_size, 252 | activation=modeling.get_activation(bert_config.hidden_act), 253 | kernel_initializer=modeling.create_initializer( 254 | bert_config.initializer_range)) 255 | input_tensor = modeling.layer_norm(input_tensor) 256 | 257 | # The output weights are the same as the input embeddings, but there is 258 | # an output-only bias for each token. 
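# Note that `output_weights` here is the embedding table passed in by the
# caller (model.get_embedding_table() in model_fn), so the vocabulary
# projection below ties its weights to the input embeddings; only
# `output_bias` is a newly created variable.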
259 | output_bias = tf.get_variable( 260 | "output_bias", 261 | shape=[bert_config.vocab_size], 262 | initializer=tf.zeros_initializer()) 263 | logits = tf.matmul(input_tensor, output_weights, transpose_b=True) 264 | logits = tf.nn.bias_add(logits, output_bias) 265 | log_probs = tf.nn.log_softmax(logits, axis=-1) 266 | 267 | label_ids = tf.reshape(label_ids, [-1]) 268 | label_weights = tf.reshape(label_weights, [-1]) 269 | 270 | one_hot_labels = tf.one_hot( 271 | label_ids, depth=bert_config.vocab_size, dtype=tf.float32) 272 | 273 | # The `positions` tensor might be zero-padded (if the sequence is too 274 | # short to have the maximum number of predictions). The `label_weights` 275 | # tensor has a value of 1.0 for every real prediction and 0.0 for the 276 | # padding predictions. 277 | per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1]) 278 | numerator = tf.reduce_sum(label_weights * per_example_loss) 279 | denominator = tf.reduce_sum(label_weights) + 1e-5 280 | loss = numerator / denominator 281 | 282 | return (loss, per_example_loss, log_probs) 283 | 284 | 285 | def get_next_sentence_output(bert_config, input_tensor, labels): 286 | """Get loss and log probs for the next sentence prediction.""" 287 | 288 | # Simple binary classification. Note that 0 is "next sentence" and 1 is 289 | # "random sentence". This weight matrix is not used after pre-training. 290 | with tf.variable_scope("cls/seq_relationship"): 291 | output_weights = tf.get_variable( 292 | "output_weights", 293 | shape=[2, bert_config.hidden_size], 294 | initializer=modeling.create_initializer(bert_config.initializer_range)) 295 | output_bias = tf.get_variable( 296 | "output_bias", shape=[2], initializer=tf.zeros_initializer()) 297 | 298 | logits = tf.matmul(input_tensor, output_weights, transpose_b=True) 299 | logits = tf.nn.bias_add(logits, output_bias) 300 | log_probs = tf.nn.log_softmax(logits, axis=-1) 301 | labels = tf.reshape(labels, [-1]) 302 | one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32) 303 | per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1) 304 | loss = tf.reduce_mean(per_example_loss) 305 | return (loss, per_example_loss, log_probs) 306 | 307 | 308 | def gather_indexes(sequence_tensor, positions): 309 | """Gathers the vectors at the specific positions over a minibatch.""" 310 | sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3) 311 | batch_size = sequence_shape[0] 312 | seq_length = sequence_shape[1] 313 | width = sequence_shape[2] 314 | 315 | flat_offsets = tf.reshape( 316 | tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1]) 317 | flat_positions = tf.reshape(positions + flat_offsets, [-1]) 318 | flat_sequence_tensor = tf.reshape(sequence_tensor, 319 | [batch_size * seq_length, width]) 320 | output_tensor = tf.gather(flat_sequence_tensor, flat_positions) 321 | return output_tensor 322 | 323 | 324 | def input_fn_builder(input_files, 325 | max_seq_length, 326 | max_predictions_per_seq, 327 | is_training, 328 | num_cpu_threads=4): 329 | """Creates an `input_fn` closure to be passed to TPUEstimator.""" 330 | 331 | def input_fn(params): 332 | """The actual input function.""" 333 | batch_size = params["batch_size"] 334 | 335 | name_to_features = { 336 | "input_ids": 337 | tf.FixedLenFeature([max_seq_length], tf.int64), 338 | "input_mask": 339 | tf.FixedLenFeature([max_seq_length], tf.int64), 340 | "segment_ids": 341 | tf.FixedLenFeature([max_seq_length], tf.int64), 342 | "masked_lm_positions": 343 | 
tf.FixedLenFeature([max_predictions_per_seq], tf.int64), 344 | "masked_lm_ids": 345 | tf.FixedLenFeature([max_predictions_per_seq], tf.int64), 346 | "masked_lm_weights": 347 | tf.FixedLenFeature([max_predictions_per_seq], tf.float32), 348 | "next_sentence_labels": 349 | tf.FixedLenFeature([1], tf.int64), 350 | } 351 | 352 | # For training, we want a lot of parallel reading and shuffling. 353 | # For eval, we want no shuffling and parallel reading doesn't matter. 354 | if is_training: 355 | d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files)) 356 | d = d.repeat() 357 | d = d.shuffle(buffer_size=len(input_files)) 358 | 359 | # `cycle_length` is the number of parallel files that get read. 360 | cycle_length = min(num_cpu_threads, len(input_files)) 361 | 362 | # `sloppy` mode means that the interleaving is not exact. This adds 363 | # even more randomness to the training pipeline. 364 | d = d.apply( 365 | tf.contrib.data.parallel_interleave( 366 | tf.data.TFRecordDataset, 367 | sloppy=is_training, 368 | cycle_length=cycle_length)) 369 | d = d.shuffle(buffer_size=100) 370 | else: 371 | d = tf.data.TFRecordDataset(input_files) 372 | # Since we evaluate for a fixed number of steps we don't want to encounter 373 | # out-of-range exceptions. 374 | d = d.repeat() 375 | 376 | # We must `drop_remainder` on training because the TPU requires fixed 377 | # size dimensions. For eval, we assume we are evaluating on the CPU or GPU 378 | # and we *don't* want to drop the remainder, otherwise we wont cover 379 | # every sample. 380 | d = d.apply( 381 | tf.contrib.data.map_and_batch( 382 | lambda record: _decode_record(record, name_to_features), 383 | batch_size=batch_size, 384 | num_parallel_batches=num_cpu_threads, 385 | drop_remainder=True)) 386 | return d 387 | 388 | return input_fn 389 | 390 | 391 | def _decode_record(record, name_to_features): 392 | """Decodes a record to a TensorFlow example.""" 393 | example = tf.parse_single_example(record, name_to_features) 394 | 395 | # tf.Example only supports tf.int64, but the TPU only supports tf.int32. 396 | # So cast all int64 to int32. 
397 | for name in list(example.keys()): 398 | t = example[name] 399 | if t.dtype == tf.int64: 400 | t = tf.to_int32(t) 401 | example[name] = t 402 | 403 | return example 404 | 405 | 406 | def main(_): 407 | tf.logging.set_verbosity(tf.logging.INFO) 408 | 409 | if not FLAGS.do_train and not FLAGS.do_eval: 410 | raise ValueError("At least one of `do_train` or `do_eval` must be True.") 411 | 412 | bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file) 413 | 414 | tf.gfile.MakeDirs(FLAGS.output_dir) 415 | 416 | input_files = [] 417 | for input_pattern in FLAGS.input_file.split(","): 418 | input_files.extend(tf.gfile.Glob(input_pattern)) 419 | 420 | tf.logging.info("*** Input Files ***") 421 | for input_file in input_files: 422 | tf.logging.info(" %s" % input_file) 423 | 424 | tpu_cluster_resolver = None 425 | if FLAGS.use_tpu and FLAGS.tpu_name: 426 | tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver( 427 | FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project) 428 | 429 | is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2 430 | run_config = tf.contrib.tpu.RunConfig( 431 | cluster=tpu_cluster_resolver, 432 | master=FLAGS.master, 433 | model_dir=FLAGS.output_dir, 434 | save_checkpoints_steps=FLAGS.save_checkpoints_steps, 435 | tpu_config=tf.contrib.tpu.TPUConfig( 436 | iterations_per_loop=FLAGS.iterations_per_loop, 437 | num_shards=FLAGS.num_tpu_cores, 438 | per_host_input_for_training=is_per_host)) 439 | 440 | model_fn = model_fn_builder( 441 | bert_config=bert_config, 442 | init_checkpoint=FLAGS.init_checkpoint, 443 | learning_rate=FLAGS.learning_rate, 444 | num_train_steps=FLAGS.num_train_steps, 445 | num_warmup_steps=FLAGS.num_warmup_steps, 446 | use_tpu=FLAGS.use_tpu, 447 | use_one_hot_embeddings=FLAGS.use_tpu) 448 | 449 | # If TPU is not available, this will fall back to normal Estimator on CPU 450 | # or GPU. 
451 | estimator = tf.contrib.tpu.TPUEstimator( 452 | use_tpu=FLAGS.use_tpu, 453 | model_fn=model_fn, 454 | config=run_config, 455 | train_batch_size=FLAGS.train_batch_size, 456 | eval_batch_size=FLAGS.eval_batch_size) 457 | 458 | if FLAGS.do_train: 459 | tf.logging.info("***** Running training *****") 460 | tf.logging.info(" Batch size = %d", FLAGS.train_batch_size) 461 | train_input_fn = input_fn_builder( 462 | input_files=input_files, 463 | max_seq_length=FLAGS.max_seq_length, 464 | max_predictions_per_seq=FLAGS.max_predictions_per_seq, 465 | is_training=True) 466 | estimator.train(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps) 467 | 468 | if FLAGS.do_eval: 469 | tf.logging.info("***** Running evaluation *****") 470 | tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size) 471 | 472 | eval_input_fn = input_fn_builder( 473 | input_files=input_files, 474 | max_seq_length=FLAGS.max_seq_length, 475 | max_predictions_per_seq=FLAGS.max_predictions_per_seq, 476 | is_training=False) 477 | 478 | result = estimator.evaluate( 479 | input_fn=eval_input_fn, steps=FLAGS.max_eval_steps) 480 | 481 | output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt") 482 | with tf.gfile.GFile(output_eval_file, "w") as writer: 483 | tf.logging.info("***** Eval results *****") 484 | for key in sorted(result.keys()): 485 | tf.logging.info(" %s = %s", key, str(result[key])) 486 | writer.write("%s = %s\n" % (key, str(result[key]))) 487 | 488 | 489 | if __name__ == "__main__": 490 | flags.mark_flag_as_required("input_file") 491 | flags.mark_flag_as_required("bert_config_file") 492 | flags.mark_flag_as_required("output_dir") 493 | tf.app.run() 494 | -------------------------------------------------------------------------------- /bert_base/bert/sample_text.txt: -------------------------------------------------------------------------------- 1 | This text is included to make sure Unicode is handled properly: 力加勝北区ᴵᴺᵀᵃছজটডণত 2 | Text should be one-sentence-per-line, with empty lines between documents. 3 | This sample text is public domain and was randomly selected from Project Guttenberg. 4 | 5 | The rain had only ceased with the gray streaks of morning at Blazing Star, and the settlement awoke to a moral sense of cleanliness, and the finding of forgotten knives, tin cups, and smaller camp utensils, where the heavy showers had washed away the debris and dust heaps before the cabin doors. 6 | Indeed, it was recorded in Blazing Star that a fortunate early riser had once picked up on the highway a solid chunk of gold quartz which the rain had freed from its incumbering soil, and washed into immediate and glittering popularity. 7 | Possibly this may have been the reason why early risers in that locality, during the rainy season, adopted a thoughtful habit of body, and seldom lifted their eyes to the rifted or india-ink washed skies above them. 8 | "Cass" Beard had risen early that morning, but not with a view to discovery. 9 | A leak in his cabin roof,--quite consistent with his careless, improvident habits,--had roused him at 4 A. M., with a flooded "bunk" and wet blankets. 10 | The chips from his wood pile refused to kindle a fire to dry his bed-clothes, and he had recourse to a more provident neighbor's to supply the deficiency. 11 | This was nearly opposite. 12 | Mr. Cassius crossed the highway, and stopped suddenly. 13 | Something glittered in the nearest red pool before him. 14 | Gold, surely! 
15 | But, wonderful to relate, not an irregular, shapeless fragment of crude ore, fresh from Nature's crucible, but a bit of jeweler's handicraft in the form of a plain gold ring. 16 | Looking at it more attentively, he saw that it bore the inscription, "May to Cass." 17 | Like most of his fellow gold-seekers, Cass was superstitious. 18 | 19 | The fountain of classic wisdom, Hypatia herself. 20 | As the ancient sage--the name is unimportant to a monk--pumped water nightly that he might study by day, so I, the guardian of cloaks and parasols, at the sacred doors of her lecture-room, imbibe celestial knowledge. 21 | From my youth I felt in me a soul above the matter-entangled herd. 22 | She revealed to me the glorious fact, that I am a spark of Divinity itself. 23 | A fallen star, I am, sir!' continued he, pensively, stroking his lean stomach--'a fallen star!--fallen, if the dignity of philosophy will allow of the simile, among the hogs of the lower world--indeed, even into the hog-bucket itself. Well, after all, I will show you the way to the Archbishop's. 24 | There is a philosophic pleasure in opening one's treasures to the modest young. 25 | Perhaps you will assist me by carrying this basket of fruit?' And the little man jumped up, put his basket on Philammon's head, and trotted off up a neighbouring street. 26 | Philammon followed, half contemptuous, half wondering at what this philosophy might be, which could feed the self-conceit of anything so abject as his ragged little apish guide; 27 | but the novel roar and whirl of the street, the perpetual stream of busy faces, the line of curricles, palanquins, laden asses, camels, elephants, which met and passed him, and squeezed him up steps and into doorways, as they threaded their way through the great Moon-gate into the ample street beyond, drove everything from his mind but wondering curiosity, and a vague, helpless dread of that great living wilderness, more terrible than any dead wilderness of sand which he had left behind. 28 | Already he longed for the repose, the silence of the Laura--for faces which knew him and smiled upon him; but it was too late to turn back now. 29 | His guide held on for more than a mile up the great main street, crossed in the centre of the city, at right angles, by one equally magnificent, at each end of which, miles away, appeared, dim and distant over the heads of the living stream of passengers, the yellow sand-hills of the desert; 30 | while at the end of the vista in front of them gleamed the blue harbour, through a network of countless masts. 31 | At last they reached the quay at the opposite end of the street; 32 | and there burst on Philammon's astonished eyes a vast semicircle of blue sea, ringed with palaces and towers. 33 | He stopped involuntarily; and his little guide stopped also, and looked askance at the young monk, to watch the effect which that grand panorama should produce on him. 34 | -------------------------------------------------------------------------------- /bert_base/bert/tokenization.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # Copyright 2018 The Google AI Language Team Authors. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 
6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | """Tokenization classes.""" 16 | 17 | from __future__ import absolute_import 18 | from __future__ import division 19 | from __future__ import print_function 20 | 21 | import collections 22 | import unicodedata 23 | import six 24 | import tensorflow as tf 25 | 26 | 27 | def convert_to_unicode(text): 28 | """Converts `text` to Unicode (if it's not already), assuming utf-8 input.""" 29 | if six.PY3: 30 | if isinstance(text, str): 31 | return text 32 | elif isinstance(text, bytes): 33 | return text.decode("utf-8", "ignore") 34 | else: 35 | raise ValueError("Unsupported string type: %s" % (type(text))) 36 | elif six.PY2: 37 | if isinstance(text, str): 38 | return text.decode("utf-8", "ignore") 39 | elif isinstance(text, unicode): 40 | return text 41 | else: 42 | raise ValueError("Unsupported string type: %s" % (type(text))) 43 | else: 44 | raise ValueError("Not running on Python2 or Python 3?") 45 | 46 | 47 | def printable_text(text): 48 | """Returns text encoded in a way suitable for print or `tf.logging`.""" 49 | 50 | # These functions want `str` for both Python2 and Python3, but in one case 51 | # it's a Unicode string and in the other it's a byte string. 52 | if six.PY3: 53 | if isinstance(text, str): 54 | return text 55 | elif isinstance(text, bytes): 56 | return text.decode("utf-8", "ignore") 57 | else: 58 | raise ValueError("Unsupported string type: %s" % (type(text))) 59 | elif six.PY2: 60 | if isinstance(text, str): 61 | return text 62 | elif isinstance(text, unicode): 63 | return text.encode("utf-8") 64 | else: 65 | raise ValueError("Unsupported string type: %s" % (type(text))) 66 | else: 67 | raise ValueError("Not running on Python2 or Python 3?") 68 | 69 | 70 | def load_vocab(vocab_file): 71 | """Loads a vocabulary file into a dictionary.""" 72 | vocab = collections.OrderedDict() 73 | index = 0 74 | with tf.gfile.GFile(vocab_file, "r") as reader: 75 | while True: 76 | token = convert_to_unicode(reader.readline()) 77 | if not token: 78 | break 79 | token = token.strip() 80 | vocab[token] = index 81 | index += 1 82 | return vocab 83 | 84 | 85 | def convert_by_vocab(vocab, items): 86 | """Converts a sequence of [tokens|ids] using the vocab.""" 87 | output = [] 88 | for item in items: 89 | #modify for oov, using [unk] replace, if you using english language do not change this 90 | # output.append(vocab.[item]) 91 | output.append(vocab.get(item, 100)) 92 | return output 93 | 94 | 95 | def convert_tokens_to_ids(vocab, tokens): 96 | return convert_by_vocab(vocab, tokens) 97 | 98 | 99 | def convert_ids_to_tokens(inv_vocab, ids): 100 | return convert_by_vocab(inv_vocab, ids) 101 | 102 | 103 | def whitespace_tokenize(text): 104 | """Runs basic whitespace cleaning and splitting on a peice of text.""" 105 | text = text.strip() 106 | if not text: 107 | return [] 108 | tokens = text.split() 109 | return tokens 110 | 111 | 112 | class FullTokenizer(object): 113 | """Runs end-to-end tokenziation.""" 114 | 115 | def __init__(self, vocab_file, do_lower_case=True): 116 | self.vocab = load_vocab(vocab_file) 117 | self.inv_vocab = {v: k for k, v in 
self.vocab.items()} 118 | self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case) 119 | self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab) 120 | 121 | def tokenize(self, text): 122 | split_tokens = [] 123 | for token in self.basic_tokenizer.tokenize(text): 124 | for sub_token in self.wordpiece_tokenizer.tokenize(token): 125 | split_tokens.append(sub_token) 126 | 127 | return split_tokens 128 | 129 | def convert_tokens_to_ids(self, tokens): 130 | return convert_by_vocab(self.vocab, tokens) 131 | 132 | def convert_ids_to_tokens(self, ids): 133 | return convert_by_vocab(self.inv_vocab, ids) 134 | 135 | 136 | class BasicTokenizer(object): 137 | """Runs basic tokenization (punctuation splitting, lower casing, etc.).""" 138 | 139 | def __init__(self, do_lower_case=True): 140 | """Constructs a BasicTokenizer. 141 | 142 | Args: 143 | do_lower_case: Whether to lower case the input. 144 | """ 145 | self.do_lower_case = do_lower_case 146 | 147 | def tokenize(self, text): 148 | """Tokenizes a piece of text.""" 149 | text = convert_to_unicode(text) 150 | text = self._clean_text(text) 151 | 152 | # This was added on November 1st, 2018 for the multilingual and Chinese 153 | # models. This is also applied to the English models now, but it doesn't 154 | # matter since the English models were not trained on any Chinese data 155 | # and generally don't have any Chinese data in them (there are Chinese 156 | # characters in the vocabulary because Wikipedia does have some Chinese 157 | # words in the English Wikipedia.). 158 | text = self._tokenize_chinese_chars(text) 159 | 160 | orig_tokens = whitespace_tokenize(text) 161 | split_tokens = [] 162 | for token in orig_tokens: 163 | if self.do_lower_case: 164 | token = token.lower() 165 | token = self._run_strip_accents(token) 166 | split_tokens.extend(self._run_split_on_punc(token)) 167 | 168 | output_tokens = whitespace_tokenize(" ".join(split_tokens)) 169 | return output_tokens 170 | 171 | def _run_strip_accents(self, text): 172 | """Strips accents from a piece of text.""" 173 | text = unicodedata.normalize("NFD", text) 174 | output = [] 175 | for char in text: 176 | cat = unicodedata.category(char) 177 | if cat == "Mn": 178 | continue 179 | output.append(char) 180 | return "".join(output) 181 | 182 | def _run_split_on_punc(self, text): 183 | """Splits punctuation on a piece of text.""" 184 | chars = list(text) 185 | i = 0 186 | start_new_word = True 187 | output = [] 188 | while i < len(chars): 189 | char = chars[i] 190 | if _is_punctuation(char): 191 | output.append([char]) 192 | start_new_word = True 193 | else: 194 | if start_new_word: 195 | output.append([]) 196 | start_new_word = False 197 | output[-1].append(char) 198 | i += 1 199 | 200 | return ["".join(x) for x in output] 201 | 202 | def _tokenize_chinese_chars(self, text): 203 | """Adds whitespace around any CJK character.""" 204 | output = [] 205 | for char in text: 206 | cp = ord(char) 207 | if self._is_chinese_char(cp): 208 | output.append(" ") 209 | output.append(char) 210 | output.append(" ") 211 | else: 212 | output.append(char) 213 | return "".join(output) 214 | 215 | def _is_chinese_char(self, cp): 216 | """Checks whether CP is the codepoint of a CJK character.""" 217 | # This defines a "chinese character" as anything in the CJK Unicode block: 218 | # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) 219 | # 220 | # Note that the CJK Unicode block is NOT all Japanese and Korean characters, 221 | # despite its name. 
The modern Korean Hangul alphabet is a different block, 222 | # as is Japanese Hiragana and Katakana. Those alphabets are used to write 223 | # space-separated words, so they are not treated specially and handled 224 | # like the all of the other languages. 225 | if ((cp >= 0x4E00 and cp <= 0x9FFF) or # 226 | (cp >= 0x3400 and cp <= 0x4DBF) or # 227 | (cp >= 0x20000 and cp <= 0x2A6DF) or # 228 | (cp >= 0x2A700 and cp <= 0x2B73F) or # 229 | (cp >= 0x2B740 and cp <= 0x2B81F) or # 230 | (cp >= 0x2B820 and cp <= 0x2CEAF) or 231 | (cp >= 0xF900 and cp <= 0xFAFF) or # 232 | (cp >= 0x2F800 and cp <= 0x2FA1F)): # 233 | return True 234 | 235 | return False 236 | 237 | def _clean_text(self, text): 238 | """Performs invalid character removal and whitespace cleanup on text.""" 239 | output = [] 240 | for char in text: 241 | cp = ord(char) 242 | if cp == 0 or cp == 0xfffd or _is_control(char): 243 | continue 244 | if _is_whitespace(char): 245 | output.append(" ") 246 | else: 247 | output.append(char) 248 | return "".join(output) 249 | 250 | 251 | class WordpieceTokenizer(object): 252 | """Runs WordPiece tokenziation.""" 253 | 254 | def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100): 255 | self.vocab = vocab 256 | self.unk_token = unk_token 257 | self.max_input_chars_per_word = max_input_chars_per_word 258 | 259 | def tokenize(self, text): 260 | """Tokenizes a piece of text into its word pieces. 261 | 262 | This uses a greedy longest-match-first algorithm to perform tokenization 263 | using the given vocabulary. 264 | 265 | For example: 266 | input = "unaffable" 267 | output = ["un", "##aff", "##able"] 268 | 269 | Args: 270 | text: A single token or whitespace separated tokens. This should have 271 | already been passed through `BasicTokenizer. 272 | 273 | Returns: 274 | A list of wordpiece tokens. 275 | """ 276 | 277 | text = convert_to_unicode(text) 278 | 279 | output_tokens = [] 280 | for token in whitespace_tokenize(text): 281 | chars = list(token) 282 | if len(chars) > self.max_input_chars_per_word: 283 | output_tokens.append(self.unk_token) 284 | continue 285 | 286 | is_bad = False 287 | start = 0 288 | sub_tokens = [] 289 | while start < len(chars): 290 | end = len(chars) 291 | cur_substr = None 292 | while start < end: 293 | substr = "".join(chars[start:end]) 294 | if start > 0: 295 | substr = "##" + substr 296 | if substr in self.vocab: 297 | cur_substr = substr 298 | break 299 | end -= 1 300 | if cur_substr is None: 301 | is_bad = True 302 | break 303 | sub_tokens.append(cur_substr) 304 | start = end 305 | 306 | if is_bad: 307 | output_tokens.append(self.unk_token) 308 | else: 309 | output_tokens.extend(sub_tokens) 310 | return output_tokens 311 | 312 | 313 | def _is_whitespace(char): 314 | """Checks whether `chars` is a whitespace character.""" 315 | # \t, \n, and \r are technically contorl characters but we treat them 316 | # as whitespace since they are generally considered as such. 317 | if char == " " or char == "\t" or char == "\n" or char == "\r": 318 | return True 319 | cat = unicodedata.category(char) 320 | if cat == "Zs": 321 | return True 322 | return False 323 | 324 | 325 | def _is_control(char): 326 | """Checks whether `chars` is a control character.""" 327 | # These are technically control characters but we count them as whitespace 328 | # characters. 
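# Note (added for clarity): \t, \n and \r are deliberately excluded below; _is_whitespace() above already
# reports them as whitespace, and only the remaining characters whose Unicode category starts with "C" are flagged as control characters.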
329 | if char == "\t" or char == "\n" or char == "\r": 330 | return False 331 | cat = unicodedata.category(char) 332 | if cat.startswith("C"): 333 | return True 334 | return False 335 | 336 | 337 | def _is_punctuation(char): 338 | """Checks whether `chars` is a punctuation character.""" 339 | cp = ord(char) 340 | # We treat all non-letter/number ASCII as punctuation. 341 | # Characters such as "^", "$", and "`" are not in the Unicode 342 | # Punctuation class but we treat them as punctuation anyways, for 343 | # consistency. 344 | if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or 345 | (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)): 346 | return True 347 | cat = unicodedata.category(char) 348 | if cat.startswith("P"): 349 | return True 350 | return False 351 | 352 | -------------------------------------------------------------------------------- /bert_base/bert/tokenization_test.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # Copyright 2018 The Google AI Language Team Authors. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | from __future__ import absolute_import 16 | from __future__ import division 17 | from __future__ import print_function 18 | 19 | import os 20 | import tempfile 21 | 22 | import tokenization 23 | import tensorflow as tf 24 | 25 | 26 | class TokenizationTest(tf.test.TestCase): 27 | 28 | def test_full_tokenizer(self): 29 | vocab_tokens = [ 30 | "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", 31 | "##ing", "," 32 | ] 33 | with tempfile.NamedTemporaryFile(delete=False) as vocab_writer: 34 | vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) 35 | 36 | vocab_file = vocab_writer.name 37 | 38 | tokenizer = tokenization.FullTokenizer(vocab_file) 39 | os.unlink(vocab_file) 40 | 41 | tokens = tokenizer.tokenize(u"UNwant\u00E9d,running") 42 | self.assertAllEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"]) 43 | 44 | self.assertAllEqual( 45 | tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9]) 46 | 47 | def test_chinese(self): 48 | tokenizer = tokenization.BasicTokenizer() 49 | 50 | self.assertAllEqual( 51 | tokenizer.tokenize(u"ah\u535A\u63A8zz"), 52 | [u"ah", u"\u535A", u"\u63A8", u"zz"]) 53 | 54 | def test_basic_tokenizer_lower(self): 55 | tokenizer = tokenization.BasicTokenizer(do_lower_case=True) 56 | 57 | self.assertAllEqual( 58 | tokenizer.tokenize(u" \tHeLLo!how \n Are yoU? "), 59 | ["hello", "!", "how", "are", "you", "?"]) 60 | self.assertAllEqual(tokenizer.tokenize(u"H\u00E9llo"), ["hello"]) 61 | 62 | def test_basic_tokenizer_no_lower(self): 63 | tokenizer = tokenization.BasicTokenizer(do_lower_case=False) 64 | 65 | self.assertAllEqual( 66 | tokenizer.tokenize(u" \tHeLLo!how \n Are yoU? 
"), 67 | ["HeLLo", "!", "how", "Are", "yoU", "?"]) 68 | 69 | def test_wordpiece_tokenizer(self): 70 | vocab_tokens = [ 71 | "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", 72 | "##ing" 73 | ] 74 | 75 | vocab = {} 76 | for (i, token) in enumerate(vocab_tokens): 77 | vocab[token] = i 78 | tokenizer = tokenization.WordpieceTokenizer(vocab=vocab) 79 | 80 | self.assertAllEqual(tokenizer.tokenize(""), []) 81 | 82 | self.assertAllEqual( 83 | tokenizer.tokenize("unwanted running"), 84 | ["un", "##want", "##ed", "runn", "##ing"]) 85 | 86 | self.assertAllEqual( 87 | tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"]) 88 | 89 | def test_convert_tokens_to_ids(self): 90 | vocab_tokens = [ 91 | "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", 92 | "##ing" 93 | ] 94 | 95 | vocab = {} 96 | for (i, token) in enumerate(vocab_tokens): 97 | vocab[token] = i 98 | 99 | self.assertAllEqual( 100 | tokenization.convert_tokens_to_ids( 101 | vocab, ["un", "##want", "##ed", "runn", "##ing"]), [7, 4, 5, 8, 9]) 102 | 103 | def test_is_whitespace(self): 104 | self.assertTrue(tokenization._is_whitespace(u" ")) 105 | self.assertTrue(tokenization._is_whitespace(u"\t")) 106 | self.assertTrue(tokenization._is_whitespace(u"\r")) 107 | self.assertTrue(tokenization._is_whitespace(u"\n")) 108 | self.assertTrue(tokenization._is_whitespace(u"\u00A0")) 109 | 110 | self.assertFalse(tokenization._is_whitespace(u"A")) 111 | self.assertFalse(tokenization._is_whitespace(u"-")) 112 | 113 | def test_is_control(self): 114 | self.assertTrue(tokenization._is_control(u"\u0005")) 115 | 116 | self.assertFalse(tokenization._is_control(u"A")) 117 | self.assertFalse(tokenization._is_control(u" ")) 118 | self.assertFalse(tokenization._is_control(u"\t")) 119 | self.assertFalse(tokenization._is_control(u"\r")) 120 | 121 | def test_is_punctuation(self): 122 | self.assertTrue(tokenization._is_punctuation(u"-")) 123 | self.assertTrue(tokenization._is_punctuation(u"$")) 124 | self.assertTrue(tokenization._is_punctuation(u"`")) 125 | self.assertTrue(tokenization._is_punctuation(u".")) 126 | 127 | self.assertFalse(tokenization._is_punctuation(u"A")) 128 | self.assertFalse(tokenization._is_punctuation(u" ")) 129 | 130 | 131 | if __name__ == "__main__": 132 | tf.test.main() 133 | -------------------------------------------------------------------------------- /data/sample_files/展示预测的示例地址文件.csv: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SuperMap/address-matching/be9dd02786533db12583d1d3b2b782af647a6bda/data/sample_files/展示预测的示例地址文件.csv -------------------------------------------------------------------------------- /data/sample_files/手工标记好的示例地址.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SuperMap/address-matching/be9dd02786533db12583d1d3b2b782af647a6bda/data/sample_files/手工标记好的示例地址.xlsx -------------------------------------------------------------------------------- /other/pictures/切分地址要素层级说明.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SuperMap/address-matching/be9dd02786533db12583d1d3b2b782af647a6bda/other/pictures/切分地址要素层级说明.png -------------------------------------------------------------------------------- /other/pictures/单条地址分词效果.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/SuperMap/address-matching/be9dd02786533db12583d1d3b2b782af647a6bda/other/pictures/单条地址分词效果.png -------------------------------------------------------------------------------- /other/pictures/打标签示例.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SuperMap/address-matching/be9dd02786533db12583d1d3b2b782af647a6bda/other/pictures/打标签示例.png -------------------------------------------------------------------------------- /other/predict_base.py: -------------------------------------------------------------------------------- 1 | # encoding=utf-8 2 | 3 | import tensorflow as tf 4 | import numpy as np 5 | import codecs 6 | import pickle 7 | import os 8 | from other.preprocessing import atomization, tokenized 9 | 10 | from train.models import create_model, InputFeatures 11 | from bert_base.bert import tokenization, modeling 12 | from train.helper import get_args_parser 13 | 14 | args = get_args_parser() 15 | 16 | model_dir = args.output_dir 17 | bert_dir = args.bert_path 18 | 19 | is_training = False 20 | use_one_hot_embeddings = False 21 | batch_size = 1 22 | 23 | gpu_config = tf.ConfigProto() 24 | gpu_config.gpu_options.allow_growth = True 25 | sess = tf.Session(config=gpu_config) 26 | model = None 27 | 28 | global graph 29 | input_ids_p, input_mask_p, label_ids_p, segment_ids_p = None, None, None, None 30 | 31 | print('checkpoint path:{}'.format(os.path.join(model_dir, "checkpoint"))) 32 | if not os.path.exists(os.path.join(model_dir, "checkpoint")): 33 | raise Exception("failed to get checkpoint. going to return ") 34 | 35 | # 加载label->id的词典 36 | with codecs.open(os.path.join(model_dir, 'label2id.pkl'), 'rb') as rf: 37 | label2id = pickle.load(rf) 38 | id2label = {value: key for key, value in label2id.items()} 39 | 40 | with codecs.open(os.path.join(model_dir, 'label_list.pkl'), 'rb') as rf: 41 | label_list = pickle.load(rf) 42 | num_labels = len(label_list) + 1 43 | graph = tf.get_default_graph() 44 | with graph.as_default(): 45 | print("going to restore checkpoint") 46 | sess.run(tf.global_variables_initializer()) 47 | input_ids_p = tf.placeholder(tf.int32, [batch_size, args.max_seq_length], name="input_ids") 48 | input_mask_p = tf.placeholder(tf.int32, [batch_size, args.max_seq_length], name="input_mask") 49 | 50 | bert_config = modeling.BertConfig.from_json_file(os.path.join(bert_dir, 'bert_config.json')) 51 | total_loss, logits, trans, pred_ids = create_model( 52 | bert_config=bert_config, is_training=False, input_ids=input_ids_p, input_mask=input_mask_p, segment_ids=None, 53 | labels=None, num_labels=num_labels, use_one_hot_embeddings=False, dropout_rate=1.0) 54 | 55 | saver = tf.train.Saver() 56 | saver.restore(sess, tf.train.latest_checkpoint(model_dir)) 57 | 58 | 59 | tokenizer = tokenization.FullTokenizer( 60 | vocab_file=os.path.join(bert_dir, 'vocab.txt'), do_lower_case=args.do_lower_case) 61 | 62 | 63 | def convert_id_to_label(pred_ids_result, idx2label): 64 | """ 65 | 将id形式的结果转化为真实序列结果 66 | :param pred_ids_result: 67 | :param idx2label: 68 | :return: 69 | """ 70 | result = [] 71 | for row in range(batch_size): 72 | curr_seq = [] 73 | for ids in pred_ids_result[row][0]: 74 | if ids == 0: 75 | break 76 | curr_label = idx2label[ids] 77 | if curr_label in ['[CLS]', '[SEP]']: 78 | continue 79 | curr_seq.append(curr_label) 80 | result.append(curr_seq) 81 | return result 82 | 83 | 84 | def strage_combined_link_org_loc(tokens, tags): 85 | """ 86 | 组合策略 87 | :param pred_label_result: 88 
| :param types: 89 | :return: 90 | """ 91 | def print_output(data, type): 92 | line = [] 93 | line.append(type) 94 | for i in data: 95 | line.append(i.word) 96 | # print(', '.join(line)) 97 | 98 | params = None 99 | eval = Result(params) 100 | if len(tokens) > len(tags): 101 | tokens = tokens[:len(tags)] 102 | person, loc, org = eval.get_result(tokens, tags) 103 | print_output(loc, 'LOC') 104 | print_output(person, 'PER') 105 | print_output(org, 'ORG') 106 | 107 | 108 | def convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer, mode): 109 | """ 110 | 将一个样本进行分析,然后将字转化为id, 标签转化为id,然后结构化到InputFeatures对象中 111 | :param ex_index: index 112 | :param example: 一个样本 113 | :param label_list: 标签列表 114 | :param max_seq_length: 115 | :param tokenizer: 116 | :param mode: 117 | :return: 118 | """ 119 | label_map = {} 120 | # 1表示从1开始对label进行index化 121 | for (i, label) in enumerate(label_list, 1): 122 | label_map[label] = i 123 | # 保存label->index 的map 124 | if not os.path.exists(os.path.join(model_dir, 'label2id.pkl')): 125 | with codecs.open(os.path.join(model_dir, 'label2id.pkl'), 'wb') as w: 126 | pickle.dump(label_map, w) 127 | 128 | tokens = example 129 | # tokens = tokenizer.tokenize(example.text) 130 | # 序列截断 131 | if len(tokens) >= max_seq_length - 1: 132 | tokens = tokens[0:(max_seq_length - 2)] # -2 的原因是因为序列需要加一个句首和句尾标志 133 | ntokens = [] 134 | segment_ids = [] 135 | label_ids = [] 136 | ntokens.append("[CLS]") # 句子开始设置CLS 标志 137 | segment_ids.append(0) 138 | # append("O") or append("[CLS]") not sure! 139 | label_ids.append(label_map["[CLS]"]) # O OR CLS 没有任何影响,不过我觉得O 会减少标签个数,不过拒收和句尾使用不同的标志来标注,使用LCS 也没毛病 140 | for i, token in enumerate(tokens): 141 | ntokens.append(token) 142 | segment_ids.append(0) 143 | label_ids.append(0) 144 | ntokens.append("[SEP]") # 句尾添加[SEP] 标志 145 | segment_ids.append(0) 146 | # append("O") or append("[SEP]") not sure! 147 | label_ids.append(label_map["[SEP]"]) 148 | input_ids = tokenizer.convert_tokens_to_ids(ntokens) # 将序列中的字(ntokens)转化为ID形式 149 | input_mask = [1] * len(input_ids) 150 | 151 | # padding, 使用 152 | while len(input_ids) < max_seq_length: 153 | input_ids.append(0) 154 | input_mask.append(0) 155 | segment_ids.append(0) 156 | # we don't concerned about it! 
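# Note (added for clarity): padded positions receive label id 0 and mask 0; convert_id_to_label() above stops at the first id equal to 0, so padding never appears in the predicted sequence.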
157 | label_ids.append(0) 158 | ntokens.append("**NULL**") 159 | # label_mask.append(0) 160 | # print(len(input_ids)) 161 | assert len(input_ids) == max_seq_length 162 | assert len(input_mask) == max_seq_length 163 | assert len(segment_ids) == max_seq_length 164 | assert len(label_ids) == max_seq_length 165 | # assert len(label_mask) == max_seq_length 166 | 167 | # 结构化为一个类 168 | feature = InputFeatures( 169 | input_ids=input_ids, 170 | input_mask=input_mask, 171 | segment_ids=segment_ids, 172 | label_ids=label_ids, 173 | # label_mask = label_mask 174 | ) 175 | return feature 176 | 177 | 178 | class Pair(object): 179 | def __init__(self, word, start, end, type, merge=False): 180 | self.__word = word 181 | self.__start = start 182 | self.__end = end 183 | self.__merge = merge 184 | self.__types = type 185 | 186 | @property 187 | def start(self): 188 | return self.__start 189 | @property 190 | def end(self): 191 | return self.__end 192 | @property 193 | def merge(self): 194 | return self.__merge 195 | @property 196 | def word(self): 197 | return self.__word 198 | 199 | @property 200 | def types(self): 201 | return self.__types 202 | @word.setter 203 | def word(self, word): 204 | self.__word = word 205 | @start.setter 206 | def start(self, start): 207 | self.__start = start 208 | @end.setter 209 | def end(self, end): 210 | self.__end = end 211 | @merge.setter 212 | def merge(self, merge): 213 | self.__merge = merge 214 | 215 | @types.setter 216 | def types(self, type): 217 | self.__types = type 218 | 219 | def __str__(self) -> str: 220 | line = [] 221 | line.append('entity:{}'.format(self.__word)) 222 | line.append('start:{}'.format(self.__start)) 223 | line.append('end:{}'.format(self.__end)) 224 | line.append('merge:{}'.format(self.__merge)) 225 | line.append('types:{}'.format(self.__types)) 226 | return '\t'.join(line) 227 | 228 | 229 | class Result(object): 230 | def __init__(self, config): 231 | self.config = config 232 | self.person = [] 233 | self.loc = [] 234 | self.org = [] 235 | self.others = [] 236 | def get_result(self, tokens, tags, config=None): 237 | # 先获取标注结果 238 | self.result_to_json(tokens, tags) 239 | return self.person, self.loc, self.org 240 | 241 | def result_to_json(self, string, tags): 242 | """ 243 | 将模型标注序列和输入序列结合 转化为结果 244 | :param string: 输入序列 245 | :param tags: 标注结果 246 | :return: 247 | """ 248 | item = {"entities": []} 249 | entity_name = "" 250 | entity_start = 0 251 | idx = 0 252 | last_tag = '' 253 | 254 | for char, tag in zip(string, tags): 255 | if tag[0] == "S": 256 | self.append(char, idx, idx+1, tag[2:]) 257 | item["entities"].append({"word": char, "start": idx, "end": idx+1, "type":tag[2:]}) 258 | elif tag[0] == "B": 259 | if entity_name != '': 260 | self.append(entity_name, entity_start, idx, last_tag[2:]) 261 | item["entities"].append({"word": entity_name, "start": entity_start, "end": idx, "type": last_tag[2:]}) 262 | entity_name = "" 263 | entity_name += char 264 | entity_start = idx 265 | elif tag[0] == "I": 266 | entity_name += char 267 | elif tag[0] == "O": 268 | if entity_name != '': 269 | self.append(entity_name, entity_start, idx, last_tag[2:]) 270 | item["entities"].append({"word": entity_name, "start": entity_start, "end": idx, "type": last_tag[2:]}) 271 | entity_name = "" 272 | else: 273 | entity_name = "" 274 | entity_start = idx 275 | idx += 1 276 | last_tag = tag 277 | if entity_name != '': 278 | self.append(entity_name, entity_start, idx, last_tag[2:]) 279 | item["entities"].append({"word": entity_name, "start": entity_start, "end": idx, 
"type": last_tag[2:]}) 280 | return item 281 | 282 | def append(self, word, start, end, tag): 283 | if tag == 'LOC': 284 | self.loc.append(Pair(word, start, end, 'LOC')) 285 | elif tag == 'PER': 286 | self.person.append(Pair(word, start, end, 'PER')) 287 | elif tag == 'ORG': 288 | self.org.append(Pair(word, start, end, 'ORG')) 289 | else: 290 | self.others.append(Pair(word, start, end, tag)) 291 | 292 | 293 | def predict_one(address): 294 | """ 295 | do online prediction. each time make prediction for one instance. 296 | you can change to a batch if you want. 297 | 298 | :param line: a list. element is: [dummy_label,text_a,text_b] 299 | :return: 300 | """ 301 | def convert(line): 302 | feature = convert_single_example(0, line, label_list, args.max_seq_length, tokenizer, 'p') 303 | input_ids = np.reshape([feature.input_ids], (batch_size, args.max_seq_length)) 304 | input_mask = np.reshape([feature.input_mask], (batch_size, args.max_seq_length)) 305 | segment_ids = np.reshape([feature.segment_ids], (batch_size, args.max_seq_length)) 306 | label_ids =np.reshape([feature.label_ids], (batch_size, args.max_seq_length)) 307 | return input_ids, input_mask, segment_ids, label_ids 308 | 309 | global graph 310 | with graph.as_default(): 311 | atom_list = atomization(address) 312 | address = tokenized(atom_list) 313 | # print('your input is:{}'.format(sentence)) 314 | input_ids, input_mask, segment_ids, label_ids = convert(address) 315 | 316 | feed_dict = {input_ids_p: input_ids, 317 | input_mask_p: input_mask} 318 | # run session get current feed_dict result 319 | pred_ids_result = sess.run([pred_ids], feed_dict) 320 | pred_label_result = convert_id_to_label(pred_ids_result, id2label) 321 | return pred_label_result 322 | 323 | 324 | def participles_sequence(address, sequence): 325 | if len(address) == len(sequence): 326 | B_index = [] 327 | for index in range(1, len(sequence)): 328 | if str(sequence[index]).startswith('B'): 329 | B_index.append(index) 330 | B_index.insert(0, 0) 331 | B_index.append(len(sequence)) 332 | participles_address = '' 333 | for index in range(len(B_index)-1): 334 | start = B_index[index] 335 | end = B_index[index+1] 336 | participles_address += ''.join(address[start:end]) 337 | participles_address += '/' 338 | return participles_address 339 | else: 340 | return "序列长度有误" 341 | 342 | 343 | if __name__ == "__main__": 344 | result = predict_one("内江市隆昌市四川内江市隆昌市李市镇新盛街2栋3单元201") 345 | print(result) -------------------------------------------------------------------------------- /other/preprocessing.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Author : woleto 3 | # @Time : 2020/3/16 11:04 4 | import os 5 | import re 6 | import string 7 | import pandas as pd 8 | 9 | from bert_base.bert import tokenization 10 | from train.helper import get_args_parser 11 | 12 | args = get_args_parser() 13 | 14 | 15 | def atomization(text): 16 | atom_list = list() 17 | my_re = re.compile(r'([a-z0-9]+[-_()]*[a-z0-9]*)', re.I) 18 | parentheses_re = re.compile(r'[(](.*)[)]', re.S) 19 | # used '0' replace NaN before, judge it now 20 | if text == '0': 21 | return atom_list 22 | while len(text) > 0: 23 | if ('\u4e00' <= text[0] <= '\u9fff') or (text[0] in string.punctuation): 24 | atom_list.append(text[0]) 25 | text = text[1:] 26 | elif text[0] == '(': 27 | element = re.search(parentheses_re, text).group(0) 28 | # atom_list[-1] = atom_list[-1] + element 29 | text = text[len(element):] 30 | else: 31 | try: 32 | id_tuple = 
re.search(my_re, text).span() 33 | start_index = id_tuple[0] 34 | end_index = id_tuple[1] 35 | atom_list.append(text[start_index:end_index]) 36 | text = text[end_index:] 37 | except AttributeError: 38 | atom_list.append(text[0]) 39 | text = text[1:] 40 | return atom_list 41 | 42 | 43 | def tokenized(atomization_list): 44 | """ 45 | second step, process data that after atomization() 46 | :param atomization_list: 47 | :return: 48 | """ 49 | tokenized_list = [] 50 | tokenizer = tokenization.FullTokenizer(vocab_file=args.vocab_file, do_lower_case=True) 51 | for element in atomization_list: 52 | if element == '(': 53 | break 54 | token_list = tokenizer.tokenize(element) 55 | tokenized_list.append(token_list[0]) 56 | return tokenized_list 57 | 58 | 59 | def get_BMSE_by_length(length): 60 | if length <= 0: 61 | return list() 62 | elif length == 1: 63 | return list(["S"]) 64 | elif length == 2: 65 | return list(["B", "E"]) 66 | else: 67 | bmse_list = list(["B"]) 68 | for index in range(length - 2): 69 | bmse_list.append("M") 70 | bmse_list.append("E") 71 | return bmse_list 72 | 73 | 74 | class AddressDataset(object): 75 | def __init__(self, file_path, dataset_save_dir): 76 | self.file_path = file_path 77 | self.dataset_save_dir = dataset_save_dir 78 | self.labels = ['province', 'city', 'county', 'town', 'road', 'landmark', 'number', 'poi', 'orient', 'conj', 79 | 'punc'] 80 | self.dataset_split = [0.6, 0.2, 0.2] 81 | 82 | def generate(self): 83 | if self.file_path.find('.xl') > 0: 84 | file_data = pd.read_excel(self.file_path) 85 | elif self.file_path.endswith('.csv'): 86 | file_data = pd.read_csv(self.file_path) 87 | else: 88 | raise Exception('不支持的文件类型') 89 | file_data = pd.read_excel(self.file_path) 90 | model = 'train' 91 | data_row_sum = len(file_data) 92 | max_train_index = data_row_sum * self.dataset_split[0] 93 | max_dev_index = data_row_sum * (self.dataset_split[0] + self.dataset_split[1]) 94 | 95 | file_data['BMSE_Label'] = '' 96 | for index, row in file_data.iterrows(): 97 | # change model 98 | if max_train_index <= index < max_dev_index: 99 | model = 'dev' 100 | if index >= max_dev_index: 101 | model = 'test' 102 | 103 | split_address_str = str(file_data.iloc[index, 0]) 104 | split_num_label_str = str(file_data.iloc[index, 1]) 105 | split_address = split_address_str.split('|') 106 | split_num_label = split_num_label_str.split('|') 107 | # process address have only one segment. it will cause split_address's length is 1. 108 | if len(split_address) == 0: 109 | split_address = file_data.iloc[index, 0] 110 | split_num_label = file_data.iloc[index, 1] 111 | assert len(split_address) == len(split_num_label), 'the index of' + str( 112 | index) + ':the length of data not equals label' 113 | # make label index to label name. 
for example: 2-->city 114 | split_label = [int(x) - 1 for x in split_num_label] 115 | row_data_tokenizer = [] 116 | bmse_label_list = [] 117 | for segment_index in range(len(split_address)): 118 | address_segment = split_address[segment_index] 119 | address_segment_label = split_label[segment_index] 120 | # atomization 121 | atomization_list = atomization(address_segment) 122 | # toke 123 | token_list = tokenized(atomization_list) 124 | for element in token_list: 125 | row_data_tokenizer.append(element) 126 | print(token_list) 127 | # get bmse label 128 | segment_bmse_list = get_BMSE_by_length(len(token_list)) 129 | for segment_bmse in segment_bmse_list: 130 | bmse_label_list.append(segment_bmse + '-' + str(self.labels[address_segment_label])) 131 | # process.write_data_label(row_data_tokenizer, bmse_label_list, model) 132 | print(row_data_tokenizer, bmse_label_list) 133 | path = os.path.join(self.dataset_save_dir, model + '.txt') 134 | with open(path, 'a+', encoding='UTF-8') as w: 135 | for index in range(len(row_data_tokenizer)): 136 | str_line = row_data_tokenizer[index] + ' ' + bmse_label_list[index] 137 | w.write(str_line + '\n') 138 | w.write('\n') 139 | 140 | 141 | if __name__ == "__main__": 142 | process = AddressDataset(file_path='data/sample_files/手工标记好的示例地址.xlsx', dataset_save_dir='data') 143 | process.generate() 144 | -------------------------------------------------------------------------------- /predict.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Author : woleto 3 | # @Time : 2020/3/4 12:02 4 | from other.predict_base import predict_one, participles_sequence 5 | from other.preprocessing import atomization 6 | import pandas as pd 7 | 8 | 9 | def predict_address(address): 10 | predict_sequence = predict_one(address) 11 | atom_address = atomization(address) 12 | participles_address = participles_sequence(atom_address, predict_sequence[0]) 13 | return participles_address 14 | 15 | 16 | def predict_file_address(origin_file_path, result_file): 17 | """ 18 | Note: 19 | 示例代码,可根据文件本身调节 20 | :param origin_file_path: 21 | :param result_file: 22 | :return: 23 | """ 24 | address_file_path = origin_file_path 25 | predict_result_path = result_file 26 | data = pd.read_csv(address_file_path, sep=',', encoding="gbk", header=None) 27 | # print(data) 28 | data["序列标注"] = "" 29 | data["分词情况"] = "" 30 | for index, row in data.iterrows(): 31 | if index == 0: 32 | continue 33 | try: 34 | address = row[0] 35 | atom_address = atomization(address) 36 | predict_sequence = predict_one(address) 37 | data.iloc[index, 2] = ','.join(predict_sequence[0]) 38 | # 用序列映射地址,从而进行分词 39 | participles_address = participles_sequence(atom_address, predict_sequence[0]) 40 | data.iloc[index, 1] = participles_address 41 | except: 42 | print(index) 43 | continue 44 | if index % 100 == 0: 45 | data.to_csv(predict_result_path, index=False, header=None) 46 | data.to_csv(predict_result_path, index=False, header=None) 47 | 48 | 49 | if __name__ == "__main__": 50 | # 预测单个地址 51 | address = "北京市朝阳区酒仙桥北路甲10号电子城IT产业园107楼6层" 52 | result = predict_address(address) 53 | print('分词地址:', address) 54 | print('分词结果:', result) 55 | 56 | # 预测整个文件 57 | # predict_file_address('data/sample_files/展示预测的示例地址文件.csv','data/sample_files/展示预测的示例地址文件结果.csv') 58 | 59 | -------------------------------------------------------------------------------- /train.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- 
coding: utf-8 -*- 3 | 4 | from __future__ import absolute_import 5 | from __future__ import division 6 | from __future__ import print_function 7 | 8 | 9 | def train(): 10 | """ 11 | 训练步骤: 12 | 1.有标记好的xlsx、csv或其他格式的数据,参考data/sample_files/手工标记好的示例地址.xlsx 13 | 2.使用other文件夹下的preprocessing.py文件生成数据集 14 | 3.在train/helper.py中找到data_dir属性,将其修改为数据集的目录 15 | 4.在train/helper.py中找到output_dir属性,修改该值,否则在此次训练开始后,会自动删除之前训练好的模型 16 | 5.(可选)在train/helper.py中修改有关训练的超参数属性,例如:batch_size,learning_rate等等 17 | :return: 18 | """ 19 | # 确保已经在dataset中产生了trian、test、dev文件 20 | import os 21 | from train.helper import get_args_parser 22 | from train.bert_lstm_ner import train 23 | 24 | args = get_args_parser() 25 | if True: 26 | import sys 27 | param_str = '\n'.join(['%20s = %s' % (k, v) for k, v in sorted(vars(args).items())]) 28 | print('usage: %s\n%20s %s\n%s\n%s\n' % (' '.join(sys.argv), 'ARG', 'VALUE', '_' * 50, param_str)) 29 | print(args) 30 | os.environ['CUDA_VISIBLE_DEVICES'] = args.device_map 31 | train(args=args) 32 | 33 | 34 | if __name__ == '__main__': 35 | train() -------------------------------------------------------------------------------- /train/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- -------------------------------------------------------------------------------- /train/bert_lstm_ner.py: -------------------------------------------------------------------------------- 1 | #! usr/bin/env python3 2 | # -*- coding:utf-8 -*- 3 | """ 4 | Copyright 2018 The Google AI Language Team Authors. 5 | BASED ON Google_BERT. 6 | reference from :zhoukaiyin/ 7 | 8 | @Author:Macan 9 | """ 10 | 11 | from __future__ import absolute_import 12 | from __future__ import division 13 | from __future__ import print_function 14 | 15 | import collections 16 | import os 17 | import tensorflow as tf 18 | import codecs 19 | import pickle 20 | 21 | from bert_base.bert import modeling 22 | from bert_base.bert import optimization 23 | from bert_base.bert import tokenization 24 | 25 | 26 | from train.models import create_model, InputFeatures, InputExample 27 | from train.helper import set_logger 28 | __version__ = '0.1.0' 29 | 30 | __all__ = ['__version__', 'DataProcessor', 'NerProcessor', 'write_tokens', 'convert_single_example', 31 | 'filed_based_convert_examples_to_features', 'file_based_input_fn_builder', 32 | 'model_fn_builder', 'train'] 33 | 34 | 35 | logger = set_logger('Training') 36 | 37 | class DataProcessor(object): 38 | """Base class for data converters for sequence classification data sets.""" 39 | 40 | def get_train_examples(self, data_dir): 41 | """Gets a collection of `InputExample`s for the train set.""" 42 | raise NotImplementedError() 43 | 44 | def get_dev_examples(self, data_dir): 45 | """Gets a collection of `InputExample`s for the dev set.""" 46 | raise NotImplementedError() 47 | 48 | def get_labels(self): 49 | """Gets the list of labels for this data set.""" 50 | raise NotImplementedError() 51 | 52 | @classmethod 53 | def _read_data(cls, input_file): 54 | """Reads a BIO data.""" 55 | with codecs.open(input_file, 'r', encoding='utf-8') as f: 56 | lines = [] 57 | words = [] 58 | labels = [] 59 | for line in f: 60 | contends = line.strip() 61 | tokens = contends.split(' ') 62 | if len(tokens) == 2: 63 | words.append(tokens[0]) 64 | labels.append(tokens[1]) 65 | else: 66 | if len(contends) == 0: 67 | l = ' '.join([label for label in labels if len(label) > 0]) 68 | w = ' '.join([word for word in words if len(word) > 0]) 69 | lines.append([l, 
w]) 70 | words = [] 71 | labels = [] 72 | continue 73 | if contends.startswith("-DOCSTART-"): 74 | words.append('') 75 | continue 76 | return lines 77 | 78 | 79 | class NerProcessor(DataProcessor): 80 | def __init__(self, output_dir): 81 | self.labels = set() 82 | self.output_dir = output_dir 83 | 84 | def get_train_examples(self, data_dir): 85 | return self._create_example( 86 | self._read_data(os.path.join(data_dir, "train.txt")), "train" 87 | ) 88 | 89 | def get_dev_examples(self, data_dir): 90 | return self._create_example( 91 | self._read_data(os.path.join(data_dir, "dev.txt")), "dev" 92 | ) 93 | 94 | def get_test_examples(self, data_dir): 95 | return self._create_example( 96 | self._read_data(os.path.join(data_dir, "test.txt")), "test") 97 | 98 | def get_labels(self, labels=None): 99 | if labels is not None: 100 | try: 101 | # 支持从文件中读取标签类型 102 | if os.path.exists(labels) and os.path.isfile(labels): 103 | with codecs.open(labels, 'r', encoding='utf-8') as fd: 104 | for line in fd: 105 | self.labels.append(line.strip()) 106 | else: 107 | # 否则通过传入的参数,按照逗号分割 108 | self.labels = labels.split(',') 109 | self.labels = set(self.labels) # to set 110 | except Exception as e: 111 | print(e) 112 | # 通过读取train文件获取标签的方法会出现一定的风险。 113 | if os.path.exists(os.path.join(self.output_dir, 'label_list.pkl')): 114 | with codecs.open(os.path.join(self.output_dir, 'label_list.pkl'), 'rb') as rf: 115 | self.labels = pickle.load(rf) 116 | else: 117 | if len(self.labels) > 0: 118 | self.labels = self.labels.union(set(["X", "[CLS]", "[SEP]"])) 119 | with codecs.open(os.path.join(self.output_dir, 'label_list.pkl'), 'wb') as rf: 120 | pickle.dump(self.labels, rf) 121 | else: 122 | # self.labels = ["O", "B", "I", "X","[CLS]", "[SEP]"] 123 | self.labels = ["O", 'B-TIM', 'I-TIM', "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC", "X", "[CLS]", "[SEP]"] 124 | return self.labels 125 | 126 | def _create_example(self, lines, set_type): 127 | examples = [] 128 | for (i, line) in enumerate(lines): 129 | guid = "%s-%s" % (set_type, i) 130 | text = tokenization.convert_to_unicode(line[1]) 131 | label = tokenization.convert_to_unicode(line[0]) 132 | # if i == 0: 133 | # print('label: ', label) 134 | examples.append(InputExample(guid=guid, text=text, label=label)) 135 | return examples 136 | 137 | def _read_data(self, input_file): 138 | """Reads a BIO data.""" 139 | with codecs.open(input_file, 'r', encoding='utf-8') as f: 140 | lines = [] 141 | words = [] 142 | labels = [] 143 | for line in f: 144 | contends = line.strip() 145 | tokens = contends.split(' ') 146 | if len(tokens) == 2: 147 | words.append(tokens[0]) 148 | labels.append(tokens[-1]) 149 | else: 150 | if len(contends) == 0 and len(words) > 0: 151 | label = [] 152 | word = [] 153 | for l, w in zip(labels, words): 154 | if len(l) > 0 and len(w) > 0: 155 | label.append(l) 156 | self.labels.add(l) 157 | word.append(w) 158 | lines.append([' '.join(label), ' '.join(word)]) 159 | words = [] 160 | labels = [] 161 | continue 162 | if contends.startswith("-DOCSTART-"): 163 | continue 164 | return lines 165 | 166 | 167 | def write_tokens(tokens, output_dir, mode): 168 | """ 169 | 将序列解析结果写入到文件中 170 | 只在mode=test的时候启用 171 | :param output_dir: 172 | :param tokens: 173 | :param mode: 174 | :return: 175 | """ 176 | if mode == "test": 177 | path = os.path.join(output_dir, "token_" + mode + ".txt") 178 | wf = codecs.open(path, 'a', encoding='utf-8') 179 | for token in tokens: 180 | if token != "**NULL**": 181 | wf.write(token + '\n') 182 | wf.close() 183 | 184 | 185 | def 
convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer, output_dir, mode): 186 | """ 187 | 将一个样本进行分析,然后将字转化为id, 标签转化为id,然后结构化到InputFeatures对象中 188 | :param ex_index: index 189 | :param example: 一个样本 190 | :param label_list: 标签列表 191 | :param max_seq_length: 192 | :param tokenizer: 193 | :param output_dir 194 | :param mode: 195 | :return: 196 | """ 197 | label_map = {} 198 | # 1表示从1开始对label进行index化 199 | for (i, label) in enumerate(label_list, 1): 200 | label_map[label] = i 201 | # 保存label->index 的map 202 | if not os.path.exists(os.path.join(output_dir, 'label2id.pkl')): 203 | with codecs.open(os.path.join(output_dir, 'label2id.pkl'), 'wb') as w: 204 | pickle.dump(label_map, w) 205 | 206 | textlist = example.text.split(' ') 207 | labellist = example.label.split(' ') 208 | tokens = [] 209 | labels = [] 210 | for i, word in enumerate(textlist): 211 | # 分词,如果是中文,就是分字,但是对于一些不在BERT的vocab.txt中得字符会被进行WordPice处理(例如中文的引号),可以将所有的分字操作替换为list(input) 212 | token = tokenizer.tokenize(word) 213 | tokens.extend(token) 214 | label_1 = labellist[i] 215 | for m in range(len(token)): 216 | if m == 0: 217 | labels.append(label_1) 218 | else: # 一般不会出现else 219 | labels.append("X") 220 | # tokens = tokenizer.tokenize(example.text) 221 | # 序列截断 222 | if len(tokens) >= max_seq_length - 1: 223 | tokens = tokens[0:(max_seq_length - 2)] # -2 的原因是因为序列需要加一个句首和句尾标志 224 | labels = labels[0:(max_seq_length - 2)] 225 | ntokens = [] 226 | segment_ids = [] 227 | label_ids = [] 228 | ntokens.append("[CLS]") # 句子开始设置CLS 标志 229 | segment_ids.append(0) 230 | # append("O") or append("[CLS]") not sure! 231 | label_ids.append(label_map["[CLS]"]) # O OR CLS 没有任何影响,不过我觉得O 会减少标签个数,不过拒收和句尾使用不同的标志来标注,使用LCS 也没毛病 232 | for i, token in enumerate(tokens): 233 | ntokens.append(token) 234 | segment_ids.append(0) 235 | label_ids.append(label_map[labels[i]]) 236 | ntokens.append("[SEP]") # 句尾添加[SEP] 标志 237 | segment_ids.append(0) 238 | # append("O") or append("[SEP]") not sure! 239 | label_ids.append(label_map["[SEP]"]) 240 | input_ids = tokenizer.convert_tokens_to_ids(ntokens) # 将序列中的字(ntokens)转化为ID形式 241 | input_mask = [1] * len(input_ids) 242 | # label_mask = [1] * len(input_ids) 243 | # padding, 使用 244 | while len(input_ids) < max_seq_length: 245 | input_ids.append(0) 246 | input_mask.append(0) 247 | segment_ids.append(0) 248 | # we don't concerned about it! 
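# Note (added for clarity): label ids are indexed starting from 1 (see enumerate(label_list, 1) above), so 0 is free to serve as the padding label here.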
249 | label_ids.append(0) 250 | ntokens.append("**NULL**") 251 | # label_mask.append(0) 252 | # print(len(input_ids)) 253 | assert len(input_ids) == max_seq_length 254 | assert len(input_mask) == max_seq_length 255 | assert len(segment_ids) == max_seq_length 256 | assert len(label_ids) == max_seq_length 257 | # assert len(label_mask) == max_seq_length 258 | 259 | # 打印部分样本数据信息 260 | if ex_index < 5: 261 | logger.info("*** Example ***") 262 | logger.info("guid: %s" % (example.guid)) 263 | logger.info("tokens: %s" % " ".join( 264 | [tokenization.printable_text(x) for x in tokens])) 265 | logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) 266 | logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) 267 | logger.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids])) 268 | logger.info("label_ids: %s" % " ".join([str(x) for x in label_ids])) 269 | # logger.info("label_mask: %s" % " ".join([str(x) for x in label_mask])) 270 | 271 | # 结构化为一个类 272 | feature = InputFeatures( 273 | input_ids=input_ids, 274 | input_mask=input_mask, 275 | segment_ids=segment_ids, 276 | label_ids=label_ids, 277 | # label_mask = label_mask 278 | ) 279 | # mode='test'的时候才有效 280 | write_tokens(ntokens, output_dir, mode) 281 | return feature 282 | 283 | 284 | def filed_based_convert_examples_to_features( 285 | examples, label_list, max_seq_length, tokenizer, output_file, output_dir, mode=None): 286 | """ 287 | 将数据转化为TF_Record 结构,作为模型数据输入 288 | :param examples: 样本 289 | :param label_list:标签list 290 | :param max_seq_length: 预先设定的最大序列长度 291 | :param tokenizer: tokenizer 对象 292 | :param output_file: tf.record 输出路径 293 | :param mode: 294 | :return: 295 | """ 296 | writer = tf.python_io.TFRecordWriter(output_file) 297 | # 遍历训练数据 298 | for (ex_index, example) in enumerate(examples): 299 | if ex_index % 5000 == 0: 300 | logger.info("Writing example %d of %d" % (ex_index, len(examples))) 301 | # 对于每一个训练样本, 302 | feature = convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer, output_dir, mode) 303 | 304 | def create_int_feature(values): 305 | f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values))) 306 | return f 307 | 308 | features = collections.OrderedDict() 309 | features["input_ids"] = create_int_feature(feature.input_ids) 310 | features["input_mask"] = create_int_feature(feature.input_mask) 311 | features["segment_ids"] = create_int_feature(feature.segment_ids) 312 | features["label_ids"] = create_int_feature(feature.label_ids) 313 | # features["label_mask"] = create_int_feature(feature.label_mask) 314 | # tf.train.Example/Feature 是一种协议,方便序列化??? 
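# Note (added for clarity): tf.train.Example is a protocol buffer; building it here and calling SerializeToString() below produces the byte string that is written into the TFRecord file.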
315 | tf_example = tf.train.Example(features=tf.train.Features(feature=features)) 316 | writer.write(tf_example.SerializeToString()) 317 | 318 | 319 | def file_based_input_fn_builder(input_file, seq_length, is_training, drop_remainder): 320 | name_to_features = { 321 | "input_ids": tf.FixedLenFeature([seq_length], tf.int64), 322 | "input_mask": tf.FixedLenFeature([seq_length], tf.int64), 323 | "segment_ids": tf.FixedLenFeature([seq_length], tf.int64), 324 | "label_ids": tf.FixedLenFeature([seq_length], tf.int64), 325 | # "label_ids":tf.VarLenFeature(tf.int64), 326 | # "label_mask": tf.FixedLenFeature([seq_length], tf.int64), 327 | } 328 | 329 | def _decode_record(record, name_to_features): 330 | example = tf.parse_single_example(record, name_to_features) 331 | for name in list(example.keys()): 332 | t = example[name] 333 | if t.dtype == tf.int64: 334 | t = tf.to_int32(t) 335 | example[name] = t 336 | return example 337 | 338 | def input_fn(params): 339 | batch_size = params["batch_size"] 340 | d = tf.data.TFRecordDataset(input_file) 341 | if is_training: 342 | d = d.repeat() 343 | d = d.shuffle(buffer_size=300) 344 | d = d.apply(tf.data.experimental.map_and_batch(lambda record: _decode_record(record, name_to_features), 345 | batch_size=batch_size, 346 | num_parallel_calls=8, # 并行处理数据的CPU核心数量,不要大于你机器的核心数 347 | drop_remainder=drop_remainder)) 348 | d = d.prefetch(buffer_size=4) 349 | return d 350 | 351 | return input_fn 352 | 353 | 354 | def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate, 355 | num_train_steps, num_warmup_steps, args): 356 | """ 357 | 构建模型 358 | :param bert_config: 359 | :param num_labels: 360 | :param init_checkpoint: 361 | :param learning_rate: 362 | :param num_train_steps: 363 | :param num_warmup_steps: 364 | :param use_tpu: 365 | :param use_one_hot_embeddings: 366 | :return: 367 | """ 368 | def model_fn(features, labels, mode, params): 369 | logger.info("*** Features ***") 370 | for name in sorted(features.keys()): 371 | logger.info(" name = %s, shape = %s" % (name, features[name].shape)) 372 | input_ids = features["input_ids"] 373 | input_mask = features["input_mask"] 374 | segment_ids = features["segment_ids"] 375 | label_ids = features["label_ids"] 376 | 377 | print('shape of input_ids', input_ids.shape) 378 | # label_mask = features["label_mask"] 379 | is_training = (mode == tf.estimator.ModeKeys.TRAIN) 380 | 381 | # 使用参数构建模型,input_idx 就是输入的样本idx表示,label_ids 就是标签的idx表示 382 | total_loss, logits, trans, pred_ids = create_model( 383 | bert_config, is_training, input_ids, input_mask, segment_ids, label_ids, 384 | num_labels, False, args.dropout_rate, args.lstm_size, args.cell, args.num_layers) 385 | 386 | tvars = tf.trainable_variables() 387 | # 加载BERT模型 388 | if init_checkpoint: 389 | (assignment_map, initialized_variable_names) = \ 390 | modeling.get_assignment_map_from_checkpoint(tvars, 391 | init_checkpoint) 392 | tf.train.init_from_checkpoint(init_checkpoint, assignment_map) 393 | 394 | # 打印变量名 395 | # logger.info("**** Trainable Variables ****") 396 | # 397 | # # 打印加载模型的参数 398 | # for var in tvars: 399 | # init_string = "" 400 | # if var.name in initialized_variable_names: 401 | # init_string = ", *INIT_FROM_CKPT*" 402 | # logger.info(" name = %s, shape = %s%s", var.name, var.shape, 403 | # init_string) 404 | 405 | output_spec = None 406 | if mode == tf.estimator.ModeKeys.TRAIN: 407 | #train_op = optimizer.optimizer(total_loss, learning_rate, num_train_steps) 408 | train_op = optimization.create_optimizer( 409 | total_loss, learning_rate, 
num_train_steps, num_warmup_steps, False) 410 | hook_dict = {} 411 | hook_dict['loss'] = total_loss 412 | hook_dict['global_steps'] = tf.train.get_or_create_global_step() 413 | logging_hook = tf.train.LoggingTensorHook( 414 | hook_dict, every_n_iter=args.save_summary_steps) 415 | 416 | output_spec = tf.estimator.EstimatorSpec( 417 | mode=mode, 418 | loss=total_loss, 419 | train_op=train_op, 420 | training_hooks=[logging_hook]) 421 | 422 | elif mode == tf.estimator.ModeKeys.EVAL: 423 | # 针对NER ,进行了修改 424 | def metric_fn(label_ids, pred_ids): 425 | return { 426 | "eval_loss": tf.metrics.mean_squared_error(labels=label_ids, predictions=pred_ids), 427 | } 428 | eval_metrics = metric_fn(label_ids, pred_ids) 429 | output_spec = tf.estimator.EstimatorSpec( 430 | mode=mode, 431 | loss=total_loss, 432 | eval_metric_ops=eval_metrics 433 | ) 434 | else: 435 | output_spec = tf.estimator.EstimatorSpec( 436 | mode=mode, 437 | predictions=pred_ids 438 | ) 439 | return output_spec 440 | 441 | return model_fn 442 | 443 | 444 | def get_last_checkpoint(model_path): 445 | if not os.path.exists(os.path.join(model_path, 'checkpoint')): 446 | logger.info('checkpoint file not exits:'.format(os.path.join(model_path, 'checkpoint'))) 447 | return None 448 | last = None 449 | with codecs.open(os.path.join(model_path, 'checkpoint'), 'r', encoding='utf-8') as fd: 450 | for line in fd: 451 | line = line.strip().split(':') 452 | if len(line) != 2: 453 | continue 454 | if line[0] == 'model_checkpoint_path': 455 | last = line[1][2:-1] 456 | break 457 | return last 458 | 459 | 460 | def adam_filter(model_path): 461 | """ 462 | 去掉模型中的Adam相关参数,这些参数在测试的时候是没有用的 463 | :param model_path: 464 | :return: 465 | """ 466 | last_name = get_last_checkpoint(model_path) 467 | if last_name is None: 468 | return 469 | sess = tf.Session() 470 | imported_meta = tf.train.import_meta_graph(os.path.join(model_path, last_name + '.meta')) 471 | imported_meta.restore(sess, os.path.join(model_path, last_name)) 472 | need_vars = [] 473 | for var in tf.global_variables(): 474 | if 'adam_v' not in var.name and 'adam_m' not in var.name: 475 | need_vars.append(var) 476 | saver = tf.train.Saver(need_vars) 477 | saver.save(sess, os.path.join(model_path, 'model.ckpt')) 478 | 479 | 480 | def train(args): 481 | os.environ['CUDA_VISIBLE_DEVICES'] = args.device_map 482 | 483 | processors = { 484 | "ner": NerProcessor 485 | } 486 | bert_config = modeling.BertConfig.from_json_file(args.bert_config_file) 487 | 488 | if args.max_seq_length > bert_config.max_position_embeddings: 489 | raise ValueError( 490 | "Cannot use sequence length %d because the BERT model " 491 | "was only trained up to sequence length %d" % 492 | (args.max_seq_length, bert_config.max_position_embeddings)) 493 | 494 | # 在re train 的时候,才删除上一轮产出的文件,在predicted 的时候不做clean 495 | if args.clean and args.do_train: 496 | if os.path.exists(args.output_dir): 497 | def del_file(path): 498 | ls = os.listdir(path) 499 | for i in ls: 500 | c_path = os.path.join(path, i) 501 | if os.path.isdir(c_path): 502 | del_file(c_path) 503 | else: 504 | os.remove(c_path) 505 | 506 | try: 507 | del_file(args.output_dir) 508 | except Exception as e: 509 | print(e) 510 | print('pleace remove the files of output dir and data.conf') 511 | exit(-1) 512 | 513 | #check output dir exists 514 | if not os.path.exists(args.output_dir): 515 | os.mkdir(args.output_dir) 516 | 517 | processor = processors[args.ner](args.output_dir) 518 | 519 | tokenizer = tokenization.FullTokenizer( 520 | vocab_file=args.vocab_file, 
do_lower_case=args.do_lower_case) 521 | 522 | session_config = tf.ConfigProto( 523 | log_device_placement=False, 524 | inter_op_parallelism_threads=0, 525 | intra_op_parallelism_threads=0, 526 | allow_soft_placement=True) 527 | 528 | run_config = tf.estimator.RunConfig( 529 | model_dir=args.output_dir, 530 | save_summary_steps=10, 531 | save_checkpoints_steps=500, 532 | session_config=session_config 533 | ) 534 | 535 | train_examples = None 536 | eval_examples = None 537 | num_train_steps = None 538 | num_warmup_steps = None 539 | 540 | if args.do_train and args.do_eval: 541 | # 加载训练数据 542 | train_examples = processor.get_train_examples(args.data_dir) 543 | num_train_steps = int( 544 | len(train_examples) *1.0 / args.batch_size * args.num_train_epochs) 545 | if num_train_steps < 1: 546 | raise AttributeError('training data is so small...') 547 | num_warmup_steps = int(num_train_steps * args.warmup_proportion) 548 | 549 | logger.info("***** Running training *****") 550 | logger.info(" Num examples = %d", len(train_examples)) 551 | logger.info(" Batch size = %d", args.batch_size) 552 | logger.info(" Num steps = %d", num_train_steps) 553 | 554 | eval_examples = processor.get_dev_examples(args.data_dir) 555 | 556 | # 打印验证集数据信息 557 | logger.info("***** Running evaluation *****") 558 | logger.info(" Num examples = %d", len(eval_examples)) 559 | logger.info(" Batch size = %d", args.batch_size) 560 | 561 | label_list = processor.get_labels() 562 | # 返回的model_dn 是一个函数,其定义了模型,训练,评测方法,并且使用钩子参数,加载了BERT模型的参数进行了自己模型的参数初始化过程 563 | # tf 新的架构方法,通过定义model_fn 函数,定义模型,然后通过EstimatorAPI进行模型的其他工作,Es就可以控制模型的训练,预测,评估工作等。 564 | model_fn = model_fn_builder( 565 | bert_config=bert_config, 566 | num_labels=len(label_list) + 1, 567 | init_checkpoint=args.init_checkpoint, 568 | learning_rate=args.learning_rate, 569 | num_train_steps=num_train_steps, 570 | num_warmup_steps=num_warmup_steps, 571 | args=args) 572 | 573 | params = { 574 | 'batch_size': args.batch_size 575 | } 576 | 577 | estimator = tf.estimator.Estimator( 578 | model_fn, 579 | params=params, 580 | config=run_config) 581 | 582 | if args.do_train and args.do_eval: 583 | # 1. 
将数据转化为tf_record 数据 584 | train_file = os.path.join(args.output_dir, "train.tf_record") 585 | if not os.path.exists(train_file): 586 | filed_based_convert_examples_to_features( 587 | train_examples, label_list, args.max_seq_length, tokenizer, train_file, args.output_dir) 588 | 589 | # 2.读取record 数据,组成batch 590 | train_input_fn = file_based_input_fn_builder( 591 | input_file=train_file, 592 | seq_length=args.max_seq_length, 593 | is_training=True, 594 | drop_remainder=True) 595 | # estimator.train(input_fn=train_input_fn, max_steps=num_train_steps) 596 | 597 | eval_file = os.path.join(args.output_dir, "eval.tf_record") 598 | if not os.path.exists(eval_file): 599 | filed_based_convert_examples_to_features( 600 | eval_examples, label_list, args.max_seq_length, tokenizer, eval_file, args.output_dir) 601 | eval_input_fn = file_based_input_fn_builder( 602 | input_file=eval_file, 603 | seq_length=args.max_seq_length, 604 | is_training=False, 605 | drop_remainder=False) 606 | 607 | # train and eval togither 608 | # early stop hook 609 | early_stopping_hook = tf.contrib.estimator.stop_if_no_decrease_hook( 610 | estimator=estimator, 611 | metric_name='loss', 612 | max_steps_without_decrease=num_train_steps, 613 | eval_dir=None, 614 | min_steps=0, 615 | run_every_secs=None, 616 | run_every_steps=args.save_checkpoints_steps) 617 | 618 | train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=num_train_steps, 619 | hooks=[early_stopping_hook]) 620 | eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn) 621 | tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec) 622 | 623 | if args.do_predict: 624 | token_path = os.path.join(args.output_dir, "token_test.txt") 625 | if os.path.exists(token_path): 626 | os.remove(token_path) 627 | 628 | with codecs.open(os.path.join(args.output_dir, 'label2id.pkl'), 'rb') as rf: 629 | label2id = pickle.load(rf) 630 | id2label = {value: key for key, value in label2id.items()} 631 | 632 | predict_examples = processor.get_test_examples(args.data_dir) 633 | predict_file = os.path.join(args.output_dir, "predict.tf_record") 634 | filed_based_convert_examples_to_features(predict_examples, label_list, 635 | args.max_seq_length, tokenizer, 636 | predict_file, args.output_dir, mode="test") 637 | 638 | logger.info("***** Running prediction*****") 639 | logger.info(" Num examples = %d", len(predict_examples)) 640 | logger.info(" Batch size = %d", args.batch_size) 641 | 642 | predict_drop_remainder = False 643 | predict_input_fn = file_based_input_fn_builder( 644 | input_file=predict_file, 645 | seq_length=args.max_seq_length, 646 | is_training=False, 647 | drop_remainder=predict_drop_remainder) 648 | 649 | result = estimator.predict(input_fn=predict_input_fn) 650 | output_predict_file = os.path.join(args.output_dir, "label_test.txt") 651 | 652 | def result_to_pair(writer): 653 | for predict_line, prediction in zip(predict_examples, result): 654 | idx = 0 655 | line = '' 656 | line_token = str(predict_line.text).split(' ') 657 | label_token = str(predict_line.label).split(' ') 658 | len_seq = len(label_token) 659 | if len(line_token) != len(label_token): 660 | logger.info(predict_line.text) 661 | logger.info(predict_line.label) 662 | break 663 | for id in prediction: 664 | if idx >= len_seq: 665 | break 666 | if id == 0: 667 | continue 668 | curr_labels = id2label[id] 669 | if curr_labels in ['[CLS]', '[SEP]']: 670 | continue 671 | try: 672 | line += line_token[idx] + ' ' + label_token[idx] + ' ' + curr_labels + '\n' 673 | except Exception as e: 674 | 
logger.info(e) 675 | logger.info(predict_line.text) 676 | logger.info(predict_line.label) 677 | line = '' 678 | break 679 | idx += 1 680 | writer.write(line + '\n') 681 | 682 | with codecs.open(output_predict_file, 'w', encoding='utf-8') as writer: 683 | result_to_pair(writer) 684 | from train import conlleval 685 | eval_result = conlleval.return_report(output_predict_file) 686 | print(''.join(eval_result)) 687 | # 写结果到文件中 688 | with codecs.open(os.path.join(args.output_dir, 'predict_score.txt'), 'a', encoding='utf-8') as fd: 689 | fd.write(''.join(eval_result)) 690 | # filter model 691 | if args.filter_adam_var: 692 | adam_filter(args.output_dir) 693 | 694 | -------------------------------------------------------------------------------- /train/conlleval.py: -------------------------------------------------------------------------------- 1 | # Python version of the evaluation script from CoNLL'00- 2 | # Originates from: https://github.com/spyysalo/conlleval.py 3 | 4 | 5 | # Intentional differences: 6 | # - accept any space as delimiter by default 7 | # - optional file argument (default STDIN) 8 | # - option to set boundary (-b argument) 9 | # - LaTeX output (-l argument) not supported 10 | # - raw tags (-r argument) not supported 11 | 12 | # add function :evaluate(predicted_label, ori_label): which will not read from file 13 | 14 | import sys 15 | import re 16 | import codecs 17 | from collections import defaultdict, namedtuple 18 | 19 | ANY_SPACE = '' 20 | 21 | 22 | class FormatError(Exception): 23 | pass 24 | 25 | Metrics = namedtuple('Metrics', 'tp fp fn prec rec fscore') 26 | 27 | 28 | class EvalCounts(object): 29 | def __init__(self): 30 | self.correct_chunk = 0 # number of correctly identified chunks 31 | self.correct_tags = 0 # number of correct chunk tags 32 | self.found_correct = 0 # number of chunks in corpus 33 | self.found_guessed = 0 # number of identified chunks 34 | self.token_counter = 0 # token counter (ignores sentence breaks) 35 | 36 | # counts by type 37 | self.t_correct_chunk = defaultdict(int) 38 | self.t_found_correct = defaultdict(int) 39 | self.t_found_guessed = defaultdict(int) 40 | 41 | 42 | def parse_args(argv): 43 | import argparse 44 | parser = argparse.ArgumentParser( 45 | description='evaluate tagging results using CoNLL criteria', 46 | formatter_class=argparse.ArgumentDefaultsHelpFormatter 47 | ) 48 | arg = parser.add_argument 49 | arg('-b', '--boundary', metavar='STR', default='-X-', 50 | help='sentence boundary') 51 | arg('-d', '--delimiter', metavar='CHAR', default=ANY_SPACE, 52 | help='character delimiting items in input') 53 | arg('-o', '--otag', metavar='CHAR', default='O', 54 | help='alternative outside tag') 55 | arg('file', nargs='?', default=None) 56 | return parser.parse_args(argv) 57 | 58 | 59 | def parse_tag(t): 60 | m = re.match(r'^([^-]*)-(.*)$', t) 61 | return m.groups() if m else (t, '') 62 | 63 | 64 | def evaluate(iterable, options=None): 65 | if options is None: 66 | options = parse_args([]) # use defaults 67 | 68 | counts = EvalCounts() 69 | num_features = None # number of features per line 70 | in_correct = False # currently processed chunks is correct until now 71 | last_correct = 'O' # previous chunk tag in corpus 72 | last_correct_type = '' # type of previously identified chunk tag 73 | last_guessed = 'O' # previously identified chunk tag 74 | last_guessed_type = '' # type of previous chunk tag in corpus 75 | 76 | for line in iterable: 77 | line = line.rstrip('\r\n') 78 | 79 | if options.delimiter == ANY_SPACE: 80 | features = 
line.split() 81 | else: 82 | features = line.split(options.delimiter) 83 | 84 | if num_features is None: 85 | num_features = len(features) 86 | elif num_features != len(features) and len(features) != 0: 87 | raise FormatError('unexpected number of features: %d (%d)' % 88 | (len(features), num_features)) 89 | 90 | if len(features) == 0 or features[0] == options.boundary: 91 | features = [options.boundary, 'O', 'O'] 92 | if len(features) < 3: 93 | raise FormatError('unexpected number of features in line %s' % line) 94 | 95 | guessed, guessed_type = parse_tag(features.pop()) 96 | correct, correct_type = parse_tag(features.pop()) 97 | first_item = features.pop(0) 98 | 99 | if first_item == options.boundary: 100 | guessed = 'O' 101 | 102 | end_correct = end_of_chunk(last_correct, correct, 103 | last_correct_type, correct_type) 104 | end_guessed = end_of_chunk(last_guessed, guessed, 105 | last_guessed_type, guessed_type) 106 | start_correct = start_of_chunk(last_correct, correct, 107 | last_correct_type, correct_type) 108 | start_guessed = start_of_chunk(last_guessed, guessed, 109 | last_guessed_type, guessed_type) 110 | 111 | if in_correct: 112 | if (end_correct and end_guessed and 113 | last_guessed_type == last_correct_type): 114 | in_correct = False 115 | counts.correct_chunk += 1 116 | counts.t_correct_chunk[last_correct_type] += 1 117 | elif (end_correct != end_guessed or guessed_type != correct_type): 118 | in_correct = False 119 | 120 | if start_correct and start_guessed and guessed_type == correct_type: 121 | in_correct = True 122 | 123 | if start_correct: 124 | counts.found_correct += 1 125 | counts.t_found_correct[correct_type] += 1 126 | if start_guessed: 127 | counts.found_guessed += 1 128 | counts.t_found_guessed[guessed_type] += 1 129 | if first_item != options.boundary: 130 | if correct == guessed and guessed_type == correct_type: 131 | counts.correct_tags += 1 132 | counts.token_counter += 1 133 | 134 | last_guessed = guessed 135 | last_correct = correct 136 | last_guessed_type = guessed_type 137 | last_correct_type = correct_type 138 | 139 | if in_correct: 140 | counts.correct_chunk += 1 141 | counts.t_correct_chunk[last_correct_type] += 1 142 | 143 | return counts 144 | 145 | 146 | 147 | def uniq(iterable): 148 | seen = set() 149 | return [i for i in iterable if not (i in seen or seen.add(i))] 150 | 151 | 152 | def calculate_metrics(correct, guessed, total): 153 | tp, fp, fn = correct, guessed-correct, total-correct 154 | p = 0 if tp + fp == 0 else 1.*tp / (tp + fp) 155 | r = 0 if tp + fn == 0 else 1.*tp / (tp + fn) 156 | f = 0 if p + r == 0 else 2 * p * r / (p + r) 157 | return Metrics(tp, fp, fn, p, r, f) 158 | 159 | 160 | def metrics(counts): 161 | c = counts 162 | overall = calculate_metrics( 163 | c.correct_chunk, c.found_guessed, c.found_correct 164 | ) 165 | by_type = {} 166 | for t in uniq(list(c.t_found_correct) + list(c.t_found_guessed)): 167 | by_type[t] = calculate_metrics( 168 | c.t_correct_chunk[t], c.t_found_guessed[t], c.t_found_correct[t] 169 | ) 170 | return overall, by_type 171 | 172 | 173 | def report(counts, out=None): 174 | if out is None: 175 | out = sys.stdout 176 | 177 | overall, by_type = metrics(counts) 178 | 179 | c = counts 180 | out.write('processed %d tokens with %d phrases; ' % 181 | (c.token_counter, c.found_correct)) 182 | out.write('found: %d phrases; correct: %d.\n' % 183 | (c.found_guessed, c.correct_chunk)) 184 | 185 | if c.token_counter > 0: 186 | out.write('accuracy: %6.2f%%; ' % 187 | (100.*c.correct_tags/c.token_counter)) 188 | 
out.write('precision: %6.2f%%; ' % (100.*overall.prec)) 189 | out.write('recall: %6.2f%%; ' % (100.*overall.rec)) 190 | out.write('FB1: %6.2f\n' % (100.*overall.fscore)) 191 | 192 | for i, m in sorted(by_type.items()): 193 | out.write('%17s: ' % i) 194 | out.write('precision: %6.2f%%; ' % (100.*m.prec)) 195 | out.write('recall: %6.2f%%; ' % (100.*m.rec)) 196 | out.write('FB1: %6.2f %d\n' % (100.*m.fscore, c.t_found_guessed[i])) 197 | 198 | 199 | def report_notprint(counts, out=None): 200 | if out is None: 201 | out = sys.stdout 202 | 203 | overall, by_type = metrics(counts) 204 | 205 | c = counts 206 | final_report = [] 207 | line = [] 208 | line.append('processed %d tokens with %d phrases; ' % 209 | (c.token_counter, c.found_correct)) 210 | line.append('found: %d phrases; correct: %d.\n' % 211 | (c.found_guessed, c.correct_chunk)) 212 | final_report.append("".join(line)) 213 | 214 | if c.token_counter > 0: 215 | line = [] 216 | line.append('accuracy: %6.2f%%; ' % 217 | (100.*c.correct_tags/c.token_counter)) 218 | line.append('precision: %6.2f%%; ' % (100.*overall.prec)) 219 | line.append('recall: %6.2f%%; ' % (100.*overall.rec)) 220 | line.append('FB1: %6.2f\n' % (100.*overall.fscore)) 221 | final_report.append("".join(line)) 222 | 223 | for i, m in sorted(by_type.items()): 224 | line = [] 225 | line.append('%17s: ' % i) 226 | line.append('precision: %6.2f%%; ' % (100.*m.prec)) 227 | line.append('recall: %6.2f%%; ' % (100.*m.rec)) 228 | line.append('FB1: %6.2f %d\n' % (100.*m.fscore, c.t_found_guessed[i])) 229 | final_report.append("".join(line)) 230 | return final_report 231 | 232 | 233 | def end_of_chunk(prev_tag, tag, prev_type, type_): 234 | # check if a chunk ended between the previous and current word 235 | # arguments: previous and current chunk tags, previous and current types 236 | chunk_end = False 237 | 238 | if prev_tag == 'E': chunk_end = True 239 | if prev_tag == 'S': chunk_end = True 240 | 241 | if prev_tag == 'B' and tag == 'B': chunk_end = True 242 | if prev_tag == 'B' and tag == 'S': chunk_end = True 243 | if prev_tag == 'B' and tag == 'O': chunk_end = True 244 | if prev_tag == 'I' and tag == 'B': chunk_end = True 245 | if prev_tag == 'I' and tag == 'S': chunk_end = True 246 | if prev_tag == 'I' and tag == 'O': chunk_end = True 247 | 248 | if prev_tag != 'O' and prev_tag != '.' and prev_type != type_: 249 | chunk_end = True 250 | 251 | # these chunks are assumed to have length 1 252 | if prev_tag == ']': chunk_end = True 253 | if prev_tag == '[': chunk_end = True 254 | 255 | return chunk_end 256 | 257 | 258 | def start_of_chunk(prev_tag, tag, prev_type, type_): 259 | # check if a chunk started between the previous and current word 260 | # arguments: previous and current chunk tags, previous and current types 261 | chunk_start = False 262 | 263 | if tag == 'B': chunk_start = True 264 | if tag == 'S': chunk_start = True 265 | 266 | if prev_tag == 'E' and tag == 'E': chunk_start = True 267 | if prev_tag == 'E' and tag == 'I': chunk_start = True 268 | if prev_tag == 'S' and tag == 'E': chunk_start = True 269 | if prev_tag == 'S' and tag == 'I': chunk_start = True 270 | if prev_tag == 'O' and tag == 'E': chunk_start = True 271 | if prev_tag == 'O' and tag == 'I': chunk_start = True 272 | 273 | if tag != 'O' and tag != '.' 
and prev_type != type_: 274 | chunk_start = True 275 | 276 | # these chunks are assumed to have length 1 277 | if tag == '[': chunk_start = True 278 | if tag == ']': chunk_start = True 279 | 280 | return chunk_start 281 | 282 | 283 | def return_report(input_file): 284 | with codecs.open(input_file, "r", "utf8") as f: 285 | counts = evaluate(f) 286 | return report_notprint(counts) 287 | 288 | 289 | def main(argv): 290 | args = parse_args(argv[1:]) 291 | 292 | if args.file is None: 293 | counts = evaluate(sys.stdin, args) 294 | else: 295 | with open(args.file) as f: 296 | counts = evaluate(f, args) 297 | report(counts) 298 | 299 | if __name__ == '__main__': 300 | sys.exit(main(sys.argv)) -------------------------------------------------------------------------------- /train/helper.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import argparse 3 | import logging 4 | import os 5 | 6 | __all__ = ['get_args_parser', 'set_logger'] 7 | 8 | 9 | def get_args_parser(): 10 | from .bert_lstm_ner import __version__ 11 | parser = argparse.ArgumentParser() 12 | if os.name == 'nt': 13 | # chinese_L-12_H-768_A-12为google发布的预训练模型 14 | bert_path = 'bert_base/chinese_L-12_H-768_A-12' 15 | root_path = r'' 16 | else: 17 | bert_path = '/home/chinese_L-12_H-768_A-12/' 18 | root_path = '/home/BERT-BiLSTM-CRF-NER' 19 | 20 | group1 = parser.add_argument_group('File Paths', 'config the path, checkpoint and filename of a pretrained/fine-tuned BERT model') 21 | 22 | # 1.data_dir是存放最终数据集的地方,使用自己数据训练一定记得确认数据集是否在如下目录(当前是:data/dataset) 23 | group1.add_argument('-data_dir', type=str, default=os.path.join(root_path, 'data/dataset'), 24 | help='train, dev and test data dir') 25 | 26 | # 2.output_dir是放模型以及中间数据存放的目录,如果要生成新模型,一定要确认,否则会抹掉之前训练好的模型 27 | group1.add_argument('-output_dir', type=str, default=os.path.join(root_path, 'output'), 28 | help='directory of a pretrained BERT model') 29 | 30 | group1.add_argument('-bert_path', type=str, default=bert_path) 31 | group1.add_argument('-bert_config_file', type=str, default=os.path.join(bert_path, 'bert_config.json')) 32 | group1.add_argument('-init_checkpoint', type=str, default=os.path.join(bert_path, 'bert_model.ckpt'), 33 | help='Initial checkpoint (usually from a pre-trained BERT model).') 34 | group1.add_argument('-vocab_file', type=str, default=os.path.join(bert_path, 'vocab.txt'), 35 | help='') 36 | 37 | group2 = parser.add_argument_group('Model Config', 'config the model params') 38 | 39 | # 3.地址的最大长度,当前为100。可以根据需要调整。 40 | # 例如:"北京市朝阳区" 长度为6 41 | group2.add_argument('-max_seq_length', type=int, default=100, 42 | help='The maximum total input sequence length after WordPiece tokenization.') 43 | group2.add_argument('-do_train', action='store_false', default=True, 44 | help='Whether to run training.') 45 | group2.add_argument('-do_eval', action='store_false', default=True, 46 | help='Whether to run eval on the dev set.') 47 | group2.add_argument('-do_predict', action='store_false', default=True, 48 | help='Whether to run the predict in inference mode on the test set.') 49 | 50 | # 4. 一次添加多少条地址到模型进行训练,一般可以选择2的倍数 51 | group2.add_argument('-batch_size', type=int, default=32, 52 | help='Total batch size for training, eval and predict.') 53 | # 5. 
训练学习率 54 | group2.add_argument('-learning_rate', type=float, default=1e-5, 55 | help='The initial learning rate for Adam.') 56 | # 6.一次训练训练多少代(把所有训练数据从头到尾训练一次是一代) 57 | group2.add_argument('-num_train_epochs', type=float, default=10, 58 | help='Total number of training epochs to perform.') 59 | 60 | 61 | group2.add_argument('-dropout_rate', type=float, default=0.5, 62 | help='Dropout rate') 63 | group2.add_argument('-clip', type=float, default=0.5, 64 | help='Gradient clip') 65 | group2.add_argument('-warmup_proportion', type=float, default=0.1, 66 | help='Proportion of training to perform linear learning rate warmup for ' 67 | 'E.g., 0.1 = 10% of training.') 68 | 69 | # 7. LSTM网络中一层的神经元数目 70 | group2.add_argument('-lstm_size', type=int, default=128, 71 | help='size of lstm units.') 72 | # 8. LSTM设置几层(数目较大会造成训练缓慢) 73 | group2.add_argument('-num_layers', type=int, default=1, 74 | help='number of rnn layers, default is 1.') 75 | group2.add_argument('-cell', type=str, default='lstm', 76 | help='which rnn cell used.') 77 | 78 | # 9. 训练多少步,保存一次模型 79 | group2.add_argument('-save_checkpoints_steps', type=int, default=500, 80 | help='save_checkpoints_steps') 81 | # 10.训练多少步,保存一次当前状态 82 | group2.add_argument('-save_summary_steps', type=int, default=500, 83 | help='save_summary_steps.') 84 | group2.add_argument('-filter_adam_var', type=bool, default=False, 85 | help='after training do filter Adam params from model and save no Adam params model in file.') 86 | group2.add_argument('-do_lower_case', type=bool, default=True, 87 | help='Whether to lower case the input text.') 88 | group2.add_argument('-clean', type=bool, default=True) 89 | group2.add_argument('-device_map', type=str, default='0', 90 | help='witch device using to train') 91 | 92 | # add labels 93 | group2.add_argument('-label_list', type=str, default=None, 94 | help='User define labels, can be a file with one label one line or a string using \',\' split') 95 | 96 | parser.add_argument('-verbose', action='store_true', default=False, 97 | help='turn on tensorflow logging for debug') 98 | parser.add_argument('-ner', type=str, default='ner', help='which modle to train') 99 | parser.add_argument('-version', action='version', version='%(prog)s ' + __version__) 100 | return parser.parse_args() 101 | 102 | 103 | def set_logger(context, verbose=False): 104 | logger = logging.getLogger(context) 105 | logger.setLevel(logging.DEBUG if verbose else logging.INFO) 106 | formatter = logging.Formatter( 107 | '%(levelname)-.1s:' + context + ':[%(filename).3s:%(funcName).3s:%(lineno)3d]:%(message)s', datefmt= 108 | '%m-%d %H:%M:%S') 109 | console_handler = logging.StreamHandler() 110 | console_handler.setLevel(logging.DEBUG if verbose else logging.INFO) 111 | console_handler.setFormatter(formatter) 112 | logger.handlers = [] 113 | logger.addHandler(console_handler) 114 | return logger -------------------------------------------------------------------------------- /train/lstm_crf_layer.py: -------------------------------------------------------------------------------- 1 | # encoding=utf-8 2 | 3 | """ 4 | bert-blstm-crf layer 5 | @Author:Macan 6 | """ 7 | 8 | import tensorflow as tf 9 | from tensorflow.contrib import rnn 10 | from tensorflow.contrib import crf 11 | 12 | 13 | class BLSTM_CRF(object): 14 | def __init__(self, embedded_chars, hidden_unit, cell_type, num_layers, dropout_rate, 15 | initializers, num_labels, seq_length, labels, lengths, is_training): 16 | """ 17 | BLSTM-CRF 网络 18 | :param embedded_chars: Fine-tuning embedding input 19 | :param 
hidden_unit: LSTM的隐含单元个数 20 | :param cell_type: RNN类型(LSTM OR GRU DICNN will be add in feature) 21 | :param num_layers: RNN的层数 22 | :param droupout_rate: droupout rate 23 | :param initializers: variable init class 24 | :param num_labels: 标签数量 25 | :param seq_length: 序列最大长度 26 | :param labels: 真实标签 27 | :param lengths: [batch_size] 每个batch下序列的真实长度 28 | :param is_training: 是否是训练过程 29 | """ 30 | self.hidden_unit = hidden_unit 31 | self.dropout_rate = dropout_rate 32 | self.cell_type = cell_type 33 | self.num_layers = num_layers 34 | self.embedded_chars = embedded_chars 35 | self.initializers = initializers 36 | self.seq_length = seq_length 37 | self.num_labels = num_labels 38 | self.labels = labels 39 | self.lengths = lengths 40 | self.embedding_dims = embedded_chars.shape[-1].value 41 | self.is_training = is_training 42 | 43 | def add_blstm_crf_layer(self, crf_only): 44 | """ 45 | blstm-crf网络 46 | :return: 47 | """ 48 | if self.is_training: 49 | # lstm input dropout rate i set 0.9 will get best score 50 | self.embedded_chars = tf.nn.dropout(self.embedded_chars, self.dropout_rate) 51 | 52 | if crf_only: 53 | logits = self.project_crf_layer(self.embedded_chars) 54 | else: 55 | # blstm 56 | lstm_output = self.blstm_layer(self.embedded_chars) 57 | # project 58 | logits = self.project_bilstm_layer(lstm_output) 59 | # crf 60 | loss, trans = self.crf_layer(logits) 61 | # CRF decode, pred_ids 是一条最大概率的标注路径 62 | pred_ids, _ = crf.crf_decode(potentials=logits, transition_params=trans, sequence_length=self.lengths) 63 | return (loss, logits, trans, pred_ids) 64 | 65 | def _witch_cell(self): 66 | """ 67 | RNN 类型 68 | :return: 69 | """ 70 | cell_tmp = None 71 | if self.cell_type == 'lstm': 72 | cell_tmp = rnn.LSTMCell(self.hidden_unit) 73 | elif self.cell_type == 'gru': 74 | cell_tmp = rnn.GRUCell(self.hidden_unit) 75 | return cell_tmp 76 | 77 | def _bi_dir_rnn(self): 78 | """ 79 | 双向RNN 80 | :return: 81 | """ 82 | cell_fw = self._witch_cell() 83 | cell_bw = self._witch_cell() 84 | if self.dropout_rate is not None: 85 | cell_bw = rnn.DropoutWrapper(cell_bw, output_keep_prob=self.dropout_rate) 86 | cell_fw = rnn.DropoutWrapper(cell_fw, output_keep_prob=self.dropout_rate) 87 | return cell_fw, cell_bw 88 | 89 | def blstm_layer(self, embedding_chars): 90 | """ 91 | 92 | :return: 93 | """ 94 | with tf.variable_scope('rnn_layer'): 95 | cell_fw, cell_bw = self._bi_dir_rnn() 96 | if self.num_layers > 1: 97 | cell_fw = rnn.MultiRNNCell([cell_fw] * self.num_layers, state_is_tuple=True) 98 | cell_bw = rnn.MultiRNNCell([cell_bw] * self.num_layers, state_is_tuple=True) 99 | outputs, _ = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, embedding_chars, 100 | dtype=tf.float32) 101 | outputs = tf.concat(outputs, axis=2) 102 | return outputs 103 | 104 | def project_bilstm_layer(self, lstm_outputs, name=None): 105 | """ 106 | hidden layer between lstm layer and logits 107 | :param lstm_outputs: [batch_size, num_steps, emb_size] 108 | :return: [batch_size, num_steps, num_tags] 109 | """ 110 | with tf.variable_scope("project" if not name else name): 111 | with tf.variable_scope("hidden"): 112 | W = tf.get_variable("W", shape=[self.hidden_unit * 2, self.hidden_unit], 113 | dtype=tf.float32, initializer=self.initializers.xavier_initializer()) 114 | 115 | b = tf.get_variable("b", shape=[self.hidden_unit], dtype=tf.float32, 116 | initializer=tf.zeros_initializer()) 117 | output = tf.reshape(lstm_outputs, shape=[-1, self.hidden_unit * 2]) 118 | hidden = tf.nn.xw_plus_b(output, W, b) 119 | 120 | # project to score of tags 121 | 
with tf.variable_scope("logits"): 122 | W = tf.get_variable("W", shape=[self.hidden_unit, self.num_labels], 123 | dtype=tf.float32, initializer=self.initializers.xavier_initializer()) 124 | 125 | b = tf.get_variable("b", shape=[self.num_labels], dtype=tf.float32, 126 | initializer=tf.zeros_initializer()) 127 | 128 | pred = tf.nn.xw_plus_b(hidden, W, b) 129 | return tf.reshape(pred, [-1, self.seq_length, self.num_labels]) 130 | 131 | def project_crf_layer(self, embedding_chars, name=None): 132 | """ 133 | hidden layer between input layer and logits 134 | :param lstm_outputs: [batch_size, num_steps, emb_size] 135 | :return: [batch_size, num_steps, num_tags] 136 | """ 137 | with tf.variable_scope("project" if not name else name): 138 | with tf.variable_scope("logits"): 139 | W = tf.get_variable("W", shape=[self.embedding_dims, self.num_labels], 140 | dtype=tf.float32, initializer=self.initializers.xavier_initializer()) 141 | 142 | b = tf.get_variable("b", shape=[self.num_labels], dtype=tf.float32, 143 | initializer=tf.zeros_initializer()) 144 | output = tf.reshape(self.embedded_chars, 145 | shape=[-1, self.embedding_dims]) # [batch_size, embedding_dims] 146 | pred = tf.tanh(tf.nn.xw_plus_b(output, W, b)) 147 | return tf.reshape(pred, [-1, self.seq_length, self.num_labels]) 148 | 149 | def crf_layer(self, logits): 150 | """ 151 | calculate crf loss 152 | :param project_logits: [1, num_steps, num_tags] 153 | :return: scalar loss 154 | """ 155 | with tf.variable_scope("crf_loss"): 156 | trans = tf.get_variable( 157 | "transitions", 158 | shape=[self.num_labels, self.num_labels], 159 | initializer=self.initializers.xavier_initializer()) 160 | if self.labels is None: 161 | return None, trans 162 | else: 163 | log_likelihood, trans = tf.contrib.crf.crf_log_likelihood( 164 | inputs=logits, 165 | tag_indices=self.labels, 166 | transition_params=trans, 167 | sequence_lengths=self.lengths) 168 | return tf.reduce_mean(-log_likelihood), trans 169 | -------------------------------------------------------------------------------- /train/models.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | """ 4 | 一些公共模型代码 5 | @Time : 2019/1/30 12:46 6 | @Author : MaCan (ma_cancan@163.com) 7 | @File : models.py 8 | """ 9 | 10 | from train.lstm_crf_layer import BLSTM_CRF 11 | from tensorflow.contrib.layers.python.layers import initializers 12 | 13 | 14 | __all__ = ['InputExample', 'InputFeatures', 'decode_labels', 'create_model', 'convert_id_str', 15 | 'convert_id_to_label', 'result_to_json', 'create_classification_model'] 16 | 17 | class Model(object): 18 | def __init__(self, *args, **kwargs): 19 | pass 20 | 21 | 22 | class InputExample(object): 23 | """A single training/test example for simple sequence classification.""" 24 | 25 | def __init__(self, guid=None, text=None, label=None): 26 | """Constructs a InputExample. 27 | Args: 28 | guid: Unique id for the example. 29 | text_a: string. The untokenized text of the first sequence. For single 30 | sequence tasks, only this sequence must be specified. 31 | label: (Optional) string. The label of the example. This should be 32 | specified for train and dev examples, but not for test examples. 
33 | """ 34 | self.guid = guid 35 | self.text = text 36 | self.label = label 37 | 38 | class InputFeatures(object): 39 | """A single set of features of data.""" 40 | 41 | def __init__(self, input_ids, input_mask, segment_ids, label_ids, ): 42 | self.input_ids = input_ids 43 | self.input_mask = input_mask 44 | self.segment_ids = segment_ids 45 | self.label_ids = label_ids 46 | # self.label_mask = label_mask 47 | 48 | 49 | class DataProcessor(object): 50 | """Base class for data converters for sequence classification data sets.""" 51 | 52 | def get_train_examples(self, data_dir): 53 | """Gets a collection of `InputExample`s for the train set.""" 54 | raise NotImplementedError() 55 | 56 | def get_dev_examples(self, data_dir): 57 | """Gets a collection of `InputExample`s for the dev set.""" 58 | raise NotImplementedError() 59 | 60 | def get_labels(self): 61 | """Gets the list of labels for this data set.""" 62 | raise NotImplementedError() 63 | 64 | 65 | def create_model(bert_config, is_training, input_ids, input_mask, 66 | segment_ids, labels, num_labels, use_one_hot_embeddings, 67 | dropout_rate=1.0, lstm_size=1, cell='lstm', num_layers=1): 68 | """ 69 | 创建X模型 70 | :param bert_config: bert 配置 71 | :param is_training: 72 | :param input_ids: 数据的idx 表示 73 | :param input_mask: 74 | :param segment_ids: 75 | :param labels: 标签的idx 表示 76 | :param num_labels: 类别数量 77 | :param use_one_hot_embeddings: 78 | :return: 79 | """ 80 | # 使用数据加载BertModel,获取对应的字embedding 81 | import tensorflow as tf 82 | from bert_base.bert import modeling 83 | model = modeling.BertModel( 84 | config=bert_config, 85 | is_training=is_training, 86 | input_ids=input_ids, 87 | input_mask=input_mask, 88 | token_type_ids=segment_ids, 89 | use_one_hot_embeddings=use_one_hot_embeddings 90 | ) 91 | # 获取对应的embedding 输入数据[batch_size, seq_length, embedding_size] 92 | embedding = model.get_sequence_output() 93 | max_seq_length = embedding.shape[1].value 94 | # 算序列真实长度 95 | used = tf.sign(tf.abs(input_ids)) 96 | lengths = tf.reduce_sum(used, reduction_indices=1) # [batch_size] 大小的向量,包含了当前batch中的序列长度 97 | # 添加CRF output layer 98 | blstm_crf = BLSTM_CRF(embedded_chars=embedding, hidden_unit=lstm_size, cell_type=cell, num_layers=num_layers, 99 | dropout_rate=dropout_rate, initializers=initializers, num_labels=num_labels, 100 | seq_length=max_seq_length, labels=labels, lengths=lengths, is_training=is_training) 101 | rst = blstm_crf.add_blstm_crf_layer(crf_only=True) 102 | return rst 103 | 104 | 105 | def create_classification_model(bert_config, is_training, input_ids, input_mask, segment_ids, labels, num_labels): 106 | """ 107 | 108 | :param bert_config: 109 | :param is_training: 110 | :param input_ids: 111 | :param input_mask: 112 | :param segment_ids: 113 | :param labels: 114 | :param num_labels: 115 | :param use_one_hot_embedding: 116 | :return: 117 | """ 118 | import tensorflow as tf 119 | from bert_base.bert import modeling 120 | # 通过传入的训练数据,进行representation 121 | model = modeling.BertModel( 122 | config=bert_config, 123 | is_training=is_training, 124 | input_ids=input_ids, 125 | input_mask=input_mask, 126 | token_type_ids=segment_ids, 127 | ) 128 | 129 | embedding_layer = model.get_sequence_output() 130 | output_layer = model.get_pooled_output() 131 | hidden_size = output_layer.shape[-1].value 132 | 133 | # predict = CNN_Classification(embedding_chars=embedding_layer, 134 | # labels=labels, 135 | # num_tags=num_labels, 136 | # sequence_length=FLAGS.max_seq_length, 137 | # embedding_dims=embedding_layer.shape[-1].value, 138 | # 
vocab_size=0, 139 | # filter_sizes=[3, 4, 5], 140 | # num_filters=3, 141 | # dropout_keep_prob=FLAGS.dropout_keep_prob, 142 | # l2_reg_lambda=0.001) 143 | # loss, predictions, probabilities = predict.add_cnn_layer() 144 | 145 | output_weights = tf.get_variable( 146 | "output_weights", [num_labels, hidden_size], 147 | initializer=tf.truncated_normal_initializer(stddev=0.02)) 148 | 149 | output_bias = tf.get_variable( 150 | "output_bias", [num_labels], initializer=tf.zeros_initializer()) 151 | 152 | with tf.variable_scope("loss"): 153 | if is_training: 154 | # I.e., 0.1 dropout 155 | output_layer = tf.nn.dropout(output_layer, keep_prob=0.9) 156 | 157 | logits = tf.matmul(output_layer, output_weights, transpose_b=True) 158 | logits = tf.nn.bias_add(logits, output_bias) 159 | probabilities = tf.nn.softmax(logits, axis=-1) 160 | log_probs = tf.nn.log_softmax(logits, axis=-1) 161 | 162 | if labels is not None: 163 | one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32) 164 | 165 | per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1) 166 | loss = tf.reduce_mean(per_example_loss) 167 | else: 168 | loss, per_example_loss = None, None 169 | return (loss, per_example_loss, logits, probabilities) 170 | 171 | 172 | def decode_labels(labels, batch_size): 173 | new_labels = [] 174 | for row in range(batch_size): 175 | label = [] 176 | for i in labels[row]: 177 | i = i.decode('utf-8') 178 | if i == '**PAD**': 179 | break 180 | if i in ['[CLS]', '[SEP]']: 181 | continue 182 | label.append(i) 183 | new_labels.append(label) 184 | return new_labels 185 | 186 | 187 | def convert_id_str(input_ids, batch_size): 188 | res = [] 189 | for row in range(batch_size): 190 | line = [] 191 | for i in input_ids[row]: 192 | i = i.decode('utf-8') 193 | if i == '**PAD**': 194 | break 195 | if i in ['[CLS]', '[SEP]']: 196 | continue 197 | 198 | line.append(i) 199 | res.append(line) 200 | return res 201 | 202 | 203 | def convert_id_to_label(pred_ids_result, idx2label, batch_size): 204 | """ 205 | 将id形式的结果转化为真实序列结果 206 | :param pred_ids_result: 207 | :param idx2label: 208 | :return: 209 | """ 210 | result = [] 211 | index_result = [] 212 | for row in range(batch_size): 213 | curr_seq = [] 214 | curr_idx = [] 215 | ids = pred_ids_result[row] 216 | for idx, id in enumerate(ids): 217 | if id == 0: 218 | break 219 | curr_label = idx2label[id] 220 | if curr_label in ['[CLS]', '[SEP]']: 221 | if id == 102 and (idx < len(ids) and ids[idx + 1] == 0): 222 | break 223 | continue 224 | # elif curr_label == '[SEP]': 225 | # break 226 | curr_seq.append(curr_label) 227 | curr_idx.append(id) 228 | result.append(curr_seq) 229 | index_result.append(curr_idx) 230 | return result, index_result 231 | 232 | 233 | def result_to_json(self, string, tags): 234 | """ 235 | 将模型标注序列和输入序列结合 转化为结果 236 | :param string: 输入序列 237 | :param tags: 标注结果 238 | :return: 239 | """ 240 | item = {"entities": []} 241 | entity_name = "" 242 | entity_start = 0 243 | idx = 0 244 | last_tag = '' 245 | 246 | for char, tag in zip(string, tags): 247 | if tag[0] == "S": 248 | self.append(char, idx, idx+1, tag[2:]) 249 | item["entities"].append({"word": char, "start": idx, "end": idx+1, "type":tag[2:]}) 250 | elif tag[0] == "B": 251 | if entity_name != '': 252 | self.append(entity_name, entity_start, idx, last_tag[2:]) 253 | item["entities"].append({"word": entity_name, "start": entity_start, "end": idx, "type": last_tag[2:]}) 254 | entity_name = "" 255 | entity_name += char 256 | entity_start = idx 257 | elif tag[0] == "I": 258 | entity_name 
+= char 259 | elif tag[0] == "O": 260 | if entity_name != '': 261 | self.append(entity_name, entity_start, idx, last_tag[2:]) 262 | item["entities"].append({"word": entity_name, "start": entity_start, "end": idx, "type": last_tag[2:]}) 263 | entity_name = "" 264 | else: 265 | entity_name = "" 266 | entity_start = idx 267 | idx += 1 268 | last_tag = tag 269 | if entity_name != '': 270 | self.append(entity_name, entity_start, idx, last_tag[2:]) 271 | item["entities"].append({"word": entity_name, "start": entity_start, "end": idx, "type": last_tag[2:]}) 272 | return item 273 | -------------------------------------------------------------------------------- /train/tf_metrics.py: -------------------------------------------------------------------------------- 1 | """ 2 | Multiclass 3 | from: 4 | https://github.com/guillaumegenthial/tf_metrics/blob/master/tf_metrics/__init__.py 5 | 6 | """ 7 | 8 | __author__ = "Guillaume Genthial" 9 | 10 | import numpy as np 11 | import tensorflow as tf 12 | from tensorflow.python.ops.metrics_impl import _streaming_confusion_matrix 13 | 14 | __all__ = ['precision', 'recall', 'f1', 'fbeta', 'safe_div', 'pr_re_fbeta', 'pr_re_fbeta', 'metrics_from_confusion_matrix'] 15 | 16 | 17 | def precision(labels, predictions, num_classes, pos_indices=None, 18 | weights=None, average='micro'): 19 | """Multi-class precision metric for Tensorflow 20 | Parameters 21 | ---------- 22 | labels : Tensor of tf.int32 or tf.int64 23 | The true labels 24 | predictions : Tensor of tf.int32 or tf.int64 25 | The predictions, same shape as labels 26 | num_classes : int 27 | The number of classes 28 | pos_indices : list of int, optional 29 | The indices of the positive classes, default is all 30 | weights : Tensor of tf.int32, optional 31 | Mask, must be of compatible shape with labels 32 | average : str, optional 33 | 'micro': counts the total number of true positives, false 34 | positives, and false negatives for the classes in 35 | `pos_indices` and infer the metric from it. 36 | 'macro': will compute the metric separately for each class in 37 | `pos_indices` and average. Will not account for class 38 | imbalance. 39 | 'weighted': will compute the metric separately for each class in 40 | `pos_indices` and perform a weighted average by the total 41 | number of true labels for each class. 42 | Returns 43 | ------- 44 | tuple of (scalar float Tensor, update_op) 45 | """ 46 | cm, op = _streaming_confusion_matrix( 47 | labels, predictions, num_classes, weights) 48 | pr, _, _ = metrics_from_confusion_matrix( 49 | cm, pos_indices, average=average) 50 | op, _, _ = metrics_from_confusion_matrix( 51 | op, pos_indices, average=average) 52 | return (pr, op) 53 | 54 | 55 | def recall(labels, predictions, num_classes, pos_indices=None, weights=None, 56 | average='micro'): 57 | """Multi-class recall metric for Tensorflow 58 | Parameters 59 | ---------- 60 | labels : Tensor of tf.int32 or tf.int64 61 | The true labels 62 | predictions : Tensor of tf.int32 or tf.int64 63 | The predictions, same shape as labels 64 | num_classes : int 65 | The number of classes 66 | pos_indices : list of int, optional 67 | The indices of the positive classes, default is all 68 | weights : Tensor of tf.int32, optional 69 | Mask, must be of compatible shape with labels 70 | average : str, optional 71 | 'micro': counts the total number of true positives, false 72 | positives, and false negatives for the classes in 73 | `pos_indices` and infer the metric from it. 
74 | 'macro': will compute the metric separately for each class in 75 | `pos_indices` and average. Will not account for class 76 | imbalance. 77 | 'weighted': will compute the metric separately for each class in 78 | `pos_indices` and perform a weighted average by the total 79 | number of true labels for each class. 80 | Returns 81 | ------- 82 | tuple of (scalar float Tensor, update_op) 83 | """ 84 | cm, op = _streaming_confusion_matrix( 85 | labels, predictions, num_classes, weights) 86 | _, re, _ = metrics_from_confusion_matrix( 87 | cm, pos_indices, average=average) 88 | _, op, _ = metrics_from_confusion_matrix( 89 | op, pos_indices, average=average) 90 | return (re, op) 91 | 92 | 93 | def f1(labels, predictions, num_classes, pos_indices=None, weights=None, 94 | average='micro'): 95 | return fbeta(labels, predictions, num_classes, pos_indices, weights, 96 | average) 97 | 98 | 99 | def fbeta(labels, predictions, num_classes, pos_indices=None, weights=None, 100 | average='micro', beta=1): 101 | """Multi-class fbeta metric for Tensorflow 102 | Parameters 103 | ---------- 104 | labels : Tensor of tf.int32 or tf.int64 105 | The true labels 106 | predictions : Tensor of tf.int32 or tf.int64 107 | The predictions, same shape as labels 108 | num_classes : int 109 | The number of classes 110 | pos_indices : list of int, optional 111 | The indices of the positive classes, default is all 112 | weights : Tensor of tf.int32, optional 113 | Mask, must be of compatible shape with labels 114 | average : str, optional 115 | 'micro': counts the total number of true positives, false 116 | positives, and false negatives for the classes in 117 | `pos_indices` and infer the metric from it. 118 | 'macro': will compute the metric separately for each class in 119 | `pos_indices` and average. Will not account for class 120 | imbalance. 121 | 'weighted': will compute the metric separately for each class in 122 | `pos_indices` and perform a weighted average by the total 123 | number of true labels for each class. 
124 | beta : int, optional 125 | Weight of precision in harmonic mean 126 | Returns 127 | ------- 128 | tuple of (scalar float Tensor, update_op) 129 | """ 130 | cm, op = _streaming_confusion_matrix( 131 | labels, predictions, num_classes, weights) 132 | _, _, fbeta = metrics_from_confusion_matrix( 133 | cm, pos_indices, average=average, beta=beta) 134 | _, _, op = metrics_from_confusion_matrix( 135 | op, pos_indices, average=average, beta=beta) 136 | return (fbeta, op) 137 | 138 | 139 | def safe_div(numerator, denominator): 140 | """Safe division, return 0 if denominator is 0""" 141 | numerator, denominator = tf.to_float(numerator), tf.to_float(denominator) 142 | zeros = tf.zeros_like(numerator, dtype=numerator.dtype) 143 | denominator_is_zero = tf.equal(denominator, zeros) 144 | return tf.where(denominator_is_zero, zeros, numerator / denominator) 145 | 146 | 147 | def pr_re_fbeta(cm, pos_indices, beta=1): 148 | """Uses a confusion matrix to compute precision, recall and fbeta""" 149 | num_classes = cm.shape[0] 150 | neg_indices = [i for i in range(num_classes) if i not in pos_indices] 151 | cm_mask = np.ones([num_classes, num_classes]) 152 | cm_mask[neg_indices, neg_indices] = 0 153 | diag_sum = tf.reduce_sum(tf.diag_part(cm * cm_mask)) 154 | 155 | cm_mask = np.ones([num_classes, num_classes]) 156 | cm_mask[:, neg_indices] = 0 157 | tot_pred = tf.reduce_sum(cm * cm_mask) 158 | 159 | cm_mask = np.ones([num_classes, num_classes]) 160 | cm_mask[neg_indices, :] = 0 161 | tot_gold = tf.reduce_sum(cm * cm_mask) 162 | 163 | pr = safe_div(diag_sum, tot_pred) 164 | re = safe_div(diag_sum, tot_gold) 165 | fbeta = safe_div((1. + beta**2) * pr * re, beta**2 * pr + re) 166 | 167 | return pr, re, fbeta 168 | 169 | 170 | def metrics_from_confusion_matrix(cm, pos_indices=None, average='micro', 171 | beta=1): 172 | """Precision, Recall and F1 from the confusion matrix 173 | Parameters 174 | ---------- 175 | cm : tf.Tensor of type tf.int32, of shape (num_classes, num_classes) 176 | The streaming confusion matrix. 
177 | pos_indices : list of int, optional 178 | The indices of the positive classes 179 | beta : int, optional 180 | Weight of precision in harmonic mean 181 | average : str, optional 182 | 'micro', 'macro' or 'weighted' 183 | """ 184 | num_classes = cm.shape[0] 185 | if pos_indices is None: 186 | pos_indices = [i for i in range(num_classes)] 187 | 188 | if average == 'micro': 189 | return pr_re_fbeta(cm, pos_indices, beta) 190 | elif average in {'macro', 'weighted'}: 191 | precisions, recalls, fbetas, n_golds = [], [], [], [] 192 | for idx in pos_indices: 193 | pr, re, fbeta = pr_re_fbeta(cm, [idx], beta) 194 | precisions.append(pr) 195 | recalls.append(re) 196 | fbetas.append(fbeta) 197 | cm_mask = np.zeros([num_classes, num_classes]) 198 | cm_mask[idx, :] = 1 199 | n_golds.append(tf.to_float(tf.reduce_sum(cm * cm_mask))) 200 | 201 | if average == 'macro': 202 | pr = tf.reduce_mean(precisions) 203 | re = tf.reduce_mean(recalls) 204 | fbeta = tf.reduce_mean(fbetas) 205 | return pr, re, fbeta 206 | if average == 'weighted': 207 | n_gold = tf.reduce_sum(n_golds) 208 | pr_sum = sum(p * n for p, n in zip(precisions, n_golds)) 209 | pr = safe_div(pr_sum, n_gold) 210 | re_sum = sum(r * n for r, n in zip(recalls, n_golds)) 211 | re = safe_div(re_sum, n_gold) 212 | fbeta_sum = sum(f * n for f, n in zip(fbetas, n_golds)) 213 | fbeta = safe_div(fbeta_sum, n_gold) 214 | return pr, re, fbeta 215 | 216 | else: 217 | raise NotImplementedError() --------------------------------------------------------------------------------
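A minimal, hypothetical usage sketch for the metric helpers in train/tf_metrics.py: `precision`, `recall` and `f1` each return a TF1-style `(value, update_op)` pair built on a streaming confusion matrix, so they can be evaluated in a plain session or handed to `eval_metric_ops` in a `tf.estimator` model_fn. The toy tag-id tensors, the padding mask and `num_classes = 4` below are illustrative assumptions, not values taken from this project.

```python
# Minimal usage sketch for train/tf_metrics.py (TensorFlow 1.x, graph mode).
# The tag ids, mask and num_classes below are toy, illustrative values.
import tensorflow as tf

from train.tf_metrics import precision, recall, f1

# Toy gold / predicted tag ids for a batch of two padded sequences (0 = padding).
labels = tf.constant([[1, 2, 2, 0], [3, 1, 0, 0]], dtype=tf.int32)
predictions = tf.constant([[1, 2, 1, 0], [3, 1, 0, 0]], dtype=tf.int32)
# Mask out padding positions, mirroring the tf.sign(tf.abs(...)) idiom used in train/models.py.
weights = tf.sign(tf.abs(labels))

num_classes = 4  # assumed size of the label vocabulary for this toy example
metric_ops = {
    'precision': precision(labels, predictions, num_classes, weights=weights),
    'recall': recall(labels, predictions, num_classes, weights=weights),
    'f1': f1(labels, predictions, num_classes, weights=weights),
}

with tf.Session() as sess:
    # The streaming confusion matrix is kept in local variables.
    sess.run(tf.local_variables_initializer())
    # Run the update ops once to accumulate the confusion matrix, then read the metric values.
    sess.run({name: update_op for name, (_, update_op) in metric_ops.items()})
    print(sess.run({name: value for name, (value, _) in metric_ops.items()}))
```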