├── .gitignore ├── LICENSE ├── README.md ├── _config.yml ├── dockerfile ├── export_jupyter_to_markdown.py └── tutorials ├── Chapter1 ├── 实验环境准备.md └── 数据集下载.md └── Chapter2 ├── BPE.ipynb ├── BPE.md ├── ChineseTokenizer.ipynb ├── ChineseTokenizer.md ├── EnglishTokenizer.ipynb ├── EnglishTokenizer.md ├── Normalize.ipynb ├── Normalize.md ├── Truecase.ipynb ├── Truecase.md └── assets ├── DAG.jpg ├── HMM分词篱笆型图.drawio.png ├── HMM模型.png ├── IsAlnum.txt ├── IsAlpha.txt ├── IsLower.txt ├── IsN.txt ├── Lowercase_Letter.txt ├── Non-BreakingSpace.webp ├── NormalSpace.webp ├── Titlecase_Letter.txt ├── Uppercase_Letter.txt ├── dict.txt ├── jieba分词算法流程图.jpg ├── nonbreaking_prefix.en ├── prob_emit.p ├── prob_start.p ├── prob_trans.p ├── viterbi_step1.drawio.png ├── viterbi_step2.drawio.png ├── viterbi_step3.drawio.png ├── viterbi_step4.drawio.png └── 序列标注问题.png /.gitignore: -------------------------------------------------------------------------------- 1 | # File created using '.gitignore Generator' for Visual Studio Code: https://bit.ly/vscode-gig 2 | 3 | # Created by https://www.toptal.com/developers/gitignore/api/visualstudiocode,linux,python 4 | # Edit at https://www.toptal.com/developers/gitignore?templates=visualstudiocode,linux,python 5 | 6 | ### Linux ### 7 | *~ 8 | 9 | # temporary files which can be created if a process still has a handle open of a deleted file 10 | .fuse_hidden* 11 | 12 | # KDE directory preferences 13 | .directory 14 | 15 | # Linux trash folder which might appear on any partition or disk 16 | .Trash-* 17 | 18 | # .nfs files are created when an open file is removed but is still being accessed 19 | .nfs* 20 | 21 | ### Python ### 22 | # Byte-compiled / optimized / DLL files 23 | __pycache__/ 24 | *.py[cod] 25 | *$py.class 26 | 27 | # C extensions 28 | *.so 29 | 30 | # Distribution / packaging 31 | .Python 32 | build/ 33 | develop-eggs/ 34 | dist/ 35 | downloads/ 36 | eggs/ 37 | .eggs/ 38 | lib/ 39 | lib64/ 40 | parts/ 41 | sdist/ 42 | var/ 43 | wheels/ 44 | pip-wheel-metadata/ 45 | share/python-wheels/ 46 | *.egg-info/ 47 | .installed.cfg 48 | *.egg 49 | MANIFEST 50 | 51 | # PyInstaller 52 | # Usually these files are written by a python script from a template 53 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 54 | *.manifest 55 | *.spec 56 | 57 | # Installer logs 58 | pip-log.txt 59 | pip-delete-this-directory.txt 60 | 61 | # Unit test / coverage reports 62 | htmlcov/ 63 | .tox/ 64 | .nox/ 65 | .coverage 66 | .coverage.* 67 | .cache 68 | nosetests.xml 69 | coverage.xml 70 | *.cover 71 | *.py,cover 72 | .hypothesis/ 73 | .pytest_cache/ 74 | pytestdebug.log 75 | 76 | # Translations 77 | *.mo 78 | *.pot 79 | 80 | # Django stuff: 81 | *.log 82 | local_settings.py 83 | db.sqlite3 84 | db.sqlite3-journal 85 | 86 | # Flask stuff: 87 | instance/ 88 | .webassets-cache 89 | 90 | # Scrapy stuff: 91 | .scrapy 92 | 93 | # Sphinx documentation 94 | docs/_build/ 95 | doc/_build/ 96 | 97 | # PyBuilder 98 | target/ 99 | 100 | # Jupyter Notebook 101 | .ipynb_checkpoints 102 | 103 | # IPython 104 | profile_default/ 105 | ipython_config.py 106 | 107 | # pyenv 108 | .python-version 109 | 110 | # pipenv 111 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 112 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 113 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 114 | # install all needed dependencies. 
115 | #Pipfile.lock 116 | 117 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 118 | __pypackages__/ 119 | 120 | # Celery stuff 121 | celerybeat-schedule 122 | celerybeat.pid 123 | 124 | # SageMath parsed files 125 | *.sage.py 126 | 127 | # Environments 128 | .env 129 | .venv 130 | env/ 131 | venv/ 132 | ENV/ 133 | env.bak/ 134 | venv.bak/ 135 | 136 | # Spyder project settings 137 | .spyderproject 138 | .spyproject 139 | 140 | # Rope project settings 141 | .ropeproject 142 | 143 | # mkdocs documentation 144 | /site 145 | 146 | # mypy 147 | .mypy_cache/ 148 | .dmypy.json 149 | dmypy.json 150 | 151 | # Pyre type checker 152 | .pyre/ 153 | 154 | # pytype static type analyzer 155 | .pytype/ 156 | 157 | ### VisualStudioCode ### 158 | .vscode/* 159 | 160 | ### VisualStudioCode Patch ### 161 | # Ignore all local history of files 162 | .history 163 | 164 | # End of https://www.toptal.com/developers/gitignore/api/visualstudiocode,linux,python 165 | 166 | # Custom rules (everything added below won't be overriden by 'Generate .gitignore File' if you use 'Update' option) 167 | 168 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Bing Han 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
-------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # 机器翻译教程 2 | 3 | 本项目受到[pytorch-seq2seq](https://github.com/bentrevett/pytorch-seq2seq)项目启发,包含了机器翻译从数据预处理、模型、评测到部署的一整套教程,风格类似于[pytorch官方的Tutorial](https://pytorch.org/tutorials/)。借本项目对自己开发机器翻译项目的经验进行总结,也帮助刚入门的小伙伴少走一些弯路。网上的一些文章教程总是空洞地介绍一些模型、算法或是论文里的一些tricks,非常晦涩难懂,而理论+代码相结合的方式是最容易理解的。 4 | 5 | ## Chapter1:实验准备 6 | - [实验环境准备](tutorials/Chapter1/实验环境准备.md) 7 | - [数据集下载](tutorials/Chapter1/数据集下载.md) 8 | 9 | ## Chapter2:数据准备与预处理篇 10 | - [规范化](tutorials/Chapter2/Normalize.md) 11 | - [中文分词](tutorials/Chapter2/ChineseTokenizer.md) 12 | - [英文分词](tutorials/Chapter2/EnglishTokenizer.md) 13 | - [Truecase](tutorials/Chapter2/Truecase.md) 14 | - [BPE](tutorials/Chapter2/BPE.md) 15 | - [Wordpiece](tutorials/Chapter2/Wordpiece.md) 16 | - [SentencePiece](tutorials/Chapter2/SentencePiece.md) 17 | - [数据预处理示例](tutorials/Chapter2/Example.md) 18 | 19 | ## Chapter3:模型篇 20 | 21 | TODO 22 | 23 | ## Chapter4:推断与评测篇 24 | 25 | TODO 26 | 27 | ## Chapter5:模型部署篇 28 | 29 | TODO 30 | 31 | ## Chapter6:进阶技术 32 | 33 | TODO 34 | 35 | ## Chapter7:参加WMT 36 | 37 | TODO 38 | 39 | ## Chapter8:学习资料及相关资源 40 | 41 | TODO 42 | 43 | ## Reference 44 | -------------------------------------------------------------------------------- /_config.yml: -------------------------------------------------------------------------------- 1 | theme: jekyll-theme-cayman -------------------------------------------------------------------------------- /dockerfile: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BrightXiaoHan/MachineTranslationTutorial/2c36dfa18fb2d3fb9ba6264f1c19af79dd69200f/dockerfile -------------------------------------------------------------------------------- /export_jupyter_to_markdown.py: -------------------------------------------------------------------------------- 1 | from argparse import ArgumentParser 2 | import glob 3 | import os 4 | 5 | parser = ArgumentParser() 6 | parser.add_argument('--override', action='store_true', default=False, 7 | help='Whether to overwrite existing markdown files or not.') 8 | args = parser.parse_args() 9 | 10 | all_jupyter_files = glob.glob("tutorials/*/*.ipynb") 11 | all_markdown_files = [i[:-6] + ".md" for i in all_jupyter_files] 12 | 13 | for ipynb, md in zip(all_jupyter_files, all_markdown_files): 14 | if not args.override and os.path.exists(md): 15 | continue 16 | cmd = "jupyter nbconvert --to markdown {}".format(ipynb) 17 | stat = os.system(cmd) 18 | if stat == 0: 19 | print("Converted '{}' to markdown format successfully.".format(ipynb)) 20 | else: 21 | print("Failed to convert '{}' to markdown format.".format(ipynb)) 22 | -------------------------------------------------------------------------------- /tutorials/Chapter1/实验环境准备.md: -------------------------------------------------------------------------------- 1 | # 实验环境准备 2 | 本文主要列出了我做机器翻译开发过程中使用的环境,包括IDE等,仅作为一个参考。如果对深度学习开发比较熟悉或者已经有自己比较顺手的开发环境,可以略过此章节。 3 | 4 | ## 物理机配置 5 | - Ubuntu 16.04 (其他Linux发行版如CentOS也可以) 6 | - 4 x 1080Ti (做分布式训练时需要用到,如果没有也可以) 7 | - Anaconda 8 | 9 | 10 | ## docker 11 | -------------------------------------------------------------------------------- /tutorials/Chapter1/数据集下载.md: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/BrightXiaoHan/MachineTranslationTutorial/2c36dfa18fb2d3fb9ba6264f1c19af79dd69200f/tutorials/Chapter1/数据集下载.md -------------------------------------------------------------------------------- /tutorials/Chapter2/BPE.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# BPE分词原理\n", 8 | "\n", 9 | "BPE(字节对)编码或二元编码是一种简单的数据压缩形式,其中最常见的一对连续字节数据被替换为该数据中不存在的字节。 后期使用时需要一个替换表来重建原始数据。将BPE算法原理应用在机器翻译分词部分源于这篇论文[Neural Machine Translation of Rare Words with Subword Units](https://arxiv.org/pdf/1508.07909)。后来各类NLP任务模型,如OpenAI GPT-2 与Facebook RoBERTa均采用此方法构建subword。作为传统的(以Moses为代表)基于空格、规则的分词方法的补充,使用BPE为代表的Subword分词算法主要解决了传统分词的以下几个痛点:\n", 10 | "- 传统词表示方法无法很好的处理未知或罕见的词汇(OOV问题)\n", 11 | "- 传统词tokenization方法不利于模型学习词缀之间的关系。如:模型学到的“old”, “older”, and “oldest”之间的关系无法泛化到“smart”, “smarter”, and “smartest”。\n", 12 | "- Character embedding作为OOV的解决方法粒度太细\n", 13 | "- Subword粒度在词与字符之间,能够较好的平衡OOV问题" 14 | ] 15 | }, 16 | { 17 | "cell_type": "markdown", 18 | "metadata": {}, 19 | "source": [ 20 | "## 算法原理\n", 21 | "1. 准备足够大的训练语料,将语料进行预分词(中文可以使用jieba分词,拉丁语可以使用Moses,nltk等工具)。\n", 22 | "2. 确定期望的subword词表大小\n", 23 | "3. 将单词拆分为字符序列并在末尾添加后缀“ ”,统计单词频率。 本阶段的subword的粒度是字符。 例如,“ low”的频率为5,那么我们将其改写为“ l o w ”:5\n", 24 | "4. 统计每一个连续字节对的出现频率,选择最高频者合并成新的subword\n", 25 | "5. 重复第4步直到达到第2步设定的subword词表大小或下一个最高频的字节对出现频率为1\n", 26 | "\n", 27 | "停止符\"\"的意义在于表示subword是词后缀。举例来说:\"st\"字词不加\"\"可以出现在词首如\"st ar\",加了\"\"表明改字词位于词尾,如\"wide st\",二者意义截然不同。\n", 28 | "\n", 29 | "每次合并后词表可能出现3种变化:\n", 30 | "\n", 31 | "- +1,表明加入合并后的新字词,同时原来的2个子词还保留(2个字词不是完全同时连续出现)\n", 32 | "- +0,表明加入合并后的新字词,同时原来的2个子词中一个保留,一个被消解(一个字词完全随着另一个字词的出现而紧跟着出现)\n", 33 | "- -1,表明加入合并后的新字词,同时原来的2个子词都被消解(2个字词同时连续出现)\n", 34 | "\n", 35 | "实际上,随着合并的次数增加,词表大小通常先增加后减小。\n", 36 | "\n", 37 | "### 例子\n", 38 | "输入:\n", 39 | "```\n", 40 | "{'l o w ': 5, 'l o w e r ': 2, 'n e w e s t ': 6, 'w i d e s t ': 3}\n", 41 | "```\n", 42 | "Iter 1, 最高频连续字节对\"e\"和\"s\"出现了6+3=9次,合并成\"es\"。输出:\n", 43 | "```\n", 44 | "{'l o w ': 5, 'l o w e r ': 2, 'n e w es t ': 6, 'w i d es t ': 3}\n", 45 | "```\n", 46 | "Iter 2, 最高频连续字节对\"es\"和\"t\"出现了6+3=9次, 合并成\"est\"。输出:\n", 47 | "```\n", 48 | "{'l o w ': 5, 'l o w e r ': 2, 'n e w est ': 6, 'w i d est ': 3}\n", 49 | "```\n", 50 | "Iter 3, 以此类推,最高频连续字节对为\"est\"和\"<\\/w>\" 输出:\n", 51 | "```\n", 52 | "{'l o w ': 5, 'l o w e r ': 2, 'n e w est': 6, 'w i d est': 3}\n", 53 | "```\n", 54 | "……\n", 55 | "Iter n, 继续迭代直到达到预设的subword词表大小或下一个最高频的字节对出现频率为1。\n", 56 | "\n", 57 | "注意到输入BPE学习算法的数据是经过传统分词处理的,词内字符直接分割(Moses基于规则的分词,也可以加上Normalize,Truecase等操作),词尾字符增加标志。所以这里也印证了前面说的BPE字词分割算法是作为传统预处理方法的一个补充。\n", 58 | "\n", 59 | "### 代码实现\n", 60 | "根据上文的算法原理,可以很容易地实现BPE学习算法" 61 | ] 62 | }, 63 | { 64 | "cell_type": "code", 65 | "execution_count": 1, 66 | "metadata": {}, 67 | "outputs": [ 68 | { 69 | "name": "stdout", 70 | "output_type": "stream", 71 | "text": [ 72 | "{'O bject': 2, 'raspberrypi': 10, 'functools': 3, 'dict': 3, 'kwargs': 3, '.': 14, 'G event': 2, 'Dunder': 3, 'decorator': 3, \"didn't\": 4, 'la m bd a ': 2, 'z ip ': 2, 'import': 3, 'py ra m i d , ': 1, 'she': 3, 'i te rat e': 1, '? 
': 2, 'K wargs': 2, 'di v er s ity': 2, 'unit': 3, 'o bject': 1, 'g event': 1, 'I mport': 2, 'fall': 4, 'integration': 8, 'd j a n g o ': 1, 'y i el d': 2, 't w is te d': 1, 'he': 4, 'f ut ure': 1, 'P ython': 2, 'c o m m un ity': 2, 'p yp y': 1, 'beautifu l': 3, 'test': 5, 'r e d u c e': 2, 'g i l': 1, 'py thon': 3, 'c l o s ure': 1, 'g en e rator': 1, 'ra is e': 1, 'v is or': 1, 'it er tools': 1, '. . .': 1, 'R e d u c e': 1, 'c or o ut in e': 1, 'bd f l': 1, 'C ython': 1, 'w h i l e': 1, 'l i st': 2, 'n it': 1, '! ': 1, '2 to 3 ': 1, 'd under': 2, 's c r ip t': 2, 'f or': 1, ': ': 1, 'e x c e p t ion': 1, 'py c on': 1, 's i x ': 1, 'w e b ': 2, 'F ut ure': 1, 'm er c ur i al': 3, 's el f ': 1, 'R e t ur n': 1, 's t a b l e': 1, 'D j a n g o ': 1, 'v is u al': 1, 'r o c k s d a h o u s e': 1, 'c la s s': 1, 's c i py ': 1, 'h el m e t': 1}\n" 73 | ] 74 | } 75 | ], 76 | "source": [ 77 | "# Code is adapt from original paper \n", 78 | "import re, collections\n", 79 | "\n", 80 | "def get_stats(vocab):\n", 81 | " pairs = collections.defaultdict(int)\n", 82 | " for word, freq in vocab.items():\n", 83 | " symbols = word.split()\n", 84 | " for i in range(len(symbols)-1):\n", 85 | " pairs[symbols[i],symbols[i+1]] += freq\n", 86 | " return pairs\n", 87 | "\n", 88 | "def merge_vocab(pair, v_in):\n", 89 | " v_out = {}\n", 90 | " bigram = re.escape(' '.join(pair))\n", 91 | " p = re.compile(r'(?\": value for key, value in counter.items()}\n", 104 | "\n", 105 | "test_corpus = '''Object raspberrypi functools dict kwargs . Gevent raspberrypi functools . Dunder raspberrypi decorator dict didn't lambda zip import pyramid, she lambda iterate ?\n", 106 | "Kwargs raspberrypi diversity unit object gevent . Import fall integration decorator unit django yield functools twisted . Dunder integration decorator he she future . Python raspberrypi community pypy . Kwargs integration beautiful test reduce gil python closure . Gevent he integration generator fall test kwargs raise didn't visor he itertools ...\n", 107 | "Reduce integration coroutine bdfl he python . Cython didn't integration while beautiful list python didn't nit !\n", 108 | "Object fall diversity 2to3 dunder script . Python fall for : integration exception dict kwargs dunder pycon . Import raspberrypi beautiful test import six web . Future integration mercurial self script web . Return raspberrypi community test she stable .\n", 109 | "Django raspberrypi mercurial unit import yield raspberrypi visual rocksdahouse . Dunder raspberrypi mercurial list reduce class test scipy helmet zip ?'''\n", 110 | " \n", 111 | "\n", 112 | "vocab = build_vocab(test_corpus.split('\\n'))\n", 113 | "num_merges = 100\n", 114 | "for i in range(num_merges):\n", 115 | " pairs = get_stats(vocab)\n", 116 | " if not pairs:\n", 117 | " break\n", 118 | " best = max(pairs, key=pairs.get)\n", 119 | " vocab = merge_vocab(best, vocab)\n", 120 | "\n", 121 | "print(vocab)" 122 | ] 123 | }, 124 | { 125 | "cell_type": "markdown", 126 | "metadata": {}, 127 | "source": [ 128 | "经过100轮的迭代,可以看到一些出现频率较高的子词已经被合并在一起了。但是这样的词表没有办法直接使用,需要将它们合并成`字词:频率`对的形式。" 129 | ] 130 | }, 131 | { 132 | "cell_type": "code", 133 | "execution_count": 2, 134 | "metadata": {}, 135 | "outputs": [ 136 | { 137 | "name": "stdout", 138 | "output_type": "stream", 139 | "text": [ 140 | " 17\n", 141 | "c 17\n", 142 | ". 
15\n", 143 | "i 13\n", 144 | "s 12\n", 145 | "m 11\n", 146 | "raspberrypi 10\n", 147 | "e 10\n", 148 | "e 9\n", 149 | "o 9\n", 150 | "d 8\n", 151 | "integration 8\n", 152 | "a 6\n", 153 | "py 6\n", 154 | "er 6\n", 155 | "g 5\n", 156 | "l 5\n", 157 | "test 5\n", 158 | "r 5\n", 159 | "u 5\n" 160 | ] 161 | } 162 | ], 163 | "source": [ 164 | "def build_dict_from_vocab(vocab):\n", 165 | "\n", 166 | " subword_dict = collections.defaultdict(int)\n", 167 | "\n", 168 | " for key, value in vocab.items():\n", 169 | " for subword in key.split():\n", 170 | " subword_dict[subword] += value\n", 171 | " return subword_dict\n", 172 | "\n", 173 | "subword_dict = build_dict_from_vocab(vocab)\n", 174 | "sorted_subword_dict = sorted(subword_dict.items(), key=lambda x: -x[1])\n", 175 | " \n", 176 | "# 这里只输出前二十个\n", 177 | "for subword, num in sorted_subword_dict[:20]:\n", 178 | " print(subword.ljust(20), num)" 179 | ] 180 | }, 181 | { 182 | "cell_type": "markdown", 183 | "metadata": {}, 184 | "source": [ 185 | "可以看到一些常见的字词组合如`.`,`integration`等已经被学习到了。由于语料比较小,有些字词还是以单个英文字母的形式存在,所以需要训练语料量足够大,才能学习到足够好的subword词典。" 186 | ] 187 | }, 188 | { 189 | "cell_type": "markdown", 190 | "metadata": {}, 191 | "source": [ 192 | "## 编码和解码\n", 193 | "在之前的算法中,我们已经得到了subword的词表,对该词表按照子词长度由大到小排序。编码时,对于每个单词,遍历排好序的子词词表寻找是否有token是当前单词的子字符串,如果有,则该token是表示单词的tokens之一。\n", 194 | "\n", 195 | "我们从最长的token迭代到最短的token,尝试将每个单词中的子字符串替换为token。 最终,我们将迭代所有tokens,并将所有子字符串替换为tokens。 如果仍然有子字符串没被替换但所有token都已迭代完毕,则将剩余的子词替换为特殊token,如``。\n", 196 | "\n", 197 | "### 例子\n", 198 | "\n", 199 | "为了简单起见,我们给定单词序列\n", 200 | "```\n", 201 | "[“the”, “highest”, “mountain”, “largest”]\n", 202 | "```\n", 203 | "假设已有排好序的subword词表\n", 204 | "```\n", 205 | "[“err”, “tain”, “moun”, “est”, “high”, “the”, “a”]\n", 206 | "```\n", 207 | "迭代结果\n", 208 | "```\n", 209 | "\"the\" -> [\"the\"]\n", 210 | "\"highest\" -> [\"high\", \"est\"]\n", 211 | "\"mountain\" -> [\"moun\", \"tain\"]\n", 212 | "\"largest\" -> [\"\", \"est\"]\n", 213 | "```\n", 214 | "\n", 215 | "### 代码实现\n" 216 | ] 217 | }, 218 | { 219 | "cell_type": "code", 220 | "execution_count": 3, 221 | "metadata": {}, 222 | "outputs": [ 223 | { 224 | "name": "stdout", 225 | "output_type": "stream", 226 | "text": [ 227 | "['the', 'high', 'est', 'and', 'l', 'a', 'r', 'g', 'est', 'moun', 'tain', '']\n" 228 | ] 229 | } 230 | ], 231 | "source": [ 232 | "subword_dict = {\"err\": 2, \"tain\":3, \"moun\":3, \"est\":4, \"high\":2, \"the\":5, \"and\":10, \"l\":1, \"g\":1, \"a\":1, \"r\":1}\n", 233 | "ngram_max = max(len(x) for x in subword_dict)\n", 234 | "UNK = \"\"\n", 235 | "\n", 236 | "def subword_tokenize(word):\n", 237 | " word += \"\"\n", 238 | " end_idx = min([len(word), ngram_max])\n", 239 | " sw_tokens = []\n", 240 | " start_idx = 0\n", 241 | "\n", 242 | " while start_idx < len(word):\n", 243 | " subword = word[start_idx:end_idx]\n", 244 | " if subword in subword_dict:\n", 245 | " sw_tokens.append(subword)\n", 246 | " start_idx = end_idx\n", 247 | " end_idx = min([len(word), start_idx + ngram_max])\n", 248 | " elif subword == \"\":\n", 249 | " break\n", 250 | " elif len(subword) == 1:\n", 251 | " sw_tokens.append(UNK)\n", 252 | " start_idx = end_idx\n", 253 | " end_idx = min([len(word), start_idx + ngram_max])\n", 254 | " else:\n", 255 | " end_idx -= 1\n", 256 | "\n", 257 | " return sw_tokens\n", 258 | "\n", 259 | "def tokenize(sentence):\n", 260 | " \"\"\"给定预训练的词表和待分词的句子(默认已经通过nltk或者moses等工具进行过预分词),输出基于bpe的分词结果\n", 261 | " \n", 262 | " Args:\n", 263 | " sentence (str): 待分词的句子\n", 264 | " \n", 265 | " Return:\n", 266 | " 
list: 字词列表\n", 267 | " \"\"\"\n", 268 | " tokens = []\n", 269 | " \n", 270 | " for word in sentence.split():\n", 271 | " tokens.extend(subword_tokenize(word))\n", 272 | " \n", 273 | " return tokens\n", 274 | "\n", 275 | "\n", 276 | "tokens = tokenize(\"the highest and largest mountain 好\")\n", 277 | "print(tokens)" 278 | ] 279 | }, 280 | { 281 | "cell_type": "markdown", 282 | "metadata": {}, 283 | "source": [ 284 | "解码的代码就比较简单了,以``为标志做词的划分,其余相邻的词直接进行合并。" 285 | ] 286 | }, 287 | { 288 | "cell_type": "code", 289 | "execution_count": 4, 290 | "metadata": {}, 291 | "outputs": [ 292 | { 293 | "name": "stdout", 294 | "output_type": "stream", 295 | "text": [ 296 | "the highest and largest mountain \n" 297 | ] 298 | } 299 | ], 300 | "source": [ 301 | "def detokenize(tokens):\n", 302 | " return \"\".join(tokens).replace(\"\", \" \").replace(\"\", \"\")\n", 303 | "\n", 304 | "print(detokenize(tokens))" 305 | ] 306 | } 307 | ], 308 | "metadata": { 309 | "kernelspec": { 310 | "display_name": "Python 3", 311 | "language": "python", 312 | "name": "python3" 313 | }, 314 | "language_info": { 315 | "codemirror_mode": { 316 | "name": "ipython", 317 | "version": 3 318 | }, 319 | "file_extension": ".py", 320 | "mimetype": "text/x-python", 321 | "name": "python", 322 | "nbconvert_exporter": "python", 323 | "pygments_lexer": "ipython3", 324 | "version": "3.7.0" 325 | } 326 | }, 327 | "nbformat": 4, 328 | "nbformat_minor": 2 329 | } 330 | -------------------------------------------------------------------------------- /tutorials/Chapter2/BPE.md: -------------------------------------------------------------------------------- 1 | 2 | # BPE分词原理 3 | 4 | BPE(字节对)编码或二元编码是一种简单的数据压缩形式,其中最常见的一对连续字节数据被替换为该数据中不存在的字节。 后期使用时需要一个替换表来重建原始数据。将BPE算法原理应用在机器翻译分词部分源于这篇论文[Neural Machine Translation of Rare Words with Subword Units](https://arxiv.org/pdf/1508.07909)。后来各类NLP任务模型,如OpenAI GPT-2 与Facebook RoBERTa均采用此方法构建subword。作为传统的(以Moses为代表)基于空格、规则的分词方法的补充,使用BPE为代表的Subword分词算法主要解决了传统分词的以下几个痛点: 5 | - 传统词表示方法无法很好的处理未知或罕见的词汇(OOV问题) 6 | - 传统词tokenization方法不利于模型学习词缀之间的关系。如:模型学到的“old”, “older”, and “oldest”之间的关系无法泛化到“smart”, “smarter”, and “smartest”。 7 | - Character embedding作为OOV的解决方法粒度太细 8 | - Subword粒度在词与字符之间,能够较好的平衡OOV问题 9 | 10 | ## 算法原理 11 | 1. 准备足够大的训练语料,将语料进行预分词(中文可以使用jieba分词,拉丁语可以使用Moses,nltk等工具)。 12 | 2. 确定期望的subword词表大小 13 | 3. 将单词拆分为字符序列并在末尾添加后缀“ ”,统计单词频率。 本阶段的subword的粒度是字符。 例如,“ low”的频率为5,那么我们将其改写为“ l o w ”:5 14 | 4. 统计每一个连续字节对的出现频率,选择最高频者合并成新的subword 15 | 5. 
重复第4步直到达到第2步设定的subword词表大小或下一个最高频的字节对出现频率为1 16 | 17 | 停止符""的意义在于表示subword是词后缀。举例来说:"st"字词不加""可以出现在词首如"st ar",加了""表明改字词位于词尾,如"wide st",二者意义截然不同。 18 | 19 | 每次合并后词表可能出现3种变化: 20 | 21 | - +1,表明加入合并后的新字词,同时原来的2个子词还保留(2个字词不是完全同时连续出现) 22 | - +0,表明加入合并后的新字词,同时原来的2个子词中一个保留,一个被消解(一个字词完全随着另一个字词的出现而紧跟着出现) 23 | - -1,表明加入合并后的新字词,同时原来的2个子词都被消解(2个字词同时连续出现) 24 | 25 | 实际上,随着合并的次数增加,词表大小通常先增加后减小。 26 | 27 | ### 例子 28 | 输入: 29 | ``` 30 | {'l o w ': 5, 'l o w e r ': 2, 'n e w e s t ': 6, 'w i d e s t ': 3} 31 | ``` 32 | Iter 1, 最高频连续字节对"e"和"s"出现了6+3=9次,合并成"es"。输出: 33 | ``` 34 | {'l o w ': 5, 'l o w e r ': 2, 'n e w es t ': 6, 'w i d es t ': 3} 35 | ``` 36 | Iter 2, 最高频连续字节对"es"和"t"出现了6+3=9次, 合并成"est"。输出: 37 | ``` 38 | {'l o w ': 5, 'l o w e r ': 2, 'n e w est ': 6, 'w i d est ': 3} 39 | ``` 40 | Iter 3, 以此类推,最高频连续字节对为"est"和"<\/w>" 输出: 41 | ``` 42 | {'l o w ': 5, 'l o w e r ': 2, 'n e w est': 6, 'w i d est': 3} 43 | ``` 44 | …… 45 | Iter n, 继续迭代直到达到预设的subword词表大小或下一个最高频的字节对出现频率为1。 46 | 47 | 注意到输入BPE学习算法的数据是经过传统分词处理的,词内字符直接分割(Moses基于规则的分词,也可以加上Normalize,Truecase等操作),词尾字符增加标志。所以这里也印证了前面说的BPE字词分割算法是作为传统预处理方法的一个补充。 48 | 49 | ### 代码实现 50 | 根据上文的算法原理,可以很容易地实现BPE学习算法 51 | 52 | 53 | ```python 54 | # Code is adapt from original paper 55 | import re, collections 56 | 57 | def get_stats(vocab): 58 | pairs = collections.defaultdict(int) 59 | for word, freq in vocab.items(): 60 | symbols = word.split() 61 | for i in range(len(symbols)-1): 62 | pairs[symbols[i],symbols[i+1]] += freq 63 | return pairs 64 | 65 | def merge_vocab(pair, v_in): 66 | v_out = {} 67 | bigram = re.escape(' '.join(pair)) 68 | p = re.compile(r'(?": value for key, value in counter.items()} 81 | 82 | test_corpus = '''Object raspberrypi functools dict kwargs . Gevent raspberrypi functools . Dunder raspberrypi decorator dict didn't lambda zip import pyramid, she lambda iterate ? 83 | Kwargs raspberrypi diversity unit object gevent . Import fall integration decorator unit django yield functools twisted . Dunder integration decorator he she future . Python raspberrypi community pypy . Kwargs integration beautiful test reduce gil python closure . Gevent he integration generator fall test kwargs raise didn't visor he itertools ... 84 | Reduce integration coroutine bdfl he python . Cython didn't integration while beautiful list python didn't nit ! 85 | Object fall diversity 2to3 dunder script . Python fall for : integration exception dict kwargs dunder pycon . Import raspberrypi beautiful test import six web . Future integration mercurial self script web . Return raspberrypi community test she stable . 86 | Django raspberrypi mercurial unit import yield raspberrypi visual rocksdahouse . Dunder raspberrypi mercurial list reduce class test scipy helmet zip ?''' 87 | 88 | 89 | vocab = build_vocab(test_corpus.split('\n')) 90 | num_merges = 100 91 | for i in range(num_merges): 92 | pairs = get_stats(vocab) 93 | if not pairs: 94 | break 95 | best = max(pairs, key=pairs.get) 96 | vocab = merge_vocab(best, vocab) 97 | 98 | print(vocab) 99 | ``` 100 | 101 | {'O bject': 2, 'raspberrypi': 10, 'functools': 3, 'dict': 3, 'kwargs': 3, '.': 14, 'G event': 2, 'Dunder': 3, 'decorator': 3, "didn't": 4, 'la m bd a ': 2, 'z ip ': 2, 'import': 3, 'py ra m i d , ': 1, 'she': 3, 'i te rat e': 1, '? 
': 2, 'K wargs': 2, 'di v er s ity': 2, 'unit': 3, 'o bject': 1, 'g event': 1, 'I mport': 2, 'fall': 4, 'integration': 8, 'd j a n g o ': 1, 'y i el d': 2, 't w is te d': 1, 'he': 4, 'f ut ure': 1, 'P ython': 2, 'c o m m un ity': 2, 'p yp y': 1, 'beautifu l': 3, 'test': 5, 'r e d u c e': 2, 'g i l': 1, 'py thon': 3, 'c l o s ure': 1, 'g en e rator': 1, 'ra is e': 1, 'v is or': 1, 'it er tools': 1, '. . .': 1, 'R e d u c e': 1, 'c or o ut in e': 1, 'bd f l': 1, 'C ython': 1, 'w h i l e': 1, 'l i st': 2, 'n it': 1, '! ': 1, '2 to 3 ': 1, 'd under': 2, 's c r ip t': 2, 'f or': 1, ': ': 1, 'e x c e p t ion': 1, 'py c on': 1, 's i x ': 1, 'w e b ': 2, 'F ut ure': 1, 'm er c ur i al': 3, 's el f ': 1, 'R e t ur n': 1, 's t a b l e': 1, 'D j a n g o ': 1, 'v is u al': 1, 'r o c k s d a h o u s e': 1, 'c la s s': 1, 's c i py ': 1, 'h el m e t': 1} 102 | 103 | 104 | 经过100轮的迭代,可以看到一些出现频率较高的子词已经被合并在一起了。但是这样的词表没有办法直接使用,需要将它们合并成`字词:频率`对的形式。 105 | 106 | 107 | ```python 108 | def build_dict_from_vocab(vocab): 109 | 110 | subword_dict = collections.defaultdict(int) 111 | 112 | for key, value in vocab.items(): 113 | for subword in key.split(): 114 | subword_dict[subword] += value 115 | return subword_dict 116 | 117 | subword_dict = build_dict_from_vocab(vocab) 118 | sorted_subword_dict = sorted(subword_dict.items(), key=lambda x: -x[1]) 119 | 120 | # 这里只输出前二十个 121 | for subword, num in sorted_subword_dict[:20]: 122 | print(subword.ljust(20), num) 123 | ``` 124 | 125 | 17 126 | c 17 127 | . 15 128 | i 13 129 | s 12 130 | m 11 131 | raspberrypi 10 132 | e 10 133 | e 9 134 | o 9 135 | d 8 136 | integration 8 137 | a 6 138 | py 6 139 | er 6 140 | g 5 141 | l 5 142 | test 5 143 | r 5 144 | u 5 145 | 146 | 147 | 可以看到一些常见的字词组合如`.`,`integration`等已经被学习到了。由于语料比较小,有些字词还是以单个英文字母的形式存在,所以需要训练语料量足够大,才能学习到足够好的subword词典。 148 | 149 | ## 编码和解码 150 | 在之前的算法中,我们已经得到了subword的词表,对该词表按照子词长度由大到小排序。编码时,对于每个单词,遍历排好序的子词词表寻找是否有token是当前单词的子字符串,如果有,则该token是表示单词的tokens之一。 151 | 152 | 我们从最长的token迭代到最短的token,尝试将每个单词中的子字符串替换为token。 最终,我们将迭代所有tokens,并将所有子字符串替换为tokens。 如果仍然有子字符串没被替换但所有token都已迭代完毕,则将剩余的子词替换为特殊token,如``。 153 | 154 | ### 例子 155 | 156 | 为了简单起见,我们给定单词序列 157 | ``` 158 | [“the”, “highest”, “mountain”, “largest”] 159 | ``` 160 | 假设已有排好序的subword词表 161 | ``` 162 | [“err”, “tain”, “moun”, “est”, “high”, “the”, “a”] 163 | ``` 164 | 迭代结果 165 | ``` 166 | "the" -> ["the"] 167 | "highest" -> ["high", "est"] 168 | "mountain" -> ["moun", "tain"] 169 | "largest" -> ["", "est"] 170 | ``` 171 | 172 | ### 代码实现 173 | 174 | 175 | 176 | ```python 177 | subword_dict = {"err": 2, "tain":3, "moun":3, "est":4, "high":2, "the":5, "and":10, "l":1, "g":1, "a":1, "r":1} 178 | ngram_max = max(len(x) for x in subword_dict) 179 | UNK = "" 180 | 181 | def subword_tokenize(word): 182 | word += "" 183 | end_idx = min([len(word), ngram_max]) 184 | sw_tokens = [] 185 | start_idx = 0 186 | 187 | while start_idx < len(word): 188 | subword = word[start_idx:end_idx] 189 | if subword in subword_dict: 190 | sw_tokens.append(subword) 191 | start_idx = end_idx 192 | end_idx = min([len(word), start_idx + ngram_max]) 193 | elif subword == "": 194 | break 195 | elif len(subword) == 1: 196 | sw_tokens.append(UNK) 197 | start_idx = end_idx 198 | end_idx = min([len(word), start_idx + ngram_max]) 199 | else: 200 | end_idx -= 1 201 | 202 | return sw_tokens 203 | 204 | def tokenize(sentence): 205 | """给定预训练的词表和待分词的句子(默认已经通过nltk或者moses等工具进行过预分词),输出基于bpe的分词结果 206 | 207 | Args: 208 | sentence (str): 待分词的句子 209 | 210 | Return: 211 | list: 字词列表 212 | """ 213 | tokens = [] 214 | 215 | 
for word in sentence.split(): 216 | tokens.extend(subword_tokenize(word)) 217 | 218 | return tokens 219 | 220 | 221 | tokens = tokenize("the highest and largest mountain 好") 222 | print(tokens) 223 | ``` 224 | 225 | ['the', 'high', 'est', 'and', 'l', 'a', 'r', 'g', 'est', 'moun', 'tain', ''] 226 | 227 | 228 | 解码的代码就比较简单了,以``为标志做词的划分,其余相邻的词直接进行合并。 229 | 230 | 231 | ```python 232 | def detokenize(tokens): 233 | return "".join(tokens).replace("", " ").replace("", "") 234 | 235 | print(detokenize(tokens)) 236 | ``` 237 | 238 | the highest and largest mountain 239 | 240 | -------------------------------------------------------------------------------- /tutorials/Chapter2/ChineseTokenizer.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "metadata": { 3 | "language_info": { 4 | "codemirror_mode": { 5 | "name": "ipython", 6 | "version": 3 7 | }, 8 | "file_extension": ".py", 9 | "mimetype": "text/x-python", 10 | "name": "python", 11 | "nbconvert_exporter": "python", 12 | "pygments_lexer": "ipython3", 13 | "version": "3.7.3-final" 14 | }, 15 | "orig_nbformat": 2, 16 | "kernelspec": { 17 | "name": "python37364bitpytorchlatestconda37dda3a0837247e597f023e05705e960", 18 | "display_name": "Python 3.7.3 64-bit ('pytorch_latest': conda)" 19 | } 20 | }, 21 | "nbformat": 4, 22 | "nbformat_minor": 2, 23 | "cells": [ 24 | { 25 | "cell_type": "markdown", 26 | "metadata": {}, 27 | "source": [ 28 | "# 中文分词\n", 29 | "与大部分印欧语系的语言不同,中文在词与词之间没有任何空格之类的显示标志指示词的边界。因此,中文分词是很多自然语言处理系统中的基础模块和首要环节。\n" 30 | ] 31 | }, 32 | { 33 | "cell_type": "markdown", 34 | "metadata": {}, 35 | "source": [ 36 | "## 中文分词基本原理\n", 37 | "从20世纪80年代或更早的时候起,学者们研究了很多的分词方法,这些方法大致可以分为三大类:\n", 38 | "\n", 39 | "基于词表的分词方法\n", 40 | "- 正向最大匹配法(forward maximum matching method, FMM)\n", 41 | "- 逆向最大匹配法(backward maximum matching method, BMM)\n", 42 | "- N-最短路径方法\n", 43 | "基于统计模型的分词方法\n", 44 | "- 基于N-gram语言模型的分词方法\n", 45 | "基于序列标注的分词方法\n", 46 | "- 基于HMM的分词方法\n", 47 | "- 基于CRF的分词方法\n", 48 | "- 基于词感知机的分词方法\n", 49 | "- 基于深度学习的端到端的分词方法\n", 50 | "\n", 51 | "在这里只介绍jieba分词用到的**基于N-gram语言模型的分词方法**和**基于HMM的分词方法**" 52 | ] 53 | }, 54 | { 55 | "cell_type": "markdown", 56 | "metadata": {}, 57 | "source": [ 58 | "\n", 59 | "\n", 60 | "### 基于N-gram语言模型的分词方法\n", 61 | "假设随机变量S为一个汉字序列,W是S上所有可能的切分路径。对于分词,实际上就是求解使条件概率P(W∣S)最大的切分路径W∗,即\n", 62 | "$$\n", 63 | "W^*=\\mathop{\\arg\\max}\\limits_{W}P(W|S)\n", 64 | "$$\n", 65 | "根据贝叶斯公式\n", 66 | "$$\n", 67 | "W*=\\mathop{\\arg\\max}\\limits_{W}\\frac{P(W)P(S|W)}{P(S)}\n", 68 | "$$\n", 69 | "由于P(S)为归一化因子,P(S∣W)恒为1,因此只需要求解P(W)。P(W)使用N-gram语言模型建模,定义如下(以Bi-gram为例):\n", 70 | "$$\n", 71 | "P(W)=P(w_0,w_1,...w_n)=P(w_0)P(w_1|w_0)P(w_2|w_1)...P(w_n|w_{n-1})=P(w_0)\\prod \\limits_{t=1}^nP(w_n|w_{n-1})\n", 72 | "$$\n", 73 | "至此,各切分路径的好坏程度(条件概率P(W∣S))可以求解。简单的,可以根据DAG枚举全路径,暴力求解最优路径;也可以使用动态规划的方法求解,jieba中不带HMM新词发现的分词,就是DAG + Uni-gram的语言模型 + 后向DP的方式进行的。\n", 74 | "\n" 75 | ] 76 | }, 77 | { 78 | "cell_type": "markdown", 79 | "metadata": {}, 80 | "source": [ 81 | "### 基于HMM的分词方法\n", 82 | "该方法属于由字构词的分词方法,由字构词的分词方法思想并不复杂,它是将分词问题转化为字的分类问题(序列标注问题)。从某些层面讲,由字构词的方法并不依赖于事先编制好的词表,但仍然需要分好词的训练语料。\n", 83 | "\n", 84 | "规定每个字有4个词位:\n", 85 | "- 词首 B\n", 86 | "- 词中 M\n", 87 | "- 词尾 E\n", 88 | "- 单字成词 S\n", 89 | "\n", 90 | "![序列标注问题示例](./assets/序列标注问题.png)\n", 91 | "\n", 92 | "由于HMM是一个生成式模型,X为观测序列,Y为隐序列。基于HMM的两个假设\n", 93 | "- 齐次马尔科夫性假设,即假设隐藏的马尔科夫链在任意时刻t的状态只依赖于其前一时刻的状态,与其它时刻的状态及观测无关,也与时刻t无关;\n", 94 | "- 观测独立性假设,即假设任意时刻的观测只依赖于该时刻的马尔科夫链的状态,与其它观测和状态无关,\n", 95 | "\n", 96 | "HMM模型中的五元组表示:\n", 97 | "- 观测序列\n", 98 | "- 隐藏状态序列\n", 
99 | "- 状态初始概率\n", 100 | "- 状态转移概率\n", 101 | "- 状态发射概率\n", 102 | "\n", 103 | "最的模型为:\n", 104 | "$$\n", 105 | "P(X, Y)=P(y_0)P(y_0|x_0)\\prod \\limits_{t=1}^nP(y_t|y_{t-1})P(x_t|y_t)\n", 106 | "$$\n", 107 | "![HMM模型](./assets/HMM模型.png)\n", 108 | "\n", 109 | "其中X为观测序列,Y为隐藏状态序列(B,M,E,S),$P(y_0)$位状态初始概率,$P(y_t|y_{t-1})$为状态转移概率,$P(x_t|y_t)$为状态发射概率。\n", 110 | "\n", 111 | "HMM模型有三个基本问题:\n", 112 | "\n", 113 | "- 概率计算问题,HMM的五元组,计算在模型下给定隐藏序列Y,计算观测序列X出现的概率也就是Forward-backward算法;\n", 114 | "\n", 115 | "- 学习问题,已知观测序列{X},隐藏序列{Y} ,估计模型的状态初始概率,状态转移概率和状态发射概率 ,使得在该模型下观测序列X的概率尽可能的大,即用极大似然估计的方法估计参数;\n", 116 | "\n", 117 | "- 预测问题,也称为解码问题,已知模型状态初始概率,状态转移概率和状态发射概率和观测序列X,求最大概率的隐藏序列Y。\n", 118 | "\n", 119 | "其中,jieba分词主要中主要涉及第三个问题,也即预测问题。计算方法会涉及到维特比算法,这个后面会结合代码讲到。\n" 120 | ] 121 | }, 122 | { 123 | "cell_type": "markdown", 124 | "metadata": {}, 125 | "source": [ 126 | "## jieba分词\n", 127 | "下面我们以jieba分词为例,结合上满介绍的原理和代码介绍一下分词的内部原理,并参考jieba分词源码给出一个简单的实现版本。\n", 128 | "jieba的分词过程可以概括为以下几个步骤\n", 129 | "- 依据统计词典(模型中这部分已经具备,也可自定义加载)构建统计词典中词的前缀词典。\n", 130 | "- 对输入的内容按照子句进行分割(使用正则表达式,以标点符号或者非中文字符为分界)。\n", 131 | "- 依据前缀词典对输入的句子进行DAG(有向无环图)的构造。\n", 132 | "- 使用动态规划的方法在DAG上找到一条概率最大路径,依据此路径进行分词。\n", 133 | "- 对于未收录词(是指不在统计词典中出现的词,未收录词怎么识别可以看完第三部分之后思考一下),使用HMM(隐马尔克夫模型)模型,用Viterbi(维特比)算法找出最可能出现的隐状态序列。\n", 134 | "\n", 135 | "![jieba分词算法流程图](./assets/jieba分词算法流程图.jpg)\n" 136 | ] 137 | }, 138 | { 139 | "cell_type": "markdown", 140 | "metadata": {}, 141 | "source": [ 142 | "### 生成前缀词典\n", 143 | "统计词典在jieba包的dict.txt文件中,是开发者已经统计好的词典,第一列代表的是词语,第二列是词频,第三列是词性,我们主要用到前两列信息,词性这部分,这里没有涉及。我们先看一下词典中的部分内容:\n" 144 | ] 145 | }, 146 | { 147 | "cell_type": "code", 148 | "execution_count": 1, 149 | "metadata": { 150 | "tags": [] 151 | }, 152 | "outputs": [ 153 | { 154 | "output_type": "stream", 155 | "name": "stdout", 156 | "text": "孟玉楼 3 nr\n\n斗蓬 3 ns\n\n铁法官 3 n\n\n羿射九日 3 nr\n\n占金丰 4 nr\n\n" 157 | } 158 | ], 159 | "source": [ 160 | "import random\n", 161 | "\n", 162 | "with open(\"assets/dict.txt\") as f:\n", 163 | " lines = f.readlines()\n", 164 | " for line in random.choices(lines, k=5):\n", 165 | " print(line)" 166 | ] 167 | }, 168 | { 169 | "cell_type": "markdown", 170 | "metadata": {}, 171 | "source": [ 172 | "当程序运行的时候,它会加载统计词典生成前缀词典,前缀词典是表示什么的呢,我们举个简单的例子。\n", 173 | "\n", 174 | "比如统计词典中含有如下词语\n", 175 | "```\n", 176 | "我 123\n", 177 | "在 234\n", 178 | "学习 456\n", 179 | "结巴 345\n", 180 | "分词 456\n", 181 | "结巴分词 23\n", 182 | "学 2344\n", 183 | "分 23\n", 184 | "结 234\n", 185 | "```\n", 186 | "则前缀词典构造如下,它是将在统计词典中出现的每一个词的每一个前缀提取出来,统计词频,如果某个前缀词在统计词典中没有出现,词频统计为0,如果这个前缀词已经统计过,则不再重复。\n", 187 | "```\n", 188 | "我 123\n", 189 | "在 234\n", 190 | "学 2344\n", 191 | "学习 456\n", 192 | "结 234\n", 193 | "结巴 345\n", 194 | "结巴分 0\n", 195 | "结巴分词 23\n", 196 | "分 23\n", 197 | "分词 456\n", 198 | "```\n", 199 | "这里把未出现的统计词也统计出来,且词频统计为0,是为了后面构造DAG方便。生成前缀词典的代码如下,在jieba分词中前缀词典一般会进行缓存,不需要每次分词都重新加载。" 200 | ] 201 | }, 202 | { 203 | "cell_type": "code", 204 | "execution_count": 2, 205 | "metadata": { 206 | "tags": [] 207 | }, 208 | "outputs": [ 209 | { 210 | "output_type": "stream", 211 | "name": "stdout", 212 | "text": "生成前缀词典的大小为60102007。\n" 213 | } 214 | ], 215 | "source": [ 216 | "def get_prefix_dict(f_name):\n", 217 | " lfreq = {}\n", 218 | " ltotal = 0\n", 219 | " f = open(f_name)\n", 220 | " for lineno, line in enumerate(f, 1):\n", 221 | " try:\n", 222 | " line = line.strip()\n", 223 | " word, freq = line.split(' ')[:2]\n", 224 | " freq = int(freq)\n", 225 | " lfreq[word] = freq\n", 226 | " ltotal += freq\n", 227 | " for ch in range(len(word)):\n", 228 | " wfrag = word[:ch + 
1]\n", 229 | " if wfrag not in lfreq:\n", 230 | " lfreq[wfrag] = 0\n", 231 | " except ValueError:\n", 232 | " raise ValueError(\n", 233 | " 'invalid dictionary entry in %s at Line %s: %s' % (f_name, lineno, line))\n", 234 | " f.close()\n", 235 | " return lfreq, ltotal\n", 236 | "\n", 237 | "freq, total = get_prefix_dict(\"assets/dict.txt\")\n", 238 | "print(\"生成前缀词典的大小为{}。\".format(total))" 239 | ] 240 | }, 241 | { 242 | "cell_type": "markdown", 243 | "metadata": {}, 244 | "source": [ 245 | "### 分割子句\n", 246 | "假如我们要对`\"我爱结巴分词。我叫孙悟空,我爱北京,我爱Python和C++。 《机器翻译》这本书是我的最爱。\"`这句话进行分词,我们首先要把它们划分为子句,第一个原因是标点符号是天然的词语间隔,我们的词语中不会包含标点符号。第二个原因是我们的词典中可能没有包含标点符号的内容,我们应当以这些非中文字符、标点字符作为分界,将输入内容划分为子句,对每个子句进行分词。\n", 247 | "\n", 248 | "一个可行的实现方法是列举所有在中文词典中可能会出现的字符,将连续出现的这些字符作为一个子句进行划分,这些字符之外的其他符号,我们便可以认为是中文标点符号,并把他们作为子句划分标志。我们可以简单的使用正则表达式来完成,出现在中文词典中的字符可能是中文字符、阿拉伯数字、英文字母、+=.等部分英文数字标点。\n" 249 | ] 250 | }, 251 | { 252 | "cell_type": "code", 253 | "execution_count": 3, 254 | "metadata": { 255 | "tags": [] 256 | }, 257 | "outputs": [ 258 | { 259 | "output_type": "stream", 260 | "name": "stdout", 261 | "text": "['', '我爱结巴分词', '。', '我叫孙悟空', ',', '我爱北京', ',', '我爱Python和C++', '。 《', '机器翻译', '》', '这本书是我的最爱', '。']\n" 262 | } 263 | ], 264 | "source": [ 265 | "import re\n", 266 | "\n", 267 | "example = \"我爱结巴分词。我叫孙悟空,我爱北京,我爱Python和C++。 《机器翻译》这本书是我的最爱。\"\n", 268 | "\n", 269 | "# 列举所有中文词中可能包含的字符\n", 270 | "re_han_default = re.compile(\"([\\u4E00-\\u9FD5a-zA-Z0-9+#&\\._%\\-]+)\", re.U)\n", 271 | "\n", 272 | "# 将连续出现的合法字符作为一个子句的划分\n", 273 | "blocks = re_han_default.split(example)\n", 274 | "\n", 275 | "print(blocks)\n" 276 | ] 277 | }, 278 | { 279 | "cell_type": "markdown", 280 | "metadata": {}, 281 | "source": [ 282 | "我们看到我们已经将整句话分割成子句,每个子句中不再包含标点符号。对于标点符号部分,单独的标点符号我们可以将它直接作为一个单词,而对于`'。 《'`这种情况,我们可以用空白字符\\t\\r\\n将它们进一步分开。\n" 283 | ] 284 | }, 285 | { 286 | "cell_type": "code", 287 | "execution_count": 4, 288 | "metadata": { 289 | "tags": [] 290 | }, 291 | "outputs": [ 292 | { 293 | "output_type": "stream", 294 | "name": "stdout", 295 | "text": "['。', ' ', '《']\n" 296 | } 297 | ], 298 | "source": [ 299 | "re_skip_default = re.compile(\"(\\r\\n|\\s)\", re.U)\n", 300 | "\n", 301 | "example = \"。 《\"\n", 302 | "\n", 303 | "words = re_skip_default.split(example)\n", 304 | "\n", 305 | "print(words)" 306 | ] 307 | }, 308 | { 309 | "cell_type": "markdown", 310 | "metadata": {}, 311 | "source": [ 312 | "### 构造DAG\n", 313 | "我们来讲解一下程序里是怎么存储“DAG”的,程序实现图的构建是存储为字典形式的,以每个字所在的位置为键值key,相应划分的末尾位置构成的列表为value,相应划分的末尾位置指的是什么呢,我们举例来说明\n", 314 | "\n", 315 | "“我在学习结巴分词”\n", 316 | "在这句话里,我们将每一个字用所在位置来代表,比如0代表“我”,4代表“结”,针对“结”,我们可以在前缀词典里看到以“结”为前缀的词“结”,“结巴”,“结巴分词”的词频大于0,因此“结”,“巴”,“词”为相应划分的末尾位置,因此键值4的value为[4,5,7],其他键值的对应value统计如下\n", 317 | "```\n", 318 | "0 :[0]\n", 319 | "1 :[1]\n", 320 | "2 :[2,3]\n", 321 | "3 :[3]\n", 322 | "4 :[4,5,7]\n", 323 | "5 :[5]\n", 324 | "6 :[6,7]\n", 325 | "7 :[7]\n", 326 | "```\n", 327 | "注:每一个字都将其自己作为相应划分的末尾位置,即使这个字不在统计词典里。\n", 328 | "\n", 329 | "基于以上构建的键值对,我们将有向图可视化一下,以便方便理解。\n", 330 | "![DAG](./assets/DAG.jpg)\n", 331 | "\n", 332 | "从“我”到“词”的路径有以下10种\n", 333 | "```\n", 334 | "我/在/学/习/结/巴/分/词\n", 335 | "我/在/学习/结巴分词\n", 336 | "我/在/学习/结/巴/分/词\n", 337 | "我/在/学习/结巴/分词\n", 338 | "我/在/学习/结/巴/分词\n", 339 | "我/在/学习/结巴/分/词\n", 340 | "我/在/学/习/结/巴/分词\n", 341 | "我/在/学/习/结巴/分/词\n", 342 | "我/在/学/习/结巴分词\n", 343 | "我/在/学/习/结巴/分词\n", 344 | "```\n", 345 | "\n" 346 | ] 347 | }, 348 | { 349 | "cell_type": "code", 350 | "execution_count": 5, 351 | "metadata": { 352 | "tags": [] 353 | }, 354 | "outputs": [ 355 | { 356 | "output_type": 
"stream", 357 | "name": "stdout", 358 | "text": "{0: [0], 1: [1], 2: [2, 3], 3: [3], 4: [4, 5, 7], 5: [5], 6: [6, 7], 7: [7]}\n" 359 | } 360 | ], 361 | "source": [ 362 | "def get_DAG(sentence, freq):\n", 363 | " DAG = {}\n", 364 | " N = len(sentence)\n", 365 | " for k in range(N):\n", 366 | " tmplist = []\n", 367 | " i = k\n", 368 | " frag = sentence[k]\n", 369 | " while i < N and frag in freq:\n", 370 | " if freq[frag]:\n", 371 | " tmplist.append(i)\n", 372 | " i += 1\n", 373 | " frag = sentence[k:i + 1]\n", 374 | " if not tmplist:\n", 375 | " tmplist.append(k)\n", 376 | " DAG[k] = tmplist\n", 377 | " return DAG\n", 378 | "\n", 379 | "example = \"我在学习结巴分词\"\n", 380 | "dag = get_DAG(example, freq)\n", 381 | "print(dag)" 382 | ] 383 | }, 384 | { 385 | "cell_type": "markdown", 386 | "metadata": {}, 387 | "source": [ 388 | "### 动态规划找到最大路径\n", 389 | "接下来我们需要计算上面10条路径那一条的可能性最大,将可能性最大的那条路径对应的划分作为我们的分词结果。\n", 390 | "$$\n", 391 | "W^*=\\mathop{\\arg\\min}\\limits_{W}P(W)\n", 392 | "$$\n", 393 | "其中$W$为句子的一个划分,${w_1,w_2,...wn}$\n", 394 | "$$\n", 395 | "P(W)=P(w_1,w_2...wn)=\\prod \\limits_{i=0}^nP(w_n)\n", 396 | "$$\n", 397 | "\n", 398 | "每一个词出现的概率等于该词在前缀里的词频除以所有词的词频之和。如果词频为0或是不存在,当做词频为1来处理。\n", 399 | "$$\n", 400 | "P(w_n)=\\frac{freq[w_n]+1}{total}\n", 401 | "$$\n", 402 | "\n", 403 | "这里会取对数概率,即在每个词概率的基础上取对数,一是为了防止下溢,二后面的概率相乘可以变成相加计算。\n", 404 | "\n", 405 | "最后我们使用动态规划算法算出概率最大的路径。\n", 406 | "\n" 407 | ] 408 | }, 409 | { 410 | "cell_type": "code", 411 | "execution_count": 6, 412 | "metadata": { 413 | "tags": [] 414 | }, 415 | "outputs": [ 416 | { 417 | "output_type": "stream", 418 | "name": "stdout", 419 | "text": "{8: (0, 0), 7: (-9.257210763727148, 7), 6: (-14.967114814124178, 7), 5: (-24.384334710144643, 5), 4: (-14.222674339176683, 7), 3: (-25.03090606994119, 3), 2: (-22.62511739105392, 3), 1: (-27.038731622224248, 1), 0: (-32.24695578526084, 0)}\n" 420 | } 421 | ], 422 | "source": [ 423 | "from math import log\n", 424 | "\n", 425 | "def clac(sentence, DAG, freq, total):\n", 426 | " n = len(sentence)\n", 427 | " route = {n: (0, 0)}\n", 428 | " log_total = log(total)\n", 429 | "\n", 430 | " for i in range(n-1, -1, -1):\n", 431 | " cache = []\n", 432 | " for j in DAG[i]:\n", 433 | " log_p = log(freq.get(sentence[i:j+1], 0) or 1)\n", 434 | " cache.append((log_p - log_total + route[j+1][0], j))\n", 435 | " route[i] = max(cache)\n", 436 | " return route\n", 437 | "\n", 438 | "route = clac(example, dag, freq, total)\n", 439 | "print(route)\n", 440 | "\n" 441 | ] 442 | }, 443 | { 444 | "cell_type": "markdown", 445 | "metadata": {}, 446 | "source": [ 447 | "通过上面的计算结果,`route`中的key代表最优路径中当前词的起始位置,value的第二个元素代表最优路径中当前词的末尾位置,通过这两个量我们可以推出一个初步的基于词典和词频的分词结果。" 448 | ] 449 | }, 450 | { 451 | "cell_type": "code", 452 | "execution_count": 7, 453 | "metadata": { 454 | "tags": [] 455 | }, 456 | "outputs": [ 457 | { 458 | "output_type": "stream", 459 | "name": "stdout", 460 | "text": "我\n在\n学习\n结巴分词\n" 461 | } 462 | ], 463 | "source": [ 464 | "def cut_no_hmm(sentence, route):\n", 465 | " i = 0\n", 466 | " while(i < len(route)-1):\n", 467 | " j = route[i][1]\n", 468 | " yield sentence[i:j+1]\n", 469 | " i = j + 1\n", 470 | "\n", 471 | "for word in cut_no_hmm(example, route):\n", 472 | " print(word)" 473 | ] 474 | }, 475 | { 476 | "cell_type": "markdown", 477 | "metadata": {}, 478 | "source": [ 479 | "### HMM算法对于未登录词的识别\n", 480 | "在jieba分词中,基于HMM的分词主要是作为基于Uni—gram分词的一个补充,主要是解决OOV(out of 
vocabulary)问题的,它的作用是对未登录词典的词进行识别发现。我们首先用一个例子说明HMM的重要性。比如我们要对一个包含人名的句子进行分词,“韩冰是个好人”。“韩冰”这个词不在词典之中,所以前面基于词典+Uni-Gram语言模型的方法进行分词就会将“韩冰”这个人名分成“韩”+“冰”。所以我们需要一个有一定泛化能力的机器学习模型对这些新词进行发现。" 481 | ] 482 | }, 483 | { 484 | "cell_type": "code", 485 | "execution_count": 8, 486 | "metadata": { 487 | "tags": [] 488 | }, 489 | "outputs": [ 490 | { 491 | "output_type": "execute_result", 492 | "data": { 493 | "text/plain": "['韩', '冰', '是', '个', '好人']" 494 | }, 495 | "metadata": {}, 496 | "execution_count": 8 497 | } 498 | ], 499 | "source": [ 500 | "example = \"韩冰是个好人\"\n", 501 | "dag = get_DAG(example, freq)\n", 502 | "route = clac(example, dag, freq, total)\n", 503 | "\n", 504 | "list(cut_no_hmm(example, route))" 505 | ] 506 | }, 507 | { 508 | "cell_type": "markdown", 509 | "metadata": {}, 510 | "source": [ 511 | "使用HMM进行分词的原理在前面已经介绍过了。利用HMM模型进行分词,主要是将分词问题视为一个序列标注(sequence labeling)问题,其中,句子为观测序列,分词结果为状态序列。首先通过语料训练出HMM相关的模型,然后利用Viterbi算法进行求解,最终得到最优的状态序列,然后再根据状态序列,输出分词结果。\n", 512 | "\n", 513 | "这里的状态序列的元素有四种\n", 514 | "- \"B\":Begin(这个字处于词的开始位置)\n", 515 | "- \"M\":Middle(这个字处于词的中间位置)\n", 516 | "- \"E\":End(这个字处于词的结束位置)\n", 517 | "- \"S\":Single(这个字是单字成词)}\n", 518 | "\n", 519 | "由于分词算法术语HMM的预测问题(已知模型状态初始概率,状态转移概率和状态发射概率和观测序列X,求最大概率的隐藏序列Y),所以我们需要在已经进行标注的数据集上训练我们模型的参数,也就是初始概率,状态转移概率和状态发射概率。这里jieba分词中包含了一个已经训练好的模型,至于模型数据来源和训练方法,这里不再赘述,可以参考[模型的数据是如何生成的?]https://github.com/fxsjy/jieba/issues/7 。这里我们直接将模型加载进来用。" 520 | ] 521 | }, 522 | { 523 | "cell_type": "code", 524 | "execution_count": 9, 525 | "metadata": {}, 526 | "outputs": [], 527 | "source": [ 528 | "import pickle\n", 529 | "import math\n", 530 | "\n", 531 | "prob_start = pickle.load(open(\"./assets/prob_start.p\", \"rb\")) # 初始概率参数\n", 532 | "prob_emit = pickle.load(open(\"./assets/prob_emit.p\", \"rb\")) # 发射概率\n", 533 | "prob_trans = pickle.load(open(\"./assets/prob_trans.p\", \"rb\")) # 状态转移概率" 534 | ] 535 | }, 536 | { 537 | "cell_type": "markdown", 538 | "metadata": {}, 539 | "source": [ 540 | "首先是初始概率,及输入观察序列(带分词句子)首个字符是\"B\",\"M\", \"E\", \"S\"的概率(这里的概率也进行了对数运算及log(p))。由这个概率值可以看出,句子首字单字成词(S)和作为词的词首(B)的概率较高,作为词中和词尾概率为0,也比较符合我们的常识。" 541 | ] 542 | }, 543 | { 544 | "cell_type": "code", 545 | "execution_count": 10, 546 | "metadata": {}, 547 | "outputs": [ 548 | { 549 | "output_type": "execute_result", 550 | "data": { 551 | "text/plain": "{'B': 0.7689828525554734, 'E': 0.0, 'M': 0.0, 'S': 0.2310171474445266}" 552 | }, 553 | "metadata": {}, 554 | "execution_count": 10 555 | } 556 | ], 557 | "source": [ 558 | "# 为了直观,将log概率转化为真实概率\n", 559 | "{key:math.exp(value) for key, value in prob_start.items()}" 560 | ] 561 | }, 562 | { 563 | "cell_type": "markdown", 564 | "metadata": {}, 565 | "source": [ 566 | "接下来是状态转移概率,及\"B\",\"M\", \"E\", \"S\"四个状态之间相互转化的概率。" 567 | ] 568 | }, 569 | { 570 | "cell_type": "code", 571 | "execution_count": 11, 572 | "metadata": {}, 573 | "outputs": [ 574 | { 575 | "output_type": "execute_result", 576 | "data": { 577 | "text/plain": "{'B': {'E': 0.6000000000000004, 'M': 0.4},\n 'E': {'B': 0.5544853051164425, 'S': 0.44551469488355755},\n 'M': {'E': 0.7164487459986911, 'M': 0.2835512540013088},\n 'S': {'B': 0.48617017333894563, 'S': 0.5138298266610544}}" 578 | }, 579 | "metadata": {}, 580 | "execution_count": 11 581 | } 582 | ], 583 | "source": [ 584 | "{key: {k: math.exp(v) for k, v in value.items()} for key, value in prob_trans.items()}" 585 | ] 586 | }, 587 | { 588 | "cell_type": "markdown", 589 | "metadata": {}, 590 | "source": [ 591 | "最后是发射概率,即在观测序列是某个字的情况下,被标注为\"B\",\"M\", \"E\", \"S\"的概率" 592 | ] 593 | }, 594 | { 595 | "cell_type": 
"code", 596 | "execution_count": 12, 597 | "metadata": {}, 598 | "outputs": [ 599 | { 600 | "output_type": "execute_result", 601 | "data": { 602 | "text/plain": "{'B': {'一': 0.025874486447195644,\n '丁': 0.0002960323136559398,\n '七': 0.0004026703175442123,\n '万': 0.0018186831560606151,\n '丈': 0.00014100868588615948},\n 'E': {'一': 0.002369710374262949,\n '丁': 0.000114401037236071,\n '七': 0.00010115647270757471,\n '万': 0.00047351540431744344,\n '丈': 0.00012050479628052327},\n 'M': {'一': 0.01193645010412285,\n '丁': 0.00035872815397116633,\n '七': 0.001416288550382968,\n '万': 0.0021550909026310924,\n '丈': 8.165936412282943e-05},\n 'S': {'∶': 1.3353987946490163e-07,\n '一': 0.007272247985959882,\n '丁': 0.00012041958630747509,\n '丂': 6.67699397324508e-08,\n '七': 0.00025622964372327994}}" 603 | }, 604 | "metadata": {}, 605 | "execution_count": 12 606 | } 607 | ], 608 | "source": [ 609 | "# 由于这个表比较大,所以随机挑选一些出来看\n", 610 | "{key: {k: math.exp(v) for i, (k, v) in enumerate(value.items()) if i < 5} for key, value in prob_emit.items()}\n" 611 | ] 612 | }, 613 | { 614 | "cell_type": "markdown", 615 | "metadata": {}, 616 | "source": [ 617 | "有了模型,接下来就可以用viterbi算法对给定的序列进行分词。还拿上面的例子举例 \"韩冰是个好人\" -> \\['韩', '冰', '是', '个', '好人'\\],对于已经成词的部分“好人”,我们不需要对它进行计算了,我们只需要将还是单个字的序列“韩冰是个”放入到HMM模型中进行分词,也就是将这四个字分别打上 “BEMS”标签。并且我们期望的标签是\\['韩'->B, '冰'->M, '是'->S, '个'->S\\]。首先我们简单介绍一下维特比算法。\n", 618 | "\n", 619 | "#### Viterbi算法\n", 620 | "viterbi维特比算法解决的是篱笆型的图的最短路径问题,图的节点按列组织,每列的节点数量可以不一样,每一列的节点只能和相邻列的节点相连,不能跨列相连,节点之间有着不同的距离,距离的值就不在图上一一标注出来了,大家自行脑补。\n", 621 | "![Viterbi算法](./assets/HMM分词篱笆型图.drawio.png)\n", 622 | "\n", 623 | "过程非常简单:\n", 624 | "\n", 625 | "为了找出Start到End之间的最短路径,我们先从Start开始从左到右一列一列地来看。首先起点是Start,从Start到“韩”字对应的状态列的路径有四种可能:Start-B、Start-E、Start-M,Start-S。对应的路径长度即\n", 626 | "\n", 627 | "![viterbi_step1](./assets/viterbi_step1.drawio.png)\n" 628 | ] 629 | }, 630 | { 631 | "cell_type": "code", 632 | "execution_count": 13, 633 | "metadata": { 634 | "tags": [] 635 | }, 636 | "outputs": [ 637 | { 638 | "output_type": "stream", 639 | "name": "stdout", 640 | "text": "-8.093263409081425 -3.14e+100 -3.14e+100 -10.534873750321356\n" 641 | } 642 | ], 643 | "source": [ 644 | "import sys\n", 645 | "\n", 646 | "MIN_FLOAT = -3.14e100\n", 647 | "start_2_B = prob_emit[\"B\"].get(\"韩\", MIN_FLOAT) + prob_start[\"B\"]\n", 648 | "start_2_E = prob_emit[\"E\"].get(\"韩\", MIN_FLOAT) + prob_start[\"E\"]\n", 649 | "start_2_M = prob_emit[\"M\"].get(\"韩\", MIN_FLOAT) + prob_start[\"M\"]\n", 650 | "start_2_S = prob_emit[\"S\"].get(\"韩\", MIN_FLOAT) + prob_start[\"S\"]\n", 651 | "\n", 652 | "print(start_2_B, start_2_E, start_2_M, start_2_S)" 653 | ] 654 | }, 655 | { 656 | "source": [ 657 | "我们不能武断地说这四条路径中中的哪一段必定是全局最短路径中的一部分,目前为止任何一段都有可能是全局最优路径的备选项。我们继续往右看,到了“冰”这一列列。按照四个状态进行逐一分析,先看到达“冰”(B)节点的各个路径长度。\n", 658 | "\n", 659 | "![viterbi_step2](./assets/viterbi_step2.drawio.png)\n", 660 | "\n", 661 | "以上这四条路径,各节点距离加起来对比一下,我们就可以知道其中哪一条是最短的。因为Start-B-B是最短的,那么我们就知道了经过“冰”(B)的所有路径当中Start-B-B是最短的,其它三条路径路径都比Start-B-B长,绝对不是目标答案,可以大胆地删掉了。删掉了不可能是答案的路径,就是viterbi算法(维特比算法)的重点,因为后面我们再也不用考虑这些被删掉的路径了。现在经过“冰”(B)的所有路径只剩一条路径了(红色标识)\n", 662 | "\n" 663 | ], 664 | "cell_type": "markdown", 665 | "metadata": {} 666 | }, 667 | { 668 | "cell_type": "code", 669 | "execution_count": 14, 670 | "metadata": { 671 | "tags": [] 672 | }, 673 | "outputs": [ 674 | { 675 | "output_type": "stream", 676 | "name": "stdout", 677 | "text": "-3.14e+100 -3.14e+100 -6.28e+100 -19.68864099798377\n" 678 | } 679 | ], 680 | "source": [ 681 | "B_2_B = start_2_B + prob_trans[\"B\"].get(\"B\", 
MIN_FLOAT) + prob_emit[\"B\"].get(\"冰\", MIN_FLOAT)\n", 682 | "E_2_B = start_2_E + prob_trans[\"E\"].get(\"B\", MIN_FLOAT) + prob_emit[\"B\"].get(\"冰\", MIN_FLOAT)\n", 683 | "M_2_B = start_2_M + prob_trans[\"M\"].get(\"B\", MIN_FLOAT) + prob_emit[\"B\"].get(\"冰\", MIN_FLOAT)\n", 684 | "S_2_B = start_2_S + prob_trans[\"S\"].get(\"B\", MIN_FLOAT) + prob_emit[\"B\"].get(\"冰\", MIN_FLOAT)\n", 685 | "\n", 686 | "print(B_2_B, E_2_B, M_2_B, S_2_B)" 687 | ] 688 | }, 689 | { 690 | "source": [ 691 | "以此类推,我们可以分别找出到达“冰”字对应列的所有四个状态的最优路径。\n", 692 | "\n", 693 | "![viterbi_step3](./assets/viterbi_step3.drawio.png)\n", 694 | "\n", 695 | "对后面的“是”,“个”也进行同样的操作,我们便可以得到一条全局最优路径。\n", 696 | "![viterbi_step4](./assets/viterbi_step4.drawio.png)\n" 697 | ], 698 | "cell_type": "markdown", 699 | "metadata": {} 700 | }, 701 | { 702 | "cell_type": "code", 703 | "execution_count": 15, 704 | "metadata": { 705 | "tags": [] 706 | }, 707 | "outputs": [ 708 | { 709 | "output_type": "stream", 710 | "name": "stdout", 711 | "text": "韩 -> B\n冰 -> E\n是 -> S\n个 -> S\n" 712 | } 713 | ], 714 | "source": [ 715 | "def viterbi(obs, states, start_p, trans_p, emit_p):\n", 716 | " V = [{}] # tabular\n", 717 | " path = {}\n", 718 | " for y in states: # init\n", 719 | " V[0][y] = start_p[y] + emit_p[y].get(obs[0], MIN_FLOAT)\n", 720 | " path[y] = [y]\n", 721 | " for t in range(1, len(obs)):\n", 722 | " V.append({})\n", 723 | " newpath = {}\n", 724 | " for y in states:\n", 725 | " em_p = emit_p[y].get(obs[t], MIN_FLOAT)\n", 726 | " (prob, state) = max(\n", 727 | " [(V[t - 1][y0] + trans_p[y0].get(y, MIN_FLOAT) + em_p, y0) for y0 in states])\n", 728 | " V[t][y] = prob\n", 729 | " newpath[y] = path[state] + [y]\n", 730 | " path = newpath\n", 731 | "\n", 732 | " (prob, state) = max((V[len(obs) - 1][y], y) for y in 'ES')\n", 733 | "\n", 734 | " return (prob, path[state])\n", 735 | "\n", 736 | "example = \"韩冰是个\"\n", 737 | "prob, path = viterbi(example, \"BEMS\", prob_start, prob_trans, prob_emit)\n", 738 | "\n", 739 | "for w, s in zip(example, path):\n", 740 | " print(w, \"->\", s)" 741 | ] 742 | }, 743 | { 744 | "source": [ 745 | "根据HMM输出的结果,我们可以将”韩“->B,”冰“->E合并成为一个新词”韩冰“。所以”韩冰是个好人“的分词结果就是['韩冰', '是', '个', '好人']" 746 | ], 747 | "cell_type": "markdown", 748 | "metadata": {} 749 | }, 750 | { 751 | "cell_type": "code", 752 | "execution_count": 16, 753 | "metadata": { 754 | "tags": [] 755 | }, 756 | "outputs": [ 757 | { 758 | "output_type": "stream", 759 | "name": "stdout", 760 | "text": "韩冰\n是\n个\n好人\n" 761 | } 762 | ], 763 | "source": [ 764 | "def hmm(sentence, start_P, trans_P, emit_P):\n", 765 | " prob, pos_list = viterbi(sentence, 'BMES', start_P, trans_P, emit_P)\n", 766 | " begin, nexti = 0, 0\n", 767 | " # print pos_list, sentence\n", 768 | " for i, char in enumerate(sentence):\n", 769 | " pos = pos_list[i]\n", 770 | " if pos == 'B':\n", 771 | " begin = i\n", 772 | " elif pos == 'E':\n", 773 | " yield sentence[begin:i + 1]\n", 774 | " nexti = i + 1\n", 775 | " elif pos == 'S':\n", 776 | " yield char\n", 777 | " nexti = i + 1\n", 778 | " if nexti < len(sentence):\n", 779 | " yield sentence[nexti:]\n", 780 | "\n", 781 | "def cut_hmm(sentence):\n", 782 | " dag = get_DAG(sentence, freq)\n", 783 | " route = clac(sentence, dag, freq, total)\n", 784 | " i = 0\n", 785 | " buf = \"\"\n", 786 | " while(i < len(route)-1):\n", 787 | " j = route[i][1] + 1\n", 788 | "\n", 789 | " if j - i <= 1:\n", 790 | " buf += sentence[i]\n", 791 | " else:\n", 792 | " if buf:\n", 793 | " if len(buf) == 1:\n", 794 | " yield buf\n", 795 | " else:\n", 796 | " if buf not 
in freq:\n", 797 | " for w in hmm(buf, prob_start, prob_trans, prob_emit):\n", 798 | " yield w\n", 799 | " else:\n", 800 | " for w in buf:\n", 801 | " yield w\n", 802 | " buf = \"\"\n", 803 | " yield sentence[i:j]\n", 804 | " i = j\n", 805 | " \n", 806 | " if buf:\n", 807 | " if len(buf) == 1:\n", 808 | " yield buf\n", 809 | " buf = \"\"\n", 810 | " else:\n", 811 | " if buf not in freq:\n", 812 | " for w in hmm(buf, prob_start, prob_trans, prob_emit):\n", 813 | " yield w\n", 814 | " else:\n", 815 | " for w in buf:\n", 816 | " yield w\n", 817 | "\n", 818 | "example = \"韩冰是个好人\"\n", 819 | "for word in cut_hmm(example):\n", 820 | " print(word)" 821 | ] 822 | }, 823 | { 824 | "source": [ 825 | "### 正则表达式辅助分词\n", 826 | "除了上述使用机器学习的方法进行分词之外,在我们翻译语料的分词过程中,经常会遇到一些特殊情况,比如日期、数字、英文单词或者其他符合某个特定规则的词语,在前面的操作中,我们将他们划分到了子句之中,因为词典中某些词也会出现这些字符。但是,对于未出现在词典中的英文、数字、符号组合,我们也希望强制把它们当做一个词进行处理,而不是将它们分开。它们通常很难添加到词典中(因为数字字母的排列组合往往是很大的),却很容易通过一些简单的正则表达式对他们进行处理。\n", 827 | "```py\n", 828 | "1920.2333 # 浮点数\n", 829 | "2020.9.2 # 日期\n", 830 | "apple # 英文词\n", 831 | "```\n", 832 | "我们来看看如果只用 词典+HMM的方式处理他们会怎么样" 833 | ], 834 | "cell_type": "markdown", 835 | "metadata": {} 836 | }, 837 | { 838 | "cell_type": "code", 839 | "execution_count": 17, 840 | "metadata": { 841 | "tags": [] 842 | }, 843 | "outputs": [ 844 | { 845 | "output_type": "stream", 846 | "name": "stdout", 847 | "text": "['最终', '结果', '为', '1', '9', '2', '0', '.', '2', '3', '3', '3']\n['今天', '是', '2', '0', '2', '0', '.', '9', '.', '2']\n['A', 'p', 'p', 'l', 'e', '手机', '是', '我', '的', '最', '爱']\n" 848 | } 849 | ], 850 | "source": [ 851 | "sentences = [\"最终结果为1920.2333\", \"今天是2020.9.2\", \"Apple手机是我的最爱\"]\n", 852 | "for s in sentences:\n", 853 | " print(list(cut_hmm(s)))" 854 | ] 855 | }, 856 | { 857 | "cell_type": "markdown", 858 | "metadata": {}, 859 | "source": [ 860 | "为了处理这个问题,我们需要把连续的、不在词典中的非汉字字符提取出来。" 861 | ] 862 | }, 863 | { 864 | "cell_type": "code", 865 | "execution_count": 18, 866 | "metadata": { 867 | "tags": [] 868 | }, 869 | "outputs": [ 870 | { 871 | "output_type": "stream", 872 | "name": "stdout", 873 | "text": "['最终', '结果', '为', '1920.2333']\n['今天', '是', '2020.9.2']\n['Apple', '手机', '是', '我', '的', '最', '爱']\n" 874 | } 875 | ], 876 | "source": [ 877 | "# 用于提取连续的汉字部分\n", 878 | "re_han = re.compile(\"([\\u4E00-\\u9FD5]+)\")\n", 879 | "# 用于分割连续的非汉字部分\n", 880 | "re_skip = re.compile(\"([a-zA-Z0-9\\.]+(?:\\.\\d+)?%?)\")\n", 881 | "\n", 882 | "def cut_regx_hmm(sentence):\n", 883 | " blocks = re_han.split(sentence)\n", 884 | " for block in blocks:\n", 885 | " if not block:\n", 886 | " continue\n", 887 | " if re_han.match(block):\n", 888 | " yield from cut_hmm(block)\n", 889 | " else:\n", 890 | " for ss in re_skip.split(block):\n", 891 | " if ss:\n", 892 | " yield ss\n", 893 | "\n", 894 | "for s in sentences:\n", 895 | " print(list(cut_regx_hmm(s))) \n" 896 | ] 897 | }, 898 | { 899 | "source": [ 900 | "## Putting them together.\n" 901 | ], 902 | "cell_type": "markdown", 903 | "metadata": {} 904 | }, 905 | { 906 | "cell_type": "code", 907 | "execution_count": 19, 908 | "metadata": {}, 909 | "outputs": [ 910 | { 911 | "output_type": "execute_result", 912 | "data": { 913 | "text/plain": "['程序员',\n '祝',\n '海林',\n '和',\n '朱会震',\n '是',\n '在',\n '孙健',\n '的',\n '左面',\n '和',\n '右面',\n ',',\n ' ',\n '范凯',\n '在',\n '最',\n '右面',\n '。',\n '再往',\n '左',\n '是',\n '李松洪']" 914 | }, 915 | "metadata": {}, 916 | "execution_count": 19 917 | } 918 | ], 919 | "source": [ 920 | "import re\n", 921 | "import pickle\n", 922 | "\n", 923 | "from math import log\n", 924 | "\n", 925 | 
"class ChineseTokenizer(object):\n", 926 | "\n", 927 | " re_han_default = re.compile(\"([\\u4E00-\\u9FD5a-zA-Z0-9+#&\\._%\\-]+)\", re.U)\n", 928 | " re_skip_default = re.compile(\"(\\r\\n|\\s)\", re.U)\n", 929 | "\n", 930 | " # 用于提取连续的汉字部分\n", 931 | " re_han = re.compile(\"([\\u4E00-\\u9FD5]+)\")\n", 932 | " # 用于分割连续的非汉字部分\n", 933 | " re_skip = re.compile(\"([a-zA-Z0-9\\.]+(?:\\.\\d+)?%?)\")\n", 934 | "\n", 935 | " MIN_FLOAT = -3.14e100\n", 936 | "\n", 937 | " @staticmethod\n", 938 | " def get_prefix_dict(f_name):\n", 939 | " lfreq = {}\n", 940 | " ltotal = 0\n", 941 | " f = open(f_name)\n", 942 | " for lineno, line in enumerate(f, 1):\n", 943 | " try:\n", 944 | " line = line.strip()\n", 945 | " word, freq = line.split(' ')[:2]\n", 946 | " freq = int(freq)\n", 947 | " lfreq[word] = freq\n", 948 | " ltotal += freq\n", 949 | " for ch in range(len(word)):\n", 950 | " wfrag = word[:ch + 1]\n", 951 | " if wfrag not in lfreq:\n", 952 | " lfreq[wfrag] = 0\n", 953 | " except ValueError:\n", 954 | " raise ValueError(\n", 955 | " 'invalid dictionary entry in %s at Line %s: %s' % (f_name, lineno, line))\n", 956 | " f.close()\n", 957 | " return lfreq, ltotal\n", 958 | "\n", 959 | " def __init__(self):\n", 960 | " self.freq, self.total = self.get_prefix_dict(\"./assets/dict.txt\") # 前缀词典 \n", 961 | " self.prob_start = pickle.load(open(\"./assets/prob_start.p\", \"rb\")) # 初始概率参数\n", 962 | " self.prob_emit = pickle.load(open(\"./assets/prob_emit.p\", \"rb\")) # 发射概率\n", 963 | " self.prob_trans = pickle.load(open(\"./assets/prob_trans.p\", \"rb\")) # 状态转移概率\n", 964 | "\n", 965 | " def cut(self, sentence):\n", 966 | " blocks = self.re_han_default.split(sentence)\n", 967 | " for blk in blocks:\n", 968 | " # 处理空字符串\n", 969 | " if not blk:\n", 970 | " continue\n", 971 | " if self.re_han_default.match(blk):\n", 972 | " # 处理子句\n", 973 | " for word in self.cut_block(blk):\n", 974 | " yield word\n", 975 | " else:\n", 976 | " # 处理标点符号、空格等等\n", 977 | " tmp = self.re_skip_default.split(blk)\n", 978 | " for x in tmp:\n", 979 | " if self.re_skip_default.match(x):\n", 980 | " # 空格、制表符、换行等一起返回\n", 981 | " yield x\n", 982 | " else:\n", 983 | " # 标点符号等分割成字符返回\n", 984 | " for xx in x:\n", 985 | " yield xx\n", 986 | "\n", 987 | "\n", 988 | " def cut_block(self, sentence):\n", 989 | " DAG = self.get_DAG(sentence)\n", 990 | " route = self.clac(sentence, DAG)\n", 991 | " x = 0\n", 992 | " buf = ''\n", 993 | " N = len(sentence)\n", 994 | " while x < N:\n", 995 | " y = route[x][1] + 1\n", 996 | " l_word = sentence[x:y]\n", 997 | "\n", 998 | " # 如果当前为一个字符,加入buffer待HMM进一步分词\n", 999 | " if y - x == 1:\n", 1000 | " buf += l_word\n", 1001 | " else:\n", 1002 | " # 对当前buffer进行分词\n", 1003 | " if buf:\n", 1004 | " # 当前buffer只有一个字符,直接yield\n", 1005 | " if len(buf) == 1:\n", 1006 | " yield buf\n", 1007 | " buf = ''\n", 1008 | " else:\n", 1009 | " # 这里加了一层判断,如果词典中存在和当前buffer相同的词,则不需要再用HMM进行切分了。\n", 1010 | " if not self.freq.get(buf):\n", 1011 | " # 讲buffer送入HMM进行分词\n", 1012 | " recognized = self.cut_regx_hmm(buf)\n", 1013 | " for t in recognized:\n", 1014 | " yield t\n", 1015 | " else:\n", 1016 | " for elem in buf:\n", 1017 | " yield elem\n", 1018 | " buf = ''\n", 1019 | " yield l_word\n", 1020 | " x = y\n", 1021 | "\n", 1022 | " # 跳出循环后,可能还有待处理的buffer,进行处理\n", 1023 | " if buf:\n", 1024 | " if len(buf) == 1:\n", 1025 | " yield buf\n", 1026 | " elif not self.freq.get(buf):\n", 1027 | " recognized = self.cut_regx_hmm(buf)\n", 1028 | " for t in recognized:\n", 1029 | " yield t\n", 1030 | " else:\n", 1031 | " for elem in buf:\n", 1032 | " 
yield elem\n", 1033 | " \n", 1034 | " def cut_regx_hmm(self, sentence):\n", 1035 | " blocks = self.re_han.split(sentence)\n", 1036 | " for block in blocks:\n", 1037 | " if not block:\n", 1038 | " continue\n", 1039 | " if self.re_han.match(block):\n", 1040 | " yield from self.cut_hmm(block)\n", 1041 | " else:\n", 1042 | " for ss in self.re_skip.split(block):\n", 1043 | " if ss:\n", 1044 | " yield ss\n", 1045 | "\n", 1046 | " def cut_hmm(self, sentence):\n", 1047 | " prob, pos_list = self.viterbi(sentence, 'BMES')\n", 1048 | " begin, nexti = 0, 0\n", 1049 | " # print pos_list, sentence\n", 1050 | " for i, char in enumerate(sentence):\n", 1051 | " pos = pos_list[i]\n", 1052 | " if pos == 'B':\n", 1053 | " begin = i\n", 1054 | " elif pos == 'E':\n", 1055 | " yield sentence[begin:i + 1]\n", 1056 | " nexti = i + 1\n", 1057 | " elif pos == 'S':\n", 1058 | " yield char\n", 1059 | " nexti = i + 1\n", 1060 | " if nexti < len(sentence):\n", 1061 | " yield sentence[nexti:]\n", 1062 | "\n", 1063 | " def viterbi(self, obs, states):\n", 1064 | " V = [{}] # tabular\n", 1065 | " path = {}\n", 1066 | " for y in states: # init\n", 1067 | " V[0][y] = self.prob_start[y] + self.prob_emit[y].get(obs[0], self.MIN_FLOAT)\n", 1068 | " path[y] = [y]\n", 1069 | " for t in range(1, len(obs)):\n", 1070 | " V.append({})\n", 1071 | " newpath = {}\n", 1072 | " for y in states:\n", 1073 | " em_p = self.prob_emit[y].get(obs[t], self.MIN_FLOAT)\n", 1074 | " (prob, state) = max(\n", 1075 | " [(V[t - 1][y0] + self.prob_trans[y0].get(y, self.MIN_FLOAT) + em_p, y0) for y0 in states])\n", 1076 | " V[t][y] = prob\n", 1077 | " newpath[y] = path[state] + [y]\n", 1078 | " path = newpath\n", 1079 | "\n", 1080 | " (prob, state) = max((V[len(obs) - 1][y], y) for y in 'ES')\n", 1081 | "\n", 1082 | " return (prob, path[state])\n", 1083 | "\n", 1084 | " def get_DAG(self, sentence):\n", 1085 | " DAG = {}\n", 1086 | " N = len(sentence)\n", 1087 | " for k in range(N):\n", 1088 | " tmplist = []\n", 1089 | " i = k\n", 1090 | " frag = sentence[k]\n", 1091 | " while i < N and frag in self.freq:\n", 1092 | " if self.freq[frag]:\n", 1093 | " tmplist.append(i)\n", 1094 | " i += 1\n", 1095 | " frag = sentence[k:i + 1]\n", 1096 | " if not tmplist:\n", 1097 | " tmplist.append(k)\n", 1098 | " DAG[k] = tmplist\n", 1099 | " return DAG\n", 1100 | "\n", 1101 | " def clac(self, sentence, DAG):\n", 1102 | " n = len(sentence)\n", 1103 | " route = {n: (0, 0)}\n", 1104 | " log_total = log(self.total)\n", 1105 | "\n", 1106 | " for i in range(n-1, -1, -1):\n", 1107 | " cache = []\n", 1108 | " for j in DAG[i]:\n", 1109 | " log_p = log(self.freq.get(sentence[i:j+1], 0) or 1)\n", 1110 | " cache.append((log_p - log_total + route[j+1][0], j))\n", 1111 | " route[i] = max(cache)\n", 1112 | " return route \n", 1113 | "\n", 1114 | "\n", 1115 | "sentence1 = \"程序员祝海林和朱会震是在孙健的左面和右面, 范凯在最右面。再往左是李松洪\"\n", 1116 | "tokenizer = ChineseTokenizer()\n", 1117 | "list(tokenizer.cut(sentence1))" 1118 | ] 1119 | }, 1120 | { 1121 | "cell_type": "markdown", 1122 | "metadata": {}, 1123 | "source": [ 1124 | "## 参考\n", 1125 | "- [Github: jieba](https://github.com/fxsjy/jieba)\n", 1126 | "- [Github: sacremoses](https://github.com/alvations/sacremoses)\n", 1127 | "- [知乎:jieba分词的原理](https://zhuanlan.zhihu.com/p/189410443)\n", 1128 | "\n" 1129 | ] 1130 | } 1131 | ] 1132 | } -------------------------------------------------------------------------------- /tutorials/Chapter2/ChineseTokenizer.md: -------------------------------------------------------------------------------- 1 | 2 | # 中文分词 3 | 
与大部分印欧语系的语言不同,中文在词与词之间没有任何空格之类的显示标志指示词的边界。因此,中文分词是很多自然语言处理系统中的基础模块和首要环节。 4 | 5 | 6 | ## 中文分词基本原理 7 | 从20世纪80年代或更早的时候起,学者们研究了很多的分词方法,这些方法大致可以分为三大类: 8 | 9 | 基于词表的分词方法 10 | - 正向最大匹配法(forward maximum matching method, FMM) 11 | - 逆向最大匹配法(backward maximum matching method, BMM) 12 | - N-最短路径方法 13 | 基于统计模型的分词方法 14 | - 基于N-gram语言模型的分词方法 15 | 基于序列标注的分词方法 16 | - 基于HMM的分词方法 17 | - 基于CRF的分词方法 18 | - 基于词感知机的分词方法 19 | - 基于深度学习的端到端的分词方法 20 | 21 | 在这里只介绍jieba分词用到的**基于N-gram语言模型的分词方法**和**基于HMM的分词方法** 22 | 23 | 24 | 25 | ### 基于N-gram语言模型的分词方法 26 | 假设随机变量S为一个汉字序列,W是S上所有可能的切分路径。对于分词,实际上就是求解使条件概率P(W∣S)最大的切分路径W∗,即 27 | $$ 28 | W^*=\mathop{\arg\max}\limits_{W}P(W|S) 29 | $$ 30 | 根据贝叶斯公式 31 | $$ 32 | W*=\mathop{\arg\max}\limits_{W}\frac{P(W)P(S|W)}{P(S)} 33 | $$ 34 | 由于P(S)为归一化因子,P(S∣W)恒为1,因此只需要求解P(W)。P(W)使用N-gram语言模型建模,定义如下(以Bi-gram为例): 35 | $$ 36 | P(W)=P(w_0,w_1,...w_n)=P(w_0)P(w_1|w_0)P(w_2|w_1)...P(w_n|w_{n-1})=P(w_0)\prod \limits_{t=1}^nP(w_n|w_{n-1}) 37 | $$ 38 | 至此,各切分路径的好坏程度(条件概率P(W∣S))可以求解。简单的,可以根据DAG枚举全路径,暴力求解最优路径;也可以使用动态规划的方法求解,jieba中不带HMM新词发现的分词,就是DAG + Uni-gram的语言模型 + 后向DP的方式进行的。 39 | 40 | 41 | 42 | ### 基于HMM的分词方法 43 | 该方法属于由字构词的分词方法,由字构词的分词方法思想并不复杂,它是将分词问题转化为字的分类问题(序列标注问题)。从某些层面讲,由字构词的方法并不依赖于事先编制好的词表,但仍然需要分好词的训练语料。 44 | 45 | 规定每个字有4个词位: 46 | - 词首 B 47 | - 词中 M 48 | - 词尾 E 49 | - 单字成词 S 50 | 51 | ![序列标注问题示例](./assets/序列标注问题.png) 52 | 53 | 由于HMM是一个生成式模型,X为观测序列,Y为隐序列。基于HMM的两个假设 54 | - 齐次马尔科夫性假设,即假设隐藏的马尔科夫链在任意时刻t的状态只依赖于其前一时刻的状态,与其它时刻的状态及观测无关,也与时刻t无关; 55 | - 观测独立性假设,即假设任意时刻的观测只依赖于该时刻的马尔科夫链的状态,与其它观测和状态无关, 56 | 57 | HMM模型中的五元组表示: 58 | - 观测序列 59 | - 隐藏状态序列 60 | - 状态初始概率 61 | - 状态转移概率 62 | - 状态发射概率 63 | 64 | 最的模型为: 65 | $$ 66 | P(X, Y)=P(y_0)P(y_0|x_0)\prod \limits_{t=1}^nP(y_t|y_{t-1})P(x_t|y_t) 67 | $$ 68 | ![HMM模型](./assets/HMM模型.png) 69 | 70 | 其中X为观测序列,Y为隐藏状态序列(B,M,E,S),$P(y_0)$位状态初始概率,$P(y_t|y_{t-1})$为状态转移概率,$P(x_t|y_t)$为状态发射概率。 71 | 72 | HMM模型有三个基本问题: 73 | 74 | - 概率计算问题,HMM的五元组,计算在模型下给定隐藏序列Y,计算观测序列X出现的概率也就是Forward-backward算法; 75 | 76 | - 学习问题,已知观测序列{X},隐藏序列{Y} ,估计模型的状态初始概率,状态转移概率和状态发射概率 ,使得在该模型下观测序列X的概率尽可能的大,即用极大似然估计的方法估计参数; 77 | 78 | - 预测问题,也称为解码问题,已知模型状态初始概率,状态转移概率和状态发射概率和观测序列X,求最大概率的隐藏序列Y。 79 | 80 | 其中,jieba分词主要中主要涉及第三个问题,也即预测问题。计算方法会涉及到维特比算法,这个后面会结合代码讲到。 81 | 82 | 83 | ## jieba分词 84 | 下面我们以jieba分词为例,结合上满介绍的原理和代码介绍一下分词的内部原理,并参考jieba分词源码给出一个简单的实现版本。 85 | jieba的分词过程可以概括为以下几个步骤 86 | - 依据统计词典(模型中这部分已经具备,也可自定义加载)构建统计词典中词的前缀词典。 87 | - 对输入的内容按照子句进行分割(使用正则表达式,以标点符号或者非中文字符为分界)。 88 | - 依据前缀词典对输入的句子进行DAG(有向无环图)的构造。 89 | - 使用动态规划的方法在DAG上找到一条概率最大路径,依据此路径进行分词。 90 | - 对于未收录词(是指不在统计词典中出现的词,未收录词怎么识别可以看完第三部分之后思考一下),使用HMM(隐马尔克夫模型)模型,用Viterbi(维特比)算法找出最可能出现的隐状态序列。 91 | 92 | ![jieba分词算法流程图](./assets/jieba分词算法流程图.jpg) 93 | 94 | 95 | ### 生成前缀词典 96 | 统计词典在jieba包的dict.txt文件中,是开发者已经统计好的词典,第一列代表的是词语,第二列是词频,第三列是词性,我们主要用到前两列信息,词性这部分,这里没有涉及。我们先看一下词典中的部分内容: 97 | 98 | 99 | 100 | ```python 101 | import random 102 | 103 | with open("assets/dict.txt") as f: 104 | lines = f.readlines() 105 | for line in random.choices(lines, k=5): 106 | print(line) 107 | ``` 108 | 109 | 孟玉楼 3 nr 110 | 111 | 斗蓬 3 ns 112 | 113 | 铁法官 3 n 114 | 115 | 羿射九日 3 nr 116 | 117 | 占金丰 4 nr 118 | 119 | 120 | 121 | 当程序运行的时候,它会加载统计词典生成前缀词典,前缀词典是表示什么的呢,我们举个简单的例子。 122 | 123 | 比如统计词典中含有如下词语 124 | ``` 125 | 我 123 126 | 在 234 127 | 学习 456 128 | 结巴 345 129 | 分词 456 130 | 结巴分词 23 131 | 学 2344 132 | 分 23 133 | 结 234 134 | ``` 135 | 则前缀词典构造如下,它是将在统计词典中出现的每一个词的每一个前缀提取出来,统计词频,如果某个前缀词在统计词典中没有出现,词频统计为0,如果这个前缀词已经统计过,则不再重复。 136 | ``` 137 | 我 123 138 | 在 234 139 | 学 2344 140 | 学习 456 141 | 结 234 142 | 结巴 345 143 | 结巴分 0 144 | 结巴分词 23 145 | 分 23 146 | 分词 456 147 | ``` 148 | 
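为了直观地验证上面这张前缀词典表,下面给出一段仅针对这个小词典、在内存中构造前缀词典的简化示意代码(其中 `toy_dict` 是为演示而假设的数据,并非 jieba 的实际实现;真正从 dict.txt 读取并构造前缀词典的完整代码见下文):

```python
# 简化示意:用上面列出的小词典在内存中构造前缀词典
toy_dict = {"我": 123, "在": 234, "学习": 456, "结巴": 345,
            "分词": 456, "结巴分词": 23, "学": 2344, "分": 23, "结": 234}

prefix_freq = {}
for word, freq in toy_dict.items():
    prefix_freq[word] = freq                 # 词本身记录真实词频
    for i in range(1, len(word)):
        prefix_freq.setdefault(word[:i], 0)  # 未收录的前缀词频记为 0,且不覆盖已有统计

for w, f in prefix_freq.items():
    print(w, f)
```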
这里把未出现的统计词也统计出来,且词频统计为0,是为了后面构造DAG方便。生成前缀词典的代码如下,在jieba分词中前缀词典一般会进行缓存,不需要每次分词都重新加载。 149 | 150 | 151 | ```python 152 | def get_prefix_dict(f_name): 153 | lfreq = {} 154 | ltotal = 0 155 | f = open(f_name) 156 | for lineno, line in enumerate(f, 1): 157 | try: 158 | line = line.strip() 159 | word, freq = line.split(' ')[:2] 160 | freq = int(freq) 161 | lfreq[word] = freq 162 | ltotal += freq 163 | for ch in range(len(word)): 164 | wfrag = word[:ch + 1] 165 | if wfrag not in lfreq: 166 | lfreq[wfrag] = 0 167 | except ValueError: 168 | raise ValueError( 169 | 'invalid dictionary entry in %s at Line %s: %s' % (f_name, lineno, line)) 170 | f.close() 171 | return lfreq, ltotal 172 | 173 | freq, total = get_prefix_dict("assets/dict.txt") 174 | print("生成前缀词典的大小为{}。".format(total)) 175 | ``` 176 | 177 | 生成前缀词典的大小为60102007。 178 | 179 | 180 | ### 分割子句 181 | 假如我们要对`"我爱结巴分词。我叫孙悟空,我爱北京,我爱Python和C++。 《机器翻译》这本书是我的最爱。"`这句话进行分词,我们首先要把它们划分为子句,第一个原因是标点符号是天然的词语间隔,我们的词语中不会包含标点符号。第二个原因是我们的词典中可能没有包含标点符号的内容,我们应当以这些非中文字符、标点字符作为分界,将输入内容划分为子句,对每个子句进行分词。 182 | 183 | 一个可行的实现方法是列举所有在中文词典中可能会出现的字符,将连续出现的这些字符作为一个子句进行划分,这些字符之外的其他符号,我们便可以认为是中文标点符号,并把他们作为子句划分标志。我们可以简单的使用正则表达式来完成,出现在中文词典中的字符可能是中文字符、阿拉伯数字、英文字母、+=.等部分英文数字标点。 184 | 185 | 186 | 187 | ```python 188 | import re 189 | 190 | example = "我爱结巴分词。我叫孙悟空,我爱北京,我爱Python和C++。 《机器翻译》这本书是我的最爱。" 191 | 192 | # 列举所有中文词中可能包含的字符 193 | re_han_default = re.compile("([\u4E00-\u9FD5a-zA-Z0-9+#&\._%\-]+)", re.U) 194 | 195 | # 将连续出现的合法字符作为一个子句的划分 196 | blocks = re_han_default.split(example) 197 | 198 | print(blocks) 199 | 200 | ``` 201 | 202 | ['', '我爱结巴分词', '。', '我叫孙悟空', ',', '我爱北京', ',', '我爱Python和C++', '。 《', '机器翻译', '》', '这本书是我的最爱', '。'] 203 | 204 | 205 | 我们看到我们已经将整句话分割成子句,每个子句中不再包含标点符号。对于标点符号部分,单独的标点符号我们可以将它直接作为一个单词,而对于`'。 《'`这种情况,我们可以用空白字符\t\r\n将它们进一步分开。 206 | 207 | 208 | 209 | ```python 210 | re_skip_default = re.compile("(\r\n|\s)", re.U) 211 | 212 | example = "。 《" 213 | 214 | words = re_skip_default.split(example) 215 | 216 | print(words) 217 | ``` 218 | 219 | ['。', ' ', '《'] 220 | 221 | 222 | ### 构造DAG 223 | 我们来讲解一下程序里是怎么存储“DAG”的,程序实现图的构建是存储为字典形式的,以每个字所在的位置为键值key,相应划分的末尾位置构成的列表为value,相应划分的末尾位置指的是什么呢,我们举例来说明 224 | 225 | “我在学习结巴分词” 226 | 在这句话里,我们将每一个字用所在位置来代表,比如0代表“我”,4代表“结”,针对“结”,我们可以在前缀词典里看到以“结”为前缀的词“结”,“结巴”,“结巴分词”的词频大于0,因此“结”,“巴”,“词”为相应划分的末尾位置,因此键值4的value为[4,5,7],其他键值的对应value统计如下 227 | ``` 228 | 0 :[0] 229 | 1 :[1] 230 | 2 :[2,3] 231 | 3 :[3] 232 | 4 :[4,5,7] 233 | 5 :[5] 234 | 6 :[6,7] 235 | 7 :[7] 236 | ``` 237 | 注:每一个字都将其自己作为相应划分的末尾位置,即使这个字不在统计词典里。 238 | 239 | 基于以上构建的键值对,我们将有向图可视化一下,以便方便理解。 240 | ![DAG](./assets/DAG.jpg) 241 | 242 | 从“我”到“词”的路径有以下10种 243 | ``` 244 | 我/在/学/习/结/巴/分/词 245 | 我/在/学习/结巴分词 246 | 我/在/学习/结/巴/分/词 247 | 我/在/学习/结巴/分词 248 | 我/在/学习/结/巴/分词 249 | 我/在/学习/结巴/分/词 250 | 我/在/学/习/结/巴/分词 251 | 我/在/学/习/结巴/分/词 252 | 我/在/学/习/结巴分词 253 | 我/在/学/习/结巴/分词 254 | ``` 255 | 256 | 257 | 258 | 259 | ```python 260 | def get_DAG(sentence, freq): 261 | DAG = {} 262 | N = len(sentence) 263 | for k in range(N): 264 | tmplist = [] 265 | i = k 266 | frag = sentence[k] 267 | while i < N and frag in freq: 268 | if freq[frag]: 269 | tmplist.append(i) 270 | i += 1 271 | frag = sentence[k:i + 1] 272 | if not tmplist: 273 | tmplist.append(k) 274 | DAG[k] = tmplist 275 | return DAG 276 | 277 | example = "我在学习结巴分词" 278 | dag = get_DAG(example, freq) 279 | print(dag) 280 | ``` 281 | 282 | {0: [0], 1: [1], 2: [2, 3], 3: [3], 4: [4, 5, 7], 5: [5], 6: [6, 7], 7: [7]} 283 | 284 | 285 | ### 动态规划找到最大路径 286 | 接下来我们需要计算上面10条路径那一条的可能性最大,将可能性最大的那条路径对应的划分作为我们的分词结果。 287 | $$ 288 | 
W^*=\mathop{\arg\min}\limits_{W}P(W) 289 | $$ 290 | 其中$W$为句子的一个划分,${w_1,w_2,...wn}$ 291 | $$ 292 | P(W)=P(w_1,w_2...wn)=\prod \limits_{i=0}^nP(w_n) 293 | $$ 294 | 295 | 每一个词出现的概率等于该词在前缀里的词频除以所有词的词频之和。如果词频为0或是不存在,当做词频为1来处理。 296 | $$ 297 | P(w_n)=\frac{freq[w_n]+1}{total} 298 | $$ 299 | 300 | 这里会取对数概率,即在每个词概率的基础上取对数,一是为了防止下溢,二后面的概率相乘可以变成相加计算。 301 | 302 | 最后我们使用动态规划算法算出概率最大的路径。 303 | 304 | 305 | 306 | 307 | ```python 308 | from math import log 309 | 310 | def clac(sentence, DAG, freq, total): 311 | n = len(sentence) 312 | route = {n: (0, 0)} 313 | log_total = log(total) 314 | 315 | for i in range(n-1, -1, -1): 316 | cache = [] 317 | for j in DAG[i]: 318 | log_p = log(freq.get(sentence[i:j+1], 0) or 1) 319 | cache.append((log_p - log_total + route[j+1][0], j)) 320 | route[i] = max(cache) 321 | return route 322 | 323 | route = clac(example, dag, freq, total) 324 | print(route) 325 | 326 | 327 | ``` 328 | 329 | {8: (0, 0), 7: (-9.257210763727148, 7), 6: (-14.967114814124178, 7), 5: (-24.384334710144643, 5), 4: (-14.222674339176683, 7), 3: (-25.03090606994119, 3), 2: (-22.62511739105392, 3), 1: (-27.038731622224248, 1), 0: (-32.24695578526084, 0)} 330 | 331 | 332 | 通过上面的计算结果,`route`中的key代表最优路径中当前词的起始位置,value的第二个元素代表最优路径中当前词的末尾位置,通过这两个量我们可以推出一个初步的基于词典和词频的分词结果。 333 | 334 | 335 | ```python 336 | def cut_no_hmm(sentence, route): 337 | i = 0 338 | while(i < len(route)-1): 339 | j = route[i][1] 340 | yield sentence[i:j+1] 341 | i = j + 1 342 | 343 | for word in cut_no_hmm(example, route): 344 | print(word) 345 | ``` 346 | 347 | 我 348 | 在 349 | 学习 350 | 结巴分词 351 | 352 | 353 | ### HMM算法对于未登录词的识别 354 | 在jieba分词中,基于HMM的分词主要是作为基于Uni—gram分词的一个补充,主要是解决OOV(out of vocabulary)问题的,它的作用是对未登录词典的词进行识别发现。我们首先用一个例子说明HMM的重要性。比如我们要对一个包含人名的句子进行分词,“韩冰是个好人”。“韩冰”这个词不在词典之中,所以前面基于词典+Uni-Gram语言模型的方法进行分词就会将“韩冰”这个人名分成“韩”+“冰”。所以我们需要一个有一定泛化能力的机器学习模型对这些新词进行发现。 355 | 356 | 357 | ```python 358 | example = "韩冰是个好人" 359 | dag = get_DAG(example, freq) 360 | route = clac(example, dag, freq, total) 361 | 362 | list(cut_no_hmm(example, route)) 363 | ``` 364 | 365 | 366 | 367 | 368 | ['韩', '冰', '是', '个', '好人'] 369 | 370 | 371 | 372 | 使用HMM进行分词的原理在前面已经介绍过了。利用HMM模型进行分词,主要是将分词问题视为一个序列标注(sequence labeling)问题,其中,句子为观测序列,分词结果为状态序列。首先通过语料训练出HMM相关的模型,然后利用Viterbi算法进行求解,最终得到最优的状态序列,然后再根据状态序列,输出分词结果。 373 | 374 | 这里的状态序列的元素有四种 375 | - "B":Begin(这个字处于词的开始位置) 376 | - "M":Middle(这个字处于词的中间位置) 377 | - "E":End(这个字处于词的结束位置) 378 | - "S":Single(这个字是单字成词)} 379 | 380 | 由于分词算法术语HMM的预测问题(已知模型状态初始概率,状态转移概率和状态发射概率和观测序列X,求最大概率的隐藏序列Y),所以我们需要在已经进行标注的数据集上训练我们模型的参数,也就是初始概率,状态转移概率和状态发射概率。这里jieba分词中包含了一个已经训练好的模型,至于模型数据来源和训练方法,这里不再赘述,可以参考[模型的数据是如何生成的?]https://github.com/fxsjy/jieba/issues/7 。这里我们直接将模型加载进来用。 381 | 382 | 383 | ```python 384 | import pickle 385 | import math 386 | 387 | prob_start = pickle.load(open("./assets/prob_start.p", "rb")) # 初始概率参数 388 | prob_emit = pickle.load(open("./assets/prob_emit.p", "rb")) # 发射概率 389 | prob_trans = pickle.load(open("./assets/prob_trans.p", "rb")) # 状态转移概率 390 | ``` 391 | 392 | 首先是初始概率,及输入观察序列(带分词句子)首个字符是"B","M", "E", "S"的概率(这里的概率也进行了对数运算及log(p))。由这个概率值可以看出,句子首字单字成词(S)和作为词的词首(B)的概率较高,作为词中和词尾概率为0,也比较符合我们的常识。 393 | 394 | 395 | ```python 396 | # 为了直观,将log概率转化为真实概率 397 | {key:math.exp(value) for key, value in prob_start.items()} 398 | ``` 399 | 400 | 401 | 402 | 403 | {'B': 0.7689828525554734, 'E': 0.0, 'M': 0.0, 'S': 0.2310171474445266} 404 | 405 | 406 | 407 | 接下来是状态转移概率,及"B","M", "E", "S"四个状态之间相互转化的概率。 408 | 409 | 410 | ```python 411 | {key: {k: math.exp(v) for k, v in value.items()} for key, value in prob_trans.items()} 412 | ``` 
413 | 414 | 415 | 416 | 417 | {'B': {'E': 0.6000000000000004, 'M': 0.4}, 418 | 'E': {'B': 0.5544853051164425, 'S': 0.44551469488355755}, 419 | 'M': {'E': 0.7164487459986911, 'M': 0.2835512540013088}, 420 | 'S': {'B': 0.48617017333894563, 'S': 0.5138298266610544}} 421 | 422 | 423 | 424 | 最后是发射概率,即在观测序列是某个字的情况下,被标注为"B","M", "E", "S"的概率 425 | 426 | 427 | ```python 428 | # 由于这个表比较大,所以随机挑选一些出来看 429 | {key: {k: math.exp(v) for i, (k, v) in enumerate(value.items()) if i < 5} for key, value in prob_emit.items()} 430 | 431 | ``` 432 | 433 | 434 | 435 | 436 | {'B': {'一': 0.025874486447195644, 437 | '丁': 0.0002960323136559398, 438 | '七': 0.0004026703175442123, 439 | '万': 0.0018186831560606151, 440 | '丈': 0.00014100868588615948}, 441 | 'E': {'一': 0.002369710374262949, 442 | '丁': 0.000114401037236071, 443 | '七': 0.00010115647270757471, 444 | '万': 0.00047351540431744344, 445 | '丈': 0.00012050479628052327}, 446 | 'M': {'一': 0.01193645010412285, 447 | '丁': 0.00035872815397116633, 448 | '七': 0.001416288550382968, 449 | '万': 0.0021550909026310924, 450 | '丈': 8.165936412282943e-05}, 451 | 'S': {'∶': 1.3353987946490163e-07, 452 | '一': 0.007272247985959882, 453 | '丁': 0.00012041958630747509, 454 | '丂': 6.67699397324508e-08, 455 | '七': 0.00025622964372327994}} 456 | 457 | 458 | 459 | 有了模型,接下来就可以用viterbi算法对给定的序列进行分词。还拿上面的例子举例 "韩冰是个好人" -> \['韩', '冰', '是', '个', '好人'\],对于已经成词的部分“好人”,我们不需要对它进行计算了,我们只需要将还是单个字的序列“韩冰是个”放入到HMM模型中进行分词,也就是将这四个字分别打上 “BEMS”标签。并且我们期望的标签是\['韩'->B, '冰'->M, '是'->S, '个'->S\]。首先我们简单介绍一下维特比算法。 460 | 461 | #### Viterbi算法 462 | viterbi维特比算法解决的是篱笆型的图的最短路径问题,图的节点按列组织,每列的节点数量可以不一样,每一列的节点只能和相邻列的节点相连,不能跨列相连,节点之间有着不同的距离,距离的值就不在图上一一标注出来了,大家自行脑补。 463 | ![Viterbi算法](./assets/HMM分词篱笆型图.drawio.png) 464 | 465 | 过程非常简单: 466 | 467 | 为了找出Start到End之间的最短路径,我们先从Start开始从左到右一列一列地来看。首先起点是Start,从Start到“韩”字对应的状态列的路径有四种可能:Start-B、Start-E、Start-M,Start-S。对应的路径长度即 468 | 469 | ![viterbi_step1](./assets/viterbi_step1.drawio.png) 470 | 471 | 472 | 473 | ```python 474 | import sys 475 | 476 | MIN_FLOAT = -3.14e100 477 | start_2_B = prob_emit["B"].get("韩", MIN_FLOAT) + prob_start["B"] 478 | start_2_E = prob_emit["E"].get("韩", MIN_FLOAT) + prob_start["E"] 479 | start_2_M = prob_emit["M"].get("韩", MIN_FLOAT) + prob_start["M"] 480 | start_2_S = prob_emit["S"].get("韩", MIN_FLOAT) + prob_start["S"] 481 | 482 | print(start_2_B, start_2_E, start_2_M, start_2_S) 483 | ``` 484 | 485 | -8.093263409081425 -3.14e+100 -3.14e+100 -10.534873750321356 486 | 487 | 488 | 我们不能武断地说这四条路径中中的哪一段必定是全局最短路径中的一部分,目前为止任何一段都有可能是全局最优路径的备选项。我们继续往右看,到了“冰”这一列列。按照四个状态进行逐一分析,先看到达“冰”(B)节点的各个路径长度。 489 | 490 | ![viterbi_step2](./assets/viterbi_step2.drawio.png) 491 | 492 | 以上这四条路径,各节点距离加起来对比一下,我们就可以知道其中哪一条是最短的。因为Start-B-B是最短的,那么我们就知道了经过“冰”(B)的所有路径当中Start-B-B是最短的,其它三条路径路径都比Start-B-B长,绝对不是目标答案,可以大胆地删掉了。删掉了不可能是答案的路径,就是viterbi算法(维特比算法)的重点,因为后面我们再也不用考虑这些被删掉的路径了。现在经过“冰”(B)的所有路径只剩一条路径了(红色标识) 493 | 494 | 495 | 496 | 497 | ```python 498 | B_2_B = start_2_B + prob_trans["B"].get("B", MIN_FLOAT) + prob_emit["B"].get("冰", MIN_FLOAT) 499 | E_2_B = start_2_E + prob_trans["E"].get("B", MIN_FLOAT) + prob_emit["B"].get("冰", MIN_FLOAT) 500 | M_2_B = start_2_M + prob_trans["M"].get("B", MIN_FLOAT) + prob_emit["B"].get("冰", MIN_FLOAT) 501 | S_2_B = start_2_S + prob_trans["S"].get("B", MIN_FLOAT) + prob_emit["B"].get("冰", MIN_FLOAT) 502 | 503 | print(B_2_B, E_2_B, M_2_B, S_2_B) 504 | ``` 505 | 506 | -3.14e+100 -3.14e+100 -6.28e+100 -19.68864099798377 507 | 508 | 509 | 以此类推,我们可以分别找出到达“冰”字对应列的所有四个状态的最优路径。 510 | 511 | ![viterbi_step3](./assets/viterbi_step3.drawio.png) 512 | 513 | 
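在继续之前,这里给出一段简化的示意代码(假设上一个单元格中的 `start_2_B`、`start_2_E`、`start_2_M`、`start_2_S` 等变量仍然可用),把刚才只针对 B 状态的计算推广到“冰”字一列的全部四个状态,并为每个状态记录得分最高的来源状态——每列每个状态只保留一条最优路径,这正是维特比算法剪枝的关键:

```python
# 简化示意:计算到达“冰”字一列 B/E/M/S 四个状态的最优路径得分及其来源状态
prev_score = {"B": start_2_B, "E": start_2_E, "M": start_2_M, "S": start_2_S}
best = {}
for y in "BEMS":                                  # 当前列(“冰”)的状态
    em_p = prob_emit[y].get("冰", MIN_FLOAT)       # 发射概率
    score, src = max(
        (prev_score[y0] + prob_trans[y0].get(y, MIN_FLOAT) + em_p, y0)
        for y0 in "BEMS")                         # 枚举上一列(“韩”)的状态,取最大者
    best[y] = (score, src)
print(best)
```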
对后面的“是”,“个”也进行同样的操作,我们便可以得到一条全局最优路径。 514 | ![viterbi_step4](./assets/viterbi_step4.drawio.png) 515 | 516 | 517 | 518 | ```python 519 | def viterbi(obs, states, start_p, trans_p, emit_p): 520 | V = [{}] # tabular 521 | path = {} 522 | for y in states: # init 523 | V[0][y] = start_p[y] + emit_p[y].get(obs[0], MIN_FLOAT) 524 | path[y] = [y] 525 | for t in range(1, len(obs)): 526 | V.append({}) 527 | newpath = {} 528 | for y in states: 529 | em_p = emit_p[y].get(obs[t], MIN_FLOAT) 530 | (prob, state) = max( 531 | [(V[t - 1][y0] + trans_p[y0].get(y, MIN_FLOAT) + em_p, y0) for y0 in states]) 532 | V[t][y] = prob 533 | newpath[y] = path[state] + [y] 534 | path = newpath 535 | 536 | (prob, state) = max((V[len(obs) - 1][y], y) for y in 'ES') 537 | 538 | return (prob, path[state]) 539 | 540 | example = "韩冰是个" 541 | prob, path = viterbi(example, "BEMS", prob_start, prob_trans, prob_emit) 542 | 543 | for w, s in zip(example, path): 544 | print(w, "->", s) 545 | ``` 546 | 547 | 韩 -> B 548 | 冰 -> E 549 | 是 -> S 550 | 个 -> S 551 | 552 | 553 | 根据HMM输出的结果,我们可以将”韩“->B,”冰“->E合并成为一个新词”韩冰“。所以”韩冰是个好人“的分词结果就是['韩冰', '是', '个', '好人'] 554 | 555 | 556 | ```python 557 | def hmm(sentence, start_P, trans_P, emit_P): 558 | prob, pos_list = viterbi(sentence, 'BMES', start_P, trans_P, emit_P) 559 | begin, nexti = 0, 0 560 | # print pos_list, sentence 561 | for i, char in enumerate(sentence): 562 | pos = pos_list[i] 563 | if pos == 'B': 564 | begin = i 565 | elif pos == 'E': 566 | yield sentence[begin:i + 1] 567 | nexti = i + 1 568 | elif pos == 'S': 569 | yield char 570 | nexti = i + 1 571 | if nexti < len(sentence): 572 | yield sentence[nexti:] 573 | 574 | def cut_hmm(sentence): 575 | dag = get_DAG(sentence, freq) 576 | route = clac(sentence, dag, freq, total) 577 | i = 0 578 | buf = "" 579 | while(i < len(route)-1): 580 | j = route[i][1] + 1 581 | 582 | if j - i <= 1: 583 | buf += sentence[i] 584 | else: 585 | if buf: 586 | if len(buf) == 1: 587 | yield buf 588 | else: 589 | if buf not in freq: 590 | for w in hmm(buf, prob_start, prob_trans, prob_emit): 591 | yield w 592 | else: 593 | for w in buf: 594 | yield w 595 | buf = "" 596 | yield sentence[i:j] 597 | i = j 598 | 599 | if buf: 600 | if len(buf) == 1: 601 | yield buf 602 | buf = "" 603 | else: 604 | if buf not in freq: 605 | for w in hmm(buf, prob_start, prob_trans, prob_emit): 606 | yield w 607 | else: 608 | for w in buf: 609 | yield w 610 | 611 | example = "韩冰是个好人" 612 | for word in cut_hmm(example): 613 | print(word) 614 | ``` 615 | 616 | 韩冰 617 | 是 618 | 个 619 | 好人 620 | 621 | 622 | ### 正则表达式辅助分词 623 | 除了上述使用机器学习的方法进行分词之外,在我们翻译语料的分词过程中,经常会遇到一些特殊情况,比如日期、数字、英文单词或者其他符合某个特定规则的词语,在前面的操作中,我们将他们划分到了子句之中,因为词典中某些词也会出现这些字符。但是,对于未出现在词典中的英文、数字、符号组合,我们也希望强制把它们当做一个词进行处理,而不是将它们分开。它们通常很难添加到词典中(因为数字字母的排列组合往往是很大的),却很容易通过一些简单的正则表达式对他们进行处理。 624 | ```py 625 | 1920.2333 # 浮点数 626 | 2020.9.2 # 日期 627 | apple # 英文词 628 | ``` 629 | 我们来看看如果只用 词典+HMM的方式处理他们会怎么样 630 | 631 | 632 | ```python 633 | sentences = ["最终结果为1920.2333", "今天是2020.9.2", "Apple手机是我的最爱"] 634 | for s in sentences: 635 | print(list(cut_hmm(s))) 636 | ``` 637 | 638 | ['最终', '结果', '为', '1', '9', '2', '0', '.', '2', '3', '3', '3'] 639 | ['今天', '是', '2', '0', '2', '0', '.', '9', '.', '2'] 640 | ['A', 'p', 'p', 'l', 'e', '手机', '是', '我', '的', '最', '爱'] 641 | 642 | 643 | 为了处理这个问题,我们需要把连续的、不在词典中的非汉字字符提取出来。 644 | 645 | 646 | ```python 647 | # 用于提取连续的汉字部分 648 | re_han = re.compile("([\u4E00-\u9FD5]+)") 649 | # 用于分割连续的非汉字部分 650 | re_skip = re.compile("([a-zA-Z0-9\.]+(?:\.\d+)?%?)") 651 | 652 | def cut_regx_hmm(sentence): 653 | 
blocks = re_han.split(sentence) 654 | for block in blocks: 655 | if not block: 656 | continue 657 | if re_han.match(block): 658 | yield from cut_hmm(block) 659 | else: 660 | for ss in re_skip.split(block): 661 | if ss: 662 | yield ss 663 | 664 | for s in sentences: 665 | print(list(cut_regx_hmm(s))) 666 | 667 | ``` 668 | 669 | ['最终', '结果', '为', '1920.2333'] 670 | ['今天', '是', '2020.9.2'] 671 | ['Apple', '手机', '是', '我', '的', '最', '爱'] 672 | 673 | 674 | ## Putting them together. 675 | 676 | 677 | 678 | ```python 679 | import re 680 | import pickle 681 | 682 | from math import log 683 | 684 | class ChineseTokenizer(object): 685 | 686 | re_han_default = re.compile("([\u4E00-\u9FD5a-zA-Z0-9+#&\._%\-]+)", re.U) 687 | re_skip_default = re.compile("(\r\n|\s)", re.U) 688 | 689 | # 用于提取连续的汉字部分 690 | re_han = re.compile("([\u4E00-\u9FD5]+)") 691 | # 用于分割连续的非汉字部分 692 | re_skip = re.compile("([a-zA-Z0-9\.]+(?:\.\d+)?%?)") 693 | 694 | MIN_FLOAT = -3.14e100 695 | 696 | @staticmethod 697 | def get_prefix_dict(f_name): 698 | lfreq = {} 699 | ltotal = 0 700 | f = open(f_name) 701 | for lineno, line in enumerate(f, 1): 702 | try: 703 | line = line.strip() 704 | word, freq = line.split(' ')[:2] 705 | freq = int(freq) 706 | lfreq[word] = freq 707 | ltotal += freq 708 | for ch in range(len(word)): 709 | wfrag = word[:ch + 1] 710 | if wfrag not in lfreq: 711 | lfreq[wfrag] = 0 712 | except ValueError: 713 | raise ValueError( 714 | 'invalid dictionary entry in %s at Line %s: %s' % (f_name, lineno, line)) 715 | f.close() 716 | return lfreq, ltotal 717 | 718 | def __init__(self): 719 | self.freq, self.total = self.get_prefix_dict("./assets/dict.txt") # 前缀词典 720 | self.prob_start = pickle.load(open("./assets/prob_start.p", "rb")) # 初始概率参数 721 | self.prob_emit = pickle.load(open("./assets/prob_emit.p", "rb")) # 发射概率 722 | self.prob_trans = pickle.load(open("./assets/prob_trans.p", "rb")) # 状态转移概率 723 | 724 | def cut(self, sentence): 725 | blocks = self.re_han_default.split(sentence) 726 | for blk in blocks: 727 | # 处理空字符串 728 | if not blk: 729 | continue 730 | if self.re_han_default.match(blk): 731 | # 处理子句 732 | for word in self.cut_block(blk): 733 | yield word 734 | else: 735 | # 处理标点符号、空格等等 736 | tmp = self.re_skip_default.split(blk) 737 | for x in tmp: 738 | if self.re_skip_default.match(x): 739 | # 空格、制表符、换行等一起返回 740 | yield x 741 | else: 742 | # 标点符号等分割成字符返回 743 | for xx in x: 744 | yield xx 745 | 746 | 747 | def cut_block(self, sentence): 748 | DAG = self.get_DAG(sentence) 749 | route = self.clac(sentence, DAG) 750 | x = 0 751 | buf = '' 752 | N = len(sentence) 753 | while x < N: 754 | y = route[x][1] + 1 755 | l_word = sentence[x:y] 756 | 757 | # 如果当前为一个字符,加入buffer待HMM进一步分词 758 | if y - x == 1: 759 | buf += l_word 760 | else: 761 | # 对当前buffer进行分词 762 | if buf: 763 | # 当前buffer只有一个字符,直接yield 764 | if len(buf) == 1: 765 | yield buf 766 | buf = '' 767 | else: 768 | # 这里加了一层判断,如果词典中存在和当前buffer相同的词,则不需要再用HMM进行切分了。 769 | if not self.freq.get(buf): 770 | # 讲buffer送入HMM进行分词 771 | recognized = self.cut_regx_hmm(buf) 772 | for t in recognized: 773 | yield t 774 | else: 775 | for elem in buf: 776 | yield elem 777 | buf = '' 778 | yield l_word 779 | x = y 780 | 781 | # 跳出循环后,可能还有待处理的buffer,进行处理 782 | if buf: 783 | if len(buf) == 1: 784 | yield buf 785 | elif not self.freq.get(buf): 786 | recognized = self.cut_regx_hmm(buf) 787 | for t in recognized: 788 | yield t 789 | else: 790 | for elem in buf: 791 | yield elem 792 | 793 | def cut_regx_hmm(self, sentence): 794 | blocks = self.re_han.split(sentence) 795 | for block in 
blocks: 796 | if not block: 797 | continue 798 | if self.re_han.match(block): 799 | yield from self.cut_hmm(block) 800 | else: 801 | for ss in self.re_skip.split(block): 802 | if ss: 803 | yield ss 804 | 805 | def cut_hmm(self, sentence): 806 | prob, pos_list = self.viterbi(sentence, 'BMES') 807 | begin, nexti = 0, 0 808 | # print pos_list, sentence 809 | for i, char in enumerate(sentence): 810 | pos = pos_list[i] 811 | if pos == 'B': 812 | begin = i 813 | elif pos == 'E': 814 | yield sentence[begin:i + 1] 815 | nexti = i + 1 816 | elif pos == 'S': 817 | yield char 818 | nexti = i + 1 819 | if nexti < len(sentence): 820 | yield sentence[nexti:] 821 | 822 | def viterbi(self, obs, states): 823 | V = [{}] # tabular 824 | path = {} 825 | for y in states: # init 826 | V[0][y] = self.prob_start[y] + self.prob_emit[y].get(obs[0], self.MIN_FLOAT) 827 | path[y] = [y] 828 | for t in range(1, len(obs)): 829 | V.append({}) 830 | newpath = {} 831 | for y in states: 832 | em_p = self.prob_emit[y].get(obs[t], self.MIN_FLOAT) 833 | (prob, state) = max( 834 | [(V[t - 1][y0] + self.prob_trans[y0].get(y, self.MIN_FLOAT) + em_p, y0) for y0 in states]) 835 | V[t][y] = prob 836 | newpath[y] = path[state] + [y] 837 | path = newpath 838 | 839 | (prob, state) = max((V[len(obs) - 1][y], y) for y in 'ES') 840 | 841 | return (prob, path[state]) 842 | 843 | def get_DAG(self, sentence): 844 | DAG = {} 845 | N = len(sentence) 846 | for k in range(N): 847 | tmplist = [] 848 | i = k 849 | frag = sentence[k] 850 | while i < N and frag in self.freq: 851 | if self.freq[frag]: 852 | tmplist.append(i) 853 | i += 1 854 | frag = sentence[k:i + 1] 855 | if not tmplist: 856 | tmplist.append(k) 857 | DAG[k] = tmplist 858 | return DAG 859 | 860 | def clac(self, sentence, DAG): 861 | n = len(sentence) 862 | route = {n: (0, 0)} 863 | log_total = log(self.total) 864 | 865 | for i in range(n-1, -1, -1): 866 | cache = [] 867 | for j in DAG[i]: 868 | log_p = log(self.freq.get(sentence[i:j+1], 0) or 1) 869 | cache.append((log_p - log_total + route[j+1][0], j)) 870 | route[i] = max(cache) 871 | return route 872 | 873 | 874 | sentence1 = "程序员祝海林和朱会震是在孙健的左面和右面, 范凯在最右面。再往左是李松洪" 875 | tokenizer = ChineseTokenizer() 876 | list(tokenizer.cut(sentence1)) 877 | ``` 878 | 879 | 880 | 881 | 882 | ['程序员', 883 | '祝', 884 | '海林', 885 | '和', 886 | '朱会震', 887 | '是', 888 | '在', 889 | '孙健', 890 | '的', 891 | '左面', 892 | '和', 893 | '右面', 894 | ',', 895 | ' ', 896 | '范凯', 897 | '在', 898 | '最', 899 | '右面', 900 | '。', 901 | '再往', 902 | '左', 903 | '是', 904 | '李松洪'] 905 | 906 | 907 | 908 | ## 参考 909 | - [Github: jieba](https://github.com/fxsjy/jieba) 910 | - [Github: sacremoses](https://github.com/alvations/sacremoses) 911 | - [知乎:jieba分词的原理](https://zhuanlan.zhihu.com/p/189410443) 912 | 913 | 914 | -------------------------------------------------------------------------------- /tutorials/Chapter2/EnglishTokenizer.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# 英文分词\n", 8 | "分词是数据预处理的第一步。。对于像中文这样没有单词边界的语言,分词的策略通常比较复杂。现在常用的一些中文分词工具有 NLTK、jieba等。而像英文这种有单词边界的语言,分词要简单许多,比如,Moses 工具包就有可以处理绝大多数拉丁语系语言的分词脚本。\n", 9 | "\n", 10 | "本章节就以[sacremoses](https://github.com/alvations/sacremoses)为例,讲解英文的分词过程。\n", 11 | "\n", 12 | "目录:\n", 13 | "1. 替换空白字符\n", 14 | "2. 去掉句子开头和结尾的空白字符\n", 15 | "3. 将常见标点、乱码等符号与词语分开\n", 16 | "4. 分割逗号`,\n", 17 | "5. 分割句号`,`\n", 18 | "6. 处理`'`号缩写\n", 19 | "7. 
可选处理项\n", 20 | " - Mask受保护字符串\n", 21 | " - 分割破折号" 22 | ] 23 | }, 24 | { 25 | "cell_type": "markdown", 26 | "metadata": {}, 27 | "source": [ 28 | "### 替换空白字符\n", 29 | "包括空格、换行、tab缩进等所有的空字符,在正则表达式中,我们可以使用`\"\\s+\"`进行匹配。除此之外,在ASCII码中,第0~31号及第127号(共33个)是控制字符或通讯专用字符,如控制符:LF(换行)、CR(回车)、FF(换页)、DEL(删除)、BS(退格)、BEL(振铃)等;通讯专用字符:SOH(文头)、EOT(文尾)、ACK(确认)等,我们可以使用`\"[\\000-\\037]\"`进行匹配。\n", 30 | "\n", 31 | "有了对应的正则表达式,在python中我们可以使用`re.sub`函数进行替换。" 32 | ] 33 | }, 34 | { 35 | "cell_type": "code", 36 | "execution_count": 1, 37 | "metadata": {}, 38 | "outputs": [ 39 | { 40 | "name": "stdout", 41 | "output_type": "stream", 42 | "text": [ 43 | " This is a test sentence \t with useless blank chars\r", 44 | ".\u0001\n", 45 | " This is a test sentence with useless blank chars .\n" 46 | ] 47 | } 48 | ], 49 | "source": [ 50 | "import re\n", 51 | "\n", 52 | "DEDUPLICATE_SPACE = r\"\\s+\", r\" \"\n", 53 | "ASCII_JUNK = r\"[\\000-\\037]\", r\"\" \n", 54 | "\n", 55 | "text = u\" This is a test sentence \\t with useless blank chars\\r.\\x01\"\n", 56 | "print(text)\n", 57 | "\n", 58 | "for regexp, substitution in [DEDUPLICATE_SPACE, ASCII_JUNK]:\n", 59 | " text = re.sub(regexp, substitution, text)\n", 60 | "\n", 61 | "print(text)" 62 | ] 63 | }, 64 | { 65 | "cell_type": "markdown", 66 | "metadata": {}, 67 | "source": [ 68 | "### 去掉句子开头和结尾的空白字符\n", 69 | "刚才将所有的空白字符替换为了空格,但是句子开头和结尾的空白字符也被替换成了空格,还没有被去掉,所以这里我们使用`strip()`方法去掉开头和结尾的空格。" 70 | ] 71 | }, 72 | { 73 | "cell_type": "code", 74 | "execution_count": 2, 75 | "metadata": {}, 76 | "outputs": [ 77 | { 78 | "name": "stdout", 79 | "output_type": "stream", 80 | "text": [ 81 | "This is a test sentence with useless blank chars .\n" 82 | ] 83 | } 84 | ], 85 | "source": [ 86 | "text = text.strip()\n", 87 | "print(text)" 88 | ] 89 | }, 90 | { 91 | "cell_type": "markdown", 92 | "metadata": {}, 93 | "source": [ 94 | "### 将常见标点、乱码等符号与词语分开\n", 95 | "在Unicode字符集中,一部分字符会在我们的单词中出现,一部分则为标点符号以及其他的一些符号、乱码,如果我们的平行语料中这些字符通常与我们的单词连在一起,我们需要将它们与正常的单词分开。一个可行的方法是列举出所有可能出现在单词中字符(包括正常标点符号),除此之外的字符都在其两侧添加空格符号。幸运的是,moses中已经为我们搜集了这些字符,我们可以直接拿过来用(注意这里的标点符号不包含`.`,`.`,后续会单独处理)。" 96 | ] 97 | }, 98 | { 99 | "cell_type": "code", 100 | "execution_count": 3, 101 | "metadata": {}, 102 | "outputs": [ 103 | { 104 | "name": "stdout", 105 | "output_type": "stream", 106 | "text": [ 107 | "['This,', 'is', 'a', 'sentence', 'with', 'weird', '»', 'symbols', '…', 'appearing', 'everywhere', '¿', '.', 'Olso', 'some', 'normal', 'punctuation', 'such', 'as', '?']\n" 108 | ] 109 | } 110 | ], 111 | "source": [ 112 | "with open(\"./assets/IsAlnum.txt\") as f:\n", 113 | " IsAlnum = f.read()\n", 114 | " \n", 115 | "PAD_NOT_ISALNUM = r\"([^{}\\s\\.'\\`\\,\\-])\".format(IsAlnum), r\" \\1 \"\n", 116 | "regxp, substitution = PAD_NOT_ISALNUM\n", 117 | "\n", 118 | "text = \"This, is a sentence with weird\\xbb symbols\\u2026 appearing everywhere\\xbf. 
Olso some normal punctuation such as?\"\n", 119 | "\n", 120 | "text = re.sub(regxp, substitution, text)\n", 121 | "print(text.split())" 122 | ] 123 | }, 124 | { 125 | "cell_type": "markdown", 126 | "metadata": {}, 127 | "source": [ 128 | "### 分割逗号`,`\n", 129 | "这是刚才的遗留问题,由于`,`在用作子句划分的时候(I'm fine, thank you.)我们希望它与前后的单词分开,在某些情况下(如数字5,300)我们不希望将`,`与前后的字符分开。所以我们将`,`进行单独处理。" 130 | ] 131 | }, 132 | { 133 | "cell_type": "code", 134 | "execution_count": 4, 135 | "metadata": {}, 136 | "outputs": [ 137 | { 138 | "name": "stdout", 139 | "output_type": "stream", 140 | "text": [ 141 | "[',', 'This', 'is', 'a', 'sentence', '10', ',', 'with', 'number', '5,300', ',', 'and', '5', ',']\n" 142 | ] 143 | } 144 | ], 145 | "source": [ 146 | "with open(\"./assets/IsN.txt\") as f:\n", 147 | " IsN = f.read()\n", 148 | "\n", 149 | "COMMA_SEPARATE_1 = r\"([^{}])[,]\".format(IsN), r\"\\1 , \" # 若逗号前面不是数字,则分离逗号,如 hello,120 -> hello , 120\n", 150 | "COMMA_SEPARATE_2 = r\"[,]([^{}])\".format(IsN), r\" , \\1\" # 若逗号后面不是数字,则分离逗号,如 120, hello -> 120 , hello\n", 151 | "COMMA_SEPARATE_3 = r\"([{}])[,]$\".format(IsN), r\"\\1 , \" # 如果数字后匹配到结尾符,则分离逗号。 如120, -> 120 ,\n", 152 | "COMMA_SEPARATE_4 = r\"^[,]([{}])\".format(IsN), r\" \\1, \" # 如果数字后匹配到结尾符,则分离逗号。 如120, -> 120 ,\n", 153 | "\n", 154 | "text = \",This is a sentence 10, with number 5,300, and 5,\"\n", 155 | "\n", 156 | "# 此版本的实现可能会在此处创建额外的空格,但稍后会删除这些空格\n", 157 | "for regxp, substitution in [COMMA_SEPARATE_1, COMMA_SEPARATE_2, COMMA_SEPARATE_3, COMMA_SEPARATE_4]:\n", 158 | " text = re.sub(regxp, substitution, text)\n", 159 | "print(text.split())" 160 | ] 161 | }, 162 | { 163 | "cell_type": "markdown", 164 | "metadata": {}, 165 | "source": [ 166 | "### 分割句号`.`\n", 167 | "与`,`一样,`.`同样需要特殊的规则进行分割。需要考虑的情况有以下几种\n", 168 | "1. 连续多个点号的情况(省略号)`.....`\n", 169 | "2. 一个单独的大写字母跟一个`.` (通常出现在人名中,如`Aaron C. Courville`)\n", 170 | "3. 其他的多字母人名,地名、机构名等缩写。 (如`Gov.`表示政府,Mr.代表某某先生)\n", 171 | "4. 其他带`.`的缩写。(如`e.g.`表示举例,`i.e.`表示换句话说,`rev.`表示revision)\n", 172 | "5. 一些`.`后面跟数字的情况(如`No`. `Nos.`),这种情况与前面的区别是只有当这些词后面跟随数字时才不是句子的结束,如`No.`也可能做否定的意思。\n", 173 | "6. 
月份的缩写。(如`Jan.` 表示一月,`Feb.`表示2月)\n", 174 | "\n", 175 | "对于情况1,我们先匹配到多个`.`连续出现的情况,在其前后添加空格,并用Mask做标记(防止处理其他情况时对其造成不可预见的影响),待处理完其他情况后再将其还原。\n", 176 | "\n", 177 | "对于后面几种情况,我们针对每一中情况建立一个前缀词表,如果`.`前面是这些词的话,就不讲`.`和前面的词分开。" 178 | ] 179 | }, 180 | { 181 | "cell_type": "code", 182 | "execution_count": 5, 183 | "metadata": {}, 184 | "outputs": [ 185 | { 186 | "name": "stdout", 187 | "output_type": "stream", 188 | "text": [ 189 | "['This', 'is', 'a', 'test', 'sentence', 'write', 'on', 'Sep.', '6th', '.', 'No', '.', 'I', 'am', '123No.', 'in', 'all', 'people', '.', 'We', 'are', 'good', 'at', 'talk,', 'swiming', 'DOTDOTDOTMULTI']\n" 190 | ] 191 | } 192 | ], 193 | "source": [ 194 | "with open(\"./assets/nonbreaking_prefix.en\") as f:\n", 195 | " NONBREAKING_PREFIXES = []\n", 196 | " NUMERIC_ONLY_PREFIXES = []\n", 197 | " for line in f:\n", 198 | " line = line.strip()\n", 199 | " if line and not line.startswith(\"#\"):\n", 200 | " if line.endswith(\"#NUMERIC_ONLY#\"):\n", 201 | " NUMERIC_ONLY_PREFIXES.append(line.split()[0])\n", 202 | " if line not in NONBREAKING_PREFIXES:\n", 203 | " NONBREAKING_PREFIXES.append(line)\n", 204 | "\n", 205 | "with open(\"./assets/IsAlpha.txt\") as f:\n", 206 | " # IsAlnum = IsAlpha + IsN\n", 207 | " IsAlpha = f.read()\n", 208 | " \n", 209 | "with open(\"./assets/IsLower.txt\") as f:\n", 210 | " IsLower = f.read()\n", 211 | "\n", 212 | "def isanyalpha(text):\n", 213 | " # 判断给定字符串中是否全是字母(非数字、符号)\n", 214 | " return any(set(text).intersection(set(IsAlpha)))\n", 215 | "\n", 216 | "def islower(text):\n", 217 | " # 判断给定字符串中是否全部都是小写字母\n", 218 | " return not set(text).difference(set(IsLower))\n", 219 | "\n", 220 | "\n", 221 | "def replace_multidots(text):\n", 222 | " # 处理情况1,对多个\".\"的情况作mask处理\n", 223 | " text = re.sub(r\"\\.([\\.]+)\", r\" DOTMULTI\\1\", text)\n", 224 | " while re.search(r\"DOTMULTI\\.\", text):\n", 225 | " text = re.sub(r\"DOTMULTI\\.([^\\.])\", r\"DOTDOTMULTI \\1\", text)\n", 226 | " text = re.sub(r\"DOTMULTI\\.\", \"DOTDOTMULTI\", text)\n", 227 | " return text\n", 228 | "\n", 229 | "def handles_nonbreaking_prefixes(text):\n", 230 | " # 将文本拆分为标记以检查 \".\" 为结尾的部分是否符合拆分条件\n", 231 | " tokens = text.split()\n", 232 | " num_tokens = len(tokens)\n", 233 | " for i, token in enumerate(tokens):\n", 234 | " # 判断是否以\".\"结尾\n", 235 | " token_ends_with_period = re.search(r\"^(\\S+)\\.$\", token)\n", 236 | " if token_ends_with_period: \n", 237 | " prefix = token_ends_with_period.group(1)\n", 238 | "\n", 239 | " # 处理情况2,3,4,6\n", 240 | " if (\n", 241 | " (\".\" in prefix and isanyalpha(prefix))\n", 242 | " or (\n", 243 | " prefix in NONBREAKING_PREFIXES\n", 244 | " and prefix not in NUMERIC_ONLY_PREFIXES\n", 245 | " )\n", 246 | " or (\n", 247 | " i != num_tokens - 1\n", 248 | " and tokens[i + 1]\n", 249 | " and islower(tokens[i + 1][0])\n", 250 | " )\n", 251 | " ):\n", 252 | " pass # 不做拆分处理\n", 253 | "\n", 254 | " # 处理情况 5\n", 255 | " elif (\n", 256 | " prefix in NUMERIC_ONLY_PREFIXES\n", 257 | " and (i + 1) < num_tokens\n", 258 | " and re.search(r\"^[0-9]+\", tokens[i + 1])\n", 259 | " ):\n", 260 | " pass # 不做拆分处理\n", 261 | " else: # 不在1-6中,做拆分处理\n", 262 | " tokens[i] = prefix + \" .\"\n", 263 | " return \" \".join(tokens) # Stitch the tokens back.\n", 264 | "\n", 265 | "text = \"This is a test sentence write on Sep. 6th. No. I am 123No. in all people. 
We are good at talk, swiming...\"\n", 266 | "text = replace_multidots(text)\n", 267 | "text = handles_nonbreaking_prefixes(text)\n", 268 | "print(text.split())" 269 | ] 270 | }, 271 | { 272 | "cell_type": "markdown", 273 | "metadata": {}, 274 | "source": [ 275 | "### 处理`'`号缩写\n", 276 | "在英文中,使用`'`号缩写非常常见,比如`I'm`,`You're`等等,他们其实是两个词,现在被缩写成了一个词,我们希望它被分割成`[\"I\", \"'m\"]`,`[\"You\", \"'re\"]`的形式。这个我们可以列几个简单的正则表达式进行处理。\n" 277 | ] 278 | }, 279 | { 280 | "cell_type": "code", 281 | "execution_count": 6, 282 | "metadata": {}, 283 | "outputs": [ 284 | { 285 | "name": "stdout", 286 | "output_type": "stream", 287 | "text": [ 288 | "['I', \"'m\", 'fine,', 'thank', 'you.', 'And', 'you?']\n" 289 | ] 290 | } 291 | ], 292 | "source": [ 293 | "EN_SPECIFIC_1 = r\"([^{alpha}])[']([^{alpha}])\".format(alpha=IsAlpha), r\"\\1 ' \\2\"\n", 294 | "EN_SPECIFIC_2 = (\n", 295 | " r\"([^{alpha}{isn}])[']([{alpha}])\".format(alpha=IsAlpha, isn=IsN),\n", 296 | " r\"\\1 ' \\2\",\n", 297 | ")\n", 298 | "EN_SPECIFIC_3 = r\"([{alpha}])[']([^{alpha}])\".format(alpha=IsAlpha), r\"\\1 ' \\2\"\n", 299 | "EN_SPECIFIC_4 = r\"([{alpha}])[']([{alpha}])\".format(alpha=IsAlpha), r\"\\1 '\\2\"\n", 300 | "EN_SPECIFIC_5 = r\"([{isn}])[']([s])\".format(isn=IsN), r\"\\1 '\\2\"\n", 301 | "\n", 302 | "EN_SPECIFIC = [EN_SPECIFIC_1, EN_SPECIFIC_2, EN_SPECIFIC_3, EN_SPECIFIC_4, EN_SPECIFIC_5]\n", 303 | "\n", 304 | "text = \"I'm fine, thank you. And you?\"\n", 305 | "for regxp, substitution in EN_SPECIFIC:\n", 306 | " text = re.sub(regxp, substitution, text)\n", 307 | "\n", 308 | "print(text.split())" 309 | ] 310 | }, 311 | { 312 | "cell_type": "markdown", 313 | "metadata": {}, 314 | "source": [ 315 | "### 最后的收尾工作\n", 316 | "刚才在分词的过程中,还留下了两个遗留问题,一个是我们对连续多个`.`进行了Mask,现在要对其进行还原。二是在上述过程中可能会在词与词之间产生多个空格,我们要把它们合并成一个。" 317 | ] 318 | }, 319 | { 320 | "cell_type": "code", 321 | "execution_count": 7, 322 | "metadata": {}, 323 | "outputs": [ 324 | { 325 | "name": "stdout", 326 | "output_type": "stream", 327 | "text": [ 328 | "There are apple , banana ...\n" 329 | ] 330 | } 331 | ], 332 | "source": [ 333 | "def restore_multidots(text):\n", 334 | " # 恢复对多个\".\"的mask\n", 335 | " while re.search(r\"DOTDOTMULTI\", text):\n", 336 | " text = re.sub(r\"DOTDOTMULTI\", r\"DOTMULTI.\", text)\n", 337 | " return re.sub(r\"DOTMULTI\", r\".\", text)\n", 338 | " \n", 339 | "DEDUPLICATE_SPACE = r\"\\s+\", r\" \"\n", 340 | "regxp, substitution = DEDUPLICATE_SPACE\n", 341 | "\n", 342 | "text = \"There are apple , banana DOTDOTDOTMULTI\"\n", 343 | "text = restore_multidots(text)\n", 344 | "text = re.sub(regxp, substitution, text)\n", 345 | "print(text)" 346 | ] 347 | }, 348 | { 349 | "cell_type": "markdown", 350 | "metadata": {}, 351 | "source": [ 352 | "### 可选处理项\n", 353 | "#### Mask 受保护的字符串\n", 354 | "在分词过程中,有一些固定的字符串格式(比如url,日期,时间等),我们不希望把他们拆分开,而是希望将他们标注为统一标识符,以便于在翻译过程中减少词表的大小。" 355 | ] 356 | }, 357 | { 358 | "cell_type": "code", 359 | "execution_count": 8, 360 | "metadata": {}, 361 | "outputs": [ 362 | { 363 | "name": "stdout", 364 | "output_type": "stream", 365 | "text": [ 366 | "['this', 'is', 'a', 'webpage', 'THISISPROTECTED000', 'that', 'kicks', 'ass']\n" 367 | ] 368 | } 369 | ], 370 | "source": [ 371 | "# 匹配<\\hello>标签\n", 372 | "BASIC_PROTECTED_PATTERN_1 = r\"<\\/?\\S+\\/?>\"\n", 373 | "\n", 374 | "# 匹配xml的标签 \n", 375 | "BASIC_PROTECTED_PATTERN_2 = r'<\\S+( [a-zA-Z0-9]+\\=\"?[^\"]\")+ ?\\/?>'\n", 376 | "BASIC_PROTECTED_PATTERN_3 = r\"<\\S+( [a-zA-Z0-9]+\\='?[^']')+ ?\\/?>\"\n", 377 | "# 匹配邮箱\n", 378 | "BASIC_PROTECTED_PATTERN_4 = 
r\"[\\w\\-\\_\\.]+\\@([\\w\\-\\_]+\\.)+[a-zA-Z]{2,}\"\n", 379 | "# 匹配url\n", 380 | "BASIC_PROTECTED_PATTERN_5 = r\"(http[s]?|ftp):\\/\\/[^:\\/\\s]+(\\/\\w+)*\\/[\\w\\-\\.]+\"\n", 381 | "\n", 382 | "BASIC_PROTECTED_PATTERNS = [\n", 383 | " BASIC_PROTECTED_PATTERN_1,\n", 384 | " BASIC_PROTECTED_PATTERN_2,\n", 385 | " BASIC_PROTECTED_PATTERN_3,\n", 386 | " BASIC_PROTECTED_PATTERN_4,\n", 387 | " BASIC_PROTECTED_PATTERN_5,\n", 388 | " ]\n", 389 | "\n", 390 | "text = \"this is a webpage https://stackoverflow.com/questions/6181381/how-to-print-variables-in-perl that kicks ass\"\n", 391 | "\n", 392 | "# Find the tokens that needs to be protected.\n", 393 | "protected_tokens = [\n", 394 | " match.group()\n", 395 | " for protected_pattern in BASIC_PROTECTED_PATTERNS\n", 396 | " for match in re.finditer(protected_pattern, text, re.IGNORECASE)\n", 397 | "]\n", 398 | "# Apply the protected_patterns.\n", 399 | "for i, token in enumerate(protected_tokens):\n", 400 | " substituition = \"THISISPROTECTED\" + str(i).zfill(3)\n", 401 | " text = text.replace(token, substituition)\n", 402 | " \n", 403 | "print(text.split())" 404 | ] 405 | }, 406 | { 407 | "cell_type": "markdown", 408 | "metadata": {}, 409 | "source": [ 410 | "#### 分割破折号\n", 411 | "对于型如`word-word`的连字符号,我们可以选择将它们分成两个词,中间的破折号用特殊符号标记(方便Detokenize)。思路还是使用上面的IsAlnum字符表,如果存在某个破折号,两边都是IsAlnum中的字符,则将破折号与两边的字符用空格隔开。" 412 | ] 413 | }, 414 | { 415 | "cell_type": "code", 416 | "execution_count": 9, 417 | "metadata": {}, 418 | "outputs": [ 419 | { 420 | "name": "stdout", 421 | "output_type": "stream", 422 | "text": [ 423 | "This is a sentence with hyphen. pre @-@ trained.\n" 424 | ] 425 | } 426 | ], 427 | "source": [ 428 | "AGGRESSIVE_HYPHEN_SPLIT = (\n", 429 | " r\"([{alphanum}])\\-(?=[{alphanum}])\".format(alphanum=IsAlnum),\n", 430 | " r\"\\1 @-@ \",\n", 431 | " )\n", 432 | "\n", 433 | "text = \"This is a sentence with hyphen. 
pre-trained.\"\n", 434 | "regxp, substitution = AGGRESSIVE_HYPHEN_SPLIT\n", 435 | "text = re.sub(regxp, substitution, text)\n", 436 | "print(text)" 437 | ] 438 | }, 439 | { 440 | "cell_type": "markdown", 441 | "metadata": {}, 442 | "source": [ 443 | "## Putting them together" 444 | ] 445 | }, 446 | { 447 | "cell_type": "code", 448 | "execution_count": 10, 449 | "metadata": {}, 450 | "outputs": [], 451 | "source": [ 452 | "import re\n", 453 | "import os\n", 454 | "\n", 455 | "def get_charset(charset_name):\n", 456 | " f = open(os.path.join(\"assets\", charset_name + \".txt\"))\n", 457 | " return f.read()\n", 458 | "\n", 459 | "\n", 460 | "def get_nobreaking_prefix(lang=\"en\"):\n", 461 | " f = open(os.path.join(\"assets\", \"nonbreaking_prefix.\" + lang))\n", 462 | " NONBREAKING_PREFIXES = []\n", 463 | " NUMERIC_ONLY_PREFIXES = []\n", 464 | " for line in f:\n", 465 | " line = line.strip()\n", 466 | " if line and not line.startswith(\"#\"):\n", 467 | " if line.endswith(\"#NUMERIC_ONLY#\"):\n", 468 | " NUMERIC_ONLY_PREFIXES.append(line.split()[0])\n", 469 | " if line not in NONBREAKING_PREFIXES:\n", 470 | " NONBREAKING_PREFIXES.append(line)\n", 471 | " f.close()\n", 472 | " return NONBREAKING_PREFIXES, NUMERIC_ONLY_PREFIXES\n", 473 | "\n", 474 | "\n", 475 | "class MoseTokenizer(object):\n", 476 | "\n", 477 | " # 字符集\n", 478 | " IsAlnum = get_charset(\"IsAlnum\")\n", 479 | " IsAlpha = get_charset(\"IsAlpha\")\n", 480 | " IsLower = get_charset(\"IsLower\")\n", 481 | " IsN = get_charset(\"IsN\")\n", 482 | "\n", 483 | " # 步骤1 - 替换空白字符 相关正则表达式\n", 484 | " DEDUPLICATE_SPACE = r\"\\s+\", r\" \"\n", 485 | " ASCII_JUNK = r\"[\\000-\\037]\", r\"\"\n", 486 | "\n", 487 | " # 步骤2 - 将常见标点、乱码等符号与词语分开 相关正则表达式\n", 488 | " PAD_NOT_ISALNUM = r\"([^{}\\s\\.'\\`\\,\\-])\".format(IsAlnum), r\" \\1 \"\n", 489 | "\n", 490 | " # 步骤4 - 分割逗号 相关正则表达式\n", 491 | " # 若逗号前面不是数字,则分离逗号,如 hello,120 -> hello , 120\n", 492 | " COMMA_SEPARATE_1 = r\"([^{}])[,]\".format(IsN), r\"\\1 , \"\n", 493 | " # 若逗号后面不是数字,则分离逗号,如 120, hello -> 120 , hello\n", 494 | " COMMA_SEPARATE_2 = r\"[,]([^{}])\".format(IsN), r\" , \\1\"\n", 495 | " COMMA_SEPARATE_3 = r\"([{}])[,]$\".format(\n", 496 | " IsN), r\"\\1 , \" # 如果数字后匹配到结尾符,则分离逗号。 如120, -> 120 ,\n", 497 | " COMMA_SEPARATE_4 = r\"^[,]([{}])\".format(\n", 498 | " IsN), r\" \\1, \" # 如果数字后匹配到结尾符,则分离逗号。 如120, -> 120 ,\n", 499 | "\n", 500 | " COMMA_SEPARATE = [\n", 501 | " COMMA_SEPARATE_1,\n", 502 | " COMMA_SEPARATE_2,\n", 503 | " COMMA_SEPARATE_3,\n", 504 | " COMMA_SEPARATE_4\n", 505 | " ]\n", 506 | "\n", 507 | " # 步骤5 - 分割句号 受保护的前缀\n", 508 | " NONBREAKING_PREFIXES, NUMERIC_ONLY_PREFIXES = get_nobreaking_prefix(\n", 509 | " lang=\"en\")\n", 510 | "\n", 511 | " # 步骤6 - 处理'号缩写 相关正则表达式\n", 512 | " EN_SPECIFIC_1 = r\"([^{alpha}])[']([^{alpha}])\".format(\n", 513 | " alpha=IsAlpha), r\"\\1 ' \\2\"\n", 514 | " EN_SPECIFIC_2 = (\n", 515 | " r\"([^{alpha}{isn}])[']([{alpha}])\".format(alpha=IsAlpha, isn=IsN),\n", 516 | " r\"\\1 ' \\2\",\n", 517 | " )\n", 518 | " EN_SPECIFIC_3 = r\"([{alpha}])[']([^{alpha}])\".format(\n", 519 | " alpha=IsAlpha), r\"\\1 ' \\2\"\n", 520 | " EN_SPECIFIC_4 = r\"([{alpha}])[']([{alpha}])\".format(\n", 521 | " alpha=IsAlpha), r\"\\1 '\\2\"\n", 522 | " EN_SPECIFIC_5 = r\"([{isn}])[']([s])\".format(isn=IsN), r\"\\1 '\\2\"\n", 523 | "\n", 524 | " EN_SPECIFIC = [\n", 525 | " EN_SPECIFIC_1,\n", 526 | " EN_SPECIFIC_1,\n", 527 | " EN_SPECIFIC_1,\n", 528 | " EN_SPECIFIC_1,\n", 529 | " EN_SPECIFIC_1\n", 530 | " ]\n", 531 | "\n", 532 | " # 可选步骤 Mask 受保护的字符串 相关的正则表达式\n", 533 | " 
BASIC_PROTECTED_PATTERN_1 = r\"<\\/?\\S+\\/?>\"\n", 534 | " BASIC_PROTECTED_PATTERN_2 = r'<\\S+( [a-zA-Z0-9]+\\=\"?[^\"]\")+ ?\\/?>'\n", 535 | " BASIC_PROTECTED_PATTERN_3 = r\"<\\S+( [a-zA-Z0-9]+\\='?[^']')+ ?\\/?>\"\n", 536 | " BASIC_PROTECTED_PATTERN_4 = r\"[\\w\\-\\_\\.]+\\@([\\w\\-\\_]+\\.)+[a-zA-Z]{2,}\"\n", 537 | " BASIC_PROTECTED_PATTERN_5 = r\"(http[s]?|ftp):\\/\\/[^:\\/\\s]+(\\/\\w+)*\\/[\\w\\-\\.]+\"\n", 538 | "\n", 539 | " BASIC_PROTECTED_PATTERNS = [\n", 540 | " BASIC_PROTECTED_PATTERN_1,\n", 541 | " BASIC_PROTECTED_PATTERN_2,\n", 542 | " BASIC_PROTECTED_PATTERN_3,\n", 543 | " BASIC_PROTECTED_PATTERN_4,\n", 544 | " BASIC_PROTECTED_PATTERN_5\n", 545 | " ]\n", 546 | "\n", 547 | " # 可选步骤 分割破折号 相关正则表达式\n", 548 | " AGGRESSIVE_HYPHEN_SPLIT = (\n", 549 | " r\"([{alphanum}])\\-(?=[{alphanum}])\".format(alphanum=IsAlnum),\n", 550 | " r\"\\1 @-@ \",\n", 551 | " )\n", 552 | "\n", 553 | " def isanyalpha(self, text):\n", 554 | " # 判断给定字符串中是否全是字母(非数字、符号)\n", 555 | " return any(set(text).intersection(set(self.IsAlpha)))\n", 556 | "\n", 557 | " def islower(self, text):\n", 558 | " # 判断给定字符串中是否全部都是小写字母\n", 559 | " return not set(text).difference(set(self.IsLower))\n", 560 | "\n", 561 | " @staticmethod\n", 562 | " def replace_multidots(text):\n", 563 | " # 处理情况1,对多个\".\"的情况作mask处理\n", 564 | " text = re.sub(r\"\\.([\\.]+)\", r\" DOTMULTI\\1\", text)\n", 565 | " while re.search(r\"DOTMULTI\\.\", text):\n", 566 | " text = re.sub(r\"DOTMULTI\\.([^\\.])\", r\"DOTDOTMULTI \\1\", text)\n", 567 | " text = re.sub(r\"DOTMULTI\\.\", \"DOTDOTMULTI\", text)\n", 568 | " return text\n", 569 | "\n", 570 | " @staticmethod\n", 571 | " def restore_multidots(text):\n", 572 | " # 恢复对多个\".\"的mask\n", 573 | " while re.search(r\"DOTDOTMULTI\", text):\n", 574 | " text = re.sub(r\"DOTDOTMULTI\", r\"DOTMULTI.\", text)\n", 575 | " return re.sub(r\"DOTMULTI\", r\".\", text)\n", 576 | "\n", 577 | " def handles_nonbreaking_prefixes(self, text):\n", 578 | " # 将文本拆分为标记以检查 \".\" 为结尾的部分是否符合拆分条件\n", 579 | " tokens = text.split()\n", 580 | " num_tokens = len(tokens)\n", 581 | " for i, token in enumerate(tokens):\n", 582 | " # 判断是否以\".\"结尾\n", 583 | " token_ends_with_period = re.search(r\"^(\\S+)\\.$\", token)\n", 584 | " if token_ends_with_period:\n", 585 | " prefix = token_ends_with_period.group(1)\n", 586 | "\n", 587 | " # 处理情况2,3,4,6\n", 588 | " if (\n", 589 | " (\".\" in prefix and self.isanyalpha(prefix))\n", 590 | " or (\n", 591 | " prefix in self.NONBREAKING_PREFIXES\n", 592 | " and prefix not in self.NUMERIC_ONLY_PREFIXES\n", 593 | " )\n", 594 | " or (\n", 595 | " i != num_tokens - 1\n", 596 | " and tokens[i + 1]\n", 597 | " and self.islower(tokens[i + 1][0])\n", 598 | " )\n", 599 | " ):\n", 600 | " pass # 不做拆分处理\n", 601 | "\n", 602 | " # 处理情况 5\n", 603 | " elif (\n", 604 | " prefix in self.NUMERIC_ONLY_PREFIXES\n", 605 | " and (i + 1) < num_tokens\n", 606 | " and re.search(r\"^[0-9]+\", tokens[i + 1])\n", 607 | " ):\n", 608 | " pass # 不做拆分处理\n", 609 | " else: # 不在1-6中,做拆分处理\n", 610 | " tokens[i] = prefix + \" .\"\n", 611 | " return \" \".join(tokens) # Stitch the tokens back.\n", 612 | "\n", 613 | " def tokenize(self,\n", 614 | " text,\n", 615 | " aggressive_dash_splits=False, # 是否分割破折号 \"-\"\n", 616 | " return_str=False, # 返回字符串还是以list的形式返回\n", 617 | " protected_patterns=None # Mask 受保护的字符串 (以正则表达式list的形式传入)\n", 618 | " ):\n", 619 | "\n", 620 | " # 步骤1 - 替换空白字符\n", 621 | " for regexp, substitution in [self.DEDUPLICATE_SPACE, self.ASCII_JUNK]:\n", 622 | " text = re.sub(regexp, substitution, text)\n", 623 | "\n", 624 | " # 
可选步骤 Mask 受保护的字符串\n", 625 | " if protected_patterns:\n", 626 | " protecte_partterns.extend(self.BASIC_PROTECTED_PATTERNS)\n", 627 | " else:\n", 628 | " protecte_partterns = self.BASIC_PROTECTED_PATTERNS\n", 629 | "\n", 630 | " # Find the tokens that needs to be protected.\n", 631 | " protected_tokens = [\n", 632 | " match.group()\n", 633 | " for protected_pattern in self.BASIC_PROTECTED_PATTERNS\n", 634 | " for match in re.finditer(protected_pattern, text, re.IGNORECASE)\n", 635 | " ]\n", 636 | " # Apply the protected_patterns.\n", 637 | " for i, token in enumerate(protected_tokens):\n", 638 | " substituition = \"THISISPROTECTED\" + str(i).zfill(3)\n", 639 | " text = text.replace(token, substituition)\n", 640 | "\n", 641 | " # 步骤2 - 将常见标点、乱码等符号与词语分开 相关正则表达式\n", 642 | " regxp, substitution = self.PAD_NOT_ISALNUM\n", 643 | " text = re.sub(regxp, substitution, text)\n", 644 | "\n", 645 | " # 步骤3 - 去掉句子开头和结尾的空白字符\n", 646 | " text = text.strip()\n", 647 | "\n", 648 | " # 步骤4 - 分割逗号\n", 649 | " for regxp, substitution in self.COMMA_SEPARATE:\n", 650 | " text = re.sub(regxp, substitution, text)\n", 651 | "\n", 652 | " # 步骤5 - 分割句号\n", 653 | " text = self.replace_multidots(text)\n", 654 | " text = self.handles_nonbreaking_prefixes(text)\n", 655 | "\n", 656 | " # 步骤6 - 处理'号缩写\n", 657 | " for regxp, substitution in self.EN_SPECIFIC:\n", 658 | " text = re.sub(regxp, substitution, text)\n", 659 | "\n", 660 | " if aggressive_dash_splits:\n", 661 | " regxp, substitution = self.AGGRESSIVE_HYPHEN_SPLIT\n", 662 | " text = re.sub(regxp, substitution, text)\n", 663 | "\n", 664 | " # 收尾工作\n", 665 | " regxp, substitution = self.DEDUPLICATE_SPACE\n", 666 | " text = self.restore_multidots(text)\n", 667 | " text = re.sub(regxp, substitution, text)\n", 668 | "\n", 669 | " # 恢复受保护的字符串 Mask->原字符串.\n", 670 | " for i, token in enumerate(protected_tokens):\n", 671 | " substituition = \"THISISPROTECTED\" + str(i).zfill(3)\n", 672 | " text = text.replace(substituition, token)\n", 673 | "\n", 674 | " return text if return_str else text.split()" 675 | ] 676 | }, 677 | { 678 | "cell_type": "code", 679 | "execution_count": 11, 680 | "metadata": {}, 681 | "outputs": [ 682 | { 683 | "name": "stdout", 684 | "output_type": "stream", 685 | "text": [ 686 | "['this', 'is', 'a', 'webpage', 'https://stackoverflow.com/questions/6181381/how-to-print-variables-in-perl', 'that', 'kicks', 'ass']\n", 687 | "['Sie', 'sollten', 'vor', 'dem', 'Upgrade', 'eine', 'Sicherung', 'dieser', 'Daten', 'erstellen', '(', 'wie', 'unter', 'Abschnitt', '4.1.1', ',', '„', 'Sichern', 'aller', 'Daten', 'und', 'Konfigurationsinformationen', '“', 'beschrieben', ')', '.']\n", 688 | "['This', \"ain't\", 'funny', '.', \"It's\", 'actually', 'hillarious', ',', 'yet', 'double', 'Ls', '.', '|', '[', ']', '<', '>', '[', ']', '&', \"You're\", 'gonna', 'shake', 'it', 'off', '?', \"Don't\", '?']\n", 689 | "['This', ',', 'is', 'a', 'sentence', 'with', 'weird', '»', 'symbols', '…', 'appearing', 'everywhere', '¿']\n" 690 | ] 691 | } 692 | ], 693 | "source": [ 694 | "test_sentences = [\n", 695 | " \"this is a webpage https://stackoverflow.com/questions/6181381/how-to-print-variables-in-perl that kicks ass\",\n", 696 | " \"Sie sollten vor dem Upgrade eine Sicherung dieser Daten erstellen (wie unter Abschnitt 4.1.1, „Sichern aller Daten und Konfigurationsinformationen“ beschrieben).\",\n", 697 | " \"This ain't funny. It's actually hillarious, yet double Ls. | [] < > [ ] & You're gonna shake it off? 
Don't?\",\n", 698 | " \"This, is a sentence with weird\\xbb symbols\\u2026 appearing everywhere\\xbf\"\n", 699 | "]\n", 700 | "\n", 701 | "mt = MoseTokenizer()\n", 702 | "\n", 703 | "for text in test_sentences:\n", 704 | " text = mt.tokenize(text)\n", 705 | " print(text)" 706 | ] 707 | } 708 | ], 709 | "metadata": { 710 | "kernelspec": { 711 | "display_name": "Python 3.7.3 64-bit ('pytorch_latest': conda)", 712 | "language": "python", 713 | "name": "python37364bitpytorchlatestconda37dda3a0837247e597f023e05705e960" 714 | }, 715 | "language_info": { 716 | "codemirror_mode": { 717 | "name": "ipython", 718 | "version": 3 719 | }, 720 | "file_extension": ".py", 721 | "mimetype": "text/x-python", 722 | "name": "python", 723 | "nbconvert_exporter": "python", 724 | "pygments_lexer": "ipython3", 725 | "version": "3.7.3" 726 | } 727 | }, 728 | "nbformat": 4, 729 | "nbformat_minor": 2 730 | } 731 | -------------------------------------------------------------------------------- /tutorials/Chapter2/EnglishTokenizer.md: -------------------------------------------------------------------------------- 1 | 2 | # 英文分词 3 | 分词是数据预处理的第一步。。对于像中文这样没有单词边界的语言,分词的策略通常比较复杂。现在常用的一些中文分词工具有 NLTK、jieba等。而像英文这种有单词边界的语言,分词要简单许多,比如,Moses 工具包就有可以处理绝大多数拉丁语系语言的分词脚本。 4 | 5 | 本章节就以[sacremoses](https://github.com/alvations/sacremoses)为例,讲解英文的分词过程。 6 | 7 | 目录: 8 | 1. 替换空白字符 9 | 2. 去掉句子开头和结尾的空白字符 10 | 3. 将常见标点、乱码等符号与词语分开 11 | 4. 分割逗号`, 12 | 5. 分割句号`,` 13 | 6. 处理`'`号缩写 14 | 7. 可选处理项 15 | - Mask受保护字符串 16 | - 分割破折号 17 | 18 | ### 替换空白字符 19 | 包括空格、换行、tab缩进等所有的空字符,在正则表达式中,我们可以使用`"\s+"`进行匹配。除此之外,在ASCII码中,第0~31号及第127号(共33个)是控制字符或通讯专用字符,如控制符:LF(换行)、CR(回车)、FF(换页)、DEL(删除)、BS(退格)、BEL(振铃)等;通讯专用字符:SOH(文头)、EOT(文尾)、ACK(确认)等,我们可以使用`"[\000-\037]"`进行匹配。 20 | 21 | 有了对应的正则表达式,在python中我们可以使用`re.sub`函数进行替换。 22 | 23 | 24 | ```python 25 | import re 26 | 27 | DEDUPLICATE_SPACE = r"\s+", r" " 28 | ASCII_JUNK = r"[\000-\037]", r"" 29 | 30 | text = u" This is a test sentence \t with useless blank chars\r.\x01" 31 | print(text) 32 | 33 | for regexp, substitution in [DEDUPLICATE_SPACE, ASCII_JUNK]: 34 | text = re.sub(regexp, substitution, text) 35 | 36 | print(text) 37 | ``` 38 | 39 | . 40 | This is a test sentence with useless blank chars . 41 | 42 | 43 | ### 去掉句子开头和结尾的空白字符 44 | 刚才将所有的空白字符替换为了空格,但是句子开头和结尾的空白字符也被替换成了空格,还没有被去掉,所以这里我们使用`strip()`方法去掉开头和结尾的空格。 45 | 46 | 47 | ```python 48 | text = text.strip() 49 | print(text) 50 | ``` 51 | 52 | This is a test sentence with useless blank chars . 53 | 54 | 55 | ### 将常见标点、乱码等符号与词语分开 56 | 在Unicode字符集中,一部分字符会在我们的单词中出现,一部分则为标点符号以及其他的一些符号、乱码,如果我们的平行语料中这些字符通常与我们的单词连在一起,我们需要将它们与正常的单词分开。一个可行的方法是列举出所有可能出现在单词中字符(包括正常标点符号),除此之外的字符都在其两侧添加空格符号。幸运的是,moses中已经为我们搜集了这些字符,我们可以直接拿过来用(注意这里的标点符号不包含`.`,`.`,后续会单独处理)。 57 | 58 | 59 | ```python 60 | with open("./assets/IsAlnum.txt") as f: 61 | IsAlnum = f.read() 62 | 63 | PAD_NOT_ISALNUM = r"([^{}\s\.'\`\,\-])".format(IsAlnum), r" \1 " 64 | regxp, substitution = PAD_NOT_ISALNUM 65 | 66 | text = "This, is a sentence with weird\xbb symbols\u2026 appearing everywhere\xbf. Olso some normal punctuation such as?" 
67 | 68 | text = re.sub(regxp, substitution, text) 69 | print(text.split()) 70 | ``` 71 | 72 | ['This,', 'is', 'a', 'sentence', 'with', 'weird', '»', 'symbols', '…', 'appearing', 'everywhere', '¿', '.', 'Olso', 'some', 'normal', 'punctuation', 'such', 'as', '?'] 73 | 74 | 75 | ### 分割逗号`,` 76 | 这是刚才的遗留问题,由于`,`在用作子句划分的时候(I'm fine, thank you.)我们希望它与前后的单词分开,在某些情况下(如数字5,300)我们不希望将`,`与前后的字符分开。所以我们将`,`进行单独处理。 77 | 78 | 79 | ```python 80 | with open("./assets/IsN.txt") as f: 81 | IsN = f.read() 82 | 83 | COMMA_SEPARATE_1 = r"([^{}])[,]".format(IsN), r"\1 , " # 若逗号前面不是数字,则分离逗号,如 hello,120 -> hello , 120 84 | COMMA_SEPARATE_2 = r"[,]([^{}])".format(IsN), r" , \1" # 若逗号后面不是数字,则分离逗号,如 120, hello -> 120 , hello 85 | COMMA_SEPARATE_3 = r"([{}])[,]$".format(IsN), r"\1 , " # 如果数字后匹配到结尾符,则分离逗号。 如120, -> 120 , 86 | COMMA_SEPARATE_4 = r"^[,]([{}])".format(IsN), r" \1, " # 如果数字后匹配到结尾符,则分离逗号。 如120, -> 120 , 87 | 88 | text = ",This is a sentence 10, with number 5,300, and 5," 89 | 90 | # 此版本的实现可能会在此处创建额外的空格,但稍后会删除这些空格 91 | for regxp, substitution in [COMMA_SEPARATE_1, COMMA_SEPARATE_2, COMMA_SEPARATE_3, COMMA_SEPARATE_4]: 92 | text = re.sub(regxp, substitution, text) 93 | print(text.split()) 94 | ``` 95 | 96 | [',', 'This', 'is', 'a', 'sentence', '10', ',', 'with', 'number', '5,300', ',', 'and', '5', ','] 97 | 98 | 99 | ### 分割句号`.` 100 | 与`,`一样,`.`同样需要特殊的规则进行分割。需要考虑的情况有以下几种 101 | 1. 连续多个点号的情况(省略号)`.....` 102 | 2. 一个单独的大写字母跟一个`.` (通常出现在人名中,如`Aaron C. Courville`) 103 | 3. 其他的多字母人名,地名、机构名等缩写。 (如`Gov.`表示政府,Mr.代表某某先生) 104 | 4. 其他带`.`的缩写。(如`e.g.`表示举例,`i.e.`表示换句话说,`rev.`表示revision) 105 | 5. 一些`.`后面跟数字的情况(如`No`. `Nos.`),这种情况与前面的区别是只有当这些词后面跟随数字时才不是句子的结束,如`No.`也可能做否定的意思。 106 | 6. 月份的缩写。(如`Jan.` 表示一月,`Feb.`表示2月) 107 | 108 | 对于情况1,我们先匹配到多个`.`连续出现的情况,在其前后添加空格,并用Mask做标记(防止处理其他情况时对其造成不可预见的影响),待处理完其他情况后再将其还原。 109 | 110 | 对于后面几种情况,我们针对每一中情况建立一个前缀词表,如果`.`前面是这些词的话,就不讲`.`和前面的词分开。 111 | 112 | 113 | ```python 114 | with open("./assets/nonbreaking_prefix.en") as f: 115 | NONBREAKING_PREFIXES = [] 116 | NUMERIC_ONLY_PREFIXES = [] 117 | for line in f: 118 | line = line.strip() 119 | if line and not line.startswith("#"): 120 | if line.endswith("#NUMERIC_ONLY#"): 121 | NUMERIC_ONLY_PREFIXES.append(line.split()[0]) 122 | if line not in NONBREAKING_PREFIXES: 123 | NONBREAKING_PREFIXES.append(line) 124 | 125 | with open("./assets/IsAlpha.txt") as f: 126 | # IsAlnum = IsAlpha + IsN 127 | IsAlpha = f.read() 128 | 129 | with open("./assets/IsLower.txt") as f: 130 | IsLower = f.read() 131 | 132 | def isanyalpha(text): 133 | # 判断给定字符串中是否全是字母(非数字、符号) 134 | return any(set(text).intersection(set(IsAlpha))) 135 | 136 | def islower(text): 137 | # 判断给定字符串中是否全部都是小写字母 138 | return not set(text).difference(set(IsLower)) 139 | 140 | 141 | def replace_multidots(text): 142 | # 处理情况1,对多个"."的情况作mask处理 143 | text = re.sub(r"\.([\.]+)", r" DOTMULTI\1", text) 144 | while re.search(r"DOTMULTI\.", text): 145 | text = re.sub(r"DOTMULTI\.([^\.])", r"DOTDOTMULTI \1", text) 146 | text = re.sub(r"DOTMULTI\.", "DOTDOTMULTI", text) 147 | return text 148 | 149 | def handles_nonbreaking_prefixes(text): 150 | # 将文本拆分为标记以检查 "." 为结尾的部分是否符合拆分条件 151 | tokens = text.split() 152 | num_tokens = len(tokens) 153 | for i, token in enumerate(tokens): 154 | # 判断是否以"."结尾 155 | token_ends_with_period = re.search(r"^(\S+)\.$", token) 156 | if token_ends_with_period: 157 | prefix = token_ends_with_period.group(1) 158 | 159 | # 处理情况2,3,4,6 160 | if ( 161 | ("." 
in prefix and isanyalpha(prefix)) 162 | or ( 163 | prefix in NONBREAKING_PREFIXES 164 | and prefix not in NUMERIC_ONLY_PREFIXES 165 | ) 166 | or ( 167 | i != num_tokens - 1 168 | and tokens[i + 1] 169 | and islower(tokens[i + 1][0]) 170 | ) 171 | ): 172 | pass # 不做拆分处理 173 | 174 | # 处理情况 5 175 | elif ( 176 | prefix in NUMERIC_ONLY_PREFIXES 177 | and (i + 1) < num_tokens 178 | and re.search(r"^[0-9]+", tokens[i + 1]) 179 | ): 180 | pass # 不做拆分处理 181 | else: # 不在1-6中,做拆分处理 182 | tokens[i] = prefix + " ." 183 | return " ".join(tokens) # Stitch the tokens back. 184 | 185 | text = "This is a test sentence write on Sep. 6th. No. I am 123No. in all people. We are good at talk, swiming..." 186 | text = replace_multidots(text) 187 | text = handles_nonbreaking_prefixes(text) 188 | print(text.split()) 189 | ``` 190 | 191 | ['This', 'is', 'a', 'test', 'sentence', 'write', 'on', 'Sep.', '6th', '.', 'No', '.', 'I', 'am', '123No.', 'in', 'all', 'people', '.', 'We', 'are', 'good', 'at', 'talk,', 'swiming', 'DOTDOTDOTMULTI'] 192 | 193 | 194 | ### 处理`'`号缩写 195 | 在英文中,使用`'`号缩写非常常见,比如`I'm`,`You're`等等,他们其实是两个词,现在被缩写成了一个词,我们希望它被分割成`["I", "'m"]`,`["You", "'re"]`的形式。这个我们可以列几个简单的正则表达式进行处理。 196 | 197 | 198 | 199 | ```python 200 | EN_SPECIFIC_1 = r"([^{alpha}])[']([^{alpha}])".format(alpha=IsAlpha), r"\1 ' \2" 201 | EN_SPECIFIC_2 = ( 202 | r"([^{alpha}{isn}])[']([{alpha}])".format(alpha=IsAlpha, isn=IsN), 203 | r"\1 ' \2", 204 | ) 205 | EN_SPECIFIC_3 = r"([{alpha}])[']([^{alpha}])".format(alpha=IsAlpha), r"\1 ' \2" 206 | EN_SPECIFIC_4 = r"([{alpha}])[']([{alpha}])".format(alpha=IsAlpha), r"\1 '\2" 207 | EN_SPECIFIC_5 = r"([{isn}])[']([s])".format(isn=IsN), r"\1 '\2" 208 | 209 | EN_SPECIFIC = [EN_SPECIFIC_1, EN_SPECIFIC_2, EN_SPECIFIC_3, EN_SPECIFIC_4, EN_SPECIFIC_5] 210 | 211 | text = "I'm fine, thank you. And you?" 212 | for regxp, substitution in EN_SPECIFIC: 213 | text = re.sub(regxp, substitution, text) 214 | 215 | print(text.split()) 216 | ``` 217 | 218 | ['I', "'m", 'fine,', 'thank', 'you.', 'And', 'you?'] 219 | 220 | 221 | ### 最后的收尾工作 222 | 刚才在分词的过程中,还留下了两个遗留问题,一个是我们对连续多个`.`进行了Mask,现在要对其进行还原。二是在上述过程中可能会在词与词之间产生多个空格,我们要把它们合并成一个。 223 | 224 | 225 | ```python 226 | def restore_multidots(text): 227 | # 恢复对多个"."的mask 228 | while re.search(r"DOTDOTMULTI", text): 229 | text = re.sub(r"DOTDOTMULTI", r"DOTMULTI.", text) 230 | return re.sub(r"DOTMULTI", r".", text) 231 | 232 | DEDUPLICATE_SPACE = r"\s+", r" " 233 | regxp, substitution = DEDUPLICATE_SPACE 234 | 235 | text = "There are apple , banana DOTDOTDOTMULTI" 236 | text = restore_multidots(text) 237 | text = re.sub(regxp, substitution, text) 238 | print(text) 239 | ``` 240 | 241 | There are apple , banana ... 
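
Before moving on to the optional steps, the snippet below is a minimal sketch that chains steps 1-6 in the same order used by the full tokenizer assembled later in the "Putting them together" section (the optional masking and hyphen splitting are left out). It assumes the regular expressions and helper functions defined in the cells above have already been run, and the name `basic_tokenize` is ours rather than part of sacremoses.

```python
def basic_tokenize(text):
    # Step 1 - normalize whitespace and drop ASCII control characters
    for regexp, substitution in [DEDUPLICATE_SPACE, ASCII_JUNK]:
        text = re.sub(regexp, substitution, text)

    # Step 2 - pad punctuation / junk symbols with spaces
    regxp, substitution = PAD_NOT_ISALNUM
    text = re.sub(regxp, substitution, text)

    # Step 3 - strip leading and trailing whitespace
    text = text.strip()

    # Step 4 - separate commas (except inside numbers)
    for regxp, substitution in [COMMA_SEPARATE_1, COMMA_SEPARATE_2,
                                COMMA_SEPARATE_3, COMMA_SEPARATE_4]:
        text = re.sub(regxp, substitution, text)

    # Step 5 - separate periods, protecting multi-dots and non-breaking prefixes
    text = replace_multidots(text)
    text = handles_nonbreaking_prefixes(text)

    # Step 6 - split apostrophe contractions
    for regxp, substitution in EN_SPECIFIC:
        text = re.sub(regxp, substitution, text)

    # Final cleanup - restore the multi-dot mask and collapse repeated spaces
    text = restore_multidots(text)
    regxp, substitution = DEDUPLICATE_SPACE
    text = re.sub(regxp, substitution, text)
    return text.split()

print(basic_tokenize("Hello, world... I'm sure Mr. Smith isn't late."))
```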
242 | 243 | 244 | ### 可选处理项 245 | #### Mask 受保护的字符串 246 | 在分词过程中,有一些固定的字符串格式(比如url,日期,时间等),我们不希望把他们拆分开,而是希望将他们标注为统一标识符,以便于在翻译过程中减少词表的大小。 247 | 248 | 249 | ```python 250 | # 匹配<\hello>标签 251 | BASIC_PROTECTED_PATTERN_1 = r"<\/?\S+\/?>" 252 | 253 | # 匹配xml的标签 254 | BASIC_PROTECTED_PATTERN_2 = r'<\S+( [a-zA-Z0-9]+\="?[^"]")+ ?\/?>' 255 | BASIC_PROTECTED_PATTERN_3 = r"<\S+( [a-zA-Z0-9]+\='?[^']')+ ?\/?>" 256 | # 匹配邮箱 257 | BASIC_PROTECTED_PATTERN_4 = r"[\w\-\_\.]+\@([\w\-\_]+\.)+[a-zA-Z]{2,}" 258 | # 匹配url 259 | BASIC_PROTECTED_PATTERN_5 = r"(http[s]?|ftp):\/\/[^:\/\s]+(\/\w+)*\/[\w\-\.]+" 260 | 261 | BASIC_PROTECTED_PATTERNS = [ 262 | BASIC_PROTECTED_PATTERN_1, 263 | BASIC_PROTECTED_PATTERN_2, 264 | BASIC_PROTECTED_PATTERN_3, 265 | BASIC_PROTECTED_PATTERN_4, 266 | BASIC_PROTECTED_PATTERN_5, 267 | ] 268 | 269 | text = "this is a webpage https://stackoverflow.com/questions/6181381/how-to-print-variables-in-perl that kicks ass" 270 | 271 | # Find the tokens that needs to be protected. 272 | protected_tokens = [ 273 | match.group() 274 | for protected_pattern in BASIC_PROTECTED_PATTERNS 275 | for match in re.finditer(protected_pattern, text, re.IGNORECASE) 276 | ] 277 | # Apply the protected_patterns. 278 | for i, token in enumerate(protected_tokens): 279 | substituition = "THISISPROTECTED" + str(i).zfill(3) 280 | text = text.replace(token, substituition) 281 | 282 | print(text.split()) 283 | ``` 284 | 285 | ['this', 'is', 'a', 'webpage', 'THISISPROTECTED000', 'that', 'kicks', 'ass'] 286 | 287 | 288 | #### 分割破折号 289 | 对于型如`word-word`的连字符号,我们可以选择将它们分成两个词,中间的破折号用特殊符号标记(方便Detokenize)。思路还是使用上面的IsAlnum字符表,如果存在某个破折号,两边都是IsAlnum中的字符,则将破折号与两边的字符用空格隔开。 290 | 291 | 292 | ```python 293 | AGGRESSIVE_HYPHEN_SPLIT = ( 294 | r"([{alphanum}])\-(?=[{alphanum}])".format(alphanum=IsAlnum), 295 | r"\1 @-@ ", 296 | ) 297 | 298 | text = "This is a sentence with hyphen. pre-trained." 299 | regxp, substitution = AGGRESSIVE_HYPHEN_SPLIT 300 | text = re.sub(regxp, substitution, text) 301 | print(text) 302 | ``` 303 | 304 | This is a sentence with hyphen. pre @-@ trained. 305 | 306 | 307 | ## Putting them together 308 | 309 | 310 | ```python 311 | import re 312 | import os 313 | 314 | def get_charset(charset_name): 315 | f = open(os.path.join("assets", charset_name + ".txt")) 316 | return f.read() 317 | 318 | 319 | def get_nobreaking_prefix(lang="en"): 320 | f = open(os.path.join("assets", "nonbreaking_prefix." 
+ lang)) 321 | NONBREAKING_PREFIXES = [] 322 | NUMERIC_ONLY_PREFIXES = [] 323 | for line in f: 324 | line = line.strip() 325 | if line and not line.startswith("#"): 326 | if line.endswith("#NUMERIC_ONLY#"): 327 | NUMERIC_ONLY_PREFIXES.append(line.split()[0]) 328 | if line not in NONBREAKING_PREFIXES: 329 | NONBREAKING_PREFIXES.append(line) 330 | f.close() 331 | return NONBREAKING_PREFIXES, NUMERIC_ONLY_PREFIXES 332 | 333 | 334 | class MoseTokenizer(object): 335 | 336 | # 字符集 337 | IsAlnum = get_charset("IsAlnum") 338 | IsAlpha = get_charset("IsAlpha") 339 | IsLower = get_charset("IsLower") 340 | IsN = get_charset("IsN") 341 | 342 | # 步骤1 - 替换空白字符 相关正则表达式 343 | DEDUPLICATE_SPACE = r"\s+", r" " 344 | ASCII_JUNK = r"[\000-\037]", r"" 345 | 346 | # 步骤2 - 将常见标点、乱码等符号与词语分开 相关正则表达式 347 | PAD_NOT_ISALNUM = r"([^{}\s\.'\`\,\-])".format(IsAlnum), r" \1 " 348 | 349 | # 步骤4 - 分割逗号 相关正则表达式 350 | # 若逗号前面不是数字,则分离逗号,如 hello,120 -> hello , 120 351 | COMMA_SEPARATE_1 = r"([^{}])[,]".format(IsN), r"\1 , " 352 | # 若逗号后面不是数字,则分离逗号,如 120, hello -> 120 , hello 353 | COMMA_SEPARATE_2 = r"[,]([^{}])".format(IsN), r" , \1" 354 | COMMA_SEPARATE_3 = r"([{}])[,]$".format( 355 | IsN), r"\1 , " # 如果数字后匹配到结尾符,则分离逗号。 如120, -> 120 , 356 | COMMA_SEPARATE_4 = r"^[,]([{}])".format( 357 | IsN), r" \1, " # 如果数字后匹配到结尾符,则分离逗号。 如120, -> 120 , 358 | 359 | COMMA_SEPARATE = [ 360 | COMMA_SEPARATE_1, 361 | COMMA_SEPARATE_2, 362 | COMMA_SEPARATE_3, 363 | COMMA_SEPARATE_4 364 | ] 365 | 366 | # 步骤5 - 分割句号 受保护的前缀 367 | NONBREAKING_PREFIXES, NUMERIC_ONLY_PREFIXES = get_nobreaking_prefix( 368 | lang="en") 369 | 370 | # 步骤6 - 处理'号缩写 相关正则表达式 371 | EN_SPECIFIC_1 = r"([^{alpha}])[']([^{alpha}])".format( 372 | alpha=IsAlpha), r"\1 ' \2" 373 | EN_SPECIFIC_2 = ( 374 | r"([^{alpha}{isn}])[']([{alpha}])".format(alpha=IsAlpha, isn=IsN), 375 | r"\1 ' \2", 376 | ) 377 | EN_SPECIFIC_3 = r"([{alpha}])[']([^{alpha}])".format( 378 | alpha=IsAlpha), r"\1 ' \2" 379 | EN_SPECIFIC_4 = r"([{alpha}])[']([{alpha}])".format( 380 | alpha=IsAlpha), r"\1 '\2" 381 | EN_SPECIFIC_5 = r"([{isn}])[']([s])".format(isn=IsN), r"\1 '\2" 382 | 383 | EN_SPECIFIC = [ 384 | EN_SPECIFIC_1, 385 | EN_SPECIFIC_1, 386 | EN_SPECIFIC_1, 387 | EN_SPECIFIC_1, 388 | EN_SPECIFIC_1 389 | ] 390 | 391 | # 可选步骤 Mask 受保护的字符串 相关的正则表达式 392 | BASIC_PROTECTED_PATTERN_1 = r"<\/?\S+\/?>" 393 | BASIC_PROTECTED_PATTERN_2 = r'<\S+( [a-zA-Z0-9]+\="?[^"]")+ ?\/?>' 394 | BASIC_PROTECTED_PATTERN_3 = r"<\S+( [a-zA-Z0-9]+\='?[^']')+ ?\/?>" 395 | BASIC_PROTECTED_PATTERN_4 = r"[\w\-\_\.]+\@([\w\-\_]+\.)+[a-zA-Z]{2,}" 396 | BASIC_PROTECTED_PATTERN_5 = r"(http[s]?|ftp):\/\/[^:\/\s]+(\/\w+)*\/[\w\-\.]+" 397 | 398 | BASIC_PROTECTED_PATTERNS = [ 399 | BASIC_PROTECTED_PATTERN_1, 400 | BASIC_PROTECTED_PATTERN_2, 401 | BASIC_PROTECTED_PATTERN_3, 402 | BASIC_PROTECTED_PATTERN_4, 403 | BASIC_PROTECTED_PATTERN_5 404 | ] 405 | 406 | # 可选步骤 分割破折号 相关正则表达式 407 | AGGRESSIVE_HYPHEN_SPLIT = ( 408 | r"([{alphanum}])\-(?=[{alphanum}])".format(alphanum=IsAlnum), 409 | r"\1 @-@ ", 410 | ) 411 | 412 | def isanyalpha(self, text): 413 | # 判断给定字符串中是否全是字母(非数字、符号) 414 | return any(set(text).intersection(set(self.IsAlpha))) 415 | 416 | def islower(self, text): 417 | # 判断给定字符串中是否全部都是小写字母 418 | return not set(text).difference(set(self.IsLower)) 419 | 420 | @staticmethod 421 | def replace_multidots(text): 422 | # 处理情况1,对多个"."的情况作mask处理 423 | text = re.sub(r"\.([\.]+)", r" DOTMULTI\1", text) 424 | while re.search(r"DOTMULTI\.", text): 425 | text = re.sub(r"DOTMULTI\.([^\.])", r"DOTDOTMULTI \1", text) 426 | text = re.sub(r"DOTMULTI\.", 
"DOTDOTMULTI", text) 427 | return text 428 | 429 | @staticmethod 430 | def restore_multidots(text): 431 | # 恢复对多个"."的mask 432 | while re.search(r"DOTDOTMULTI", text): 433 | text = re.sub(r"DOTDOTMULTI", r"DOTMULTI.", text) 434 | return re.sub(r"DOTMULTI", r".", text) 435 | 436 | def handles_nonbreaking_prefixes(self, text): 437 | # 将文本拆分为标记以检查 "." 为结尾的部分是否符合拆分条件 438 | tokens = text.split() 439 | num_tokens = len(tokens) 440 | for i, token in enumerate(tokens): 441 | # 判断是否以"."结尾 442 | token_ends_with_period = re.search(r"^(\S+)\.$", token) 443 | if token_ends_with_period: 444 | prefix = token_ends_with_period.group(1) 445 | 446 | # 处理情况2,3,4,6 447 | if ( 448 | ("." in prefix and self.isanyalpha(prefix)) 449 | or ( 450 | prefix in self.NONBREAKING_PREFIXES 451 | and prefix not in self.NUMERIC_ONLY_PREFIXES 452 | ) 453 | or ( 454 | i != num_tokens - 1 455 | and tokens[i + 1] 456 | and self.islower(tokens[i + 1][0]) 457 | ) 458 | ): 459 | pass # 不做拆分处理 460 | 461 | # 处理情况 5 462 | elif ( 463 | prefix in self.NUMERIC_ONLY_PREFIXES 464 | and (i + 1) < num_tokens 465 | and re.search(r"^[0-9]+", tokens[i + 1]) 466 | ): 467 | pass # 不做拆分处理 468 | else: # 不在1-6中,做拆分处理 469 | tokens[i] = prefix + " ." 470 | return " ".join(tokens) # Stitch the tokens back. 471 | 472 | def tokenize(self, 473 | text, 474 | aggressive_dash_splits=False, # 是否分割破折号 "-" 475 | return_str=False, # 返回字符串还是以list的形式返回 476 | protected_patterns=None # Mask 受保护的字符串 (以正则表达式list的形式传入) 477 | ): 478 | 479 | # 步骤1 - 替换空白字符 480 | for regexp, substitution in [self.DEDUPLICATE_SPACE, self.ASCII_JUNK]: 481 | text = re.sub(regexp, substitution, text) 482 | 483 | # 可选步骤 Mask 受保护的字符串 484 | if protected_patterns: 485 | protecte_partterns.extend(self.BASIC_PROTECTED_PATTERNS) 486 | else: 487 | protecte_partterns = self.BASIC_PROTECTED_PATTERNS 488 | 489 | # Find the tokens that needs to be protected. 490 | protected_tokens = [ 491 | match.group() 492 | for protected_pattern in self.BASIC_PROTECTED_PATTERNS 493 | for match in re.finditer(protected_pattern, text, re.IGNORECASE) 494 | ] 495 | # Apply the protected_patterns. 496 | for i, token in enumerate(protected_tokens): 497 | substituition = "THISISPROTECTED" + str(i).zfill(3) 498 | text = text.replace(token, substituition) 499 | 500 | # 步骤2 - 将常见标点、乱码等符号与词语分开 相关正则表达式 501 | regxp, substitution = self.PAD_NOT_ISALNUM 502 | text = re.sub(regxp, substitution, text) 503 | 504 | # 步骤3 - 去掉句子开头和结尾的空白字符 505 | text = text.strip() 506 | 507 | # 步骤4 - 分割逗号 508 | for regxp, substitution in self.COMMA_SEPARATE: 509 | text = re.sub(regxp, substitution, text) 510 | 511 | # 步骤5 - 分割句号 512 | text = self.replace_multidots(text) 513 | text = self.handles_nonbreaking_prefixes(text) 514 | 515 | # 步骤6 - 处理'号缩写 516 | for regxp, substitution in self.EN_SPECIFIC: 517 | text = re.sub(regxp, substitution, text) 518 | 519 | if aggressive_dash_splits: 520 | regxp, substitution = self.AGGRESSIVE_HYPHEN_SPLIT 521 | text = re.sub(regxp, substitution, text) 522 | 523 | # 收尾工作 524 | regxp, substitution = self.DEDUPLICATE_SPACE 525 | text = self.restore_multidots(text) 526 | text = re.sub(regxp, substitution, text) 527 | 528 | # 恢复受保护的字符串 Mask->原字符串. 
529 | for i, token in enumerate(protected_tokens): 530 | substituition = "THISISPROTECTED" + str(i).zfill(3) 531 | text = text.replace(substituition, token) 532 | 533 | return text if return_str else text.split() 534 | ``` 535 | 536 | 537 | ```python 538 | test_sentences = [ 539 | "this is a webpage https://stackoverflow.com/questions/6181381/how-to-print-variables-in-perl that kicks ass", 540 | "Sie sollten vor dem Upgrade eine Sicherung dieser Daten erstellen (wie unter Abschnitt 4.1.1, „Sichern aller Daten und Konfigurationsinformationen“ beschrieben).", 541 | "This ain't funny. It's actually hillarious, yet double Ls. | [] < > [ ] & You're gonna shake it off? Don't?", 542 | "This, is a sentence with weird\xbb symbols\u2026 appearing everywhere\xbf" 543 | ] 544 | 545 | mt = MoseTokenizer() 546 | 547 | for text in test_sentences: 548 | text = mt.tokenize(text) 549 | print(text) 550 | ``` 551 | 552 | ['this', 'is', 'a', 'webpage', 'https://stackoverflow.com/questions/6181381/how-to-print-variables-in-perl', 'that', 'kicks', 'ass'] 553 | ['Sie', 'sollten', 'vor', 'dem', 'Upgrade', 'eine', 'Sicherung', 'dieser', 'Daten', 'erstellen', '(', 'wie', 'unter', 'Abschnitt', '4.1.1', ',', '„', 'Sichern', 'aller', 'Daten', 'und', 'Konfigurationsinformationen', '“', 'beschrieben', ')', '.'] 554 | ['This', "ain't", 'funny', '.', "It's", 'actually', 'hillarious', ',', 'yet', 'double', 'Ls', '.', '|', '[', ']', '<', '>', '[', ']', '&', "You're", 'gonna', 'shake', 'it', 'off', '?', "Don't", '?'] 555 | ['This', ',', 'is', 'a', 'sentence', 'with', 'weird', '»', 'symbols', '…', 'appearing', 'everywhere', '¿'] 556 | 557 | -------------------------------------------------------------------------------- /tutorials/Chapter2/Normalize.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# 规范化\n", 8 | "\n", 9 | "在机器翻译的数据处理过程中,规范化是指对数据中的字符表示或者大小写等进行统一,具体包括符号规范化,大小写转换和中文的简繁体转化等。由于数据来源多样,不同的数据集中可能使用不同的符号标准或者大小写规范等。同一个符号,由于使用的编码不同,计算机也会认为是不同的符号或单词。此外,这种多样性会变相地导致数据中各种符号相对稀疏,增大了模型的学习负担。通过规范化,可以将功能相同的符号或者单词表示进行统一,去除其中的噪音,减小词表规模。\n", 10 | "\n", 11 | "符号规范化,主要指的是全角或者半角符号的统一。如表所示,虽然其中的百分号、字母‘A’和数字‘9’表示的含义没有变,但是在 Unicode 标准中存在不同的编码表示。因此,需要将不同的编码进行统一。在中英翻译中,通常会根据映射规则将符号全部统一成半角符号。\n", 12 | "\n", 13 | "| 字符 | Unicode 编码 16 进制 |\n", 14 | "| :----: |:----: |\n", 15 | "| % | FF05 |\n", 16 | "| ﹪ | FF6A |\n", 17 | "| % | 25 |\n", 18 | "| A | 41 |\n", 19 | "| 9 | 39 |\n", 20 | "| 9 | FF19 |\n", 21 | "\n", 22 | "这一步的原理并不复杂,主要是涉及到字符的替换,使用正则相应的正则表达式可以很容易做到。\n", 23 | "\n", 24 | "注:以下的规范化规则均来自[sacremoses](https://github.com/alvations/sacremoses)。是Moses中 [normalize-punctuation.perl](https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/normalize-punctuation.perl)脚本的一个python版本,对于大部分场景来说够用了,我们要做的是了解这些通用脚本里面做了什么,对于垂直领域的翻译,需要根据数据的特征在这些通用脚本的基础上进行增删。如果只把它当做黑盒子来用,我们就无法做定制化的开发处理。毕竟数据的清洗质量的好坏也会在很大程度上影响模型训练的效果。" 25 | ] 26 | }, 27 | { 28 | "cell_type": "markdown", 29 | "metadata": {}, 30 | "source": [ 31 | "## Unicode中统一字符的替换\n", 32 | "### 与引号破折号相关的字符规范化\n", 33 | "在英文或者其他语言中引号和破折号是有多种字符表达方式的,它们在表意方面没有任何区别,所以我们通常把它们统一进行替换,如两个单引号替换成双引号,长破折号、短破折号都替换成短破折号。\n", 34 | "\n", 35 | "| 全角字符 | 半角字符 |\n", 36 | "| :----: |:----: |\n", 37 | "| `„` | `\"` |\n", 38 | "| `“` | `\"`|\n", 39 | "| `”` | `\"`|\n", 40 | "| `–` | `-` |\n", 41 | "| `—` | ` - ` |\n", 42 | "| `´` | `'` |\n", 43 | "| `‘` | `'` |\n", 44 | "| `‚` | `'` |\n", 45 | "| `’` | `'` |\n", 46 | "| ` | `'` |\n", 47 | "| `''` | `\"` |\n", 48 | "| `''` | 
`\"` |\n", 49 | "\n", 50 | "### 全角和半角字符\n", 51 | "主要涉及到unicode中全角、半角符号之间的转换。因为这些符号在语义上没有区别,如果是英文或者其他拉丁语系可以统一转换成半角字符,如果是中文可以也统一转换为半角字符,在translate的后处理阶段将其恢复为全角字符。可以减少这些符号在翻译时由于不统一引起的歧义,并减少词表的大小,同时也减少了后续分词时的处理复杂度(分词时不需要考虑两套符号的影响了)。\n", 52 | "\n", 53 | "| 全角字符 | 半角字符 |\n", 54 | "| :----: |:----: |\n", 55 | "| `,` | `,` |\n", 56 | "| `。` | `.`|\n", 57 | "| `、` | `,`|\n", 58 | "| `”` | `\"` |\n", 59 | "| `“` | `\"` |\n", 60 | "| `∶` | `:` |\n", 61 | "| `:` | `:` |\n", 62 | "| `?` | `?` |\n", 63 | "| `《` | `<` |\n", 64 | "| `》` | `>` |\n", 65 | "| `)` | `)` |\n", 66 | "| `!` | `!` |\n", 67 | "| `(` | `(` |\n", 68 | "| `;` | `;` |\n", 69 | "| `0` | `0` |\n", 70 | "| `1` | `1` |\n", 71 | "| `2` | `2` |\n", 72 | "| `3` | `3` |\n", 73 | "| `4` | `4` |\n", 74 | "| `5` | `5` |\n", 75 | "| `6` | `6` |\n", 76 | "| `7` | `7` |\n", 77 | "| `8` | `8` |\n", 78 | "| `9` | `9` |\n", 79 | "| `.` | `.` |\n", 80 | "| `~` | `~` |\n", 81 | "| `’` | `,` |\n", 82 | "| `…` | `...` |\n", 83 | "| `━` | `-` |\n", 84 | "| `〈` | `<` |\n", 85 | "| `〉` | `>` |\n", 86 | "| `【` | `[` |\n", 87 | "| `】` | `]` |\n", 88 | "| `%` | `%` |\n", 89 | "\n" 90 | ] 91 | }, 92 | { 93 | "cell_type": "code", 94 | "execution_count": 1, 95 | "metadata": {}, 96 | "outputs": [ 97 | { 98 | "name": "stdout", 99 | "output_type": "stream", 100 | "text": [ 101 | "0\"123\" 456% [789]...\n" 102 | ] 103 | } 104 | ], 105 | "source": [ 106 | "import re\n", 107 | "\n", 108 | "NORMALIZE_UNICODE = [ # lines 37 - 50\n", 109 | " (u'„', r'\"'),\n", 110 | " (u'“', r'\"'),\n", 111 | " (u'”', r'\"'),\n", 112 | " (u'–', r'-'),\n", 113 | " (u'—', r' - '),\n", 114 | " (r' +', r' '),\n", 115 | " (u'´', r\"'\"),\n", 116 | " (u'([a-zA-Z])‘([a-zA-Z])', r\"\\g<1>'\\g<2>\"),\n", 117 | " (u'([a-zA-Z])’([a-zA-Z])', r\"\\g<1>'\\g<2>\"),\n", 118 | " (u'‘', r\"'\"),\n", 119 | " (u'‚', r\"'\"),\n", 120 | " (u'’', r\"'\"),\n", 121 | " (r\"''\", r'\"'),\n", 122 | " (u'´´', r'\"'),\n", 123 | " (u'…', r'...'),\n", 124 | " ]\n", 125 | "\n", 126 | "REPLACE_UNICODE_PUNCTUATION = [\n", 127 | " (u\",\", u\",\"),\n", 128 | " (r\"。\\s*\", u\". \"),\n", 129 | " (u\"、\", u\",\"),\n", 130 | " (u\"”\", u'\"'),\n", 131 | " (u\"“\", u'\"'),\n", 132 | " (u\"∶\", u\":\"),\n", 133 | " (u\":\", u\":\"),\n", 134 | " (u\"?\", u\"?\"),\n", 135 | " (u\"《\", u'\"'),\n", 136 | " (u\"》\", u'\"'),\n", 137 | " (u\")\", u\")\"),\n", 138 | " (u\"!\", u\"!\"),\n", 139 | " (u\"(\", u\"(\"),\n", 140 | " (u\";\", u\";\"),\n", 141 | " (u\"」\", u'\"'),\n", 142 | " (u\"「\", u'\"'),\n", 143 | " (u\"0\", u\"0\"),\n", 144 | " (u\"1\", u'1'),\n", 145 | " (u\"2\", u\"2\"),\n", 146 | " (u\"3\", u\"3\"),\n", 147 | " (u\"4\", u\"4\"),\n", 148 | " (u\"5\", u\"5\"),\n", 149 | " (u\"6\", u\"6\"),\n", 150 | " (u\"7\", u\"7\"),\n", 151 | " (u\"8\", u\"8\"),\n", 152 | " (u\"9\", u\"9\"),\n", 153 | " (r\".\\s*\", u\". 
\"),\n", 154 | " (u\"~\", u\"~\"),\n", 155 | " (u\"’\", u\"'\"),\n", 156 | " (u\"…\", u\"...\"),\n", 157 | " (u\"━\", u\"-\"),\n", 158 | " (u\"〈\", u\"<\"),\n", 159 | " (u\"〉\", u\">\"),\n", 160 | " (u\"【\", u\"[\"),\n", 161 | " (u\"】\", u\"]\"),\n", 162 | " (u\"%\", u\"%\"),\n", 163 | "]\n", 164 | "\n", 165 | "text = \"0《123》 456% 【789】…\"\n", 166 | "for regx, sub in NORMALIZE_UNICODE:\n", 167 | " text = re.sub(regx, sub, text)\n", 168 | " \n", 169 | "for regx, sub in REPLACE_UNICODE_PUNCTUATION:\n", 170 | " text = re.sub(regx, sub, text)\n", 171 | "\n", 172 | "print(text)" 173 | ] 174 | }, 175 | { 176 | "cell_type": "markdown", 177 | "metadata": {}, 178 | "source": [ 179 | "## 去除额外的空格\n", 180 | "需要处理的情况有以下几种\n", 181 | "\n", 182 | "| 情况描述 | 替换内容 | 举例 |\n", 183 | "| :----: |:----: | :----: |\n", 184 | "| \\r(CR) ,将当前位置移到本行开头,会覆盖之前的内容 | 替换为空字符串 | `Hello \\rworld` -> `Hello world`|\n", 185 | "| 正括号前无空格 | 在正括号前添加空格 | `Hello(world)` -> `Hello (world)` | \n", 186 | "| 反括号后无空格 | 在反括号后添加空格 | `Hello (world)` -> `Hello (world) `| \n", 187 | "| 连续多个空格 | 替换为一个空格 | `Hello world` -> `Hello world` | \n", 188 | "| 反括号+空格+其他符号| 将反括号与其他符号间空格去掉 | `Hello (world) .`->`Hello (world).`|\n", 189 | "| 正括号后有空格| 将空格去掉 | `Hello ( world)` -> `Hello (world)` |\n", 190 | "| 反括号前有空格| 将空格去掉 | `Hello (world )` -> `Hello (world)` |\n", 191 | "| 数字与百分号之间有空格 | 将空格去掉 | `20 %` -> `20%` |\n", 192 | "| 冒号前有空格|将空格去掉|`11 :20` -> `11:20`|\n", 193 | "| 分号前有空格|将空格去掉|`hello ; world` -> `hello; world`|" 194 | ] 195 | }, 196 | { 197 | "cell_type": "code", 198 | "execution_count": 2, 199 | "metadata": {}, 200 | "outputs": [ 201 | { 202 | "name": "stdout", 203 | "output_type": "stream", 204 | "text": [ 205 | "The United States in 1805 (color map) _Facing_ 193\n" 206 | ] 207 | } 208 | ], 209 | "source": [ 210 | "EXTRA_WHITESPACE = [ # lines 21 - 30\n", 211 | " (r\"\\r\", r\"\"),\n", 212 | " (r\"\\(\", r\" (\"),\n", 213 | " (r\"\\)\", r\") \"),\n", 214 | " (r\" +\", r\" \"),\n", 215 | " (r\"\\) ([.!:?;,])\", r\")\\g<1>\"),\n", 216 | " (r\"\\( \", r\"(\"),\n", 217 | " (r\" \\)\", r\")\"),\n", 218 | " (r\"(\\d) %\", r\"\\g<1>%\"),\n", 219 | " (r\" :\", r\":\"),\n", 220 | " (r\" ;\", r\";\"),\n", 221 | "]\n", 222 | "\n", 223 | "text = \"The United States in 1805 (color map) _Facing_ 193\"\n", 224 | "for regx, sub in EXTRA_WHITESPACE:\n", 225 | " text = re.sub(regx, sub, text)\n", 226 | "print(text)" 227 | ] 228 | }, 229 | { 230 | "cell_type": "markdown", 231 | "metadata": {}, 232 | "source": [ 233 | "## 去除不间断空格(Non-breaking space)\n", 234 | "什么是不间断空格呢?在unicode中使用`\\u00A0`标识不间断空格。英文写作的时候,我们写的一些词组为了避免他们分开在两行导致人们阅读的时候看不懂,就要把它们写在一起,就用到了不间断空格。这里举个例子来说明。\n", 235 | "![NormalSpacee](assets/NormalSpace.webp)\n", 236 | "这里由于我们输入的是普通空格,在输入空格后将hello和world分开了。如果我们输入一个不间断空格,会怎么样呢?\n", 237 | "![Non-BreakingSpace.webp](assets/Non-BreakingSpace.webp)\n", 238 | "这种空格如果用在单词质检对分词和后续的翻译没有什么影响,但是它经常和一些符号一起出现,我们就需要将它去掉或者做其他处理。譬如`%`,`;`。\n", 239 | "\n", 240 | "| 匹配正则 | 替换 |\n", 241 | "| :----: |:----: |\n", 242 | "| `\\u00A0%` | `%` |\n", 243 | "| `nº\\u00A0` | `nº `|\n", 244 | "| `\\u00A0:` | `:`|\n", 245 | "| `\\u00A0ºC` | ` ºC` |\n", 246 | "| `\\u00A0cm` | ` cm` |\n", 247 | "| `\\u00A0\\\\?` | `?` |\n", 248 | "| `\\u00A0\\\\!` | `!` |\n", 249 | "| `\\u00A0;` | `;` |\n", 250 | "| `,\\u00A0` | `, ` |\n", 251 | "\n", 252 | "### 规范化数字\n", 253 | "如果数字间存在不间断空格,用`.`进行替换。(Moses中是这么做的,这里具体什么原因我也没搞清楚,这里直接写上来)\n", 254 | "\n", 255 | "如`123\\u00A0123` -> `123.123`" 256 | ] 257 | }, 258 | { 259 | "cell_type": "code", 260 | "execution_count": 3, 261 | "metadata": {}, 262 | 
"outputs": [ 263 | { 264 | "name": "stdout", 265 | "output_type": "stream", 266 | "text": [ 267 | "20%, 11.22\n" 268 | ] 269 | } 270 | ], 271 | "source": [ 272 | "HANDLE_PSEUDO_SPACES = [ # lines 59 - 67\n", 273 | " (u'\\u00A0%', r'%'),\n", 274 | " (u'nº\\u00A0', u'nº '),\n", 275 | " (u'\\u00A0:', r':'),\n", 276 | " (u'\\u00A0ºC', u' ºC'),\n", 277 | " (u'\\u00A0cm', r' cm'),\n", 278 | " (u'\\u00A0\\\\?', u'?'),\n", 279 | " (u'\\u00A0\\\\!', u'!'),\n", 280 | " (u'\\u00A0;', r';'),\n", 281 | " (u',\\u00A0', r', '),\n", 282 | " (r' +', r' '),\n", 283 | "]\n", 284 | "\n", 285 | "NORM_NUM = [(u'(\\\\d)\\u00A0(\\\\d)', r'\\g<1>.\\g<2>'),]\n", 286 | "\n", 287 | "text = \"20{PSEUDO_SPACE}%, 11{PSEUDO_SPACE}22\".format(PSEUDO_SPACE=\"\\u00A0\")\n", 288 | "\n", 289 | "for regx, sub in HANDLE_PSEUDO_SPACES:\n", 290 | " text = re.sub(regx, sub, text)\n", 291 | " \n", 292 | "for regx, sub in NORM_NUM:\n", 293 | " text = re.sub(regx, sub, text)\n", 294 | " \n", 295 | "print(text)" 296 | ] 297 | }, 298 | { 299 | "cell_type": "markdown", 300 | "metadata": {}, 301 | "source": [ 302 | "## 删除控制字符\n", 303 | "删除如控制符:LF(换行)、CR(回车)、FF(换页)、DEL(删除)、BS(退格)、BEL(振铃)等。这一步也可以在分词的时候去做。这里不再去写。\n" 304 | ] 305 | } 306 | ], 307 | "metadata": { 308 | "kernelspec": { 309 | "display_name": "Python 3.8.3 64-bit ('tensorflow': conda)", 310 | "language": "python", 311 | "name": "python38364bittensorflowcondaec9a8a5cf0b5432291036442c49a5e7f" 312 | }, 313 | "language_info": { 314 | "codemirror_mode": { 315 | "name": "ipython", 316 | "version": 3 317 | }, 318 | "file_extension": ".py", 319 | "mimetype": "text/x-python", 320 | "name": "python", 321 | "nbconvert_exporter": "python", 322 | "pygments_lexer": "ipython3", 323 | "version": "3.8.3" 324 | } 325 | }, 326 | "nbformat": 4, 327 | "nbformat_minor": 4 328 | } 329 | -------------------------------------------------------------------------------- /tutorials/Chapter2/Normalize.md: -------------------------------------------------------------------------------- 1 | # 规范化 2 | 3 | 在机器翻译的数据处理过程中,规范化是指对数据中的字符表示或者大小写等进行统一,具体包括符号规范化,大小写转换和中文的简繁体转化等。由于数据来源多样,不同的数据集中可能使用不同的符号标准或者大小写规范等。同一个符号,由于使用的编码不同,计算机也会认为是不同的符号或单词。此外,这种多样性会变相地导致数据中各种符号相对稀疏,增大了模型的学习负担。通过规范化,可以将功能相同的符号或者单词表示进行统一,去除其中的噪音,减小词表规模。 4 | 5 | 符号规范化,主要指的是全角或者半角符号的统一。如表所示,虽然其中的百分号、字母‘A’和数字‘9’表示的含义没有变,但是在 Unicode 标准中存在不同的编码表示。因此,需要将不同的编码进行统一。在中英翻译中,通常会根据映射规则将符号全部统一成半角符号。 6 | 7 | | 字符 | Unicode 编码 16 进制 | 8 | | :----: |:----: | 9 | | % | FF05 | 10 | | ﹪ | FF6A | 11 | | % | 25 | 12 | | A | 41 | 13 | | 9 | 39 | 14 | | 9 | FF19 | 15 | 16 | 这一步的原理并不复杂,主要是涉及到字符的替换,使用正则相应的正则表达式可以很容易做到。 17 | 18 | 注:以下的规范化规则均来自[sacremoses](https://github.com/alvations/sacremoses)。是Moses中 [normalize-punctuation.perl](https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/normalize-punctuation.perl)脚本的一个python版本,对于大部分场景来说够用了,我们要做的是了解这些通用脚本里面做了什么,对于垂直领域的翻译,需要根据数据的特征在这些通用脚本的基础上进行增删。如果只把它当做黑盒子来用,我们就无法做定制化的开发处理。毕竟数据的清洗质量的好坏也会在很大程度上影响模型训练的效果。 19 | 20 | ## Unicode中统一字符的替换 21 | ### 与引号破折号相关的字符规范化 22 | 在英文或者其他语言中引号和破折号是有多种字符表达方式的,它们在表意方面没有任何区别,所以我们通常把它们统一进行替换,如两个单引号替换成双引号,长破折号、短破折号都替换成短破折号。 23 | 24 | | 全角字符 | 半角字符 | 25 | | :----: |:----: | 26 | | `„` | `"` | 27 | | `“` | `"`| 28 | | `”` | `"`| 29 | | `–` | `-` | 30 | | `—` | ` - ` | 31 | | `´` | `'` | 32 | | `‘` | `'` | 33 | | `‚` | `'` | 34 | | `’` | `'` | 35 | | ` | `'` | 36 | | `''` | `"` | 37 | | `''` | `"` | 38 | 39 | ### 全角和半角字符 40 | 
主要涉及到unicode中全角、半角符号之间的转换。因为这些符号在语义上没有区别,如果是英文或者其他拉丁语系可以统一转换成半角字符,如果是中文可以也统一转换为半角字符,在translate的后处理阶段将其恢复为全角字符。可以减少这些符号在翻译时由于不统一引起的歧义,并减少词表的大小,同时也减少了后续分词时的处理复杂度(分词时不需要考虑两套符号的影响了)。 41 | 42 | | 全角字符 | 半角字符 | 43 | | :----: |:----: | 44 | | `,` | `,` | 45 | | `。` | `.`| 46 | | `、` | `,`| 47 | | `”` | `"` | 48 | | `“` | `"` | 49 | | `∶` | `:` | 50 | | `:` | `:` | 51 | | `?` | `?` | 52 | | `《` | `<` | 53 | | `》` | `>` | 54 | | `)` | `)` | 55 | | `!` | `!` | 56 | | `(` | `(` | 57 | | `;` | `;` | 58 | | `0` | `0` | 59 | | `1` | `1` | 60 | | `2` | `2` | 61 | | `3` | `3` | 62 | | `4` | `4` | 63 | | `5` | `5` | 64 | | `6` | `6` | 65 | | `7` | `7` | 66 | | `8` | `8` | 67 | | `9` | `9` | 68 | | `.` | `.` | 69 | | `~` | `~` | 70 | | `’` | `,` | 71 | | `…` | `...` | 72 | | `━` | `-` | 73 | | `〈` | `<` | 74 | | `〉` | `>` | 75 | | `【` | `[` | 76 | | `】` | `]` | 77 | | `%` | `%` | 78 | 79 | 80 | 81 | 82 | ```python 83 | import re 84 | 85 | NORMALIZE_UNICODE = [ # lines 37 - 50 86 | (u'„', r'"'), 87 | (u'“', r'"'), 88 | (u'”', r'"'), 89 | (u'–', r'-'), 90 | (u'—', r' - '), 91 | (r' +', r' '), 92 | (u'´', r"'"), 93 | (u'([a-zA-Z])‘([a-zA-Z])', r"\g<1>'\g<2>"), 94 | (u'([a-zA-Z])’([a-zA-Z])', r"\g<1>'\g<2>"), 95 | (u'‘', r"'"), 96 | (u'‚', r"'"), 97 | (u'’', r"'"), 98 | (r"''", r'"'), 99 | (u'´´', r'"'), 100 | (u'…', r'...'), 101 | ] 102 | 103 | REPLACE_UNICODE_PUNCTUATION = [ 104 | (u",", u","), 105 | (r"。\s*", u". "), 106 | (u"、", u","), 107 | (u"”", u'"'), 108 | (u"“", u'"'), 109 | (u"∶", u":"), 110 | (u":", u":"), 111 | (u"?", u"?"), 112 | (u"《", u'"'), 113 | (u"》", u'"'), 114 | (u")", u")"), 115 | (u"!", u"!"), 116 | (u"(", u"("), 117 | (u";", u";"), 118 | (u"」", u'"'), 119 | (u"「", u'"'), 120 | (u"0", u"0"), 121 | (u"1", u'1'), 122 | (u"2", u"2"), 123 | (u"3", u"3"), 124 | (u"4", u"4"), 125 | (u"5", u"5"), 126 | (u"6", u"6"), 127 | (u"7", u"7"), 128 | (u"8", u"8"), 129 | (u"9", u"9"), 130 | (r".\s*", u". "), 131 | (u"~", u"~"), 132 | (u"’", u"'"), 133 | (u"…", u"..."), 134 | (u"━", u"-"), 135 | (u"〈", u"<"), 136 | (u"〉", u">"), 137 | (u"【", u"["), 138 | (u"】", u"]"), 139 | (u"%", u"%"), 140 | ] 141 | 142 | text = "0《123》 456% 【789】…" 143 | for regx, sub in NORMALIZE_UNICODE: 144 | text = re.sub(regx, sub, text) 145 | 146 | for regx, sub in REPLACE_UNICODE_PUNCTUATION: 147 | text = re.sub(regx, sub, text) 148 | 149 | print(text) 150 | ``` 151 | 152 | 0"123" 456% [789]... 
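
Every rule set in this chapter is just a list of `(pattern, replacement)` pairs that gets applied with `re.sub` in a loop, so it can be convenient to wrap that loop once and reuse it. The helper below is a minimal sketch (the name `apply_rules` is ours, not something sacremoses provides); it assumes the `NORMALIZE_UNICODE` and `REPLACE_UNICODE_PUNCTUATION` lists defined above are in scope.

```python
def apply_rules(text, *rule_sets):
    # Apply each (pattern, replacement) pair of each rule set, in the given order.
    for rules in rule_sets:
        for pattern, replacement in rules:
            text = re.sub(pattern, replacement, text)
    return text

# Same input and rule order as above, so it prints: 0"123" 456% [789]...
print(apply_rules("0《123》 456% 【789】…", NORMALIZE_UNICODE, REPLACE_UNICODE_PUNCTUATION))
```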
153 | 154 | 155 | ## 去除额外的空格 156 | 需要处理的情况有以下几种 157 | 158 | | 情况描述 | 替换内容 | 举例 | 159 | | :----: |:----: | :----: | 160 | | \r(CR) ,将当前位置移到本行开头,会覆盖之前的内容 | 替换为空字符串 | `Hello \rworld` -> `Hello world`| 161 | | 正括号前无空格 | 在正括号前添加空格 | `Hello(world)` -> `Hello (world)` | 162 | | 反括号后无空格 | 在反括号后添加空格 | `Hello (world)` -> `Hello (world) `| 163 | | 连续多个空格 | 替换为一个空格 | `Hello world` -> `Hello world` | 164 | | 反括号+空格+其他符号| 将反括号与其他符号间空格去掉 | `Hello (world) .`->`Hello (world).`| 165 | | 正括号后有空格| 将空格去掉 | `Hello ( world)` -> `Hello (world)` | 166 | | 反括号前有空格| 将空格去掉 | `Hello (world )` -> `Hello (world)` | 167 | | 数字与百分号之间有空格 | 将空格去掉 | `20 %` -> `20%` | 168 | | 冒号前有空格|将空格去掉|`11 :20` -> `11:20`| 169 | | 分号前有空格|将空格去掉|`hello ; world` -> `hello; world`| 170 | 171 | 172 | ```python 173 | EXTRA_WHITESPACE = [ # lines 21 - 30 174 | (r"\r", r""), 175 | (r"\(", r" ("), 176 | (r"\)", r") "), 177 | (r" +", r" "), 178 | (r"\) ([.!:?;,])", r")\g<1>"), 179 | (r"\( ", r"("), 180 | (r" \)", r")"), 181 | (r"(\d) %", r"\g<1>%"), 182 | (r" :", r":"), 183 | (r" ;", r";"), 184 | ] 185 | 186 | text = "The United States in 1805 (color map) _Facing_ 193" 187 | for regx, sub in EXTRA_WHITESPACE: 188 | text = re.sub(regx, sub, text) 189 | print(text) 190 | ``` 191 | 192 | The United States in 1805 (color map) _Facing_ 193 193 | 194 | 195 | ## 去除不间断空格(Non-breaking space) 196 | 什么是不间断空格呢?在unicode中使用`\u00A0`标识不间断空格。英文写作的时候,我们写的一些词组为了避免他们分开在两行导致人们阅读的时候看不懂,就要把它们写在一起,就用到了不间断空格。这里举个例子来说明。 197 | ![NormalSpacee](assets/NormalSpace.webp) 198 | 这里由于我们输入的是普通空格,在输入空格后将hello和world分开了。如果我们输入一个不间断空格,会怎么样呢? 199 | ![Non-BreakingSpace.webp](assets/Non-BreakingSpace.webp) 200 | 这种空格如果用在单词质检对分词和后续的翻译没有什么影响,但是它经常和一些符号一起出现,我们就需要将它去掉或者做其他处理。譬如`%`,`;`。 201 | 202 | | 匹配正则 | 替换 | 203 | | :----: |:----: | 204 | | `\u00A0%` | `%` | 205 | | `nº\u00A0` | `nº `| 206 | | `\u00A0:` | `:`| 207 | | `\u00A0ºC` | ` ºC` | 208 | | `\u00A0cm` | ` cm` | 209 | | `\u00A0\\?` | `?` | 210 | | `\u00A0\\!` | `!` | 211 | | `\u00A0;` | `;` | 212 | | `,\u00A0` | `, ` | 213 | 214 | ### 规范化数字 215 | 如果数字间存在不间断空格,用`.`进行替换。(Moses中是这么做的,这里具体什么原因我也没搞清楚,这里直接写上来) 216 | 217 | 如`123\u00A0123` -> `123.123` 218 | 219 | 220 | ```python 221 | HANDLE_PSEUDO_SPACES = [ # lines 59 - 67 222 | (u'\u00A0%', r'%'), 223 | (u'nº\u00A0', u'nº '), 224 | (u'\u00A0:', r':'), 225 | (u'\u00A0ºC', u' ºC'), 226 | (u'\u00A0cm', r' cm'), 227 | (u'\u00A0\\?', u'?'), 228 | (u'\u00A0\\!', u'!'), 229 | (u'\u00A0;', r';'), 230 | (u',\u00A0', r', '), 231 | (r' +', r' '), 232 | ] 233 | 234 | NORM_NUM = [(u'(\\d)\u00A0(\\d)', r'\g<1>.\g<2>'),] 235 | 236 | text = "20{PSEUDO_SPACE}%, 11{PSEUDO_SPACE}22".format(PSEUDO_SPACE="\u00A0") 237 | 238 | for regx, sub in HANDLE_PSEUDO_SPACES: 239 | text = re.sub(regx, sub, text) 240 | 241 | for regx, sub in NORM_NUM: 242 | text = re.sub(regx, sub, text) 243 | 244 | print(text) 245 | ``` 246 | 247 | 20%, 11.22 248 | 249 | 250 | ## 删除控制字符 251 | 删除如控制符:LF(换行)、CR(回车)、FF(换页)、DEL(删除)、BS(退格)、BEL(振铃)等。这一步也可以在分词的时候去做。这里不再去写。 252 | 253 | -------------------------------------------------------------------------------- /tutorials/Chapter2/Truecase.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Truecase\n", 8 | "\n", 9 | "在英语等一些大小写敏感的语言中,一些专有名词和有特殊用法的单词,以及每个句子的首字母都需要进行大写。此外,训练数据中也会包括一些大小写错误的用法。这导致许多单词由于大小写的区分存在多种形式。一种简单的做法是将数据全部进行小写化,这样可以使所有的单词进行统一,大大提升模型预测的准确性。然而,用小写化数据训练的模型翻译结果也都是小写的,需要额外的还原模型对结果进行处理。\n", 10 | "\n", 11 | " 
现在更常用的做法是保留句子中每个单词的正确大小写形式。但是对于句子的首字母,需将其转换成这个单词最常见的形式,如下表所示。\n", 12 | " \n", 13 | " What is the WTO ? \n", 14 | " \n", 15 | " - Lowercase: what is the wto ?\n", 16 | " - Truecase: what is the WTO ? \n", 17 | "\n", 18 | "\n", 19 | "通过这种方式,训练数据中只包含单词的正确大小写形式,大写单词只存在于一些专有名词或者有特殊用法的单词中,在一定程度上减小了词表大小,同时,也去除了一部分数据中由于错误大小写形式所产生的噪音。在翻译结束后,对首字母进行大写就能得到大小写合理的翻译结果。另外,中文存在简繁体两种形式的汉字,训练数据中可能会同时包含这两种形式。因此通常也会考虑把繁体中文转化为简体中文,以统一汉字的编码。\n", 20 | " \n", 21 | "\n", 22 | "本节主要介绍如何训练Truecase模型,对训练数据进行Truecase处理,以及对Truecase数据进行还原(Detruecase)。" 23 | ] 24 | }, 25 | { 26 | "cell_type": "markdown", 27 | "metadata": {}, 28 | "source": [ 29 | "## 训练Truecase模型\n", 30 | "由于Truecase是针对某一种语言的,并不要求一定要使用双语语料进行训练,还可以利用获取成本较低的单语语料进行训练。我们首先准备一个小的数据集来做实验。由于Truecase是以词为单位进行学习训练的,所以在做Truecase之前,先要对语料进行分词处理。这里使用sacremoses中的分词脚本进行分词。具体的分词流程与原理,在EnglishTokenizer章节中进行了详细的介绍。" 31 | ] 32 | }, 33 | { 34 | "cell_type": "code", 35 | "execution_count": 1, 36 | "metadata": {}, 37 | "outputs": [ 38 | { 39 | "name": "stdout", 40 | "output_type": "stream", 41 | "text": [ 42 | "\u001b[33mWARNING: You are using pip version 20.2.4; however, version 20.3.1 is available.\n", 43 | "You should consider upgrading via the '/root/Softwares/anaconda3/bin/python -m pip install --upgrade pip' command.\u001b[0m\n", 44 | "100%|█████████████████████████████████| 128457/128457 [00:15<00:00, 8344.83it/s]\n" 45 | ] 46 | } 47 | ], 48 | "source": [ 49 | "# 安装sacremoses\n", 50 | "!pip -q install -i https://pypi.douban.com/simple sacremoses \n", 51 | "# 获取训练数据\n", 52 | "!wget -q https://gist.githubusercontent.com/alvations/6e878bab0eda2624167aa7ec13fc3e94/raw/4fb3bac1da1ba7a172ff1936e96bee3bc8892931/big.txt\n", 53 | "# 对数据进行分词处理\n", 54 | "!sacremoses -l en -j 4 tokenize < big.txt > big.txt.tok" 55 | ] 56 | }, 57 | { 58 | "cell_type": "code", 59 | "execution_count": 2, 60 | "metadata": {}, 61 | "outputs": [ 62 | { 63 | "name": "stdout", 64 | "output_type": "stream", 65 | "text": [ 66 | "The Project Gutenberg EBook of The Adventures of Sherlock Holmes\r\n", 67 | "by Sir Arthur Conan Doyle\r\n", 68 | "( # 15 in our series by Sir Arthur Conan Doyle )\r\n", 69 | "\r\n", 70 | "Copyright laws are changing all over the world . Be sure to check the\r\n", 71 | "copyright laws for your country before downloading or redistributing\r\n", 72 | "this or any other Project Gutenberg eBook .\r\n", 73 | "\r\n", 74 | "This header should be the first thing seen when viewing this Project\r\n", 75 | "Gutenberg file . Please do not remove it . 
Do not change or edit the\r\n" 76 | ] 77 | } 78 | ], 79 | "source": [ 80 | "!head big.txt.tok" 81 | ] 82 | }, 83 | { 84 | "cell_type": "markdown", 85 | "metadata": {}, 86 | "source": [ 87 | "训练Truecase模型的原理其实非常简单,我们只需要统计每个单词不同形态下的词频。比如单词 “internet”,在我们的训练语料中有三种形态,分别是“internet”,“Internet”,“INTERNET”,这三种形态在训练语料中出现的频率分别是1,100,2次。当模型从训练数据中学习到这种分布特征之后,在平行语料预处理、后处理阶段,就能对不同Case的“internet”进行处理(具体处理方法细节后面会讲)。\n", 88 | "\n", 89 | "首先我们编写统计一句话中每个词不同形式的词频的代码。\n" 90 | ] 91 | }, 92 | { 93 | "cell_type": "code", 94 | "execution_count": 3, 95 | "metadata": {}, 96 | "outputs": [ 97 | { 98 | "data": { 99 | "text/plain": [ 100 | "[('laws', 'laws', 1),\n", 101 | " ('are', 'are', 1),\n", 102 | " ('changing', 'changing', 1),\n", 103 | " ('all', 'all', 1),\n", 104 | " ('over', 'over', 1),\n", 105 | " ('the', 'the', 1),\n", 106 | " ('world', 'world', 1),\n", 107 | " ('sure', 'sure', 1),\n", 108 | " ('to', 'to', 1),\n", 109 | " ('check', 'check', 1),\n", 110 | " ('the', 'the', 1),\n", 111 | " ('copyright', 'copyright', 1),\n", 112 | " ('laws', 'laws', 1),\n", 113 | " ('for', 'for', 1),\n", 114 | " ('your', 'your', 1),\n", 115 | " ('country', 'country', 1),\n", 116 | " ('before', 'before', 1),\n", 117 | " ('downloading', 'downloading', 1),\n", 118 | " ('or', 'or', 1),\n", 119 | " ('redistributing', 'redistributing', 1),\n", 120 | " ('this', 'this', 1),\n", 121 | " ('or', 'or', 1),\n", 122 | " ('any', 'any', 1),\n", 123 | " ('other', 'other', 1),\n", 124 | " ('project', 'Project', 1),\n", 125 | " ('gutenberg', 'Gutenberg', 1),\n", 126 | " ('ebook', 'eBook', 1)]" 127 | ] 128 | }, 129 | "execution_count": 3, 130 | "metadata": {}, 131 | "output_type": "execute_result" 132 | } 133 | ], 134 | "source": [ 135 | "import re\n", 136 | "\n", 137 | "# 如果遇到这些词,这些词不能作为句子的开头,通常下一个词才是。如“( Additional editing by Jose Menendez )”\n", 138 | "DELAYED_SENT_START = {\n", 139 | " \"(\",\n", 140 | " \"[\",\n", 141 | " '\"',\n", 142 | " \"'\",\n", 143 | " \"'\",\n", 144 | " \""\",\n", 145 | " \"[\",\n", 146 | " \"]\",\n", 147 | "}\n", 148 | "\n", 149 | "# 如果遇到这些词意味着当前句子结束,下一个单词可能是句子的开头。\n", 150 | "SENT_END = {\".\", \":\", \"?\", \"!\"}\n", 151 | "\n", 152 | "# 该正则用于跳过不包含大写字母、小写字母和标题字母的词。如纯数字,纯符号 “( # 15 in our series by Sir Arthur Conan Doyle )”\n", 153 | "Lowercase_Letter = open(\"assets/Lowercase_Letter.txt\").read()\n", 154 | "Uppercase_Letter = open(\"assets/Uppercase_Letter.txt\").read()\n", 155 | "Titlecase_Letter = open(\"assets/Titlecase_Letter.txt\").read()\n", 156 | "\n", 157 | "SKIP_LETTERS_REGEX = re.compile(\n", 158 | " u\"[{}{}{}]\".format(\n", 159 | " Lowercase_Letter, Uppercase_Letter, Titlecase_Letter\n", 160 | " )\n", 161 | ")\n", 162 | "\n", 163 | "def learn_truecase_weights(tokens):\n", 164 | " \"\"\"\n", 165 | " tokens: 句子的分词结果.\n", 166 | " \"\"\"\n", 167 | " # 下一个词是否是句首单词的标记,如果是句首单词可能不计入统计\n", 168 | " is_first_word = True\n", 169 | " truecase_weights = []\n", 170 | " for i, token in enumerate(tokens):\n", 171 | " # 跳过xml标记中的词。这些词在分词时往往是一个整体,里面的词的Case与句首词语Case一样没有统计意义。\n", 172 | " if re.search(r\"(<\\S[^>]*>)\", token):\n", 173 | " continue\n", 174 | " # 如果遇到这些词,这些词不能作为句子的开头,通常下一个词才是。如“( Additional editing by Jose Menendez )”\n", 175 | " elif token in DELAYED_SENT_START:\n", 176 | " continue\n", 177 | "\n", 178 | " # 如果遇到这些词意味着当前句子结束,下一个单词可能是句子的开头。重置 is_first_word\n", 179 | " if not is_first_word and token in SENT_END:\n", 180 | " is_first_word = True\n", 181 | " continue\n", 182 | " # 跳过不需要进行大小写统计的词,如数字、符号或者他们的组合\n", 183 | " if not SKIP_LETTERS_REGEX.search(token):\n", 184 | " is_first_word = False\n", 185 | " 
continue\n", 186 | "\n", 187 | " # 将当前词的统计结果加入到truecase_weights中。如 (lowercasetoken, LowerCaseToken, 1)\n", 188 | " current_word_weight = 0\n", 189 | " if not is_first_word:\n", 190 | " current_word_weight = 1\n", 191 | "\n", 192 | " is_first_word = False\n", 193 | "\n", 194 | " if current_word_weight > 0:\n", 195 | " truecase_weights.append((token.lower(), token, current_word_weight))\n", 196 | " return truecase_weights\n", 197 | "\n", 198 | "example = \"Copyright laws are changing all over the world . Be sure to check the copyright laws for your country before downloading or redistributing this or any other Project Gutenberg eBook .\"\n", 199 | "learn_truecase_weights(example.split())" 200 | ] 201 | }, 202 | { 203 | "cell_type": "markdown", 204 | "metadata": {}, 205 | "source": [ 206 | "接下来,我们对训练语料中的每一句话的词频做统计,并将统计结果合并。" 207 | ] 208 | }, 209 | { 210 | "cell_type": "code", 211 | "execution_count": 4, 212 | "metadata": {}, 213 | "outputs": [], 214 | "source": [ 215 | "# 首先读取训练数据\n", 216 | "with open(\"big.txt.tok\", 'r') as f:\n", 217 | " corpus = f.readlines()\n", 218 | "\n", 219 | "from collections import defaultdict, Counter\n", 220 | "# 数据结构用于统计每个单词不同词频\n", 221 | "casing = defaultdict(Counter)\n", 222 | "\n", 223 | "token_weights = []\n", 224 | "for line in corpus:\n", 225 | " token_weights.extend(learn_truecase_weights(line.split()))\n", 226 | "\n", 227 | "for lowercase_token, surface_token, weight in token_weights:\n", 228 | " casing[lowercase_token][surface_token] += weight\n", 229 | "\n", 230 | "# 将统计结果分成best,known两部分。best表示统计频数最高的大小写形式,know表示其他的大小写形式\n", 231 | "best = {}\n", 232 | "# 此处为了保证know中的每个元素可以通过字典的形式访问,所以这里用一个Counter,每个元素的值默认为1\n", 233 | "known = Counter()\n", 234 | "\n", 235 | "for token_lower in casing:\n", 236 | " tokens = casing[token_lower].most_common()\n", 237 | " best[token_lower] = tokens[0][0]\n", 238 | " for token, count in tokens[1:]:\n", 239 | " known[token] += 1\n", 240 | "model = {\"best\": best, \"known\": known, \"casing\": casing} " 241 | ] 242 | }, 243 | { 244 | "cell_type": "markdown", 245 | "metadata": {}, 246 | "source": [ 247 | "在进行Truecase操作前,输入的文本通常是经过分词处理后的文本,首先将他们以空格为分隔符分成单词(如果文本中有xml格式的文本,也将其包裹的单词分割开来。)" 248 | ] 249 | }, 250 | { 251 | "cell_type": "code", 252 | "execution_count": 5, 253 | "metadata": {}, 254 | "outputs": [ 255 | { 256 | "data": { 257 | "text/plain": [ 258 | "['hello', '', 'Reminder', '']" 259 | ] 260 | }, 261 | "execution_count": 5, 262 | "metadata": {}, 263 | "output_type": "execute_result" 264 | } 265 | ], 266 | "source": [ 267 | "def split_xml(line):\n", 268 | " \"\"\"\n", 269 | " 将文本以空格为分隔字符分开,如果文本中包含xml格式的文本,也将他们分开。\n", 270 | " 如 hello Reminder 会将它分割成\n", 271 | " ['hello', '', 'Reminder', '']\n", 272 | " \"\"\"\n", 273 | " line = line.strip()\n", 274 | " tokens = []\n", 275 | " while line:\n", 276 | " # Assumes that xml tag is always separated by space.\n", 277 | " has_xml = re.search(r\"^\\s*(<\\S[^>]*>)(.*)$\", line)\n", 278 | " # non-XML test.\n", 279 | " is_non_xml = re.search(r\"^\\s*([^\\s<>]+)(.*)$\", line)\n", 280 | " # '<' or '>' occurs in word, but it's not an XML tag\n", 281 | " xml_cognates = re.search(r\"^\\s*(\\S+)(.*)$\", line)\n", 282 | " if has_xml:\n", 283 | " potential_xml, line_next = has_xml.groups()\n", 284 | " # exception for factor that is an XML tag\n", 285 | " if (\n", 286 | " re.search(r\"^\\S\", line)\n", 287 | " and len(tokens) > 0\n", 288 | " and re.search(r\"\\|$\", tokens[-1])\n", 289 | " ):\n", 290 | " tokens[-1] += potential_xml\n", 291 | " # If it's a token with factors, join with the 
previous token.\n", 292 | " is_factor = re.search(r\"^(\\|+)(.*)$\", line_next)\n", 293 | " if is_factor:\n", 294 | " tokens[-1] += is_factor.group(1)\n", 295 | " line_next = is_factor.group(2)\n", 296 | " else:\n", 297 | " tokens.append(\n", 298 | " potential_xml + \" \"\n", 299 | " ) # Token hack, unique to sacremoses.\n", 300 | " line = line_next\n", 301 | "\n", 302 | " elif is_non_xml:\n", 303 | " tokens.append(is_non_xml.group(1)) # Token hack, unique to sacremoses.\n", 304 | " line = is_non_xml.group(2)\n", 305 | " elif xml_cognates:\n", 306 | " tokens.append(\n", 307 | " xml_cognates.group(1)\n", 308 | " ) # Token hack, unique to sacremoses.\n", 309 | " line = xml_cognates.group(2)\n", 310 | " else:\n", 311 | " raise Exception(\"ERROR: huh? {}\".format(line))\n", 312 | " tokens[-1] = tokens[-1].strip() # Token hack, unique to sacremoses.\n", 313 | " return tokens\n", 314 | "\n", 315 | "text = \"hello Reminder\"\n", 316 | "split_xml(text)" 317 | ] 318 | }, 319 | { 320 | "cell_type": "markdown", 321 | "metadata": {}, 322 | "source": [ 323 | "有了模型和输入文本之后,我们就可以使用模型对文本进行Truecase处理。" 324 | ] 325 | }, 326 | { 327 | "cell_type": "code", 328 | "execution_count": 6, 329 | "metadata": {}, 330 | "outputs": [], 331 | "source": [ 332 | "def truecase(text, model, return_str=False, use_known=False):\n", 333 | " \"\"\"\n", 334 | " 对一句话或者一段文本进行Truecase操作\n", 335 | " \n", 336 | " Args:\n", 337 | " text (str): 输入文本(已经经过分词处理)\n", 338 | " model (dict): 从训练数据中学习到的case的统计数据\n", 339 | " return_str (bool, optional): 以str的形式返回还是以List[str]的形式返回. Defaults to True.\n", 340 | " use_known (bool, optional): 当该参数为True时,当某个词不是句首单词,并且是在训练数据中出现过的大小写形式,则保留原大小写形式不变。\n", 341 | " 当该参数为False时,优先使用该词最常见的大小写形式\n", 342 | " \"\"\"\n", 343 | " # 记录当前单词是否应为句首单词\n", 344 | " is_first_word = True\n", 345 | " truecased_tokens = []\n", 346 | " tokens = split_xml(text)\n", 347 | "\n", 348 | " for i, token in enumerate(tokens):\n", 349 | " # 这里以 ”|“ 符号开头的单词不做处理。注:这里为什么要对这个符号做特殊处理还不太清除\n", 350 | " if token == \"|\" or token.startswith(\"|\"):\n", 351 | " truecased_tokens.append(token)\n", 352 | " continue\n", 353 | " \n", 354 | " # 处理这种情况 ”hello|thankyou“ -> token=\"hello\", other_fectors=\"|thankyou\"是处理词中有”|符号的情况“\n", 355 | " token, other_factors = re.search(r\"^([^\\|]+)(.*)\", token).groups()\n", 356 | "\n", 357 | " # 最常见的(训练中频数最高的)单词大小写形式\n", 358 | " best_case = model[\"best\"].get(token.lower(), None)\n", 359 | " # 其他的单词大小写形式\n", 360 | " known_case = model[\"known\"].get(token, None)\n", 361 | " \n", 362 | " if is_first_word and best_case: # 句首单词采用最常见的大小写形式\n", 363 | " token = best_case\n", 364 | " elif known_case and use_known: # 在训练集中出现过的并且use_known=True大小写形式保持不变\n", 365 | " token = token\n", 366 | " elif (\n", 367 | " best_case\n", 368 | " ): # 如果匹配到best_case使用最常见的大小写形式\n", 369 | " token = best_case\n", 370 | " # 否则是没有见过的单词,大小写形式也保持不变\n", 371 | " \n", 372 | " # 处理之前以”|“将词分开的情况,将他们重新拼接在一起\n", 373 | " token = token + other_factors\n", 374 | " # Adds the truecased\n", 375 | " truecased_tokens.append(token)\n", 376 | "\n", 377 | " # 遇见句末标点重置句首标志\n", 378 | " is_first_word = token in SENT_END\n", 379 | " \n", 380 | " # 延迟将句首标志置为False\n", 381 | " if token in DELAYED_SENT_START:\n", 382 | " is_first_word = False\n", 383 | "\n", 384 | " # 根据return_str参数判断是以词的形式返回还是以字符串的形式返回\n", 385 | " return \" \".join(truecased_tokens) if return_str else truecased_tokens" 386 | ] 387 | }, 388 | { 389 | "cell_type": "markdown", 390 | "metadata": {}, 391 | "source": [ 392 | "找一段文本来试一下效果" 393 | ] 394 | }, 395 | { 396 | "cell_type": "code", 397 | 
"execution_count": 7, 398 | "metadata": {}, 399 | "outputs": [ 400 | { 401 | "name": "stdout", 402 | "output_type": "stream", 403 | "text": [ 404 | "you can also find out about how to make a donation to Project Gutenberg, and how to get involved.\n" 405 | ] 406 | } 407 | ], 408 | "source": [ 409 | "input_str = \"You can also find out about how to make a donation to Project Gutenberg, and how to get involved.\"\n", 410 | "output_str = truecase(input_str, model, return_str=True)\n", 411 | "print(output_str)" 412 | ] 413 | }, 414 | { 415 | "cell_type": "markdown", 416 | "metadata": {}, 417 | "source": [ 418 | "可以看到,首字母You变成了小写,人名Project Gutenberg还保留了原来的形式。" 419 | ] 420 | } 421 | ], 422 | "metadata": { 423 | "kernelspec": { 424 | "display_name": "Python 3", 425 | "language": "python", 426 | "name": "python3" 427 | }, 428 | "language_info": { 429 | "codemirror_mode": { 430 | "name": "ipython", 431 | "version": 3 432 | }, 433 | "file_extension": ".py", 434 | "mimetype": "text/x-python", 435 | "name": "python", 436 | "nbconvert_exporter": "python", 437 | "pygments_lexer": "ipython3", 438 | "version": "3.7.0" 439 | } 440 | }, 441 | "nbformat": 4, 442 | "nbformat_minor": 2 443 | } 444 | -------------------------------------------------------------------------------- /tutorials/Chapter2/Truecase.md: -------------------------------------------------------------------------------- 1 | 2 | # Truecase 3 | 4 | 在英语等一些大小写敏感的语言中,一些专有名词和有特殊用法的单词,以及每个句子的首字母都需要进行大写。此外,训练数据中也会包括一些大小写错误的用法。这导致许多单词由于大小写的区分存在多种形式。一种简单的做法是将数据全部进行小写化,这样可以使所有的单词进行统一,大大提升模型预测的准确性。然而,用小写化数据训练的模型翻译结果也都是小写的,需要额外的还原模型对结果进行处理。 5 | 6 | 现在更常用的做法是保留句子中每个单词的正确大小写形式。但是对于句子的首字母,需将其转换成这个单词最常见的形式,如下表所示。 7 | 8 | What is the WTO ? 9 | 10 | - Lowercase: what is the wto ? 11 | - Truecase: what is the WTO ? 12 | 13 | 14 | 通过这种方式,训练数据中只包含单词的正确大小写形式,大写单词只存在于一些专有名词或者有特殊用法的单词中,在一定程度上减小了词表大小,同时,也去除了一部分数据中由于错误大小写形式所产生的噪音。在翻译结束后,对首字母进行大写就能得到大小写合理的翻译结果。另外,中文存在简繁体两种形式的汉字,训练数据中可能会同时包含这两种形式。因此通常也会考虑把繁体中文转化为简体中文,以统一汉字的编码。 15 | 16 | 17 | 本节主要介绍如何训练Truecase模型,对训练数据进行Truecase处理,以及对Truecase数据进行还原(Detruecase)。 18 | 19 | ## 训练Truecase模型 20 | 由于Truecase是针对某一种语言的,并不要求一定要使用双语语料进行训练,还可以利用获取成本较低的单语语料进行训练。我们首先准备一个小的数据集来做实验。由于Truecase是以词为单位进行学习训练的,所以在做Truecase之前,先要对语料进行分词处理。这里使用sacremoses中的分词脚本进行分词。具体的分词流程与原理,在EnglishTokenizer章节中进行了详细的介绍。 21 | 22 | 23 | ```python 24 | # 安装sacremoses 25 | !pip -q install -i https://pypi.douban.com/simple sacremoses 26 | # 获取训练数据 27 | !wget -q https://gist.githubusercontent.com/alvations/6e878bab0eda2624167aa7ec13fc3e94/raw/4fb3bac1da1ba7a172ff1936e96bee3bc8892931/big.txt 28 | # 对数据进行分词处理 29 | !sacremoses -l en -j 4 tokenize < big.txt > big.txt.tok 30 | ``` 31 | 32 | WARNING: You are using pip version 20.2.4; however, version 20.3.1 is available. 33 | You should consider upgrading via the '/root/Softwares/anaconda3/bin/python -m pip install --upgrade pip' command. 34 | 100%|█████████████████████████████████| 128457/128457 [00:15<00:00, 8344.83it/s] 35 | 36 | 37 | 38 | ```python 39 | !head big.txt.tok 40 | ``` 41 | 42 | The Project Gutenberg EBook of The Adventures of Sherlock Holmes 43 | by Sir Arthur Conan Doyle 44 | ( # 15 in our series by Sir Arthur Conan Doyle ) 45 | 46 | Copyright laws are changing all over the world . Be sure to check the 47 | copyright laws for your country before downloading or redistributing 48 | this or any other Project Gutenberg eBook . 49 | 50 | This header should be the first thing seen when viewing this Project 51 | Gutenberg file . Please do not remove it . 
Do not change or edit the 52 | 53 | 54 | 训练Truecase模型的原理其实非常简单,我们只需要统计每个单词不同形态下的词频。比如单词 “internet”,在我们的训练语料中有三种形态,分别是“internet”,“Internet”,“INTERNET”,这三种形态在训练语料中出现的频率分别是1,100,2次。当模型从训练数据中学习到这种分布特征之后,在平行语料预处理、后处理阶段,就能对不同Case的“internet”进行处理(具体处理方法细节后面会讲)。 55 | 56 | 首先我们编写统计一句话中每个词不同形式的词频的代码。 57 | 58 | 59 | 60 | ```python 61 | import re 62 | 63 | # 如果遇到这些词,这些词不能作为句子的开头,通常下一个词才是。如“( Additional editing by Jose Menendez )” 64 | DELAYED_SENT_START = { 65 | "(", 66 | "[", 67 | '"', 68 | "'", 69 | "'", 70 | """, 71 | "[", 72 | "]", 73 | } 74 | 75 | # 如果遇到这些词意味着当前句子结束,下一个单词可能是句子的开头。 76 | SENT_END = {".", ":", "?", "!"} 77 | 78 | # 该正则用于跳过不包含大写字母、小写字母和标题字母的词。如纯数字,纯符号 “( # 15 in our series by Sir Arthur Conan Doyle )” 79 | Lowercase_Letter = open("assets/Lowercase_Letter.txt").read() 80 | Uppercase_Letter = open("assets/Uppercase_Letter.txt").read() 81 | Titlecase_Letter = open("assets/Titlecase_Letter.txt").read() 82 | 83 | SKIP_LETTERS_REGEX = re.compile( 84 | u"[{}{}{}]".format( 85 | Lowercase_Letter, Uppercase_Letter, Titlecase_Letter 86 | ) 87 | ) 88 | 89 | def learn_truecase_weights(tokens): 90 | """ 91 | tokens: 句子的分词结果. 92 | """ 93 | # 下一个词是否是句首单词的标记,如果是句首单词可能不计入统计 94 | is_first_word = True 95 | truecase_weights = [] 96 | for i, token in enumerate(tokens): 97 | # 跳过xml标记中的词。这些词在分词时往往是一个整体,里面的词的Case与句首词语Case一样没有统计意义。 98 | if re.search(r"(<\S[^>]*>)", token): 99 | continue 100 | # 如果遇到这些词,这些词不能作为句子的开头,通常下一个词才是。如“( Additional editing by Jose Menendez )” 101 | elif token in DELAYED_SENT_START: 102 | continue 103 | 104 | # 如果遇到这些词意味着当前句子结束,下一个单词可能是句子的开头。重置 is_first_word 105 | if not is_first_word and token in SENT_END: 106 | is_first_word = True 107 | continue 108 | # 跳过不需要进行大小写统计的词,如数字、符号或者他们的组合 109 | if not SKIP_LETTERS_REGEX.search(token): 110 | is_first_word = False 111 | continue 112 | 113 | # 将当前词的统计结果加入到truecase_weights中。如 (lowercasetoken, LowerCaseToken, 1) 114 | current_word_weight = 0 115 | if not is_first_word: 116 | current_word_weight = 1 117 | 118 | is_first_word = False 119 | 120 | if current_word_weight > 0: 121 | truecase_weights.append((token.lower(), token, current_word_weight)) 122 | return truecase_weights 123 | 124 | example = "Copyright laws are changing all over the world . Be sure to check the copyright laws for your country before downloading or redistributing this or any other Project Gutenberg eBook ." 
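# Note how the weights below are collected: the sentence-initial "Copyright" and the
# "Be" right after the first "." are skipped (they carry no casing evidence), while
# mid-sentence tokens such as the lowercase "copyright", "Project", "Gutenberg" and
# "eBook" each contribute one casing observation with weight 1.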
125 | learn_truecase_weights(example.split()) 126 | ``` 127 | 128 | 129 | 130 | 131 | [('laws', 'laws', 1), 132 | ('are', 'are', 1), 133 | ('changing', 'changing', 1), 134 | ('all', 'all', 1), 135 | ('over', 'over', 1), 136 | ('the', 'the', 1), 137 | ('world', 'world', 1), 138 | ('sure', 'sure', 1), 139 | ('to', 'to', 1), 140 | ('check', 'check', 1), 141 | ('the', 'the', 1), 142 | ('copyright', 'copyright', 1), 143 | ('laws', 'laws', 1), 144 | ('for', 'for', 1), 145 | ('your', 'your', 1), 146 | ('country', 'country', 1), 147 | ('before', 'before', 1), 148 | ('downloading', 'downloading', 1), 149 | ('or', 'or', 1), 150 | ('redistributing', 'redistributing', 1), 151 | ('this', 'this', 1), 152 | ('or', 'or', 1), 153 | ('any', 'any', 1), 154 | ('other', 'other', 1), 155 | ('project', 'Project', 1), 156 | ('gutenberg', 'Gutenberg', 1), 157 | ('ebook', 'eBook', 1)] 158 | 159 | 160 | 161 | 接下来,我们对训练语料中的每一句话的词频做统计,并将统计结果合并。 162 | 163 | 164 | ```python 165 | # 首先读取训练数据 166 | with open("big.txt.tok", 'r') as f: 167 | corpus = f.readlines() 168 | 169 | from collections import defaultdict, Counter 170 | # 数据结构用于统计每个单词不同词频 171 | casing = defaultdict(Counter) 172 | 173 | token_weights = [] 174 | for line in corpus: 175 | token_weights.extend(learn_truecase_weights(line.split())) 176 | 177 | for lowercase_token, surface_token, weight in token_weights: 178 | casing[lowercase_token][surface_token] += weight 179 | 180 | # 将统计结果分成best,known两部分。best表示统计频数最高的大小写形式,know表示其他的大小写形式 181 | best = {} 182 | # 此处为了保证know中的每个元素可以通过字典的形式访问,所以这里用一个Counter,每个元素的值默认为1 183 | known = Counter() 184 | 185 | for token_lower in casing: 186 | tokens = casing[token_lower].most_common() 187 | best[token_lower] = tokens[0][0] 188 | for token, count in tokens[1:]: 189 | known[token] += 1 190 | model = {"best": best, "known": known, "casing": casing} 191 | ``` 192 | 193 | 在进行Truecase操作前,输入的文本通常是经过分词处理后的文本,首先将他们以空格为分隔符分成单词(如果文本中有xml格式的文本,也将其包裹的单词分割开来。) 194 | 195 | 196 | ```python 197 | def split_xml(line): 198 | """ 199 | 将文本以空格为分隔字符分开,如果文本中包含xml格式的文本,也将他们分开。 200 | 如 hello Reminder 会将它分割成 201 | ['hello', '', 'Reminder', ''] 202 | """ 203 | line = line.strip() 204 | tokens = [] 205 | while line: 206 | # Assumes that xml tag is always separated by space. 207 | has_xml = re.search(r"^\s*(<\S[^>]*>)(.*)$", line) 208 | # non-XML test. 209 | is_non_xml = re.search(r"^\s*([^\s<>]+)(.*)$", line) 210 | # '<' or '>' occurs in word, but it's not an XML tag 211 | xml_cognates = re.search(r"^\s*(\S+)(.*)$", line) 212 | if has_xml: 213 | potential_xml, line_next = has_xml.groups() 214 | # exception for factor that is an XML tag 215 | if ( 216 | re.search(r"^\S", line) 217 | and len(tokens) > 0 218 | and re.search(r"\|$", tokens[-1]) 219 | ): 220 | tokens[-1] += potential_xml 221 | # If it's a token with factors, join with the previous token. 222 | is_factor = re.search(r"^(\|+)(.*)$", line_next) 223 | if is_factor: 224 | tokens[-1] += is_factor.group(1) 225 | line_next = is_factor.group(2) 226 | else: 227 | tokens.append( 228 | potential_xml + " " 229 | ) # Token hack, unique to sacremoses. 230 | line = line_next 231 | 232 | elif is_non_xml: 233 | tokens.append(is_non_xml.group(1)) # Token hack, unique to sacremoses. 234 | line = is_non_xml.group(2) 235 | elif xml_cognates: 236 | tokens.append( 237 | xml_cognates.group(1) 238 | ) # Token hack, unique to sacremoses. 239 | line = xml_cognates.group(2) 240 | else: 241 | raise Exception("ERROR: huh? {}".format(line)) 242 | tokens[-1] = tokens[-1].strip() # Token hack, unique to sacremoses. 
243 |     return tokens
244 | 
245 | text = "hello <b> Reminder </b>"
246 | split_xml(text)
247 | ```
248 | 
249 | 
250 | 
251 | 
252 |     ['hello', '<b> ', 'Reminder', '</b>']
253 | 
254 | 
255 | 
256 | Once we have the model and the input text, we can use the model to truecase the text.
257 | 
258 | 
259 | ```python
260 | def truecase(text, model, return_str=False, use_known=False):
261 |     """
262 |     Truecase a sentence or a piece of text.
263 | 
264 |     Args:
265 |         text (str): the input text (already tokenized)
266 |         model (dict): the casing statistics learned from the training data
267 |         return_str (bool, optional): whether to return a str or a List[str]. Defaults to False.
268 |         use_known (bool, optional): if True, a token that is not sentence-initial and whose casing was observed in the training data keeps its original casing;
269 |                                     if False, the most frequent casing of the token is preferred.
270 |     """
271 |     # Track whether the current token should be treated as a sentence-initial word
272 |     is_first_word = True
273 |     truecased_tokens = []
274 |     tokens = split_xml(text)
275 | 
276 |     for i, token in enumerate(tokens):
277 |         # Tokens starting with "|" are left untouched. These are Moses-style factored tokens, and the casing logic below only applies to the surface form.
278 |         if token == "|" or token.startswith("|"):
279 |             truecased_tokens.append(token)
280 |             continue
281 | 
282 |         # Handle tokens that carry factors, e.g. "hello|thankyou" -> token="hello", other_factors="|thankyou"
283 |         token, other_factors = re.search(r"^([^\|]+)(.*)", token).groups()
284 | 
285 |         # The most frequent casing of the token (the one with the highest count in training)
286 |         best_case = model["best"].get(token.lower(), None)
287 |         # Other casings of the token observed in training
288 |         known_case = model["known"].get(token, None)
289 | 
290 |         if is_first_word and best_case:  # Sentence-initial tokens take the most frequent casing
291 |             token = best_case
292 |         elif known_case and use_known:  # A casing seen in training is kept unchanged when use_known=True
293 |             token = token
294 |         elif (
295 |             best_case
296 |         ):  # Otherwise, if there is a best_case match, use the most frequent casing
297 |             token = best_case
298 |         # Otherwise the token was never seen in training, and its casing is also left unchanged
299 | 
300 |         # Re-attach the factors that were split off at the "|" above
301 |         token = token + other_factors
302 |         # Add the truecased token
303 |         truecased_tokens.append(token)
304 | 
305 |         # Reset the sentence-start flag when an end-of-sentence punctuation mark is seen
306 |         is_first_word = token in SENT_END
307 | 
308 |         # For delayed sentence-start symbols, the sentence-start flag is set to False here
309 |         if token in DELAYED_SENT_START:
310 |             is_first_word = False
311 | 
312 |     # Return a single string or a list of tokens, depending on return_str
313 |     return " ".join(truecased_tokens) if return_str else truecased_tokens
314 | ```
315 | 
316 | Let's grab a piece of text and try it out.
317 | 
318 | 
319 | ```python
320 | input_str = "You can also find out about how to make a donation to Project Gutenberg, and how to get involved."
321 | output_str = truecase(input_str, model, return_str=True)
322 | print(output_str)
323 | ```
324 | 
325 | you can also find out about how to make a donation to Project Gutenberg, and how to get involved.
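To see why the output comes out this way, it helps to peek at what the model actually learned. The sketch below simply reuses the `model` dictionary built earlier in this notebook; the handful of words inspected here were picked arbitrarily from the test sentence.

```python
# Look up the most frequent casing the model learned for a few words
# (assumes the `model` built in the training cell above is still in memory).
for word in ["you", "project", "gutenberg", "donation"]:
    print(word, "->", model["best"].get(word, "<not seen in training>"))
```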
326 | 327 | 328 | 可以看到,首字母You变成了小写,人名Project Gutenberg还保留了原来的形式。 329 | -------------------------------------------------------------------------------- /tutorials/Chapter2/assets/DAG.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BrightXiaoHan/MachineTranslationTutorial/2c36dfa18fb2d3fb9ba6264f1c19af79dd69200f/tutorials/Chapter2/assets/DAG.jpg -------------------------------------------------------------------------------- /tutorials/Chapter2/assets/HMM分词篱笆型图.drawio.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BrightXiaoHan/MachineTranslationTutorial/2c36dfa18fb2d3fb9ba6264f1c19af79dd69200f/tutorials/Chapter2/assets/HMM分词篱笆型图.drawio.png -------------------------------------------------------------------------------- /tutorials/Chapter2/assets/HMM模型.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BrightXiaoHan/MachineTranslationTutorial/2c36dfa18fb2d3fb9ba6264f1c19af79dd69200f/tutorials/Chapter2/assets/HMM模型.png -------------------------------------------------------------------------------- /tutorials/Chapter2/assets/IsAlnum.txt: -------------------------------------------------------------------------------- 1 | 0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzªµºÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿĀāĂ㥹ĆćĈĉĊċČčĎďĐđĒēĔĕĖėĘęĚěĜĝĞğĠġĢģĤĥĦħĨĩĪīĬĭĮįİıIJijĴĵĶķĸĹĺĻļĽľĿŀŁłŃńŅņŇňʼnŊŋŌōŎŏŐőŒœŔŕŖŗŘřŚśŜŝŞşŠšŢţŤťŦŧŨũŪūŬŭŮůŰűŲųŴŵŶŷŸŹźŻżŽžſƀƁƂƃƄƅƆƇƈƉƊƋƌƍƎƏƐƑƒƓƔƕƖƗƘƙƚƛƜƝƞƟƠơƢƣƤƥƦƧƨƩƪƫƬƭƮƯưƱƲƳƴƵƶƷƸƹƺƻƼƽƾƿǀǁǂǃDŽDždžLJLjljNJNjnjǍǎǏǐǑǒǓǔǕǖǗǘǙǚǛǜǝǞǟǠǡǢǣǤǥǦǧǨǩǪǫǬǭǮǯǰDZDzdzǴǵǶǷǸǹǺǻǼǽǾǿȀȁȂȃȄȅȆȇȈȉȊȋȌȍȎȏȐȑȒȓȔȕȖȗȘșȚțȜȝȞȟȠȡȢȣȤȥȦȧȨȩȪȫȬȭȮȯȰȱȲȳȴȵȶȷȸȹȺȻȼȽȾȿɀɁɂɃɄɅɆɇɈɉɊɋɌɍɎɏɐɑɒɓɔɕɖɗɘəɚɛɜɝɞɟɠɡɢɣɤɥɦɧɨɩɪɫɬɭɮɯɰɱɲɳɴɵɶɷɸɹɺɻɼɽɾɿʀʁʂʃʄʅʆʇʈʉʊʋʌʍʎʏʐʑʒʓʔʕʖʗʘʙʚʛʜʝʞʟʠʡʢʣʤʥʦʧʨʩʪʫʬʭʮʯʰʱʲʳʴʵʶʷʸʹʺʻʼʽʾʿˀˁˆˇˈˉˊˋˌˍˎˏːˑˠˡˢˣˤˬˮ◌ͅͰͱͲͳʹͶͷͺͻͼͽͿΆΈΉΊΌΎΏΐΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΤΥΦΧΨΩΪΫάέήίΰαβγδεζηθικλμνξοπρςστυφχψωϊϋόύώϏϐϑϒϓϔϕϖϗϘϙϚϛϜϝϞϟϠϡϢϣϤϥϦϧϨϩϪϫϬϭϮϯϰϱϲϳϴϵϷϸϹϺϻϼϽϾϿЀЁЂЃЄЅІЇЈЉЊЋЌЍЎЏАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяѐёђѓєѕіїјљњћќѝўџѠѡѢѣѤѥѦѧѨѩѪѫѬѭѮѯѰѱѲѳѴѵѶѷѸѹѺѻѼѽѾѿҀҁҊҋҌҍҎҏҐґҒғҔҕҖҗҘҙҚқҜҝҞҟҠҡҢңҤҥҦҧҨҩҪҫҬҭҮүҰұҲҳҴҵҶҷҸҹҺһҼҽҾҿӀӁӂӃӄӅӆӇӈӉӊӋӌӍӎӏӐӑӒӓӔӕӖӗӘәӚӛӜӝӞӟӠӡӢӣӤӥӦӧӨөӪӫӬӭӮӯӰӱӲӳӴӵӶӷӸӹӺӻӼӽӾӿԀԁԂԃԄԅԆԇԈԉԊԋԌԍԎԏԐԑԒԓԔԕԖԗԘԙԚԛԜԝԞԟԠԡԢԣԤԥԦԧԨԩԪԫԬԭԮԯԱԲԳԴԵԶԷԸԹԺԻԼԽԾԿՀՁՂՃՄՅՆՇՈՉՊՋՌՍՎՏՐՑՒՓՔՕՖՙաբգդեզէըթժիլխծկհձղճմյնշոչպջռսվտրցւփքօֆև◌ְ◌ֱ◌ֲ◌ֳ◌ִ◌ֵ◌ֶ◌ַ◌ָ◌ֹ◌ֺ◌ֻ◌ּ◌ֽ◌ֿ◌ׁ◌ׂ◌ׄ◌ׅ◌ׇאבגדהוזחטיךכלםמןנסעףפץצקרשתװױײ◌ؐ◌ؑ◌ؒ◌ؓ◌ؔ◌ؕ◌ؖ◌ؗ◌ؘ◌ؙ◌ؚؠءآأؤإئابةتثجحخدذرزسشصضطظعغػؼؽؾؿـفقكلمنهوىي◌ً◌ٌ◌ٍ◌َ◌ُ◌ِ◌ّ◌ْ◌ٓ◌ٔ◌ٕ◌ٖ◌ٗ◌ٙ◌ٚ◌ٛ◌ٜ◌ٝ◌ٞ◌ٟ٠١٢٣٤٥٦٧٨٩ٮٯ◌ٰٱٲٳٴٵٶٷٸٹٺٻټٽپٿڀځڂڃڄڅچڇڈډڊڋڌڍڎڏڐڑڒړڔڕږڗژڙښڛڜڝڞڟڠڡڢڣڤڥڦڧڨکڪګڬڭڮگڰڱڲڳڴڵڶڷڸڹںڻڼڽھڿۀہۂۃۄۅۆۇۈۉۊۋیۍێۏېۑےۓە◌ۖ◌ۗ◌ۘ◌ۙ◌ۚ◌ۛ◌ۜ◌ۡ◌ۢ◌ۣ◌ۤۥۦ◌ۧ◌ۨ◌ۭۮۯ۰۱۲۳۴۵۶۷۸۹ۺۻۼۿܐ◌ܑܒܓܔܕܖܗܘܙܚܛܜܝܞܟܠܡܢܣܤܥܦܧܨܩܪܫܬܭܮܯ◌ܰ◌ܱ◌ܲ◌ܳ◌ܴ◌ܵ◌ܶ◌ܷ◌ܸ◌ܹ◌ܺ◌ܻ◌ܼ◌ܽ◌ܾ◌ܿݍݎݏݐݑݒݓݔݕݖݗݘݙݚݛݜݝݞݟݠݡݢݣݤݥݦݧݨݩݪݫݬݭݮݯݰݱݲݳݴݵݶݷݸݹݺݻݼݽݾݿހށނރބޅކއވމފދތލގޏސޑޒޓޔޕޖޗޘޙޚޛޜޝޞޟޠޡޢޣޤޥ◌ަ◌ާ◌ި◌ީ◌ު◌ޫ◌ެ◌ޭ◌ޮ◌ޯ◌ްޱ߀߁߂߃߄߅߆߇߈߉ߊߋߌߍߎߏߐߑߒߓߔߕߖߗߘߙߚߛߜߝߞߟߠߡߢߣߤߥߦߧߨߩߪߴߵߺࠀࠁࠂࠃࠄࠅࠆࠇࠈࠉࠊࠋࠌࠍࠎࠏࠐࠑࠒࠓࠔࠕ◌ࠖ◌ࠗࠚ◌ࠛ◌ࠜ◌ࠝ◌ࠞ◌ࠟ◌ࠠ◌ࠡ◌ࠢ◌ࠣࠤ◌ࠥ◌ࠦ◌ࠧࠨ◌ࠩ◌ࠪ◌ࠫ◌ࠬࡀࡁࡂࡃࡄࡅࡆࡇࡈࡉࡊࡋࡌࡍࡎࡏࡐࡑࡒࡓࡔࡕࡖࡗࡘࢠࢡࢢࢣࢤࢥࢦࢧࢨࢩࢪࢫࢬࢭࢮࢯࢰࢱࢲ◌ࣤ◌ࣥ◌ࣦ◌ࣧ◌ࣨ◌ࣩ◌ࣰ◌ࣱ◌ࣲ◌ࣳ◌ࣴ◌ࣵ◌ࣶ◌ࣷ◌ࣸ◌ࣹ◌ࣺ◌ࣻ◌ࣼ◌ࣽ◌ࣾ◌ࣿ◌ऀ◌ँ◌ंःऄअआइईउऊऋऌऍऎएऐऑऒओऔकखगघङचछजझञटठडढणतथदधनऩपफबभमयरऱलळऴवशषसह◌ऺऻऽािी◌ु◌ू◌ृ◌ॄ◌ॅ◌ॆ◌े◌ैॉॊोौॎॏॐ◌ॕ◌ॖ◌ॗक़ख़ग़ज़ड़ढ़फ़य़ॠॡ◌ॢ◌ॣ०१२३४५६७८९ॱॲॳॴॵॶॷॸॹॺॻॼॽॾॿঀ◌ঁংঃঅআইঈউঊঋঌএঐওঔকখগঘঙচছজঝঞটঠডঢণতথদধনপফবভমযরলশষসহঽািী◌ু◌ূ◌ৃ◌ৄেৈোৌৎৗড়ঢ়য়ৠৡ◌ৢ◌ৣ০১২৩৪৫৬৭৮৯ৰৱ◌ਁ◌ਂਃਅਆਇਈਉਊਏਐਓਔਕਖਗਘਙਚਛਜਝਞਟਠਡਢਣਤਥਦਧਨਪਫਬਭਮਯਰਲਲ਼ਵਸ਼ਸਹਾਿੀ◌ੁ◌ੂ◌ੇ◌ੈ◌ੋ◌ੌ◌ੑਖ਼ਗ਼ਜ਼ੜਫ਼੦੧੨੩੪੫੬੭੮੯◌ੰ◌ੱੲੳੴ◌ੵ◌ઁ◌ંઃઅઆઇઈઉઊઋઌઍએઐઑઓઔકખગઘઙચછજઝઞટઠડઢણતથદધનપફબભમયરલળવશષસહ
ઽાિી◌ુ◌ૂ◌ૃ◌ૄ◌ૅ◌ે◌ૈૉોૌૐૠૡ◌ૢ◌ૣ૦૧૨૩૪૫૬૭૮૯◌ଁଂଃଅଆଇଈଉଊଋଌଏଐଓଔକଖଗଘଙଚଛଜଝଞଟଠଡଢଣତଥଦଧନପଫବଭମଯରଲଳଵଶଷସହଽା◌ିୀ◌ୁ◌ୂ◌ୃ◌ୄେୈୋୌ◌ୖୗଡ଼ଢ଼ୟୠୡ◌ୢ◌ୣ୦୧୨୩୪୫୬୭୮୯ୱ◌ஂஃஅஆஇஈஉஊஎஏஐஒஓஔகஙசஜஞடணதநனபமயரறலளழவஶஷஸஹாி◌ீுூெேைொோௌௐௗ௦௧௨௩௪௫௬௭௮௯◌ఀఁంఃఅఆఇఈఉఊఋఌఎఏఐఒఓఔకఖగఘఙచఛజఝఞటఠడఢణతథదధనపఫబభమయరఱలళఴవశషసహఽ◌ా◌ి◌ీుూృౄ◌ె◌ే◌ై◌ొ◌ో◌ౌ◌ౕ◌ౖౘౙౠౡ◌ౢ◌ౣ౦౧౨౩౪౫౬౭౮౯◌ಁಂಃಅಆಇಈಉಊಋಌಎಏಐಒಓಔಕಖಗಘಙಚಛಜಝಞಟಠಡಢಣತಥದಧನಪಫಬಭಮಯರಱಲಳವಶಷಸಹಽಾಿೀುೂೃೄೆೇೈೊೋ◌ೌೕೖೞೠೡ◌ೢ◌ೣ೦೧೨೩೪೫೬೭೮೯ೱೲ◌ഁംഃഅആഇഈഉഊഋഌഎഏഐഒഓഔകഖഗഘങചഛജഝഞടഠഡഢണതഥദധനഩപഫബഭമയരറലളഴവശഷസഹഺഽാിീ◌ു◌ൂ◌ൃ◌ൄെേൈൊോൌൎൗൠൡ◌ൢ◌ൣ൦൧൨൩൪൫൬൭൮൯ൺൻർൽൾൿංඃඅආඇඈඉඊඋඌඍඎඏඐඑඒඓඔඕඖකඛගඝඞඟචඡජඣඤඥඦටඨඩඪණඬතථදධනඳපඵබභමඹයරලවශෂසහළෆාැෑ◌ි◌ී◌ු◌ූෘෙේෛොෝෞෟ෦෧෨෩෪෫෬෭෮෯ෲෳกขฃคฅฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรฤลฦวศษสหฬอฮฯะ◌ัาำ◌ิ◌ี◌ึ◌ื◌ุ◌ู◌ฺเแโใไๅๆ◌ํ๐๑๒๓๔๕๖๗๘๙ກຂຄງຈຊຍດຕຖທນບປຜຝພຟມຢຣລວສຫອຮຯະ◌ັາຳ◌ິ◌ີ◌ຶ◌ື◌ຸ◌ູ◌ົ◌ຼຽເແໂໃໄໆ◌ໍ໐໑໒໓໔໕໖໗໘໙ໜໝໞໟༀ༠༡༢༣༤༥༦༧༨༩ཀཁགགྷངཅཆཇཉཊཋཌཌྷཎཏཐདདྷནཔཕབབྷམཙཚཛཛྷཝཞཟའཡརལཤཥསཧཨཀྵཪཫཬ◌ཱ◌ི◌ཱི◌ུ◌ཱུ◌ྲྀ◌ཷ◌ླྀ◌ཹ◌ེ◌ཻ◌ོ◌ཽ◌ཾཿ◌ྀ◌ཱྀྈྉྊྋྌ◌ྍ◌ྎ◌ྏ◌ྐ◌ྑ◌ྒ◌ྒྷ◌ྔ◌ྕ◌ྖ◌ྗ◌ྙ◌ྚ◌ྛ◌ྜ◌ྜྷ◌ྞ◌ྟ◌ྠ◌ྡ◌ྡྷ◌ྣ◌ྤ◌ྥ◌ྦ◌ྦྷ◌ྨ◌ྩ◌ྪ◌ྫ◌ྫྷ◌ྭ◌ྮ◌ྯ◌ྰ◌ྱ◌ྲ◌ླ◌ྴ◌ྵ◌ྶ◌ྷ◌ྸ◌ྐྵ◌ྺ◌ྻ◌ྼကခဂဃငစဆဇဈဉညဋဌဍဎဏတထဒဓနပဖဗဘမယရလဝသဟဠအဢဣဤဥဦဧဨဩဪါာ◌ိ◌ီ◌ု◌ူေ◌ဲ◌ဳ◌ဴ◌ဵ◌ံးျြ◌ွ◌ှဿ၀၁၂၃၄၅၆၇၈၉ၐၑၒၓၔၕၖၗ◌ၘ◌ၙၚၛၜၝ◌ၞ◌ၟ◌ၠၡၢၥၦၧၨၮၯၰ◌ၱ◌ၲ◌ၳ◌ၴၵၶၷၸၹၺၻၼၽၾၿႀႁ◌ႂႃႄ◌ႅ◌ႆႎ႐႑႒႓႔႕႖႗႘႙ႜ◌ႝႠႡႢႣႤႥႦႧႨႩႪႫႬႭႮႯႰႱႲႳႴႵႶႷႸႹႺႻႼႽႾႿჀჁჂჃჄჅჇჍაბგდევზთიკლმნოპჟრსტუფქღყშჩცძწჭხჯჰჱჲჳჴჵჶჷჸჹჺჼჽჾჿᄀᄁᄂᄃᄄᄅᄆᄇᄈᄉᄊᄋᄌᄍᄎᄏᄐᄑᄒᄓᄔᄕᄖᄗᄘᄙᄚᄛᄜᄝᄞᄟᄠᄡᄢᄣᄤᄥᄦᄧᄨᄩᄪᄫᄬᄭᄮᄯᄰᄱᄲᄳᄴᄵᄶᄷᄸᄹᄺᄻᄼᄽᄾᄿᅀᅁᅂᅃᅄᅅᅆᅇᅈᅉᅊᅋᅌᅍᅎᅏᅐᅑᅒᅓᅔᅕᅖᅗᅘᅙᅚᅛᅜᅝᅞᅟᅠᅡᅢᅣᅤᅥᅦᅧᅨᅩᅪᅫᅬᅭᅮᅯᅰᅱᅲᅳᅴᅵᅶᅷᅸᅹᅺᅻᅼᅽᅾᅿᆀᆁᆂᆃᆄᆅᆆᆇᆈᆉᆊᆋᆌᆍᆎᆏᆐᆑᆒᆓᆔᆕᆖᆗᆘᆙᆚᆛᆜᆝᆞᆟᆠᆡᆢᆣᆤᆥᆦᆧᆨᆩᆪᆫᆬᆭᆮᆯᆰᆱᆲᆳᆴᆵᆶᆷᆸᆹᆺᆻᆼᆽᆾᆿᇀᇁᇂᇃᇄᇅᇆᇇᇈᇉᇊᇋᇌᇍᇎᇏᇐᇑᇒᇓᇔᇕᇖᇗᇘᇙᇚᇛᇜᇝᇞᇟᇠᇡᇢᇣᇤᇥᇦᇧᇨᇩᇪᇫᇬᇭᇮᇯᇰᇱᇲᇳᇴᇵᇶᇷᇸᇹᇺᇻᇼᇽᇾᇿሀሁሂሃሄህሆሇለሉሊላሌልሎሏሐሑሒሓሔሕሖሗመሙሚማሜምሞሟሠሡሢሣሤሥሦሧረሩሪራሬርሮሯሰሱሲሳሴስሶሷሸሹሺሻሼሽሾሿቀቁቂቃቄቅቆቇቈቊቋቌቍቐቑቒቓቔቕቖቘቚቛቜቝበቡቢባቤብቦቧቨቩቪቫቬቭቮቯተቱቲታቴትቶቷቸቹቺቻቼችቾቿኀኁኂኃኄኅኆኇኈኊኋኌኍነኑኒናኔንኖኗኘኙኚኛኜኝኞኟአኡኢኣኤእኦኧከኩኪካኬክኮኯኰኲኳኴኵኸኹኺኻኼኽኾዀዂዃዄዅወዉዊዋዌውዎዏዐዑዒዓዔዕዖዘዙዚዛዜዝዞዟዠዡዢዣዤዥዦዧየዩዪያዬይዮዯደዱዲዳዴድዶዷዸዹዺዻዼዽዾዿጀጁጂጃጄጅጆጇገጉጊጋጌግጎጏጐጒጓጔጕጘጙጚጛጜጝጞጟጠጡጢጣጤጥጦጧጨጩጪጫጬጭጮጯጰጱጲጳጴጵጶጷጸጹጺጻጼጽጾጿፀፁፂፃፄፅፆፇፈፉፊፋፌፍፎፏፐፑፒፓፔፕፖፗፘፙፚ◌፟ᎀᎁᎂᎃᎄᎅᎆᎇᎈᎉᎊᎋᎌᎍᎎᎏᎠᎡᎢᎣᎤᎥᎦᎧᎨᎩᎪᎫᎬᎭᎮᎯᎰᎱᎲᎳᎴᎵᎶᎷᎸᎹᎺᎻᎼᎽᎾᎿᏀᏁᏂᏃᏄᏅᏆᏇᏈᏉᏊᏋᏌᏍᏎᏏᏐᏑᏒᏓᏔᏕᏖᏗᏘᏙᏚᏛᏜᏝᏞᏟᏠᏡᏢᏣᏤᏥᏦᏧᏨᏩᏪᏫᏬᏭᏮᏯᏰᏱᏲᏳᏴᐁᐂᐃᐄᐅᐆᐇᐈᐉᐊᐋᐌᐍᐎᐏᐐᐑᐒᐓᐔᐕᐖᐗᐘᐙᐚᐛᐜᐝᐞᐟᐠᐡᐢᐣᐤᐥᐦᐧᐨᐩᐪᐫᐬᐭᐮᐯᐰᐱᐲᐳᐴᐵᐶᐷᐸᐹᐺᐻᐼᐽᐾᐿᑀᑁᑂᑃᑄᑅᑆᑇᑈᑉᑊᑋᑌᑍᑎᑏᑐᑑᑒᑓᑔᑕᑖᑗᑘᑙᑚᑛᑜᑝᑞᑟᑠᑡᑢᑣᑤᑥᑦᑧᑨᑩᑪᑫᑬᑭᑮᑯᑰᑱᑲᑳᑴᑵᑶᑷᑸᑹᑺᑻᑼᑽᑾᑿᒀᒁᒂᒃᒄᒅᒆᒇᒈᒉᒊᒋᒌᒍᒎᒏᒐᒑᒒᒓᒔᒕᒖᒗᒘᒙᒚᒛᒜᒝᒞᒟᒠᒡᒢᒣᒤᒥᒦᒧᒨᒩᒪᒫᒬᒭᒮᒯᒰᒱᒲᒳᒴᒵᒶᒷᒸᒹᒺᒻᒼᒽᒾᒿᓀᓁᓂᓃᓄᓅᓆᓇᓈᓉᓊᓋᓌᓍᓎᓏᓐᓑᓒᓓᓔᓕᓖᓗᓘᓙᓚᓛᓜᓝᓞᓟᓠᓡᓢᓣᓤᓥᓦᓧᓨᓩᓪᓫᓬᓭᓮᓯᓰᓱᓲᓳᓴᓵᓶᓷᓸᓹᓺᓻᓼᓽᓾᓿᔀᔁᔂᔃᔄᔅᔆᔇᔈᔉᔊᔋᔌᔍᔎᔏᔐᔑᔒᔓᔔᔕᔖᔗᔘᔙᔚᔛᔜᔝᔞᔟᔠᔡᔢᔣᔤᔥᔦᔧᔨᔩᔪᔫᔬᔭᔮᔯᔰᔱᔲᔳᔴᔵᔶᔷᔸᔹᔺᔻᔼᔽᔾᔿᕀᕁᕂᕃᕄᕅᕆᕇᕈᕉᕊᕋᕌᕍᕎᕏᕐᕑᕒᕓᕔᕕᕖᕗᕘᕙᕚᕛᕜᕝᕞᕟᕠᕡᕢᕣᕤᕥᕦᕧᕨᕩᕪᕫᕬᕭᕮᕯᕰᕱᕲᕳᕴᕵᕶᕷᕸᕹᕺᕻᕼᕽᕾᕿᖀᖁᖂᖃᖄᖅᖆᖇᖈᖉᖊᖋᖌᖍᖎᖏᖐᖑᖒᖓᖔᖕᖖᖗᖘᖙᖚᖛᖜᖝᖞᖟᖠᖡᖢᖣᖤᖥᖦᖧᖨᖩᖪᖫᖬᖭᖮᖯᖰᖱᖲᖳᖴᖵᖶᖷᖸᖹᖺᖻᖼᖽᖾᖿᗀᗁᗂᗃᗄᗅᗆᗇᗈᗉᗊᗋᗌᗍᗎᗏᗐᗑᗒᗓᗔᗕᗖᗗᗘᗙᗚᗛᗜᗝᗞᗟᗠᗡᗢᗣᗤᗥᗦᗧᗨᗩᗪᗫᗬᗭᗮᗯᗰᗱᗲᗳᗴᗵᗶᗷᗸᗹᗺᗻᗼᗽᗾᗿᘀᘁᘂᘃᘄᘅᘆᘇᘈᘉᘊᘋᘌᘍᘎᘏᘐᘑᘒᘓᘔᘕᘖᘗᘘᘙᘚᘛᘜᘝᘞᘟᘠᘡᘢᘣᘤᘥᘦᘧᘨᘩᘪᘫᘬᘭᘮᘯᘰᘱᘲᘳᘴᘵᘶᘷᘸᘹᘺᘻᘼᘽᘾᘿᙀᙁᙂᙃᙄᙅᙆᙇᙈᙉᙊᙋᙌᙍᙎᙏᙐᙑᙒᙓᙔᙕᙖᙗᙘᙙᙚᙛᙜᙝᙞᙟᙠᙡᙢᙣᙤᙥᙦᙧᙨᙩᙪᙫᙬᙯᙰᙱᙲᙳᙴᙵᙶᙷᙸᙹᙺᙻᙼᙽᙾᙿᚁᚂᚃᚄᚅᚆᚇᚈᚉᚊᚋᚌᚍᚎᚏᚐᚑᚒᚓᚔᚕᚖᚗᚘᚙᚚᚠᚡᚢᚣᚤᚥᚦᚧᚨᚩᚪᚫᚬᚭᚮᚯᚰᚱᚲᚳᚴᚵᚶᚷᚸᚹᚺᚻᚼᚽᚾᚿᛀᛁᛂᛃᛄᛅᛆᛇᛈᛉᛊᛋᛌᛍᛎᛏᛐᛑᛒᛓᛔᛕᛖᛗᛘᛙᛚᛛᛜᛝᛞᛟᛠᛡᛢᛣᛤᛥᛦᛧᛨᛩᛪᛮᛯᛰᛱᛲᛳᛴᛵᛶᛷᛸᜀᜁᜂᜃᜄᜅᜆᜇᜈᜉᜊᜋᜌᜎᜏᜐᜑ◌ᜒ◌ᜓᜠᜡᜢᜣᜤᜥᜦᜧᜨᜩᜪᜫᜬᜭᜮᜯᜰᜱ◌ᜲ◌ᜳᝀᝁᝂᝃᝄᝅᝆᝇᝈᝉᝊᝋᝌᝍᝎᝏᝐᝑ◌ᝒ◌ᝓᝠᝡᝢᝣᝤᝥᝦᝧᝨᝩᝪᝫᝬᝮᝯᝰ◌ᝲ◌ᝳកខគឃងចឆជឈញដឋឌឍណតថទធនបផពភមយរលវឝឞសហឡអឣឤឥឦឧឨឩឪឫឬឭឮឯឰឱឲឳា◌ិ◌ី◌ឹ◌ឺ◌ុ◌ូ◌ួើឿៀេែៃោៅ◌ំះៈៗៜ០១២៣៤៥៦៧៨៩᠐᠑᠒᠓᠔᠕᠖᠗᠘᠙ᠠᠡᠢᠣᠤᠥᠦᠧᠨᠩᠪᠫᠬᠭᠮᠯᠰᠱᠲᠳᠴᠵᠶᠷᠸᠹᠺᠻᠼᠽᠾᠿᡀᡁᡂᡃᡄᡅᡆᡇᡈᡉᡊᡋᡌᡍᡎᡏᡐᡑᡒᡓᡔᡕᡖᡗᡘᡙᡚᡛᡜᡝᡞᡟᡠᡡᡢᡣᡤᡥᡦᡧᡨᡩᡪᡫᡬᡭᡮᡯᡰᡱᡲᡳᡴᡵᡶᡷᢀᢁᢂᢃᢄᢅᢆᢇᢈᢉᢊᢋᢌᢍᢎᢏᢐᢑᢒᢓᢔᢕᢖᢗᢘᢙᢚᢛᢜᢝᢞᢟᢠᢡᢢᢣᢤᢥᢦᢧᢨ◌ᢩᢪᢰᢱᢲᢳᢴᢵᢶᢷᢸᢹᢺᢻᢼᢽᢾᢿᣀᣁᣂᣃᣄᣅᣆᣇᣈᣉᣊᣋᣌᣍᣎᣏᣐᣑᣒᣓᣔᣕᣖᣗᣘᣙᣚᣛᣜᣝᣞᣟᣠᣡᣢᣣᣤᣥᣦᣧᣨᣩᣪᣫᣬᣭᣮᣯᣰᣱᣲᣳᣴᣵᤀᤁᤂᤃᤄᤅᤆᤇᤈᤉᤊᤋᤌᤍᤎᤏᤐᤑᤒᤓᤔᤕᤖᤗᤘᤙᤚᤛᤜᤝᤞ◌ᤠ◌ᤡ◌ᤢᤣᤤᤥᤦ◌ᤧ◌ᤨᤩᤪᤫᤰᤱ◌ᤲᤳᤴᤵᤶᤷᤸ᥆᥇᥈᥉᥊᥋᥌᥍᥎᥏ᥐᥑᥒᥓᥔᥕᥖᥗᥘᥙᥚᥛᥜᥝᥞᥟᥠᥡᥢᥣᥤᥥᥦᥧᥨᥩᥪᥫᥬᥭᥰᥱᥲᥳᥴᦀᦁᦂᦃᦄᦅᦆᦇᦈᦉᦊᦋᦌᦍᦎᦏᦐᦑᦒᦓᦔᦕᦖᦗᦘᦙᦚᦛᦜᦝᦞᦟᦠᦡᦢᦣᦤᦥᦦᦧᦨᦩᦪᦫᦰᦱᦲᦳᦴᦵᦶᦷᦸᦹᦺᦻᦼᦽᦾᦿᧀᧁᧂᧃᧄᧅᧆᧇᧈᧉ᧐᧑᧒᧓᧔᧕᧖᧗᧘᧙ᨀᨁᨂᨃᨄᨅᨆᨇᨈᨉᨊᨋᨌᨍᨎᨏᨐᨑᨒᨓᨔᨕᨖ◌ᨗ◌ᨘᨙᨚ◌ᨛᨠᨡᨢᨣᨤᨥᨦᨧᨨᨩᨪᨫᨬᨭᨮᨯᨰᨱᨲᨳᨴᨵᨶᨷᨸᨹᨺᨻᨼᨽᨾᨿᩀᩁᩂᩃᩄᩅᩆᩇᩈᩉᩊᩋᩌᩍᩎᩏᩐᩑᩒᩓᩔᩕ◌ᩖᩗ◌ᩘ◌ᩙ◌ᩚ◌ᩛ◌ᩜ◌ᩝ◌ᩞᩡ◌ᩢᩣᩤ◌ᩥ◌ᩦ◌ᩧ◌ᩨ◌ᩩ◌ᩪ◌ᩫ◌ᩬᩭᩮᩯᩰᩱᩲ◌ᩳ◌ᩴ᪀᪁᪂᪃᪄᪅᪆᪇᪈᪉᪐᪑᪒᪓᪔᪕᪖᪗᪘᪙ᪧ◌ᬀ◌ᬁ◌ᬂ◌ᬃᬄᬅᬆᬇᬈᬉᬊᬋᬌᬍᬎᬏᬐᬑᬒᬓᬔᬕᬖᬗᬘᬙᬚᬛᬜᬝᬞᬟᬠᬡᬢᬣᬤᬥᬦᬧᬨᬩᬪᬫᬬᬭᬮᬯᬰᬱᬲᬳᬵ◌ᬶ◌ᬷ◌ᬸ◌ᬹ◌ᬺᬻ◌ᬼᬽᬾᬿᭀᭁ◌ᭂᭃᭅᭆᭇᭈᭉᭊᭋ᭐᭑᭒᭓᭔᭕᭖᭗᭘᭙◌ᮀ◌ᮁᮂᮃᮄᮅᮆᮇᮈᮉᮊᮋᮌᮍᮎᮏᮐᮑᮒᮓᮔᮕᮖᮗᮘᮙᮚᮛᮜᮝᮞᮟᮠᮡ◌ᮢ◌ᮣ◌ᮤ◌ᮥᮦᮧ◌ᮨ◌ᮩ◌ᮬ◌ᮭᮮᮯ᮰᮱᮲᮳᮴᮵᮶᮷᮸᮹ᮺᮻᮼᮽᮾᮿᯀᯁᯂᯃᯄᯅᯆᯇᯈᯉᯊᯋᯌᯍᯎᯏᯐᯑᯒᯓᯔᯕᯖᯗᯘᯙᯚᯛᯜᯝᯞᯟᯠᯡᯢᯣᯤᯥᯧ◌ᯨ◌ᯩᯪᯫᯬ◌ᯭᯮ◌ᯯ◌ᯰ◌
ᯱᰀᰁᰂᰃᰄᰅᰆᰇᰈᰉᰊᰋᰌᰍᰎᰏᰐᰑᰒᰓᰔᰕᰖᰗᰘᰙᰚᰛᰜᰝᰞᰟᰠᰡᰢᰣᰤᰥᰦᰧᰨᰩᰪᰫ◌ᰬ◌ᰭ◌ᰮ◌ᰯ◌ᰰ◌ᰱ◌ᰲ◌ᰳᰴᰵ᱀᱁᱂᱃᱄᱅᱆᱇᱈᱉ᱍᱎᱏ᱐᱑᱒᱓᱔᱕᱖᱗᱘᱙ᱚᱛᱜᱝᱞᱟᱠᱡᱢᱣᱤᱥᱦᱧᱨᱩᱪᱫᱬᱭᱮᱯᱰᱱᱲᱳᱴᱵᱶᱷᱸᱹᱺᱻᱼᱽᳩᳪᳫᳬᳮᳯᳰᳱᳲᳳᳵᳶᴀᴁᴂᴃᴄᴅᴆᴇᴈᴉᴊᴋᴌᴍᴎᴏᴐᴑᴒᴓᴔᴕᴖᴗᴘᴙᴚᴛᴜᴝᴞᴟᴠᴡᴢᴣᴤᴥᴦᴧᴨᴩᴪᴫᴬᴭᴮᴯᴰᴱᴲᴳᴴᴵᴶᴷᴸᴹᴺᴻᴼᴽᴾᴿᵀᵁᵂᵃᵄᵅᵆᵇᵈᵉᵊᵋᵌᵍᵎᵏᵐᵑᵒᵓᵔᵕᵖᵗᵘᵙᵚᵛᵜᵝᵞᵟᵠᵡᵢᵣᵤᵥᵦᵧᵨᵩᵪᵫᵬᵭᵮᵯᵰᵱᵲᵳᵴᵵᵶᵷᵸᵹᵺᵻᵼᵽᵾᵿᶀᶁᶂᶃᶄᶅᶆᶇᶈᶉᶊᶋᶌᶍᶎᶏᶐᶑᶒᶓᶔᶕᶖᶗᶘᶙᶚᶛᶜᶝᶞᶟᶠᶡᶢᶣᶤᶥᶦᶧᶨᶩᶪᶫᶬᶭᶮᶯᶰᶱᶲᶳᶴᶵᶶᶷᶸᶹᶺᶻᶼᶽᶾᶿ◌ᷧ◌ᷨ◌ᷩ◌ᷪ◌ᷫ◌ᷬ◌ᷭ◌ᷮ◌ᷯ◌ᷰ◌ᷱ◌ᷲ◌ᷳ◌ᷴḀḁḂḃḄḅḆḇḈḉḊḋḌḍḎḏḐḑḒḓḔḕḖḗḘḙḚḛḜḝḞḟḠḡḢḣḤḥḦḧḨḩḪḫḬḭḮḯḰḱḲḳḴḵḶḷḸḹḺḻḼḽḾḿṀṁṂṃṄṅṆṇṈṉṊṋṌṍṎṏṐṑṒṓṔṕṖṗṘṙṚṛṜṝṞṟṠṡṢṣṤṥṦṧṨṩṪṫṬṭṮṯṰṱṲṳṴṵṶṷṸṹṺṻṼṽṾṿẀẁẂẃẄẅẆẇẈẉẊẋẌẍẎẏẐẑẒẓẔẕẖẗẘẙẚẛẜẝẞẟẠạẢảẤấẦầẨẩẪẫẬậẮắẰằẲẳẴẵẶặẸẹẺẻẼẽẾếỀềỂểỄễỆệỈỉỊịỌọỎỏỐốỒồỔổỖỗỘộỚớỜờỞởỠỡỢợỤụỦủỨứỪừỬửỮữỰựỲỳỴỵỶỷỸỹỺỻỼỽỾỿἀἁἂἃἄἅἆἇἈἉἊἋἌἍἎἏἐἑἒἓἔἕἘἙἚἛἜἝἠἡἢἣἤἥἦἧἨἩἪἫἬἭἮἯἰἱἲἳἴἵἶἷἸἹἺἻἼἽἾἿὀὁὂὃὄὅὈὉὊὋὌὍὐὑὒὓὔὕὖὗὙὛὝὟὠὡὢὣὤὥὦὧὨὩὪὫὬὭὮὯὰάὲέὴήὶίὸόὺύὼώᾀᾁᾂᾃᾄᾅᾆᾇᾈᾉᾊᾋᾌᾍᾎᾏᾐᾑᾒᾓᾔᾕᾖᾗᾘᾙᾚᾛᾜᾝᾞᾟᾠᾡᾢᾣᾤᾥᾦᾧᾨᾩᾪᾫᾬᾭᾮᾯᾰᾱᾲᾳᾴᾶᾷᾸᾹᾺΆᾼιῂῃῄῆῇῈΈῊΉῌῐῑῒΐῖῗῘῙῚΊῠῡῢΰῤῥῦῧῨῩῪΎῬῲῳῴῶῷῸΌῺΏῼⁱⁿₐₑₒₓₔₕₖₗₘₙₚₛₜℂℇℊℋℌℍℎℏℐℑℒℓℕℙℚℛℜℝℤΩℨKÅℬℭℯℰℱℲℳℴℵℶℷℸℹℼℽℾℿⅅⅆⅇⅈⅉⅎⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫⅬⅭⅮⅯⅰⅱⅲⅳⅴⅵⅶⅷⅸⅹⅺⅻⅼⅽⅾⅿↀↁↂↃↄↅↆↇↈⒶⒷⒸⒹⒺⒻⒼⒽⒾⒿⓀⓁⓂⓃⓄⓅⓆⓇⓈⓉⓊⓋⓌⓍⓎⓏⓐⓑⓒⓓⓔⓕⓖⓗⓘⓙⓚⓛⓜⓝⓞⓟⓠⓡⓢⓣⓤⓥⓦⓧⓨⓩⰀⰁⰂⰃⰄⰅⰆⰇⰈⰉⰊⰋⰌⰍⰎⰏⰐⰑⰒⰓⰔⰕⰖⰗⰘⰙⰚⰛⰜⰝⰞⰟⰠⰡⰢⰣⰤⰥⰦⰧⰨⰩⰪⰫⰬⰭⰮⰰⰱⰲⰳⰴⰵⰶⰷⰸⰹⰺⰻⰼⰽⰾⰿⱀⱁⱂⱃⱄⱅⱆⱇⱈⱉⱊⱋⱌⱍⱎⱏⱐⱑⱒⱓⱔⱕⱖⱗⱘⱙⱚⱛⱜⱝⱞⱠⱡⱢⱣⱤⱥⱦⱧⱨⱩⱪⱫⱬⱭⱮⱯⱰⱱⱲⱳⱴⱵⱶⱷⱸⱹⱺⱻⱼⱽⱾⱿⲀⲁⲂⲃⲄⲅⲆⲇⲈⲉⲊⲋⲌⲍⲎⲏⲐⲑⲒⲓⲔⲕⲖⲗⲘⲙⲚⲛⲜⲝⲞⲟⲠⲡⲢⲣⲤⲥⲦⲧⲨⲩⲪⲫⲬⲭⲮⲯⲰⲱⲲⲳⲴⲵⲶⲷⲸⲹⲺⲻⲼⲽⲾⲿⳀⳁⳂⳃⳄⳅⳆⳇⳈⳉⳊⳋⳌⳍⳎⳏⳐⳑⳒⳓⳔⳕⳖⳗⳘⳙⳚⳛⳜⳝⳞⳟⳠⳡⳢⳣⳤⳫⳬⳭⳮⳲⳳⴀⴁⴂⴃⴄⴅⴆⴇⴈⴉⴊⴋⴌⴍⴎⴏⴐⴑⴒⴓⴔⴕⴖⴗⴘⴙⴚⴛⴜⴝⴞⴟⴠⴡⴢⴣⴤⴥⴧⴭⴰⴱⴲⴳⴴⴵⴶⴷⴸⴹⴺⴻⴼⴽⴾⴿⵀⵁⵂⵃⵄⵅⵆⵇⵈⵉⵊⵋⵌⵍⵎⵏⵐⵑⵒⵓⵔⵕⵖⵗⵘⵙⵚⵛⵜⵝⵞⵟⵠⵡⵢⵣⵤⵥⵦⵧⵯⶀⶁⶂⶃⶄⶅⶆⶇⶈⶉⶊⶋⶌⶍⶎⶏⶐⶑⶒⶓⶔⶕⶖⶠⶡⶢⶣⶤⶥⶦⶨⶩⶪⶫⶬⶭⶮⶰⶱⶲⶳⶴⶵⶶⶸⶹⶺⶻⶼⶽⶾⷀⷁⷂⷃⷄⷅⷆⷈⷉⷊⷋⷌⷍⷎⷐⷑⷒⷓⷔⷕⷖⷘⷙⷚⷛⷜⷝⷞ◌ⷠ◌ⷡ◌ⷢ◌ⷣ◌ⷤ◌ⷥ◌ⷦ◌ⷧ◌ⷨ◌ⷩ◌ⷪ◌ⷫ◌ⷬ◌ⷭ◌ⷮ◌ⷯ◌ⷰ◌ⷱ◌ⷲ◌ⷳ◌ⷴ◌ⷵ◌ⷶ◌ⷷ◌ⷸ◌ⷹ◌ⷺ◌ⷻ◌ⷼ◌ⷽ◌ⷾ◌ⷿⸯ〆〱〲〳〴〵〼ぁあぃいぅうぇえぉおかがきぎくぐけげこごさざしじすずせぜそぞただちぢっつづてでとどなにぬねのはばぱひびぴふぶぷへべぺほぼぽまみむめもゃやゅゆょよらりるれろゎわゐゑをんゔゕゖゝゞゟァアィイゥウェエォオカガキギクグケゲコゴサザシジスズセゼソゾタダチヂッツヅテデトドナニヌネノハバパヒビピフブプヘベペホボポマミムメモャヤュユョヨラリルレロヮワヰヱヲンヴヵヶヷヸヹヺーヽヾヿㄅㄆㄇㄈㄉㄊㄋㄌㄍㄎㄏㄐㄑㄒㄓㄔㄕㄖㄗㄘㄙㄚㄛㄜㄝㄞㄟㄠㄡㄢㄣㄤㄥㄦㄧㄨㄩㄪㄫㄬㄭㄱㄲㄳㄴㄵㄶㄷㄸㄹㄺㄻㄼㄽㄾㄿㅀㅁㅂㅃㅄㅅㅆㅇㅈㅉㅊㅋㅌㅍㅎㅏㅐㅑㅒㅓㅔㅕㅖㅗㅘㅙㅚㅛㅜㅝㅞㅟㅠㅡㅢㅣㅤㅥㅦㅧㅨㅩㅪㅫㅬㅭㅮㅯㅰㅱㅲㅳㅴㅵㅶㅷㅸㅹㅺㅻㅼㅽㅾㅿㆀㆁㆂㆃㆄㆅㆆㆇㆈㆉㆊㆋㆌㆍㆎㆠㆡㆢㆣㆤㆥㆦㆧㆨㆩㆪㆫㆬㆭㆮㆯㆰㆱㆲㆳㆴㆵㆶㆷㆸㆹㆺㇰㇱㇲㇳㇴㇵㇶㇷㇸㇹㇺㇻㇼㇽㇾㇿꀀꀁꀂꀃꀄꀅꀆꀇꀈꀉꀊꀋꀌꀍꀎꀏꀐꀑꀒꀓꀔꀕꀖꀗꀘꀙꀚꀛꀜꀝꀞꀟꀠꀡꀢꀣꀤꀥꀦꀧꀨꀩꀪꀫꀬꀭꀮꀯꀰꀱꀲꀳꀴꀵꀶꀷꀸꀹꀺꀻꀼꀽꀾꀿꁀꁁꁂꁃꁄꁅꁆꁇꁈꁉꁊꁋꁌꁍꁎꁏꁐꁑꁒꁓꁔꁕꁖꁗꁘꁙꁚꁛꁜꁝꁞꁟꁠꁡꁢꁣꁤꁥꁦꁧꁨꁩꁪꁫꁬꁭꁮꁯꁰꁱꁲꁳꁴꁵꁶꁷꁸꁹꁺꁻꁼꁽꁾꁿꂀꂁꂂꂃꂄꂅꂆꂇꂈꂉꂊꂋꂌꂍꂎꂏꂐꂑꂒꂓꂔꂕꂖꂗꂘꂙꂚꂛꂜꂝꂞꂟꂠꂡꂢꂣꂤꂥꂦꂧꂨꂩꂪꂫꂬꂭꂮꂯꂰꂱꂲꂳꂴꂵꂶꂷꂸꂹꂺꂻꂼꂽꂾꂿꃀꃁꃂꃃꃄꃅꃆꃇꃈꃉꃊꃋꃌꃍꃎꃏꃐꃑꃒꃓꃔꃕꃖꃗꃘꃙꃚꃛꃜꃝꃞꃟꃠꃡꃢꃣꃤꃥꃦꃧꃨꃩꃪꃫꃬꃭꃮꃯꃰꃱꃲꃳꃴꃵꃶꃷꃸꃹꃺꃻꃼꃽꃾꃿꄀꄁꄂꄃꄄꄅꄆꄇꄈꄉꄊꄋꄌꄍꄎꄏꄐꄑꄒꄓꄔꄕꄖꄗꄘꄙꄚꄛꄜꄝꄞꄟꄠꄡꄢꄣꄤꄥꄦꄧꄨꄩꄪꄫꄬꄭꄮꄯꄰꄱꄲꄳꄴꄵꄶꄷꄸꄹꄺꄻꄼꄽꄾꄿꅀꅁꅂꅃꅄꅅꅆꅇꅈꅉꅊꅋꅌꅍꅎꅏꅐꅑꅒꅓꅔꅕꅖꅗꅘꅙꅚꅛꅜꅝꅞꅟꅠꅡꅢꅣꅤꅥꅦꅧꅨꅩꅪꅫꅬꅭꅮꅯꅰꅱꅲꅳꅴꅵꅶꅷꅸꅹꅺꅻꅼꅽꅾꅿꆀꆁꆂꆃꆄꆅꆆꆇꆈꆉꆊꆋꆌꆍꆎꆏꆐꆑꆒꆓꆔꆕꆖꆗꆘꆙꆚꆛꆜꆝꆞꆟꆠꆡꆢꆣꆤꆥꆦꆧꆨꆩꆪꆫꆬꆭꆮꆯꆰꆱꆲꆳꆴꆵꆶꆷꆸꆹꆺꆻꆼꆽꆾꆿꇀꇁꇂꇃꇄꇅꇆꇇꇈꇉꇊꇋꇌꇍꇎꇏꇐꇑꇒꇓꇔꇕꇖꇗꇘꇙꇚꇛꇜꇝꇞꇟꇠꇡꇢꇣꇤꇥꇦꇧꇨꇩꇪꇫꇬꇭꇮꇯꇰꇱꇲꇳꇴꇵꇶꇷꇸꇹꇺꇻꇼꇽꇾꇿꈀꈁꈂꈃꈄꈅꈆꈇꈈꈉꈊꈋꈌꈍꈎꈏꈐꈑꈒꈓꈔꈕꈖꈗꈘꈙꈚꈛꈜꈝꈞꈟꈠꈡꈢꈣꈤꈥꈦꈧꈨꈩꈪꈫꈬꈭꈮꈯꈰꈱꈲꈳꈴꈵꈶꈷꈸꈹꈺꈻꈼꈽꈾꈿꉀꉁꉂꉃꉄꉅꉆꉇꉈꉉꉊꉋꉌꉍꉎꉏꉐꉑꉒꉓꉔꉕꉖꉗꉘꉙꉚꉛꉜꉝꉞꉟꉠꉡꉢꉣꉤꉥꉦꉧꉨꉩꉪꉫꉬꉭꉮꉯꉰꉱꉲꉳꉴꉵꉶꉷꉸꉹꉺꉻꉼꉽꉾꉿꊀꊁꊂꊃꊄꊅꊆꊇꊈꊉꊊꊋꊌꊍꊎꊏꊐꊑꊒꊓꊔꊕꊖꊗꊘꊙꊚꊛꊜꊝꊞꊟꊠꊡꊢꊣꊤꊥꊦꊧꊨꊩꊪꊫꊬꊭꊮꊯꊰꊱꊲꊳꊴꊵꊶꊷꊸꊹꊺꊻꊼꊽꊾꊿꋀꋁꋂꋃꋄꋅꋆꋇꋈꋉꋊꋋꋌꋍꋎꋏꋐꋑꋒꋓꋔꋕꋖꋗꋘꋙꋚꋛꋜꋝꋞꋟꋠꋡꋢꋣꋤꋥꋦꋧꋨꋩꋪꋫꋬꋭꋮꋯꋰꋱꋲꋳꋴꋵꋶꋷꋸꋹꋺꋻꋼꋽꋾꋿꌀꌁꌂꌃꌄꌅꌆꌇꌈꌉꌊꌋꌌꌍꌎꌏꌐꌑꌒꌓꌔꌕꌖꌗꌘꌙꌚꌛꌜꌝꌞꌟꌠꌡꌢꌣꌤꌥꌦꌧꌨꌩꌪꌫꌬꌭꌮꌯꌰꌱꌲꌳꌴꌵꌶꌷꌸꌹꌺꌻꌼꌽꌾꌿꍀꍁꍂꍃꍄꍅꍆꍇꍈꍉꍊꍋꍌꍍꍎꍏꍐꍑꍒꍓꍔꍕꍖꍗꍘꍙꍚꍛꍜꍝꍞꍟꍠꍡꍢꍣꍤꍥꍦꍧꍨꍩꍪꍫꍬꍭꍮꍯꍰꍱꍲꍳꍴꍵꍶꍷꍸꍹꍺꍻꍼꍽꍾꍿꎀꎁꎂꎃꎄꎅꎆꎇꎈꎉꎊꎋꎌꎍꎎꎏꎐꎑꎒꎓꎔꎕꎖꎗꎘꎙꎚꎛꎜꎝꎞꎟꎠꎡꎢꎣꎤꎥꎦꎧꎨꎩꎪꎫꎬꎭꎮꎯꎰꎱꎲꎳꎴꎵꎶꎷꎸꎹꎺꎻꎼꎽꎾꎿꏀꏁꏂꏃꏄꏅꏆꏇꏈꏉꏊꏋꏌꏍꏎꏏꏐꏑꏒꏓꏔꏕꏖꏗꏘꏙꏚꏛꏜꏝꏞꏟꏠꏡꏢꏣꏤꏥꏦꏧꏨꏩꏪꏫꏬꏭꏮꏯꏰꏱꏲꏳꏴꏵꏶꏷꏸꏹꏺꏻꏼꏽꏾꏿꐀꐁꐂꐃꐄꐅꐆꐇꐈꐉꐊꐋꐌꐍꐎꐏꐐꐑꐒꐓꐔꐕꐖꐗꐘꐙꐚꐛꐜꐝꐞꐟꐠꐡꐢꐣꐤꐥꐦꐧꐨꐩꐪꐫꐬꐭꐮꐯꐰꐱꐲꐳꐴꐵꐶꐷꐸꐹꐺꐻꐼꐽꐾꐿꑀꑁꑂꑃꑄꑅꑆꑇꑈꑉꑊꑋꑌꑍꑎꑏꑐꑑꑒꑓꑔꑕꑖꑗꑘꑙꑚꑛꑜꑝꑞꑟꑠꑡꑢꑣꑤꑥꑦꑧꑨꑩꑪꑫꑬꑭꑮꑯꑰꑱꑲꑳꑴꑵꑶꑷꑸꑹꑺꑻꑼꑽꑾꑿꒀꒁꒂꒃꒄꒅꒆꒇꒈꒉꒊꒋꒌꓐꓑꓒꓓꓔꓕꓖꓗꓘꓙꓚꓛꓜꓝꓞꓟꓠꓡꓢꓣꓤꓥꓦꓧꓨꓩꓪꓫꓬꓭꓮꓯꓰꓱꓲꓳꓴꓵꓶꓷꓸꓹꓺꓻꓼꓽꔀꔁꔂꔃꔄꔅꔆꔇꔈꔉꔊꔋꔌꔍꔎꔏꔐꔑꔒꔓꔔꔕꔖꔗꔘꔙꔚꔛꔜꔝꔞꔟꔠꔡꔢꔣꔤꔥꔦꔧꔨꔩꔪꔫꔬꔭꔮꔯꔰꔱꔲꔳꔴꔵꔶꔷꔸꔹꔺꔻꔼꔽꔾꔿꕀꕁꕂꕃꕄꕅꕆꕇꕈꕉꕊꕋꕌꕍꕎꕏꕐꕑꕒꕓꕔꕕꕖꕗꕘꕙꕚꕛꕜꕝꕞꕟꕠꕡꕢꕣꕤꕥꕦꕧꕨꕩꕪꕫꕬꕭꕮꕯꕰꕱꕲꕳꕴꕵꕶꕷꕸꕹꕺꕻꕼꕽꕾꕿꖀꖁꖂꖃꖄꖅꖆꖇꖈꖉꖊꖋꖌꖍꖎꖏꖐꖑꖒꖓꖔꖕꖖꖗꖘꖙꖚꖛꖜꖝꖞꖟꖠꖡꖢꖣꖤꖥꖦꖧꖨꖩꖪꖫꖬꖭꖮꖯꖰꖱꖲꖳꖴꖵꖶꖷꖸꖹꖺꖻꖼꖽꖾꖿꗀꗁꗂꗃꗄꗅꗆꗇꗈꗉꗊꗋꗌꗍꗎꗏꗐꗑꗒꗓꗔꗕꗖꗗꗘꗙꗚꗛꗜꗝꗞꗟꗠꗡꗢꗣꗤꗥꗦꗧꗨꗩꗪꗫꗬꗭꗮꗯꗰꗱꗲꗳꗴꗵꗶꗷꗸꗹꗺꗻꗼꗽꗾꗿꘀꘁꘂꘃꘄꘅꘆꘇꘈꘉꘊꘋꘌꘐꘑꘒꘓꘔꘕꘖꘗꘘꘙꘚꘛꘜꘝꘞꘟ꘠꘡꘢꘣꘤꘥꘦꘧꘨꘩ꘪꘫꙀꙁꙂꙃꙄꙅꙆꙇꙈꙉꙊꙋꙌꙍꙎꙏꙐꙑꙒꙓꙔꙕꙖꙗꙘꙙꙚꙛꙜꙝꙞꙟꙠꙡꙢꙣꙤꙥꙦꙧꙨꙩꙪꙫꙬꙭꙮ◌ꙴ◌ꙵ◌ꙶ◌ꙷ◌ꙸ◌ꙹ◌ꙺ◌ꙻꙿꚀꚁꚂꚃꚄꚅꚆꚇꚈꚉꚊꚋꚌꚍꚎꚏꚐꚑꚒꚓꚔꚕꚖꚗꚘꚙꚚꚛꚜꚝ◌ꚟꚠꚡꚢꚣꚤꚥꚦꚧꚨꚩꚪꚫꚬꚭꚮꚯꚰꚱꚲꚳꚴꚵꚶꚷꚸꚹꚺꚻꚼꚽꚾꚿꛀꛁꛂꛃꛄꛅꛆꛇꛈꛉꛊꛋꛌꛍꛎꛏꛐꛑꛒꛓꛔꛕꛖꛗꛘꛙꛚꛛꛜꛝꛞꛟꛠꛡꛢꛣꛤꛥꛦꛧꛨꛩꛪꛫꛬꛭꛮꛯꜗꜘꜙꜚꜛꜜꜝꜞꜟꜢꜣꜤꜥꜦꜧꜨꜩꜪꜫꜬꜭꜮꜯꜰꜱꜲꜳꜴꜵꜶꜷꜸꜹꜺꜻꜼꜽꜾꜿꝀꝁꝂꝃꝄꝅꝆꝇ
ꝈꝉꝊꝋꝌꝍꝎꝏꝐꝑꝒꝓꝔꝕꝖꝗꝘꝙꝚꝛꝜꝝꝞꝟꝠꝡꝢꝣꝤꝥꝦꝧꝨꝩꝪꝫꝬꝭꝮꝯꝰꝱꝲꝳꝴꝵꝶꝷꝸꝹꝺꝻꝼꝽꝾꝿꞀꞁꞂꞃꞄꞅꞆꞇꞈꞋꞌꞍꞎꞐꞑꞒꞓꞔꞕꞖꞗꞘꞙꞚꞛꞜꞝꞞꞟꞠꞡꞢꞣꞤꞥꞦꞧꞨꞩꞪꞫꞬꞭꞰꞱꟷꟸꟹꟺꟻꟼꟽꟾꟿꠀꠁꠃꠄꠅꠇꠈꠉꠊꠌꠍꠎꠏꠐꠑꠒꠓꠔꠕꠖꠗꠘꠙꠚꠛꠜꠝꠞꠟꠠꠡꠢꠣꠤ◌ꠥ◌ꠦꠧꡀꡁꡂꡃꡄꡅꡆꡇꡈꡉꡊꡋꡌꡍꡎꡏꡐꡑꡒꡓꡔꡕꡖꡗꡘꡙꡚꡛꡜꡝꡞꡟꡠꡡꡢꡣꡤꡥꡦꡧꡨꡩꡪꡫꡬꡭꡮꡯꡰꡱꡲꡳꢀꢁꢂꢃꢄꢅꢆꢇꢈꢉꢊꢋꢌꢍꢎꢏꢐꢑꢒꢓꢔꢕꢖꢗꢘꢙꢚꢛꢜꢝꢞꢟꢠꢡꢢꢣꢤꢥꢦꢧꢨꢩꢪꢫꢬꢭꢮꢯꢰꢱꢲꢳꢴꢵꢶꢷꢸꢹꢺꢻꢼꢽꢾꢿꣀꣁꣂꣃ꣐꣑꣒꣓꣔꣕꣖꣗꣘꣙ꣲꣳꣴꣵꣶꣷꣻ꤀꤁꤂꤃꤄꤅꤆꤇꤈꤉ꤊꤋꤌꤍꤎꤏꤐꤑꤒꤓꤔꤕꤖꤗꤘꤙꤚꤛꤜꤝꤞꤟꤠꤡꤢꤣꤤꤥ◌ꤦ◌ꤧ◌ꤨ◌ꤩ◌ꤪꤰꤱꤲꤳꤴꤵꤶꤷꤸꤹꤺꤻꤼꤽꤾꤿꥀꥁꥂꥃꥄꥅꥆ◌ꥇ◌ꥈ◌ꥉ◌ꥊ◌ꥋ◌ꥌ◌ꥍ◌ꥎ◌ꥏ◌ꥐ◌ꥑꥒꥠꥡꥢꥣꥤꥥꥦꥧꥨꥩꥪꥫꥬꥭꥮꥯꥰꥱꥲꥳꥴꥵꥶꥷꥸꥹꥺꥻꥼ◌ꦀ◌ꦁ◌ꦂꦃꦄꦅꦆꦇꦈꦉꦊꦋꦌꦍꦎꦏꦐꦑꦒꦓꦔꦕꦖꦗꦘꦙꦚꦛꦜꦝꦞꦟꦠꦡꦢꦣꦤꦥꦦꦧꦨꦩꦪꦫꦬꦭꦮꦯꦰꦱꦲꦴꦵ◌ꦶ◌ꦷ◌ꦸ◌ꦹꦺꦻ◌ꦼꦽꦾꦿꧏ꧐꧑꧒꧓꧔꧕꧖꧗꧘꧙ꧠꧡꧢꧣꧤꧦꧧꧨꧩꧪꧫꧬꧭꧮꧯ꧰꧱꧲꧳꧴꧵꧶꧷꧸꧹ꧺꧻꧼꧽꧾꨀꨁꨂꨃꨄꨅꨆꨇꨈꨉꨊꨋꨌꨍꨎꨏꨐꨑꨒꨓꨔꨕꨖꨗꨘꨙꨚꨛꨜꨝꨞꨟꨠꨡꨢꨣꨤꨥꨦꨧꨨ◌ꨩ◌ꨪ◌ꨫ◌ꨬ◌ꨭ◌ꨮꨯꨰ◌ꨱ◌ꨲꨳꨴ◌ꨵ◌ꨶꩀꩁꩂ◌ꩃꩄꩅꩆꩇꩈꩉꩊꩋ◌ꩌꩍ꩐꩑꩒꩓꩔꩕꩖꩗꩘꩙ꩠꩡꩢꩣꩤꩥꩦꩧꩨꩩꩪꩫꩬꩭꩮꩯꩰꩱꩲꩳꩴꩵꩶꩺꩾꩿꪀꪁꪂꪃꪄꪅꪆꪇꪈꪉꪊꪋꪌꪍꪎꪏꪐꪑꪒꪓꪔꪕꪖꪗꪘꪙꪚꪛꪜꪝꪞꪟꪠꪡꪢꪣꪤꪥꪦꪧꪨꪩꪪꪫꪬꪭꪮꪯ◌ꪰꪱ◌ꪲ◌ꪳ◌ꪴꪵꪶ◌ꪷ◌ꪸꪹꪺꪻꪼꪽ◌ꪾꫀꫂꫛꫜꫝꫠꫡꫢꫣꫤꫥꫦꫧꫨꫩꫪꫫ◌ꫬ◌ꫭꫮꫯꫲꫳꫴꫵꬁꬂꬃꬄꬅꬆꬉꬊꬋꬌꬍꬎꬑꬒꬓꬔꬕꬖꬠꬡꬢꬣꬤꬥꬦꬨꬩꬪꬫꬬꬭꬮꬰꬱꬲꬳꬴꬵꬶꬷꬸꬹꬺꬻꬼꬽꬾꬿꭀꭁꭂꭃꭄꭅꭆꭇꭈꭉꭊꭋꭌꭍꭎꭏꭐꭑꭒꭓꭔꭕꭖꭗꭘꭙꭚꭜꭝꭞꭟꭤꭥꯀꯁꯂꯃꯄꯅꯆꯇꯈꯉꯊꯋꯌꯍꯎꯏꯐꯑꯒꯓꯔꯕꯖꯗꯘꯙꯚꯛꯜꯝꯞꯟꯠꯡꯢꯣꯤ◌ꯥꯦꯧ◌ꯨꯩꯪ꯰꯱꯲꯳꯴꯵꯶꯷꯸꯹ힰힱힲힳힴힵힶힷힸힹힺힻힼힽힾힿퟀퟁퟂퟃퟄퟅퟆퟋퟌퟍퟎퟏퟐퟑퟒퟓퟔퟕퟖퟗퟘퟙퟚퟛퟜퟝퟞퟟퟠퟡퟢퟣퟤퟥퟦퟧퟨퟩퟪퟫퟬퟭퟮퟯퟰퟱퟲퟳퟴퟵퟶퟷퟸퟹퟺퟻfffiflffifflſtstﬓﬔﬕﬖﬗיִ◌ﬞײַﬠﬡﬢﬣﬤﬥﬦﬧﬨשׁשׂשּׁשּׂאַאָאּבּגּדּהּוּזּטּיּךּכּלּמּנּסּףּפּצּקּרּשּתּוֹבֿכֿפֿﭏﭐﭑﭒﭓﭔﭕﭖﭗﭘﭙﭚﭛﭜﭝﭞﭟﭠﭡﭢﭣﭤﭥﭦﭧﭨﭩﭪﭫﭬﭭﭮﭯﭰﭱﭲﭳﭴﭵﭶﭷﭸﭹﭺﭻﭼﭽﭾﭿﮀﮁﮂﮃﮄﮅﮆﮇﮈﮉﮊﮋﮌﮍﮎﮏﮐﮑﮒﮓﮔﮕﮖﮗﮘﮙﮚﮛﮜﮝﮞﮟﮠﮡﮢﮣﮤﮥﮦﮧﮨﮩﮪﮫﮬﮭﮮﮯﮰﮱﯓﯔﯕﯖﯗﯘﯙﯚﯛﯜﯝﯞﯟﯠﯡﯢﯣﯤﯥﯦﯧﯨﯩﯪﯫﯬﯭﯮﯯﯰﯱﯲﯳﯴﯵﯶﯷﯸﯹﯺﯻﯼﯽﯾﯿﰀﰁﰂﰃﰄﰅﰆﰇﰈﰉﰊﰋﰌﰍﰎﰏﰐﰑﰒﰓﰔﰕﰖﰗﰘﰙﰚﰛﰜﰝﰞﰟﰠﰡﰢﰣﰤﰥﰦﰧﰨﰩﰪﰫﰬﰭﰮﰯﰰﰱﰲﰳﰴﰵﰶﰷﰸﰹﰺﰻﰼﰽﰾﰿﱀﱁﱂﱃﱄﱅﱆﱇﱈﱉﱊﱋﱌﱍﱎﱏﱐﱑﱒﱓﱔﱕﱖﱗﱘﱙﱚﱛﱜﱝﱞﱟﱠﱡﱢﱣﱤﱥﱦﱧﱨﱩﱪﱫﱬﱭﱮﱯﱰﱱﱲﱳﱴﱵﱶﱷﱸﱹﱺﱻﱼﱽﱾﱿﲀﲁﲂﲃﲄﲅﲆﲇﲈﲉﲊﲋﲌﲍﲎﲏﲐﲑﲒﲓﲔﲕﲖﲗﲘﲙﲚﲛﲜﲝﲞﲟﲠﲡﲢﲣﲤﲥﲦﲧﲨﲩﲪﲫﲬﲭﲮﲯﲰﲱﲲﲳﲴﲵﲶﲷﲸﲹﲺﲻﲼﲽﲾﲿﳀﳁﳂﳃﳄﳅﳆﳇﳈﳉﳊﳋﳌﳍﳎﳏﳐﳑﳒﳓﳔﳕﳖﳗﳘﳙﳚﳛﳜﳝﳞﳟﳠﳡﳢﳣﳤﳥﳦﳧﳨﳩﳪﳫﳬﳭﳮﳯﳰﳱﳲﳳﳴﳵﳶﳷﳸﳹﳺﳻﳼﳽﳾﳿﴀﴁﴂﴃﴄﴅﴆﴇﴈﴉﴊﴋﴌﴍﴎﴏﴐﴑﴒﴓﴔﴕﴖﴗﴘﴙﴚﴛﴜﴝﴞﴟﴠﴡﴢﴣﴤﴥﴦﴧﴨﴩﴪﴫﴬﴭﴮﴯﴰﴱﴲﴳﴴﴵﴶﴷﴸﴹﴺﴻﴼﴽﵐﵑﵒﵓﵔﵕﵖﵗﵘﵙﵚﵛﵜﵝﵞﵟﵠﵡﵢﵣﵤﵥﵦﵧﵨﵩﵪﵫﵬﵭﵮﵯﵰﵱﵲﵳﵴﵵﵶﵷﵸﵹﵺﵻﵼﵽﵾﵿﶀﶁﶂﶃﶄﶅﶆﶇﶈﶉﶊﶋﶌﶍﶎﶏﶒﶓﶔﶕﶖﶗﶘﶙﶚﶛﶜﶝﶞﶟﶠﶡﶢﶣﶤﶥﶦﶧﶨﶩﶪﶫﶬﶭﶮﶯﶰﶱﶲﶳﶴﶵﶶﶷﶸﶹﶺﶻﶼﶽﶾﶿﷀﷁﷂﷃﷄﷅﷆﷇﷰﷱﷲﷳﷴﷵﷶﷷﷸﷹﷺﷻﹰﹱﹲﹳﹴﹶﹷﹸﹹﹺﹻﹼﹽﹾﹿﺀﺁﺂﺃﺄﺅﺆﺇﺈﺉﺊﺋﺌﺍﺎﺏﺐﺑﺒﺓﺔﺕﺖﺗﺘﺙﺚﺛﺜﺝﺞﺟﺠﺡﺢﺣﺤﺥﺦﺧﺨﺩﺪﺫﺬﺭﺮﺯﺰﺱﺲﺳﺴﺵﺶﺷﺸﺹﺺﺻﺼﺽﺾﺿﻀﻁﻂﻃﻄﻅﻆﻇﻈﻉﻊﻋﻌﻍﻎﻏﻐﻑﻒﻓﻔﻕﻖﻗﻘﻙﻚﻛﻜﻝﻞﻟﻠﻡﻢﻣﻤﻥﻦﻧﻨﻩﻪﻫﻬﻭﻮﻯﻰﻱﻲﻳﻴﻵﻶﻷﻸﻹﻺﻻﻼ0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzヲァィゥェォャュョッーアイウエオカキクケコサシスセソタチツテトナニヌネノハヒフヘホマミムメモヤユヨラリルレロワン゙゚ᅠᄀᄁᆪᄂᆬᆭᄃᄄᄅᆰᆱᆲᆳᆴᆵᄚᄆᄇᄈᄡᄉᄊᄋᄌᄍᄎᄏᄐᄑ하ᅢᅣᅤᅥᅦᅧᅨᅩᅪᅫᅬᅭᅮᅯᅰᅱᅲᅳᅴᅵ -------------------------------------------------------------------------------- /tutorials/Chapter2/assets/IsAlpha.txt: -------------------------------------------------------------------------------- 1 | 
ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzªµºÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿĀāĂ㥹ĆćĈĉĊċČčĎďĐđĒēĔĕĖėĘęĚěĜĝĞğĠġĢģĤĥĦħĨĩĪīĬĭĮįİıIJijĴĵĶķĸĹĺĻļĽľĿŀŁłŃńŅņŇňʼnŊŋŌōŎŏŐőŒœŔŕŖŗŘřŚśŜŝŞşŠšŢţŤťŦŧŨũŪūŬŭŮůŰűŲųŴŵŶŷŸŹźŻżŽžſƀƁƂƃƄƅƆƇƈƉƊƋƌƍƎƏƐƑƒƓƔƕƖƗƘƙƚƛƜƝƞƟƠơƢƣƤƥƦƧƨƩƪƫƬƭƮƯưƱƲƳƴƵƶƷƸƹƺƻƼƽƾƿǀǁǂǃDŽDždžLJLjljNJNjnjǍǎǏǐǑǒǓǔǕǖǗǘǙǚǛǜǝǞǟǠǡǢǣǤǥǦǧǨǩǪǫǬǭǮǯǰDZDzdzǴǵǶǷǸǹǺǻǼǽǾǿȀȁȂȃȄȅȆȇȈȉȊȋȌȍȎȏȐȑȒȓȔȕȖȗȘșȚțȜȝȞȟȠȡȢȣȤȥȦȧȨȩȪȫȬȭȮȯȰȱȲȳȴȵȶȷȸȹȺȻȼȽȾȿɀɁɂɃɄɅɆɇɈɉɊɋɌɍɎɏɐɑɒɓɔɕɖɗɘəɚɛɜɝɞɟɠɡɢɣɤɥɦɧɨɩɪɫɬɭɮɯɰɱɲɳɴɵɶɷɸɹɺɻɼɽɾɿʀʁʂʃʄʅʆʇʈʉʊʋʌʍʎʏʐʑʒʓʔʕʖʗʘʙʚʛʜʝʞʟʠʡʢʣʤʥʦʧʨʩʪʫʬʭʮʯʰʱʲʳʴʵʶʷʸʹʺʻʼʽʾʿˀˁˆˇˈˉˊˋˌˍˎˏːˑˠˡˢˣˤˬˮ◌ͅͰͱͲͳʹͶͷͺͻͼͽͿΆΈΉΊΌΎΏΐΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΤΥΦΧΨΩΪΫάέήίΰαβγδεζηθικλμνξοπρςστυφχψωϊϋόύώϏϐϑϒϓϔϕϖϗϘϙϚϛϜϝϞϟϠϡϢϣϤϥϦϧϨϩϪϫϬϭϮϯϰϱϲϳϴϵϷϸϹϺϻϼϽϾϿЀЁЂЃЄЅІЇЈЉЊЋЌЍЎЏАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяѐёђѓєѕіїјљњћќѝўџѠѡѢѣѤѥѦѧѨѩѪѫѬѭѮѯѰѱѲѳѴѵѶѷѸѹѺѻѼѽѾѿҀҁҊҋҌҍҎҏҐґҒғҔҕҖҗҘҙҚқҜҝҞҟҠҡҢңҤҥҦҧҨҩҪҫҬҭҮүҰұҲҳҴҵҶҷҸҹҺһҼҽҾҿӀӁӂӃӄӅӆӇӈӉӊӋӌӍӎӏӐӑӒӓӔӕӖӗӘәӚӛӜӝӞӟӠӡӢӣӤӥӦӧӨөӪӫӬӭӮӯӰӱӲӳӴӵӶӷӸӹӺӻӼӽӾӿԀԁԂԃԄԅԆԇԈԉԊԋԌԍԎԏԐԑԒԓԔԕԖԗԘԙԚԛԜԝԞԟԠԡԢԣԤԥԦԧԨԩԪԫԬԭԮԯԱԲԳԴԵԶԷԸԹԺԻԼԽԾԿՀՁՂՃՄՅՆՇՈՉՊՋՌՍՎՏՐՑՒՓՔՕՖՙաբգդեզէըթժիլխծկհձղճմյնշոչպջռսվտրցւփքօֆև◌ְ◌ֱ◌ֲ◌ֳ◌ִ◌ֵ◌ֶ◌ַ◌ָ◌ֹ◌ֺ◌ֻ◌ּ◌ֽ◌ֿ◌ׁ◌ׂ◌ׄ◌ׅ◌ׇאבגדהוזחטיךכלםמןנסעףפץצקרשתװױײ◌ؐ◌ؑ◌ؒ◌ؓ◌ؔ◌ؕ◌ؖ◌ؗ◌ؘ◌ؙ◌ؚؠءآأؤإئابةتثجحخدذرزسشصضطظعغػؼؽؾؿـفقكلمنهوىي◌ً◌ٌ◌ٍ◌َ◌ُ◌ِ◌ّ◌ْ◌ٓ◌ٔ◌ٕ◌ٖ◌ٗ◌ٙ◌ٚ◌ٛ◌ٜ◌ٝ◌ٞ◌ٟٮٯ◌ٰٱٲٳٴٵٶٷٸٹٺٻټٽپٿڀځڂڃڄڅچڇڈډڊڋڌڍڎڏڐڑڒړڔڕږڗژڙښڛڜڝڞڟڠڡڢڣڤڥڦڧڨکڪګڬڭڮگڰڱڲڳڴڵڶڷڸڹںڻڼڽھڿۀہۂۃۄۅۆۇۈۉۊۋیۍێۏېۑےۓە◌ۖ◌ۗ◌ۘ◌ۙ◌ۚ◌ۛ◌ۜ◌ۡ◌ۢ◌ۣ◌ۤۥۦ◌ۧ◌ۨ◌ۭۮۯۺۻۼۿܐ◌ܑܒܓܔܕܖܗܘܙܚܛܜܝܞܟܠܡܢܣܤܥܦܧܨܩܪܫܬܭܮܯ◌ܰ◌ܱ◌ܲ◌ܳ◌ܴ◌ܵ◌ܶ◌ܷ◌ܸ◌ܹ◌ܺ◌ܻ◌ܼ◌ܽ◌ܾ◌ܿݍݎݏݐݑݒݓݔݕݖݗݘݙݚݛݜݝݞݟݠݡݢݣݤݥݦݧݨݩݪݫݬݭݮݯݰݱݲݳݴݵݶݷݸݹݺݻݼݽݾݿހށނރބޅކއވމފދތލގޏސޑޒޓޔޕޖޗޘޙޚޛޜޝޞޟޠޡޢޣޤޥ◌ަ◌ާ◌ި◌ީ◌ު◌ޫ◌ެ◌ޭ◌ޮ◌ޯ◌ްޱߊߋߌߍߎߏߐߑߒߓߔߕߖߗߘߙߚߛߜߝߞߟߠߡߢߣߤߥߦߧߨߩߪߴߵߺࠀࠁࠂࠃࠄࠅࠆࠇࠈࠉࠊࠋࠌࠍࠎࠏࠐࠑࠒࠓࠔࠕ◌ࠖ◌ࠗࠚ◌ࠛ◌ࠜ◌ࠝ◌ࠞ◌ࠟ◌ࠠ◌ࠡ◌ࠢ◌ࠣࠤ◌ࠥ◌ࠦ◌ࠧࠨ◌ࠩ◌ࠪ◌ࠫ◌ࠬࡀࡁࡂࡃࡄࡅࡆࡇࡈࡉࡊࡋࡌࡍࡎࡏࡐࡑࡒࡓࡔࡕࡖࡗࡘࢠࢡࢢࢣࢤࢥࢦࢧࢨࢩࢪࢫࢬࢭࢮࢯࢰࢱࢲ◌ࣤ◌ࣥ◌ࣦ◌ࣧ◌ࣨ◌ࣩ◌ࣰ◌ࣱ◌ࣲ◌ࣳ◌ࣴ◌ࣵ◌ࣶ◌ࣷ◌ࣸ◌ࣹ◌ࣺ◌ࣻ◌ࣼ◌ࣽ◌ࣾ◌ࣿ◌ऀ◌ँ◌ंःऄअआइईउऊऋऌऍऎएऐऑऒओऔकखगघङचछजझञटठडढणतथदधनऩपफबभमयरऱलळऴवशषसह◌ऺऻऽािी◌ु◌ू◌ृ◌ॄ◌ॅ◌ॆ◌े◌ैॉॊोौॎॏॐ◌ॕ◌ॖ◌ॗक़ख़ग़ज़ड़ढ़फ़य़ॠॡ◌ॢ◌ॣॱॲॳॴॵॶॷॸॹॺॻॼॽॾॿঀ◌ঁংঃঅআইঈউঊঋঌএঐওঔকখগঘঙচছজঝঞটঠডঢণতথদধনপফবভমযরলশষসহঽািী◌ু◌ূ◌ৃ◌ৄেৈোৌৎৗড়ঢ়য়ৠৡ◌ৢ◌ৣৰৱ◌ਁ◌ਂਃਅਆਇਈਉਊਏਐਓਔਕਖਗਘਙਚਛਜਝਞਟਠਡਢਣਤਥਦਧਨਪਫਬਭਮਯਰਲਲ਼ਵਸ਼ਸਹਾਿੀ◌ੁ◌ੂ◌ੇ◌ੈ◌ੋ◌ੌ◌ੑਖ਼ਗ਼ਜ਼ੜਫ਼◌ੰ◌ੱੲੳੴ◌ੵ◌ઁ◌ંઃઅઆઇઈઉઊઋઌઍએઐઑઓઔકખગઘઙચછજઝઞટઠડઢણતથદધનપફબભમયરલળવશષસહઽાિી◌ુ◌ૂ◌ૃ◌ૄ◌ૅ◌ે◌ૈૉોૌૐૠૡ◌ૢ◌ૣ◌ଁଂଃଅଆଇଈଉଊଋଌଏଐଓଔକଖଗଘଙଚଛଜଝଞଟଠଡଢଣତଥଦଧନପଫବଭମଯରଲଳଵଶଷସହଽା◌ିୀ◌ୁ◌ୂ◌ୃ◌ୄେୈୋୌ◌ୖୗଡ଼ଢ଼ୟୠୡ◌ୢ◌ୣୱ◌ஂஃஅஆஇஈஉஊஎஏஐஒஓஔகஙசஜஞடணதநனபமயரறலளழவஶஷஸஹாி◌ீுூெேைொோௌௐௗ◌ఀఁంఃఅఆఇఈఉఊఋఌఎఏఐఒఓఔకఖగఘఙచఛజఝఞటఠడఢణతథదధనపఫబభమయరఱలళఴవశషసహఽ◌ా◌ి◌ీుూృౄ◌ె◌ే◌ై◌ొ◌ో◌ౌ◌ౕ◌ౖౘౙౠౡ◌ౢ◌ౣ◌ಁಂಃಅಆಇಈಉಊಋಌಎಏಐಒಓಔಕಖಗಘಙಚಛಜಝಞಟಠಡಢಣತಥದಧನಪಫಬಭಮಯರಱಲಳವಶಷಸಹಽಾಿೀುೂೃೄೆೇೈೊೋ◌ೌೕೖೞೠೡ◌ೢ◌ೣೱೲ◌ഁംഃഅആഇഈഉഊഋഌഎഏഐഒഓഔകഖഗഘങചഛജഝഞടഠഡഢണതഥദധനഩപഫബഭമയരറലളഴവശഷസഹഺഽാിീ◌ു◌ൂ◌ൃ◌ൄെേൈൊോൌൎൗൠൡ◌ൢ◌ൣൺൻർൽൾൿංඃඅආඇඈඉඊඋඌඍඎඏඐඑඒඓඔඕඖකඛගඝඞඟචඡජඣඤඥඦටඨඩඪණඬතථදධනඳපඵබභමඹයරලවශෂසහළෆාැෑ◌ි◌ී◌ු◌ූෘෙේෛොෝෞෟෲෳกขฃคฅฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรฤลฦวศษสหฬอฮฯะ◌ัาำ◌ิ◌ี◌ึ◌ื◌ุ◌ู◌ฺเแโใไๅๆ◌ํກຂຄງຈຊຍດຕຖທນບປຜຝພຟມຢຣລວສຫອຮຯະ◌ັາຳ◌ິ◌ີ◌ຶ◌ື◌ຸ◌ູ◌ົ◌ຼຽເແໂໃໄໆ◌ໍໜໝໞໟༀཀཁགགྷངཅཆཇཉཊཋཌཌྷཎཏཐདདྷནཔཕབབྷམཙཚཛཛྷཝཞཟའཡརལཤཥསཧཨཀྵཪཫཬ◌ཱ◌ི◌ཱི◌ུ◌ཱུ◌ྲྀ◌ཷ◌ླྀ◌ཹ◌ེ◌ཻ◌ོ◌ཽ◌ཾཿ◌ྀ◌ཱྀྈྉྊྋྌ◌ྍ◌ྎ◌ྏ◌ྐ◌ྑ◌ྒ◌ྒྷ◌ྔ◌ྕ◌ྖ◌ྗ◌ྙ◌ྚ◌ྛ◌ྜ◌ྜྷ◌ྞ◌ྟ◌ྠ◌ྡ◌ྡྷ◌ྣ◌ྤ◌ྥ◌ྦ◌ྦྷ◌ྨ◌ྩ◌ྪ◌ྫ◌ྫྷ◌ྭ◌ྮ◌ྯ◌ྰ◌ྱ◌ྲ◌ླ◌ྴ◌ྵ◌ྶ◌ྷ◌ྸ◌ྐྵ◌ྺ◌ྻ◌ྼကခဂဃငစဆဇဈဉညဋဌဍဎဏတထဒဓနပဖဗဘမယရလဝသဟဠအဢဣဤဥဦဧဨဩဪါာ◌ိ◌ီ◌ု◌ူေ◌ဲ◌ဳ◌ဴ◌ဵ◌ံးျြ◌ွ◌ှဿၐၑၒၓၔၕၖၗ◌ၘ◌ၙၚၛၜၝ◌ၞ◌ၟ◌ၠၡၢၥၦၧၨၮၯၰ◌ၱ◌ၲ◌ၳ◌ၴၵၶၷၸၹၺၻၼၽၾၿႀႁ◌ႂႃႄ◌ႅ◌ႆႎႜ◌ႝႠႡႢႣႤႥႦႧႨႩႪႫႬႭႮႯႰႱႲႳႴႵႶႷႸႹႺႻႼႽႾႿჀჁჂჃჄჅჇჍაბგდევზთიკლმნოპჟრსტუფქღყშჩცძწჭხჯჰჱჲჳჴჵჶჷჸჹჺჼჽჾჿᄀᄁᄂᄃᄄᄅᄆᄇᄈᄉᄊᄋᄌᄍᄎᄏᄐᄑᄒᄓᄔᄕᄖᄗᄘᄙᄚᄛᄜᄝᄞᄟᄠᄡᄢᄣᄤᄥᄦᄧᄨᄩᄪᄫᄬᄭᄮᄯᄰᄱᄲᄳᄴᄵᄶᄷᄸᄹᄺᄻᄼᄽᄾᄿᅀᅁᅂᅃᅄᅅᅆᅇᅈᅉᅊᅋᅌᅍᅎᅏᅐᅑᅒᅓᅔᅕᅖᅗᅘᅙᅚᅛᅜᅝᅞᅟᅠᅡᅢᅣᅤᅥᅦᅧᅨᅩᅪᅫᅬᅭᅮᅯᅰᅱᅲᅳᅴᅵᅶᅷᅸᅹᅺᅻᅼᅽᅾᅿᆀᆁᆂᆃᆄᆅᆆᆇᆈᆉᆊᆋᆌᆍᆎᆏᆐᆑᆒᆓᆔᆕᆖᆗᆘᆙᆚᆛᆜᆝᆞᆟᆠᆡᆢᆣᆤᆥᆦᆧᆨᆩᆪᆫᆬᆭᆮᆯᆰᆱᆲᆳᆴᆵᆶᆷᆸᆹᆺᆻᆼᆽᆾᆿᇀᇁᇂᇃᇄᇅᇆᇇᇈᇉᇊᇋᇌᇍᇎᇏᇐᇑᇒᇓᇔᇕᇖᇗᇘᇙᇚᇛᇜᇝᇞᇟᇠᇡᇢᇣᇤᇥᇦᇧᇨᇩᇪᇫᇬᇭᇮᇯᇰᇱᇲᇳᇴᇵᇶᇷᇸᇹᇺᇻᇼᇽᇾᇿሀሁሂሃሄህሆሇለሉሊላሌልሎሏሐሑሒሓሔሕሖሗመሙሚማሜምሞሟሠሡሢሣሤሥሦሧረሩሪራሬርሮሯሰሱሲሳሴስሶሷሸሹሺሻሼሽሾሿቀቁቂቃቄቅቆቇቈቊቋቌቍቐቑቒቓቔቕቖቘቚቛቜቝበቡቢባቤብቦቧቨቩቪቫቬቭቮቯተቱቲታቴ
ትቶቷቸቹቺቻቼችቾቿኀኁኂኃኄኅኆኇኈኊኋኌኍነኑኒናኔንኖኗኘኙኚኛኜኝኞኟአኡኢኣኤእኦኧከኩኪካኬክኮኯኰኲኳኴኵኸኹኺኻኼኽኾዀዂዃዄዅወዉዊዋዌውዎዏዐዑዒዓዔዕዖዘዙዚዛዜዝዞዟዠዡዢዣዤዥዦዧየዩዪያዬይዮዯደዱዲዳዴድዶዷዸዹዺዻዼዽዾዿጀጁጂጃጄጅጆጇገጉጊጋጌግጎጏጐጒጓጔጕጘጙጚጛጜጝጞጟጠጡጢጣጤጥጦጧጨጩጪጫጬጭጮጯጰጱጲጳጴጵጶጷጸጹጺጻጼጽጾጿፀፁፂፃፄፅፆፇፈፉፊፋፌፍፎፏፐፑፒፓፔፕፖፗፘፙፚ◌፟ᎀᎁᎂᎃᎄᎅᎆᎇᎈᎉᎊᎋᎌᎍᎎᎏᎠᎡᎢᎣᎤᎥᎦᎧᎨᎩᎪᎫᎬᎭᎮᎯᎰᎱᎲᎳᎴᎵᎶᎷᎸᎹᎺᎻᎼᎽᎾᎿᏀᏁᏂᏃᏄᏅᏆᏇᏈᏉᏊᏋᏌᏍᏎᏏᏐᏑᏒᏓᏔᏕᏖᏗᏘᏙᏚᏛᏜᏝᏞᏟᏠᏡᏢᏣᏤᏥᏦᏧᏨᏩᏪᏫᏬᏭᏮᏯᏰᏱᏲᏳᏴᐁᐂᐃᐄᐅᐆᐇᐈᐉᐊᐋᐌᐍᐎᐏᐐᐑᐒᐓᐔᐕᐖᐗᐘᐙᐚᐛᐜᐝᐞᐟᐠᐡᐢᐣᐤᐥᐦᐧᐨᐩᐪᐫᐬᐭᐮᐯᐰᐱᐲᐳᐴᐵᐶᐷᐸᐹᐺᐻᐼᐽᐾᐿᑀᑁᑂᑃᑄᑅᑆᑇᑈᑉᑊᑋᑌᑍᑎᑏᑐᑑᑒᑓᑔᑕᑖᑗᑘᑙᑚᑛᑜᑝᑞᑟᑠᑡᑢᑣᑤᑥᑦᑧᑨᑩᑪᑫᑬᑭᑮᑯᑰᑱᑲᑳᑴᑵᑶᑷᑸᑹᑺᑻᑼᑽᑾᑿᒀᒁᒂᒃᒄᒅᒆᒇᒈᒉᒊᒋᒌᒍᒎᒏᒐᒑᒒᒓᒔᒕᒖᒗᒘᒙᒚᒛᒜᒝᒞᒟᒠᒡᒢᒣᒤᒥᒦᒧᒨᒩᒪᒫᒬᒭᒮᒯᒰᒱᒲᒳᒴᒵᒶᒷᒸᒹᒺᒻᒼᒽᒾᒿᓀᓁᓂᓃᓄᓅᓆᓇᓈᓉᓊᓋᓌᓍᓎᓏᓐᓑᓒᓓᓔᓕᓖᓗᓘᓙᓚᓛᓜᓝᓞᓟᓠᓡᓢᓣᓤᓥᓦᓧᓨᓩᓪᓫᓬᓭᓮᓯᓰᓱᓲᓳᓴᓵᓶᓷᓸᓹᓺᓻᓼᓽᓾᓿᔀᔁᔂᔃᔄᔅᔆᔇᔈᔉᔊᔋᔌᔍᔎᔏᔐᔑᔒᔓᔔᔕᔖᔗᔘᔙᔚᔛᔜᔝᔞᔟᔠᔡᔢᔣᔤᔥᔦᔧᔨᔩᔪᔫᔬᔭᔮᔯᔰᔱᔲᔳᔴᔵᔶᔷᔸᔹᔺᔻᔼᔽᔾᔿᕀᕁᕂᕃᕄᕅᕆᕇᕈᕉᕊᕋᕌᕍᕎᕏᕐᕑᕒᕓᕔᕕᕖᕗᕘᕙᕚᕛᕜᕝᕞᕟᕠᕡᕢᕣᕤᕥᕦᕧᕨᕩᕪᕫᕬᕭᕮᕯᕰᕱᕲᕳᕴᕵᕶᕷᕸᕹᕺᕻᕼᕽᕾᕿᖀᖁᖂᖃᖄᖅᖆᖇᖈᖉᖊᖋᖌᖍᖎᖏᖐᖑᖒᖓᖔᖕᖖᖗᖘᖙᖚᖛᖜᖝᖞᖟᖠᖡᖢᖣᖤᖥᖦᖧᖨᖩᖪᖫᖬᖭᖮᖯᖰᖱᖲᖳᖴᖵᖶᖷᖸᖹᖺᖻᖼᖽᖾᖿᗀᗁᗂᗃᗄᗅᗆᗇᗈᗉᗊᗋᗌᗍᗎᗏᗐᗑᗒᗓᗔᗕᗖᗗᗘᗙᗚᗛᗜᗝᗞᗟᗠᗡᗢᗣᗤᗥᗦᗧᗨᗩᗪᗫᗬᗭᗮᗯᗰᗱᗲᗳᗴᗵᗶᗷᗸᗹᗺᗻᗼᗽᗾᗿᘀᘁᘂᘃᘄᘅᘆᘇᘈᘉᘊᘋᘌᘍᘎᘏᘐᘑᘒᘓᘔᘕᘖᘗᘘᘙᘚᘛᘜᘝᘞᘟᘠᘡᘢᘣᘤᘥᘦᘧᘨᘩᘪᘫᘬᘭᘮᘯᘰᘱᘲᘳᘴᘵᘶᘷᘸᘹᘺᘻᘼᘽᘾᘿᙀᙁᙂᙃᙄᙅᙆᙇᙈᙉᙊᙋᙌᙍᙎᙏᙐᙑᙒᙓᙔᙕᙖᙗᙘᙙᙚᙛᙜᙝᙞᙟᙠᙡᙢᙣᙤᙥᙦᙧᙨᙩᙪᙫᙬᙯᙰᙱᙲᙳᙴᙵᙶᙷᙸᙹᙺᙻᙼᙽᙾᙿᚁᚂᚃᚄᚅᚆᚇᚈᚉᚊᚋᚌᚍᚎᚏᚐᚑᚒᚓᚔᚕᚖᚗᚘᚙᚚᚠᚡᚢᚣᚤᚥᚦᚧᚨᚩᚪᚫᚬᚭᚮᚯᚰᚱᚲᚳᚴᚵᚶᚷᚸᚹᚺᚻᚼᚽᚾᚿᛀᛁᛂᛃᛄᛅᛆᛇᛈᛉᛊᛋᛌᛍᛎᛏᛐᛑᛒᛓᛔᛕᛖᛗᛘᛙᛚᛛᛜᛝᛞᛟᛠᛡᛢᛣᛤᛥᛦᛧᛨᛩᛪᛮᛯᛰᛱᛲᛳᛴᛵᛶᛷᛸᜀᜁᜂᜃᜄᜅᜆᜇᜈᜉᜊᜋᜌᜎᜏᜐᜑ◌ᜒ◌ᜓᜠᜡᜢᜣᜤᜥᜦᜧᜨᜩᜪᜫᜬᜭᜮᜯᜰᜱ◌ᜲ◌ᜳᝀᝁᝂᝃᝄᝅᝆᝇᝈᝉᝊᝋᝌᝍᝎᝏᝐᝑ◌ᝒ◌ᝓᝠᝡᝢᝣᝤᝥᝦᝧᝨᝩᝪᝫᝬᝮᝯᝰ◌ᝲ◌ᝳកខគឃងចឆជឈញដឋឌឍណតថទធនបផពភមយរលវឝឞសហឡអឣឤឥឦឧឨឩឪឫឬឭឮឯឰឱឲឳា◌ិ◌ី◌ឹ◌ឺ◌ុ◌ូ◌ួើឿៀេែៃោៅ◌ំះៈៗៜᠠᠡᠢᠣᠤᠥᠦᠧᠨᠩᠪᠫᠬᠭᠮᠯᠰᠱᠲᠳᠴᠵᠶᠷᠸᠹᠺᠻᠼᠽᠾᠿᡀᡁᡂᡃᡄᡅᡆᡇᡈᡉᡊᡋᡌᡍᡎᡏᡐᡑᡒᡓᡔᡕᡖᡗᡘᡙᡚᡛᡜᡝᡞᡟᡠᡡᡢᡣᡤᡥᡦᡧᡨᡩᡪᡫᡬᡭᡮᡯᡰᡱᡲᡳᡴᡵᡶᡷᢀᢁᢂᢃᢄᢅᢆᢇᢈᢉᢊᢋᢌᢍᢎᢏᢐᢑᢒᢓᢔᢕᢖᢗᢘᢙᢚᢛᢜᢝᢞᢟᢠᢡᢢᢣᢤᢥᢦᢧᢨ◌ᢩᢪᢰᢱᢲᢳᢴᢵᢶᢷᢸᢹᢺᢻᢼᢽᢾᢿᣀᣁᣂᣃᣄᣅᣆᣇᣈᣉᣊᣋᣌᣍᣎᣏᣐᣑᣒᣓᣔᣕᣖᣗᣘᣙᣚᣛᣜᣝᣞᣟᣠᣡᣢᣣᣤᣥᣦᣧᣨᣩᣪᣫᣬᣭᣮᣯᣰᣱᣲᣳᣴᣵᤀᤁᤂᤃᤄᤅᤆᤇᤈᤉᤊᤋᤌᤍᤎᤏᤐᤑᤒᤓᤔᤕᤖᤗᤘᤙᤚᤛᤜᤝᤞ◌ᤠ◌ᤡ◌ᤢᤣᤤᤥᤦ◌ᤧ◌ᤨᤩᤪᤫᤰᤱ◌ᤲᤳᤴᤵᤶᤷᤸᥐᥑᥒᥓᥔᥕᥖᥗᥘᥙᥚᥛᥜᥝᥞᥟᥠᥡᥢᥣᥤᥥᥦᥧᥨᥩᥪᥫᥬᥭᥰᥱᥲᥳᥴᦀᦁᦂᦃᦄᦅᦆᦇᦈᦉᦊᦋᦌᦍᦎᦏᦐᦑᦒᦓᦔᦕᦖᦗᦘᦙᦚᦛᦜᦝᦞᦟᦠᦡᦢᦣᦤᦥᦦᦧᦨᦩᦪᦫᦰᦱᦲᦳᦴᦵᦶᦷᦸᦹᦺᦻᦼᦽᦾᦿᧀᧁᧂᧃᧄᧅᧆᧇᧈᧉᨀᨁᨂᨃᨄᨅᨆᨇᨈᨉᨊᨋᨌᨍᨎᨏᨐᨑᨒᨓᨔᨕᨖ◌ᨗ◌ᨘᨙᨚ◌ᨛᨠᨡᨢᨣᨤᨥᨦᨧᨨᨩᨪᨫᨬᨭᨮᨯᨰᨱᨲᨳᨴᨵᨶᨷᨸᨹᨺᨻᨼᨽᨾᨿᩀᩁᩂᩃᩄᩅᩆᩇᩈᩉᩊᩋᩌᩍᩎᩏᩐᩑᩒᩓᩔᩕ◌ᩖᩗ◌ᩘ◌ᩙ◌ᩚ◌ᩛ◌ᩜ◌ᩝ◌ᩞᩡ◌ᩢᩣᩤ◌ᩥ◌ᩦ◌ᩧ◌ᩨ◌ᩩ◌ᩪ◌ᩫ◌ᩬᩭᩮᩯᩰᩱᩲ◌ᩳ◌ᩴᪧ◌ᬀ◌ᬁ◌ᬂ◌ᬃᬄᬅᬆᬇᬈᬉᬊᬋᬌᬍᬎᬏᬐᬑᬒᬓᬔᬕᬖᬗᬘᬙᬚᬛᬜᬝᬞᬟᬠᬡᬢᬣᬤᬥᬦᬧᬨᬩᬪᬫᬬᬭᬮᬯᬰᬱᬲᬳᬵ◌ᬶ◌ᬷ◌ᬸ◌ᬹ◌ᬺᬻ◌ᬼᬽᬾᬿᭀᭁ◌ᭂᭃᭅᭆᭇᭈᭉᭊᭋ◌ᮀ◌ᮁᮂᮃᮄᮅᮆᮇᮈᮉᮊᮋᮌᮍᮎᮏᮐᮑᮒᮓᮔᮕᮖᮗᮘᮙᮚᮛᮜᮝᮞᮟᮠᮡ◌ᮢ◌ᮣ◌ᮤ◌ᮥᮦᮧ◌ᮨ◌ᮩ◌ᮬ◌ᮭᮮᮯᮺᮻᮼᮽᮾᮿᯀᯁᯂᯃᯄᯅᯆᯇᯈᯉᯊᯋᯌᯍᯎᯏᯐᯑᯒᯓᯔᯕᯖᯗᯘᯙᯚᯛᯜᯝᯞᯟᯠᯡᯢᯣᯤᯥᯧ◌ᯨ◌ᯩᯪᯫᯬ◌ᯭᯮ◌ᯯ◌ᯰ◌ᯱᰀᰁᰂᰃᰄᰅᰆᰇᰈᰉᰊᰋᰌᰍᰎᰏᰐᰑᰒᰓᰔᰕᰖᰗᰘᰙᰚᰛᰜᰝᰞᰟᰠᰡᰢᰣᰤᰥᰦᰧᰨᰩᰪᰫ◌ᰬ◌ᰭ◌ᰮ◌ᰯ◌ᰰ◌ᰱ◌ᰲ◌ᰳᰴᰵᱍᱎᱏᱚᱛᱜᱝᱞᱟᱠᱡᱢᱣᱤᱥᱦᱧᱨᱩᱪᱫᱬᱭᱮᱯᱰᱱᱲᱳᱴᱵᱶᱷᱸᱹᱺᱻᱼᱽᳩᳪᳫᳬᳮᳯᳰᳱᳲᳳᳵᳶᴀᴁᴂᴃᴄᴅᴆᴇᴈᴉᴊᴋᴌᴍᴎᴏᴐᴑᴒᴓᴔᴕᴖᴗᴘᴙᴚᴛᴜᴝᴞᴟᴠᴡᴢᴣᴤᴥᴦᴧᴨᴩᴪᴫᴬᴭᴮᴯᴰᴱᴲᴳᴴᴵᴶᴷᴸᴹᴺᴻᴼᴽᴾᴿᵀᵁᵂᵃᵄᵅᵆᵇᵈᵉᵊᵋᵌᵍᵎᵏᵐᵑᵒᵓᵔᵕᵖᵗᵘᵙᵚᵛᵜᵝᵞᵟᵠᵡᵢᵣᵤᵥᵦᵧᵨᵩᵪᵫᵬᵭᵮᵯᵰᵱᵲᵳᵴᵵᵶᵷᵸᵹᵺᵻᵼᵽᵾᵿᶀᶁᶂᶃᶄᶅᶆᶇᶈᶉᶊᶋᶌᶍᶎᶏᶐᶑᶒᶓᶔᶕᶖᶗᶘᶙᶚᶛᶜᶝᶞᶟᶠᶡᶢᶣᶤᶥᶦᶧᶨᶩᶪᶫᶬᶭᶮᶯᶰᶱᶲᶳᶴᶵᶶᶷᶸᶹᶺᶻᶼᶽᶾᶿ◌ᷧ◌ᷨ◌ᷩ◌ᷪ◌ᷫ◌ᷬ◌ᷭ◌ᷮ◌ᷯ◌ᷰ◌ᷱ◌ᷲ◌ᷳ◌ᷴḀḁḂḃḄḅḆḇḈḉḊḋḌḍḎḏḐḑḒḓḔḕḖḗḘḙḚḛḜḝḞḟḠḡḢḣḤḥḦḧḨḩḪḫḬḭḮḯḰḱḲḳḴḵḶḷḸḹḺḻḼḽḾḿṀṁṂṃṄṅṆṇṈṉṊṋṌṍṎṏṐṑṒṓṔṕṖṗṘṙṚṛṜṝṞṟṠṡṢṣṤṥṦṧṨṩṪṫṬṭṮṯṰṱṲṳṴṵṶṷṸṹṺṻṼṽṾṿẀẁẂẃẄẅẆẇẈẉẊẋẌẍẎẏẐẑẒẓẔẕẖẗẘẙẚẛẜẝẞẟẠạẢảẤấẦầẨẩẪẫẬậẮắẰằẲẳẴẵẶặẸẹẺẻẼẽẾếỀềỂểỄễỆệỈỉỊịỌọỎỏỐốỒồỔổỖỗỘộỚớỜờỞởỠỡỢợỤụỦủỨứỪừỬửỮữỰựỲỳỴỵỶỷỸỹỺỻỼỽỾỿἀἁἂἃἄἅἆἇἈἉἊἋἌἍἎἏἐἑἒἓἔἕἘἙἚἛἜἝἠἡἢἣἤἥἦἧἨἩἪἫἬἭἮἯἰἱἲἳἴἵἶἷἸἹἺἻἼἽἾἿὀὁὂὃὄὅὈὉὊὋὌὍὐὑὒὓὔὕὖὗὙὛὝὟὠὡὢὣὤὥὦὧὨὩὪὫὬὭὮὯὰάὲέὴήὶίὸόὺύὼώᾀᾁᾂᾃᾄᾅᾆᾇᾈᾉᾊᾋᾌᾍᾎᾏᾐᾑᾒᾓᾔᾕᾖᾗᾘᾙᾚᾛᾜᾝᾞᾟᾠᾡᾢᾣᾤᾥᾦᾧᾨᾩᾪᾫᾬᾭᾮᾯᾰᾱᾲᾳᾴᾶᾷᾸᾹᾺΆᾼιῂῃῄῆῇῈΈῊΉῌῐῑῒΐῖῗῘῙῚΊῠῡῢΰῤῥῦῧῨῩῪΎῬῲῳῴῶῷῸΌῺΏῼⁱⁿₐₑₒₓₔₕₖₗₘₙₚₛₜℂℇℊℋℌℍℎℏℐℑℒℓℕℙℚℛℜℝℤΩℨKÅℬℭℯℰℱℲℳℴℵℶℷℸℹℼℽℾℿⅅⅆⅇⅈⅉⅎⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫⅬⅭⅮⅯⅰⅱⅲⅳⅴⅵⅶⅷⅸⅹⅺⅻⅼⅽⅾⅿↀↁↂↃↄↅↆↇↈⒶⒷⒸⒹⒺⒻⒼⒽⒾⒿⓀⓁⓂⓃⓄⓅⓆⓇⓈⓉⓊⓋⓌⓍⓎⓏⓐⓑⓒⓓⓔⓕⓖⓗⓘⓙⓚⓛⓜⓝⓞⓟⓠⓡⓢⓣⓤⓥⓦⓧⓨⓩⰀⰁⰂⰃⰄⰅⰆⰇⰈⰉⰊⰋⰌⰍⰎⰏⰐⰑⰒⰓⰔⰕⰖⰗⰘⰙⰚⰛⰜⰝⰞⰟⰠⰡⰢⰣⰤⰥⰦⰧⰨⰩⰪⰫⰬⰭⰮⰰⰱⰲⰳⰴⰵⰶⰷⰸⰹⰺⰻⰼⰽⰾⰿⱀⱁⱂⱃⱄⱅⱆⱇⱈⱉⱊⱋⱌⱍⱎⱏⱐⱑⱒⱓⱔⱕⱖⱗⱘⱙⱚⱛⱜⱝⱞⱠⱡⱢⱣⱤⱥⱦⱧⱨⱩⱪⱫⱬⱭⱮⱯⱰⱱⱲⱳⱴⱵⱶⱷⱸⱹⱺⱻⱼⱽⱾⱿⲀⲁⲂⲃⲄⲅⲆⲇⲈⲉⲊⲋⲌⲍⲎⲏⲐⲑⲒⲓⲔⲕⲖⲗⲘⲙⲚⲛⲜⲝⲞⲟⲠⲡⲢⲣⲤⲥⲦⲧⲨⲩⲪⲫⲬⲭⲮⲯⲰⲱⲲⲳⲴⲵⲶⲷⲸⲹⲺⲻⲼⲽⲾⲿⳀⳁⳂⳃⳄⳅⳆⳇⳈⳉⳊⳋⳌⳍⳎⳏⳐⳑⳒⳓⳔⳕⳖⳗⳘⳙⳚⳛⳜⳝⳞⳟⳠⳡⳢⳣⳤⳫⳬⳭⳮⳲⳳⴀⴁⴂⴃⴄⴅⴆⴇⴈⴉⴊⴋⴌⴍⴎⴏⴐⴑⴒⴓⴔⴕⴖⴗⴘⴙⴚⴛⴜⴝⴞⴟⴠⴡⴢⴣⴤⴥⴧⴭⴰⴱⴲⴳⴴⴵⴶⴷⴸⴹⴺⴻⴼⴽⴾⴿⵀⵁⵂⵃⵄⵅⵆⵇⵈⵉⵊⵋⵌⵍⵎⵏⵐⵑⵒⵓⵔⵕⵖⵗⵘⵙⵚⵛⵜⵝⵞⵟⵠⵡⵢⵣⵤⵥⵦⵧⵯⶀⶁⶂⶃⶄⶅⶆⶇⶈⶉⶊⶋⶌⶍⶎⶏⶐⶑⶒⶓⶔⶕⶖⶠⶡⶢⶣⶤⶥⶦⶨⶩⶪⶫⶬⶭⶮⶰⶱⶲⶳⶴⶵⶶⶸⶹⶺⶻⶼⶽⶾⷀⷁⷂⷃⷄⷅⷆⷈⷉⷊⷋⷌⷍⷎⷐⷑⷒⷓⷔⷕⷖⷘⷙⷚⷛⷜⷝⷞ◌ⷠ◌ⷡ◌ⷢ◌ⷣ◌ⷤ◌ⷥ◌ⷦ◌ⷧ◌ⷨ◌ⷩ◌ⷪ◌ⷫ◌ⷬ◌ⷭ◌ⷮ◌ⷯ◌ⷰ◌ⷱ◌ⷲ◌ⷳ◌ⷴ◌ⷵ◌ⷶ◌ⷷ◌ⷸ◌ⷹ◌ⷺ◌ⷻ◌ⷼ◌ⷽ◌ⷾ◌ⷿⸯ〆〱〲〳〴〵〼ぁあぃいぅうぇえぉおかがきぎくぐけげこごさざしじすずせぜそぞただちぢっつづてでとどなにぬねのはばぱひびぴふぶぷへべぺほぼぽまみむめもゃやゅゆょよらりるれろゎわゐゑをんゔゕゖゝゞゟァアィイゥウェエォオカガキギクグケゲコゴサザシジスズセゼソゾタダチヂッツヅテデトドナニヌネノハバパヒビピフブプヘベペホボポマミムメモャヤュユョヨラリルレロヮワヰヱヲンヴヵヶヷヸヹ
ヺーヽヾヿㄅㄆㄇㄈㄉㄊㄋㄌㄍㄎㄏㄐㄑㄒㄓㄔㄕㄖㄗㄘㄙㄚㄛㄜㄝㄞㄟㄠㄡㄢㄣㄤㄥㄦㄧㄨㄩㄪㄫㄬㄭㄱㄲㄳㄴㄵㄶㄷㄸㄹㄺㄻㄼㄽㄾㄿㅀㅁㅂㅃㅄㅅㅆㅇㅈㅉㅊㅋㅌㅍㅎㅏㅐㅑㅒㅓㅔㅕㅖㅗㅘㅙㅚㅛㅜㅝㅞㅟㅠㅡㅢㅣㅤㅥㅦㅧㅨㅩㅪㅫㅬㅭㅮㅯㅰㅱㅲㅳㅴㅵㅶㅷㅸㅹㅺㅻㅼㅽㅾㅿㆀㆁㆂㆃㆄㆅㆆㆇㆈㆉㆊㆋㆌㆍㆎㆠㆡㆢㆣㆤㆥㆦㆧㆨㆩㆪㆫㆬㆭㆮㆯㆰㆱㆲㆳㆴㆵㆶㆷㆸㆹㆺㇰㇱㇲㇳㇴㇵㇶㇷㇸㇹㇺㇻㇼㇽㇾㇿꀀꀁꀂꀃꀄꀅꀆꀇꀈꀉꀊꀋꀌꀍꀎꀏꀐꀑꀒꀓꀔꀕꀖꀗꀘꀙꀚꀛꀜꀝꀞꀟꀠꀡꀢꀣꀤꀥꀦꀧꀨꀩꀪꀫꀬꀭꀮꀯꀰꀱꀲꀳꀴꀵꀶꀷꀸꀹꀺꀻꀼꀽꀾꀿꁀꁁꁂꁃꁄꁅꁆꁇꁈꁉꁊꁋꁌꁍꁎꁏꁐꁑꁒꁓꁔꁕꁖꁗꁘꁙꁚꁛꁜꁝꁞꁟꁠꁡꁢꁣꁤꁥꁦꁧꁨꁩꁪꁫꁬꁭꁮꁯꁰꁱꁲꁳꁴꁵꁶꁷꁸꁹꁺꁻꁼꁽꁾꁿꂀꂁꂂꂃꂄꂅꂆꂇꂈꂉꂊꂋꂌꂍꂎꂏꂐꂑꂒꂓꂔꂕꂖꂗꂘꂙꂚꂛꂜꂝꂞꂟꂠꂡꂢꂣꂤꂥꂦꂧꂨꂩꂪꂫꂬꂭꂮꂯꂰꂱꂲꂳꂴꂵꂶꂷꂸꂹꂺꂻꂼꂽꂾꂿꃀꃁꃂꃃꃄꃅꃆꃇꃈꃉꃊꃋꃌꃍꃎꃏꃐꃑꃒꃓꃔꃕꃖꃗꃘꃙꃚꃛꃜꃝꃞꃟꃠꃡꃢꃣꃤꃥꃦꃧꃨꃩꃪꃫꃬꃭꃮꃯꃰꃱꃲꃳꃴꃵꃶꃷꃸꃹꃺꃻꃼꃽꃾꃿꄀꄁꄂꄃꄄꄅꄆꄇꄈꄉꄊꄋꄌꄍꄎꄏꄐꄑꄒꄓꄔꄕꄖꄗꄘꄙꄚꄛꄜꄝꄞꄟꄠꄡꄢꄣꄤꄥꄦꄧꄨꄩꄪꄫꄬꄭꄮꄯꄰꄱꄲꄳꄴꄵꄶꄷꄸꄹꄺꄻꄼꄽꄾꄿꅀꅁꅂꅃꅄꅅꅆꅇꅈꅉꅊꅋꅌꅍꅎꅏꅐꅑꅒꅓꅔꅕꅖꅗꅘꅙꅚꅛꅜꅝꅞꅟꅠꅡꅢꅣꅤꅥꅦꅧꅨꅩꅪꅫꅬꅭꅮꅯꅰꅱꅲꅳꅴꅵꅶꅷꅸꅹꅺꅻꅼꅽꅾꅿꆀꆁꆂꆃꆄꆅꆆꆇꆈꆉꆊꆋꆌꆍꆎꆏꆐꆑꆒꆓꆔꆕꆖꆗꆘꆙꆚꆛꆜꆝꆞꆟꆠꆡꆢꆣꆤꆥꆦꆧꆨꆩꆪꆫꆬꆭꆮꆯꆰꆱꆲꆳꆴꆵꆶꆷꆸꆹꆺꆻꆼꆽꆾꆿꇀꇁꇂꇃꇄꇅꇆꇇꇈꇉꇊꇋꇌꇍꇎꇏꇐꇑꇒꇓꇔꇕꇖꇗꇘꇙꇚꇛꇜꇝꇞꇟꇠꇡꇢꇣꇤꇥꇦꇧꇨꇩꇪꇫꇬꇭꇮꇯꇰꇱꇲꇳꇴꇵꇶꇷꇸꇹꇺꇻꇼꇽꇾꇿꈀꈁꈂꈃꈄꈅꈆꈇꈈꈉꈊꈋꈌꈍꈎꈏꈐꈑꈒꈓꈔꈕꈖꈗꈘꈙꈚꈛꈜꈝꈞꈟꈠꈡꈢꈣꈤꈥꈦꈧꈨꈩꈪꈫꈬꈭꈮꈯꈰꈱꈲꈳꈴꈵꈶꈷꈸꈹꈺꈻꈼꈽꈾꈿꉀꉁꉂꉃꉄꉅꉆꉇꉈꉉꉊꉋꉌꉍꉎꉏꉐꉑꉒꉓꉔꉕꉖꉗꉘꉙꉚꉛꉜꉝꉞꉟꉠꉡꉢꉣꉤꉥꉦꉧꉨꉩꉪꉫꉬꉭꉮꉯꉰꉱꉲꉳꉴꉵꉶꉷꉸꉹꉺꉻꉼꉽꉾꉿꊀꊁꊂꊃꊄꊅꊆꊇꊈꊉꊊꊋꊌꊍꊎꊏꊐꊑꊒꊓꊔꊕꊖꊗꊘꊙꊚꊛꊜꊝꊞꊟꊠꊡꊢꊣꊤꊥꊦꊧꊨꊩꊪꊫꊬꊭꊮꊯꊰꊱꊲꊳꊴꊵꊶꊷꊸꊹꊺꊻꊼꊽꊾꊿꋀꋁꋂꋃꋄꋅꋆꋇꋈꋉꋊꋋꋌꋍꋎꋏꋐꋑꋒꋓꋔꋕꋖꋗꋘꋙꋚꋛꋜꋝꋞꋟꋠꋡꋢꋣꋤꋥꋦꋧꋨꋩꋪꋫꋬꋭꋮꋯꋰꋱꋲꋳꋴꋵꋶꋷꋸꋹꋺꋻꋼꋽꋾꋿꌀꌁꌂꌃꌄꌅꌆꌇꌈꌉꌊꌋꌌꌍꌎꌏꌐꌑꌒꌓꌔꌕꌖꌗꌘꌙꌚꌛꌜꌝꌞꌟꌠꌡꌢꌣꌤꌥꌦꌧꌨꌩꌪꌫꌬꌭꌮꌯꌰꌱꌲꌳꌴꌵꌶꌷꌸꌹꌺꌻꌼꌽꌾꌿꍀꍁꍂꍃꍄꍅꍆꍇꍈꍉꍊꍋꍌꍍꍎꍏꍐꍑꍒꍓꍔꍕꍖꍗꍘꍙꍚꍛꍜꍝꍞꍟꍠꍡꍢꍣꍤꍥꍦꍧꍨꍩꍪꍫꍬꍭꍮꍯꍰꍱꍲꍳꍴꍵꍶꍷꍸꍹꍺꍻꍼꍽꍾꍿꎀꎁꎂꎃꎄꎅꎆꎇꎈꎉꎊꎋꎌꎍꎎꎏꎐꎑꎒꎓꎔꎕꎖꎗꎘꎙꎚꎛꎜꎝꎞꎟꎠꎡꎢꎣꎤꎥꎦꎧꎨꎩꎪꎫꎬꎭꎮꎯꎰꎱꎲꎳꎴꎵꎶꎷꎸꎹꎺꎻꎼꎽꎾꎿꏀꏁꏂꏃꏄꏅꏆꏇꏈꏉꏊꏋꏌꏍꏎꏏꏐꏑꏒꏓꏔꏕꏖꏗꏘꏙꏚꏛꏜꏝꏞꏟꏠꏡꏢꏣꏤꏥꏦꏧꏨꏩꏪꏫꏬꏭꏮꏯꏰꏱꏲꏳꏴꏵꏶꏷꏸꏹꏺꏻꏼꏽꏾꏿꐀꐁꐂꐃꐄꐅꐆꐇꐈꐉꐊꐋꐌꐍꐎꐏꐐꐑꐒꐓꐔꐕꐖꐗꐘꐙꐚꐛꐜꐝꐞꐟꐠꐡꐢꐣꐤꐥꐦꐧꐨꐩꐪꐫꐬꐭꐮꐯꐰꐱꐲꐳꐴꐵꐶꐷꐸꐹꐺꐻꐼꐽꐾꐿꑀꑁꑂꑃꑄꑅꑆꑇꑈꑉꑊꑋꑌꑍꑎꑏꑐꑑꑒꑓꑔꑕꑖꑗꑘꑙꑚꑛꑜꑝꑞꑟꑠꑡꑢꑣꑤꑥꑦꑧꑨꑩꑪꑫꑬꑭꑮꑯꑰꑱꑲꑳꑴꑵꑶꑷꑸꑹꑺꑻꑼꑽꑾꑿꒀꒁꒂꒃꒄꒅꒆꒇꒈꒉꒊꒋꒌꓐꓑꓒꓓꓔꓕꓖꓗꓘꓙꓚꓛꓜꓝꓞꓟꓠꓡꓢꓣꓤꓥꓦꓧꓨꓩꓪꓫꓬꓭꓮꓯꓰꓱꓲꓳꓴꓵꓶꓷꓸꓹꓺꓻꓼꓽꔀꔁꔂꔃꔄꔅꔆꔇꔈꔉꔊꔋꔌꔍꔎꔏꔐꔑꔒꔓꔔꔕꔖꔗꔘꔙꔚꔛꔜꔝꔞꔟꔠꔡꔢꔣꔤꔥꔦꔧꔨꔩꔪꔫꔬꔭꔮꔯꔰꔱꔲꔳꔴꔵꔶꔷꔸꔹꔺꔻꔼꔽꔾꔿꕀꕁꕂꕃꕄꕅꕆꕇꕈꕉꕊꕋꕌꕍꕎꕏꕐꕑꕒꕓꕔꕕꕖꕗꕘꕙꕚꕛꕜꕝꕞꕟꕠꕡꕢꕣꕤꕥꕦꕧꕨꕩꕪꕫꕬꕭꕮꕯꕰꕱꕲꕳꕴꕵꕶꕷꕸꕹꕺꕻꕼꕽꕾꕿꖀꖁꖂꖃꖄꖅꖆꖇꖈꖉꖊꖋꖌꖍꖎꖏꖐꖑꖒꖓꖔꖕꖖꖗꖘꖙꖚꖛꖜꖝꖞꖟꖠꖡꖢꖣꖤꖥꖦꖧꖨꖩꖪꖫꖬꖭꖮꖯꖰꖱꖲꖳꖴꖵꖶꖷꖸꖹꖺꖻꖼꖽꖾꖿꗀꗁꗂꗃꗄꗅꗆꗇꗈꗉꗊꗋꗌꗍꗎꗏꗐꗑꗒꗓꗔꗕꗖꗗꗘꗙꗚꗛꗜꗝꗞꗟꗠꗡꗢꗣꗤꗥꗦꗧꗨꗩꗪꗫꗬꗭꗮꗯꗰꗱꗲꗳꗴꗵꗶꗷꗸꗹꗺꗻꗼꗽꗾꗿꘀꘁꘂꘃꘄꘅꘆꘇꘈꘉꘊꘋꘌꘐꘑꘒꘓꘔꘕꘖꘗꘘꘙꘚꘛꘜꘝꘞꘟꘪꘫꙀꙁꙂꙃꙄꙅꙆꙇꙈꙉꙊꙋꙌꙍꙎꙏꙐꙑꙒꙓꙔꙕꙖꙗꙘꙙꙚꙛꙜꙝꙞꙟꙠꙡꙢꙣꙤꙥꙦꙧꙨꙩꙪꙫꙬꙭꙮ◌ꙴ◌ꙵ◌ꙶ◌ꙷ◌ꙸ◌ꙹ◌ꙺ◌ꙻꙿꚀꚁꚂꚃꚄꚅꚆꚇꚈꚉꚊꚋꚌꚍꚎꚏꚐꚑꚒꚓꚔꚕꚖꚗꚘꚙꚚꚛꚜꚝ◌ꚟꚠꚡꚢꚣꚤꚥꚦꚧꚨꚩꚪꚫꚬꚭꚮꚯꚰꚱꚲꚳꚴꚵꚶꚷꚸꚹꚺꚻꚼꚽꚾꚿꛀꛁꛂꛃꛄꛅꛆꛇꛈꛉꛊꛋꛌꛍꛎꛏꛐꛑꛒꛓꛔꛕꛖꛗꛘꛙꛚꛛꛜꛝꛞꛟꛠꛡꛢꛣꛤꛥꛦꛧꛨꛩꛪꛫꛬꛭꛮꛯꜗꜘꜙꜚꜛꜜꜝꜞꜟꜢꜣꜤꜥꜦꜧꜨꜩꜪꜫꜬꜭꜮꜯꜰꜱꜲꜳꜴꜵꜶꜷꜸꜹꜺꜻꜼꜽꜾꜿꝀꝁꝂꝃꝄꝅꝆꝇꝈꝉꝊꝋꝌꝍꝎꝏꝐꝑꝒꝓꝔꝕꝖꝗꝘꝙꝚꝛꝜꝝꝞꝟꝠꝡꝢꝣꝤꝥꝦꝧꝨꝩꝪꝫꝬꝭꝮꝯꝰꝱꝲꝳꝴꝵꝶꝷꝸꝹꝺꝻꝼꝽꝾꝿꞀꞁꞂꞃꞄꞅꞆꞇꞈꞋꞌꞍꞎꞐꞑꞒꞓꞔꞕꞖꞗꞘꞙꞚꞛꞜꞝꞞꞟꞠꞡꞢꞣꞤꞥꞦꞧꞨꞩꞪꞫꞬꞭꞰꞱꟷꟸꟹꟺꟻꟼꟽꟾꟿꠀꠁꠃꠄꠅꠇꠈꠉꠊꠌꠍꠎꠏꠐꠑꠒꠓꠔꠕꠖꠗꠘꠙꠚꠛꠜꠝꠞꠟꠠꠡꠢꠣꠤ◌ꠥ◌ꠦꠧꡀꡁꡂꡃꡄꡅꡆꡇꡈꡉꡊꡋꡌꡍꡎꡏꡐꡑꡒꡓꡔꡕꡖꡗꡘꡙꡚꡛꡜꡝꡞꡟꡠꡡꡢꡣꡤꡥꡦꡧꡨꡩꡪꡫꡬꡭꡮꡯꡰꡱꡲꡳꢀꢁꢂꢃꢄꢅꢆꢇꢈꢉꢊꢋꢌꢍꢎꢏꢐꢑꢒꢓꢔꢕꢖꢗꢘꢙꢚꢛꢜꢝꢞꢟꢠꢡꢢꢣꢤꢥꢦꢧꢨꢩꢪꢫꢬꢭꢮꢯꢰꢱꢲꢳꢴꢵꢶꢷꢸꢹꢺꢻꢼꢽꢾꢿꣀꣁꣂꣃꣲꣳꣴꣵꣶꣷꣻꤊꤋꤌꤍꤎꤏꤐꤑꤒꤓꤔꤕꤖꤗꤘꤙꤚꤛꤜꤝꤞꤟꤠꤡꤢꤣꤤꤥ◌ꤦ◌ꤧ◌ꤨ◌ꤩ◌ꤪꤰꤱꤲꤳꤴꤵꤶꤷꤸꤹꤺꤻꤼꤽꤾꤿꥀꥁꥂꥃꥄꥅꥆ◌ꥇ◌ꥈ◌ꥉ◌ꥊ◌ꥋ◌ꥌ◌ꥍ◌ꥎ◌ꥏ◌ꥐ◌ꥑꥒꥠꥡꥢꥣꥤꥥꥦꥧꥨꥩꥪꥫꥬꥭꥮꥯꥰꥱꥲꥳꥴꥵꥶꥷꥸꥹꥺꥻꥼ◌ꦀ◌ꦁ◌ꦂꦃꦄꦅꦆꦇꦈꦉꦊꦋꦌꦍꦎꦏꦐꦑꦒꦓꦔꦕꦖꦗꦘꦙꦚꦛꦜꦝꦞꦟꦠꦡꦢꦣꦤꦥꦦꦧꦨꦩꦪꦫꦬꦭꦮꦯꦰꦱꦲꦴꦵ◌ꦶ◌ꦷ◌ꦸ◌ꦹꦺꦻ◌ꦼꦽꦾꦿꧏꧠꧡꧢꧣꧤꧦꧧꧨꧩꧪꧫꧬꧭꧮꧯꧺꧻꧼꧽꧾꨀꨁꨂꨃꨄꨅꨆꨇꨈꨉꨊꨋꨌꨍꨎꨏꨐꨑꨒꨓꨔꨕꨖꨗꨘꨙꨚꨛꨜꨝꨞꨟꨠꨡꨢꨣꨤꨥꨦꨧꨨ◌ꨩ◌ꨪ◌ꨫ◌ꨬ◌ꨭ◌ꨮꨯꨰ◌ꨱ◌ꨲꨳꨴ◌ꨵ◌ꨶꩀꩁꩂ◌ꩃꩄꩅꩆꩇꩈꩉꩊꩋ◌ꩌꩍꩠꩡꩢꩣꩤꩥꩦꩧꩨꩩꩪꩫꩬꩭꩮꩯꩰꩱꩲꩳꩴꩵꩶꩺꩾꩿꪀꪁꪂꪃꪄꪅꪆꪇꪈꪉꪊꪋꪌꪍꪎꪏꪐꪑꪒꪓꪔꪕꪖꪗꪘꪙꪚꪛꪜꪝꪞꪟꪠꪡꪢꪣꪤꪥꪦꪧꪨꪩꪪꪫꪬꪭꪮꪯ◌ꪰꪱ◌ꪲ◌ꪳ◌ꪴꪵꪶ◌ꪷ◌ꪸꪹꪺꪻꪼꪽ◌ꪾꫀꫂꫛꫜꫝꫠꫡꫢꫣꫤꫥꫦꫧꫨꫩꫪꫫ◌ꫬ◌ꫭꫮꫯꫲꫳꫴꫵꬁꬂꬃꬄꬅꬆꬉꬊꬋꬌꬍꬎꬑꬒꬓꬔꬕꬖꬠꬡꬢꬣꬤꬥꬦꬨꬩꬪꬫꬬꬭꬮꬰꬱꬲꬳꬴꬵꬶꬷꬸꬹꬺꬻꬼꬽꬾꬿꭀꭁꭂꭃꭄꭅꭆꭇꭈꭉꭊꭋꭌꭍꭎꭏꭐꭑꭒꭓꭔꭕꭖꭗꭘꭙꭚꭜꭝꭞꭟꭤꭥꯀꯁꯂꯃꯄꯅꯆꯇꯈꯉꯊꯋꯌꯍꯎꯏꯐꯑꯒꯓꯔꯕꯖꯗꯘꯙꯚꯛꯜꯝꯞꯟꯠꯡꯢꯣꯤ◌ꯥꯦꯧ◌ꯨꯩꯪힰힱힲힳힴힵힶힷힸힹힺힻힼힽힾힿퟀퟁퟂퟃퟄퟅퟆퟋퟌퟍퟎퟏퟐퟑퟒퟓퟔퟕퟖퟗퟘퟙퟚퟛퟜퟝퟞퟟퟠퟡퟢퟣퟤퟥퟦퟧퟨퟩퟪퟫퟬퟭퟮퟯퟰퟱퟲퟳퟴퟵퟶퟷퟸퟹퟺퟻfffiflffifflſtstﬓﬔﬕﬖﬗיִ◌ﬞײַﬠﬡﬢﬣﬤﬥﬦﬧﬨשׁשׂשּׁשּׂאַאָאּבּגּדּהּוּזּטּיּךּכּלּמּנּסּףּפּצּקּרּשּתּוֹבֿכֿפֿﭏﭐﭑﭒﭓﭔﭕﭖﭗﭘﭙﭚﭛﭜﭝﭞﭟﭠﭡﭢﭣﭤﭥﭦﭧﭨﭩﭪﭫﭬﭭﭮﭯﭰﭱﭲﭳﭴﭵﭶﭷﭸﭹﭺﭻﭼﭽﭾﭿﮀﮁﮂﮃﮄﮅﮆﮇﮈﮉﮊﮋﮌﮍﮎﮏﮐﮑﮒﮓﮔﮕﮖﮗﮘﮙﮚﮛﮜﮝﮞﮟﮠﮡﮢﮣﮤﮥﮦﮧﮨﮩﮪﮫﮬﮭﮮﮯﮰﮱﯓﯔﯕﯖﯗﯘﯙﯚﯛﯜﯝﯞﯟﯠﯡﯢﯣﯤﯥﯦﯧﯨﯩﯪﯫﯬﯭﯮﯯﯰﯱﯲﯳﯴﯵﯶﯷﯸﯹﯺﯻﯼﯽﯾﯿﰀﰁﰂﰃﰄﰅﰆﰇﰈﰉﰊﰋﰌﰍﰎﰏﰐﰑﰒﰓﰔﰕﰖﰗﰘﰙﰚﰛﰜﰝﰞﰟﰠﰡﰢﰣﰤﰥﰦﰧﰨﰩﰪﰫﰬﰭﰮﰯﰰﰱﰲﰳﰴﰵﰶﰷﰸﰹﰺﰻﰼﰽﰾﰿﱀﱁﱂﱃﱄﱅﱆﱇﱈﱉﱊﱋﱌﱍﱎﱏﱐﱑﱒﱓﱔﱕﱖﱗﱘﱙﱚﱛﱜﱝﱞﱟﱠﱡﱢﱣﱤﱥﱦﱧﱨﱩﱪﱫﱬﱭﱮﱯﱰﱱﱲﱳﱴﱵﱶﱷﱸﱹﱺﱻﱼﱽﱾﱿﲀﲁﲂﲃﲄﲅﲆﲇﲈﲉﲊﲋﲌﲍﲎﲏﲐﲑﲒﲓﲔﲕﲖﲗﲘﲙﲚﲛﲜﲝﲞﲟﲠﲡﲢﲣﲤﲥﲦﲧﲨﲩﲪﲫﲬﲭﲮﲯﲰﲱﲲﲳﲴﲵﲶﲷﲸﲹﲺﲻﲼﲽﲾﲿﳀﳁﳂﳃﳄﳅﳆﳇﳈﳉﳊﳋﳌﳍﳎﳏﳐﳑﳒﳓﳔﳕﳖﳗﳘﳙﳚﳛﳜﳝﳞﳟﳠﳡﳢﳣﳤﳥﳦﳧﳨﳩﳪﳫﳬﳭﳮﳯﳰﳱﳲﳳﳴﳵﳶﳷﳸﳹﳺﳻﳼﳽﳾﳿﴀﴁﴂﴃﴄﴅﴆﴇﴈﴉﴊﴋﴌﴍﴎﴏﴐﴑﴒﴓﴔﴕﴖﴗﴘﴙﴚﴛﴜﴝﴞﴟﴠﴡﴢﴣﴤﴥﴦﴧﴨﴩﴪﴫﴬﴭﴮﴯﴰﴱﴲﴳﴴﴵﴶﴷﴸﴹﴺﴻﴼﴽﵐﵑﵒﵓﵔﵕﵖﵗﵘﵙﵚﵛﵜﵝﵞﵟﵠﵡﵢﵣﵤﵥﵦﵧﵨﵩﵪﵫﵬﵭﵮﵯﵰﵱﵲﵳﵴﵵﵶﵷﵸﵹﵺﵻﵼﵽﵾﵿﶀﶁﶂﶃﶄﶅﶆﶇﶈﶉﶊﶋﶌﶍﶎﶏﶒﶓﶔﶕﶖﶗﶘﶙﶚﶛﶜﶝﶞﶟﶠﶡﶢﶣﶤﶥﶦﶧﶨﶩﶪﶫﶬﶭﶮﶯﶰﶱﶲﶳﶴﶵﶶﶷﶸﶹﶺﶻﶼﶽﶾﶿﷀﷁﷂﷃﷄﷅﷆﷇﷰﷱﷲﷳﷴﷵﷶﷷﷸﷹﷺﷻﹰﹱﹲﹳﹴﹶﹷﹸﹹﹺﹻﹼﹽﹾﹿﺀﺁﺂﺃﺄﺅﺆﺇﺈﺉﺊﺋﺌﺍﺎﺏﺐﺑﺒﺓﺔﺕﺖﺗﺘﺙﺚﺛﺜﺝﺞﺟﺠﺡﺢﺣﺤﺥﺦﺧﺨﺩﺪﺫﺬﺭﺮﺯﺰﺱﺲﺳﺴﺵﺶﺷﺸﺹﺺ
ﺻﺼﺽﺾﺿﻀﻁﻂﻃﻄﻅﻆﻇﻈﻉﻊﻋﻌﻍﻎﻏﻐﻑﻒﻓﻔﻕﻖﻗﻘﻙﻚﻛﻜﻝﻞﻟﻠﻡﻢﻣﻤﻥﻦﻧﻨﻩﻪﻫﻬﻭﻮﻯﻰﻱﻲﻳﻴﻵﻶﻷﻸﻹﻺﻻﻼABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzヲァィゥェォャュョッーアイウエオカキクケコサシスセソタチツテトナニヌネノハヒフヘホマミムメモヤユヨラリルレロワン゙゚ᅠᄀᄁᆪᄂᆬᆭᄃᄄᄅᆰᆱᆲᆳᆴᆵᄚᄆᄇᄈᄡᄉᄊᄋᄌᄍᄎᄏᄐᄑ하ᅢᅣᅤᅥᅦᅧᅨᅩᅪᅫᅬᅭᅮᅯᅰᅱᅲᅳᅴᅵ -------------------------------------------------------------------------------- /tutorials/Chapter2/assets/IsLower.txt: -------------------------------------------------------------------------------- 1 | abcdefghijklmnopqrstuvwxyzªµºßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿāăąćĉċčďđēĕėęěĝğġģĥħĩīĭįıijĵķĸĺļľŀłńņňʼnŋōŏőœŕŗřśŝşšţťŧũūŭůűųŵŷźżžſƀƃƅƈƌƍƒƕƙƚƛƞơƣƥƨƪƫƭưƴƶƹƺƽƾƿdžljnjǎǐǒǔǖǘǚǜǝǟǡǣǥǧǩǫǭǯǰdzǵǹǻǽǿȁȃȅȇȉȋȍȏȑȓȕȗșțȝȟȡȣȥȧȩȫȭȯȱȳȴȵȶȷȸȹȼȿɀɂɇɉɋɍɏɐɑɒɓɔɕɖɗɘəɚɛɜɝɞɟɠɡɢɣɤɥɦɧɨɩɪɫɬɭɮɯɰɱɲɳɴɵɶɷɸɹɺɻɼɽɾɿʀʁʂʃʄʅʆʇʈʉʊʋʌʍʎʏʐʑʒʓʕʖʗʘʙʚʛʜʝʞʟʠʡʢʣʤʥʦʧʨʩʪʫʬʭʮʯʰʱʲʳʴʵʶʷʸˀˁˠˡˢˣˤ◌ͅͱͳͷͺͻͼͽΐάέήίΰαβγδεζηθικλμνξοπρςστυφχψωϊϋόύώϐϑϕϖϗϙϛϝϟϡϣϥϧϩϫϭϯϰϱϲϳϵϸϻϼабвгдежзийклмнопрстуфхцчшщъыьэюяѐёђѓєѕіїјљњћќѝўџѡѣѥѧѩѫѭѯѱѳѵѷѹѻѽѿҁҋҍҏґғҕҗҙқҝҟҡңҥҧҩҫҭүұҳҵҷҹһҽҿӂӄӆӈӊӌӎӏӑӓӕӗәӛӝӟӡӣӥӧөӫӭӯӱӳӵӷӹӻӽӿԁԃԅԇԉԋԍԏԑԓԕԗԙԛԝԟԡԣԥԧԩԫԭԯաբգդեզէըթժիլխծկհձղճմյնշոչպջռսվտրցւփքօֆևᏸᏹᏺᏻᏼᏽᲀᲁᲂᲃᲄᲅᲆᲇᲈᴀᴁᴂᴃᴄᴅᴆᴇᴈᴉᴊᴋᴌᴍᴎᴏᴐᴑᴒᴓᴔᴕᴖᴗᴘᴙᴚᴛᴜᴝᴞᴟᴠᴡᴢᴣᴤᴥᴦᴧᴨᴩᴪᴫᴬᴭᴮᴯᴰᴱᴲᴳᴴᴵᴶᴷᴸᴹᴺᴻᴼᴽᴾᴿᵀᵁᵂᵃᵄᵅᵆᵇᵈᵉᵊᵋᵌᵍᵎᵏᵐᵑᵒᵓᵔᵕᵖᵗᵘᵙᵚᵛᵜᵝᵞᵟᵠᵡᵢᵣᵤᵥᵦᵧᵨᵩᵪᵫᵬᵭᵮᵯᵰᵱᵲᵳᵴᵵᵶᵷᵸᵹᵺᵻᵼᵽᵾᵿᶀᶁᶂᶃᶄᶅᶆᶇᶈᶉᶊᶋᶌᶍᶎᶏᶐᶑᶒᶓᶔᶕᶖᶗᶘᶙᶚᶛᶜᶝᶞᶟᶠᶡᶢᶣᶤᶥᶦᶧᶨᶩᶪᶫᶬᶭᶮᶯᶰᶱᶲᶳᶴᶵᶶᶷᶸᶹᶺᶻᶼᶽᶾᶿḁḃḅḇḉḋḍḏḑḓḕḗḙḛḝḟḡḣḥḧḩḫḭḯḱḳḵḷḹḻḽḿṁṃṅṇṉṋṍṏṑṓṕṗṙṛṝṟṡṣṥṧṩṫṭṯṱṳṵṷṹṻṽṿẁẃẅẇẉẋẍẏẑẓẕẖẗẘẙẚẛẜẝẟạảấầẩẫậắằẳẵặẹẻẽếềểễệỉịọỏốồổỗộớờởỡợụủứừửữựỳỵỷỹỻỽỿἀἁἂἃἄἅἆἇἐἑἒἓἔἕἠἡἢἣἤἥἦἧἰἱἲἳἴἵἶἷὀὁὂὃὄὅὐὑὒὓὔὕὖὗὠὡὢὣὤὥὦὧὰάὲέὴήὶίὸόὺύὼώᾀᾁᾂᾃᾄᾅᾆᾇᾐᾑᾒᾓᾔᾕᾖᾗᾠᾡᾢᾣᾤᾥᾦᾧᾰᾱᾲᾳᾴᾶᾷιῂῃῄῆῇῐῑῒΐῖῗῠῡῢΰῤῥῦῧῲῳῴῶῷⁱⁿₐₑₒₓₔₕₖₗₘₙₚₛₜℊℎℏℓℯℴℹℼℽⅆⅇⅈⅉⅎⅰⅱⅲⅳⅴⅵⅶⅷⅸⅹⅺⅻⅼⅽⅾⅿↄⓐⓑⓒⓓⓔⓕⓖⓗⓘⓙⓚⓛⓜⓝⓞⓟⓠⓡⓢⓣⓤⓥⓦⓧⓨⓩⰰⰱⰲⰳⰴⰵⰶⰷⰸⰹⰺⰻⰼⰽⰾⰿⱀⱁⱂⱃⱄⱅⱆⱇⱈⱉⱊⱋⱌⱍⱎⱏⱐⱑⱒⱓⱔⱕⱖⱗⱘⱙⱚⱛⱜⱝⱞⱡⱥⱦⱨⱪⱬⱱⱳⱴⱶⱷⱸⱹⱺⱻⱼⱽⲁⲃⲅⲇⲉⲋⲍⲏⲑⲓⲕⲗⲙⲛⲝⲟⲡⲣⲥⲧⲩⲫⲭⲯⲱⲳⲵⲷⲹⲻⲽⲿⳁⳃⳅⳇⳉⳋⳍⳏⳑⳓⳕⳗⳙⳛⳝⳟⳡⳣⳤⳬⳮⳳⴀⴁⴂⴃⴄⴅⴆⴇⴈⴉⴊⴋⴌⴍⴎⴏⴐⴑⴒⴓⴔⴕⴖⴗⴘⴙⴚⴛⴜⴝⴞⴟⴠⴡⴢⴣⴤⴥⴧⴭꙁꙃꙅꙇꙉꙋꙍꙏꙑꙓꙕꙗꙙꙛꙝꙟꙡꙣꙥꙧꙩꙫꙭꚁꚃꚅꚇꚉꚋꚍꚏꚑꚓꚕꚗꚙꚛꚜꚝꜣꜥꜧꜩꜫꜭꜯꜰꜱꜳꜵꜷꜹꜻꜽꜿꝁꝃꝅꝇꝉꝋꝍꝏꝑꝓꝕꝗꝙꝛꝝꝟꝡꝣꝥꝧꝩꝫꝭꝯꝰꝱꝲꝳꝴꝵꝶꝷꝸꝺꝼꝿꞁꞃꞅꞇꞌꞎꞑꞓꞔꞕꞗꞙꞛꞝꞟꞡꞣꞥꞧꞩꞵꞷꟸꟹꟺꬰꬱꬲꬳꬴꬵꬶꬷꬸꬹꬺꬻꬼꬽꬾꬿꭀꭁꭂꭃꭄꭅꭆꭇꭈꭉꭊꭋꭌꭍꭎꭏꭐꭑꭒꭓꭔꭕꭖꭗꭘꭙꭚꭜꭝꭞꭟꭠꭡꭢꭣꭤꭥꭰꭱꭲꭳꭴꭵꭶꭷꭸꭹꭺꭻꭼꭽꭾꭿꮀꮁꮂꮃꮄꮅꮆꮇꮈꮉꮊꮋꮌꮍꮎꮏꮐꮑꮒꮓꮔꮕꮖꮗꮘꮙꮚꮛꮜꮝꮞꮟꮠꮡꮢꮣꮤꮥꮦꮧꮨꮩꮪꮫꮬꮭꮮꮯꮰꮱꮲꮳꮴꮵꮶꮷꮸꮹꮺꮻꮼꮽꮾꮿfffiflffifflſtstﬓﬔﬕﬖﬗabcdefghijklmnopqrstuvwxyz𐐨𐐩𐐪𐐫𐐬𐐭𐐮𐐯𐐰𐐱𐐲𐐳𐐴𐐵𐐶𐐷𐐸𐐹𐐺𐐻𐐼𐐽𐐾𐐿𐑀𐑁𐑂𐑃𐑄𐑅𐑆𐑇𐑈𐑉𐑊𐑋𐑌𐑍𐑎𐑏𐓘𐓙𐓚𐓛𐓜𐓝𐓞𐓟𐓠𐓡𐓢𐓣𐓤𐓥𐓦𐓧𐓨𐓩𐓪𐓫𐓬𐓭𐓮𐓯𐓰𐓱𐓲𐓳𐓴𐓵𐓶𐓷𐓸𐓹𐓺𐓻𐳀𐳁𐳂𐳃𐳄𐳅𐳆𐳇𐳈𐳉𐳊𐳋𐳌𐳍𐳎𐳏𐳐𐳑𐳒𐳓𐳔𐳕𐳖𐳗𐳘𐳙𐳚𐳛𐳜𐳝𐳞𐳟𐳠𐳡𐳢𐳣𐳤𐳥𐳦𐳧𐳨𐳩𐳪𐳫𐳬𐳭𐳮𐳯𐳰𐳱𐳲𑣀𑣁𑣂𑣃𑣄𑣅𑣆𑣇𑣈𑣉𑣊𑣋𑣌𑣍𑣎𑣏𑣐𑣑𑣒𑣓𑣔𑣕𑣖𑣗𑣘𑣙𑣚𑣛𑣜𑣝𑣞𑣟𝐚𝐛𝐜𝐝𝐞𝐟𝐠𝐡𝐢𝐣𝐤𝐥𝐦𝐧𝐨𝐩𝐪𝐫𝐬𝐭𝐮𝐯𝐰𝐱𝐲𝐳𝑎𝑏𝑐𝑑𝑒𝑓𝑔𝑖𝑗𝑘𝑙𝑚𝑛𝑜𝑝𝑞𝑟𝑠𝑡𝑢𝑣𝑤𝑥𝑦𝑧𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚𝒛𝒶𝒷𝒸𝒹𝒻𝒽𝒾𝒿𝓀𝓁𝓂𝓃𝓅𝓆𝓇𝓈𝓉𝓊𝓋𝓌𝓍𝓎𝓏𝓪𝓫𝓬𝓭𝓮𝓯𝓰𝓱𝓲𝓳𝓴𝓵𝓶𝓷𝓸𝓹𝓺𝓻𝓼𝓽𝓾𝓿𝔀𝔁𝔂𝔃𝔞𝔟𝔠𝔡𝔢𝔣𝔤𝔥𝔦𝔧𝔨𝔩𝔪𝔫𝔬𝔭𝔮𝔯𝔰𝔱𝔲𝔳𝔴𝔵𝔶𝔷𝕒𝕓𝕔𝕕𝕖𝕗𝕘𝕙𝕚𝕛𝕜𝕝𝕞𝕟𝕠𝕡𝕢𝕣𝕤𝕥𝕦𝕧𝕨𝕩𝕪𝕫𝖆𝖇𝖈𝖉𝖊𝖋𝖌𝖍𝖎𝖏𝖐𝖑𝖒𝖓𝖔𝖕𝖖𝖗𝖘𝖙𝖚𝖛𝖜𝖝𝖞𝖟𝖺𝖻𝖼𝖽𝖾𝖿𝗀𝗁𝗂𝗃𝗄𝗅𝗆𝗇𝗈𝗉𝗊𝗋𝗌𝗍𝗎𝗏𝗐𝗑𝗒𝗓𝗮𝗯𝗰𝗱𝗲𝗳𝗴𝗵𝗶𝗷𝗸𝗹𝗺𝗻𝗼𝗽𝗾𝗿𝘀𝘁𝘂𝘃𝘄𝘅𝘆𝘇𝘢𝘣𝘤𝘥𝘦𝘧𝘨𝘩𝘪𝘫𝘬𝘭𝘮𝘯𝘰𝘱𝘲𝘳𝘴𝘵𝘶𝘷𝘸𝘹𝘺𝘻𝙖𝙗𝙘𝙙𝙚𝙛𝙜𝙝𝙞𝙟𝙠𝙡𝙢𝙣𝙤𝙥𝙦𝙧𝙨𝙩𝙪𝙫𝙬𝙭𝙮𝙯𝚊𝚋𝚌𝚍𝚎𝚏𝚐𝚑𝚒𝚓𝚔𝚕𝚖𝚗𝚘𝚙𝚚𝚛𝚜𝚝𝚞𝚟𝚠𝚡𝚢𝚣𝚤𝚥𝛂𝛃𝛄𝛅𝛆𝛇𝛈𝛉𝛊𝛋𝛌𝛍𝛎𝛏𝛐𝛑𝛒𝛓𝛔𝛕𝛖𝛗𝛘𝛙𝛚𝛜𝛝𝛞𝛟𝛠𝛡𝛼𝛽𝛾𝛿𝜀𝜁𝜂𝜃𝜄𝜅𝜆𝜇𝜈𝜉𝜊𝜋𝜌𝜍𝜎𝜏𝜐𝜑𝜒𝜓𝜔𝜖𝜗𝜘𝜙𝜚𝜛𝜶𝜷𝜸𝜹𝜺𝜻𝜼𝜽𝜾𝜿𝝀𝝁𝝂𝝃𝝄𝝅𝝆𝝇𝝈𝝉𝝊𝝋𝝌𝝍𝝎𝝐𝝑𝝒𝝓𝝔𝝕𝝰𝝱𝝲𝝳𝝴𝝵𝝶𝝷𝝸𝝹𝝺𝝻𝝼𝝽𝝾𝝿𝞀𝞁𝞂𝞃𝞄𝞅𝞆𝞇𝞈𝞊𝞋𝞌𝞍𝞎𝞏𝞪𝞫𝞬𝞭𝞮𝞯𝞰𝞱𝞲𝞳𝞴𝞵𝞶𝞷𝞸𝞹𝞺𝞻𝞼𝞽𝞾𝞿𝟀𝟁𝟂𝟄𝟅𝟆𝟇𝟈𝟉𝟋𞤢𞤣𞤤𞤥𞤦𞤧𞤨𞤩𞤪𞤫𞤬𞤭𞤮𞤯𞤰𞤱𞤲𞤳𞤴𞤵𞤶𞤷𞤸𞤹𞤺𞤻𞤼𞤽𞤾𞤿𞥀𞥁𞥂𞥃 -------------------------------------------------------------------------------- /tutorials/Chapter2/assets/IsN.txt: -------------------------------------------------------------------------------- 1 | 
0123456789²³¹¼½¾٠١٢٣٤٥٦٧٨٩۰۱۲۳۴۵۶۷۸۹߀߁߂߃߄߅߆߇߈߉०१२३४५६७८९০১২৩৪৫৬৭৮৯৴৵৶৷৸৹੦੧੨੩੪੫੬੭੮੯૦૧૨૩૪૫૬૭૮૯୦୧୨୩୪୫୬୭୮୯୲୳୴୵୶୷௦௧௨௩௪௫௬௭௮௯௰௱௲౦౧౨౩౪౫౬౭౮౯౸౹౺౻౼౽౾೦೧೨೩೪೫೬೭೮೯൦൧൨൩൪൫൬൭൮൯൰൱൲൳൴൵෦෧෨෩෪෫෬෭෮෯๐๑๒๓๔๕๖๗๘๙໐໑໒໓໔໕໖໗໘໙༠༡༢༣༤༥༦༧༨༩༪༫༬༭༮༯༰༱༲༳၀၁၂၃၄၅၆၇၈၉႐႑႒႓႔႕႖႗႘႙፩፪፫፬፭፮፯፰፱፲፳፴፵፶፷፸፹፺፻፼ᛮᛯᛰ០១២៣៤៥៦៧៨៩៰៱៲៳៴៵៶៷៸៹᠐᠑᠒᠓᠔᠕᠖᠗᠘᠙᥆᥇᥈᥉᥊᥋᥌᥍᥎᥏᧐᧑᧒᧓᧔᧕᧖᧗᧘᧙᧚᪀᪁᪂᪃᪄᪅᪆᪇᪈᪉᪐᪑᪒᪓᪔᪕᪖᪗᪘᪙᭐᭑᭒᭓᭔᭕᭖᭗᭘᭙᮰᮱᮲᮳᮴᮵᮶᮷᮸᮹᱀᱁᱂᱃᱄᱅᱆᱇᱈᱉᱐᱑᱒᱓᱔᱕᱖᱗᱘᱙⁰⁴⁵⁶⁷⁸⁹₀₁₂₃₄₅₆₇₈₉⅐⅑⅒⅓⅔⅕⅖⅗⅘⅙⅚⅛⅜⅝⅞⅟ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫⅬⅭⅮⅯⅰⅱⅲⅳⅴⅵⅶⅷⅸⅹⅺⅻⅼⅽⅾⅿↀↁↂↅↆↇↈ↉①②③④⑤⑥⑦⑧⑨⑩⑪⑫⑬⑭⑮⑯⑰⑱⑲⑳⑴⑵⑶⑷⑸⑹⑺⑻⑼⑽⑾⑿⒀⒁⒂⒃⒄⒅⒆⒇⒈⒉⒊⒋⒌⒍⒎⒏⒐⒑⒒⒓⒔⒕⒖⒗⒘⒙⒚⒛⓪⓫⓬⓭⓮⓯⓰⓱⓲⓳⓴⓵⓶⓷⓸⓹⓺⓻⓼⓽⓾⓿❶❷❸❹❺❻❼❽❾❿➀➁➂➃➄➅➆➇➈➉➊➋➌➍➎➏➐➑➒➓⳽㆒㆓㆔㆕㈠㈡㈢㈣㈤㈥㈦㈧㈨㈩㉈㉉㉊㉋㉌㉍㉎㉏㉑㉒㉓㉔㉕㉖㉗㉘㉙㉚㉛㉜㉝㉞㉟㊀㊁㊂㊃㊄㊅㊆㊇㊈㊉㊱㊲㊳㊴㊵㊶㊷㊸㊹㊺㊻㊼㊽㊾㊿꘠꘡꘢꘣꘤꘥꘦꘧꘨꘩ꛦꛧꛨꛩꛪꛫꛬꛭꛮꛯ꠰꠱꠲꠳꠴꠵꣐꣑꣒꣓꣔꣕꣖꣗꣘꣙꤀꤁꤂꤃꤄꤅꤆꤇꤈꤉꧐꧑꧒꧓꧔꧕꧖꧗꧘꧙꧰꧱꧲꧳꧴꧵꧶꧷꧸꧹꩐꩑꩒꩓꩔꩕꩖꩗꩘꩙꯰꯱꯲꯳꯴꯵꯶꯷꯸꯹0123456789 2 | -------------------------------------------------------------------------------- /tutorials/Chapter2/assets/Lowercase_Letter.txt: -------------------------------------------------------------------------------- 1 | abcdefghijklmnopqrstuvwxyzµßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿāăąćĉċčďđēĕėęěĝğġģĥħĩīĭįıijĵķĸĺļľŀłńņňʼnŋōŏőœŕŗřśŝşšţťŧũūŭůűųŵŷźżžſƀƃƅƈƌƍƒƕƙƚƛƞơƣƥƨƪƫƭưƴƶƹƺƽƾƿdžljnjǎǐǒǔǖǘǚǜǝǟǡǣǥǧǩǫǭǯǰdzǵǹǻǽǿȁȃȅȇȉȋȍȏȑȓȕȗșțȝȟȡȣȥȧȩȫȭȯȱȳȴȵȶȷȸȹȼȿɀɂɇɉɋɍɏɐɑɒɓɔɕɖɗɘəɚɛɜɝɞɟɠɡɢɣɤɥɦɧɨɩɪɫɬɭɮɯɰɱɲɳɴɵɶɷɸɹɺɻɼɽɾɿʀʁʂʃʄʅʆʇʈʉʊʋʌʍʎʏʐʑʒʓʕʖʗʘʙʚʛʜʝʞʟʠʡʢʣʤʥʦʧʨʩʪʫʬʭʮʯͱͳͷͻͼͽΐάέήίΰαβγδεζηθικλμνξοπρςστυφχψωϊϋόύώϐϑϕϖϗϙϛϝϟϡϣϥϧϩϫϭϯϰϱϲϳϵϸϻϼабвгдежзийклмнопрстуфхцчшщъыьэюяѐёђѓєѕіїјљњћќѝўџѡѣѥѧѩѫѭѯѱѳѵѷѹѻѽѿҁҋҍҏґғҕҗҙқҝҟҡңҥҧҩҫҭүұҳҵҷҹһҽҿӂӄӆӈӊӌӎӏӑӓӕӗәӛӝӟӡӣӥӧөӫӭӯӱӳӵӷӹӻӽӿԁԃԅԇԉԋԍԏԑԓԕԗԙԛԝԟԡԣԥԧԩԫԭԯաբգդեզէըթժիլխծկհձղճմյնշոչպջռսվտրցւփքօֆևᏸᏹᏺᏻᏼᏽᲀᲁᲂᲃᲄᲅᲆᲇᲈᴀᴁᴂᴃᴄᴅᴆᴇᴈᴉᴊᴋᴌᴍᴎᴏᴐᴑᴒᴓᴔᴕᴖᴗᴘᴙᴚᴛᴜᴝᴞᴟᴠᴡᴢᴣᴤᴥᴦᴧᴨᴩᴪᴫᵫᵬᵭᵮᵯᵰᵱᵲᵳᵴᵵᵶᵷᵹᵺᵻᵼᵽᵾᵿᶀᶁᶂᶃᶄᶅᶆᶇᶈᶉᶊᶋᶌᶍᶎᶏᶐᶑᶒᶓᶔᶕᶖᶗᶘᶙᶚḁḃḅḇḉḋḍḏḑḓḕḗḙḛḝḟḡḣḥḧḩḫḭḯḱḳḵḷḹḻḽḿṁṃṅṇṉṋṍṏṑṓṕṗṙṛṝṟṡṣṥṧṩṫṭṯṱṳṵṷṹṻṽṿẁẃẅẇẉẋẍẏẑẓẕẖẗẘẙẚẛẜẝẟạảấầẩẫậắằẳẵặẹẻẽếềểễệỉịọỏốồổỗộớờởỡợụủứừửữựỳỵỷỹỻỽỿἀἁἂἃἄἅἆἇἐἑἒἓἔἕἠἡἢἣἤἥἦἧἰἱἲἳἴἵἶἷὀὁὂὃὄὅὐὑὒὓὔὕὖὗὠὡὢὣὤὥὦὧὰάὲέὴήὶίὸόὺύὼώᾀᾁᾂᾃᾄᾅᾆᾇᾐᾑᾒᾓᾔᾕᾖᾗᾠᾡᾢᾣᾤᾥᾦᾧᾰᾱᾲᾳᾴᾶᾷιῂῃῄῆῇῐῑῒΐῖῗῠῡῢΰῤῥῦῧῲῳῴῶῷℊℎℏℓℯℴℹℼℽⅆⅇⅈⅉⅎↄⰰⰱⰲⰳⰴⰵⰶⰷⰸⰹⰺⰻⰼⰽⰾⰿⱀⱁⱂⱃⱄⱅⱆⱇⱈⱉⱊⱋⱌⱍⱎⱏⱐⱑⱒⱓⱔⱕⱖⱗⱘⱙⱚⱛⱜⱝⱞⱡⱥⱦⱨⱪⱬⱱⱳⱴⱶⱷⱸⱹⱺⱻⲁⲃⲅⲇⲉⲋⲍⲏⲑⲓⲕⲗⲙⲛⲝⲟⲡⲣⲥⲧⲩⲫⲭⲯⲱⲳⲵⲷⲹⲻⲽⲿⳁⳃⳅⳇⳉⳋⳍⳏⳑⳓⳕⳗⳙⳛⳝⳟⳡⳣⳤⳬⳮⳳⴀⴁⴂⴃⴄⴅⴆⴇⴈⴉⴊⴋⴌⴍⴎⴏⴐⴑⴒⴓⴔⴕⴖⴗⴘⴙⴚⴛⴜⴝⴞⴟⴠⴡⴢⴣⴤⴥⴧⴭꙁꙃꙅꙇꙉꙋꙍꙏꙑꙓꙕꙗꙙꙛꙝꙟꙡꙣꙥꙧꙩꙫꙭꚁꚃꚅꚇꚉꚋꚍꚏꚑꚓꚕꚗꚙꚛꜣꜥꜧꜩꜫꜭꜯꜰꜱꜳꜵꜷꜹꜻꜽꜿꝁꝃꝅꝇꝉꝋꝍꝏꝑꝓꝕꝗꝙꝛꝝꝟꝡꝣꝥꝧꝩꝫꝭꝯꝱꝲꝳꝴꝵꝶꝷꝸꝺꝼꝿꞁꞃꞅꞇꞌꞎꞑꞓꞔꞕꞗꞙꞛꞝꞟꞡꞣꞥꞧꞩꞵꞷꟺꬰꬱꬲꬳꬴꬵꬶꬷꬸꬹꬺꬻꬼꬽꬾꬿꭀꭁꭂꭃꭄꭅꭆꭇꭈꭉꭊꭋꭌꭍꭎꭏꭐꭑꭒꭓꭔꭕꭖꭗꭘꭙꭚꭠꭡꭢꭣꭤꭥꭰꭱꭲꭳꭴꭵꭶꭷꭸꭹꭺꭻꭼꭽꭾꭿꮀꮁꮂꮃꮄꮅꮆꮇꮈꮉꮊꮋꮌꮍꮎꮏꮐꮑꮒꮓꮔꮕꮖꮗꮘꮙꮚꮛꮜꮝꮞꮟꮠꮡꮢꮣꮤꮥꮦꮧꮨꮩꮪꮫꮬꮭꮮꮯꮰꮱꮲꮳꮴꮵꮶꮷꮸꮹꮺꮻꮼꮽꮾꮿfffiflffifflſtstﬓﬔﬕﬖﬗabcdefghijklmnopqrstuvwxyz -------------------------------------------------------------------------------- /tutorials/Chapter2/assets/Non-BreakingSpace.webp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BrightXiaoHan/MachineTranslationTutorial/2c36dfa18fb2d3fb9ba6264f1c19af79dd69200f/tutorials/Chapter2/assets/Non-BreakingSpace.webp -------------------------------------------------------------------------------- /tutorials/Chapter2/assets/NormalSpace.webp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BrightXiaoHan/MachineTranslationTutorial/2c36dfa18fb2d3fb9ba6264f1c19af79dd69200f/tutorials/Chapter2/assets/NormalSpace.webp -------------------------------------------------------------------------------- /tutorials/Chapter2/assets/Titlecase_Letter.txt: -------------------------------------------------------------------------------- 1 | DžLjNjDzᾈᾉᾊᾋᾌᾍᾎᾏᾘᾙᾚᾛᾜᾝᾞᾟᾨᾩᾪᾫᾬᾭᾮᾯᾼῌῼ -------------------------------------------------------------------------------- /tutorials/Chapter2/assets/Uppercase_Letter.txt: -------------------------------------------------------------------------------- 1 | 
ABCDEFGHIJKLMNOPQRSTUVWXYZÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞĀĂĄĆĈĊČĎĐĒĔĖĘĚĜĞĠĢĤĦĨĪĬĮİIJĴĶĹĻĽĿŁŃŅŇŊŌŎŐŒŔŖŘŚŜŞŠŢŤŦŨŪŬŮŰŲŴŶŸŹŻŽƁƂƄƆƇƉƊƋƎƏƐƑƓƔƖƗƘƜƝƟƠƢƤƦƧƩƬƮƯƱƲƳƵƷƸƼDŽLJNJǍǏǑǓǕǗǙǛǞǠǢǤǦǨǪǬǮDZǴǶǷǸǺǼǾȀȂȄȆȈȊȌȎȐȒȔȖȘȚȜȞȠȢȤȦȨȪȬȮȰȲȺȻȽȾɁɃɄɅɆɈɊɌɎͰͲͶͿΆΈΉΊΌΎΏΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΤΥΦΧΨΩΪΫϏϒϓϔϘϚϜϞϠϢϤϦϨϪϬϮϴϷϹϺϽϾϿЀЁЂЃЄЅІЇЈЉЊЋЌЍЎЏАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯѠѢѤѦѨѪѬѮѰѲѴѶѸѺѼѾҀҊҌҎҐҒҔҖҘҚҜҞҠҢҤҦҨҪҬҮҰҲҴҶҸҺҼҾӀӁӃӅӇӉӋӍӐӒӔӖӘӚӜӞӠӢӤӦӨӪӬӮӰӲӴӶӸӺӼӾԀԂԄԆԈԊԌԎԐԒԔԖԘԚԜԞԠԢԤԦԨԪԬԮԱԲԳԴԵԶԷԸԹԺԻԼԽԾԿՀՁՂՃՄՅՆՇՈՉՊՋՌՍՎՏՐՑՒՓՔՕՖႠႡႢႣႤႥႦႧႨႩႪႫႬႭႮႯႰႱႲႳႴႵႶႷႸႹႺႻႼႽႾႿჀჁჂჃჄჅჇჍᎠᎡᎢᎣᎤᎥᎦᎧᎨᎩᎪᎫᎬᎭᎮᎯᎰᎱᎲᎳᎴᎵᎶᎷᎸᎹᎺᎻᎼᎽᎾᎿᏀᏁᏂᏃᏄᏅᏆᏇᏈᏉᏊᏋᏌᏍᏎᏏᏐᏑᏒᏓᏔᏕᏖᏗᏘᏙᏚᏛᏜᏝᏞᏟᏠᏡᏢᏣᏤᏥᏦᏧᏨᏩᏪᏫᏬᏭᏮᏯᏰᏱᏲᏳᏴᏵḀḂḄḆḈḊḌḎḐḒḔḖḘḚḜḞḠḢḤḦḨḪḬḮḰḲḴḶḸḺḼḾṀṂṄṆṈṊṌṎṐṒṔṖṘṚṜṞṠṢṤṦṨṪṬṮṰṲṴṶṸṺṼṾẀẂẄẆẈẊẌẎẐẒẔẞẠẢẤẦẨẪẬẮẰẲẴẶẸẺẼẾỀỂỄỆỈỊỌỎỐỒỔỖỘỚỜỞỠỢỤỦỨỪỬỮỰỲỴỶỸỺỼỾἈἉἊἋἌἍἎἏἘἙἚἛἜἝἨἩἪἫἬἭἮἯἸἹἺἻἼἽἾἿὈὉὊὋὌὍὙὛὝὟὨὩὪὫὬὭὮὯᾸᾹᾺΆῈΈῊΉῘῙῚΊῨῩῪΎῬῸΌῺΏℂℇℋℌℍℐℑℒℕℙℚℛℜℝℤΩℨKÅℬℭℰℱℲℳℾℿⅅↃⰀⰁⰂⰃⰄⰅⰆⰇⰈⰉⰊⰋⰌⰍⰎⰏⰐⰑⰒⰓⰔⰕⰖⰗⰘⰙⰚⰛⰜⰝⰞⰟⰠⰡⰢⰣⰤⰥⰦⰧⰨⰩⰪⰫⰬⰭⰮⱠⱢⱣⱤⱧⱩⱫⱭⱮⱯⱰⱲⱵⱾⱿⲀⲂⲄⲆⲈⲊⲌⲎⲐⲒⲔⲖⲘⲚⲜⲞⲠⲢⲤⲦⲨⲪⲬⲮⲰⲲⲴⲶⲸⲺⲼⲾⳀⳂⳄⳆⳈⳊⳌⳎⳐⳒⳔⳖⳘⳚⳜⳞⳠⳢⳫⳭⳲꙀꙂꙄꙆꙈꙊꙌꙎꙐꙒꙔꙖꙘꙚꙜꙞꙠꙢꙤꙦꙨꙪꙬꚀꚂꚄꚆꚈꚊꚌꚎꚐꚒꚔꚖꚘꚚꜢꜤꜦꜨꜪꜬꜮꜲꜴꜶꜸꜺꜼꜾꝀꝂꝄꝆꝈꝊꝌꝎꝐꝒꝔꝖꝘꝚꝜꝞꝠꝢꝤꝦꝨꝪꝬꝮꝹꝻꝽꝾꞀꞂꞄꞆꞋꞍꞐꞒꞖꞘꞚꞜꞞꞠꞢꞤꞦꞨꞪꞫꞬꞭꞮꞰꞱꞲꞳꞴꞶABCDEFGHIJKLMNOPQRSTUVWXYZ -------------------------------------------------------------------------------- /tutorials/Chapter2/assets/jieba分词算法流程图.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BrightXiaoHan/MachineTranslationTutorial/2c36dfa18fb2d3fb9ba6264f1c19af79dd69200f/tutorials/Chapter2/assets/jieba分词算法流程图.jpg -------------------------------------------------------------------------------- /tutorials/Chapter2/assets/nonbreaking_prefix.en: -------------------------------------------------------------------------------- 1 | #Anything in this file, followed by a period (and an upper-case word), does NOT indicate an end-of-sentence marker. 2 | #Special cases are included for prefixes that ONLY appear before 0-9 numbers. 3 | 4 | #any single upper case letter followed by a period is not a sentence ender (excluding I occasionally, but we leave it in) 5 | #usually upper case letters are initials in a name 6 | A 7 | B 8 | C 9 | D 10 | E 11 | F 12 | G 13 | H 14 | I 15 | J 16 | K 17 | L 18 | M 19 | N 20 | O 21 | P 22 | Q 23 | R 24 | S 25 | T 26 | U 27 | V 28 | W 29 | X 30 | Y 31 | Z 32 | 33 | #List of titles. These are often followed by upper-case names, but do not indicate sentence breaks 34 | Adj 35 | Adm 36 | Adv 37 | Asst 38 | Bart 39 | Bldg 40 | Brig 41 | Bros 42 | Capt 43 | Cmdr 44 | Col 45 | Comdr 46 | Con 47 | Corp 48 | Cpl 49 | DR 50 | Dr 51 | Drs 52 | Ens 53 | Gen 54 | Gov 55 | Hon 56 | Hr 57 | Hosp 58 | Insp 59 | Lt 60 | MM 61 | MR 62 | MRS 63 | MS 64 | Maj 65 | Messrs 66 | Mlle 67 | Mme 68 | Mr 69 | Mrs 70 | Ms 71 | Msgr 72 | Op 73 | Ord 74 | Pfc 75 | Ph 76 | Prof 77 | Pvt 78 | Rep 79 | Reps 80 | Res 81 | Rev 82 | Rt 83 | Sen 84 | Sens 85 | Sfc 86 | Sgt 87 | Sr 88 | St 89 | Supt 90 | Surg 91 | 92 | #misc - odd period-ending items that NEVER indicate breaks (p.m. does NOT fall into this category - it sometimes ends a sentence) 93 | v 94 | vs 95 | i.e 96 | rev 97 | e.g 98 | 99 | #Numbers only. These should only induce breaks when followed by a numeric sequence 100 | # add NUMERIC_ONLY after the word for this function 101 | #This case is mostly for the english "No." 
which can either be a sentence of its own, or 102 | #if followed by a number, a non-breaking prefix 103 | No #NUMERIC_ONLY# 104 | Nos 105 | Art #NUMERIC_ONLY# 106 | Nr 107 | pp #NUMERIC_ONLY# 108 | 109 | #month abbreviations 110 | Jan 111 | Feb 112 | Mar 113 | Apr 114 | #May is a full word 115 | Jun 116 | Jul 117 | Aug 118 | Sep 119 | Oct 120 | Nov 121 | Dec 122 | -------------------------------------------------------------------------------- /tutorials/Chapter2/assets/prob_start.p: -------------------------------------------------------------------------------- 1 | (dp0 2 | S'B' 3 | p1 4 | F-0.26268660809250016 5 | sS'E' 6 | p2 7 | F-3.14e+100 8 | sS'M' 9 | p3 10 | F-3.14e+100 11 | sS'S' 12 | p4 13 | F-1.4652633398537678 14 | s. -------------------------------------------------------------------------------- /tutorials/Chapter2/assets/prob_trans.p: -------------------------------------------------------------------------------- 1 | (dp0 2 | S'B' 3 | p1 4 | (dp2 5 | S'E' 6 | p3 7 | F-0.51082562376599 8 | sS'M' 9 | p4 10 | F-0.916290731874155 11 | ssg3 12 | (dp5 13 | g1 14 | F-0.5897149736854513 15 | sS'S' 16 | p6 17 | F-0.8085250474669937 18 | ssg4 19 | (dp7 20 | g3 21 | F-0.33344856811948514 22 | sg4 23 | F-1.2603623820268226 24 | ssg6 25 | (dp8 26 | g1 27 | F-0.7211965654669841 28 | sg6 29 | F-0.6658631448798212 30 | ss. -------------------------------------------------------------------------------- /tutorials/Chapter2/assets/viterbi_step1.drawio.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BrightXiaoHan/MachineTranslationTutorial/2c36dfa18fb2d3fb9ba6264f1c19af79dd69200f/tutorials/Chapter2/assets/viterbi_step1.drawio.png -------------------------------------------------------------------------------- /tutorials/Chapter2/assets/viterbi_step2.drawio.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BrightXiaoHan/MachineTranslationTutorial/2c36dfa18fb2d3fb9ba6264f1c19af79dd69200f/tutorials/Chapter2/assets/viterbi_step2.drawio.png -------------------------------------------------------------------------------- /tutorials/Chapter2/assets/viterbi_step3.drawio.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BrightXiaoHan/MachineTranslationTutorial/2c36dfa18fb2d3fb9ba6264f1c19af79dd69200f/tutorials/Chapter2/assets/viterbi_step3.drawio.png -------------------------------------------------------------------------------- /tutorials/Chapter2/assets/viterbi_step4.drawio.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BrightXiaoHan/MachineTranslationTutorial/2c36dfa18fb2d3fb9ba6264f1c19af79dd69200f/tutorials/Chapter2/assets/viterbi_step4.drawio.png -------------------------------------------------------------------------------- /tutorials/Chapter2/assets/序列标注问题.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BrightXiaoHan/MachineTranslationTutorial/2c36dfa18fb2d3fb9ba6264f1c19af79dd69200f/tutorials/Chapter2/assets/序列标注问题.png --------------------------------------------------------------------------------