├── .gitignore ├── LICENSE ├── Makefile ├── README.md ├── environment-dev.yml ├── environment.yml ├── img └── SampleHeatmap.png ├── setup.cfg ├── setup.py ├── spacyface ├── __init__.py ├── aligner.py ├── checker │ ├── __init__.py │ └── against_corpus.py ├── simple_spacy_token.py └── utils │ ├── __init__.py │ ├── f.py │ └── sentence_extracting.py └── tests ├── EN_TEST_SENTS.py ├── __init__.py ├── test_aligner.py └── wiki.test.txt /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | .vscode 3 | build 4 | 5 | # Emacs specific 6 | *~ 7 | .*~ 8 | #* 9 | .#* 10 | 11 | # Python 12 | __pycache__ 13 | *.egg-info 14 | .ipynb_checkpoints 15 | .pytest_cache 16 | 17 | # For holding files 18 | .archive 19 | 20 | # Notebooks that are not titled 21 | Untitled*.ipynb 22 | 23 | # For distribution on pip 24 | dist -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2020 Benjamin Hoover 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .ONESHELL: 2 | SHELL := /bin/bash 3 | 4 | test: 5 | pytest tests 6 | 7 | pypi: dist 8 | twine upload --repository pypi dist/* 9 | 10 | dist: clean 11 | python setup.py sdist bdist_wheel 12 | 13 | clean: 14 | rm -rf dist -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | # Spacyface aligner 3 | 4 | Align [Huggingface Transformer](https://github.com/huggingface/transformers) model tokenizations with linguistic metadata provided by [spaCy](https://spacy.io/)! 5 | 6 | *Currently only supports English tokenizations* 7 | 8 | ## Getting started 9 | ### Pip 10 | 1. `pip install spacyface` 11 | 2. `python -m spacy download en_core_web_sm` 12 | 13 | ### Manual (Clone and conda) 14 | From the root of this project: 15 | 16 | ``` 17 | conda env create -f environment.yml 18 | conda activate spacyface 19 | # conda env update -f environment-dev.yml # OPTIONAL 20 | python -m spacy download en_core_web_sm 21 | pip install -e . 22 | ``` 23 | 24 | ## Usage 25 | ### Basic Usage on a sentence 26 | Every aligner can be created and used as described in the example below: 27 | 28 | ``` python 29 | from spacyface import BertAligner 30 | 31 | alnr = BertAligner.from_pretrained("bert-base-cased") 32 | sentence = "Do you know why they call me the Count? Because I love to count! Ah-hah-hah!" 33 | tokens = alnr.meta_tokenize(sentence) 34 | print("Tokens:\n\n", [(tok.token, tok.pos) for tok in tokens]) 35 | ``` 36 | 37 | ``` 38 | Tokens: 39 | 40 | [('Do', 'AUX'), ('you', 'PRON'), ('know', 'VERB'), ('why', 'ADV'), ('they', 'PRON'), ('call', 'VERB'), ('me', 'PRON'), ('the', 'DET'), ('Count', 'PROPN'), ('?', 'PUNCT'), ('Because', 'SCONJ'), ('I', 'PRON'), ('love', 'VERB'), ('to', 'PART'), ('count', 'VERB'), ('!', 'PUNCT'), ('Ah', 'INTJ'), ('-', 'PUNCT'), ('ha', 'X'), ('##h', 'X'), ('-', 'PUNCT'), ('ha', 'NOUN'), ('##h', 'NOUN'), ('!', 'PUNCT')] 41 | ``` 42 | 43 | Because the information comes directly from spaCy's `Token` class, any information that spaCy exposes about a token can be included in the huggingface token. The user only needs to modify the exposed attributes in the [SimpleSpacyToken](./spacyface/simple_spacy_token.py) class. 44 | 45 | This can also be extrapolated to tokenize entire English corpora with the use of a generator. An example raw corpus representing a subset of wikipedia is included in the [tests](./tests) directory. 46 | 47 | ### Observing attention between linguistic features 48 | This library also enables us to look at the attention pattern heatmaps for a particular layer and a particular head in terms of the linguistic features of the tokens being attended to and from. 49 | 50 | ``` python 51 | from transformers import AutoModel 52 | import torch 53 | import matplotlib.pyplot as plt 54 | import seaborn as sn 55 | from spacyface import RobertaAligner 56 | 57 | alnr_cls = RobertaAligner 58 | model_name = "roberta-base" 59 | sentence = "A simple sentence for the ages."
60 | layer = 8 61 | heads = [7] 62 | 63 | alnr = alnr_cls.from_pretrained(model_name) 64 | model = AutoModel.from_pretrained(model_name, output_attentions=True) 65 | model.eval() # Remove DropOut effect 66 | 67 | model_input, meta_info = alnr.sentence_to_input(sentence) 68 | 69 | _, _, atts = model(**model_input) 70 | 71 | to_show = atts[layer][0][heads].mean(0)[1:-1, 1:-1] # Don't show special tokens for Roberta Model 72 | 73 | deps = [t.dep for t in meta_info[1:-1]] 74 | 75 | # Plot 76 | plt.figure() 77 | sn.set(font_scale=1.5) 78 | sn.heatmap(to_show.detach().numpy(), xticklabels=deps, yticklabels=deps) 79 | plt.title(f"Layer {layer} for head(s): {heads}\n\"{sentence}\"") 80 | plt.show() 81 | ``` 82 | 83 | ![Attention heatmap Layer 8 head 7](./img/SampleHeatmap.png) 84 | 85 | Interestingly, we have discovered that Layer 8, head 7 has a strong affinity for a POBJ (Object of the Preposition) looking at a PREP (Preposition). Cool! 86 | 87 | ## Background 88 | Different transformer models use different tokenizations. At the time of this writing, many of these tokenizations split larger English words into smaller tokens called "wordpieces" and use different methods of indicating that a token was once part of a larger word. 89 | 90 | For inspection and research, it is helpful to align these tokenizations with the linguistic features of the original words of the sentence. [spaCy](https://spacy.io/) is a fantastic python library for assigning linguistic features (e.g., dependencies, parts of speech, tags, exceptions) to the words of different languages, but its method for tokenizing is vastly different from the tokenization schemes that operate on the wordpiece level. This repository aims to align spaCy tokens with the wordpiece tokens needed for training and inference of the different [Huggingface Transformer](https://github.com/huggingface/transformers) models. 91 | 92 | In short, *this repository enables the strange and varied tokenizations belonging to different transformer models to be correctly annotated with the metadata returned by spaCy's tokenization.* 93 | 94 | Currently, the repository only supports the English language, and the following huggingface pretrained models have been tested: 95 | 96 | - Bert 97 | - GPT2 (covers distilgpt2) 98 | - Roberta (covers distilroberta) 99 | - DistilBert 100 | - TransfoXL 101 | - XLNet 102 | - XLM 103 | - Albert 104 | - CTRL 105 | - OpenAIGPT 106 | - XLMRoberta 107 | 108 | At the time of release, the only model that doesn't work with the alignment is the T5 Tokenization scheme. 109 | 110 | Originally created to ease the development of [exBERT](http://exbert.net/), these tools have been made available for others to use in their own projects as they see fit. 111 | 112 | ## Testing the aligner 113 | A few edge case sentences that include hardcoded exceptions to the English language as well as strange punctuation have been included in [EN_TEST_SENTS.py](./tests/EN_TEST_SENTS.py). You can run these tests on the established aligners with `python -m pytest` from the root folder. 114 | 115 | Sometimes, your application may not care about edge cases that are hard to detect. You can test an alignment on a more representative subset of the English language with the included [wikipedia subset](./tests/wiki.test.txt), or use your own text file corpus. 
To do this, run 116 | 117 | ``` python 118 | from spacyface import TransfoXLAligner 119 | from spacyface.checker import check_against_corpus 120 | corpus = 'tests/wiki.test.txt' 121 | alnr = TransfoXLAligner.from_pretrained('transfo-xl-wt103') 122 | check_against_corpus(alnr, corpus) 123 | ``` 124 | 125 | and wait a few minutes to see if any sentences break. 126 | 127 | ## Notable Behavior and Exceptions 128 | This repository makes the large assumption that there is no English "word" smaller than a token needed for a transformer model. This is an accurate assumption for most of the published transformer models. 129 | 130 | It is difficult to align such completely different tokenization schemes. As a result, there are a few strange behaviors that, while not desirable, are intentional in order to keep the method for aligning the different tokenization schemes simple. These behaviors are listed below. 131 | 132 | - When a token exists as a part of a larger word, the linguistic information belonging to the larger word is bestowed on the token. 133 | - Multiple consecutive spaces in a sentence are replaced with a single space. 134 | - The English language is riddled with exceptions to tokenization rules. Sometimes, a punctuation mark is included in the middle of what is a single token (e.g., "Mr." or "N.Y."). Other times, contractions that look nothing like the words they combine (e.g., "ain't" looks nothing like "is not" or "am not" or "are not") create difficulties for aligning. To prevent these from being an issue, this repository replaces such exceptions with their "normalized" spaCy representations (the token's `norm_` attribute). 135 | - Many tokenizers insert special tokens (e.g., "[CLS]", "[SEP]", "[MASK]", "\") for certain functionalities. The metadata for all these tokens is assigned to `None`. 136 | 137 | **Specific to GPT2** 138 | - Sometimes, GPT2 tokenization will include a space before a punctuation mark that should not have been there. For example, the tokenization of "Hello Bob." should be `["Hello", "ĠBob", "."]`, but it is instead `["Hello", "ĠBob", "Ġ."]`. This has not had any notable effects on performance, but note that it is different from the way the original model was pretrained. Hidden representations may also be slightly different than expected for terminating punctuation. 139 | 140 | ### Known Issues 141 | - A spaCy exception that is part of a `-`-delimited word (e.g., "dont-touch-me") will cause the meta tokenization to produce a different result from the model's own tokenization. See the GitHub issues for a more detailed description of this problem.
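If you suspect a particular sentence hits this edge case, a quick way to check is to compare the raw tokenization against the meta tokenization directly. The sketch below uses one of the known-broken sentences bundled in `tests/EN_TEST_SENTS.py`; `check_tokenization` performs the same comparison internally and either raises or prints on a mismatch.

``` python
from spacyface import BertAligner

alnr = BertAligner.from_pretrained("bert-base-uncased")

# One of the sentences from BROKEN_EN_TEST_SENTS in tests/EN_TEST_SENTS.py
s = "It's tonsa fun in the whatve whatve-you-dont U.K."

tokens = alnr.tokenize(s)
meta_tokens = [t.token for t in alnr.meta_tokenize(s)]

if tokens != meta_tokens:
    print("Tokenization mismatch:")
    print("tokenize:     ", tokens)
    print("meta_tokenize:", meta_tokens)

# Built-in equivalent: prints the discrepancy instead of raising an AssertionError
alnr.check_tokenization(s, hard_assert=False)
```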
142 | 143 | ### Acknowledgements 144 | - IBM Research & Harvard NLP 145 | -------------------------------------------------------------------------------- /environment-dev.yml: -------------------------------------------------------------------------------- 1 | name: spacyface 2 | channels: 3 | - conda-forge 4 | dependencies: 5 | - jupyter 6 | - pytest 7 | - jupyter_client 8 | - jupyter_console 9 | - jupyter_contrib_core 10 | - jupyter_contrib_nbextensions 11 | - matplotlib 12 | - twine 13 | -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | name: spacyface 2 | channels: 3 | - pytorch 4 | - conda-forge 5 | - defaults 6 | - anaconda 7 | dependencies: 8 | - python=3.7 9 | - pip>=19.0.3 10 | - pytest 11 | - h5py 12 | - spacy 13 | - regex 14 | - numpy 15 | - pytorch 16 | - sacremoses 17 | - pip: 18 | - sentencepiece 19 | - transformers 20 | -------------------------------------------------------------------------------- /img/SampleHeatmap.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bhoov/spacyface/5265e9e8f645f86a2653c65f5d017ec2930e8009/img/SampleHeatmap.png -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | description-file = README.md -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | 3 | requires = [ 4 | 'transformers>=2.3.0', 5 | 'h5py>=2.10.0', 6 | 'numpy>=1.17.4', 7 | 'regex>=2020.1.8', 8 | 'spacy>=2.2.3', 9 | 'torch', 10 | ] 11 | 12 | setup( 13 | name="spacyface", 14 | description="Aligner for spacy and huggingface tokenization", 15 | packages=find_packages(exclude=['tests']), 16 | version='0.3.0', 17 | license='Apache 2.0', 18 | author="Ben Hoover", 19 | author_email="benjamin.hoover@ibm.com", 20 | url="https://github.com/bhoov/spacyface", 21 | keywords=["transformer", "pytorch", "spacy", "tokenize", "tokenization", "NLP", "Natural Language Processing", 22 | "huggingface", "linguistic"], 23 | include_package_data=True, 24 | install_requires=requires, 25 | classifiers=[ 26 | 'Development Status :: 3 - Alpha', 27 | 'Programming Language :: Python :: 3.6', 28 | 'Programming Language :: Python :: 3.7', 29 | ], 30 | python_requires='>=3.6, <3.8' 31 | ) 32 | -------------------------------------------------------------------------------- /spacyface/__init__.py: -------------------------------------------------------------------------------- 1 | from .aligner import ( 2 | MakeAligner, 3 | BertAligner, 4 | GPT2Aligner, 5 | RobertaAligner, 6 | DistilBertAligner, 7 | TransfoXLAligner, 8 | XLNetAligner, 9 | AlbertAligner, 10 | XLMAligner, 11 | CTRLAligner, 12 | OpenAIGPTAligner, 13 | T5Aligner, 14 | XLMRobertaAligner, 15 | ) 16 | 17 | from .simple_spacy_token import SimpleSpacyToken 18 | 19 | __all__ = ["MakeAligner", "SimpleSpacyToken", "BertAligner", "GPT2Aligner", "RobertaAligner", "DistilBertAligner", 20 | "TransfoXLAligner", "XLNetAligner", "AlbertAligner", "XLMAligner", "AlbertAligner", 21 | "CTRLAligner", "OpenAIGPTAligner", "T5Aligner", "XLMRobertaAligner"] 22 | -------------------------------------------------------------------------------- /spacyface/aligner.py: 
-------------------------------------------------------------------------------- 1 | from typing import List, Iterable, Union 2 | import spacy 3 | from spacy.tokens.token import Token as SpacyToken 4 | from spacy.tokens.doc import Doc as SpacyDoc 5 | import torch 6 | import regex as re 7 | 8 | from transformers import ( 9 | BertTokenizer, 10 | GPT2Tokenizer, 11 | RobertaTokenizer, 12 | DistilBertTokenizer, 13 | TransfoXLTokenizer, 14 | XLNetTokenizer, 15 | XLMTokenizer, 16 | AlbertTokenizer, 17 | CTRLTokenizer, 18 | T5Tokenizer, 19 | XLMRobertaTokenizer, 20 | OpenAIGPTTokenizer, 21 | XLMRobertaTokenizer, 22 | AutoTokenizer, 23 | ) 24 | 25 | from .simple_spacy_token import SimpleSpacyToken 26 | from .utils.f import flatten_, assoc, delegates 27 | 28 | def doc_to_fixed_tokens(doc: SpacyDoc) -> List[str]: 29 | """Fix the tokens in a document to not have exceptions""" 30 | return [fix_token(t) for t in doc] 31 | 32 | def fix_token(tok: SpacyToken) -> str: 33 | """Determine whether a token should be represented by its text or its norm 34 | 35 | This works to fix most instances EXCEPT when an exception is part of a word with a '-' in it. 36 | For example, "whatve-you-done" would produce two different tokenizations: 37 | 38 | >>> alnr = BertAligner.from_pretrained('bert-base-uncased') 39 | >>> s = "whatve-you-dont" 40 | >>> alnr.tokenize(s) # => ['what', '##ve', '-', 'you', '-', 'don', '##t'] 41 | >>> [t.token for t in alnr.meta_tokenize(s)] # => ['what', 'have', '-', 'you', '-', 'do', 'not'] 42 | 43 | In practice, this situation occurs so rarely that it is often not a problem for real sentences to analyze. 44 | """ 45 | out = tok.text if tok.text.lower() == tok.norm_ else tok.norm_ 46 | 47 | return out 48 | 49 | def MakeAligner(pretrained_tokenizer, spacy_language_model): 50 | """Create an aligner from the pretrained tokenizers. Some caveats to note: 51 | 52 | Usage: 53 | BrandNewHuggingfaceAligner = MakeAligner(BrandNewHuggingfaceTokenizer) 54 | """ 55 | class Aligner(pretrained_tokenizer): 56 | @delegates(pretrained_tokenizer.__init__) 57 | def __init__(self, **kwargs): 58 | super().__init__(**kwargs) 59 | self.spacy_nlp = spacy.load(spacy_language_model) 60 | self.meta_container = SimpleSpacyToken 61 | 62 | def prep_sentence(self, s: str) -> str: 63 | """Remove contractions and multiple spaces from input sentence""" 64 | s = re.sub(r"\s+", r" ", s).strip() 65 | out = " ".join(self._to_normed_spacy(s)) 66 | return out 67 | 68 | @delegates(pretrained_tokenizer.tokenize) 69 | def tokenize(self, s: str, **kwargs) -> List[str]: 70 | s = self.prep_sentence(s) 71 | return super().tokenize(s, **kwargs) 72 | 73 | def meta_tokenize(self, s: str) -> List[SimpleSpacyToken]: 74 | """Tokenize the sentence and return the metadata for it according to Spacy 75 | 76 | Due to implementation differences, does not provide the exact same API as the 77 | PreTrainedTokenizer's `tokenize` function 78 | """ 79 | meta_info = self._to_spacy_meta(self.prep_sentence(s)) 80 | return self._tokenize_from_spacy_meta(meta_info) 81 | 82 | def meta_from_tokens(self, sentence: str, tokens: List[str], perform_check=True) -> List[SimpleSpacyToken]: 83 | """Convert existing tokens into their metadata, ignoring effects of special tokens from the tokenizer 84 | 85 | NOTE that the sentence MUST be the same sentence that produced the tokens, otherwise, 86 | an unpredictable error may occur. Or worse, it will act like it works when it in fact doesn't. 
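        Illustrative example (any of the pretrained aligners behaves the same way):

            >>> alnr = BertAligner.from_pretrained("bert-base-uncased")
            >>> s = "Do you know why they call me the Count?"
            >>> toks = alnr.tokenize(s)
            >>> meta = alnr.meta_from_tokens(s, toks)  # one SimpleSpacyToken per entry in `toks`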
87 | 88 | Parameters: 89 | - sentence: Sentence the tokens came from 90 | - tokens: Tokenized version of the sentence. Can be post encoding or pre-encoding 91 | (where special tokens are added) 92 | - perform_check: If True, check that the tokens come from the sentence. This slows down processing 93 | and should be False if speed is more important than accuracy 94 | """ 95 | orig_meta = self.meta_tokenize(sentence) 96 | 97 | new_meta = [] 98 | j = 0 99 | 100 | # Unfortunately, this can really slow down predictions. 101 | if perform_check: 102 | is_encoded = self.encode(sentence) == self.convert_tokens_to_ids(tokens) 103 | is_tokenized = self.tokenize(sentence) == tokens 104 | assert is_encoded or is_tokenized, "Can only take tokens that come from the original sentence!" 105 | 106 | for i, b in enumerate(tokens): 107 | if b in self.all_special_tokens: 108 | new_meta.append(self.meta_container(b)) 109 | else: 110 | new_meta.append(orig_meta[j]) 111 | j += 1 112 | 113 | return new_meta 114 | 115 | def _to_normed_spacy(self, s: str) -> List[str]: 116 | """Return the normalized tokens (i.e., language exceptions replaced by a lowercased version)""" 117 | doc = self.spacy_nlp(s) 118 | tokens = self._doc_to_fixed_tokens(doc) 119 | return tokens 120 | 121 | def _to_spacy_meta(self, s: str) -> List[SimpleSpacyToken]: # list of simple spacy tokens... 122 | """Convert a string into a list of records containing simplified spacy information""" 123 | doc = self.spacy_nlp(s) 124 | out = [self.meta_container(t) for t in doc] 125 | return out 126 | 127 | @delegates(pretrained_tokenizer.tokenize) 128 | def _raw_tokenize(self, s: str, **kwargs) -> List[str]: 129 | """This bypasses the custom tokenization for the tokenization of the original model.""" 130 | return super().tokenize(s, **kwargs) 131 | 132 | def _to_raw_spacy(self, s: str) -> List[str]: 133 | """Return the raw spacy tokens of a string""" 134 | doc = self.spacy_nlp(s) 135 | tokens = [t.text for t in doc] 136 | return tokens 137 | 138 | def _tokenize_from_spacy_meta(self, spacy_meta: List[SimpleSpacyToken]) -> List[SimpleSpacyToken]: 139 | """Convert spacy-tokenized SimpleSpacyTokens into the appropriate tokenizer's tokens""" 140 | out = [self._tokenize_from_meta_single(sm, i) for i, sm in enumerate(spacy_meta)] 141 | return flatten_(out) 142 | 143 | def _tokenize_from_meta_single(self, meta_token: SimpleSpacyToken, idx:int) -> List[SimpleSpacyToken]: 144 | """Split a single spacy token with metadata into tokenizer tokens. 145 | 146 | Because the transformer's tokenizer may split each Spacy-tokenized word into multiple subwords, 147 | output a list 148 | 149 | For GPT2 tokenization, there is a different behavior for the tokenization of a word if it 150 | starts the sentence vs if it occurs later in the sentence. 
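        For example, with GPT2's byte-level BPE a word such as "count" is typically encoded as
        "count" at the start of a string but as "Ġcount" after a space; the BUFFER below forces the
        mid-sentence form, and the leading buffer token is then dropped via `offset`.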
151 | """ 152 | BUFFER = "X " # GPT tokenization fusses if it thinks the token is the beginning of the sentence 153 | 154 | def choose_norm(t): 155 | return t['token'] if t['token'].lower() == t['norm'] else t['norm'] 156 | 157 | tok = choose_norm(meta_token) 158 | 159 | if idx != 0: 160 | s = BUFFER + tok # Add a buffer with guaranteed tokenization of length 1 to input 161 | offset = 1 162 | else: 163 | s = tok 164 | offset = 0 165 | 166 | bpe_tokens = super().tokenize(s) # Can't do `self.tokenize` because it will normalize again 167 | 168 | # Functional version that works with dictionaries 169 | return [meta_token.assoc("token", b) for b in bpe_tokens[offset:]] 170 | 171 | def _doc_to_fixed_tokens(self, doc: SpacyDoc) -> List[str]: 172 | """Extract tokens from a document, accounting for exceptions only if needed""" 173 | tokens = doc_to_fixed_tokens(doc) 174 | return tokens 175 | 176 | def _maybe_conv_to_token(self, tok_or_str:Union[str, SimpleSpacyToken]): 177 | """Convert a token to a SimpleSpacy token if a string. Otherwise, return input unmodified 178 | 179 | Args: 180 | tok_or_str: The token be analyzed 181 | 182 | Returns: 183 | SimpleSpacyToken. If input was a string, it has been converted to this class. 184 | """ 185 | 186 | if isinstance(tok_or_str, SimpleSpacyToken): 187 | return tok_or_str 188 | return SimpleSpacyToken(self.convert_ids_to_tokens([tok_or_str])[0]) 189 | 190 | def sentence_to_input(self, sentence:str): 191 | """Convert sentence to the input needed for a huggingface model 192 | 193 | Args: 194 | sentence: Sentence to prepare to send into the model 195 | 196 | Returns: 197 | Tuple of (object that can be directly passed into the model, modified meta tokens) 198 | 199 | Examples: 200 | 201 | >>> alnr = RobertaAligner.from_pretrained('roberta-base') 202 | >>> model = AutoModel.from_pretrained('roberta-base', output_attentions=True) 203 | >>> model.eval() # Remove DropOut effect 204 | >>> model_input, meta_info = alnr.sentence_to_input(sentence) 205 | >>> last_layer_hidden_state, pooler, atts = model(**model_input) 206 | """ 207 | 208 | meta_tokens = self.meta_tokenize(sentence) 209 | tokens = [tok.token for tok in meta_tokens] 210 | ids = self.convert_tokens_to_ids(tokens) 211 | raw_model_input = self.prepare_for_model(ids, add_special_tokens=True) 212 | model_input = {k: torch.tensor(v).unsqueeze(0) for k,v in raw_model_input.items() if isinstance(v, List)} 213 | 214 | meta_input = self.prepare_for_model(meta_tokens)['input_ids'] 215 | new_meta = list(map(self._maybe_conv_to_token, meta_input)) 216 | 217 | return model_input, new_meta 218 | 219 | def check_tokenization(self, sentence:str, hard_assert=True): 220 | tokens = self.tokenize(sentence) 221 | meta_tokens = self.meta_tokenize(sentence) 222 | mtokens = [m.token for m in meta_tokens] 223 | 224 | error_str = """Meta tokenization did not match expected tokenization! 
225 | 226 | EXPECTED: 227 | {} 228 | 229 | META TOKENS REPORTED: 230 | {} 231 | 232 | """ 233 | is_fine = mtokens == tokens 234 | 235 | if hard_assert: 236 | assert is_fine, error_str.format(tokens, mtokens) 237 | else: 238 | if not is_fine: print(error_str.format(tokens, mtokens)) 239 | 240 | return Aligner 241 | 242 | english = "en_core_web_sm" 243 | 244 | BertAligner = MakeAligner(BertTokenizer, english) 245 | GPT2Aligner = MakeAligner(GPT2Tokenizer, english) 246 | RobertaAligner = MakeAligner(RobertaTokenizer, english) 247 | DistilBertAligner = MakeAligner(DistilBertTokenizer, english) 248 | TransfoXLAligner = MakeAligner(TransfoXLTokenizer, english) 249 | XLNetAligner = MakeAligner(XLNetTokenizer, english) 250 | XLMAligner = MakeAligner(XLMTokenizer, english) 251 | CTRLAligner = MakeAligner(CTRLTokenizer, english) 252 | AlbertAligner = MakeAligner(AlbertTokenizer, english) 253 | OpenAIGPTAligner= MakeAligner(OpenAIGPTTokenizer, english) 254 | T5Aligner= MakeAligner(T5Tokenizer, english) 255 | XLMRobertaAligner= MakeAligner(XLMRobertaTokenizer, english) 256 | -------------------------------------------------------------------------------- /spacyface/checker/__init__.py: -------------------------------------------------------------------------------- 1 | """Use to verify an aligner for a particular application""" 2 | from .against_corpus import check_against_corpus 3 | 4 | __all__ = ["check_against_corpus"] 5 | -------------------------------------------------------------------------------- /spacyface/checker/against_corpus.py: -------------------------------------------------------------------------------- 1 | """This module provides a a means to test an aligner against a desired corpus""" 2 | 3 | from pathlib import Path 4 | import argparse 5 | from spacyface.utils.sentence_extracting import extract_chars 6 | from spacyface import * 7 | 8 | 9 | def check_against_corpus(alnr, corpus_name, hard_assert=True): 10 | """Go through every sentence of the corpus and see if the meta tokenization is different than base transformer tokenization 11 | 12 | Args: 13 | alnr: Aligner 14 | corpus_name: Name of text file to parse 15 | hard_assert: If True, break on first error. Otherwise, print error msg and continue 16 | """ 17 | src = open(corpus_name) 18 | chunk_gen = extract_chars(src, 100000) 19 | for c, chunk in enumerate(chunk_gen): 20 | doc = alnr.spacy_nlp(chunk) 21 | sents = [sent.text for sent in doc.sents] 22 | for i, sent in enumerate(sents): 23 | if i % 100 == 0: print(f"Chunk {c}. 
Sentence {i}") 24 | alnr.check_tokenization(sent, hard_assert) 25 | 26 | src.close() 27 | -------------------------------------------------------------------------------- /spacyface/simple_spacy_token.py: -------------------------------------------------------------------------------- 1 | """ 2 | Describes the structure of a language token represented by Spacy-extracted metadata 3 | 4 | """ 5 | import h5py 6 | import numpy as np 7 | from spacy.tokens.token import Token as SpacyToken 8 | from typing import Union, List, Tuple 9 | 10 | 11 | def check_ent(tok: SpacyToken): 12 | """Check whether token is an entity 13 | 14 | Default Spacy Token does not assume what kind of entity you are looking for, but 15 | provides the following denotations: 16 | 17 | 0: No entity tag is set 18 | 1: inside an entity 19 | 2: outside an entity 20 | 3: Token begins an entity 21 | 22 | Args: 23 | tok: The Spacy Token 24 | 25 | Returns: 26 | Boolean indicating whether or not token is an entity 27 | """ 28 | OUT_OF_ENT = 2 29 | NO_ENT_DEFINED = 0 30 | return tok.ent_iob != OUT_OF_ENT and tok.ent_iob != NO_ENT_DEFINED 31 | 32 | class SimpleSpacyToken(): 33 | """A wrapper around a Spacy token to extract desired information 34 | 35 | This class implements a basic functional dictionary-like wrapper around the spacy token to 36 | make it easy to mutate and export attributes without directly changing state. Any attribute 37 | that is not prefixed by '_' is considered a key of this class. 38 | 39 | The design allows for the token to have no metadata by simply passing a `str` into 40 | the constructor. 41 | 42 | Attributes: 43 | token: str 44 | pos: str 45 | dep: str 46 | norm: str 47 | tag: str 48 | lemma: str 49 | head: str 50 | is_ent: bool 51 | 52 | Notes: 53 | If exporting to an HDF5 file, make sure to define what hdf5 datatype that attribute 54 | represents by changing the corresponding tuple in 'hdf5_token_dtype' 55 | """ 56 | 57 | # Define how each attribute is stored in an hdf5 file 58 | # Names MUST match attributes of this class 59 | hdf5_token_dtype = [ 60 | ("token", h5py.special_dtype(vlen=str)), 61 | ("pos", h5py.special_dtype(vlen=str)), 62 | ("dep", h5py.special_dtype(vlen=str)), 63 | ("norm", h5py.special_dtype(vlen=str)), 64 | ("tag", h5py.special_dtype(vlen=str)), 65 | ("lemma", h5py.special_dtype(vlen=str)), 66 | ("head", h5py.special_dtype(vlen=str)), 67 | ("is_ent", np.bool_), 68 | ] 69 | 70 | def __init__(self, t:Union[SpacyToken, str]): 71 | """Create a simplified version of a spacy token 72 | 73 | Args: 74 | t: A string or Spacy Token object to wrap 75 | 76 | Raises: 77 | ValueError: If input is not of type SpacyToken or str 78 | """ 79 | self._orig_token = t 80 | 81 | if type(t) == SpacyToken: 82 | self.token = t.text 83 | self.pos = t.pos_ 84 | self.dep = t.dep_ 85 | self.norm = t.norm_ 86 | self.tag = t.tag_ 87 | self.lemma = t.lemma_ 88 | self.head = t.head 89 | self.is_ent = check_ent(t) 90 | 91 | elif type(t) == str: 92 | self.token = t 93 | self.pos = None 94 | self.dep = None 95 | self.norm = None 96 | self.tag = None 97 | self.lemma = None 98 | self.head = None 99 | self.is_ent = None 100 | 101 | else: 102 | raise ValueError("Expected input of SpacyToken or str") 103 | 104 | def pick(self, keys:List[str]): 105 | """Return subset of the attributes specified in 'keys' as a simple dictioniary 106 | 107 | Args: 108 | keys: List of keys to extract 109 | 110 | Returns: 111 | Dictionary of only k in keys 112 | 113 | Raises: 114 | KeyError: If k in 'keys' is not an attribute 115 | 116 | """ 117 | 
return {k: self[k] for k in keys} 118 | 119 | def assoc(self, key:str, value): 120 | """Set the 'key' to the 'value', returning a new instance of this class. 121 | 122 | Args: 123 | key: Key that receives the value 124 | value: Value to assign to the key 125 | 126 | Returns: 127 | A new instance of this class with the modified key:value pair 128 | """ 129 | out = SimpleSpacyToken(self._orig_token) 130 | out[key] = value 131 | return out 132 | 133 | def __getitem__(self, key): 134 | """Access the key from this object's dictionary""" 135 | return self.__dict__[key] 136 | 137 | def __setitem__(self, key, value): 138 | """Assign, in place, the value to the key""" 139 | self.__dict__[key] = value 140 | 141 | def keys(self) -> List[str]: 142 | """Return a list of all attributes that don't start with '_'""" 143 | return [k for k in self.__dict__.keys() if not k.startswith('_')] 144 | 145 | def values(self) -> List: 146 | """Return a list of all values whose keys don't start with '_'""" 147 | 148 | return [v for k, v in self.__dict__.items() if not k.startswith('_')] 149 | 150 | def items(self) -> List[Tuple]: 151 | """Return a list of all items whose keys don't start with '_'""" 152 | return [(k, v) for k,v in self.__dict__.items() if not k.startswith('_')] 153 | 154 | def __repr__(self): 155 | return f"SimpleSpacyToken: {self.items()}" -------------------------------------------------------------------------------- /spacyface/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bhoov/spacyface/5265e9e8f645f86a2653c65f5d017ec2930e8009/spacyface/utils/__init__.py -------------------------------------------------------------------------------- /spacyface/utils/f.py: -------------------------------------------------------------------------------- 1 | """General programming utils, inclined toward functional programming. 2 | 3 | If ever a function changes its input in place, it is denoted by a trailing `_` 4 | """ 5 | 6 | import inspect 7 | from itertools import zip_longest 8 | from typing import List, Set, Union, Dict 9 | 10 | 11 | def ifnone(*xs): 12 | """Return the first item in 'xs' that is not None""" 13 | for x in xs: 14 | if x is not None: return x 15 | return None 16 | 17 | def custom_dir(c, add): return dir(type(c)) + list(c.__dict__.keys()) + add 18 | 19 | class GetAttr: 20 | """Base class for attr accesses in `self._xtra` passed down to `self.default` 21 | 22 | Taken from article by Jeremy Howard: https://www.fast.ai/2019/08/06/delegation/ 23 | 24 | Usage: 25 | 26 | ``` 27 | class ProductPage(GetAttr): 28 | def __init__(self, page, price, cost): 29 | self.page,self.price,self.cost = page,price,cost 30 | self.default = page 31 | ``` 32 | """ 33 | @property 34 | def _xtra(self): return [o for o in dir(self.default) if not o.startswith('_')] 35 | def __getattr__(self,k): 36 | if k in self._xtra: return getattr(self.default, k) 37 | raise AttributeError(k) 38 | def __dir__(self): return custom_dir(self, self._xtra) 39 | 40 | # Can I delegate many different functions? 41 | # Can I add a new docstring to the existing docstring of the delegated function? Or at least point to the function delegated? 42 | def delegates(to=None, keep=False): 43 | """ Decorator: replace `**kwargs` in signature with params from `to`.
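    Illustrative usage (hypothetical `Base`/`Child` classes; in this repository the same pattern
    appears as `@delegates(pretrained_tokenizer.__init__)` inside `MakeAligner`):

        class Base:
            def __init__(self, a=1, b=2): ...

        class Child(Base):
            @delegates(Base.__init__)
            def __init__(self, c=3, **kwargs):
                super().__init__(**kwargs)

        # inspect.signature(Child.__init__) now reads (self, c=3, a=1, b=2)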
44 | 45 | Taken from article by Jeremy Howard: https://www.fast.ai/2019/08/06/delegation/ 46 | """ 47 | 48 | def _f(f): 49 | if to is None: to_f,from_f = f.__base__.__init__,f.__init__ 50 | else: to_f,from_f = to,f 51 | sig = inspect.signature(from_f) 52 | sigd = dict(sig.parameters) 53 | k = sigd.pop('kwargs') 54 | s2 = {k:v for k,v in inspect.signature(to_f).parameters.items() 55 | if v.default != inspect.Parameter.empty and k not in sigd} 56 | sigd.update(s2) 57 | if keep: sigd['kwargs'] = k 58 | from_f.__signature__ = sig.replace(parameters=sigd.values()) 59 | return f 60 | return _f 61 | 62 | def pick(keys:Union[List, Set], obj:Dict) -> Dict: 63 | """ Return a NEW object containing `keys` from the original `obj` """ 64 | return {k: obj[k] for k in keys} 65 | 66 | def memoize(f): 67 | """Memoize a function. 68 | 69 | Use lookup table when the same inputs are passed to the function instead of running that function again 70 | """ 71 | memo = {} 72 | def helper(*x): 73 | if x not in memo: 74 | memo[x] = f(*x) 75 | return memo[x] 76 | return helper 77 | 78 | def assoc(k, v, orig): 79 | """Given an original dictionary orig, return a cloned dictionary with `k` set to `v`""" 80 | out = orig.copy() 81 | out[k] = v 82 | return out 83 | 84 | def make_unique(f): 85 | """The input function will only run and return if it hasn't seen its argument before. 86 | 87 | Otherwise, it will return `None`. 88 | """ 89 | s = set() 90 | def helper(x): 91 | if x in s: 92 | return None 93 | s.add(x) 94 | return f(x) 95 | 96 | return helper 97 | 98 | def flatten_(items, seqtypes=(list, tuple)): 99 | """Flattten an arbitrarily nested list IN PLACE""" 100 | for i, x in enumerate(items): 101 | while i < len(items) and isinstance(items[i], seqtypes): 102 | items[i:i+1] = items[i] 103 | return items -------------------------------------------------------------------------------- /spacyface/utils/sentence_extracting.py: -------------------------------------------------------------------------------- 1 | """Extractor functions to retrieve sentences by character chunks from a file 2 | 3 | This script contains the logic that allows the user to process and filter 4 | sentences of the original corpus. By default, this considers a minimum sentence 5 | length, and removes newlines and multiple consecutive spaces. 6 | 7 | Configuration for existing functionality is at the top of the file. Feel free to 8 | add new processing and/or filter functions. The "process_line" and "filter_line" 9 | functions contain the pipeline for processing the scripts as needed. 
10 | 11 | """ 12 | import regex as re 13 | import argparse 14 | from pathlib import Path 15 | from functools import partial 16 | from typing import Union 17 | 18 | MIN_LINE_LENGTH = 8 # words 19 | 20 | def parse_args(): 21 | parser = argparse.ArgumentParser() 22 | parser.add_argument("-f", "--file", help="Path to .txt file to analyze and annotate") 23 | parser.add_argument("-o", "--outdir", help="Path of directory in which to store the analyzed sentences as a .pckl") 24 | 25 | 26 | args = parser.parse_args() 27 | return args 28 | 29 | # ============================================================ 30 | # Helper functions 31 | # ============================================================ 32 | # String -> String 33 | def replace_newlines(s:str) -> str: 34 | return re.sub(r"\n+", r" ", s) 35 | 36 | # String -> String 37 | def replace_multispace(s:str) -> str: 38 | return re.sub(r"\s+", r" ", s) 39 | 40 | def is_short_sentence(s:str, min_len=8) -> str: 41 | """Returns True if the sentence has less than `min_len` number of words""" 42 | return len(s.split(' ')) < min_len 43 | 44 | def contains_char(char:str, s:str) -> str: 45 | return char in s 46 | 47 | # ============================================================ 48 | # Compilation functions 49 | # ============================================================ 50 | 51 | def process_line(line:str) -> str: 52 | """"Replaces newlines with spaces and removes multiple consecutive spaces from a chunk of file. 53 | 54 | Args: 55 | line: Chunk of text 56 | 57 | Returns: 58 | Input that has been stripped of newlines and multiple consecutive spaces. 59 | """ 60 | s = replace_multispace(replace_newlines(line)) 61 | return s 62 | 63 | def filter_line(line:str) -> bool: 64 | """Returns True if the sentence passes the MIN_LINE_LENGTH configuration 65 | 66 | Redefine this function with desired helper functions, returning true if you want to keep the line 67 | """ 68 | fails = is_short_sentence(line, MIN_LINE_LENGTH) 69 | 70 | return not fails 71 | 72 | # ============================================================ 73 | # Main Logic 74 | # ============================================================ 75 | 76 | def read_outcomes(chars:str) -> Union[str, None]: 77 | """From a chunk of characters, decide whether to return the processed characters or Nothing. 78 | 79 | If the input is the empty string "", raise StopIteration 80 | 81 | Args: 82 | chars: Chunk of text to process 83 | 84 | Returns: 85 | The processed chunk of text or nothing if the characters do not pass the filtering 86 | 87 | Raises: 88 | StopIteration: If the input is the empty string "", raise StopIteration 89 | """ 90 | 91 | if chars == '': raise StopIteration 92 | line = process_line(chars) 93 | if filter_line(line): return line 94 | return None 95 | 96 | def get_chars(n:int, f) -> Union[str, None]: 97 | """Extract `n` chars from opened file `f` 98 | 99 | Args: 100 | n: Number of characters to read from the opened file 101 | f: Opened file from the return of `open(fname)` 102 | 103 | Returns: 104 | The processed chunk of text or nothing if the characters do not pass the filtering 105 | 106 | Raises: 107 | This function does not raise any errors of its own, but can pass up the StopIteration exception 108 | from read_outcomes 109 | """ 110 | chars = f.read(n) 111 | return read_outcomes(chars) 112 | 113 | def get_line(f): 114 | """Given an open file, get the next line and process it. Handles 3 scenarios: 115 | 116 | 1. StopIteration indicates the opened file has reached the end 117 | 2. 
Return a processed line if it passes the filter 118 | 3. If the line does not pass the filter, return None 119 | """ 120 | line = f.readline() 121 | return read_outcomes(line) 122 | 123 | def read_on(reader, f): 124 | """Read from an open file `f` according to the function `reader` 125 | 126 | Args: 127 | reader: A unary function of signature (f: _io.TextIOWrapper) -> str 128 | f: An opened file, as returned by `open(fname)` 129 | 130 | Yields: 131 | A generator that returns lines defined by `reader` until the end of the file is reached. 132 | """ 133 | while True: 134 | try: 135 | line = reader(f) 136 | except StopIteration: 137 | break 138 | 139 | if line is not None: 140 | yield line 141 | 142 | 143 | def extract_chars(infile, n=10000): 144 | """Extract `n` characters in batches from opened `infile`""" 145 | reader = partial(get_chars, n) 146 | return read_on(reader, infile) 147 | 148 | def extract_lines(infile): 149 | """Given a file, yield the processed lines from that file""" 150 | with open(infile, 'r') as src: 151 | yield from read_on(get_line, src)  # keep the file open while the generator is consumed 152 | 153 | def extract_sentences_to_file(infile, outfname:str): 154 | """Extract sentences from a file into a new file indicated by `outfname`.""" 155 | out = open(outfname, 'x') 156 | 157 | linegen = extract_lines(infile) 158 | 159 | for line in linegen: 160 | out.write(line + "\n") 161 | 162 | out.close() 163 | 164 | def main(infile, outdir): 165 | """Main function for creating the outdir and saving the processed sentences to that file""" 166 | outfname = Path(infile).stem + '.txt' 167 | outdir = Path(outdir) 168 | outdir.mkdir(parents=True, exist_ok=True) 169 | outfile = outdir / outfname 170 | out_path = extract_sentences_to_file(infile, outfile) 171 | 172 | return out_path 173 | 174 | if __name__ == "__main__": 175 | args = parse_args() 176 | main(args.file, args.outdir) 177 | -------------------------------------------------------------------------------- /tests/EN_TEST_SENTS.py: -------------------------------------------------------------------------------- 1 | """A collection of english test sentences to use when testing the aligners""" 2 | 3 | SPACY_EN_TEST_SENTS = [ 4 | 'the LIFE', 5 | 'the LIFEST', 6 | 'the LIFESTPHSESDF', 7 | 'the LI FE ST', 8 | "I can't understand for the LIFE of me why we Aren't going home", 9 | "There is nothing I can say or do... that will me do what YOU want!!", 10 | "This ain't going to mess me up, Ain't it?", 11 | "It's tonsa fun in the whatve whatve@you@don't U.K.", 12 | "It's tonsa fun in the whatve whatve_you_dont U.K.", 13 | ] 14 | 15 | BROKEN_EN_TEST_SENTS = [ 16 | "It's tonsa fun in the whatve whatve-you-dont U.K.", 17 | "It's tonsa fun in the whatve whatve-you-done U.K.", 18 | ] 19 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | """Temporary init. 
This is not meant to be a package""" 2 | -------------------------------------------------------------------------------- /tests/test_aligner.py: -------------------------------------------------------------------------------- 1 | from spacyface import * 2 | import pytest 3 | 4 | def load_sample_en_sents(): 5 | from .EN_TEST_SENTS import SPACY_EN_TEST_SENTS 6 | return SPACY_EN_TEST_SENTS 7 | 8 | sentences = load_sample_en_sents() 9 | 10 | @pytest.mark.parametrize("model_name,alnr_class", 11 | [('bert-base-uncased', BertAligner), 12 | ('bert-base-cased', BertAligner), 13 | ('gpt2', GPT2Aligner), 14 | ('roberta-base', RobertaAligner), 15 | ('distilbert-base-uncased', DistilBertAligner), 16 | ('transfo-xl-wt103', TransfoXLAligner), 17 | ('xlnet-base-cased', XLNetAligner), 18 | ('xlm-mlm-en-2048', XLMAligner), 19 | ('ctrl', CTRLAligner), 20 | ('albert-base-v2', AlbertAligner), 21 | ('openai-gpt', OpenAIGPTAligner), 22 | ('xlm-roberta-base', XLMRobertaAligner), 23 | # ('t5-small', T5Aligner), # This does not currently work 24 | ]) 25 | def test_aligner(model_name, alnr_class): 26 | """NOTE: Will be obsolete when the aligner is able to work with transformer auto model""" 27 | a = alnr_class.from_pretrained(model_name) 28 | 29 | for s in sentences: 30 | mtokens = [m['token'] for m in a.meta_tokenize(s)] 31 | tokens = a.tokenize(s) 32 | assert tokens == mtokens, f"{tokens} \n {mtokens}" 33 | --------------------------------------------------------------------------------