├── .gitignore ├── LICENSE ├── README.md ├── build.sh ├── deploy.sh ├── release.sh ├── requirements.txt ├── setup.py └── src └── compcor ├── __init__.py ├── corpus_metrics.py ├── example.py ├── text_embedder.py ├── text_tokenizer_embedder.py └── utils.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | #PyCharm 3 | .idea 4 | 5 | __pycache__/ 6 | *.py[cod] 7 | *$py.class 8 | 9 | 10 | # Distribution / packaging 11 | .Python 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | wheels/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | cover/ 54 | 55 | .DS_Store 56 | 57 | 58 | 59 | 60 | 61 | # PyBuilder 62 | .pybuilder/ 63 | target/ 64 | 65 | 66 | 67 | 68 | 69 | # pyenv 70 | # For a library or package, you might want to ignore these files since the code is 71 | # intended to run in multiple environments; otherwise, check them in: 72 | # .python-version 73 | 74 | # pipenv 75 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 76 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 77 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 78 | # install all needed dependencies. 79 | #Pipfile.lock 80 | 81 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 82 | __pypackages__/ 83 | # Pyre type checker 84 | .pyre/ 85 | 86 | # pytype static type analyzer 87 | .pytype/ 88 | 89 | 90 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 
25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # compcor
2 | 
3 | ## TL;DR
4 | A Python library of similarity measures for quantifying the perceptual distance between text corpora.
5 | You can use compcor to easily calculate the perceptual distance between two sets of sentences using many classical and state-of-the-art metrics.
6 | 
7 | ## About
8 | The ability to compare the semantic similarity between text corpora is important in a variety of natural language processing applications.
9 | While one can reasonably measure the semantic distance between two individual sentences (e.g., by calculating the cosine distance between their sentence embeddings), measuring the dissimilarity between two text corpora remains a challenge.
10 | Corpus-level metrics seek to assess semantic similarity at the group level.
11 | Such metrics are essential for measuring how well corpus-based linguistic analysis generalizes from one dataset to another.
12 | In particular, recent advances in generative language models have led to increased interest in measuring the content similarity between human and generated language, as a means of comparing the quality of generative models.
13 | 
14 | ## Installation
15 | Requires Python >= 3.8:
16 | ```
17 | pip install compcor
18 | ```
19 | 
20 | ## Usage
21 | To calculate the perceptual distance between two corpora, the raw corpora can simply be provided as lists of strings:
22 | ```
23 | setA = ['can you tell me how i would normally say thank you as a french person', 'can you translate hi into spanish for me', 'can you translate milk into spanish for me', 'how can i say thank you very much in chinese', 'how can i thank somebody in italian', 'how could i say twin in chinese', 'how do germans say goodnight','how do i ask about the weather in chinese', 'how do i say hotel in finnish', 'how do i say bathroom in italian']
24 | setB = ['how can i say thank you very much in chinese', 'how can i thank somebody in italian', 'how could i say twin in chinese', 'how do they say tacos in mexico', 'how do they say yes in brazil', 'how do vietnameses people say hello', 'how do you say cat in spanish', 'how do you say dog in spanish', 'how do you say fast in spanish', 'how do you say good bye in french', 'how do you say goodbye in spanish', 'how do you say hello in french', 'how do you say hello in japanese', 'how do you say hello in mexico']
25 | ```
26 | 
27 | ### Using raw data
28 | Every metric provided in the package returns a single scalar capturing the perceptual distance between the two corpora:
29 | ```
30 | import compcor.corpus_metrics as corpus_metrics
31 | distance = corpus_metrics.fid_distance(corpus1=setA, corpus2=setB)
32 | ```
33 | In this case, the sentences in both sets are tokenized/embedded inside the function.
34 | 
35 | ### Using embeddings
36 | The metrics also accept corpora that are already embedded or tokenized, possibly with a custom embedding.
37 | ```
38 | distance = corpus_metrics.fid_distance(corpus1=embedA, corpus2=embedB)
39 | ```
40 | 
41 | We provide an embedding and tokenization utility class 'STTokenizerEmbedder', which is a shallow wrapper for the [sentence-transformers](https://www.sbert.net/)
42 | SentenceTransformer class.
43 | STTokenizerEmbedder implements two simple interfaces, 'TextEmbedder' and 'TextTokenizer'.
44 | The Hugging Face model can be selected using the 'embedding_model_name' parameter.
45 | By default, we use the model 'all-MiniLM-L6-v2', but any other Hugging Face model can be used.
46 | 
47 | ```
48 | from compcor.text_tokenizer_embedder import STTokenizerEmbedder
49 | embedder = STTokenizerEmbedder(embedding_model_name = "all-MiniLM-L12-v2")
50 | 
51 | embedA = embedder.embed_sentences(setA)
52 | embedB = embedder.embed_sentences(setB)
53 | distance = corpus_metrics.fid_distance(corpus1=embedA, corpus2=embedB)
54 | ```
55 | 
56 | ### Using tokens
57 | If a metric operates at the token level, the tokenized sentences should be provided instead of the sentence embeddings.
58 | ```
59 | tokensA = embedder.tokenize_sentences(setA)
60 | tokensB = embedder.tokenize_sentences(setB)
61 | 
62 | distance = corpus_metrics.chi_square_distance(corpus1=tokensA, corpus2=tokensB)
63 | print("chi_square_distance={}".format(distance))
64 | #chi_square_distance=0.9987177546738071
65 | ```
66 | ## Full Metric list
67 | Given two corpora of strings, we want to calculate the distance between them.
68 | `comparing-corpora` provides the following distance metrics.
69 | Most of the metrics operate on sentence-level embeddings, while the chi-squared and Zipf metrics operate on token frequencies.
70 | 
71 | | Name | Function | Representation | Description |
72 | |---------------------------------------------------|---|----------------|--------------------------------------------------|
73 | | Classifier |`classifier_distance`| embedding | Classifiability between reference and target |
74 | | [PR (precision and recall)](https://github.com/clovaai/generative-evaluation-prdc) |`pr_distance`| embedding | Assessing distributional precision and recall |
75 | | IRPR (information-retrieval precision and recall) |`IRPR_distance`| embedding | Average distance between closest sample pairs |
76 | | [DC (density and coverage)](https://github.com/clovaai/generative-evaluation-prdc) |`dc_distance`| embedding | Estimating manifold density and coverage |
77 | | [MAUVE](https://github.com/krishnap25/mauve) |`mauve_distance`| embedding | Quality and diversity via divergence frontiers |
78 | | FID (Frechet Inception Distance) |`fid_distance`| embedding | Wasserstein distance between densities |
79 | | Chi-squared ($\chi^2$) |`chi_square_distance`| token | Word/token count comparison |
80 | | Zipf |`zipf_distance`| token | Unigram rank-frequency statistics |
81 | | t-test |`ttest_distance`| embedding | T-test p-value on difference in elementwise means |
82 | | Medoid |`medoid_distance`| embedding | Cosine distance between corpus centroids |
83 | 
84 | ## Citation
85 | If you use this package in a scientific publication, please cite the following work, which studies the quality, time
86 | performance, and other properties of most of the metrics in this package.
87 | ``` 88 | @inproceedings{kour2022measuring, 89 | title={Measuring the Measuring Tools: An Automatic Evaluation of Semantic Metrics for Text Corpora}, 90 | author={Kour, George and Ackerman, Samuel and Farchi, Eitan and Raz, Orna and Carmeli, Boaz and Anaby-Tavor, Ateret}, 91 | booktitle={Proceedings of the 2nd Workshop on Natural Language Generation, Evaluation, and Metrics (GEM 2022)}, 92 | publisher = "Association for Computational Linguistics", 93 | year={2022} 94 | } 95 | ``` 96 | -------------------------------------------------------------------------------- /build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #python setup.py sdist bdist_wheel upload -r local 3 | echo Removing .env file from final distribution 4 | echo Cleaning ./dist folder 5 | rm -f ./dist/* 6 | echo Cleaning ./build folder 7 | rm -rf ./build 8 | echo Running build 9 | python3 setup.py sdist bdist_wheel 10 | -------------------------------------------------------------------------------- /deploy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | twine upload --repository testpypi --verbose dist/* -------------------------------------------------------------------------------- /release.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | twine upload --verbose dist/* -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy==1.23.0 2 | scipy==1.9.0 3 | scikit_learn==1.1.1 4 | prdc 5 | mauve-text 6 | sentence_transformers==2.2.1 -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # # 2 | # ***************************************************************** 3 | # # 4 | # IBM Confidential 5 | # # 6 | # Licensed Materials - Property of IBM 7 | # # 8 | # (C) Copyright IBM Corp. 2001, 2021 All Rights Reserved. 9 | # # 10 | # The source code for this program is not published or otherwise 11 | # divested of its trade secrets, irrespective of what has been 12 | # deposited with the U.S. Copyright Office. 13 | # # 14 | # US Government Users Restricted Rights - Use, duplication or 15 | # disclosure restricted by GSA ADP Schedule Contract with IBM Corp. 
16 | # # 17 | # ***************************************************************** 18 | import pathlib 19 | import pkg_resources 20 | from setuptools import setup, find_packages 21 | 22 | with pathlib.Path('requirements.txt').open() as requirements_txt: 23 | install_requires = [ 24 | str(requirement) 25 | for requirement 26 | in pkg_resources.parse_requirements(requirements_txt) 27 | ] 28 | 29 | setup(name='compcor', 30 | version='1.0.5', 31 | description='Corpus level similarity measures.', 32 | long_description=open("README.md").read(), 33 | long_description_content_type="text/markdown", 34 | keywords='Text Compare Corpus Metrics Measure Similarity', 35 | url='https://github.com/IBM/comparing-corpora', 36 | author='Language and Conversation, IBM Research AI', 37 | packages=find_packages('src', exclude=["*.tests", "*.tests.*", "tests.*", "tests"]), 38 | package_dir={'': 'src'}, 39 | install_requires=[install_requires], 40 | scripts=[], 41 | include_package_data=True, 42 | python_requires='>=3.8', 43 | zip_safe=False, 44 | classifiers=[ 45 | "Programming Language :: Python :: 3", 46 | "Operating System :: OS Independent" 47 | ], 48 | license='Apache Software License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0' 49 | ) 50 | -------------------------------------------------------------------------------- /src/compcor/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IBM/comparing-corpora/08776da705ce3cea9446028063ddaee6b60de29f/src/compcor/__init__.py -------------------------------------------------------------------------------- /src/compcor/corpus_metrics.py: -------------------------------------------------------------------------------- 1 | import random 2 | import statsmodels.stats.multitest 3 | from collections import Counter, namedtuple 4 | from operator import itemgetter 5 | 6 | import numpy as np 7 | from prdc import compute_prdc 8 | import prdc.prdc as pr 9 | import mauve 10 | 11 | from scipy.linalg import sqrtm 12 | from scipy.stats import chisquare, ttest_ind 13 | from sklearn import svm 14 | from sklearn.metrics.pairwise import cosine_similarity 15 | from scipy import spatial 16 | from sklearn.metrics import f1_score 17 | import scipy 18 | 19 | from compcor.text_embedder import TextTokenizer, TextEmbedder 20 | import compcor.utils as utils 21 | from compcor.utils import Corpus, TCorpus 22 | from compcor.text_tokenizer_embedder import STTokenizerEmbedder 23 | 24 | # threshold below which to match distances to 0 25 | ZERO_THRESH = 0.005 26 | 27 | PR = namedtuple('pr', 'precision recall distance') 28 | DC = namedtuple('dc', 'density coverage distance') 29 | 30 | 31 | def cosine_arccos_transform(c1, c2=None): 32 | # c1 and c2 are lists of input arrays 33 | 34 | def process(input): 35 | if input is not None: 36 | if isinstance(input, list) or isinstance(input, tuple): 37 | return np.vstack(input) 38 | else: 39 | if isinstance(input, np.ndarray): 40 | if len(input.shape) == 1: 41 | # make it have one row 42 | return input.reshape(1,-1) 43 | else: 44 | return input 45 | else: 46 | return input 47 | 48 | c1, c2 = process(c1), process(c2) 49 | 50 | cosine = np.arccos(np.clip(cosine_similarity(X=c1, Y=c2), -1,1)) / np.pi # if None will be X with itself 51 | # due to numeric precision, sometimes cosine distance between identical vectors is not 0 exactly 52 | 53 | cosine[ cosine <= ZERO_THRESH] = 0.0 54 | 55 | return cosine 56 | 57 | 58 | 59 | def ttest_distance(corpus1: Corpus, corpus2: Corpus, model: 
TextEmbedder = STTokenizerEmbedder()):
60 |     # two-sample t-test on each embedding dimension; the distance is one minus the mean p-value
61 |     if model is not None:
62 |         # embed the corpora (raw strings); pass model=None if they are already embedded
63 |         embeddings1, embeddings2 = utils.get_corpora_embeddings(corpus1, corpus2, model)
64 |     else:
65 |         embeddings1, embeddings2 = corpus1, corpus2
66 | 
67 |     res = ttest_ind(embeddings1, embeddings2)
68 |     return 1 - np.nanmean(res.pvalue)
69 | 
70 | 
71 | def IRPR_distance(corpus1: Corpus, corpus2: Corpus, model: TextEmbedder = STTokenizerEmbedder(), components=False):
72 |     if model is not None:
73 |         embeddings1, embeddings2 = utils.get_corpora_embeddings(corpus1, corpus2, model)
74 |     else:
75 |         embeddings1, embeddings2 = corpus1, corpus2
76 | 
77 |     table = cosine_arccos_transform(c1=embeddings1, c2=embeddings2)
78 |     precision = np.nansum(np.nanmin(table, axis=1)) / table.shape[1]
79 |     recall = np.nansum(np.nanmin(table, axis=0)) / table.shape[0]
80 |     distance = 2 * (precision * recall) / (precision + recall)
81 | 
82 |     return PR(precision, recall, distance) if components else distance
83 | 
84 | 
85 | def classifier_distance(corpus1: Corpus, corpus2: Corpus, model: TextEmbedder = STTokenizerEmbedder()):
86 |     # the distance is the F1 score of a classifier trained to predict corpus membership on a random train/test split of each corpus
87 |     if model is not None:
88 |         embeddings1, embeddings2 = utils.get_corpora_embeddings(corpus1, corpus2, model)
89 |     else:
90 |         embeddings1, embeddings2 = corpus1, corpus2
91 | 
92 |     corpus1_vecs = embeddings1
93 |     corpus1_train_indx = random.sample(range(len(embeddings1)), k=int(0.8 * len(embeddings1)))
94 |     corpus1_train = itemgetter(*corpus1_train_indx)(corpus1_vecs)
95 | 
96 |     corpus1_test_indx = set(range(len(embeddings1))) - (set(corpus1_train_indx))
97 |     corpus1_test = itemgetter(*corpus1_test_indx)(corpus1_vecs)
98 | 
99 |     corpus2_vecs = embeddings2
100 |     corpus2_train_indx = random.sample(range(len(embeddings2)), k=int(0.8 * len(embeddings2)))
101 |     corpus2_train = itemgetter(*corpus2_train_indx)(corpus2_vecs)
102 | 
103 |     corpus2_test_indx = set(range(len(embeddings2))) - (set(corpus2_train_indx))
104 |     corpus2_test = itemgetter(*corpus2_test_indx)(corpus2_vecs)
105 | 
106 |     train_x = corpus1_train + corpus2_train
107 |     train_y = [0] * len(corpus1_train) + [1] * len(corpus2_train)
108 |     test_x = corpus1_test + corpus2_test
109 |     test_y = [0] * len(corpus1_test) + [1] * len(corpus2_test)
110 |     clf = svm.SVC(random_state=1)
111 |     clf.fit(train_x, train_y)
112 | 
113 |     y_pred = clf.predict(test_x)
114 |     correct = f1_score(test_y, y_pred)
115 | 
116 |     return correct
117 | 
118 | 
119 | def medoid_distance(corpus1: Corpus, corpus2: Corpus, model: TextEmbedder = STTokenizerEmbedder()):
120 |     if model is not None:
121 |         embeddings1, embeddings2 = utils.get_corpora_embeddings(corpus1, corpus2, model)
122 |     else:
123 |         embeddings1, embeddings2 = corpus1, corpus2
124 | 
125 |     # compute the centroid (mean embedding) of each corpus
126 |     act1 = np.vstack(embeddings1)
127 |     act2 = np.vstack(embeddings2)
128 |     mu1 = np.mean(act1, axis=0)
129 |     mu2 = np.mean(act2, axis=0)
130 |     # cosine distance between the two centroids
131 |     cosine = spatial.distance.cosine(mu1, mu2)
132 |     return cosine
133 | 
134 | def median_distance(corpus1: Corpus, corpus2: Corpus, model: TextEmbedder = STTokenizerEmbedder()):
135 |     if model is not None:
136 |         embeddings1, embeddings2 = utils.get_corpora_embeddings(corpus1, corpus2, model)
137 |     else:
138 |         embeddings1, embeddings2 = corpus1, corpus2
139 | 
140 |     # compute the element-wise median embedding of each corpus
141 |     act1 = np.vstack(embeddings1)
142 |     act2 = np.vstack(embeddings2)
143 |     mu1 = np.median(act1, axis=0)
144 |     mu2 = np.median(act2, axis=0)
145 |     # cosine distance between the two medians
146 |     cosine = spatial.distance.cosine(mu1, mu2)
147 |     return cosine
148 | 
149 | def fid_distance(corpus1: Corpus, corpus2: Corpus, model: TextEmbedder = STTokenizerEmbedder()):
150 |     if model is not None:
151 |         embeddings1, embeddings2 = utils.get_corpora_embeddings(corpus1, corpus2, model)
152 |     else:
153 |         embeddings1, embeddings2 = corpus1, corpus2
154 |     # TODO: needs a note explaining what the resulting calculation is. Is it an overlap/probability as approximated by Gaussian curve
155 |     # Note that the paper says FID is a F1 score but this is a different calculation (unless it is in effect an F1 score)
156 |     if len(embeddings1) == 0 or len(embeddings2) == 0:
157 |         return 0
158 |     act1 = np.vstack(embeddings1)
159 |     act2 = np.vstack(embeddings2)
160 |     mu1 = np.mean(act1, axis=0)
161 |     sigma1 = np.cov(act1, rowvar=False)
162 |     mu2 = np.mean(act2, axis=0)
163 |     sigma2 = np.cov(act2, rowvar=False)
164 |     # calculate sum squared difference between means
165 |     # ssdiff = np.sum((mu1 - mu2) ** 2.0)
166 |     ssdiff = np.square(mu1 - mu2).sum()
167 |     # calculate sqrt of product between cov
168 |     covmean = sqrtm(sigma1.dot(sigma2))
169 |     # check and correct imaginary numbers from sqrt
170 |     if np.iscomplexobj(covmean):
171 |         covmean = covmean.real
172 |     # calculate score
173 |     fid = ssdiff + np.trace(sigma1 + sigma2 - 2.0 * covmean)
174 |     return fid
175 | 
176 | 
177 | def mauve_distance(corpus1: Corpus, corpus2: Corpus, model: TextEmbedder = STTokenizerEmbedder()):
178 |     if model is not None:
179 |         embeddings1, embeddings2 = utils.get_corpora_embeddings(corpus1, corpus2, model)
180 |     else:
181 |         embeddings1, embeddings2 = corpus1, corpus2
182 | 
183 |     out = mauve.compute_mauve(p_features=embeddings1, q_features=embeddings2, device_id=0, verbose=False)
184 |     return 1 - out.mauve
185 | 
186 | 
187 | def pr_distance(corpus1: Corpus, corpus2: Corpus, model: TextEmbedder = STTokenizerEmbedder(), nearest_k=5, cosine=False, components=False):
188 |     if model is not None:
189 |         embeddings1, embeddings2 = utils.get_corpora_embeddings(corpus1, corpus2, model)
190 |     else:
191 |         embeddings1, embeddings2 = corpus1, corpus2
192 | 
193 |     f = compute_prdc_cosine if cosine else compute_prdc
194 | 
195 |     metric = f(real_features=np.vstack(embeddings1),
196 |                fake_features=np.vstack(embeddings2),
197 |                nearest_k=nearest_k)
198 |     precision = np.clip(metric['precision'], 0, 1)
199 |     recall = np.clip(metric['recall'] + 1e-6, 0, 1)
200 |     distance = 1 - 2 * (precision * recall) / (precision + recall)
201 | 
202 |     return PR(precision, recall, distance) if components else distance
203 | 
204 | def dc_distance(corpus1: Corpus, corpus2: Corpus, model: TextEmbedder = STTokenizerEmbedder(), nearest_k=5, cosine=False, components=False):
205 |     if model is not None:
206 |         embeddings1, embeddings2 = utils.get_corpora_embeddings(corpus1, corpus2, model)
207 |     else:
208 |         embeddings1, embeddings2 = corpus1, corpus2
209 | 
210 |     f = compute_prdc_cosine if cosine else compute_prdc
211 | 
212 |     metric = f(real_features=np.vstack(embeddings1),
213 |                fake_features=np.vstack(embeddings2),
214 |                nearest_k=nearest_k)
215 | 
216 |     density = np.clip(metric['density'], 0, 1)
217 |     coverage = np.clip(metric['coverage'] + 1e-6, 0, 1)
218 |     distance = 1 - 2 * (density * coverage) / (density + coverage)
219 |     return DC(density, coverage, distance) if components else distance
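# ---------------------------------------------------------------------------
# Editor's note -- an illustrative sketch, not part of the original module:
# pr_distance and dc_distance (above) can also return their individual
# components as namedtuples when components=True. The helper below is
# hypothetical and only demonstrates that calling pattern; the corpus
# arguments are whatever you would normally pass to the metrics.
def _pr_dc_components_example(corpusA, corpusB):
    # PR and DC are the namedtuples defined at the top of this file
    pr_parts = pr_distance(corpus1=corpusA, corpus2=corpusB, components=True)
    dc_parts = dc_distance(corpus1=corpusA, corpus2=corpusB, components=True)
    return pr_parts.precision, pr_parts.recall, dc_parts.density, dc_parts.coverage
# ---------------------------------------------------------------------------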
220 | 221 | 222 | def chi_square_distance(corpus1: TCorpus, corpus2: TCorpus, tokenizer: TextTokenizer = STTokenizerEmbedder(), 223 | top=5000): 224 | # calculate p-value of chi-square test between frequency counts of top most frequent shared tokens between corpora 225 | # note, does not normalize for the size of the corpora, so most common tokens may reflect more the larger corpus 226 | tokens1, tokens2 = utils.get_corpora_tokens(corpus1, corpus2, tokenizer) 227 | 228 | if type(tokens1[0]) is list: 229 | tokens1 = [x for xs in tokens1 for x in xs] 230 | tokens2 = [x for xs in tokens2 for x in xs] 231 | 232 | c1_word_count = Counter(tokens1) 233 | c2_word_count = Counter(tokens2) 234 | common_words = set([word for word, freq in Counter(tokens1 + tokens2).most_common(top)]) 235 | sum_count = {word: c1_word_count[word] + c2_word_count[word] for word in common_words} 236 | 237 | N1 = sum([c1_word_count[word] for word in common_words]) 238 | N2 = sum([c2_word_count[word] for word in common_words]) 239 | N = N1 + N2 240 | o1 = [] 241 | o2 = [] 242 | e1 = [] 243 | e2 = [] 244 | for word in common_words: 245 | e1 += [sum_count[word] * N1 / N] 246 | o1 += [c1_word_count[word]] 247 | e2 += [sum_count[word] * N2 / N] 248 | o2 += [c2_word_count[word]] 249 | 250 | # low p value means two corpora are different. 251 | chi_stat = chisquare(f_exp=e1, f_obs=o1)[0] + chisquare(f_exp=e2, f_obs=o2)[0] 252 | return 1-scipy.stats.chi2.cdf(chi_stat, 2 * (len(common_words) - 1)) 253 | 254 | 255 | def zipf_distance(corpus1: TCorpus, corpus2: TCorpus, tokenizer: TextTokenizer = STTokenizerEmbedder()): 256 | tokens1, tokens2 = utils.get_corpora_tokens(corpus1, corpus2, tokenizer) 257 | 258 | zipf1 = utils.zipf_coeff(tokens1) 259 | zipf2 = utils.zipf_coeff(tokens2) 260 | return np.abs(zipf2 - zipf1) 261 | 262 | 263 | def Directed_Hausdorff_distance(corpus1: Corpus, corpus2: Corpus, model: TextEmbedder = STTokenizerEmbedder()): 264 | # calculate nearest distance from each element in one corpus to an element in the other 265 | # like IRPR except take mean not harmonic mean (F1-score) 266 | if model is not None: 267 | embeddings1, embeddings2 = utils.get_corpora_embeddings(corpus1, corpus2, model) 268 | else: 269 | embeddings1, embeddings2 = corpus1, corpus2 270 | 271 | table = cosine_arccos_transform(c1=embeddings1, c2=embeddings2) 272 | nearest_1to2 = np.nanmin(table, axis=1) # nearest in c2 from each in c1, min in each row 273 | nearest_2to1 = np.nanmin(table, axis=0) # nearest in c1 from each in c2, min in each column 274 | 275 | return np.mean([nearest_1to2.mean(), nearest_2to1.mean()]) 276 | 277 | 278 | def Energy_distance(corpus1: Corpus, corpus2: Corpus, model: TextEmbedder = STTokenizerEmbedder(), normalize=False): 279 | # https://en.wikipedia.org/wiki/Energy_distance 280 | if model is not None: 281 | embeddings1, embeddings2 = utils.get_corpora_embeddings(corpus1, corpus2, model) 282 | else: 283 | embeddings1, embeddings2 = corpus1, corpus2 284 | 285 | between = cosine_arccos_transform(c1=embeddings1, c2=embeddings2) 286 | within1 = cosine_arccos_transform(c1=embeddings1) 287 | within2 = cosine_arccos_transform(c1=embeddings2) 288 | A2 = 2 * between.mean() 289 | B = within1.mean() 290 | C = within2.mean() 291 | 292 | edist = A2 - B - C 293 | # E-coefficient of inhomogeneity is between 0 and 1 294 | return edist/A2 if normalize else np.sqrt(edist) 295 | 296 | 297 | def compute_nearest_neighbour_distances_cosine(real_features, nearest_k): 298 | d = cosine_arccos_transform(c1=real_features) # self distance 299 | 
return pr.get_kth_value(d, k=nearest_k + 1, axis=-1)
300 | 
301 | def compute_prdc_cosine(real_features, fake_features, nearest_k):
302 |     """
303 |     Computes precision, recall, density, and coverage given two manifolds.
304 | 
305 |     Args:
306 |         real_features: numpy.ndarray([N, feature_dim], dtype=np.float32)
307 |         fake_features: numpy.ndarray([M, feature_dim], dtype=np.float32)
308 |         nearest_k: int.
309 |     Returns:
310 |         dict of precision, recall, density, and coverage.
311 |     """
312 | 
313 |     print('Num real: {} Num fake: {}'
314 |           .format(real_features.shape[0], fake_features.shape[0]))
315 | 
316 |     real_nearest_neighbour_distances = compute_nearest_neighbour_distances_cosine(
317 |         real_features, nearest_k)
318 |     fake_nearest_neighbour_distances = compute_nearest_neighbour_distances_cosine(
319 |         fake_features, nearest_k)
320 |     distance_real_fake = cosine_arccos_transform(c1=real_features, c2=fake_features)
321 | 
322 |     # precision and recall are the fractions of samples in one set that fall within the
323 |     # kth-nearest-neighbour radius of at least one sample in the other set.
324 |     # each entry distance_real_fake[i, j] is compared against the kth-NN radius of real sample i;
325 |     # precision asks, for each fake sample (column), whether it lies inside any real sample's
326 |     # NN radius (i.e., whether it is covered by the estimated real manifold),
327 |     # then takes the mean over the fake samples.
328 |     # recall is the same with the roles of the real and fake samples reversed.
329 |     precision = (
330 |             distance_real_fake <
331 |             np.expand_dims(real_nearest_neighbour_distances, axis=1)
332 |     ).any(axis=0).mean()
333 | 
334 |     recall = (
335 |             distance_real_fake <
336 |             np.expand_dims(fake_nearest_neighbour_distances, axis=0)
337 |     ).any(axis=1).mean()
338 | 
339 |     density = (1.
/ float(nearest_k)) * ( 340 | distance_real_fake < 341 | np.expand_dims(real_nearest_neighbour_distances, axis=1) 342 | ).sum(axis=0).mean() 343 | 344 | coverage = ( 345 | distance_real_fake.min(axis=1) < 346 | real_nearest_neighbour_distances 347 | ).mean() 348 | 349 | return dict(precision=precision, recall=recall, 350 | density=density, coverage=coverage) -------------------------------------------------------------------------------- /src/compcor/example.py: -------------------------------------------------------------------------------- 1 | import compcor.corpus_metrics as corpus_metrics 2 | from compcor.text_tokenizer_embedder import STTokenizerEmbedder 3 | 4 | ### Example code 5 | 6 | setA = ['can you tell me how i would normally say thank you as a french person', 7 | 'can you translate hi into spanish for me', 8 | 'can you translate milk into spanish for me', 9 | 'how can i say thank you very much in chinese', 10 | 'how can i thank somebody in italian', 11 | 'how could i say twin in chinese', 12 | 'how do germans say goodnight', 13 | 'how do i ask about the weather in chinese', 14 | 'how do i say hotel in finnish', 15 | 'how do i say bathroom in italian'] 16 | 17 | setB = ['how can i say thank you very much in chinese', 18 | 'how can i thank somebody in italian', 19 | 'how could i say twin in chinese', 20 | 'how do they say tacos in mexico', 21 | 'how do they say yes in brazil', 22 | 'how do vietnameses people say hello', 23 | 'how do you say cat in spanish', 24 | 'how do you say dog in spanish', 25 | 'how do you say fast in spanish', 26 | 'how do you say good bye in french', 27 | 'how do you say goodbye in spanish', 28 | 'how do you say hello in french', 29 | 'how do you say hello in japanese', 30 | 'how do you say hello in mexico' 31 | ] 32 | 33 | distance = corpus_metrics.fid_distance(corpus1=setA, corpus2=setB) 34 | print("fid_distance={}".format(distance)) 35 | 36 | distance = corpus_metrics.dc_distance(corpus1=setA, corpus2=setB) 37 | print("dc_distance={}".format(distance)) 38 | 39 | distance = corpus_metrics.pr_distance(corpus1=setA, corpus2=setB) 40 | print("pr_distance={}".format(distance)) 41 | 42 | distance = corpus_metrics.mauve_distance(corpus1=setA, corpus2=setB) 43 | print("mauve_distance={}".format(distance)) 44 | 45 | 46 | distance = corpus_metrics.medoid_distance(corpus1=setA, corpus2=setB) 47 | print("medoid_distance={}".format(distance)) 48 | 49 | distance = corpus_metrics.ttest_distance(corpus1=setA, corpus2=setB) 50 | print("ttest_distance={}".format(distance)) 51 | 52 | distance = corpus_metrics.chi_square_distance(corpus1=setA, corpus2=setB) 53 | print("chi_square_distance={}".format(distance)) 54 | 55 | distance = corpus_metrics.zipf_distance(corpus1=setA, corpus2=setB) 56 | print("zipf_distance={}".format(distance)) 57 | 58 | print("Comparing corpora on embedding data...") 59 | 60 | embedder = STTokenizerEmbedder(embedding_model_name="all-MiniLM-L12-v2") 61 | embeddingA = embedder.embed_sentences(setA) 62 | embeddingB = embedder.embed_sentences(setB) 63 | 64 | distance = corpus_metrics.fid_distance(corpus1=embeddingA, corpus2=embeddingB) 65 | print("fid_distance={}".format(distance)) 66 | 67 | distance = corpus_metrics.dc_distance(corpus1=embeddingA, corpus2=embeddingB) 68 | print("dc_distance={}".format(distance)) 69 | 70 | distance = corpus_metrics.pr_distance(corpus1=embeddingA, corpus2=embeddingB) 71 | print("pr_distance={}".format(distance)) 72 | 73 | distance = corpus_metrics.mauve_distance(corpus1=embeddingA, corpus2=embeddingB) 74 | 
print("mauve_distance={}".format(distance)) 75 | 76 | distance = corpus_metrics.medoid_distance(corpus1=embeddingA, corpus2=embeddingB) 77 | print("medoid_distance={}".format(distance)) 78 | 79 | distance = corpus_metrics.ttest_distance(corpus1=embeddingA, corpus2=embeddingB) 80 | print("ttest_distance={}".format(distance)) 81 | 82 | 83 | 84 | embedder = STTokenizerEmbedder() 85 | tokensA = embedder.tokenize_sentences(setA) 86 | tokensB = embedder.tokenize_sentences(setB) 87 | 88 | distance = corpus_metrics.chi_square_distance(corpus1=tokensA, corpus2=tokensB) 89 | print("chi_square_distance={}".format(distance)) 90 | 91 | distance = corpus_metrics.zipf_distance(corpus1=tokensA, corpus2=tokensB) 92 | print("zipf_distance={}".format(distance)) 93 | -------------------------------------------------------------------------------- /src/compcor/text_embedder.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | 3 | 4 | class TextEmbedder: 5 | 6 | def embed_sentences(self, sentences: List[str], normalize=False) ->List[List[float]]: 7 | raise NotImplementedError() 8 | 9 | 10 | class TextTokenizer: 11 | def tokenize_sentences(self, sentences:List[str]) -> List[List[str]]: 12 | raise NotImplementedError() 13 | 14 | -------------------------------------------------------------------------------- /src/compcor/text_tokenizer_embedder.py: -------------------------------------------------------------------------------- 1 | 2 | from typing import List 3 | 4 | from compcor.text_embedder import TextEmbedder, TextTokenizer 5 | from sentence_transformers import SentenceTransformer 6 | from sklearn import preprocessing 7 | 8 | 9 | class STTokenizerEmbedder(TextEmbedder, TextTokenizer): 10 | def __init__(self, embedding_model_name="all-MiniLM-L6-v2"): 11 | self.model_name = embedding_model_name 12 | self.embedder: SentenceTransformer = None 13 | self.words_vectors = None 14 | 15 | def _initialize_embedder(self): 16 | if self.embedder is None: 17 | self.embedder = SentenceTransformer(self.model_name) 18 | 19 | def embed_sentences(self, sentences: List[str], normalize=False): 20 | self._initialize_embedder() 21 | 22 | vectors = self.embedder.encode(sentences, show_progress_bar=False) 23 | vectors = preprocessing.normalize(vectors) if normalize else vectors 24 | return vectors 25 | 26 | def tokenize_sentences(self, sentences): 27 | self._initialize_embedder() 28 | return self.embedder.tokenize(sentences)['input_ids'].numpy().tolist() 29 | 30 | def set_embedding_model(self, model:SentenceTransformer): 31 | self.embedder = model -------------------------------------------------------------------------------- /src/compcor/utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import operator 3 | from sklearn import linear_model 4 | from collections import Counter 5 | from typing import List, Union 6 | 7 | Corpus = Union[List[str], List[List[float]]] 8 | TCorpus = Union[List[str], List[List[str]]] 9 | 10 | 11 | def get_corpora_embeddings(corpus1: Corpus, corpus2: Corpus, model): 12 | ''' 13 | Returns token embedding of corpus1 and corpus2, if they are corpora of strings, 14 | or returns themselves, if they are already embedded (i.e., in float form) 15 | ''' 16 | if isinstance(corpus1[0], str): 17 | embeddings1 = model.embed_sentences(corpus1) 18 | else: 19 | embeddings1 = corpus1 20 | if isinstance(corpus2[0], str): 21 | embeddings2 = model.embed_sentences(corpus2) 22 | else: 23 | embeddings2 
= corpus2 24 | return embeddings1, embeddings2 25 | 26 | def get_corpora_tokens(corpus1:TCorpus, corpus2:TCorpus, model): 27 | if isinstance(corpus1[0], str): 28 | tokens1 = model.tokenize_sentences(corpus1) 29 | else: 30 | tokens1 = corpus1 31 | if isinstance(corpus2[0], str): 32 | tokens2 = model.tokenize_sentences(corpus2) 33 | else: 34 | tokens2 = corpus2 35 | return tokens1, tokens2 36 | 37 | 38 | def zipf_coeff(samples, min_num=1, max_num=5000, stretch_factor=15): 39 | # samples: list of lists of tokens; max_num: how many top frequency words to consider 40 | counter = Counter() 41 | for s in samples: 42 | counter.update(s) 43 | top_freqs = np.array(sorted(counter.values(), key=operator.neg)[:max_num]) 44 | # log scale overweights tail, so subsample the tail 45 | # this also helps the best-fit line look more reasonable when plotted in log-scale. 46 | xs, idxs_u = np.unique(np.round( 47 | stretch_factor * np.log(np.arange(min_num, min(len(counter), max_num)).astype(np.float64))) / stretch_factor, 48 | return_index=True) 49 | ys = np.log(top_freqs[idxs_u]) 50 | 51 | lr = linear_model.LinearRegression() 52 | lr.fit(xs.reshape(-1, 1), ys) 53 | slope = lr.coef_[0] 54 | 55 | return slope 56 | --------------------------------------------------------------------------------
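
Editor's addendum: the README states that the metrics accept custom embeddings. A minimal sketch of that hook is shown below; it is not part of the repository. The `HashingEmbedder` class, its dimensionality, and the tiny corpora are hypothetical choices for illustration only -- any object implementing `TextEmbedder.embed_sentences` (returning one vector per sentence) can be passed as the `model` argument of the embedding-based metrics, exactly as `STTokenizerEmbedder` is by default.

```
from typing import List

import numpy as np

import compcor.corpus_metrics as corpus_metrics
from compcor.text_embedder import TextEmbedder


class HashingEmbedder(TextEmbedder):
    """Toy embedder: hashed bag-of-words count vectors (for illustration only)."""

    def __init__(self, dim: int = 256):
        self.dim = dim

    def embed_sentences(self, sentences: List[str], normalize=False) -> List[List[float]]:
        # one row per sentence; each word increments a hashed bucket
        vectors = np.zeros((len(sentences), self.dim))
        for i, sentence in enumerate(sentences):
            for word in sentence.split():
                vectors[i, hash(word) % self.dim] += 1.0
        if normalize:
            vectors = vectors / np.maximum(np.linalg.norm(vectors, axis=1, keepdims=True), 1e-12)
        return vectors


setA = ['how do i say hotel in finnish', 'how do i say bathroom in italian']
setB = ['how do you say hello in french', 'how do you say hello in japanese']

# model=<custom embedder> embeds the raw strings inside the metric;
# model=None means the corpora are already embedded and are used as-is.
embedder = HashingEmbedder()
distance = corpus_metrics.medoid_distance(corpus1=setA, corpus2=setB, model=embedder)
print("medoid_distance (hashing embedder) = {}".format(distance))
```

Because the metric only sees vectors, the same pattern works for any embedding-based metric in `corpus_metrics` (e.g. `fid_distance`, `pr_distance`), although very small corpora such as the two-sentence ones above are only meaningful for the simpler centroid-style metrics.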