├── .gitignore ├── .travis.yml ├── .travis ├── download_iwnlp.sh └── requirements.txt ├── LICENSE.txt ├── README.md ├── iwnlp ├── __init__.py └── iwnlp_wrapper.py ├── setup.cfg ├── setup.py └── tests ├── __init__.py └── test_iwnlp_wrapper.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *.cover 47 | .hypothesis/ 48 | 49 | # Translations 50 | *.mo 51 | *.pot 52 | 53 | # Django stuff: 54 | *.log 55 | local_settings.py 56 | 57 | # Flask stuff: 58 | instance/ 59 | .webassets-cache 60 | 61 | # Scrapy stuff: 62 | .scrapy 63 | 64 | # Sphinx documentation 65 | docs/_build/ 66 | 67 | # PyBuilder 68 | target/ 69 | 70 | # Jupyter Notebook 71 | .ipynb_checkpoints 72 | 73 | # pyenv 74 | .python-version 75 | 76 | # celery beat schedule file 77 | celerybeat-schedule 78 | 79 | # SageMath parsed files 80 | *.sage.py 81 | 82 | # dotenv 83 | .env 84 | 85 | # virtualenv 86 | .venv 87 | venv/ 88 | ENV/ 89 | 90 | # Spyder project settings 91 | .spyderproject 92 | .spyproject 93 | 94 | # Rope project settings 95 | .ropeproject 96 | 97 | # mkdocs documentation 98 | /site 99 | 100 | # mypy 101 | .mypy_cache/ 102 | 103 | #PyCharm 104 | 
.idea/ 105 | MANIFEST 106 | 107 | data/ 108 | debug.py -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | python: 3 | - "3.4" 4 | 5 | install: "pip install -r .travis/requirements.txt" 6 | 7 | before_script: 8 | - chmod +x .travis/download_iwnlp.sh 9 | - .travis/download_iwnlp.sh 10 | 11 | script: 12 | - python3 -m py.test -------------------------------------------------------------------------------- /.travis/download_iwnlp.sh: -------------------------------------------------------------------------------- 1 | export DOWNLOADURL='http://lager.cs.uni-duesseldorf.de/NLP/IWNLP/IWNLP.Lemmatizer_20170501.zip' 2 | 3 | wget -O IWNLP.ZIP $DOWNLOADURL 4 | unzip IWNLP.ZIP -d data/ -------------------------------------------------------------------------------- /.travis/requirements.txt: -------------------------------------------------------------------------------- 1 | pytest==3.0.6 2 | pytest-pep8==1.0.6 -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Matthias Liebeck 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # IWNLP-py 2 | [![license](https://img.shields.io/github/license/mashape/apistatus.svg?maxAge=2592000)](https://github.com/Liebeck/IWNLP-py/blob/master/LICENSE.txt) 3 | [![Build Status](https://api.travis-ci.org/Liebeck/IWNLP-py.svg?branch=master)](https://travis-ci.com/Liebeck/IWNLP-py) 4 | 5 | IWNLP-py is a Python port of [IWNLP.Lemmatizer](https://github.com/Liebeck/IWNLP.Lemmatizer). IWNLP-py offers lemmatization of German words based on the [German Wiktionary](https://de.wiktionary.org/wiki) which is processed by [IWNLP](https://github.com/Liebeck/IWNLP). 6 | 7 | # How to setup IWNLP-py 8 | 1. Use pip to install iwnlp 9 | ``` bash 10 | pip install iwnlp 11 | ``` 12 | 2. Download the latest processed IWNLP dump from https://dbs.cs.uni-duesseldorf.de/datasets/iwnlp/IWNLP.Lemmatizer_20181001.zip and unzip it. 13 | 14 | # How to use IWNLP-py 15 | The Python package consists of the *IWNLPWrapper* class. **Keep in mind that the lemmatizer will return *None* for unknown words rather than guessing a lemma. If more than one lemma is found, all lemmas are returned.** In order to lemmatize single words, you can choose between two functions: 16 | 1. *lemmatize*: If you have access to POS tags of your words, you should use this function.
The POS tagset is [Google's universal POS tagset](http://universaldependencies.org/u/pos/). The lemmatization performance is tuned to be as high as possible, as listed [here](http://www.iwnlp.com/iwnlp_results.html). [Our paper](http://www.aclweb.org/anthology/P15-2068) describes our approach in more detail. Keep in mind that our results have improved a lot over the last two years. 17 | ``` python 18 | def lemmatize(self, word, pos_universal_google) 19 | ``` 20 | Usage: 21 | ``` python 22 | from iwnlp.iwnlp_wrapper import IWNLPWrapper 23 | lemmatizer = IWNLPWrapper(lemmatizer_path='data/IWNLP.Lemmatizer_20181001.json') 24 | lemmatizer.lemmatize('Lkws', pos_universal_google='NOUN') 25 | # ['Lkw'] 26 | lemmatizer.lemmatize('Onlineauftritten', pos_universal_google='NOUN') 27 | # ['Onlineauftritt'] 28 | lemmatizer.lemmatize('gespielt', pos_universal_google='VERB') 29 | # ['spielen'] 30 | ``` 31 | 32 | 2. *lemmatize_plain*: If you don't have access to POS tags or don't want to use them you can simply pass the word without any POS tag and retrieve any lemma that is present in IWNLP. You may also specify if you want the lookup to be **case sensitive**, which it is by default.
33 | ``` python 34 | def lemmatize_plain(self, word, ignore_case=False): 35 | ``` 36 | 37 | Usage: 38 | ``` python 39 | from iwnlp.iwnlp_wrapper import IWNLPWrapper 40 | lemmatizer = IWNLPWrapper(lemmatizer_path='data/IWNLP.Lemmatizer_20181001.json') 41 | lemmatizer.lemmatize_plain('birne') 42 | # no result since the noun is lowercased 43 | lemmatizer.lemmatize_plain('birne', ignore_case=True) 44 | # ['Birne'] 45 | lemmatizer.lemmatize_plain('zerstreut', ignore_case=True) 46 | # ['zerstreut', 'zerstreuen'] 47 | ``` 48 | 49 | # Citation 50 | Please include the following BibTeX if you use IWNLP in your work: 51 | ``` bash 52 | @InProceedings{liebeck-conrad:2015:ACL-IJCNLP, 53 | author = {Liebeck, Matthias and Conrad, Stefan}, 54 | title = {{IWNLP: Inverse Wiktionary for Natural Language Processing}}, 55 | booktitle = {Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing (Volume 2: Short Papers)}, 56 | year = {2015}, 57 | publisher = {Association for Computational Linguistics}, 58 | pages = {414--418}, 59 | url = {http://www.aclweb.org/anthology/P15-2068} 60 | } 61 | ``` 62 | -------------------------------------------------------------------------------- /iwnlp/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Liebeck/IWNLP-py/cb14fefe451846b60e98338a287beb5bca960d80/iwnlp/__init__.py -------------------------------------------------------------------------------- /iwnlp/iwnlp_wrapper.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import json 3 | import io 4 | 5 | 6 | class IWNLPWrapper(object): 7 | def __init__(self, lemmatizer_path='IWNLP.Lemmatizer_20170501.json'): 8 | self.logger = logging.getLogger() 9 | self.logger.setLevel(logging.DEBUG) 10 | self.logger.debug('Loading IWNLP lemmatizer') 11 | 
self.load(lemmatizer_path) 12 | self.logger.debug('IWNLP Lemmatizer loaded') 13 | 14 | def load(self, lemmatizer_path): 15 | """ 16 | This methods load the IWNLP.Lemmatizer json file and creates a dictionary 17 | of lowercased forms which maps each form to its possible lemmas. 18 | """ 19 | self.lemmatizer = {} 20 | with io.open(lemmatizer_path, encoding='utf-8') as data_file: 21 | raw = json.load(data_file) 22 | for entry in raw: 23 | self.lemmatizer[entry["Form"]] = entry["Lemmas"] 24 | self.apply_blacklist() 25 | 26 | def apply_blacklist(self): 27 | self.remove_entry("die", "Noun", "Adsorbens") # parser error in 20170501.json 28 | 29 | def remove_entry(self, form, pos, lemma): 30 | key = form.lower().strip() 31 | if key in self.lemmatizer: 32 | wrong_entry = {"POS": pos, "Form": form, "Lemma": lemma} 33 | if wrong_entry in self.lemmatizer[key]: 34 | self.lemmatizer[key].remove(wrong_entry) 35 | 36 | def contains_entry(self, word, pos=None, ignore_case=False): 37 | key = word.lower().strip() 38 | if not pos: 39 | if ignore_case: 40 | return key in self.lemmatizer 41 | else: 42 | return key in self.lemmatizer and any(filter(lambda x: x["Form"] == word, self.lemmatizer[key])) 43 | elif not isinstance(pos, list): 44 | if ignore_case: 45 | return key in self.lemmatizer and any(filter(lambda x: x["POS"] == pos, self.lemmatizer[key])) 46 | else: 47 | return key in self.lemmatizer and any( 48 | filter(lambda x: x["POS"] == pos and x["Form"] == word, self.lemmatizer[key])) 49 | else: 50 | for pos_entry in pos: 51 | if self.contains_entry(word, pos_entry, ignore_case): 52 | return True 53 | return False 54 | 55 | def get_entries(self, word, pos=None, ignore_case=False): 56 | entries = [] 57 | key = word.lower().strip() 58 | if not pos: 59 | if ignore_case: 60 | entries = self.lemmatizer[key] 61 | else: 62 | entries = list(filter(lambda x: x["Form"] == word, self.lemmatizer[key])) 63 | elif not isinstance(pos, list): 64 | if ignore_case: 65 | entries = list(filter(lambda 
x: x["POS"] == pos, self.lemmatizer[key])) 66 | else: 67 | entries = list(filter(lambda x: x["POS"] == pos and x["Form"] == word, self.lemmatizer[key])) 68 | else: 69 | for pos_entry in pos: 70 | if self.contains_entry(word, pos=pos_entry, ignore_case=ignore_case): 71 | entries.extend(self.get_entries(word, pos_entry, ignore_case)) 72 | return entries 73 | 74 | def get_lemmas(self, word, pos=None, ignore_case=False): 75 | """ 76 | Return all lemmas for a given word. This method assumes that the specified word is present in the dictionary 77 | :param word: Word that is present in the IWNLP lemmatizer 78 | """ 79 | entries = self.get_entries(word, pos, ignore_case) 80 | lemmas = list(set([entry["Lemma"] for entry in entries])) 81 | return sorted(lemmas) 82 | 83 | def lemmatize_plain(self, word, ignore_case=False): 84 | if self.contains_entry(word, ignore_case=ignore_case): 85 | return self.get_lemmas(word, ignore_case=ignore_case) 86 | else: 87 | return None 88 | 89 | def lemmatize(self, word, pos_universal_google): 90 | """ 91 | Python port of the lemmatize method, see https://github.com/Liebeck/IWNLP.Lemmatizer/blob/master/IWNLP.Lemmatizer.Predictor/IWNLPSentenceProcessor.cs 92 | 93 | """ 94 | if pos_universal_google == "NOUN": 95 | if self.contains_entry(word, "Noun"): 96 | return self.get_lemmas(word, "Noun") 97 | elif self.contains_entry(word, "X"): 98 | return self.get_lemmas(word, "X") 99 | elif self.contains_entry(word, "AdjectivalDeclension"): 100 | return self.get_lemmas(word, "AdjectivalDeclension") 101 | elif self.contains_entry(word, ["Noun", "X"], ignore_case=True): 102 | return self.get_lemmas(word, ["Noun", "X"], ignore_case=True) 103 | else: 104 | return None 105 | elif pos_universal_google == "ADJ": 106 | if self.contains_entry(word, "Adjective"): 107 | return self.get_lemmas(word, "Adjective") 108 | elif self.contains_entry(word, "Adjective", ignore_case=True): 109 | return self.get_lemmas(word, "Adjective", ignore_case=True) 110 | # Account for 
possible errors in the POS tagger. This order was fine-tuned in terms of accuracy 111 | elif self.contains_entry(word, "Noun", ignore_case=True): 112 | return self.get_lemmas(word, "Noun", ignore_case=True) 113 | elif self.contains_entry(word, "X", ignore_case=True): 114 | return self.get_lemmas(word, "X", ignore_case=True) 115 | elif self.contains_entry(word, "Verb", ignore_case=True): 116 | return self.get_lemmas(word, "Verb", ignore_case=True) 117 | else: 118 | return None 119 | elif pos_universal_google in ["VERB", "AUX"]: 120 | if self.contains_entry(word, "Verb", ignore_case=True): 121 | return self.get_lemmas(word, "Verb", ignore_case=True) 122 | else: 123 | return None 124 | else: 125 | return None 126 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | description-file = README.md 3 | 4 | [tool:pytest] 5 | pep8ignore = E501 -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from distutils.core import setup 2 | 3 | setup( 4 | name='iwnlp', 5 | packages=['iwnlp'], 6 | version='0.1.7', 7 | description='Python implementation for IWNLP', 8 | author='Matthias Liebeck', 9 | author_email='liebeck@cs.uni-duesseldorf.de', 10 | url='https://github.com/Liebeck/IWNLP-py', 11 | download_url='https://github.com/Liebeck/IWNLP-py/archive/0.1.6.tar.gz', 12 | keywords=['IWNLP', 'NLP', 'German', 'lemmatization', 'Wiktionary'], 13 | classifiers=[], 14 | ) 15 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Liebeck/IWNLP-py/cb14fefe451846b60e98338a287beb5bca960d80/tests/__init__.py 
-------------------------------------------------------------------------------- /tests/test_iwnlp_wrapper.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from iwnlp.iwnlp_wrapper import IWNLPWrapper 3 | 4 | 5 | class IWNLPWrapperTest(unittest.TestCase): 6 | @classmethod 7 | def setUpClass(self): 8 | self.iwnlp = IWNLPWrapper(lemmatizer_path='data/IWNLP.Lemmatizer_20170501.json') 9 | 10 | def test_lemmatize_plain_example1(self): 11 | predicted = self.iwnlp.lemmatize_plain('Hallo') 12 | self.assertEqual(predicted, ['Hallo']) 13 | 14 | def test_lemmatize_plain_example2(self): 15 | predicted = self.iwnlp.lemmatize_plain('Hallo', ignore_case=False) 16 | self.assertEqual(predicted, ['Hallo']) 17 | 18 | def test_lemmatize_plain_example3(self): 19 | predicted = self.iwnlp.lemmatize_plain('birne', ignore_case=False) 20 | self.assertEqual(predicted, None) 21 | 22 | def test_lemmatize_plain_example4(self): 23 | predicted = self.iwnlp.lemmatize_plain('birne', ignore_case=True) 24 | self.assertEqual(predicted, ['Birne']) 25 | 26 | def test_lemmatize_plain_example5(self): 27 | predicted = self.iwnlp.lemmatize_plain('gespielt') 28 | self.assertEqual(predicted, ['spielen']) 29 | 30 | def test_lemmatize_plain_example6(self): 31 | predicted = self.iwnlp.lemmatize_plain('schnell') 32 | self.assertEqual(predicted, ['schnell', 'schnellen']) 33 | 34 | def test_lemmatize_plain_example7(self): 35 | predicted = self.iwnlp.lemmatize_plain('Gartenhäuser') 36 | self.assertEqual(predicted, ['Gartenhaus']) 37 | 38 | def test_lemmatize_plain_example8(self): 39 | predicted = self.iwnlp.lemmatize_plain('ein') 40 | self.assertEqual(predicted, ['ein', 'einen']) 41 | 42 | def test_contains_entry_example1(self): 43 | self.assertEqual(self.iwnlp.contains_entry('Birne'), True) 44 | 45 | def test_contains_entry_example2(self): 46 | self.assertEqual(self.iwnlp.contains_entry('birne', ignore_case=False), False) 47 | 48 | def 
test_contains_entry_example3(self): 49 | self.assertEqual(self.iwnlp.contains_entry('birne', ignore_case=True), True) 50 | 51 | def test_contains_entry_example4(self): 52 | self.assertEqual(self.iwnlp.contains_entry('groko'), False) 53 | 54 | def test_contains_entry_example5(self): 55 | self.assertEqual(self.iwnlp.contains_entry('GroKo'), True) 56 | 57 | def test_contains_entry_example6(self): 58 | self.assertEqual(self.iwnlp.contains_entry('groko', ignore_case=True), True) 59 | 60 | def test_contains_entry_example7(self): 61 | self.assertEqual(self.iwnlp.contains_entry('groko', pos='Noun'), False) 62 | 63 | def test_contains_entry_example8(self): 64 | self.assertEqual(self.iwnlp.contains_entry('groko', pos='X'), False) 65 | 66 | def test_contains_entry_example9(self): 67 | self.assertEqual(self.iwnlp.contains_entry('groko', pos='AdjectivalDeclension'), False) 68 | 69 | def test_contains_entry_example10(self): 70 | self.assertEqual(self.iwnlp.contains_entry('groko', pos=["Noun", "X"], ignore_case=True), True) 71 | 72 | def test_lemmatize_example1(self): 73 | predicted = self.iwnlp.lemmatize('Lkws', pos_universal_google='NOUN') 74 | self.assertEqual(predicted, ['Lkw']) 75 | 76 | def test_lemmatize_example2(self): 77 | predicted = self.iwnlp.lemmatize('gespielt', pos_universal_google='VERB') 78 | self.assertEqual(predicted, ['spielen']) 79 | 80 | def test_get_lemmas_example1(self): 81 | predicted = self.iwnlp.get_lemmas('groko', pos=["Noun", "X"], ignore_case=True) 82 | self.assertEqual(predicted, ['GroKo']) 83 | --------------------------------------------------------------------------------