├── .github └── stale.yml ├── .gitignore ├── .travis.yml ├── LICENSE.txt ├── MANIFEST.in ├── README.md ├── bin └── cythonize.py ├── examples └── server.py ├── include ├── msvc9 │ └── stdint.h ├── murmurhash │ ├── MurmurHash2.h │ └── MurmurHash3.h └── numpy │ ├── __multiarray_api.h │ ├── __ufunc_api.h │ ├── _neighborhood_iterator_imp.h │ ├── _numpyconfig.h │ ├── arrayobject.h │ ├── arrayscalars.h │ ├── halffloat.h │ ├── multiarray_api.txt │ ├── ndarrayobject.h │ ├── ndarraytypes.h │ ├── noprefix.h │ ├── npy_3kcompat.h │ ├── npy_common.h │ ├── npy_cpu.h │ ├── npy_deprecated_api.h │ ├── npy_endian.h │ ├── npy_interrupt.h │ ├── npy_math.h │ ├── npy_no_deprecated_api.h │ ├── npy_os.h │ ├── numpyconfig.h │ ├── old_defines.h │ ├── oldnumeric.h │ ├── ufunc_api.txt │ ├── ufuncobject.h │ └── utils.h ├── neuralcoref ├── __init__.pxd ├── __init__.py ├── file_utils.py ├── neuralcoref.pxd ├── neuralcoref.pyx ├── tests │ ├── __init__.py │ └── test_neuralcoref.py └── train │ ├── __init__.pxd │ ├── __init__.py │ ├── algorithm.py │ ├── checkpoints │ └── .gitignore │ ├── compat.py │ ├── conll_processing_script │ └── compile_coref_data.sh │ ├── conllparser.py │ ├── data │ └── .gitignore │ ├── dataset.py │ ├── document.py │ ├── evaluator.py │ ├── learn.py │ ├── model.py │ ├── runs │ └── .gitignore │ ├── scorer │ ├── README.txt │ ├── lib │ │ ├── Algorithm │ │ │ ├── Munkres.pm │ │ │ └── README.Munkres │ │ ├── CorScorer.pm │ │ ├── Cwd.pm │ │ ├── Data │ │ │ └── Dumper.pm │ │ └── Math │ │ │ └── Combinatorics.pm │ ├── scorer.bat │ ├── scorer.pl │ └── test │ │ ├── CorefMetricTest.pm │ │ ├── CorefMetricTestConfig.pm │ │ ├── DataFiles │ │ ├── TC-A-1.response │ │ ├── TC-A-10.response │ │ ├── TC-A-11.response │ │ ├── TC-A-12.response │ │ ├── TC-A-13.response │ │ ├── TC-A-2.response │ │ ├── TC-A-3.response │ │ ├── TC-A-4.response │ │ ├── TC-A-5.response │ │ ├── TC-A-6.response │ │ ├── TC-A-7.response │ │ ├── TC-A-8.response │ │ ├── TC-A-9.response │ │ ├── TC-A.key │ │ ├── TC-B-1.response │ │ ├── 
TC-B.key │ │ ├── TC-C-1.response │ │ ├── TC-C.key │ │ ├── TC-D-1.response │ │ ├── TC-D.key │ │ ├── TC-E-1.response │ │ ├── TC-E.key │ │ ├── TC-F-1.response │ │ ├── TC-F.key │ │ ├── TC-G-1.response │ │ ├── TC-G.key │ │ ├── TC-H-1.response │ │ ├── TC-H.key │ │ ├── TC-I-1.response │ │ ├── TC-I.key │ │ ├── TC-J-1.response │ │ ├── TC-J.key │ │ ├── TC-K-1.response │ │ ├── TC-K.key │ │ ├── TC-L-1.response │ │ ├── TC-L.key │ │ ├── TC-M-1.response │ │ ├── TC-M-2.response │ │ ├── TC-M-3.response │ │ ├── TC-M-4.response │ │ ├── TC-M-5.response │ │ ├── TC-M-6.response │ │ ├── TC-M.key │ │ ├── TC-N-1.response │ │ ├── TC-N-2.response │ │ ├── TC-N-3.response │ │ ├── TC-N-4.response │ │ ├── TC-N-5.response │ │ ├── TC-N-6.response │ │ └── TC-N.key │ │ ├── TestCases.README │ │ └── test.pl │ ├── scorer_wrapper.pl │ ├── training.md │ ├── training_requirements.txt │ ├── utils.py │ └── weights │ ├── pair_mentions_bias_layer_0.npy │ ├── pair_mentions_bias_layer_1.npy │ ├── pair_mentions_bias_layer_2.npy │ ├── pair_mentions_bias_layer_3.npy │ ├── pair_mentions_bias_layer_4.npy │ ├── pair_mentions_weights_layer_0.npy │ ├── pair_mentions_weights_layer_1.npy │ ├── pair_mentions_weights_layer_2.npy │ ├── pair_mentions_weights_layer_3.npy │ ├── pair_mentions_weights_layer_4.npy │ ├── single_mention_bias_layer_0.npy │ ├── single_mention_bias_layer_1.npy │ ├── single_mention_bias_layer_2.npy │ ├── single_mention_bias_layer_3.npy │ ├── single_mention_bias_layer_4.npy │ ├── single_mention_weights_layer_0.npy │ ├── single_mention_weights_layer_1.npy │ ├── single_mention_weights_layer_2.npy │ ├── single_mention_weights_layer_3.npy │ ├── single_mention_weights_layer_4.npy │ ├── static_word_embeddings.npy │ ├── static_word_vocabulary.txt │ ├── tuned_word_embeddings.npy │ └── tuned_word_vocabulary.txt ├── requirements.txt └── setup.py /.github/stale.yml: -------------------------------------------------------------------------------- 1 | # Number of days of inactivity before an issue becomes stale 2 | 
daysUntilStale: 360 3 | # Number of days of inactivity before a stale issue is closed 4 | daysUntilClose: 60 5 | # Issues with these labels will never be considered stale 6 | exemptLabels: 7 | - pinned 8 | - security 9 | # Label to use when marking an issue as stale 10 | staleLabel: wontfix 11 | # Comment to post when marking an issue as stale. Set to `false` to disable 12 | markComment: > 13 | This issue has been automatically marked as stale because it has not had 14 | recent activity. It will be closed if no further activity occurs. Thank you 15 | for your contributions. 16 | # Comment to post when closing a stale issue. Set to `false` to disable 17 | closeComment: false 18 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # NeuralCoref 2 | /neuralcoref/train/runs/* 3 | /models/ 4 | /data/ 5 | 6 | # Cython / C extensions 7 | cythonize.json 8 | neuralcoref/*.html 9 | *.cpp 10 | *.so 11 | 12 | # Vim / VSCode / editors 13 | *.swp 14 | *.sw* 15 | Profile.prof 16 | .vscode 17 | .sass-cache 18 | .idea/* 19 | 20 | # Python 21 | /.Python 22 | .python-version 23 | __pycache__/ 24 | *.py[cod] 25 | .env/ 26 | .env* 27 | .~env/ 28 | .venv 29 | venv/ 30 | .dev 31 | .denv 32 | .pypyenv 33 | 34 | # Temporary files 35 | *.~* 36 | tmp/ 37 | 38 | # Installer logs 39 | pip-log.txt 40 | pip-delete-this-directory.txt 41 | 42 | # Distribution / packaging 43 | env/ 44 | build/ 45 | develop-eggs/ 46 | dist/ 47 | eggs/ 48 | lib/ 49 | lib64/ 50 | parts/ 51 | sdist/ 52 | var/ 53 | *.egg-info/ 54 | .installed.cfg 55 | *.egg 56 | .eggs 57 | MANIFEST 58 | 59 | # Windows 60 | *.bat 61 | Thumbs.db 62 | Desktop.ini 63 | 64 | # Mac OS X 65 | *.DS_Store 66 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | group: travis_latest 2 | 
language: python 3 | cache: pip 4 | python: 5 | - 3.6 6 | - 3.8 7 | #- nightly 8 | #- pypy 9 | #- pypy3 10 | matrix: 11 | allow_failures: 12 | - python: nightly 13 | - python: pypy 14 | - python: pypy3 15 | install: 16 | #- pip install -r requirements.txt 17 | - pip install flake8 # pytest # add another testing frameworks later 18 | before_script: 19 | # stop the build if there are Python syntax errors or undefined names 20 | - flake8 . --count --select=E901,E999,F821,F822,F823 --show-source --statistics 21 | # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide 22 | - flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics 23 | script: 24 | - true # pytest --capture=sys # add other tests here 25 | notifications: 26 | on_success: change 27 | on_failure: change # `always` will be the setting once code changes slow down 28 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 Huggingface Inc. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | recursive-include include *.h 2 | include LICENSE.txt 3 | include README.md -------------------------------------------------------------------------------- /bin/cythonize.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ cythonize.py 3 | 4 | Cythonize pyx files into C++ files as needed. 5 | 6 | Usage: cythonize.py [root] 7 | 8 | Checks pyx files to see if they have been changed relative to their 9 | corresponding C++ files. If they have, then runs cython on these files to 10 | recreate the C++ files. 11 | 12 | Additionally, checks pxd files and setup.py if they have been changed. If 13 | they have, rebuilds everything. 14 | 15 | Change detection based on file hashes stored in JSON format. 16 | 17 | For now, this script should be run by developers when changing Cython files 18 | and the resulting C++ files checked in, so that end-users (and Python-only 19 | developers) do not get the Cython dependencies. 20 | 21 | Based upon: 22 | 23 | https://raw.github.com/dagss/private-scipy-refactor/cythonize/cythonize.py 24 | https://raw.githubusercontent.com/numpy/numpy/master/tools/cythonize.py 25 | 26 | Note: this script does not check any of the dependent C++ libraries. 
import os
import sys
import json
import hashlib
import subprocess
import argparse


# JSON file (in the current directory) holding the last-seen file hashes.
HASH_FILE = "cythonize.json"


def process_pyx(fromfile, tofile):
    """Run Cython on *fromfile*, writing the generated C/C++ source to *tofile*.

    Raises Exception when Cython exits non-zero (or is older than 0.19), and
    OSError when Cython cannot be located at all.
    """
    print("Processing %s" % fromfile)
    try:
        from Cython.Compiler.Version import version as cython_version
        from distutils.version import LooseVersion

        if LooseVersion(cython_version) < LooseVersion("0.19"):
            raise Exception("Require Cython >= 0.19")

    except ImportError:
        # Cython may still be reachable as a standalone executable even when
        # the package cannot be imported; fall through and try running it.
        pass

    flags = ["--fast-fail"]
    if tofile.endswith(".cpp"):
        flags += ["--cplus"]

    try:
        try:
            r = subprocess.call(
                ["cython"] + flags + ["-o", tofile, fromfile], env=os.environ
            )  # See Issue #791
            if r != 0:
                raise Exception("Cython failed")
        except OSError:
            # There are ways of installing Cython that don't result in a cython
            # executable on the path, see gh-2397.
            r = subprocess.call(
                [
                    sys.executable,
                    "-c",
                    "import sys; from Cython.Compiler.Main import "
                    "setuptools_main as main; sys.exit(main())",
                ]
                + flags
                + ["-o", tofile, fromfile]
            )
            if r != 0:
                raise Exception("Cython failed")
    except OSError:
        raise OSError("Cython needs to be installed")


def preserve_cwd(path, func, *args):
    """Call ``func(*args)`` with the working directory set to *path*.

    The original working directory is restored even when *func* raises.
    """
    orig_cwd = os.getcwd()
    try:
        os.chdir(path)
        func(*args)
    finally:
        os.chdir(orig_cwd)


def load_hashes(filename):
    """Load the hash database from *filename*.

    Returns an empty dict when the file is missing or not valid JSON.
    """
    try:
        # Fix: use a context manager so the file handle is closed
        # deterministically (the original leaked `open(filename)`).
        with open(filename) as f:
            return json.load(f)
    except (ValueError, IOError):
        return {}


def save_hashes(hash_db, filename):
    """Persist *hash_db* as JSON to *filename*."""
    with open(filename, "w") as f:
        f.write(json.dumps(hash_db))


def get_hash(path):
    """Return the MD5 hex digest of the file at *path*.

    MD5 is used purely for change detection here, not for security.
    """
    # Fix: close the file handle deterministically (the original leaked
    # `open(path, "rb")`).
    with open(path, "rb") as f:
        return hashlib.md5(f.read()).hexdigest()


def hash_changed(base, path, db):
    """Return True when base/path's current hash differs from the one in *db*.

    A file with no entry in *db* also counts as changed.
    """
    full_path = os.path.normpath(os.path.join(base, path))
    return not get_hash(full_path) == db.get(full_path)


def hash_add(base, path, db):
    """Record the current hash of base/path in *db* (keyed by normalized path)."""
    full_path = os.path.normpath(os.path.join(base, path))
    db[full_path] = get_hash(full_path)


def process(base, filename, db):
    """Regenerate the .cpp for a .pyx file when it changed or its .cpp is missing.

    Updates *db* with the fresh hashes of both the .pyx and the generated .cpp.
    """
    root, ext = os.path.splitext(filename)
    if ext in [".pyx", ".cpp"]:
        if hash_changed(base, filename, db) or not os.path.isfile(
            os.path.join(base, root + ".cpp")
        ):
            preserve_cwd(base, process_pyx, root + ".pyx", root + ".cpp")
            hash_add(base, root + ".cpp", db)
            hash_add(base, root + ".pyx", db)


def check_changes(root, db):
    """Return True when setup.py or any .pxd file under *root* has changed.

    On change, *db* is reset to the fresh hashes of setup.py and the .pxd
    files only, which forces every .pyx to be re-cythonized afterwards.
    Note: expects setup.py to exist in the current working directory.
    """
    res = False
    new_db = {}

    setup_filename = "setup.py"
    hash_add(".", setup_filename, new_db)
    if hash_changed(".", setup_filename, db):
        res = True

    for base, _, files in os.walk(root):
        for filename in files:
            if filename.endswith(".pxd"):
                hash_add(base, filename, new_db)
                if hash_changed(base, filename, db):
                    res = True

    if res:
        db.clear()
        db.update(new_db)
    return res


def run(root):
    """Cythonize everything under *root* that changed.

    The hash database is saved even when a build step fails, so successful
    work is not repeated on the next invocation.
    """
    db = load_hashes(HASH_FILE)

    try:
        check_changes(root, db)
        for base, _, files in os.walk(root):
            for filename in files:
                process(base, filename, db)
    finally:
        save_hashes(db, HASH_FILE)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Cythonize pyx files into C++ files as needed"
    )
    parser.add_argument("root", help="root directory")
    args = parser.parse_args()
    run(args.root)
import json
from wsgiref.simple_server import make_server

import falcon
import spacy
import neuralcoref

# Python 3
unicode_ = str


class AllResource(object):
    """Falcon resource resolving coreferences in the ``text`` query parameter.

    GET / returns a JSON object with ``mentions``, ``clusters`` and
    ``resolved`` keys when the text contains coreferences, empty otherwise.
    """

    def __init__(self):
        self.nlp = spacy.load("en")
        neuralcoref.add_to_pipe(self.nlp)
        print("Server loaded")
        # Kept only for backward compatibility (mirrors the last reply).
        # Per-request data now lives in a local variable: storing it on the
        # shared resource instance is a race if the server ever handles
        # requests concurrently.
        self.response = None

    def on_get(self, req, resp):
        # Fix: build the reply in a local dict instead of mutating shared
        # instance state, so concurrent requests cannot clobber each other.
        response = {}

        text_param = req.get_param_as_list("text")
        print("text: ", text_param)
        if text_param is not None:
            text = ",".join(text_param) if isinstance(text_param, list) else text_param
            text = unicode_(text)
            doc = self.nlp(text)
            if doc._.has_coref:
                mentions = [
                    {
                        "start": mention.start_char,
                        "end": mention.end_char,
                        "text": mention.text,
                        "resolved": cluster.main.text,
                    }
                    for cluster in doc._.coref_clusters
                    for mention in cluster.mentions
                ]
                clusters = list(
                    list(span.text for span in cluster)
                    for cluster in doc._.coref_clusters
                )
                response["mentions"] = mentions
                response["clusters"] = clusters
                response["resolved"] = doc._.coref_resolved

        self.response = response  # backward-compatible mirror of the last reply
        resp.body = json.dumps(response)
        resp.content_type = "application/json"
        resp.append_header("Access-Control-Allow-Origin", "*")
        resp.status = falcon.HTTP_200


if __name__ == "__main__":
    RESSOURCE = AllResource()
    APP = falcon.API()
    APP.add_route("/", RESSOURCE)
    HTTPD = make_server("0.0.0.0", 8000, APP)
    HTTPD.serve_forever()
Chemeris 5 | // 6 | // Redistribution and use in source and binary forms, with or without 7 | // modification, are permitted provided that the following conditions are met: 8 | // 9 | // 1. Redistributions of source code must retain the above copyright notice, 10 | // this list of conditions and the following disclaimer. 11 | // 12 | // 2. Redistributions in binary form must reproduce the above copyright 13 | // notice, this list of conditions and the following disclaimer in the 14 | // documentation and/or other materials provided with the distribution. 15 | // 16 | // 3. Neither the name of the product nor the names of its contributors may 17 | // be used to endorse or promote products derived from this software 18 | // without specific prior written permission. 19 | // 20 | // THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 21 | // WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 22 | // MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO 23 | // EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 25 | // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 26 | // OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 27 | // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 28 | // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 29 | // ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | // 31 | /////////////////////////////////////////////////////////////////////////////// 32 | 33 | #ifndef _MSC_VER // [ 34 | #error "Use this header only with Microsoft Visual C++ compilers!" 
35 | #endif // _MSC_VER ] 36 | 37 | #ifndef _MSC_STDINT_H_ // [ 38 | #define _MSC_STDINT_H_ 39 | 40 | #if _MSC_VER > 1000 41 | #pragma once 42 | #endif 43 | 44 | #if _MSC_VER >= 1600 // [ 45 | #include 46 | #else // ] _MSC_VER >= 1600 [ 47 | 48 | #include 49 | 50 | // For Visual Studio 6 in C++ mode and for many Visual Studio versions when 51 | // compiling for ARM we should wrap include with 'extern "C++" {}' 52 | // or compiler give many errors like this: 53 | // error C2733: second C linkage of overloaded function 'wmemchr' not allowed 54 | #ifdef __cplusplus 55 | extern "C" { 56 | #endif 57 | # include 58 | #ifdef __cplusplus 59 | } 60 | #endif 61 | 62 | // Define _W64 macros to mark types changing their size, like intptr_t. 63 | #ifndef _W64 64 | # if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300 65 | # define _W64 __w64 66 | # else 67 | # define _W64 68 | # endif 69 | #endif 70 | 71 | 72 | // 7.18.1 Integer types 73 | 74 | // 7.18.1.1 Exact-width integer types 75 | 76 | // Visual Studio 6 and Embedded Visual C++ 4 doesn't 77 | // realize that, e.g. char has the same size as __int8 78 | // so we give up on __intX for them. 
79 | #if (_MSC_VER < 1300) 80 | typedef signed char int8_t; 81 | typedef signed short int16_t; 82 | typedef signed int int32_t; 83 | typedef unsigned char uint8_t; 84 | typedef unsigned short uint16_t; 85 | typedef unsigned int uint32_t; 86 | #else 87 | typedef signed __int8 int8_t; 88 | typedef signed __int16 int16_t; 89 | typedef signed __int32 int32_t; 90 | typedef unsigned __int8 uint8_t; 91 | typedef unsigned __int16 uint16_t; 92 | typedef unsigned __int32 uint32_t; 93 | #endif 94 | typedef signed __int64 int64_t; 95 | typedef unsigned __int64 uint64_t; 96 | 97 | 98 | // 7.18.1.2 Minimum-width integer types 99 | typedef int8_t int_least8_t; 100 | typedef int16_t int_least16_t; 101 | typedef int32_t int_least32_t; 102 | typedef int64_t int_least64_t; 103 | typedef uint8_t uint_least8_t; 104 | typedef uint16_t uint_least16_t; 105 | typedef uint32_t uint_least32_t; 106 | typedef uint64_t uint_least64_t; 107 | 108 | // 7.18.1.3 Fastest minimum-width integer types 109 | typedef int8_t int_fast8_t; 110 | typedef int16_t int_fast16_t; 111 | typedef int32_t int_fast32_t; 112 | typedef int64_t int_fast64_t; 113 | typedef uint8_t uint_fast8_t; 114 | typedef uint16_t uint_fast16_t; 115 | typedef uint32_t uint_fast32_t; 116 | typedef uint64_t uint_fast64_t; 117 | 118 | // 7.18.1.4 Integer types capable of holding object pointers 119 | #ifdef _WIN64 // [ 120 | typedef signed __int64 intptr_t; 121 | typedef unsigned __int64 uintptr_t; 122 | #else // _WIN64 ][ 123 | typedef _W64 signed int intptr_t; 124 | typedef _W64 unsigned int uintptr_t; 125 | #endif // _WIN64 ] 126 | 127 | // 7.18.1.5 Greatest-width integer types 128 | typedef int64_t intmax_t; 129 | typedef uint64_t uintmax_t; 130 | 131 | 132 | // 7.18.2 Limits of specified-width integer types 133 | 134 | #if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259 135 | 136 | // 7.18.2.1 Limits of exact-width integer types 137 | #define INT8_MIN 
((int8_t)_I8_MIN) 138 | #define INT8_MAX _I8_MAX 139 | #define INT16_MIN ((int16_t)_I16_MIN) 140 | #define INT16_MAX _I16_MAX 141 | #define INT32_MIN ((int32_t)_I32_MIN) 142 | #define INT32_MAX _I32_MAX 143 | #define INT64_MIN ((int64_t)_I64_MIN) 144 | #define INT64_MAX _I64_MAX 145 | #define UINT8_MAX _UI8_MAX 146 | #define UINT16_MAX _UI16_MAX 147 | #define UINT32_MAX _UI32_MAX 148 | #define UINT64_MAX _UI64_MAX 149 | 150 | // 7.18.2.2 Limits of minimum-width integer types 151 | #define INT_LEAST8_MIN INT8_MIN 152 | #define INT_LEAST8_MAX INT8_MAX 153 | #define INT_LEAST16_MIN INT16_MIN 154 | #define INT_LEAST16_MAX INT16_MAX 155 | #define INT_LEAST32_MIN INT32_MIN 156 | #define INT_LEAST32_MAX INT32_MAX 157 | #define INT_LEAST64_MIN INT64_MIN 158 | #define INT_LEAST64_MAX INT64_MAX 159 | #define UINT_LEAST8_MAX UINT8_MAX 160 | #define UINT_LEAST16_MAX UINT16_MAX 161 | #define UINT_LEAST32_MAX UINT32_MAX 162 | #define UINT_LEAST64_MAX UINT64_MAX 163 | 164 | // 7.18.2.3 Limits of fastest minimum-width integer types 165 | #define INT_FAST8_MIN INT8_MIN 166 | #define INT_FAST8_MAX INT8_MAX 167 | #define INT_FAST16_MIN INT16_MIN 168 | #define INT_FAST16_MAX INT16_MAX 169 | #define INT_FAST32_MIN INT32_MIN 170 | #define INT_FAST32_MAX INT32_MAX 171 | #define INT_FAST64_MIN INT64_MIN 172 | #define INT_FAST64_MAX INT64_MAX 173 | #define UINT_FAST8_MAX UINT8_MAX 174 | #define UINT_FAST16_MAX UINT16_MAX 175 | #define UINT_FAST32_MAX UINT32_MAX 176 | #define UINT_FAST64_MAX UINT64_MAX 177 | 178 | // 7.18.2.4 Limits of integer types capable of holding object pointers 179 | #ifdef _WIN64 // [ 180 | # define INTPTR_MIN INT64_MIN 181 | # define INTPTR_MAX INT64_MAX 182 | # define UINTPTR_MAX UINT64_MAX 183 | #else // _WIN64 ][ 184 | # define INTPTR_MIN INT32_MIN 185 | # define INTPTR_MAX INT32_MAX 186 | # define UINTPTR_MAX UINT32_MAX 187 | #endif // _WIN64 ] 188 | 189 | // 7.18.2.5 Limits of greatest-width integer types 190 | #define INTMAX_MIN INT64_MIN 191 | #define 
INTMAX_MAX INT64_MAX 192 | #define UINTMAX_MAX UINT64_MAX 193 | 194 | // 7.18.3 Limits of other integer types 195 | 196 | #ifdef _WIN64 // [ 197 | # define PTRDIFF_MIN _I64_MIN 198 | # define PTRDIFF_MAX _I64_MAX 199 | #else // _WIN64 ][ 200 | # define PTRDIFF_MIN _I32_MIN 201 | # define PTRDIFF_MAX _I32_MAX 202 | #endif // _WIN64 ] 203 | 204 | #define SIG_ATOMIC_MIN INT_MIN 205 | #define SIG_ATOMIC_MAX INT_MAX 206 | 207 | #ifndef SIZE_MAX // [ 208 | # ifdef _WIN64 // [ 209 | # define SIZE_MAX _UI64_MAX 210 | # else // _WIN64 ][ 211 | # define SIZE_MAX _UI32_MAX 212 | # endif // _WIN64 ] 213 | #endif // SIZE_MAX ] 214 | 215 | // WCHAR_MIN and WCHAR_MAX are also defined in 216 | #ifndef WCHAR_MIN // [ 217 | # define WCHAR_MIN 0 218 | #endif // WCHAR_MIN ] 219 | #ifndef WCHAR_MAX // [ 220 | # define WCHAR_MAX _UI16_MAX 221 | #endif // WCHAR_MAX ] 222 | 223 | #define WINT_MIN 0 224 | #define WINT_MAX _UI16_MAX 225 | 226 | #endif // __STDC_LIMIT_MACROS ] 227 | 228 | 229 | // 7.18.4 Limits of other integer types 230 | 231 | #if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260 232 | 233 | // 7.18.4.1 Macros for minimum-width integer constants 234 | 235 | #define INT8_C(val) val##i8 236 | #define INT16_C(val) val##i16 237 | #define INT32_C(val) val##i32 238 | #define INT64_C(val) val##i64 239 | 240 | #define UINT8_C(val) val##ui8 241 | #define UINT16_C(val) val##ui16 242 | #define UINT32_C(val) val##ui32 243 | #define UINT64_C(val) val##ui64 244 | 245 | // 7.18.4.2 Macros for greatest-width integer constants 246 | // These #ifndef's are needed to prevent collisions with . 247 | // Check out Issue 9 for the details. 
248 | #ifndef INTMAX_C // [ 249 | # define INTMAX_C INT64_C 250 | #endif // INTMAX_C ] 251 | #ifndef UINTMAX_C // [ 252 | # define UINTMAX_C UINT64_C 253 | #endif // UINTMAX_C ] 254 | 255 | #endif // __STDC_CONSTANT_MACROS ] 256 | 257 | #endif // _MSC_VER >= 1600 ] 258 | 259 | #endif // _MSC_STDINT_H_ ] 260 | -------------------------------------------------------------------------------- /include/murmurhash/MurmurHash2.h: -------------------------------------------------------------------------------- 1 | //----------------------------------------------------------------------------- 2 | // MurmurHash2 was written by Austin Appleby, and is placed in the public 3 | // domain. The author hereby disclaims copyright to this source code. 4 | 5 | #ifndef _MURMURHASH2_H_ 6 | #define _MURMURHASH2_H_ 7 | 8 | #include 9 | 10 | //----------------------------------------------------------------------------- 11 | 12 | uint32_t MurmurHash2 ( const void * key, int len, uint32_t seed ); 13 | uint64_t MurmurHash64A ( const void * key, int len, uint64_t seed ); 14 | uint64_t MurmurHash64B ( const void * key, int len, uint64_t seed ); 15 | uint32_t MurmurHash2A ( const void * key, int len, uint32_t seed ); 16 | uint32_t MurmurHashNeutral2 ( const void * key, int len, uint32_t seed ); 17 | uint32_t MurmurHashAligned2 ( const void * key, int len, uint32_t seed ); 18 | 19 | //----------------------------------------------------------------------------- 20 | 21 | #endif // _MURMURHASH2_H_ 22 | 23 | -------------------------------------------------------------------------------- /include/murmurhash/MurmurHash3.h: -------------------------------------------------------------------------------- 1 | //----------------------------------------------------------------------------- 2 | // MurmurHash3 was written by Austin Appleby, and is placed in the public 3 | // domain. The author hereby disclaims copyright to this source code. 
4 | 5 | #ifndef _MURMURHASH3_H_ 6 | #define _MURMURHASH3_H_ 7 | 8 | #include 9 | 10 | //----------------------------------------------------------------------------- 11 | #ifdef __cplusplus 12 | extern "C" { 13 | #endif 14 | 15 | 16 | void MurmurHash3_x86_32 ( const void * key, int len, uint32_t seed, void * out ); 17 | 18 | void MurmurHash3_x86_128 ( const void * key, int len, uint32_t seed, void * out ); 19 | 20 | void MurmurHash3_x64_128 ( const void * key, int len, uint32_t seed, void * out ); 21 | 22 | #ifdef __cplusplus 23 | } 24 | #endif 25 | 26 | //----------------------------------------------------------------------------- 27 | 28 | #endif // _MURMURHASH3_H_ 29 | -------------------------------------------------------------------------------- /include/numpy/_neighborhood_iterator_imp.h: -------------------------------------------------------------------------------- 1 | #ifndef _NPY_INCLUDE_NEIGHBORHOOD_IMP 2 | #error You should not include this header directly 3 | #endif 4 | /* 5 | * Private API (here for inline) 6 | */ 7 | static NPY_INLINE int 8 | _PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter); 9 | 10 | /* 11 | * Update to next item of the iterator 12 | * 13 | * Note: this simply increment the coordinates vector, last dimension 14 | * incremented first , i.e, for dimension 3 15 | * ... 16 | * -1, -1, -1 17 | * -1, -1, 0 18 | * -1, -1, 1 19 | * .... 20 | * -1, 0, -1 21 | * -1, 0, 0 22 | * .... 23 | * 0, -1, -1 24 | * 0, -1, 0 25 | * .... 
26 | */ 27 | #define _UPDATE_COORD_ITER(c) \ 28 | wb = iter->coordinates[c] < iter->bounds[c][1]; \ 29 | if (wb) { \ 30 | iter->coordinates[c] += 1; \ 31 | return 0; \ 32 | } \ 33 | else { \ 34 | iter->coordinates[c] = iter->bounds[c][0]; \ 35 | } 36 | 37 | static NPY_INLINE int 38 | _PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter) 39 | { 40 | npy_intp i, wb; 41 | 42 | for (i = iter->nd - 1; i >= 0; --i) { 43 | _UPDATE_COORD_ITER(i) 44 | } 45 | 46 | return 0; 47 | } 48 | 49 | /* 50 | * Version optimized for 2d arrays, manual loop unrolling 51 | */ 52 | static NPY_INLINE int 53 | _PyArrayNeighborhoodIter_IncrCoord2D(PyArrayNeighborhoodIterObject* iter) 54 | { 55 | npy_intp wb; 56 | 57 | _UPDATE_COORD_ITER(1) 58 | _UPDATE_COORD_ITER(0) 59 | 60 | return 0; 61 | } 62 | #undef _UPDATE_COORD_ITER 63 | 64 | /* 65 | * Advance to the next neighbour 66 | */ 67 | static NPY_INLINE int 68 | PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter) 69 | { 70 | _PyArrayNeighborhoodIter_IncrCoord (iter); 71 | iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates); 72 | 73 | return 0; 74 | } 75 | 76 | /* 77 | * Reset functions 78 | */ 79 | static NPY_INLINE int 80 | PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter) 81 | { 82 | npy_intp i; 83 | 84 | for (i = 0; i < iter->nd; ++i) { 85 | iter->coordinates[i] = iter->bounds[i][0]; 86 | } 87 | iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates); 88 | 89 | return 0; 90 | } 91 | -------------------------------------------------------------------------------- /include/numpy/_numpyconfig.h: -------------------------------------------------------------------------------- 1 | #define NPY_SIZEOF_SHORT SIZEOF_SHORT 2 | #define NPY_SIZEOF_INT SIZEOF_INT 3 | #define NPY_SIZEOF_LONG SIZEOF_LONG 4 | #define NPY_SIZEOF_FLOAT 4 5 | #define NPY_SIZEOF_COMPLEX_FLOAT 8 6 | #define NPY_SIZEOF_DOUBLE 8 7 | #define NPY_SIZEOF_COMPLEX_DOUBLE 16 8 | #define 
NPY_SIZEOF_LONGDOUBLE 16 9 | #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32 10 | #define NPY_SIZEOF_PY_INTPTR_T 8 11 | #define NPY_SIZEOF_PY_LONG_LONG 8 12 | #define NPY_SIZEOF_LONGLONG 8 13 | #define NPY_NO_SMP 0 14 | #define NPY_HAVE_DECL_ISNAN 15 | #define NPY_HAVE_DECL_ISINF 16 | #define NPY_HAVE_DECL_ISFINITE 17 | #define NPY_HAVE_DECL_SIGNBIT 18 | #define NPY_USE_C99_COMPLEX 1 19 | #define NPY_HAVE_COMPLEX_DOUBLE 1 20 | #define NPY_HAVE_COMPLEX_FLOAT 1 21 | #define NPY_HAVE_COMPLEX_LONG_DOUBLE 1 22 | #define NPY_USE_C99_FORMATS 1 23 | #define NPY_VISIBILITY_HIDDEN __attribute__((visibility("hidden"))) 24 | #define NPY_ABI_VERSION 0x01000009 25 | #define NPY_API_VERSION 0x00000007 26 | 27 | #ifndef __STDC_FORMAT_MACROS 28 | #define __STDC_FORMAT_MACROS 1 29 | #endif 30 | -------------------------------------------------------------------------------- /include/numpy/arrayobject.h: -------------------------------------------------------------------------------- 1 | 2 | /* This expects the following variables to be defined (besides 3 | the usual ones from pyconfig.h 4 | 5 | SIZEOF_LONG_DOUBLE -- sizeof(long double) or sizeof(double) if no 6 | long double is present on platform. 
7 | CHAR_BIT -- number of bits in a char (usually 8) 8 | (should be in limits.h) 9 | 10 | */ 11 | 12 | #ifndef Py_ARRAYOBJECT_H 13 | #define Py_ARRAYOBJECT_H 14 | 15 | #include "ndarrayobject.h" 16 | #include "npy_interrupt.h" 17 | 18 | #ifdef NPY_NO_PREFIX 19 | #include "noprefix.h" 20 | #endif 21 | 22 | #endif 23 | -------------------------------------------------------------------------------- /include/numpy/arrayscalars.h: -------------------------------------------------------------------------------- 1 | #ifndef _NPY_ARRAYSCALARS_H_ 2 | #define _NPY_ARRAYSCALARS_H_ 3 | 4 | #ifndef _MULTIARRAYMODULE 5 | typedef struct { 6 | PyObject_HEAD 7 | npy_bool obval; 8 | } PyBoolScalarObject; 9 | #endif 10 | 11 | 12 | typedef struct { 13 | PyObject_HEAD 14 | signed char obval; 15 | } PyByteScalarObject; 16 | 17 | 18 | typedef struct { 19 | PyObject_HEAD 20 | short obval; 21 | } PyShortScalarObject; 22 | 23 | 24 | typedef struct { 25 | PyObject_HEAD 26 | int obval; 27 | } PyIntScalarObject; 28 | 29 | 30 | typedef struct { 31 | PyObject_HEAD 32 | long obval; 33 | } PyLongScalarObject; 34 | 35 | 36 | typedef struct { 37 | PyObject_HEAD 38 | npy_longlong obval; 39 | } PyLongLongScalarObject; 40 | 41 | 42 | typedef struct { 43 | PyObject_HEAD 44 | unsigned char obval; 45 | } PyUByteScalarObject; 46 | 47 | 48 | typedef struct { 49 | PyObject_HEAD 50 | unsigned short obval; 51 | } PyUShortScalarObject; 52 | 53 | 54 | typedef struct { 55 | PyObject_HEAD 56 | unsigned int obval; 57 | } PyUIntScalarObject; 58 | 59 | 60 | typedef struct { 61 | PyObject_HEAD 62 | unsigned long obval; 63 | } PyULongScalarObject; 64 | 65 | 66 | typedef struct { 67 | PyObject_HEAD 68 | npy_ulonglong obval; 69 | } PyULongLongScalarObject; 70 | 71 | 72 | typedef struct { 73 | PyObject_HEAD 74 | npy_half obval; 75 | } PyHalfScalarObject; 76 | 77 | 78 | typedef struct { 79 | PyObject_HEAD 80 | float obval; 81 | } PyFloatScalarObject; 82 | 83 | 84 | typedef struct { 85 | PyObject_HEAD 86 | double obval; 87 
| } PyDoubleScalarObject; 88 | 89 | 90 | typedef struct { 91 | PyObject_HEAD 92 | npy_longdouble obval; 93 | } PyLongDoubleScalarObject; 94 | 95 | 96 | typedef struct { 97 | PyObject_HEAD 98 | npy_cfloat obval; 99 | } PyCFloatScalarObject; 100 | 101 | 102 | typedef struct { 103 | PyObject_HEAD 104 | npy_cdouble obval; 105 | } PyCDoubleScalarObject; 106 | 107 | 108 | typedef struct { 109 | PyObject_HEAD 110 | npy_clongdouble obval; 111 | } PyCLongDoubleScalarObject; 112 | 113 | 114 | typedef struct { 115 | PyObject_HEAD 116 | PyObject * obval; 117 | } PyObjectScalarObject; 118 | 119 | typedef struct { 120 | PyObject_HEAD 121 | npy_datetime obval; 122 | PyArray_DatetimeMetaData obmeta; 123 | } PyDatetimeScalarObject; 124 | 125 | typedef struct { 126 | PyObject_HEAD 127 | npy_timedelta obval; 128 | PyArray_DatetimeMetaData obmeta; 129 | } PyTimedeltaScalarObject; 130 | 131 | 132 | typedef struct { 133 | PyObject_HEAD 134 | char obval; 135 | } PyScalarObject; 136 | 137 | #define PyStringScalarObject PyStringObject 138 | #define PyUnicodeScalarObject PyUnicodeObject 139 | 140 | typedef struct { 141 | PyObject_VAR_HEAD 142 | char *obval; 143 | PyArray_Descr *descr; 144 | int flags; 145 | PyObject *base; 146 | } PyVoidScalarObject; 147 | 148 | /* Macros 149 | PyScalarObject 150 | PyArrType_Type 151 | are defined in ndarrayobject.h 152 | */ 153 | 154 | #define PyArrayScalar_False ((PyObject *)(&(_PyArrayScalar_BoolValues[0]))) 155 | #define PyArrayScalar_True ((PyObject *)(&(_PyArrayScalar_BoolValues[1]))) 156 | #define PyArrayScalar_FromLong(i) \ 157 | ((PyObject *)(&(_PyArrayScalar_BoolValues[((i)!=0)]))) 158 | #define PyArrayScalar_RETURN_BOOL_FROM_LONG(i) \ 159 | return Py_INCREF(PyArrayScalar_FromLong(i)), \ 160 | PyArrayScalar_FromLong(i) 161 | #define PyArrayScalar_RETURN_FALSE \ 162 | return Py_INCREF(PyArrayScalar_False), \ 163 | PyArrayScalar_False 164 | #define PyArrayScalar_RETURN_TRUE \ 165 | return Py_INCREF(PyArrayScalar_True), \ 166 | PyArrayScalar_True 167 
| 168 | #define PyArrayScalar_New(cls) \ 169 | Py##cls##ArrType_Type.tp_alloc(&Py##cls##ArrType_Type, 0) 170 | #define PyArrayScalar_VAL(obj, cls) \ 171 | ((Py##cls##ScalarObject *)obj)->obval 172 | #define PyArrayScalar_ASSIGN(obj, cls, val) \ 173 | PyArrayScalar_VAL(obj, cls) = val 174 | 175 | #endif 176 | -------------------------------------------------------------------------------- /include/numpy/halffloat.h: -------------------------------------------------------------------------------- 1 | #ifndef __NPY_HALFFLOAT_H__ 2 | #define __NPY_HALFFLOAT_H__ 3 | 4 | #include 5 | #include 6 | 7 | #ifdef __cplusplus 8 | extern "C" { 9 | #endif 10 | 11 | /* 12 | * Half-precision routines 13 | */ 14 | 15 | /* Conversions */ 16 | float npy_half_to_float(npy_half h); 17 | double npy_half_to_double(npy_half h); 18 | npy_half npy_float_to_half(float f); 19 | npy_half npy_double_to_half(double d); 20 | /* Comparisons */ 21 | int npy_half_eq(npy_half h1, npy_half h2); 22 | int npy_half_ne(npy_half h1, npy_half h2); 23 | int npy_half_le(npy_half h1, npy_half h2); 24 | int npy_half_lt(npy_half h1, npy_half h2); 25 | int npy_half_ge(npy_half h1, npy_half h2); 26 | int npy_half_gt(npy_half h1, npy_half h2); 27 | /* faster *_nonan variants for when you know h1 and h2 are not NaN */ 28 | int npy_half_eq_nonan(npy_half h1, npy_half h2); 29 | int npy_half_lt_nonan(npy_half h1, npy_half h2); 30 | int npy_half_le_nonan(npy_half h1, npy_half h2); 31 | /* Miscellaneous functions */ 32 | int npy_half_iszero(npy_half h); 33 | int npy_half_isnan(npy_half h); 34 | int npy_half_isinf(npy_half h); 35 | int npy_half_isfinite(npy_half h); 36 | int npy_half_signbit(npy_half h); 37 | npy_half npy_half_copysign(npy_half x, npy_half y); 38 | npy_half npy_half_spacing(npy_half h); 39 | npy_half npy_half_nextafter(npy_half x, npy_half y); 40 | 41 | /* 42 | * Half-precision constants 43 | */ 44 | 45 | #define NPY_HALF_ZERO (0x0000u) 46 | #define NPY_HALF_PZERO (0x0000u) 47 | #define NPY_HALF_NZERO 
(0x8000u) 48 | #define NPY_HALF_ONE (0x3c00u) 49 | #define NPY_HALF_NEGONE (0xbc00u) 50 | #define NPY_HALF_PINF (0x7c00u) 51 | #define NPY_HALF_NINF (0xfc00u) 52 | #define NPY_HALF_NAN (0x7e00u) 53 | 54 | #define NPY_MAX_HALF (0x7bffu) 55 | 56 | /* 57 | * Bit-level conversions 58 | */ 59 | 60 | npy_uint16 npy_floatbits_to_halfbits(npy_uint32 f); 61 | npy_uint16 npy_doublebits_to_halfbits(npy_uint64 d); 62 | npy_uint32 npy_halfbits_to_floatbits(npy_uint16 h); 63 | npy_uint64 npy_halfbits_to_doublebits(npy_uint16 h); 64 | 65 | #ifdef __cplusplus 66 | } 67 | #endif 68 | 69 | #endif 70 | -------------------------------------------------------------------------------- /include/numpy/ndarrayobject.h: -------------------------------------------------------------------------------- 1 | /* 2 | * DON'T INCLUDE THIS DIRECTLY. 3 | */ 4 | 5 | #ifndef NPY_NDARRAYOBJECT_H 6 | #define NPY_NDARRAYOBJECT_H 7 | #ifdef __cplusplus 8 | #define CONFUSE_EMACS { 9 | #define CONFUSE_EMACS2 } 10 | extern "C" CONFUSE_EMACS 11 | #undef CONFUSE_EMACS 12 | #undef CONFUSE_EMACS2 13 | /* ... otherwise a semi-smart identer (like emacs) tries to indent 14 | everything when you're typing */ 15 | #endif 16 | 17 | #include "ndarraytypes.h" 18 | 19 | /* Includes the "function" C-API -- these are all stored in a 20 | list of pointers --- one for each file 21 | The two lists are concatenated into one in multiarray. 
22 | 23 | They are available as import_array() 24 | */ 25 | 26 | #include "__multiarray_api.h" 27 | 28 | 29 | /* C-API that requries previous API to be defined */ 30 | 31 | #define PyArray_DescrCheck(op) (((PyObject*)(op))->ob_type==&PyArrayDescr_Type) 32 | 33 | #define PyArray_Check(op) PyObject_TypeCheck(op, &PyArray_Type) 34 | #define PyArray_CheckExact(op) (((PyObject*)(op))->ob_type == &PyArray_Type) 35 | 36 | #define PyArray_HasArrayInterfaceType(op, type, context, out) \ 37 | ((((out)=PyArray_FromStructInterface(op)) != Py_NotImplemented) || \ 38 | (((out)=PyArray_FromInterface(op)) != Py_NotImplemented) || \ 39 | (((out)=PyArray_FromArrayAttr(op, type, context)) != \ 40 | Py_NotImplemented)) 41 | 42 | #define PyArray_HasArrayInterface(op, out) \ 43 | PyArray_HasArrayInterfaceType(op, NULL, NULL, out) 44 | 45 | #define PyArray_IsZeroDim(op) (PyArray_Check(op) && \ 46 | (PyArray_NDIM((PyArrayObject *)op) == 0)) 47 | 48 | #define PyArray_IsScalar(obj, cls) \ 49 | (PyObject_TypeCheck(obj, &Py##cls##ArrType_Type)) 50 | 51 | #define PyArray_CheckScalar(m) (PyArray_IsScalar(m, Generic) || \ 52 | PyArray_IsZeroDim(m)) 53 | 54 | #define PyArray_IsPythonNumber(obj) \ 55 | (PyInt_Check(obj) || PyFloat_Check(obj) || PyComplex_Check(obj) || \ 56 | PyLong_Check(obj) || PyBool_Check(obj)) 57 | 58 | #define PyArray_IsPythonScalar(obj) \ 59 | (PyArray_IsPythonNumber(obj) || PyString_Check(obj) || \ 60 | PyUnicode_Check(obj)) 61 | 62 | #define PyArray_IsAnyScalar(obj) \ 63 | (PyArray_IsScalar(obj, Generic) || PyArray_IsPythonScalar(obj)) 64 | 65 | #define PyArray_CheckAnyScalar(obj) (PyArray_IsPythonScalar(obj) || \ 66 | PyArray_CheckScalar(obj)) 67 | 68 | #define PyArray_IsIntegerScalar(obj) (PyInt_Check(obj) \ 69 | || PyLong_Check(obj) \ 70 | || PyArray_IsScalar((obj), Integer)) 71 | 72 | 73 | #define PyArray_GETCONTIGUOUS(m) (PyArray_ISCONTIGUOUS(m) ? 
\ 74 | Py_INCREF(m), (m) : \ 75 | (PyArrayObject *)(PyArray_Copy(m))) 76 | 77 | #define PyArray_SAMESHAPE(a1,a2) ((PyArray_NDIM(a1) == PyArray_NDIM(a2)) && \ 78 | PyArray_CompareLists(PyArray_DIMS(a1), \ 79 | PyArray_DIMS(a2), \ 80 | PyArray_NDIM(a1))) 81 | 82 | #define PyArray_SIZE(m) PyArray_MultiplyList(PyArray_DIMS(m), PyArray_NDIM(m)) 83 | #define PyArray_NBYTES(m) (PyArray_ITEMSIZE(m) * PyArray_SIZE(m)) 84 | #define PyArray_FROM_O(m) PyArray_FromAny(m, NULL, 0, 0, 0, NULL) 85 | 86 | #define PyArray_FROM_OF(m,flags) PyArray_CheckFromAny(m, NULL, 0, 0, flags, \ 87 | NULL) 88 | 89 | #define PyArray_FROM_OT(m,type) PyArray_FromAny(m, \ 90 | PyArray_DescrFromType(type), 0, 0, 0, NULL); 91 | 92 | #define PyArray_FROM_OTF(m, type, flags) \ 93 | PyArray_FromAny(m, PyArray_DescrFromType(type), 0, 0, \ 94 | (((flags) & NPY_ARRAY_ENSURECOPY) ? \ 95 | ((flags) | NPY_ARRAY_DEFAULT) : (flags)), NULL) 96 | 97 | #define PyArray_FROMANY(m, type, min, max, flags) \ 98 | PyArray_FromAny(m, PyArray_DescrFromType(type), min, max, \ 99 | (((flags) & NPY_ARRAY_ENSURECOPY) ? 
\ 100 | (flags) | NPY_ARRAY_DEFAULT : (flags)), NULL) 101 | 102 | #define PyArray_ZEROS(m, dims, type, is_f_order) \ 103 | PyArray_Zeros(m, dims, PyArray_DescrFromType(type), is_f_order) 104 | 105 | #define PyArray_EMPTY(m, dims, type, is_f_order) \ 106 | PyArray_Empty(m, dims, PyArray_DescrFromType(type), is_f_order) 107 | 108 | #define PyArray_FILLWBYTE(obj, val) memset(PyArray_DATA(obj), val, \ 109 | PyArray_NBYTES(obj)) 110 | 111 | #define PyArray_REFCOUNT(obj) (((PyObject *)(obj))->ob_refcnt) 112 | #define NPY_REFCOUNT PyArray_REFCOUNT 113 | #define NPY_MAX_ELSIZE (2 * NPY_SIZEOF_LONGDOUBLE) 114 | 115 | #define PyArray_ContiguousFromAny(op, type, min_depth, max_depth) \ 116 | PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ 117 | max_depth, NPY_ARRAY_DEFAULT, NULL) 118 | 119 | #define PyArray_EquivArrTypes(a1, a2) \ 120 | PyArray_EquivTypes(PyArray_DESCR(a1), PyArray_DESCR(a2)) 121 | 122 | #define PyArray_EquivByteorders(b1, b2) \ 123 | (((b1) == (b2)) || (PyArray_ISNBO(b1) == PyArray_ISNBO(b2))) 124 | 125 | #define PyArray_SimpleNew(nd, dims, typenum) \ 126 | PyArray_New(&PyArray_Type, nd, dims, typenum, NULL, NULL, 0, 0, NULL) 127 | 128 | #define PyArray_SimpleNewFromData(nd, dims, typenum, data) \ 129 | PyArray_New(&PyArray_Type, nd, dims, typenum, NULL, \ 130 | data, 0, NPY_ARRAY_CARRAY, NULL) 131 | 132 | #define PyArray_SimpleNewFromDescr(nd, dims, descr) \ 133 | PyArray_NewFromDescr(&PyArray_Type, descr, nd, dims, \ 134 | NULL, NULL, 0, NULL) 135 | 136 | #define PyArray_ToScalar(data, arr) \ 137 | PyArray_Scalar(data, PyArray_DESCR(arr), (PyObject *)arr) 138 | 139 | 140 | /* These might be faster without the dereferencing of obj 141 | going on inside -- of course an optimizing compiler should 142 | inline the constants inside a for loop making it a moot point 143 | */ 144 | 145 | #define PyArray_GETPTR1(obj, i) ((void *)(PyArray_BYTES(obj) + \ 146 | (i)*PyArray_STRIDES(obj)[0])) 147 | 148 | #define PyArray_GETPTR2(obj, i, j) ((void 
*)(PyArray_BYTES(obj) + \ 149 | (i)*PyArray_STRIDES(obj)[0] + \ 150 | (j)*PyArray_STRIDES(obj)[1])) 151 | 152 | #define PyArray_GETPTR3(obj, i, j, k) ((void *)(PyArray_BYTES(obj) + \ 153 | (i)*PyArray_STRIDES(obj)[0] + \ 154 | (j)*PyArray_STRIDES(obj)[1] + \ 155 | (k)*PyArray_STRIDES(obj)[2])) 156 | 157 | #define PyArray_GETPTR4(obj, i, j, k, l) ((void *)(PyArray_BYTES(obj) + \ 158 | (i)*PyArray_STRIDES(obj)[0] + \ 159 | (j)*PyArray_STRIDES(obj)[1] + \ 160 | (k)*PyArray_STRIDES(obj)[2] + \ 161 | (l)*PyArray_STRIDES(obj)[3])) 162 | 163 | static NPY_INLINE void 164 | PyArray_XDECREF_ERR(PyArrayObject *arr) 165 | { 166 | if (arr != NULL) { 167 | if (PyArray_FLAGS(arr) & NPY_ARRAY_UPDATEIFCOPY) { 168 | PyArrayObject *base = (PyArrayObject *)PyArray_BASE(arr); 169 | PyArray_ENABLEFLAGS(base, NPY_ARRAY_WRITEABLE); 170 | PyArray_CLEARFLAGS(arr, NPY_ARRAY_UPDATEIFCOPY); 171 | } 172 | Py_DECREF(arr); 173 | } 174 | } 175 | 176 | #define PyArray_DESCR_REPLACE(descr) do { \ 177 | PyArray_Descr *_new_; \ 178 | _new_ = PyArray_DescrNew(descr); \ 179 | Py_XDECREF(descr); \ 180 | descr = _new_; \ 181 | } while(0) 182 | 183 | /* Copy should always return contiguous array */ 184 | #define PyArray_Copy(obj) PyArray_NewCopy(obj, NPY_CORDER) 185 | 186 | #define PyArray_FromObject(op, type, min_depth, max_depth) \ 187 | PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ 188 | max_depth, NPY_ARRAY_BEHAVED | \ 189 | NPY_ARRAY_ENSUREARRAY, NULL) 190 | 191 | #define PyArray_ContiguousFromObject(op, type, min_depth, max_depth) \ 192 | PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ 193 | max_depth, NPY_ARRAY_DEFAULT | \ 194 | NPY_ARRAY_ENSUREARRAY, NULL) 195 | 196 | #define PyArray_CopyFromObject(op, type, min_depth, max_depth) \ 197 | PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ 198 | max_depth, NPY_ARRAY_ENSURECOPY | \ 199 | NPY_ARRAY_DEFAULT | \ 200 | NPY_ARRAY_ENSUREARRAY, NULL) 201 | 202 | #define PyArray_Cast(mp, type_num) \ 203 | 
PyArray_CastToType(mp, PyArray_DescrFromType(type_num), 0) 204 | 205 | #define PyArray_Take(ap, items, axis) \ 206 | PyArray_TakeFrom(ap, items, axis, NULL, NPY_RAISE) 207 | 208 | #define PyArray_Put(ap, items, values) \ 209 | PyArray_PutTo(ap, items, values, NPY_RAISE) 210 | 211 | /* Compatibility with old Numeric stuff -- don't use in new code */ 212 | 213 | #define PyArray_FromDimsAndData(nd, d, type, data) \ 214 | PyArray_FromDimsAndDataAndDescr(nd, d, PyArray_DescrFromType(type), \ 215 | data) 216 | 217 | 218 | /* 219 | Check to see if this key in the dictionary is the "title" 220 | entry of the tuple (i.e. a duplicate dictionary entry in the fields 221 | dict. 222 | */ 223 | 224 | #define NPY_TITLE_KEY(key, value) ((PyTuple_GET_SIZE((value))==3) && \ 225 | (PyTuple_GET_ITEM((value), 2) == (key))) 226 | 227 | 228 | /* Define python version independent deprecation macro */ 229 | 230 | #if PY_VERSION_HEX >= 0x02050000 231 | #define DEPRECATE(msg) PyErr_WarnEx(PyExc_DeprecationWarning,msg,1) 232 | #define DEPRECATE_FUTUREWARNING(msg) PyErr_WarnEx(PyExc_FutureWarning,msg,1) 233 | #else 234 | #define DEPRECATE(msg) PyErr_Warn(PyExc_DeprecationWarning,msg) 235 | #define DEPRECATE_FUTUREWARNING(msg) PyErr_Warn(PyExc_FutureWarning,msg) 236 | #endif 237 | 238 | 239 | #ifdef __cplusplus 240 | } 241 | #endif 242 | 243 | 244 | #endif /* NPY_NDARRAYOBJECT_H */ 245 | -------------------------------------------------------------------------------- /include/numpy/noprefix.h: -------------------------------------------------------------------------------- 1 | #ifndef NPY_NOPREFIX_H 2 | #define NPY_NOPREFIX_H 3 | 4 | /* 5 | * You can directly include noprefix.h as a backward 6 | * compatibility measure 7 | */ 8 | #ifndef NPY_NO_PREFIX 9 | #include "ndarrayobject.h" 10 | #include "npy_interrupt.h" 11 | #endif 12 | 13 | #define SIGSETJMP NPY_SIGSETJMP 14 | #define SIGLONGJMP NPY_SIGLONGJMP 15 | #define SIGJMP_BUF NPY_SIGJMP_BUF 16 | 17 | #define MAX_DIMS NPY_MAXDIMS 18 | 19 | 
#define longlong npy_longlong 20 | #define ulonglong npy_ulonglong 21 | #define Bool npy_bool 22 | #define longdouble npy_longdouble 23 | #define byte npy_byte 24 | 25 | #ifndef _BSD_SOURCE 26 | #define ushort npy_ushort 27 | #define uint npy_uint 28 | #define ulong npy_ulong 29 | #endif 30 | 31 | #define ubyte npy_ubyte 32 | #define ushort npy_ushort 33 | #define uint npy_uint 34 | #define ulong npy_ulong 35 | #define cfloat npy_cfloat 36 | #define cdouble npy_cdouble 37 | #define clongdouble npy_clongdouble 38 | #define Int8 npy_int8 39 | #define UInt8 npy_uint8 40 | #define Int16 npy_int16 41 | #define UInt16 npy_uint16 42 | #define Int32 npy_int32 43 | #define UInt32 npy_uint32 44 | #define Int64 npy_int64 45 | #define UInt64 npy_uint64 46 | #define Int128 npy_int128 47 | #define UInt128 npy_uint128 48 | #define Int256 npy_int256 49 | #define UInt256 npy_uint256 50 | #define Float16 npy_float16 51 | #define Complex32 npy_complex32 52 | #define Float32 npy_float32 53 | #define Complex64 npy_complex64 54 | #define Float64 npy_float64 55 | #define Complex128 npy_complex128 56 | #define Float80 npy_float80 57 | #define Complex160 npy_complex160 58 | #define Float96 npy_float96 59 | #define Complex192 npy_complex192 60 | #define Float128 npy_float128 61 | #define Complex256 npy_complex256 62 | #define intp npy_intp 63 | #define uintp npy_uintp 64 | #define datetime npy_datetime 65 | #define timedelta npy_timedelta 66 | 67 | #define SIZEOF_INTP NPY_SIZEOF_INTP 68 | #define SIZEOF_UINTP NPY_SIZEOF_UINTP 69 | #define SIZEOF_DATETIME NPY_SIZEOF_DATETIME 70 | #define SIZEOF_TIMEDELTA NPY_SIZEOF_TIMEDELTA 71 | 72 | #define LONGLONG_FMT NPY_LONGLONG_FMT 73 | #define ULONGLONG_FMT NPY_ULONGLONG_FMT 74 | #define LONGLONG_SUFFIX NPY_LONGLONG_SUFFIX 75 | #define ULONGLONG_SUFFIX NPY_ULONGLONG_SUFFIX 76 | 77 | #define MAX_INT8 127 78 | #define MIN_INT8 -128 79 | #define MAX_UINT8 255 80 | #define MAX_INT16 32767 81 | #define MIN_INT16 -32768 82 | #define MAX_UINT16 65535 83 | 
#define MAX_INT32 2147483647 84 | #define MIN_INT32 (-MAX_INT32 - 1) 85 | #define MAX_UINT32 4294967295U 86 | #define MAX_INT64 LONGLONG_SUFFIX(9223372036854775807) 87 | #define MIN_INT64 (-MAX_INT64 - LONGLONG_SUFFIX(1)) 88 | #define MAX_UINT64 ULONGLONG_SUFFIX(18446744073709551615) 89 | #define MAX_INT128 LONGLONG_SUFFIX(85070591730234615865843651857942052864) 90 | #define MIN_INT128 (-MAX_INT128 - LONGLONG_SUFFIX(1)) 91 | #define MAX_UINT128 ULONGLONG_SUFFIX(170141183460469231731687303715884105728) 92 | #define MAX_INT256 LONGLONG_SUFFIX(57896044618658097711785492504343953926634992332820282019728792003956564819967) 93 | #define MIN_INT256 (-MAX_INT256 - LONGLONG_SUFFIX(1)) 94 | #define MAX_UINT256 ULONGLONG_SUFFIX(115792089237316195423570985008687907853269984665640564039457584007913129639935) 95 | 96 | #define MAX_BYTE NPY_MAX_BYTE 97 | #define MIN_BYTE NPY_MIN_BYTE 98 | #define MAX_UBYTE NPY_MAX_UBYTE 99 | #define MAX_SHORT NPY_MAX_SHORT 100 | #define MIN_SHORT NPY_MIN_SHORT 101 | #define MAX_USHORT NPY_MAX_USHORT 102 | #define MAX_INT NPY_MAX_INT 103 | #define MIN_INT NPY_MIN_INT 104 | #define MAX_UINT NPY_MAX_UINT 105 | #define MAX_LONG NPY_MAX_LONG 106 | #define MIN_LONG NPY_MIN_LONG 107 | #define MAX_ULONG NPY_MAX_ULONG 108 | #define MAX_LONGLONG NPY_MAX_LONGLONG 109 | #define MIN_LONGLONG NPY_MIN_LONGLONG 110 | #define MAX_ULONGLONG NPY_MAX_ULONGLONG 111 | #define MIN_DATETIME NPY_MIN_DATETIME 112 | #define MAX_DATETIME NPY_MAX_DATETIME 113 | #define MIN_TIMEDELTA NPY_MIN_TIMEDELTA 114 | #define MAX_TIMEDELTA NPY_MAX_TIMEDELTA 115 | 116 | #define SIZEOF_LONGDOUBLE NPY_SIZEOF_LONGDOUBLE 117 | #define SIZEOF_LONGLONG NPY_SIZEOF_LONGLONG 118 | #define SIZEOF_HALF NPY_SIZEOF_HALF 119 | #define BITSOF_BOOL NPY_BITSOF_BOOL 120 | #define BITSOF_CHAR NPY_BITSOF_CHAR 121 | #define BITSOF_SHORT NPY_BITSOF_SHORT 122 | #define BITSOF_INT NPY_BITSOF_INT 123 | #define BITSOF_LONG NPY_BITSOF_LONG 124 | #define BITSOF_LONGLONG NPY_BITSOF_LONGLONG 125 | #define BITSOF_HALF 
NPY_BITSOF_HALF 126 | #define BITSOF_FLOAT NPY_BITSOF_FLOAT 127 | #define BITSOF_DOUBLE NPY_BITSOF_DOUBLE 128 | #define BITSOF_LONGDOUBLE NPY_BITSOF_LONGDOUBLE 129 | #define BITSOF_DATETIME NPY_BITSOF_DATETIME 130 | #define BITSOF_TIMEDELTA NPY_BITSOF_TIMEDELTA 131 | 132 | #define _pya_malloc PyArray_malloc 133 | #define _pya_free PyArray_free 134 | #define _pya_realloc PyArray_realloc 135 | 136 | #define BEGIN_THREADS_DEF NPY_BEGIN_THREADS_DEF 137 | #define BEGIN_THREADS NPY_BEGIN_THREADS 138 | #define END_THREADS NPY_END_THREADS 139 | #define ALLOW_C_API_DEF NPY_ALLOW_C_API_DEF 140 | #define ALLOW_C_API NPY_ALLOW_C_API 141 | #define DISABLE_C_API NPY_DISABLE_C_API 142 | 143 | #define PY_FAIL NPY_FAIL 144 | #define PY_SUCCEED NPY_SUCCEED 145 | 146 | #ifndef TRUE 147 | #define TRUE NPY_TRUE 148 | #endif 149 | 150 | #ifndef FALSE 151 | #define FALSE NPY_FALSE 152 | #endif 153 | 154 | #define LONGDOUBLE_FMT NPY_LONGDOUBLE_FMT 155 | 156 | #define CONTIGUOUS NPY_CONTIGUOUS 157 | #define C_CONTIGUOUS NPY_C_CONTIGUOUS 158 | #define FORTRAN NPY_FORTRAN 159 | #define F_CONTIGUOUS NPY_F_CONTIGUOUS 160 | #define OWNDATA NPY_OWNDATA 161 | #define FORCECAST NPY_FORCECAST 162 | #define ENSURECOPY NPY_ENSURECOPY 163 | #define ENSUREARRAY NPY_ENSUREARRAY 164 | #define ELEMENTSTRIDES NPY_ELEMENTSTRIDES 165 | #define ALIGNED NPY_ALIGNED 166 | #define NOTSWAPPED NPY_NOTSWAPPED 167 | #define WRITEABLE NPY_WRITEABLE 168 | #define UPDATEIFCOPY NPY_UPDATEIFCOPY 169 | #define ARR_HAS_DESCR NPY_ARR_HAS_DESCR 170 | #define BEHAVED NPY_BEHAVED 171 | #define BEHAVED_NS NPY_BEHAVED_NS 172 | #define CARRAY NPY_CARRAY 173 | #define CARRAY_RO NPY_CARRAY_RO 174 | #define FARRAY NPY_FARRAY 175 | #define FARRAY_RO NPY_FARRAY_RO 176 | #define DEFAULT NPY_DEFAULT 177 | #define IN_ARRAY NPY_IN_ARRAY 178 | #define OUT_ARRAY NPY_OUT_ARRAY 179 | #define INOUT_ARRAY NPY_INOUT_ARRAY 180 | #define IN_FARRAY NPY_IN_FARRAY 181 | #define OUT_FARRAY NPY_OUT_FARRAY 182 | #define INOUT_FARRAY NPY_INOUT_FARRAY 183 
| #define UPDATE_ALL NPY_UPDATE_ALL 184 | 185 | #define OWN_DATA NPY_OWNDATA 186 | #define BEHAVED_FLAGS NPY_BEHAVED 187 | #define BEHAVED_FLAGS_NS NPY_BEHAVED_NS 188 | #define CARRAY_FLAGS_RO NPY_CARRAY_RO 189 | #define CARRAY_FLAGS NPY_CARRAY 190 | #define FARRAY_FLAGS NPY_FARRAY 191 | #define FARRAY_FLAGS_RO NPY_FARRAY_RO 192 | #define DEFAULT_FLAGS NPY_DEFAULT 193 | #define UPDATE_ALL_FLAGS NPY_UPDATE_ALL_FLAGS 194 | 195 | #ifndef MIN 196 | #define MIN PyArray_MIN 197 | #endif 198 | #ifndef MAX 199 | #define MAX PyArray_MAX 200 | #endif 201 | #define MAX_INTP NPY_MAX_INTP 202 | #define MIN_INTP NPY_MIN_INTP 203 | #define MAX_UINTP NPY_MAX_UINTP 204 | #define INTP_FMT NPY_INTP_FMT 205 | 206 | #define REFCOUNT PyArray_REFCOUNT 207 | #define MAX_ELSIZE NPY_MAX_ELSIZE 208 | 209 | #endif 210 | -------------------------------------------------------------------------------- /include/numpy/npy_3kcompat.h: -------------------------------------------------------------------------------- 1 | /* 2 | * This is a convenience header file providing compatibility utilities 3 | * for supporting Python 2 and Python 3 in the same code base. 4 | * 5 | * If you want to use this for your own projects, it's recommended to make a 6 | * copy of it. Although the stuff below is unlikely to change, we don't provide 7 | * strong backwards compatibility guarantees at the moment. 
8 | */ 9 | 10 | #ifndef _NPY_3KCOMPAT_H_ 11 | #define _NPY_3KCOMPAT_H_ 12 | 13 | #include 14 | #include 15 | 16 | #if PY_VERSION_HEX >= 0x03000000 17 | #ifndef NPY_PY3K 18 | #define NPY_PY3K 1 19 | #endif 20 | #endif 21 | 22 | #include "numpy/npy_common.h" 23 | #include "numpy/ndarrayobject.h" 24 | 25 | #ifdef __cplusplus 26 | extern "C" { 27 | #endif 28 | 29 | /* 30 | * PyInt -> PyLong 31 | */ 32 | 33 | #if defined(NPY_PY3K) 34 | /* Return True only if the long fits in a C long */ 35 | static NPY_INLINE int PyInt_Check(PyObject *op) { 36 | int overflow = 0; 37 | if (!PyLong_Check(op)) { 38 | return 0; 39 | } 40 | PyLong_AsLongAndOverflow(op, &overflow); 41 | return (overflow == 0); 42 | } 43 | 44 | #define PyInt_FromLong PyLong_FromLong 45 | #define PyInt_AsLong PyLong_AsLong 46 | #define PyInt_AS_LONG PyLong_AsLong 47 | #define PyInt_AsSsize_t PyLong_AsSsize_t 48 | 49 | /* NOTE: 50 | * 51 | * Since the PyLong type is very different from the fixed-range PyInt, 52 | * we don't define PyInt_Type -> PyLong_Type. 
53 | */ 54 | #endif /* NPY_PY3K */ 55 | 56 | /* 57 | * PyString -> PyBytes 58 | */ 59 | 60 | #if defined(NPY_PY3K) 61 | 62 | #define PyString_Type PyBytes_Type 63 | #define PyString_Check PyBytes_Check 64 | #define PyStringObject PyBytesObject 65 | #define PyString_FromString PyBytes_FromString 66 | #define PyString_FromStringAndSize PyBytes_FromStringAndSize 67 | #define PyString_AS_STRING PyBytes_AS_STRING 68 | #define PyString_AsStringAndSize PyBytes_AsStringAndSize 69 | #define PyString_FromFormat PyBytes_FromFormat 70 | #define PyString_Concat PyBytes_Concat 71 | #define PyString_ConcatAndDel PyBytes_ConcatAndDel 72 | #define PyString_AsString PyBytes_AsString 73 | #define PyString_GET_SIZE PyBytes_GET_SIZE 74 | #define PyString_Size PyBytes_Size 75 | 76 | #define PyUString_Type PyUnicode_Type 77 | #define PyUString_Check PyUnicode_Check 78 | #define PyUStringObject PyUnicodeObject 79 | #define PyUString_FromString PyUnicode_FromString 80 | #define PyUString_FromStringAndSize PyUnicode_FromStringAndSize 81 | #define PyUString_FromFormat PyUnicode_FromFormat 82 | #define PyUString_Concat PyUnicode_Concat2 83 | #define PyUString_ConcatAndDel PyUnicode_ConcatAndDel 84 | #define PyUString_GET_SIZE PyUnicode_GET_SIZE 85 | #define PyUString_Size PyUnicode_Size 86 | #define PyUString_InternFromString PyUnicode_InternFromString 87 | #define PyUString_Format PyUnicode_Format 88 | 89 | #else 90 | 91 | #define PyBytes_Type PyString_Type 92 | #define PyBytes_Check PyString_Check 93 | #define PyBytesObject PyStringObject 94 | #define PyBytes_FromString PyString_FromString 95 | #define PyBytes_FromStringAndSize PyString_FromStringAndSize 96 | #define PyBytes_AS_STRING PyString_AS_STRING 97 | #define PyBytes_AsStringAndSize PyString_AsStringAndSize 98 | #define PyBytes_FromFormat PyString_FromFormat 99 | #define PyBytes_Concat PyString_Concat 100 | #define PyBytes_ConcatAndDel PyString_ConcatAndDel 101 | #define PyBytes_AsString PyString_AsString 102 | #define 
PyBytes_GET_SIZE PyString_GET_SIZE 103 | #define PyBytes_Size PyString_Size 104 | 105 | #define PyUString_Type PyString_Type 106 | #define PyUString_Check PyString_Check 107 | #define PyUStringObject PyStringObject 108 | #define PyUString_FromString PyString_FromString 109 | #define PyUString_FromStringAndSize PyString_FromStringAndSize 110 | #define PyUString_FromFormat PyString_FromFormat 111 | #define PyUString_Concat PyString_Concat 112 | #define PyUString_ConcatAndDel PyString_ConcatAndDel 113 | #define PyUString_GET_SIZE PyString_GET_SIZE 114 | #define PyUString_Size PyString_Size 115 | #define PyUString_InternFromString PyString_InternFromString 116 | #define PyUString_Format PyString_Format 117 | 118 | #endif /* NPY_PY3K */ 119 | 120 | 121 | static NPY_INLINE void 122 | PyUnicode_ConcatAndDel(PyObject **left, PyObject *right) 123 | { 124 | PyObject *newobj; 125 | newobj = PyUnicode_Concat(*left, right); 126 | Py_DECREF(*left); 127 | Py_DECREF(right); 128 | *left = newobj; 129 | } 130 | 131 | static NPY_INLINE void 132 | PyUnicode_Concat2(PyObject **left, PyObject *right) 133 | { 134 | PyObject *newobj; 135 | newobj = PyUnicode_Concat(*left, right); 136 | Py_DECREF(*left); 137 | *left = newobj; 138 | } 139 | 140 | /* 141 | * PyFile_* compatibility 142 | */ 143 | #if defined(NPY_PY3K) 144 | 145 | /* 146 | * Get a FILE* handle to the file represented by the Python object 147 | */ 148 | static NPY_INLINE FILE* 149 | npy_PyFile_Dup(PyObject *file, char *mode) 150 | { 151 | int fd, fd2; 152 | PyObject *ret, *os; 153 | Py_ssize_t pos; 154 | FILE *handle; 155 | /* Flush first to ensure things end up in the file in the correct order */ 156 | ret = PyObject_CallMethod(file, "flush", ""); 157 | if (ret == NULL) { 158 | return NULL; 159 | } 160 | Py_DECREF(ret); 161 | fd = PyObject_AsFileDescriptor(file); 162 | if (fd == -1) { 163 | return NULL; 164 | } 165 | os = PyImport_ImportModule("os"); 166 | if (os == NULL) { 167 | return NULL; 168 | } 169 | ret = 
PyObject_CallMethod(os, "dup", "i", fd); 170 | Py_DECREF(os); 171 | if (ret == NULL) { 172 | return NULL; 173 | } 174 | fd2 = PyNumber_AsSsize_t(ret, NULL); 175 | Py_DECREF(ret); 176 | #ifdef _WIN32 177 | handle = _fdopen(fd2, mode); 178 | #else 179 | handle = fdopen(fd2, mode); 180 | #endif 181 | if (handle == NULL) { 182 | PyErr_SetString(PyExc_IOError, 183 | "Getting a FILE* from a Python file object failed"); 184 | } 185 | ret = PyObject_CallMethod(file, "tell", ""); 186 | if (ret == NULL) { 187 | fclose(handle); 188 | return NULL; 189 | } 190 | pos = PyNumber_AsSsize_t(ret, PyExc_OverflowError); 191 | Py_DECREF(ret); 192 | if (PyErr_Occurred()) { 193 | fclose(handle); 194 | return NULL; 195 | } 196 | npy_fseek(handle, pos, SEEK_SET); 197 | return handle; 198 | } 199 | 200 | /* 201 | * Close the dup-ed file handle, and seek the Python one to the current position 202 | */ 203 | static NPY_INLINE int 204 | npy_PyFile_DupClose(PyObject *file, FILE* handle) 205 | { 206 | PyObject *ret; 207 | Py_ssize_t position; 208 | position = npy_ftell(handle); 209 | fclose(handle); 210 | 211 | ret = PyObject_CallMethod(file, "seek", NPY_SSIZE_T_PYFMT "i", position, 0); 212 | if (ret == NULL) { 213 | return -1; 214 | } 215 | Py_DECREF(ret); 216 | return 0; 217 | } 218 | 219 | static NPY_INLINE int 220 | npy_PyFile_Check(PyObject *file) 221 | { 222 | int fd; 223 | fd = PyObject_AsFileDescriptor(file); 224 | if (fd == -1) { 225 | PyErr_Clear(); 226 | return 0; 227 | } 228 | return 1; 229 | } 230 | 231 | #else 232 | 233 | #define npy_PyFile_Dup(file, mode) PyFile_AsFile(file) 234 | #define npy_PyFile_DupClose(file, handle) (0) 235 | #define npy_PyFile_Check PyFile_Check 236 | 237 | #endif 238 | 239 | static NPY_INLINE PyObject* 240 | npy_PyFile_OpenFile(PyObject *filename, const char *mode) 241 | { 242 | PyObject *open; 243 | open = PyDict_GetItemString(PyEval_GetBuiltins(), "open"); 244 | if (open == NULL) { 245 | return NULL; 246 | } 247 | return PyObject_CallFunction(open, "Os", 
filename, mode); 248 | } 249 | 250 | static NPY_INLINE int 251 | npy_PyFile_CloseFile(PyObject *file) 252 | { 253 | PyObject *ret; 254 | 255 | ret = PyObject_CallMethod(file, "close", NULL); 256 | if (ret == NULL) { 257 | return -1; 258 | } 259 | Py_DECREF(ret); 260 | return 0; 261 | } 262 | 263 | /* 264 | * PyObject_Cmp 265 | */ 266 | #if defined(NPY_PY3K) 267 | static NPY_INLINE int 268 | PyObject_Cmp(PyObject *i1, PyObject *i2, int *cmp) 269 | { 270 | int v; 271 | v = PyObject_RichCompareBool(i1, i2, Py_LT); 272 | if (v == 0) { 273 | *cmp = -1; 274 | return 1; 275 | } 276 | else if (v == -1) { 277 | return -1; 278 | } 279 | 280 | v = PyObject_RichCompareBool(i1, i2, Py_GT); 281 | if (v == 0) { 282 | *cmp = 1; 283 | return 1; 284 | } 285 | else if (v == -1) { 286 | return -1; 287 | } 288 | 289 | v = PyObject_RichCompareBool(i1, i2, Py_EQ); 290 | if (v == 0) { 291 | *cmp = 0; 292 | return 1; 293 | } 294 | else { 295 | *cmp = 0; 296 | return -1; 297 | } 298 | } 299 | #endif 300 | 301 | /* 302 | * PyCObject functions adapted to PyCapsules. 303 | * 304 | * The main job here is to get rid of the improved error handling 305 | * of PyCapsules. It's a shame... 
306 | */ 307 | #if PY_VERSION_HEX >= 0x03000000 308 | 309 | static NPY_INLINE PyObject * 310 | NpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *)) 311 | { 312 | PyObject *ret = PyCapsule_New(ptr, NULL, dtor); 313 | if (ret == NULL) { 314 | PyErr_Clear(); 315 | } 316 | return ret; 317 | } 318 | 319 | static NPY_INLINE PyObject * 320 | NpyCapsule_FromVoidPtrAndDesc(void *ptr, void* context, void (*dtor)(PyObject *)) 321 | { 322 | PyObject *ret = NpyCapsule_FromVoidPtr(ptr, dtor); 323 | if (ret != NULL && PyCapsule_SetContext(ret, context) != 0) { 324 | PyErr_Clear(); 325 | Py_DECREF(ret); 326 | ret = NULL; 327 | } 328 | return ret; 329 | } 330 | 331 | static NPY_INLINE void * 332 | NpyCapsule_AsVoidPtr(PyObject *obj) 333 | { 334 | void *ret = PyCapsule_GetPointer(obj, NULL); 335 | if (ret == NULL) { 336 | PyErr_Clear(); 337 | } 338 | return ret; 339 | } 340 | 341 | static NPY_INLINE void * 342 | NpyCapsule_GetDesc(PyObject *obj) 343 | { 344 | return PyCapsule_GetContext(obj); 345 | } 346 | 347 | static NPY_INLINE int 348 | NpyCapsule_Check(PyObject *ptr) 349 | { 350 | return PyCapsule_CheckExact(ptr); 351 | } 352 | 353 | static NPY_INLINE void 354 | simple_capsule_dtor(PyObject *cap) 355 | { 356 | PyArray_free(PyCapsule_GetPointer(cap, NULL)); 357 | } 358 | 359 | #else 360 | 361 | static NPY_INLINE PyObject * 362 | NpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(void *)) 363 | { 364 | return PyCObject_FromVoidPtr(ptr, dtor); 365 | } 366 | 367 | static NPY_INLINE PyObject * 368 | NpyCapsule_FromVoidPtrAndDesc(void *ptr, void* context, 369 | void (*dtor)(void *, void *)) 370 | { 371 | return PyCObject_FromVoidPtrAndDesc(ptr, context, dtor); 372 | } 373 | 374 | static NPY_INLINE void * 375 | NpyCapsule_AsVoidPtr(PyObject *ptr) 376 | { 377 | return PyCObject_AsVoidPtr(ptr); 378 | } 379 | 380 | static NPY_INLINE void * 381 | NpyCapsule_GetDesc(PyObject *obj) 382 | { 383 | return PyCObject_GetDesc(obj); 384 | } 385 | 386 | static NPY_INLINE int 387 | 
NpyCapsule_Check(PyObject *ptr) 388 | { 389 | return PyCObject_Check(ptr); 390 | } 391 | 392 | static NPY_INLINE void 393 | simple_capsule_dtor(void *ptr) 394 | { 395 | PyArray_free(ptr); 396 | } 397 | 398 | #endif 399 | 400 | /* 401 | * Hash value compatibility. 402 | * As of Python 3.2 hash values are of type Py_hash_t. 403 | * Previous versions use C long. 404 | */ 405 | #if PY_VERSION_HEX < 0x03020000 406 | typedef long npy_hash_t; 407 | #define NPY_SIZEOF_HASH_T NPY_SIZEOF_LONG 408 | #else 409 | typedef Py_hash_t npy_hash_t; 410 | #define NPY_SIZEOF_HASH_T NPY_SIZEOF_INTP 411 | #endif 412 | 413 | #ifdef __cplusplus 414 | } 415 | #endif 416 | 417 | #endif /* _NPY_3KCOMPAT_H_ */ 418 | -------------------------------------------------------------------------------- /include/numpy/npy_cpu.h: -------------------------------------------------------------------------------- 1 | /* 2 | * This set (target) cpu specific macros: 3 | * - Possible values: 4 | * NPY_CPU_X86 5 | * NPY_CPU_AMD64 6 | * NPY_CPU_PPC 7 | * NPY_CPU_PPC64 8 | * NPY_CPU_SPARC 9 | * NPY_CPU_S390 10 | * NPY_CPU_IA64 11 | * NPY_CPU_HPPA 12 | * NPY_CPU_ALPHA 13 | * NPY_CPU_ARMEL 14 | * NPY_CPU_ARMEB 15 | * NPY_CPU_SH_LE 16 | * NPY_CPU_SH_BE 17 | */ 18 | #ifndef _NPY_CPUARCH_H_ 19 | #define _NPY_CPUARCH_H_ 20 | 21 | #include "numpyconfig.h" 22 | 23 | #if defined( __i386__ ) || defined(i386) || defined(_M_IX86) 24 | /* 25 | * __i386__ is defined by gcc and Intel compiler on Linux, 26 | * _M_IX86 by VS compiler, 27 | * i386 by Sun compilers on opensolaris at least 28 | */ 29 | #define NPY_CPU_X86 30 | #elif defined(__x86_64__) || defined(__amd64__) || defined(__x86_64) || defined(_M_AMD64) 31 | /* 32 | * both __x86_64__ and __amd64__ are defined by gcc 33 | * __x86_64 defined by sun compiler on opensolaris at least 34 | * _M_AMD64 defined by MS compiler 35 | */ 36 | #define NPY_CPU_AMD64 37 | #elif defined(__ppc__) || defined(__powerpc__) || defined(_ARCH_PPC) 38 | /* 39 | * __ppc__ is defined by gcc, I 
remember having seen __powerpc__ once, 40 | * but can't find it ATM 41 | * _ARCH_PPC is used by at least gcc on AIX 42 | */ 43 | #define NPY_CPU_PPC 44 | #elif defined(__ppc64__) 45 | #define NPY_CPU_PPC64 46 | #elif defined(__sparc__) || defined(__sparc) 47 | /* __sparc__ is defined by gcc and Forte (e.g. Sun) compilers */ 48 | #define NPY_CPU_SPARC 49 | #elif defined(__s390__) 50 | #define NPY_CPU_S390 51 | #elif defined(__ia64) 52 | #define NPY_CPU_IA64 53 | #elif defined(__hppa) 54 | #define NPY_CPU_HPPA 55 | #elif defined(__alpha__) 56 | #define NPY_CPU_ALPHA 57 | #elif defined(__arm__) && defined(__ARMEL__) 58 | #define NPY_CPU_ARMEL 59 | #elif defined(__arm__) && defined(__ARMEB__) 60 | #define NPY_CPU_ARMEB 61 | #elif defined(__sh__) && defined(__LITTLE_ENDIAN__) 62 | #define NPY_CPU_SH_LE 63 | #elif defined(__sh__) && defined(__BIG_ENDIAN__) 64 | #define NPY_CPU_SH_BE 65 | #elif defined(__MIPSEL__) 66 | #define NPY_CPU_MIPSEL 67 | #elif defined(__MIPSEB__) 68 | #define NPY_CPU_MIPSEB 69 | #elif defined(__aarch64__) 70 | #define NPY_CPU_AARCH64 71 | #else 72 | #error Unknown CPU, please report this to numpy maintainers with \ 73 | information about your platform (OS, CPU and compiler) 74 | #endif 75 | 76 | /* 77 | This "white-lists" the architectures that we know don't require 78 | pointer alignment. We white-list, since the memcpy version will 79 | work everywhere, whereas assignment will only work where pointer 80 | dereferencing doesn't require alignment. 81 | 82 | TODO: There may be more architectures we can white list. 
83 | */ 84 | #if defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64) 85 | #define NPY_COPY_PYOBJECT_PTR(dst, src) (*((PyObject **)(dst)) = *((PyObject **)(src))) 86 | #else 87 | #if NPY_SIZEOF_PY_INTPTR_T == 4 88 | #define NPY_COPY_PYOBJECT_PTR(dst, src) \ 89 | ((char*)(dst))[0] = ((char*)(src))[0]; \ 90 | ((char*)(dst))[1] = ((char*)(src))[1]; \ 91 | ((char*)(dst))[2] = ((char*)(src))[2]; \ 92 | ((char*)(dst))[3] = ((char*)(src))[3]; 93 | #elif NPY_SIZEOF_PY_INTPTR_T == 8 94 | #define NPY_COPY_PYOBJECT_PTR(dst, src) \ 95 | ((char*)(dst))[0] = ((char*)(src))[0]; \ 96 | ((char*)(dst))[1] = ((char*)(src))[1]; \ 97 | ((char*)(dst))[2] = ((char*)(src))[2]; \ 98 | ((char*)(dst))[3] = ((char*)(src))[3]; \ 99 | ((char*)(dst))[4] = ((char*)(src))[4]; \ 100 | ((char*)(dst))[5] = ((char*)(src))[5]; \ 101 | ((char*)(dst))[6] = ((char*)(src))[6]; \ 102 | ((char*)(dst))[7] = ((char*)(src))[7]; 103 | #else 104 | #error Unknown architecture, please report this to numpy maintainers with \ 105 | information about your platform (OS, CPU and compiler) 106 | #endif 107 | #endif 108 | 109 | #endif 110 | -------------------------------------------------------------------------------- /include/numpy/npy_deprecated_api.h: -------------------------------------------------------------------------------- 1 | #ifndef _NPY_DEPRECATED_API_H 2 | #define _NPY_DEPRECATED_API_H 3 | 4 | #if defined(_WIN32) 5 | #define _WARN___STR2__(x) #x 6 | #define _WARN___STR1__(x) _WARN___STR2__(x) 7 | #define _WARN___LOC__ __FILE__ "(" _WARN___STR1__(__LINE__) ") : Warning Msg: " 8 | #pragma message(_WARN___LOC__"Using deprecated NumPy API, disable it by " \ 9 | "#defining NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION") 10 | #elif defined(__GNUC__) 11 | #warning "Using deprecated NumPy API, disable it by #defining NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION" 12 | #endif 13 | /* TODO: How to do this warning message for other compilers? */ 14 | 15 | /* 16 | * This header exists to collect all dangerous/deprecated NumPy API. 
17 | * 18 | * This is an attempt to remove bad API, the proliferation of macros, 19 | * and namespace pollution currently produced by the NumPy headers. 20 | */ 21 | 22 | #if defined(NPY_NO_DEPRECATED_API) 23 | #error Should never include npy_deprecated_api directly. 24 | #endif 25 | 26 | /* These array flags are deprecated as of NumPy 1.7 */ 27 | #define NPY_CONTIGUOUS NPY_ARRAY_C_CONTIGUOUS 28 | #define NPY_FORTRAN NPY_ARRAY_F_CONTIGUOUS 29 | 30 | /* 31 | * The consistent NPY_ARRAY_* names which don't pollute the NPY_* 32 | * namespace were added in NumPy 1.7. 33 | * 34 | * These versions of the carray flags are deprecated, but 35 | * probably should only be removed after two releases instead of one. 36 | */ 37 | #define NPY_C_CONTIGUOUS NPY_ARRAY_C_CONTIGUOUS 38 | #define NPY_F_CONTIGUOUS NPY_ARRAY_F_CONTIGUOUS 39 | #define NPY_OWNDATA NPY_ARRAY_OWNDATA 40 | #define NPY_FORCECAST NPY_ARRAY_FORCECAST 41 | #define NPY_ENSURECOPY NPY_ARRAY_ENSURECOPY 42 | #define NPY_ENSUREARRAY NPY_ARRAY_ENSUREARRAY 43 | #define NPY_ELEMENTSTRIDES NPY_ARRAY_ELEMENTSTRIDES 44 | #define NPY_ALIGNED NPY_ARRAY_ALIGNED 45 | #define NPY_NOTSWAPPED NPY_ARRAY_NOTSWAPPED 46 | #define NPY_WRITEABLE NPY_ARRAY_WRITEABLE 47 | #define NPY_UPDATEIFCOPY NPY_ARRAY_UPDATEIFCOPY 48 | #define NPY_BEHAVED NPY_ARRAY_BEHAVED 49 | #define NPY_BEHAVED_NS NPY_ARRAY_BEHAVED_NS 50 | #define NPY_CARRAY NPY_ARRAY_CARRAY 51 | #define NPY_CARRAY_RO NPY_ARRAY_CARRAY_RO 52 | #define NPY_FARRAY NPY_ARRAY_FARRAY 53 | #define NPY_FARRAY_RO NPY_ARRAY_FARRAY_RO 54 | #define NPY_DEFAULT NPY_ARRAY_DEFAULT 55 | #define NPY_IN_ARRAY NPY_ARRAY_IN_ARRAY 56 | #define NPY_OUT_ARRAY NPY_ARRAY_OUT_ARRAY 57 | #define NPY_INOUT_ARRAY NPY_ARRAY_INOUT_ARRAY 58 | #define NPY_IN_FARRAY NPY_ARRAY_IN_FARRAY 59 | #define NPY_OUT_FARRAY NPY_ARRAY_OUT_FARRAY 60 | #define NPY_INOUT_FARRAY NPY_ARRAY_INOUT_FARRAY 61 | #define NPY_UPDATE_ALL NPY_ARRAY_UPDATE_ALL 62 | 63 | /* This way of accessing the default type is deprecated as of NumPy 1.7 
*/ 64 | #define PyArray_DEFAULT NPY_DEFAULT_TYPE 65 | 66 | /* These DATETIME bits aren't used internally */ 67 | #if PY_VERSION_HEX >= 0x03000000 68 | #define PyDataType_GetDatetimeMetaData(descr) \ 69 | ((descr->metadata == NULL) ? NULL : \ 70 | ((PyArray_DatetimeMetaData *)(PyCapsule_GetPointer( \ 71 | PyDict_GetItemString( \ 72 | descr->metadata, NPY_METADATA_DTSTR), NULL)))) 73 | #else 74 | #define PyDataType_GetDatetimeMetaData(descr) \ 75 | ((descr->metadata == NULL) ? NULL : \ 76 | ((PyArray_DatetimeMetaData *)(PyCObject_AsVoidPtr( \ 77 | PyDict_GetItemString(descr->metadata, NPY_METADATA_DTSTR))))) 78 | #endif 79 | 80 | /* 81 | * Deprecated as of NumPy 1.7, this kind of shortcut doesn't 82 | * belong in the public API. 83 | */ 84 | #define NPY_AO PyArrayObject 85 | 86 | /* 87 | * Deprecated as of NumPy 1.7, an all-lowercase macro doesn't 88 | * belong in the public API. 89 | */ 90 | #define fortran fortran_ 91 | 92 | /* 93 | * Deprecated as of NumPy 1.7, as it is a namespace-polluting 94 | * macro. 95 | */ 96 | #define FORTRAN_IF PyArray_FORTRAN_IF 97 | 98 | /* Deprecated as of NumPy 1.7, datetime64 uses c_metadata instead */ 99 | #define NPY_METADATA_DTSTR "__timeunit__" 100 | 101 | /* 102 | * Deprecated as of NumPy 1.7. 103 | * The reasoning: 104 | * - These are for datetime, but there's no datetime "namespace". 105 | * - They just turn NPY_STR_ into "", which is just 106 | * making something simple be indirected. 107 | */ 108 | #define NPY_STR_Y "Y" 109 | #define NPY_STR_M "M" 110 | #define NPY_STR_W "W" 111 | #define NPY_STR_D "D" 112 | #define NPY_STR_h "h" 113 | #define NPY_STR_m "m" 114 | #define NPY_STR_s "s" 115 | #define NPY_STR_ms "ms" 116 | #define NPY_STR_us "us" 117 | #define NPY_STR_ns "ns" 118 | #define NPY_STR_ps "ps" 119 | #define NPY_STR_fs "fs" 120 | #define NPY_STR_as "as" 121 | 122 | /* 123 | * The macros in old_defines.h are Deprecated as of NumPy 1.7 and will be 124 | * removed in the next major release. 
125 | */ 126 | #include "old_defines.h" 127 | 128 | 129 | #endif 130 | -------------------------------------------------------------------------------- /include/numpy/npy_endian.h: -------------------------------------------------------------------------------- 1 | #ifndef _NPY_ENDIAN_H_ 2 | #define _NPY_ENDIAN_H_ 3 | 4 | /* 5 | * NPY_BYTE_ORDER is set to the same value as BYTE_ORDER set by glibc in 6 | * endian.h 7 | */ 8 | 9 | #ifdef NPY_HAVE_ENDIAN_H 10 | /* Use endian.h if available */ 11 | #include 12 | 13 | #define NPY_BYTE_ORDER __BYTE_ORDER 14 | #define NPY_LITTLE_ENDIAN __LITTLE_ENDIAN 15 | #define NPY_BIG_ENDIAN __BIG_ENDIAN 16 | #else 17 | /* Set endianness info using target CPU */ 18 | #include "npy_cpu.h" 19 | 20 | #define NPY_LITTLE_ENDIAN 1234 21 | #define NPY_BIG_ENDIAN 4321 22 | 23 | #if defined(NPY_CPU_X86) \ 24 | || defined(NPY_CPU_AMD64) \ 25 | || defined(NPY_CPU_IA64) \ 26 | || defined(NPY_CPU_ALPHA) \ 27 | || defined(NPY_CPU_ARMEL) \ 28 | || defined(NPY_CPU_AARCH64) \ 29 | || defined(NPY_CPU_SH_LE) \ 30 | || defined(NPY_CPU_MIPSEL) 31 | #define NPY_BYTE_ORDER NPY_LITTLE_ENDIAN 32 | #elif defined(NPY_CPU_PPC) \ 33 | || defined(NPY_CPU_SPARC) \ 34 | || defined(NPY_CPU_S390) \ 35 | || defined(NPY_CPU_HPPA) \ 36 | || defined(NPY_CPU_PPC64) \ 37 | || defined(NPY_CPU_ARMEB) \ 38 | || defined(NPY_CPU_SH_BE) \ 39 | || defined(NPY_CPU_MIPSEB) 40 | #define NPY_BYTE_ORDER NPY_BIG_ENDIAN 41 | #else 42 | #error Unknown CPU: can not set endianness 43 | #endif 44 | #endif 45 | 46 | #endif 47 | -------------------------------------------------------------------------------- /include/numpy/npy_interrupt.h: -------------------------------------------------------------------------------- 1 | 2 | /* Signal handling: 3 | 4 | This header file defines macros that allow your code to handle 5 | interrupts received during processing. 
Interrupts that 6 | could reasonably be handled: 7 | 8 | SIGINT, SIGABRT, SIGALRM, SIGSEGV 9 | 10 | ****Warning*************** 11 | 12 | Do not allow code that creates temporary memory or increases reference 13 | counts of Python objects to be interrupted unless you handle it 14 | differently. 15 | 16 | ************************** 17 | 18 | The mechanism for handling interrupts is conceptually simple: 19 | 20 | - replace the signal handler with our own home-grown version 21 | and store the old one. 22 | - run the code to be interrupted -- if an interrupt occurs 23 | the handler should basically just cause a return to the 24 | calling function for finish work. 25 | - restore the old signal handler 26 | 27 | Of course, every code that allows interrupts must account for 28 | returning via the interrupt and handle clean-up correctly. But, 29 | even still, the simple paradigm is complicated by at least three 30 | factors. 31 | 32 | 1) platform portability (i.e. Microsoft says not to use longjmp 33 | to return from signal handling. They have a __try and __except 34 | extension to C instead but what about mingw?). 35 | 36 | 2) how to handle threads: apparently whether signals are delivered to 37 | every thread of the process or the "invoking" thread is platform 38 | dependent. --- we don't handle threads for now. 39 | 40 | 3) do we need to worry about re-entrance. For now, assume the 41 | code will not call-back into itself. 42 | 43 | Ideas: 44 | 45 | 1) Start by implementing an approach that works on platforms that 46 | can use setjmp and longjmp functionality and does nothing 47 | on other platforms. 48 | 49 | 2) Ignore threads --- i.e. do not mix interrupt handling and threads 50 | 51 | 3) Add a default signal_handler function to the C-API but have the rest 52 | use macros. 
53 | 54 | 55 | Simple Interface: 56 | 57 | 58 | In your C-extension: around a block of code you want to be interruptable 59 | with a SIGINT 60 | 61 | NPY_SIGINT_ON 62 | [code] 63 | NPY_SIGINT_OFF 64 | 65 | In order for this to work correctly, the 66 | [code] block must not allocate any memory or alter the reference count of any 67 | Python objects. In other words [code] must be interruptible so that continuation 68 | after NPY_SIGINT_OFF will only be "missing some computations" 69 | 70 | Interrupt handling does not work well with threads. 71 | 72 | */ 73 | 74 | /* Add signal handling macros 75 | Make the global variable and signal handler part of the C-API 76 | */ 77 | 78 | #ifndef NPY_INTERRUPT_H 79 | #define NPY_INTERRUPT_H 80 | 81 | #ifndef NPY_NO_SIGNAL 82 | 83 | #include 84 | #include 85 | 86 | #ifndef sigsetjmp 87 | 88 | #define NPY_SIGSETJMP(arg1, arg2) setjmp(arg1) 89 | #define NPY_SIGLONGJMP(arg1, arg2) longjmp(arg1, arg2) 90 | #define NPY_SIGJMP_BUF jmp_buf 91 | 92 | #else 93 | 94 | #define NPY_SIGSETJMP(arg1, arg2) sigsetjmp(arg1, arg2) 95 | #define NPY_SIGLONGJMP(arg1, arg2) siglongjmp(arg1, arg2) 96 | #define NPY_SIGJMP_BUF sigjmp_buf 97 | 98 | #endif 99 | 100 | # define NPY_SIGINT_ON { \ 101 | PyOS_sighandler_t _npy_sig_save; \ 102 | _npy_sig_save = PyOS_setsig(SIGINT, _PyArray_SigintHandler); \ 103 | if (NPY_SIGSETJMP(*((NPY_SIGJMP_BUF *)_PyArray_GetSigintBuf()), \ 104 | 1) == 0) { \ 105 | 106 | # define NPY_SIGINT_OFF } \ 107 | PyOS_setsig(SIGINT, _npy_sig_save); \ 108 | } 109 | 110 | #else /* NPY_NO_SIGNAL */ 111 | 112 | #define NPY_SIGINT_ON 113 | #define NPY_SIGINT_OFF 114 | 115 | #endif /* HAVE_SIGSETJMP */ 116 | 117 | #endif /* NPY_INTERRUPT_H */ 118 | -------------------------------------------------------------------------------- /include/numpy/npy_no_deprecated_api.h: -------------------------------------------------------------------------------- 1 | /* 2 | * This include file is provided for inclusion in Cython *.pyd files where 3 | * one 
would like to define the NPY_NO_DEPRECATED_API macro. It can be 4 | * included by 5 | * 6 | * cdef extern from "npy_no_deprecated_api.h": pass 7 | * 8 | */ 9 | #ifndef NPY_NO_DEPRECATED_API 10 | 11 | /* put this check here since there may be multiple includes in C extensions. */ 12 | #if defined(NDARRAYTYPES_H) || defined(_NPY_DEPRECATED_API_H) || \ 13 | defined(OLD_DEFINES_H) 14 | #error "npy_no_deprecated_api.h" must be first among numpy includes. 15 | #else 16 | #define NPY_NO_DEPRECATED_API NPY_API_VERSION 17 | #endif 18 | 19 | #endif 20 | -------------------------------------------------------------------------------- /include/numpy/npy_os.h: -------------------------------------------------------------------------------- 1 | #ifndef _NPY_OS_H_ 2 | #define _NPY_OS_H_ 3 | 4 | #if defined(linux) || defined(__linux) || defined(__linux__) 5 | #define NPY_OS_LINUX 6 | #elif defined(__FreeBSD__) || defined(__NetBSD__) || \ 7 | defined(__OpenBSD__) || defined(__DragonFly__) 8 | #define NPY_OS_BSD 9 | #ifdef __FreeBSD__ 10 | #define NPY_OS_FREEBSD 11 | #elif defined(__NetBSD__) 12 | #define NPY_OS_NETBSD 13 | #elif defined(__OpenBSD__) 14 | #define NPY_OS_OPENBSD 15 | #elif defined(__DragonFly__) 16 | #define NPY_OS_DRAGONFLY 17 | #endif 18 | #elif defined(sun) || defined(__sun) 19 | #define NPY_OS_SOLARIS 20 | #elif defined(__CYGWIN__) 21 | #define NPY_OS_CYGWIN 22 | #elif defined(_WIN32) || defined(__WIN32__) || defined(WIN32) 23 | #define NPY_OS_WIN32 24 | #elif defined(__APPLE__) 25 | #define NPY_OS_DARWIN 26 | #else 27 | #define NPY_OS_UNKNOWN 28 | #endif 29 | 30 | #endif 31 | -------------------------------------------------------------------------------- /include/numpy/numpyconfig.h: -------------------------------------------------------------------------------- 1 | #ifndef _NPY_NUMPYCONFIG_H_ 2 | #define _NPY_NUMPYCONFIG_H_ 3 | 4 | #include "_numpyconfig.h" 5 | 6 | /* 7 | * On Mac OS X, because there is only one configuration stage for all the archs 8 | * in 
universal builds, any macro which depends on the arch needs to be 9 | * harcoded 10 | */ 11 | #ifdef __APPLE__ 12 | #undef NPY_SIZEOF_LONG 13 | #undef NPY_SIZEOF_PY_INTPTR_T 14 | 15 | #ifdef __LP64__ 16 | #define NPY_SIZEOF_LONG 8 17 | #define NPY_SIZEOF_PY_INTPTR_T 8 18 | #else 19 | #define NPY_SIZEOF_LONG 4 20 | #define NPY_SIZEOF_PY_INTPTR_T 4 21 | #endif 22 | #endif 23 | 24 | /** 25 | * To help with the NPY_NO_DEPRECATED_API macro, we include API version 26 | * numbers for specific versions of NumPy. To exclude all API that was 27 | * deprecated as of 1.7, add the following before #including any NumPy 28 | * headers: 29 | * #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION 30 | */ 31 | #define NPY_1_7_API_VERSION 0x00000007 32 | 33 | #endif 34 | -------------------------------------------------------------------------------- /include/numpy/old_defines.h: -------------------------------------------------------------------------------- 1 | /* This header is deprecated as of NumPy 1.7 */ 2 | #ifndef OLD_DEFINES_H 3 | #define OLD_DEFINES_H 4 | 5 | #if defined(NPY_NO_DEPRECATED_API) && NPY_NO_DEPRECATED_API >= NPY_1_7_API_VERSION 6 | #error The header "old_defines.h" is deprecated as of NumPy 1.7. 
7 | #endif 8 | 9 | #define NDARRAY_VERSION NPY_VERSION 10 | 11 | #define PyArray_MIN_BUFSIZE NPY_MIN_BUFSIZE 12 | #define PyArray_MAX_BUFSIZE NPY_MAX_BUFSIZE 13 | #define PyArray_BUFSIZE NPY_BUFSIZE 14 | 15 | #define PyArray_PRIORITY NPY_PRIORITY 16 | #define PyArray_SUBTYPE_PRIORITY NPY_PRIORITY 17 | #define PyArray_NUM_FLOATTYPE NPY_NUM_FLOATTYPE 18 | 19 | #define NPY_MAX PyArray_MAX 20 | #define NPY_MIN PyArray_MIN 21 | 22 | #define PyArray_TYPES NPY_TYPES 23 | #define PyArray_BOOL NPY_BOOL 24 | #define PyArray_BYTE NPY_BYTE 25 | #define PyArray_UBYTE NPY_UBYTE 26 | #define PyArray_SHORT NPY_SHORT 27 | #define PyArray_USHORT NPY_USHORT 28 | #define PyArray_INT NPY_INT 29 | #define PyArray_UINT NPY_UINT 30 | #define PyArray_LONG NPY_LONG 31 | #define PyArray_ULONG NPY_ULONG 32 | #define PyArray_LONGLONG NPY_LONGLONG 33 | #define PyArray_ULONGLONG NPY_ULONGLONG 34 | #define PyArray_HALF NPY_HALF 35 | #define PyArray_FLOAT NPY_FLOAT 36 | #define PyArray_DOUBLE NPY_DOUBLE 37 | #define PyArray_LONGDOUBLE NPY_LONGDOUBLE 38 | #define PyArray_CFLOAT NPY_CFLOAT 39 | #define PyArray_CDOUBLE NPY_CDOUBLE 40 | #define PyArray_CLONGDOUBLE NPY_CLONGDOUBLE 41 | #define PyArray_OBJECT NPY_OBJECT 42 | #define PyArray_STRING NPY_STRING 43 | #define PyArray_UNICODE NPY_UNICODE 44 | #define PyArray_VOID NPY_VOID 45 | #define PyArray_DATETIME NPY_DATETIME 46 | #define PyArray_TIMEDELTA NPY_TIMEDELTA 47 | #define PyArray_NTYPES NPY_NTYPES 48 | #define PyArray_NOTYPE NPY_NOTYPE 49 | #define PyArray_CHAR NPY_CHAR 50 | #define PyArray_USERDEF NPY_USERDEF 51 | #define PyArray_NUMUSERTYPES NPY_NUMUSERTYPES 52 | 53 | #define PyArray_INTP NPY_INTP 54 | #define PyArray_UINTP NPY_UINTP 55 | 56 | #define PyArray_INT8 NPY_INT8 57 | #define PyArray_UINT8 NPY_UINT8 58 | #define PyArray_INT16 NPY_INT16 59 | #define PyArray_UINT16 NPY_UINT16 60 | #define PyArray_INT32 NPY_INT32 61 | #define PyArray_UINT32 NPY_UINT32 62 | 63 | #ifdef NPY_INT64 64 | #define PyArray_INT64 NPY_INT64 65 | #define 
PyArray_UINT64 NPY_UINT64 66 | #endif 67 | 68 | #ifdef NPY_INT128 69 | #define PyArray_INT128 NPY_INT128 70 | #define PyArray_UINT128 NPY_UINT128 71 | #endif 72 | 73 | #ifdef NPY_FLOAT16 74 | #define PyArray_FLOAT16 NPY_FLOAT16 75 | #define PyArray_COMPLEX32 NPY_COMPLEX32 76 | #endif 77 | 78 | #ifdef NPY_FLOAT80 79 | #define PyArray_FLOAT80 NPY_FLOAT80 80 | #define PyArray_COMPLEX160 NPY_COMPLEX160 81 | #endif 82 | 83 | #ifdef NPY_FLOAT96 84 | #define PyArray_FLOAT96 NPY_FLOAT96 85 | #define PyArray_COMPLEX192 NPY_COMPLEX192 86 | #endif 87 | 88 | #ifdef NPY_FLOAT128 89 | #define PyArray_FLOAT128 NPY_FLOAT128 90 | #define PyArray_COMPLEX256 NPY_COMPLEX256 91 | #endif 92 | 93 | #define PyArray_FLOAT32 NPY_FLOAT32 94 | #define PyArray_COMPLEX64 NPY_COMPLEX64 95 | #define PyArray_FLOAT64 NPY_FLOAT64 96 | #define PyArray_COMPLEX128 NPY_COMPLEX128 97 | 98 | 99 | #define PyArray_TYPECHAR NPY_TYPECHAR 100 | #define PyArray_BOOLLTR NPY_BOOLLTR 101 | #define PyArray_BYTELTR NPY_BYTELTR 102 | #define PyArray_UBYTELTR NPY_UBYTELTR 103 | #define PyArray_SHORTLTR NPY_SHORTLTR 104 | #define PyArray_USHORTLTR NPY_USHORTLTR 105 | #define PyArray_INTLTR NPY_INTLTR 106 | #define PyArray_UINTLTR NPY_UINTLTR 107 | #define PyArray_LONGLTR NPY_LONGLTR 108 | #define PyArray_ULONGLTR NPY_ULONGLTR 109 | #define PyArray_LONGLONGLTR NPY_LONGLONGLTR 110 | #define PyArray_ULONGLONGLTR NPY_ULONGLONGLTR 111 | #define PyArray_HALFLTR NPY_HALFLTR 112 | #define PyArray_FLOATLTR NPY_FLOATLTR 113 | #define PyArray_DOUBLELTR NPY_DOUBLELTR 114 | #define PyArray_LONGDOUBLELTR NPY_LONGDOUBLELTR 115 | #define PyArray_CFLOATLTR NPY_CFLOATLTR 116 | #define PyArray_CDOUBLELTR NPY_CDOUBLELTR 117 | #define PyArray_CLONGDOUBLELTR NPY_CLONGDOUBLELTR 118 | #define PyArray_OBJECTLTR NPY_OBJECTLTR 119 | #define PyArray_STRINGLTR NPY_STRINGLTR 120 | #define PyArray_STRINGLTR2 NPY_STRINGLTR2 121 | #define PyArray_UNICODELTR NPY_UNICODELTR 122 | #define PyArray_VOIDLTR NPY_VOIDLTR 123 | #define PyArray_DATETIMELTR 
NPY_DATETIMELTR 124 | #define PyArray_TIMEDELTALTR NPY_TIMEDELTALTR 125 | #define PyArray_CHARLTR NPY_CHARLTR 126 | #define PyArray_INTPLTR NPY_INTPLTR 127 | #define PyArray_UINTPLTR NPY_UINTPLTR 128 | #define PyArray_GENBOOLLTR NPY_GENBOOLLTR 129 | #define PyArray_SIGNEDLTR NPY_SIGNEDLTR 130 | #define PyArray_UNSIGNEDLTR NPY_UNSIGNEDLTR 131 | #define PyArray_FLOATINGLTR NPY_FLOATINGLTR 132 | #define PyArray_COMPLEXLTR NPY_COMPLEXLTR 133 | 134 | #define PyArray_QUICKSORT NPY_QUICKSORT 135 | #define PyArray_HEAPSORT NPY_HEAPSORT 136 | #define PyArray_MERGESORT NPY_MERGESORT 137 | #define PyArray_SORTKIND NPY_SORTKIND 138 | #define PyArray_NSORTS NPY_NSORTS 139 | 140 | #define PyArray_NOSCALAR NPY_NOSCALAR 141 | #define PyArray_BOOL_SCALAR NPY_BOOL_SCALAR 142 | #define PyArray_INTPOS_SCALAR NPY_INTPOS_SCALAR 143 | #define PyArray_INTNEG_SCALAR NPY_INTNEG_SCALAR 144 | #define PyArray_FLOAT_SCALAR NPY_FLOAT_SCALAR 145 | #define PyArray_COMPLEX_SCALAR NPY_COMPLEX_SCALAR 146 | #define PyArray_OBJECT_SCALAR NPY_OBJECT_SCALAR 147 | #define PyArray_SCALARKIND NPY_SCALARKIND 148 | #define PyArray_NSCALARKINDS NPY_NSCALARKINDS 149 | 150 | #define PyArray_ANYORDER NPY_ANYORDER 151 | #define PyArray_CORDER NPY_CORDER 152 | #define PyArray_FORTRANORDER NPY_FORTRANORDER 153 | #define PyArray_ORDER NPY_ORDER 154 | 155 | #define PyDescr_ISBOOL PyDataType_ISBOOL 156 | #define PyDescr_ISUNSIGNED PyDataType_ISUNSIGNED 157 | #define PyDescr_ISSIGNED PyDataType_ISSIGNED 158 | #define PyDescr_ISINTEGER PyDataType_ISINTEGER 159 | #define PyDescr_ISFLOAT PyDataType_ISFLOAT 160 | #define PyDescr_ISNUMBER PyDataType_ISNUMBER 161 | #define PyDescr_ISSTRING PyDataType_ISSTRING 162 | #define PyDescr_ISCOMPLEX PyDataType_ISCOMPLEX 163 | #define PyDescr_ISPYTHON PyDataType_ISPYTHON 164 | #define PyDescr_ISFLEXIBLE PyDataType_ISFLEXIBLE 165 | #define PyDescr_ISUSERDEF PyDataType_ISUSERDEF 166 | #define PyDescr_ISEXTENDED PyDataType_ISEXTENDED 167 | #define PyDescr_ISOBJECT PyDataType_ISOBJECT 168 
| #define PyDescr_HASFIELDS PyDataType_HASFIELDS 169 | 170 | #define PyArray_LITTLE NPY_LITTLE 171 | #define PyArray_BIG NPY_BIG 172 | #define PyArray_NATIVE NPY_NATIVE 173 | #define PyArray_SWAP NPY_SWAP 174 | #define PyArray_IGNORE NPY_IGNORE 175 | 176 | #define PyArray_NATBYTE NPY_NATBYTE 177 | #define PyArray_OPPBYTE NPY_OPPBYTE 178 | 179 | #define PyArray_MAX_ELSIZE NPY_MAX_ELSIZE 180 | 181 | #define PyArray_USE_PYMEM NPY_USE_PYMEM 182 | 183 | #define PyArray_RemoveLargest PyArray_RemoveSmallest 184 | 185 | #define PyArray_UCS4 npy_ucs4 186 | 187 | #endif 188 | -------------------------------------------------------------------------------- /include/numpy/oldnumeric.h: -------------------------------------------------------------------------------- 1 | #include "arrayobject.h" 2 | 3 | #ifndef REFCOUNT 4 | # define REFCOUNT NPY_REFCOUNT 5 | # define MAX_ELSIZE 16 6 | #endif 7 | 8 | #define PyArray_UNSIGNED_TYPES 9 | #define PyArray_SBYTE NPY_BYTE 10 | #define PyArray_CopyArray PyArray_CopyInto 11 | #define _PyArray_multiply_list PyArray_MultiplyIntList 12 | #define PyArray_ISSPACESAVER(m) NPY_FALSE 13 | #define PyScalarArray_Check PyArray_CheckScalar 14 | 15 | #define CONTIGUOUS NPY_CONTIGUOUS 16 | #define OWN_DIMENSIONS 0 17 | #define OWN_STRIDES 0 18 | #define OWN_DATA NPY_OWNDATA 19 | #define SAVESPACE 0 20 | #define SAVESPACEBIT 0 21 | 22 | #undef import_array 23 | #define import_array() { if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); } } 24 | -------------------------------------------------------------------------------- /include/numpy/ufunc_api.txt: -------------------------------------------------------------------------------- 1 | 2 | ================= 3 | Numpy Ufunc C-API 4 | ================= 5 | :: 6 | 7 | PyObject * 8 | PyUFunc_FromFuncAndData(PyUFuncGenericFunction *func, void 9 | **data, char *types, int ntypes, int nin, int 10 | nout, int identity, char *name, char *doc, 
int 11 | check_return) 12 | 13 | 14 | :: 15 | 16 | int 17 | PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc, int 18 | usertype, PyUFuncGenericFunction 19 | function, int *arg_types, void *data) 20 | 21 | 22 | :: 23 | 24 | int 25 | PyUFunc_GenericFunction(PyUFuncObject *ufunc, PyObject *args, PyObject 26 | *kwds, PyArrayObject **op) 27 | 28 | 29 | This generic function is called with the ufunc object, the arguments to it, 30 | and an array of (pointers to) PyArrayObjects which are NULL. 31 | 32 | 'op' is an array of at least NPY_MAXARGS PyArrayObject *. 33 | 34 | :: 35 | 36 | void 37 | PyUFunc_f_f_As_d_d(char **args, npy_intp *dimensions, npy_intp 38 | *steps, void *func) 39 | 40 | 41 | :: 42 | 43 | void 44 | PyUFunc_d_d(char **args, npy_intp *dimensions, npy_intp *steps, void 45 | *func) 46 | 47 | 48 | :: 49 | 50 | void 51 | PyUFunc_f_f(char **args, npy_intp *dimensions, npy_intp *steps, void 52 | *func) 53 | 54 | 55 | :: 56 | 57 | void 58 | PyUFunc_g_g(char **args, npy_intp *dimensions, npy_intp *steps, void 59 | *func) 60 | 61 | 62 | :: 63 | 64 | void 65 | PyUFunc_F_F_As_D_D(char **args, npy_intp *dimensions, npy_intp 66 | *steps, void *func) 67 | 68 | 69 | :: 70 | 71 | void 72 | PyUFunc_F_F(char **args, npy_intp *dimensions, npy_intp *steps, void 73 | *func) 74 | 75 | 76 | :: 77 | 78 | void 79 | PyUFunc_D_D(char **args, npy_intp *dimensions, npy_intp *steps, void 80 | *func) 81 | 82 | 83 | :: 84 | 85 | void 86 | PyUFunc_G_G(char **args, npy_intp *dimensions, npy_intp *steps, void 87 | *func) 88 | 89 | 90 | :: 91 | 92 | void 93 | PyUFunc_O_O(char **args, npy_intp *dimensions, npy_intp *steps, void 94 | *func) 95 | 96 | 97 | :: 98 | 99 | void 100 | PyUFunc_ff_f_As_dd_d(char **args, npy_intp *dimensions, npy_intp 101 | *steps, void *func) 102 | 103 | 104 | :: 105 | 106 | void 107 | PyUFunc_ff_f(char **args, npy_intp *dimensions, npy_intp *steps, void 108 | *func) 109 | 110 | 111 | :: 112 | 113 | void 114 | PyUFunc_dd_d(char **args, npy_intp *dimensions, npy_intp 
*steps, void 115 | *func) 116 | 117 | 118 | :: 119 | 120 | void 121 | PyUFunc_gg_g(char **args, npy_intp *dimensions, npy_intp *steps, void 122 | *func) 123 | 124 | 125 | :: 126 | 127 | void 128 | PyUFunc_FF_F_As_DD_D(char **args, npy_intp *dimensions, npy_intp 129 | *steps, void *func) 130 | 131 | 132 | :: 133 | 134 | void 135 | PyUFunc_DD_D(char **args, npy_intp *dimensions, npy_intp *steps, void 136 | *func) 137 | 138 | 139 | :: 140 | 141 | void 142 | PyUFunc_FF_F(char **args, npy_intp *dimensions, npy_intp *steps, void 143 | *func) 144 | 145 | 146 | :: 147 | 148 | void 149 | PyUFunc_GG_G(char **args, npy_intp *dimensions, npy_intp *steps, void 150 | *func) 151 | 152 | 153 | :: 154 | 155 | void 156 | PyUFunc_OO_O(char **args, npy_intp *dimensions, npy_intp *steps, void 157 | *func) 158 | 159 | 160 | :: 161 | 162 | void 163 | PyUFunc_O_O_method(char **args, npy_intp *dimensions, npy_intp 164 | *steps, void *func) 165 | 166 | 167 | :: 168 | 169 | void 170 | PyUFunc_OO_O_method(char **args, npy_intp *dimensions, npy_intp 171 | *steps, void *func) 172 | 173 | 174 | :: 175 | 176 | void 177 | PyUFunc_On_Om(char **args, npy_intp *dimensions, npy_intp *steps, void 178 | *func) 179 | 180 | 181 | :: 182 | 183 | int 184 | PyUFunc_GetPyValues(char *name, int *bufsize, int *errmask, PyObject 185 | **errobj) 186 | 187 | 188 | On return, if errobj is populated with a non-NULL value, the caller 189 | owns a new reference to errobj. 
190 | 191 | :: 192 | 193 | int 194 | PyUFunc_checkfperr(int errmask, PyObject *errobj, int *first) 195 | 196 | 197 | :: 198 | 199 | void 200 | PyUFunc_clearfperr() 201 | 202 | 203 | :: 204 | 205 | int 206 | PyUFunc_getfperr(void ) 207 | 208 | 209 | :: 210 | 211 | int 212 | PyUFunc_handlefperr(int errmask, PyObject *errobj, int retstatus, int 213 | *first) 214 | 215 | 216 | :: 217 | 218 | int 219 | PyUFunc_ReplaceLoopBySignature(PyUFuncObject 220 | *func, PyUFuncGenericFunction 221 | newfunc, int 222 | *signature, PyUFuncGenericFunction 223 | *oldfunc) 224 | 225 | 226 | :: 227 | 228 | PyObject * 229 | PyUFunc_FromFuncAndDataAndSignature(PyUFuncGenericFunction *func, void 230 | **data, char *types, int 231 | ntypes, int nin, int nout, int 232 | identity, char *name, char 233 | *doc, int check_return, const char 234 | *signature) 235 | 236 | 237 | :: 238 | 239 | int 240 | PyUFunc_SetUsesArraysAsData(void **data, size_t i) 241 | 242 | 243 | :: 244 | 245 | void 246 | PyUFunc_e_e(char **args, npy_intp *dimensions, npy_intp *steps, void 247 | *func) 248 | 249 | 250 | :: 251 | 252 | void 253 | PyUFunc_e_e_As_f_f(char **args, npy_intp *dimensions, npy_intp 254 | *steps, void *func) 255 | 256 | 257 | :: 258 | 259 | void 260 | PyUFunc_e_e_As_d_d(char **args, npy_intp *dimensions, npy_intp 261 | *steps, void *func) 262 | 263 | 264 | :: 265 | 266 | void 267 | PyUFunc_ee_e(char **args, npy_intp *dimensions, npy_intp *steps, void 268 | *func) 269 | 270 | 271 | :: 272 | 273 | void 274 | PyUFunc_ee_e_As_ff_f(char **args, npy_intp *dimensions, npy_intp 275 | *steps, void *func) 276 | 277 | 278 | :: 279 | 280 | void 281 | PyUFunc_ee_e_As_dd_d(char **args, npy_intp *dimensions, npy_intp 282 | *steps, void *func) 283 | 284 | 285 | :: 286 | 287 | int 288 | PyUFunc_DefaultTypeResolver(PyUFuncObject *ufunc, NPY_CASTING 289 | casting, PyArrayObject 290 | **operands, PyObject 291 | *type_tup, PyArray_Descr **out_dtypes) 292 | 293 | 294 | This function applies the default type resolution 
rules 295 | for the provided ufunc. 296 | 297 | Returns 0 on success, -1 on error. 298 | 299 | :: 300 | 301 | int 302 | PyUFunc_ValidateCasting(PyUFuncObject *ufunc, NPY_CASTING 303 | casting, PyArrayObject 304 | **operands, PyArray_Descr **dtypes) 305 | 306 | 307 | Validates that the input operands can be cast to 308 | the input types, and the output types can be cast to 309 | the output operands where provided. 310 | 311 | Returns 0 on success, -1 (with exception raised) on validation failure. 312 | 313 | -------------------------------------------------------------------------------- /include/numpy/utils.h: -------------------------------------------------------------------------------- 1 | #ifndef __NUMPY_UTILS_HEADER__ 2 | #define __NUMPY_UTILS_HEADER__ 3 | 4 | #ifndef __COMP_NPY_UNUSED 5 | #if defined(__GNUC__) 6 | #define __COMP_NPY_UNUSED __attribute__ ((__unused__)) 7 | # elif defined(__ICC) 8 | #define __COMP_NPY_UNUSED __attribute__ ((__unused__)) 9 | #else 10 | #define __COMP_NPY_UNUSED 11 | #endif 12 | #endif 13 | 14 | /* Use this to tag a variable as not used. 
It will remove unused variable 15 | * warning on support platforms (see __COM_NPY_UNUSED) and mangle the variable 16 | * to avoid accidental use */ 17 | #define NPY_UNUSED(x) (__NPY_UNUSED_TAGGED ## x) __COMP_NPY_UNUSED 18 | 19 | #endif 20 | -------------------------------------------------------------------------------- /neuralcoref/__init__.pxd: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/neuralcoref/60338df6f9b0a44a6728b442193b7c66653b0731/neuralcoref/__init__.pxd -------------------------------------------------------------------------------- /neuralcoref/__init__.py: -------------------------------------------------------------------------------- 1 | import os 2 | import tarfile 3 | import logging 4 | 5 | # Filter Cython warnings that would force everybody to re-compile from source (like https://github.com/numpy/numpy/pull/432). 6 | import warnings 7 | 8 | warnings.filterwarnings("ignore", message="spacy.strings.StringStore size changed") 9 | 10 | from neuralcoref.neuralcoref import NeuralCoref 11 | from neuralcoref.file_utils import ( 12 | NEURALCOREF_MODEL_URL, 13 | NEURALCOREF_MODEL_PATH, 14 | NEURALCOREF_CACHE, 15 | cached_path, 16 | ) 17 | 18 | __all__ = ["NeuralCoref", "add_to_pipe"] 19 | __version__ = "4.1.0" 20 | 21 | logger = logging.getLogger(__name__) 22 | 23 | if os.path.exists(NEURALCOREF_MODEL_PATH) and os.path.exists( 24 | os.path.join(NEURALCOREF_MODEL_PATH, "cfg") 25 | ): 26 | logger.info(f"Loading model from {NEURALCOREF_MODEL_PATH}") 27 | local_model = cached_path(NEURALCOREF_MODEL_PATH) 28 | else: 29 | if not os.path.exists(NEURALCOREF_MODEL_PATH): 30 | os.makedirs(NEURALCOREF_MODEL_PATH, exist_ok=True) 31 | logger.info(f"Getting model from {NEURALCOREF_MODEL_URL} or cache") 32 | downloaded_model = cached_path(NEURALCOREF_MODEL_URL) 33 | 34 | logger.info( 35 | f"extracting archive file {downloaded_model} to dir {NEURALCOREF_MODEL_PATH}" 36 | ) 37 | with 
tarfile.open(downloaded_model, "r:gz") as archive: 38 | archive.extractall(NEURALCOREF_CACHE) 39 | 40 | 41 | def add_to_pipe(nlp, **kwargs): 42 | coref = NeuralCoref(nlp.vocab, **kwargs) 43 | nlp.add_pipe(coref, name="neuralcoref") 44 | return nlp 45 | -------------------------------------------------------------------------------- /neuralcoref/file_utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | Utilities for working with the local dataset cache. 3 | This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp 4 | Copyright by the AllenNLP authors. 5 | """ 6 | 7 | import json 8 | import logging 9 | import os 10 | import shutil 11 | import tempfile 12 | from functools import wraps 13 | from hashlib import sha256 14 | from io import open 15 | 16 | import boto3 17 | import requests 18 | from botocore.exceptions import ClientError 19 | from tqdm import tqdm 20 | 21 | try: 22 | from urllib.parse import urlparse 23 | except ImportError: 24 | from urlparse import urlparse 25 | 26 | NEURALCOREF_CACHE = os.getenv( 27 | "NEURALCOREF_CACHE", os.path.join(os.path.expanduser("~"), ".neuralcoref_cache") 28 | ) 29 | 30 | NEURALCOREF_MODEL_URL = ( 31 | "https://s3.amazonaws.com/models.huggingface.co/neuralcoref/neuralcoref.tar.gz" 32 | ) 33 | NEURALCOREF_MODEL_PATH = os.path.join(str(NEURALCOREF_CACHE), "neuralcoref") 34 | 35 | logger = logging.getLogger(__name__) # pylint: disable=invalid-name 36 | 37 | 38 | def url_to_filename(url, etag=None): 39 | """ 40 | Convert `url` into a hashed filename in a repeatable way. 41 | If `etag` is specified, append its hash to the url's, delimited 42 | by a period. 43 | """ 44 | url_bytes = url.encode("utf-8") 45 | url_hash = sha256(url_bytes) 46 | filename = url_hash.hexdigest() 47 | 48 | if etag: 49 | etag_bytes = etag.encode("utf-8") 50 | etag_hash = sha256(etag_bytes) 51 | filename += "." 
+ etag_hash.hexdigest() 52 | 53 | return filename 54 | 55 | 56 | def filename_to_url(filename, cache_dir=None): 57 | """ 58 | Return the url and etag (which may be ``None``) stored for `filename`. 59 | Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist. 60 | """ 61 | if cache_dir is None: 62 | cache_dir = NEURALCOREF_CACHE 63 | 64 | cache_path = os.path.join(cache_dir, filename) 65 | if not os.path.exists(cache_path): 66 | raise EnvironmentError(f"file {cache_path} not found") 67 | 68 | meta_path = cache_path + ".json" 69 | if not os.path.exists(meta_path): 70 | raise EnvironmentError(f"file {meta_path} not found") 71 | 72 | with open(meta_path, encoding="utf-8") as meta_file: 73 | metadata = json.load(meta_file) 74 | url = metadata["url"] 75 | etag = metadata["etag"] 76 | 77 | return url, etag 78 | 79 | 80 | def cached_path(url_or_filename, cache_dir=None): 81 | """ 82 | Given something that might be a URL (or might be a local path), 83 | determine which. If it's a URL, download the file and cache it, and 84 | return the path to the cached file. If it's already a local path, 85 | make sure the file exists and then return the path. 86 | """ 87 | if cache_dir is None: 88 | cache_dir = NEURALCOREF_CACHE 89 | 90 | parsed = urlparse(url_or_filename) 91 | 92 | if parsed.scheme in ("http", "https", "s3"): 93 | # URL, so get it from the cache (downloading if necessary) 94 | return get_from_cache(url_or_filename, cache_dir) 95 | elif os.path.exists(url_or_filename): 96 | # File, and it exists. 97 | return url_or_filename 98 | elif parsed.scheme == "": 99 | # File, but it doesn't exist. 
100 | raise EnvironmentError(f"file {url_or_filename} not found") 101 | else: 102 | # Something unknown 103 | raise ValueError( 104 | f"unable to parse {url_or_filename} as a URL or as a local path" 105 | ) 106 | 107 | 108 | def split_s3_path(url): 109 | """Split a full s3 path into the bucket name and path.""" 110 | parsed = urlparse(url) 111 | if not parsed.netloc or not parsed.path: 112 | raise ValueError(f"bad s3 path {url}") 113 | bucket_name = parsed.netloc 114 | s3_path = parsed.path 115 | # Remove '/' at beginning of path. 116 | if s3_path.startswith("/"): 117 | s3_path = s3_path[1:] 118 | return bucket_name, s3_path 119 | 120 | 121 | def s3_request(func): 122 | """ 123 | Wrapper function for s3 requests in order to create more helpful error 124 | messages. 125 | """ 126 | 127 | @wraps(func) 128 | def wrapper(url, *args, **kwargs): 129 | try: 130 | return func(url, *args, **kwargs) 131 | except ClientError as exc: 132 | if int(exc.response["Error"]["Code"]) == 404: 133 | raise EnvironmentError(f"file {url} not found") 134 | else: 135 | raise 136 | 137 | return wrapper 138 | 139 | 140 | @s3_request 141 | def s3_etag(url): 142 | """Check ETag on S3 object.""" 143 | s3_resource = boto3.resource("s3") 144 | bucket_name, s3_path = split_s3_path(url) 145 | s3_object = s3_resource.Object(bucket_name, s3_path) 146 | return s3_object.e_tag 147 | 148 | 149 | @s3_request 150 | def s3_get(url, temp_file): 151 | """Pull a file directly from S3.""" 152 | s3_resource = boto3.resource("s3") 153 | bucket_name, s3_path = split_s3_path(url) 154 | s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file) 155 | 156 | 157 | def http_get(url, temp_file): 158 | req = requests.get(url, stream=True) 159 | content_length = req.headers.get("Content-Length") 160 | total = int(content_length) if content_length is not None else None 161 | progress = tqdm(unit="B", total=total) 162 | for chunk in req.iter_content(chunk_size=1024): 163 | if chunk: # filter out keep-alive new 
chunks 164 | progress.update(len(chunk)) 165 | temp_file.write(chunk) 166 | progress.close() 167 | 168 | 169 | def get_from_cache(url, cache_dir=None): 170 | """ 171 | Given a URL, look for the corresponding dataset in the local cache. 172 | If it's not there, download it. Then return the path to the cached file. 173 | """ 174 | if cache_dir is None: 175 | cache_dir = NEURALCOREF_CACHE 176 | 177 | if not os.path.exists(cache_dir): 178 | os.makedirs(cache_dir) 179 | 180 | # Get eTag to add to filename, if it exists. 181 | if url.startswith("s3://"): 182 | etag = s3_etag(url) 183 | else: 184 | response = requests.head(url, allow_redirects=True) 185 | if response.status_code != 200: 186 | raise IOError( 187 | f"HEAD request failed for url {url} with status code {response.status_code}" 188 | ) 189 | etag = response.headers.get("ETag") 190 | 191 | filename = url_to_filename(url, etag) 192 | 193 | # get cache path to put the file 194 | cache_path = os.path.join(cache_dir, filename) 195 | 196 | if not os.path.exists(cache_path): 197 | # Download to temporary file, then copy to cache dir once finished. 198 | # Otherwise you get corrupt cache entries if the download gets interrupted. 
199 | with tempfile.NamedTemporaryFile() as temp_file: 200 | logger.info("%s not found in cache, downloading to %s", url, temp_file.name) 201 | 202 | # GET file object 203 | if url.startswith("s3://"): 204 | s3_get(url, temp_file) 205 | else: 206 | http_get(url, temp_file) 207 | 208 | # we are copying the file before closing it, so flush to avoid truncation 209 | temp_file.flush() 210 | # shutil.copyfileobj() starts at the current position, so go to the start 211 | temp_file.seek(0) 212 | 213 | logger.info("copying %s to cache at %s", temp_file.name, cache_path) 214 | with open(cache_path, "wb") as cache_file: 215 | shutil.copyfileobj(temp_file, cache_file) 216 | 217 | logger.info("creating metadata file for %s", cache_path) 218 | meta = {"url": url, "etag": etag} 219 | meta_path = cache_path + ".json" 220 | with open(meta_path, "w", encoding="utf-8") as meta_file: 221 | json.dump(meta, meta_file) 222 | 223 | logger.info("removing temp file %s", temp_file.name) 224 | 225 | return cache_path 226 | 227 | 228 | def read_set_from_file(filename): 229 | """ 230 | Extract a de-duped collection (set) of text from a file. 231 | Expected file format is one item per line. 
232 | """ 233 | collection = set() 234 | with open(filename, "r", encoding="utf-8") as file_: 235 | for line in file_: 236 | collection.add(line.rstrip()) 237 | return collection 238 | 239 | 240 | def get_file_extension(path, dot=True, lower=True): 241 | ext = os.path.splitext(path)[1] 242 | ext = ext if dot else ext[1:] 243 | return ext.lower() if lower else ext 244 | -------------------------------------------------------------------------------- /neuralcoref/neuralcoref.pxd: -------------------------------------------------------------------------------- 1 | from spacy.tokens.doc cimport Doc 2 | from spacy.tokens.span cimport Span 3 | from spacy.typedefs cimport flags_t, attr_t, hash_t 4 | from spacy.vectors import Vectors 5 | from spacy.vocab cimport Vocab 6 | from spacy.structs cimport TokenC, LexemeC 7 | from cymem.cymem cimport Pool 8 | 9 | cdef struct SpanC: 10 | int start 11 | int end 12 | 13 | cdef struct SentSpans: 14 | SpanC* spans 15 | int max_spans 16 | int num 17 | 18 | cdef struct Hashes: 19 | hash_t* arr 20 | int length 21 | 22 | cdef struct HashesList: 23 | Hashes no_coref_list 24 | Hashes keep_tags 25 | Hashes PRP_tags 26 | Hashes leave_dep 27 | Hashes keep_dep 28 | Hashes nsubj_or_dep 29 | Hashes conj_or_prep 30 | Hashes remove_pos 31 | Hashes lower_not_end 32 | Hashes conj_tags 33 | Hashes proper_tags 34 | Hashes puncts 35 | hash_t POSSESSIVE_MARK 36 | hash_t NSUBJ_MARK 37 | hash_t IN_TAG 38 | hash_t MARK_DEP 39 | hash_t missing_word 40 | hash_t digit_word 41 | hash_t unknown_word 42 | 43 | cdef struct Mention_C: 44 | hash_t entity_label 45 | int span_root 46 | int span_start 47 | int span_end 48 | int sent_idx 49 | int sent_start 50 | int sent_end 51 | int mention_type 52 | hash_t root_lower 53 | hash_t span_lower 54 | Hashes content_words 55 | 56 | cdef class NeuralCoref(object): 57 | cdef HashesList hashes 58 | cdef readonly Vocab vocab 59 | cdef readonly object cfg 60 | cdef readonly object cfg_inference 61 | cdef public object model 62 | 
cdef public object static_vectors 63 | cdef public object tuned_vectors 64 | cdef public object conv_dict 65 | -------------------------------------------------------------------------------- /neuralcoref/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/neuralcoref/60338df6f9b0a44a6728b442193b7c66653b0731/neuralcoref/tests/__init__.py -------------------------------------------------------------------------------- /neuralcoref/tests/test_neuralcoref.py: -------------------------------------------------------------------------------- 1 | import spacy 2 | from ..__init__ import add_to_pipe 3 | 4 | 5 | def test_add_pipe(): 6 | nlp = spacy.lang.en.English() 7 | add_to_pipe(nlp) 8 | assert "neuralcoref" in nlp.pipe_names 9 | -------------------------------------------------------------------------------- /neuralcoref/train/__init__.pxd: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/neuralcoref/60338df6f9b0a44a6728b442193b7c66653b0731/neuralcoref/train/__init__.pxd -------------------------------------------------------------------------------- /neuralcoref/train/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/neuralcoref/60338df6f9b0a44a6728b442193b7c66653b0731/neuralcoref/train/__init__.py -------------------------------------------------------------------------------- /neuralcoref/train/checkpoints/.gitignore: -------------------------------------------------------------------------------- 1 | # Ignore everything in this directory 2 | * 3 | # Except this file 4 | !.gitignore -------------------------------------------------------------------------------- /neuralcoref/train/compat.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | is_windows = 
sys.platform.startswith("win") 4 | is_linux = sys.platform.startswith("linux") 5 | is_osx = sys.platform == "darwin" 6 | 7 | 8 | # Python 3 is default, Python 2 is not supported anymore 9 | unicode_ = str 10 | bytes_ = bytes 11 | string_types = (bytes, str) 12 | chr_ = chr 13 | 14 | 15 | def unicode_to_bytes(s, encoding="utf8", errors="strict"): 16 | return s.encode(encoding=encoding, errors=errors) 17 | 18 | 19 | def bytes_to_unicode(b, encoding="utf8", errors="strict"): 20 | return b.decode(encoding=encoding, errors=errors) 21 | -------------------------------------------------------------------------------- /neuralcoref/train/conll_processing_script/compile_coref_data.sh: -------------------------------------------------------------------------------- 1 | 2 | #!/bin/bash 3 | 4 | # Script from the Allen NLP research library (https://github.com/allenai/allennlp): 5 | # https://github.com/allenai/allennlp/blob/master/scripts/compile_coref_data.sh 6 | 7 | # This script downloads and compiles the Ontonotes 2012 data in a helpful format 8 | # for co-reference resolution. It generates 3 files: {train, dev, test}.english.v4_gold_conll, 9 | # as well as a directory 'conll-2012' which contains the raw extracted data. 10 | # The script downloads and runs some python scripts which require python 2.X. 11 | 12 | ONTONOTES_PATH=$1 13 | 14 | if [ ! 
-n "$ONTONOTES_PATH" ] ; then 15 | echo "USAGE: ./compile_coref_data.sh /path/to/ontonotes/data" 16 | exit 1 17 | fi 18 | 19 | function download_and_extract() { 20 | wget $1/$2 21 | tar -xvzf $2 22 | rm $2 23 | } 24 | 25 | function compile_partition() { 26 | rm -f $2.$5.$3$4 27 | cat conll-2012/$3/data/$1/data/$5/annotations/*/*/*/*.$3$4 >> $2.$5.$3$4 28 | } 29 | 30 | function compile_language() { 31 | compile_partition development dev v4 _gold_conll $1 32 | compile_partition train train v4 _gold_conll $1 33 | compile_partition test test v4 _gold_conll $1 34 | } 35 | 36 | conll_url=http://conll.cemantix.org/2012/download 37 | download_and_extract $conll_url conll-2012-train.v4.tar.gz 38 | download_and_extract $conll_url conll-2012-development.v4.tar.gz 39 | download_and_extract $conll_url/test conll-2012-test-key.tar.gz 40 | download_and_extract $conll_url/test conll-2012-test-official.v9.tar.gz 41 | 42 | download_and_extract $conll_url conll-2012-scripts.v3.tar.gz 43 | 44 | download_and_extract http://conll.cemantix.org/download reference-coreference-scorers.v8.01.tar.gz 45 | mv reference-coreference-scorers conll-2012/scorer 46 | 47 | # Convert the ontonotes data into the CONLL format. 
48 | bash conll-2012/v3/scripts/skeleton2conll.sh -D $ONTONOTES_PATH/data/files/data conll-2012 49 | 50 | compile_language english 51 | -------------------------------------------------------------------------------- /neuralcoref/train/data/.gitignore: -------------------------------------------------------------------------------- 1 | # Ignore everything in this directory 2 | * 3 | # Except this file 4 | !.gitignore -------------------------------------------------------------------------------- /neuralcoref/train/model.py: -------------------------------------------------------------------------------- 1 | """Conll training algorithm""" 2 | 3 | import os 4 | import numpy as np 5 | 6 | import torch 7 | import torch.nn as nn 8 | import torch.utils.data 9 | 10 | 11 | class Model(nn.Module): 12 | def __init__( 13 | self, vocab_size, embedding_dim, H1, H2, H3, D_pair_in, D_single_in, dropout=0.5 14 | ): 15 | super(Model, self).__init__() 16 | self.word_embeds = nn.Embedding(vocab_size, embedding_dim) 17 | self.drop = nn.Dropout(dropout) 18 | self.pair_top = nn.Sequential( 19 | nn.Linear(D_pair_in, H1), 20 | nn.ReLU(), 21 | nn.Dropout(dropout), 22 | nn.Linear(H1, H2), 23 | nn.ReLU(), 24 | nn.Dropout(dropout), 25 | nn.Linear(H2, H3), 26 | nn.ReLU(), 27 | nn.Dropout(dropout), 28 | nn.Linear(H3, 1), 29 | nn.Linear(1, 1), 30 | ) 31 | self.single_top = nn.Sequential( 32 | nn.Linear(D_single_in, H1), 33 | nn.ReLU(), 34 | nn.Dropout(dropout), 35 | nn.Linear(H1, H2), 36 | nn.ReLU(), 37 | nn.Dropout(dropout), 38 | nn.Linear(H2, H3), 39 | nn.ReLU(), 40 | nn.Dropout(dropout), 41 | nn.Linear(H3, 1), 42 | nn.Linear(1, 1), 43 | ) 44 | self.init_weights() 45 | 46 | def init_weights(self): 47 | w = (param.data for name, param in self.named_parameters() if "weight" in name) 48 | b = (param.data for name, param in self.named_parameters() if "bias" in name) 49 | nn.init.uniform_(self.word_embeds.weight.data, a=-0.5, b=0.5) 50 | for t in w: 51 | nn.init.xavier_uniform_(t) 52 | for t in b: 
53 | nn.init.constant_(t, 0) 54 | 55 | def load_embeddings(self, preloaded_weights): 56 | self.word_embeds.weight = nn.Parameter(preloaded_weights) 57 | 58 | def load_weights(self, weights_path): 59 | print("Loading weights") 60 | single_layers_weights, single_layers_biases = [], [] 61 | for f in sorted(os.listdir(weights_path)): 62 | if f.startswith("single_mention_weights"): 63 | single_layers_weights.append(np.load(os.path.join(weights_path, f))) 64 | if f.startswith("single_mention_bias"): 65 | single_layers_biases.append(np.load(os.path.join(weights_path, f))) 66 | top_single_linear = ( 67 | layer for layer in self.single_top if isinstance(layer, nn.Linear) 68 | ) 69 | for w, b, layer in zip( 70 | single_layers_weights, single_layers_biases, top_single_linear 71 | ): 72 | layer.weight = nn.Parameter(torch.from_numpy(w).float()) 73 | layer.bias = nn.Parameter(torch.from_numpy(b).float().squeeze()) 74 | pair_layers_weights, pair_layers_biases = [], [] 75 | for f in sorted(os.listdir(weights_path)): 76 | if f.startswith("pair_mentions_weights"): 77 | pair_layers_weights.append(np.load(os.path.join(weights_path, f))) 78 | if f.startswith("pair_mentions_bias"): 79 | pair_layers_biases.append(np.load(os.path.join(weights_path, f))) 80 | top_pair_linear = ( 81 | layer for layer in self.pair_top if isinstance(layer, nn.Linear) 82 | ) 83 | for w, b, layer in zip( 84 | pair_layers_weights, pair_layers_biases, top_pair_linear 85 | ): 86 | layer.weight = nn.Parameter(torch.from_numpy(w).float()) 87 | layer.bias = nn.Parameter(torch.from_numpy(b).float().squeeze()) 88 | 89 | def forward(self, inputs, concat_axis=1): 90 | pairs = len(inputs) == 8 91 | if pairs: 92 | spans, words, single_features, ant_spans, ant_words, ana_spans, ana_words, pair_features = ( 93 | inputs 94 | ) 95 | else: 96 | spans, words, single_features = inputs 97 | words = words.type(torch.LongTensor) 98 | if torch.cuda.is_available(): 99 | words = words.cuda() 100 | embed_words = 
self.drop(self.word_embeds(words).view(words.size()[0], -1)) 101 | single_input = torch.cat([spans, embed_words, single_features], 1) 102 | single_scores = self.single_top(single_input) 103 | if pairs: 104 | batchsize, pairs_num, _ = ana_spans.size() 105 | ant_words_long = ant_words.view(batchsize, -1).type(torch.LongTensor) 106 | ana_words_long = ana_words.view(batchsize, -1).type(torch.LongTensor) 107 | if torch.cuda.is_available(): 108 | ant_words_long = ant_words_long.cuda() 109 | ana_words_long = ana_words_long.cuda() 110 | ant_embed_words = self.drop( 111 | self.word_embeds(ant_words_long).view(batchsize, pairs_num, -1) 112 | ) 113 | ana_embed_words = self.drop( 114 | self.word_embeds(ana_words_long).view(batchsize, pairs_num, -1) 115 | ) 116 | pair_input = torch.cat( 117 | [ant_spans, ant_embed_words, ana_spans, ana_embed_words, pair_features], 118 | 2, 119 | ) 120 | pair_scores = self.pair_top(pair_input).squeeze(dim=2) 121 | total_scores = torch.cat([pair_scores, single_scores], concat_axis) 122 | return total_scores if pairs else single_scores 123 | -------------------------------------------------------------------------------- /neuralcoref/train/runs/.gitignore: -------------------------------------------------------------------------------- 1 | # Ignore everything in this directory 2 | * 3 | # Except this file 4 | !.gitignore -------------------------------------------------------------------------------- /neuralcoref/train/scorer/README.txt: -------------------------------------------------------------------------------- 1 | NAME 2 | CorScorer: Perl package for scoring coreference resolution systems 3 | using different metrics. 4 | 5 | 6 | VERSION 7 | v8.01 -- reference implementations of MUC, B-cubed, CEAF and BLANC metrics. 8 | 9 | 10 | CHANGES SINCE v8.0 11 | - fixed a bug that crashed the BLANC scorer when a duplicate singleton 12 | mention was present in the response. 13 | 14 | INSTALLATION 15 | Requirements: 16 | 1. 
Perl: downloadable from http://perl.org 17 | 2. Algorithm-Munkres: included in this package and downloadable 18 | from CPAN http://search.cpan.org/~tpederse/Algorithm-Munkres-0.08 19 | 20 | USE 21 | This package is distributed with two scripts to execute the scorer from 22 | the command line. 23 | 24 | Windows (tm): scorer.bat 25 | Linux: scorer.pl 26 | 27 | 28 | SYNOPSIS 29 | use CorScorer; 30 | 31 | $metric = 'ceafm'; 32 | 33 | # Scores the whole dataset 34 | &CorScorer::Score($metric, $keys_file, $response_file); 35 | 36 | # Scores one file 37 | &CorScorer::Score($metric, $keys_file, $response_file, $name); 38 | 39 | 40 | INPUT 41 | metric: the metric desired to score the results: 42 | muc: MUCScorer (Vilain et al, 1995) 43 | bcub: B-Cubed (Bagga and Baldwin, 1998) 44 | ceafm: CEAF (Luo et al., 2005) using mention-based similarity 45 | ceafe: CEAF (Luo et al., 2005) using entity-based similarity 46 | blanc: BLANC (Luo et al., 2014) BLANC metric for gold and predicted mentions 47 | all: uses all the metrics to score 48 | 49 | keys_file: file with expected coreference chains in CoNLL-2011/2012 format 50 | 51 | response_file: file with output of coreference system (CoNLL-2011/2012 format) 52 | 53 | name: [optional] the name of the document to score. If name is not 54 | given, all the documents in the dataset will be scored. If given 55 | name is "none" then all the documents are scored but only total 56 | results are shown. 57 | 58 | 59 | OUTPUT 60 | The score subroutine returns an array with four values in this order: 61 | 1) Recall numerator 62 | 2) Recall denominator 63 | 3) Precision numerator 64 | 4) Precision denominator 65 | 66 | Also recall, precision and F1 are printed in the standard output when variable 67 | $VERBOSE is not null. 
68 | 69 | Final scores: 70 | Recall = recall_numerator / recall_denominator 71 | Precision = precision_numerator / precision_denominator 72 | F1 = 2 * Recall * Precision / (Recall + Precision) 73 | 74 | Identification of mentions 75 | An scorer for identification of mentions (recall, precision and F1) is also included. 76 | Mentions from system response are compared with key mentions. This version performs 77 | strict mention matching as was used in the CoNLL-2011 and 2012 shared tasks. 78 | 79 | AUTHORS 80 | Emili Sapena, Universitat Politècnica de Catalunya, http://www.lsi.upc.edu/~esapena, esapena lsi.upc.edu 81 | Sameer Pradhan, sameer.pradhan childrens.harvard.edu 82 | Sebastian Martschat, sebastian.martschat h-its.org 83 | Xiaoqiang Luo, xql google.com 84 | 85 | COPYRIGHT AND LICENSE 86 | Copyright (C) 2009-2011, Emili Sapena esapena lsi.upc.edu 87 | 2011-2014, Sameer Pradhan sameer.pradhan childrens.harvard.edu 88 | 89 | This program is free software; you can redistribute it and/or modify it 90 | under the terms of the GNU General Public License as published by the 91 | Free Software Foundation; either version 2 of the License, or (at your 92 | option) any later version. This program is distributed in the hope that 93 | it will be useful, but WITHOUT ANY WARRANTY; without even the implied 94 | warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 95 | GNU General Public License for more details. 96 | 97 | You should have received a copy of the GNU General Public License along 98 | with this program; if not, write to the Free Software Foundation, Inc., 99 | 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
100 | 101 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/lib/Algorithm/README.Munkres: -------------------------------------------------------------------------------- 1 | NAME 2 | Algorithm-Munkres : Perl extension for Munkres' solution to 3 | classical Assignment problem for square and rectangular matrices 4 | This module extends the solution of Assignment problem for square 5 | matrices to rectangular matrices by padding zeros. Thus a rectangular 6 | matrix is converted to square matrix by padding necessary zeros. 7 | 8 | SYNOPSIS 9 | use Algorithm::Munkres; 10 | 11 | @mat = ( 12 | [2, 4, 7, 9], 13 | [3, 9, 5, 1], 14 | [8, 2, 9, 7], 15 | ); 16 | 17 | assign(\@mat,\@out_mat); 18 | 19 | Then the @out_mat array will have the output as: (0,3,1,2), 20 | where 21 | 0th element indicates that 0th row is assigned 0th column i.e value=2 22 | 1st element indicates that 1st row is assigned 3rd column i.e.value=1 23 | 2nd element indicates that 2nd row is assigned 1st column.i.e.value=2 24 | 3rd element indicates that 3rd row is assigned 2nd column.i.e.value=0 25 | 26 | DESCRIPTION 27 | Assignment Problem: Given N jobs, N workers and the time taken by 28 | each worker to complete a job then how should the assignment of a 29 | Worker to a Job be done, so as to minimize the time taken. 30 | 31 | Thus if we have 3 jobs p,q,r and 3 workers x,y,z such that: 32 | x y z 33 | p 2 4 7 34 | q 3 9 5 35 | r 8 2 9 36 | 37 | where the cell values of the above matrix give the time required 38 | for the worker(given by column name) to complete the job(given by 39 | the row name) 40 | 41 | then possible solutions are: 42 | Total 43 | 1. 2, 9, 9 20 44 | 2. 2, 2, 5 9 45 | 3. 3, 4, 9 16 46 | 4. 3, 2, 7 12 47 | 5. 8, 9, 7 24 48 | 6. 8, 4, 5 17 49 | 50 | Thus (2) is the optimal solution for the above problem. 
51 | This kind of brute-force approach of solving Assignment problem 52 | quickly becomes slow and bulky as N grows, because the number of 53 | possible solution are N! and thus the task is to evaluate each 54 | and then find the optimal solution.(If N=10, number of possible 55 | solutions: 3628800 !) 56 | Munkres' gives us a solution to this problem, which is implemented 57 | in this module. 58 | 59 | This module also solves Assignment problem for rectangular matrices 60 | (M x N) by converting them to square matrices by padding zeros. ex: 61 | If input matrix is: 62 | [2, 4, 7, 9], 63 | [3, 9, 5, 1], 64 | [8, 2, 9, 7] 65 | i.e 3 x 4 then we will convert it to 4 x 4 and the modified input 66 | matrix will be: 67 | [2, 4, 7, 9], 68 | [3, 9, 5, 1], 69 | [8, 2, 9, 7], 70 | [0, 0, 0, 0] 71 | 72 | EXPORT 73 | "assign" function by default. 74 | 75 | INPUT 76 | The input matrix should be in a two dimensional array(array of 77 | array) and the 'assign' subroutine expects a reference to this 78 | array and not the complete array. 79 | eg:assign(\@inp_mat, \@out_mat); 80 | The second argument to the assign subroutine is the reference 81 | to the output array. 82 | 83 | OUTPUT 84 | The assign subroutine expects references to two arrays as its 85 | input paramenters. The second parameter is the reference to the 86 | output array. This array is populated by assign subroutine. This 87 | array is single dimensional Nx1 matrix. 88 | For above example the output array returned will be: 89 | (0, 90 | 2, 91 | 1) 92 | 93 | where 94 | 0th element indicates that 0th row is assigned 0th column i.e value=2 95 | 1st element indicates that 1st row is assigned 2nd column i.e.value=5 96 | 2nd element indicates that 2nd row is assigned 1st column.i.e.value=2 97 | 98 | SEE ALSO 99 | 1. http://216.249.163.93/bob.pilgrim/445/munkres.html 100 | 101 | 2. Munkres, J. Algorithms for the assignment and transportation 102 | Problems. J. Siam 5 (Mar. 1957), 32-38 103 | 104 | 3. 
François Bourgeois and Jean-Claude Lassalle. 1971. 105 | An extension of the Munkres algorithm for the assignment 106 | problem to rectangular matrices. 107 | Communication ACM, 14(12):802-804 108 | 109 | AUTHOR 110 | Anagha Kulkarni, University of Minnesota Duluth 111 | kulka020 d.umn.edu 112 | 113 | Ted Pedersen, University of Minnesota Duluth 114 | tpederse d.umn.edu 115 | 116 | COPYRIGHT AND LICENSE 117 | Copyright (C) 2007-2008, Ted Pedersen and Anagha Kulkarni 118 | 119 | This program is free software; you can redistribute it and/or modify it 120 | under the terms of the GNU General Public License as published by the 121 | Free Software Foundation; either version 2 of the License, or (at your 122 | option) any later version. This program is distributed in the hope that 123 | it will be useful, but WITHOUT ANY WARRANTY; without even the implied 124 | warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 125 | GNU General Public License for more details. 126 | 127 | You should have received a copy of the GNU General Public License along 128 | with this program; if not, write to the Free Software Foundation, Inc., 129 | 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 130 | 131 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/scorer.bat: -------------------------------------------------------------------------------- 1 | @rem = '--*-Perl-*-- 2 | @echo off 3 | if "%OS%" == "Windows_NT" goto WinNT 4 | perl -x -S "%0" %1 %2 %3 %4 %5 %6 %7 %8 %9 5 | goto endofperl 6 | :WinNT 7 | perl -x -S %0 %* 8 | if NOT "%COMSPEC%" == "%SystemRoot%\system32\cmd.exe" goto endofperl 9 | if %errorlevel% == 9009 echo You do not have Perl in your PATH. 
10 | if errorlevel 1 goto script_failed_so_exit_with_non_zero_val 2>nul 11 | goto endofperl 12 | @rem '; 13 | #!perl 14 | #line 15 15 | 16 | BEGIN { 17 | $d = $0; 18 | $d =~ s/\/[^\/][^\/]*$//g; 19 | push(@INC, $d."/lib"); 20 | } 21 | 22 | use strict; 23 | use CorScorer; 24 | 25 | if (@ARGV < 3) { 26 | print q| 27 | use: scorer.bat [name] 28 | 29 | metric: the metric desired to score the results: 30 | muc: MUCScorer (Vilain et al, 1995) 31 | bcub: B-Cubed (Bagga and Baldwin, 1998) 32 | ceafm: CEAF (Luo et al, 2005) using mention-based similarity 33 | ceafe: CEAF (Luo et al, 2005) using entity-based similarity 34 | all: uses all the metrics to score 35 | 36 | keys_file: file with expected coreference chains in SemEval format 37 | 38 | response_file: file with output of coreference system (SemEval format) 39 | 40 | name: [optional] the name of the document to score. If name is not 41 | given, all the documents in the dataset will be scored. If given 42 | name is "none" then all the documents are scored but only total 43 | results are shown. 44 | 45 | |; 46 | exit; 47 | } 48 | 49 | my $metric = shift (@ARGV); 50 | if ($metric !~ /^(muc|bcub|ceafm|ceafe|all)/i) { 51 | print "Invalid metric\n"; 52 | exit; 53 | } 54 | 55 | 56 | if ($metric eq 'all') { 57 | foreach my $m ('muc', 'bcub', 'ceafm', 'ceafe') { 58 | print "\nMETRIC $m:\n"; 59 | &CorScorer::Score( $m, @ARGV ); 60 | } 61 | } 62 | else { 63 | &CorScorer::Score( $metric, @ARGV ); 64 | } 65 | 66 | __END__ 67 | :endofperl 68 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/scorer.pl: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl 2 | 3 | BEGIN { 4 | $d = $0; 5 | $d =~ s/\/[^\/][^\/]*$//g; 6 | 7 | if ($d eq $0) { 8 | unshift(@INC, "lib"); 9 | } 10 | else { 11 | unshift(@INC, $d . 
"/lib"); 12 | } 13 | } 14 | 15 | use strict; 16 | use CorScorer; 17 | 18 | if (@ARGV < 3) { 19 | print q| 20 | use: scorer.pl [name] 21 | 22 | metric: the metric desired to score the results: 23 | muc: MUCScorer (Vilain et al, 1995) 24 | bcub: B-Cubed (Bagga and Baldwin, 1998) 25 | ceafm: CEAF (Luo et al, 2005) using mention-based similarity 26 | ceafe: CEAF (Luo et al, 2005) using entity-based similarity 27 | blanc: BLANC 28 | all: uses all the metrics to score 29 | 30 | keys_file: file with expected coreference chains in SemEval format 31 | 32 | response_file: file with output of coreference system (SemEval format) 33 | 34 | name: [optional] the name of the document to score. If name is not 35 | given, all the documents in the dataset will be scored. If given 36 | name is "none" then all the documents are scored but only total 37 | results are shown. 38 | 39 | |; 40 | exit; 41 | } 42 | 43 | my $metric = shift(@ARGV); 44 | if ($metric !~ /^(muc|bcub|ceafm|ceafe|blanc|all)/i) { 45 | print "Invalid metric\n"; 46 | exit; 47 | } 48 | 49 | if ($metric eq 'all') { 50 | foreach my $m ('muc', 'bcub', 'ceafm', 'ceafe', 'blanc') { 51 | print "\nMETRIC $m:\n"; 52 | &CorScorer::Score($m, @ARGV); 53 | } 54 | } 55 | else { 56 | &CorScorer::Score($metric, @ARGV); 57 | } 58 | 59 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/CorefMetricTest.pm: -------------------------------------------------------------------------------- 1 | package CorefMetricTest; 2 | use strict; 3 | use warnings; 4 | use Exporter; 5 | 6 | our @ISA= qw(Exporter); 7 | our @EXPORT = qw(ComputeScoreFromCounts DiffExpectedAndActual); 8 | 9 | ################################################################################ 10 | # Compute recall, precision and F1. 
11 | # 12 | # Input: (numerator_counts_for_recall, denominator_counts_for_recall, 13 | # numerator_counts_for_precision, denominator_counts_for_precision) 14 | # Output: (recall, precision, F1) 15 | ################################################################################ 16 | sub ComputeScoreFromCounts { 17 | # The first 4 are also coref link counts when using BLANC. 18 | my ($recall_numerator, $recall_denominator, 19 | $precision_numerator, $precision_denominator, @noncoref_counts) = @_; 20 | # The coref recall, precision, and F1 when using BLANC. 21 | my ($recall, $precision, $F1) = 22 | RPFFromCounts($recall_numerator, $recall_denominator, 23 | $precision_numerator, $precision_denominator); 24 | 25 | # BLANC: @noncoref_counts= 26 | # (noncoref_numerator_recall, noncoref_denominator_recall, 27 | # noncoref_numerator_precision, noncoref_denominator_precision) 28 | if (scalar(@noncoref_counts) == 4) { 29 | ($recall, $precision, $F1) = CorScorer::ComputeBLANCFromCounts( 30 | $recall_numerator, $recall_denominator, $precision_denominator, 31 | $noncoref_counts[0], $noncoref_counts[1], $noncoref_counts[3]); 32 | } 33 | $recall = ($recall < 0) ? 0 : $recall; 34 | $precision = ($precision < 0) ? 0 : $precision; 35 | $F1 = ($F1 < 0) ? 0 : $F1; 36 | return ($recall, $precision, $F1); 37 | } 38 | 39 | sub RPFFromCounts 40 | { 41 | my ($recall_numerator, $recall_denominator, 42 | $precision_numerator, $precision_denominator, @nonCorefCounts) = @_; 43 | my ($recall, $precision, $F1) = (-1, -1, 0); 44 | if ($recall_denominator > 0) { 45 | $recall = $recall_numerator / $recall_denominator; 46 | } 47 | if ($precision_denominator > 0) { 48 | $precision = $precision_numerator / $precision_denominator; 49 | } 50 | 51 | if (($recall + $precision) > 0) { 52 | $F1 = 2 * $recall * $precision / ($recall + $precision); 53 | } 54 | 55 | return ($recall, $precision, $F1); 56 | } 57 | 58 | # deprecated -- see CorScorer::ComputeBLANCFromCounts(). 
59 | sub ComputeBLANCRPF 60 | { 61 | my ($coref_recall, $coref_precision, $coref_F1, 62 | $noncoref_recall, $noncoref_precision, $noncoref_F1) = @_; 63 | 64 | my ($recall, $precision, $F1); 65 | 66 | if ($coref_recall < 0 && $noncoref_recall < 0) { 67 | # no key mention. 68 | $recall = $precision = $F1 = 0; 69 | } elsif ($coref_recall < 0) { 70 | # key: all links are non-coref (mentions are all singltons). 71 | $recall = $noncoref_recall; 72 | $precision = ($noncoref_precision < 0) ? 0 : $noncoref_precision; 73 | $F1 = $noncoref_F1; 74 | } elsif ($noncoref_recall < 0) { 75 | # key: all links are coref (all mentions are in one entity). 76 | $recall = $coref_recall; 77 | $precision = ($coref_precision < 0) ? 0 : $coref_precision; 78 | $F1 = $coref_F1; 79 | } else { 80 | #key contains both coref and non-coref links. 81 | if ($coref_precision < 0 && $noncoref_precision < 0) { 82 | # no response. 83 | $recall = $precision = $F1 = 0; 84 | } else { 85 | if ($coref_precision < 0) { 86 | # response: all links are non-coref, or response mentions are all 87 | # singletons. 88 | $coref_precision = 0; 89 | } elsif ($noncoref_precision < 0) { 90 | # response: all links are coref, or all mentions are in one entity. 91 | $noncoref_precision = 0; 92 | } 93 | $recall = ($coref_recall + $noncoref_recall)/2; 94 | $precision = ($coref_precision + $noncoref_precision)/2; 95 | $F1 = ($coref_F1 + $noncoref_F1)/2; 96 | } 97 | } 98 | 99 | return ($recall, $precision, $F1); 100 | } 101 | 102 | ############################################################################## 103 | # Compute the sum of the duifference between the expected recall, precision, 104 | # F1 and the actual one. 
105 | ############################################################################## 106 | sub DiffExpectedAndActual { 107 | my ($expected, $actual) = @_; 108 | if (scalar(@$expected) != scalar(@$actual)) { 109 | print STDERR "Expected and actual have diff dimensions: \n"; 110 | print STDERR " Expected: ", join(" ", @$expected), "\n"; 111 | print STDERR " Actual: ", join(" ", @$actual), "\n"; 112 | return 1.0e5; 113 | } 114 | my $sum = 0.0; 115 | my $i = 0; 116 | foreach my $e (@$expected) { 117 | $sum += abs($e - $actual->[$i]); 118 | ++$i; 119 | } 120 | return $sum; 121 | } 122 | 123 | 1; 124 | 125 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-A-1.response: -------------------------------------------------------------------------------- 1 | #begin document (LuoTestCase); 2 | test1 0 0 a1 (0 3 | test1 0 1 a2 0) 4 | test1 0 2 junk - 5 | test1 0 3 b1 (1 6 | test1 0 4 b2 - 7 | test1 0 5 b3 - 8 | test1 0 6 b4 1) 9 | test1 0 7 jnk - 10 | test1 0 8 . - 11 | 12 | test2 0 0 c (1) 13 | test2 0 1 jnk - 14 | test2 0 2 d1 (2 15 | test2 0 3 d2 2) 16 | test2 0 4 jnk - 17 | test2 0 5 e (2) 18 | test2 0 6 jnk - 19 | test2 0 7 f1 (2 20 | test2 0 8 f2 - 21 | test2 0 9 f3 2) 22 | test2 0 10 . - 23 | #end document 24 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-A-10.response: -------------------------------------------------------------------------------- 1 | #begin document (LuoTestCase); 2 | test1 0 0 a1 (0 3 | test1 0 1 a2 0) 4 | test1 0 2 junk - 5 | test1 0 3 b1 (1 6 | test1 0 4 b2 - 7 | test1 0 5 b3 - 8 | test1 0 6 b4 1) 9 | test1 0 7 jnk - 10 | test1 0 8 . - 11 | 12 | test2 0 0 c (2) 13 | test2 0 1 x - 14 | test2 0 2 d1 (3 15 | test2 0 3 d2 3) 16 | test2 0 4 z - 17 | test2 0 5 e (4) 18 | test2 0 6 y - 19 | test2 0 7 f1 (5 20 | test2 0 8 f2 - 21 | test2 0 9 f3 5) 22 | test2 0 10 . 
- 23 | #end document 24 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-A-11.response: -------------------------------------------------------------------------------- 1 | #begin document (LuoTestCase); 2 | test1 0 0 a1 (0 3 | test1 0 1 a2 0) 4 | test1 0 2 junk - 5 | test1 0 3 b1 (0 6 | test1 0 4 b2 - 7 | test1 0 5 b3 - 8 | test1 0 6 b4 0) 9 | test1 0 7 jnk - 10 | test1 0 8 . - 11 | 12 | test2 0 0 c (0) 13 | test2 0 1 x - 14 | test2 0 2 d1 (0 15 | test2 0 3 d2 0) 16 | test2 0 4 z - 17 | test2 0 5 e (0) 18 | test2 0 6 y - 19 | test2 0 7 f1 (0 20 | test2 0 8 f2 - 21 | test2 0 9 f3 0) 22 | test2 0 10 . - 23 | #end document 24 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-A-12.response: -------------------------------------------------------------------------------- 1 | #begin document (LuoTestCase); 2 | test1 0 0 a1 (0 3 | test1 0 1 a2 0) 4 | test1 0 2 junk - 5 | test1 0 3 b1 (1 6 | test1 0 4 b2 1) 7 | test1 0 5 b3 - 8 | test1 0 6 b4 - 9 | test1 0 7 jnk (2) 10 | test1 0 8 . - 11 | 12 | test2 0 0 c (3) 13 | test2 0 1 x - 14 | test2 0 2 d1 (4 15 | test2 0 3 d2 4) 16 | test2 0 4 z - 17 | test2 0 5 e (5) 18 | test2 0 6 y - 19 | test2 0 7 f1 (6) 20 | test2 0 8 f2 - 21 | test2 0 9 f3 - 22 | test2 0 10 . - 23 | #end document 24 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-A-13.response: -------------------------------------------------------------------------------- 1 | #begin document (LuoTestCase); 2 | test1 0 0 a1 (0 3 | test1 0 1 a2 0) 4 | test1 0 2 junk - 5 | test1 0 3 b1 (0 6 | test1 0 4 b2 0) 7 | test1 0 5 b3 - 8 | test1 0 6 b4 - 9 | test1 0 7 jnk (0) 10 | test1 0 8 . 
- 11 | 12 | test2 0 0 c (0) 13 | test2 0 1 x - 14 | test2 0 2 d1 (0 15 | test2 0 3 d2 0) 16 | test2 0 4 z - 17 | test2 0 5 e (0) 18 | test2 0 6 y - 19 | test2 0 7 f1 (0) 20 | test2 0 8 f2 - 21 | test2 0 9 f3 - 22 | test2 0 10 . - 23 | #end document 24 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-A-2.response: -------------------------------------------------------------------------------- 1 | #begin document (LuoTestCase); 2 | test1 0 0 a1 (0 3 | test1 0 1 a2 0) 4 | test1 0 2 junk - 5 | test1 0 3 b1 - 6 | test1 0 4 b2 - 7 | test1 0 5 b3 - 8 | test1 0 6 b4 - 9 | test1 0 7 jnk - 10 | test1 0 8 . - 11 | 12 | test2 0 0 c - 13 | test2 0 1 jnk - 14 | test2 0 2 d1 (2 15 | test2 0 3 d2 2) 16 | test2 0 4 jnk - 17 | test2 0 5 e (2) 18 | test2 0 6 jnk - 19 | test2 0 7 f1 - 20 | test2 0 8 f2 - 21 | test2 0 9 f3 - 22 | test2 0 10 . - 23 | #end document 24 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-A-3.response: -------------------------------------------------------------------------------- 1 | #begin document (LuoTestCase); 2 | test1 0 0 a1 (0 3 | test1 0 1 a2 0) 4 | test1 0 2 junk - 5 | test1 0 3 b1 (1 6 | test1 0 4 b2 - 7 | test1 0 5 b3 - 8 | test1 0 6 b4 1) 9 | test1 0 7 jnk - 10 | test1 0 8 . - 11 | 12 | test2 0 0 c (1) 13 | test2 0 1 x (1) 14 | test2 0 2 d1 (2 15 | test2 0 3 d2 2) 16 | test2 0 4 y (2) 17 | test2 0 5 e (2) 18 | test2 0 6 z (3) 19 | test2 0 7 f1 (2 20 | test2 0 8 f2 - 21 | test2 0 9 f3 2) 22 | test2 0 10 . 
- 23 | #end document 24 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-A-4.response: -------------------------------------------------------------------------------- 1 | #begin document (LuoTestCase); 2 | test1 0 0 a1 (0 3 | test1 0 1 a2 0) 4 | test1 0 2 junk - 5 | test1 0 3 b1 (1 6 | test1 0 4 b2 - 7 | test1 0 5 b3 - 8 | test1 0 6 b4 1) 9 | test1 0 7 jnk - 10 | test1 0 8 . - 11 | 12 | test2 0 0 c (1) 13 | test2 0 1 x (1) 14 | test2 0 2 d1 (2 15 | test2 0 3 d2 2) 16 | test2 0 4 x (3) 17 | test2 0 5 e - 18 | test2 0 6 y (2) 19 | test2 0 7 f1 - 20 | test2 0 8 f2 - 21 | test2 0 9 f3 - 22 | test2 0 10 . - 23 | #end document 24 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-A-5.response: -------------------------------------------------------------------------------- 1 | #begin document (LuoTestCase); 2 | test1 0 0 a1 (0 3 | test1 0 1 a2 0) 4 | test1 0 2 junk - 5 | test1 0 3 b1 (1 6 | test1 0 4 b2 (1 7 | test1 0 5 b3 1) 8 | test1 0 6 b4 1) 9 | test1 0 7 jnk - 10 | test1 0 8 . - 11 | 12 | test2 0 0 c (1) 13 | test2 0 1 x (1) 14 | test2 0 2 d1 (2 15 | test2 0 3 d2 2) 16 | test2 0 4 z (3) 17 | test2 0 5 e - 18 | test2 0 6 y (2) 19 | test2 0 7 f1 - 20 | test2 0 8 f2 - 21 | test2 0 9 f3 - 22 | test2 0 10 . - 23 | #end document 24 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-A-6.response: -------------------------------------------------------------------------------- 1 | #begin document (LuoTestCase); 2 | test1 0 0 a1 (0 3 | test1 0 1 a2 0) 4 | test1 0 2 junk - 5 | test1 0 3 b1 (1 6 | test1 0 4 b2 (3 7 | test1 0 5 b3 3) 8 | test1 0 6 b4 1) 9 | test1 0 7 jnk - 10 | test1 0 8 . 
- 11 | 12 | test2 0 0 c (1) 13 | test2 0 1 x (1) 14 | test2 0 2 d1 (2 15 | test2 0 3 d2 2) 16 | test2 0 4 z (3) 17 | test2 0 5 e - 18 | test2 0 6 y (2) 19 | test2 0 7 f1 - 20 | test2 0 8 f2 - 21 | test2 0 9 f3 - 22 | test2 0 10 . - 23 | #end document 24 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-A-7.response: -------------------------------------------------------------------------------- 1 | #begin document (LuoTestCase); 2 | test1 0 0 a1 (0 3 | test1 0 1 a2 0) 4 | test1 0 2 junk - 5 | test1 0 3 b1 (1(1 6 | test1 0 4 b2 - 7 | test1 0 5 b3 - 8 | test1 0 6 b4 1)1) 9 | test1 0 7 jnk - 10 | test1 0 8 . - 11 | 12 | test2 0 0 c (1) 13 | test2 0 1 x (1) 14 | test2 0 2 d1 (2 15 | test2 0 3 d2 2) 16 | test2 0 4 z (3) 17 | test2 0 5 e - 18 | test2 0 6 y (2) 19 | test2 0 7 f1 - 20 | test2 0 8 f2 - 21 | test2 0 9 f3 - 22 | test2 0 10 . - 23 | #end document 24 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-A-8.response: -------------------------------------------------------------------------------- 1 | #begin document (LuoTestCase); 2 | test1 0 0 a1 (0 3 | test1 0 1 a2 0) 4 | test1 0 2 junk - 5 | test1 0 3 b1 (1(3 6 | test1 0 4 b2 - 7 | test1 0 5 b3 - 8 | test1 0 6 b4 3)1) 9 | test1 0 7 jnk - 10 | test1 0 8 . - 11 | 12 | test2 0 0 c (1) 13 | test2 0 1 x (1) 14 | test2 0 2 d1 (2 15 | test2 0 3 d2 2) 16 | test2 0 4 z (3) 17 | test2 0 5 e - 18 | test2 0 6 y (2) 19 | test2 0 7 f1 - 20 | test2 0 8 f2 - 21 | test2 0 9 f3 - 22 | test2 0 10 . 
- 23 | #end document 24 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-A-9.response: -------------------------------------------------------------------------------- 1 | #begin document (LuoTestCase); 2 | test1 0 0 a1 (0 3 | test1 0 1 a2 0) 4 | test1 0 2 junk - 5 | test1 0 3 b1 (1(3(3(3(3(3(3(3(3(3(3 6 | test1 0 4 b2 - 7 | test1 0 5 b3 - 8 | test1 0 6 b4 3)3)3)3)3)3)3)3)3)3)1) 9 | test1 0 7 jnk - 10 | test1 0 8 . - 11 | 12 | test2 0 0 c (1) 13 | test2 0 1 x (1) 14 | test2 0 2 d1 (2 15 | test2 0 3 d2 2) 16 | test2 0 4 z (3) 17 | test2 0 5 e - 18 | test2 0 6 y (2) 19 | test2 0 7 f1 - 20 | test2 0 8 f2 - 21 | test2 0 9 f3 - 22 | test2 0 10 . - 23 | #end document 24 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-A.key: -------------------------------------------------------------------------------- 1 | #begin document (LuoTestCase); 2 | test1 0 0 a1 (0 3 | test1 0 1 a2 0) 4 | test1 0 2 junk - 5 | test1 0 3 b1 (1 6 | test1 0 4 b2 - 7 | test1 0 5 b3 - 8 | test1 0 6 b4 1) 9 | test1 0 7 jnk - 10 | test1 0 8 . - 11 | 12 | test2 0 0 c (1) 13 | test2 0 1 jnk - 14 | test2 0 2 d1 (2 15 | test2 0 3 d2 2) 16 | test2 0 4 jnk - 17 | test2 0 5 e (2) 18 | test2 0 6 jnk - 19 | test2 0 7 f1 (2 20 | test2 0 8 f2 - 21 | test2 0 9 f3 2) 22 | test2 0 10 . 
- 23 | #end document 24 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-B-1.response: -------------------------------------------------------------------------------- 1 | #begin document (nw/xinhua/00/chtb_0009); part 000 2 | nw/xinhua/00/chtb_0009 - 3 | nw/xinhua/00/chtb_0009 - 4 | nw/xinhua/00/chtb_0009 - 5 | nw/xinhua/00/chtb_0009 - 6 | nw/xinhua/00/chtb_0009 - 7 | nw/xinhua/00/chtb_0009 - 8 | nw/xinhua/00/chtb_0009 - 9 | nw/xinhua/00/chtb_0009 - 10 | nw/xinhua/00/chtb_0009 - 11 | nw/xinhua/00/chtb_0009 - 12 | nw/xinhua/00/chtb_0009 - 13 | nw/xinhua/00/chtb_0009 - 14 | nw/xinhua/00/chtb_0009 - 15 | nw/xinhua/00/chtb_0009 - 16 | nw/xinhua/00/chtb_0009 - 17 | nw/xinhua/00/chtb_0009 - 18 | nw/xinhua/00/chtb_0009 - 19 | nw/xinhua/00/chtb_0009 - 20 | nw/xinhua/00/chtb_0009 - 21 | nw/xinhua/00/chtb_0009 - 22 | nw/xinhua/00/chtb_0009 - 23 | nw/xinhua/00/chtb_0009 - 24 | nw/xinhua/00/chtb_0009 - 25 | nw/xinhua/00/chtb_0009 - 26 | nw/xinhua/00/chtb_0009 - 27 | nw/xinhua/00/chtb_0009 - 28 | nw/xinhua/00/chtb_0009 - 29 | nw/xinhua/00/chtb_0009 - 30 | nw/xinhua/00/chtb_0009 - 31 | nw/xinhua/00/chtb_0009 (10043 32 | nw/xinhua/00/chtb_0009 - 33 | nw/xinhua/00/chtb_0009 10043) 34 | nw/xinhua/00/chtb_0009 - 35 | nw/xinhua/00/chtb_0009 - 36 | nw/xinhua/00/chtb_0009 - 37 | nw/xinhua/00/chtb_0009 - 38 | nw/xinhua/00/chtb_0009 - 39 | nw/xinhua/00/chtb_0009 - 40 | nw/xinhua/00/chtb_0009 - 41 | nw/xinhua/00/chtb_0009 - 42 | nw/xinhua/00/chtb_0009 - 43 | nw/xinhua/00/chtb_0009 - 44 | nw/xinhua/00/chtb_0009 - 45 | nw/xinhua/00/chtb_0009 - 46 | nw/xinhua/00/chtb_0009 - 47 | nw/xinhua/00/chtb_0009 - 48 | nw/xinhua/00/chtb_0009 - 49 | nw/xinhua/00/chtb_0009 (10043) 50 | nw/xinhua/00/chtb_0009 - 51 | nw/xinhua/00/chtb_0009 - 52 | nw/xinhua/00/chtb_0009 - 53 | nw/xinhua/00/chtb_0009 - 54 | nw/xinhua/00/chtb_0009 - 55 | nw/xinhua/00/chtb_0009 - 56 | nw/xinhua/00/chtb_0009 (10043 57 | nw/xinhua/00/chtb_0009 - 58 | 
nw/xinhua/00/chtb_0009 - 59 | nw/xinhua/00/chtb_0009 - 60 | nw/xinhua/00/chtb_0009 10043) 61 | nw/xinhua/00/chtb_0009 - 62 | nw/xinhua/00/chtb_0009 - 63 | nw/xinhua/00/chtb_0009 - 64 | nw/xinhua/00/chtb_0009 (10054 65 | nw/xinhua/00/chtb_0009 10054) 66 | nw/xinhua/00/chtb_0009 - 67 | nw/xinhua/00/chtb_0009 - 68 | nw/xinhua/00/chtb_0009 (10054) 69 | nw/xinhua/00/chtb_0009 - 70 | nw/xinhua/00/chtb_0009 - 71 | nw/xinhua/00/chtb_0009 - 72 | nw/xinhua/00/chtb_0009 - 73 | 74 | #end document 75 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-B.key: -------------------------------------------------------------------------------- 1 | #begin document (nw/xinhua/00/chtb_0009); part 000 2 | nw/xinhua/00/chtb_0009 - 3 | nw/xinhua/00/chtb_0009 (10043 4 | nw/xinhua/00/chtb_0009 - 5 | nw/xinhua/00/chtb_0009 - 6 | nw/xinhua/00/chtb_0009 - 7 | nw/xinhua/00/chtb_0009 - 8 | nw/xinhua/00/chtb_0009 - 9 | nw/xinhua/00/chtb_0009 - 10 | nw/xinhua/00/chtb_0009 - 11 | nw/xinhua/00/chtb_0009 - 12 | nw/xinhua/00/chtb_0009 - 13 | nw/xinhua/00/chtb_0009 - 14 | nw/xinhua/00/chtb_0009 10043) 15 | nw/xinhua/00/chtb_0009 - 16 | nw/xinhua/00/chtb_0009 - 17 | nw/xinhua/00/chtb_0009 - 18 | nw/xinhua/00/chtb_0009 - 19 | nw/xinhua/00/chtb_0009 - 20 | nw/xinhua/00/chtb_0009 - 21 | nw/xinhua/00/chtb_0009 - 22 | nw/xinhua/00/chtb_0009 - 23 | nw/xinhua/00/chtb_0009 - 24 | nw/xinhua/00/chtb_0009 - 25 | nw/xinhua/00/chtb_0009 - 26 | nw/xinhua/00/chtb_0009 - 27 | nw/xinhua/00/chtb_0009 - 28 | nw/xinhua/00/chtb_0009 - 29 | nw/xinhua/00/chtb_0009 - 30 | nw/xinhua/00/chtb_0009 - 31 | nw/xinhua/00/chtb_0009 (10054 32 | nw/xinhua/00/chtb_0009 - 33 | nw/xinhua/00/chtb_0009 10054) 34 | nw/xinhua/00/chtb_0009 - 35 | nw/xinhua/00/chtb_0009 - 36 | nw/xinhua/00/chtb_0009 - 37 | nw/xinhua/00/chtb_0009 - 38 | nw/xinhua/00/chtb_0009 - 39 | nw/xinhua/00/chtb_0009 - 40 | nw/xinhua/00/chtb_0009 - 41 | nw/xinhua/00/chtb_0009 - 42 | 
nw/xinhua/00/chtb_0009 - 43 | nw/xinhua/00/chtb_0009 - 44 | nw/xinhua/00/chtb_0009 - 45 | nw/xinhua/00/chtb_0009 - 46 | nw/xinhua/00/chtb_0009 - 47 | nw/xinhua/00/chtb_0009 - 48 | nw/xinhua/00/chtb_0009 - 49 | nw/xinhua/00/chtb_0009 (10043) 50 | nw/xinhua/00/chtb_0009 - 51 | nw/xinhua/00/chtb_0009 - 52 | nw/xinhua/00/chtb_0009 - 53 | nw/xinhua/00/chtb_0009 - 54 | nw/xinhua/00/chtb_0009 - 55 | nw/xinhua/00/chtb_0009 - 56 | nw/xinhua/00/chtb_0009 - 57 | nw/xinhua/00/chtb_0009 - 58 | nw/xinhua/00/chtb_0009 - 59 | nw/xinhua/00/chtb_0009 - 60 | nw/xinhua/00/chtb_0009 - 61 | nw/xinhua/00/chtb_0009 - 62 | nw/xinhua/00/chtb_0009 - 63 | nw/xinhua/00/chtb_0009 - 64 | nw/xinhua/00/chtb_0009 (10054 65 | nw/xinhua/00/chtb_0009 10054) 66 | nw/xinhua/00/chtb_0009 - 67 | nw/xinhua/00/chtb_0009 - 68 | nw/xinhua/00/chtb_0009 (10054) 69 | nw/xinhua/00/chtb_0009 - 70 | nw/xinhua/00/chtb_0009 - 71 | nw/xinhua/00/chtb_0009 - 72 | nw/xinhua/00/chtb_0009 - 73 | 74 | #end document 75 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-C-1.response: -------------------------------------------------------------------------------- 1 | #begin document (nw/xinhua/00/chtb_0009); part 000 2 | nw/xinhua/00/chtb_0009 - 3 | nw/xinhua/00/chtb_0009 - 4 | nw/xinhua/00/chtb_0009 - 5 | nw/xinhua/00/chtb_0009 - 6 | nw/xinhua/00/chtb_0009 - 7 | nw/xinhua/00/chtb_0009 - 8 | nw/xinhua/00/chtb_0009 - 9 | nw/xinhua/00/chtb_0009 - 10 | nw/xinhua/00/chtb_0009 - 11 | nw/xinhua/00/chtb_0009 - 12 | nw/xinhua/00/chtb_0009 - 13 | nw/xinhua/00/chtb_0009 - 14 | nw/xinhua/00/chtb_0009 - 15 | nw/xinhua/00/chtb_0009 - 16 | nw/xinhua/00/chtb_0009 - 17 | nw/xinhua/00/chtb_0009 - 18 | nw/xinhua/00/chtb_0009 - 19 | nw/xinhua/00/chtb_0009 - 20 | nw/xinhua/00/chtb_0009 - 21 | nw/xinhua/00/chtb_0009 - 22 | nw/xinhua/00/chtb_0009 - 23 | nw/xinhua/00/chtb_0009 - 24 | nw/xinhua/00/chtb_0009 - 25 | nw/xinhua/00/chtb_0009 - 26 | nw/xinhua/00/chtb_0009 - 27 | 
nw/xinhua/00/chtb_0009 - 28 | nw/xinhua/00/chtb_0009 - 29 | nw/xinhua/00/chtb_0009 - 30 | nw/xinhua/00/chtb_0009 - 31 | nw/xinhua/00/chtb_0009 (10043 32 | nw/xinhua/00/chtb_0009 - 33 | nw/xinhua/00/chtb_0009 10043) 34 | nw/xinhua/00/chtb_0009 - 35 | nw/xinhua/00/chtb_0009 - 36 | nw/xinhua/00/chtb_0009 - 37 | nw/xinhua/00/chtb_0009 - 38 | nw/xinhua/00/chtb_0009 - 39 | nw/xinhua/00/chtb_0009 - 40 | nw/xinhua/00/chtb_0009 - 41 | nw/xinhua/00/chtb_0009 - 42 | nw/xinhua/00/chtb_0009 - 43 | nw/xinhua/00/chtb_0009 - 44 | nw/xinhua/00/chtb_0009 - 45 | nw/xinhua/00/chtb_0009 - 46 | nw/xinhua/00/chtb_0009 - 47 | nw/xinhua/00/chtb_0009 - 48 | nw/xinhua/00/chtb_0009 - 49 | nw/xinhua/00/chtb_0009 (10043) 50 | nw/xinhua/00/chtb_0009 - 51 | nw/xinhua/00/chtb_0009 - 52 | nw/xinhua/00/chtb_0009 - 53 | nw/xinhua/00/chtb_0009 - 54 | nw/xinhua/00/chtb_0009 - 55 | nw/xinhua/00/chtb_0009 - 56 | nw/xinhua/00/chtb_0009 (10043 57 | nw/xinhua/00/chtb_0009 - 58 | nw/xinhua/00/chtb_0009 - 59 | nw/xinhua/00/chtb_0009 - 60 | nw/xinhua/00/chtb_0009 10043) 61 | nw/xinhua/00/chtb_0009 - 62 | nw/xinhua/00/chtb_0009 - 63 | nw/xinhua/00/chtb_0009 - 64 | nw/xinhua/00/chtb_0009 (10054 65 | nw/xinhua/00/chtb_0009 10054) 66 | nw/xinhua/00/chtb_0009 - 67 | nw/xinhua/00/chtb_0009 - 68 | nw/xinhua/00/chtb_0009 (10054) 69 | nw/xinhua/00/chtb_0009 - 70 | nw/xinhua/00/chtb_0009 - 71 | nw/xinhua/00/chtb_0009 (10060) 72 | nw/xinhua/00/chtb_0009 (10060) 73 | 74 | #end document 75 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-C.key: -------------------------------------------------------------------------------- 1 | #begin document (nw/xinhua/00/chtb_0009); part 000 2 | nw/xinhua/00/chtb_0009 - 3 | nw/xinhua/00/chtb_0009 (10043 4 | nw/xinhua/00/chtb_0009 - 5 | nw/xinhua/00/chtb_0009 - 6 | nw/xinhua/00/chtb_0009 - 7 | nw/xinhua/00/chtb_0009 - 8 | nw/xinhua/00/chtb_0009 - 9 | nw/xinhua/00/chtb_0009 - 10 | nw/xinhua/00/chtb_0009 - 11 | 
nw/xinhua/00/chtb_0009 - 12 | nw/xinhua/00/chtb_0009 - 13 | nw/xinhua/00/chtb_0009 - 14 | nw/xinhua/00/chtb_0009 10043) 15 | nw/xinhua/00/chtb_0009 - 16 | nw/xinhua/00/chtb_0009 - 17 | nw/xinhua/00/chtb_0009 - 18 | nw/xinhua/00/chtb_0009 - 19 | nw/xinhua/00/chtb_0009 - 20 | nw/xinhua/00/chtb_0009 - 21 | nw/xinhua/00/chtb_0009 - 22 | nw/xinhua/00/chtb_0009 - 23 | nw/xinhua/00/chtb_0009 - 24 | nw/xinhua/00/chtb_0009 - 25 | nw/xinhua/00/chtb_0009 - 26 | nw/xinhua/00/chtb_0009 - 27 | nw/xinhua/00/chtb_0009 - 28 | nw/xinhua/00/chtb_0009 - 29 | nw/xinhua/00/chtb_0009 - 30 | nw/xinhua/00/chtb_0009 - 31 | nw/xinhua/00/chtb_0009 (10054 32 | nw/xinhua/00/chtb_0009 - 33 | nw/xinhua/00/chtb_0009 10054) 34 | nw/xinhua/00/chtb_0009 - 35 | nw/xinhua/00/chtb_0009 - 36 | nw/xinhua/00/chtb_0009 - 37 | nw/xinhua/00/chtb_0009 - 38 | nw/xinhua/00/chtb_0009 - 39 | nw/xinhua/00/chtb_0009 - 40 | nw/xinhua/00/chtb_0009 - 41 | nw/xinhua/00/chtb_0009 - 42 | nw/xinhua/00/chtb_0009 - 43 | nw/xinhua/00/chtb_0009 - 44 | nw/xinhua/00/chtb_0009 - 45 | nw/xinhua/00/chtb_0009 - 46 | nw/xinhua/00/chtb_0009 - 47 | nw/xinhua/00/chtb_0009 - 48 | nw/xinhua/00/chtb_0009 - 49 | nw/xinhua/00/chtb_0009 (10043) 50 | nw/xinhua/00/chtb_0009 - 51 | nw/xinhua/00/chtb_0009 - 52 | nw/xinhua/00/chtb_0009 - 53 | nw/xinhua/00/chtb_0009 - 54 | nw/xinhua/00/chtb_0009 - 55 | nw/xinhua/00/chtb_0009 - 56 | nw/xinhua/00/chtb_0009 - 57 | nw/xinhua/00/chtb_0009 - 58 | nw/xinhua/00/chtb_0009 - 59 | nw/xinhua/00/chtb_0009 - 60 | nw/xinhua/00/chtb_0009 - 61 | nw/xinhua/00/chtb_0009 - 62 | nw/xinhua/00/chtb_0009 - 63 | nw/xinhua/00/chtb_0009 - 64 | nw/xinhua/00/chtb_0009 (10054 65 | nw/xinhua/00/chtb_0009 10054) 66 | nw/xinhua/00/chtb_0009 - 67 | nw/xinhua/00/chtb_0009 - 68 | nw/xinhua/00/chtb_0009 (10054) 69 | nw/xinhua/00/chtb_0009 - 70 | nw/xinhua/00/chtb_0009 - 71 | nw/xinhua/00/chtb_0009 (10060) 72 | nw/xinhua/00/chtb_0009 (10060) 73 | 74 | #end document 75 | 
-------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-D-1.response: -------------------------------------------------------------------------------- 1 | #begin document (nw/xinhua/00/chtb_0009); part 000 2 | nw/xinhua/00/chtb_0009 - 3 | nw/xinhua/00/chtb_0009 (1) 4 | nw/xinhua/00/chtb_0009 - 5 | nw/xinhua/00/chtb_0009 (1) 6 | nw/xinhua/00/chtb_0009 - 7 | nw/xinhua/00/chtb_0009 (1) 8 | nw/xinhua/00/chtb_0009 - 9 | nw/xinhua/00/chtb_0009 (1) 10 | nw/xinhua/00/chtb_0009 - 11 | nw/xinhua/00/chtb_0009 (1) 12 | nw/xinhua/00/chtb_0009 - 13 | nw/xinhua/00/chtb_0009 - 14 | nw/xinhua/00/chtb_0009 - 15 | nw/xinhua/00/chtb_0009 (3) 16 | nw/xinhua/00/chtb_0009 - 17 | nw/xinhua/00/chtb_0009 (3) 18 | nw/xinhua/00/chtb_0009 - 19 | nw/xinhua/00/chtb_0009 - 20 | nw/xinhua/00/chtb_0009 - 21 | nw/xinhua/00/chtb_0009 (3) 22 | nw/xinhua/00/chtb_0009 - 23 | nw/xinhua/00/chtb_0009 (3) 24 | nw/xinhua/00/chtb_0009 - 25 | nw/xinhua/00/chtb_0009 (3) 26 | nw/xinhua/00/chtb_0009 - 27 | nw/xinhua/00/chtb_0009 (3) 28 | nw/xinhua/00/chtb_0009 - 29 | nw/xinhua/00/chtb_0009 (3) 30 | 31 | #end document 32 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-D.key: -------------------------------------------------------------------------------- 1 | #begin document (nw/xinhua/00/chtb_0009); part 000 2 | nw/xinhua/00/chtb_0009 - 3 | nw/xinhua/00/chtb_0009 (1) 4 | nw/xinhua/00/chtb_0009 - 5 | nw/xinhua/00/chtb_0009 (1) 6 | nw/xinhua/00/chtb_0009 - 7 | nw/xinhua/00/chtb_0009 (1) 8 | nw/xinhua/00/chtb_0009 - 9 | nw/xinhua/00/chtb_0009 (1) 10 | nw/xinhua/00/chtb_0009 - 11 | nw/xinhua/00/chtb_0009 (1) 12 | nw/xinhua/00/chtb_0009 - 13 | nw/xinhua/00/chtb_0009 - 14 | nw/xinhua/00/chtb_0009 - 15 | nw/xinhua/00/chtb_0009 (2) 16 | nw/xinhua/00/chtb_0009 - 17 | nw/xinhua/00/chtb_0009 (2) 18 | nw/xinhua/00/chtb_0009 - 19 | nw/xinhua/00/chtb_0009 - 20 | 
nw/xinhua/00/chtb_0009 - 21 | nw/xinhua/00/chtb_0009 (3) 22 | nw/xinhua/00/chtb_0009 - 23 | nw/xinhua/00/chtb_0009 (3) 24 | nw/xinhua/00/chtb_0009 - 25 | nw/xinhua/00/chtb_0009 (3) 26 | nw/xinhua/00/chtb_0009 - 27 | nw/xinhua/00/chtb_0009 (3) 28 | nw/xinhua/00/chtb_0009 - 29 | nw/xinhua/00/chtb_0009 (3) 30 | 31 | #end document 32 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-E-1.response: -------------------------------------------------------------------------------- 1 | #begin document (nw/xinhua/00/chtb_0009); part 000 2 | nw/xinhua/00/chtb_0009 - 3 | nw/xinhua/00/chtb_0009 (1) 4 | nw/xinhua/00/chtb_0009 - 5 | nw/xinhua/00/chtb_0009 (1) 6 | nw/xinhua/00/chtb_0009 - 7 | nw/xinhua/00/chtb_0009 (1) 8 | nw/xinhua/00/chtb_0009 - 9 | nw/xinhua/00/chtb_0009 (1) 10 | nw/xinhua/00/chtb_0009 - 11 | nw/xinhua/00/chtb_0009 (1) 12 | nw/xinhua/00/chtb_0009 - 13 | nw/xinhua/00/chtb_0009 - 14 | nw/xinhua/00/chtb_0009 - 15 | nw/xinhua/00/chtb_0009 (2) 16 | nw/xinhua/00/chtb_0009 - 17 | nw/xinhua/00/chtb_0009 (2) 18 | nw/xinhua/00/chtb_0009 - 19 | nw/xinhua/00/chtb_0009 - 20 | nw/xinhua/00/chtb_0009 - 21 | nw/xinhua/00/chtb_0009 (1) 22 | nw/xinhua/00/chtb_0009 - 23 | nw/xinhua/00/chtb_0009 (1) 24 | nw/xinhua/00/chtb_0009 - 25 | nw/xinhua/00/chtb_0009 (1) 26 | nw/xinhua/00/chtb_0009 - 27 | nw/xinhua/00/chtb_0009 (1) 28 | nw/xinhua/00/chtb_0009 - 29 | nw/xinhua/00/chtb_0009 (1) 30 | 31 | #end document 32 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-E.key: -------------------------------------------------------------------------------- 1 | #begin document (nw/xinhua/00/chtb_0009); part 000 2 | nw/xinhua/00/chtb_0009 - 3 | nw/xinhua/00/chtb_0009 (1) 4 | nw/xinhua/00/chtb_0009 - 5 | nw/xinhua/00/chtb_0009 (1) 6 | nw/xinhua/00/chtb_0009 - 7 | nw/xinhua/00/chtb_0009 (1) 8 | nw/xinhua/00/chtb_0009 - 9 | nw/xinhua/00/chtb_0009 
(1) 10 | nw/xinhua/00/chtb_0009 - 11 | nw/xinhua/00/chtb_0009 (1) 12 | nw/xinhua/00/chtb_0009 - 13 | nw/xinhua/00/chtb_0009 - 14 | nw/xinhua/00/chtb_0009 - 15 | nw/xinhua/00/chtb_0009 (2) 16 | nw/xinhua/00/chtb_0009 - 17 | nw/xinhua/00/chtb_0009 (2) 18 | nw/xinhua/00/chtb_0009 - 19 | nw/xinhua/00/chtb_0009 - 20 | nw/xinhua/00/chtb_0009 - 21 | nw/xinhua/00/chtb_0009 (3) 22 | nw/xinhua/00/chtb_0009 - 23 | nw/xinhua/00/chtb_0009 (3) 24 | nw/xinhua/00/chtb_0009 - 25 | nw/xinhua/00/chtb_0009 (3) 26 | nw/xinhua/00/chtb_0009 - 27 | nw/xinhua/00/chtb_0009 (3) 28 | nw/xinhua/00/chtb_0009 - 29 | nw/xinhua/00/chtb_0009 (3) 30 | 31 | #end document 32 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-F-1.response: -------------------------------------------------------------------------------- 1 | #begin document (nw/xinhua/00/chtb_0009); part 000 2 | nw/xinhua/00/chtb_0009 - 3 | nw/xinhua/00/chtb_0009 (1) 4 | nw/xinhua/00/chtb_0009 - 5 | nw/xinhua/00/chtb_0009 (1) 6 | nw/xinhua/00/chtb_0009 - 7 | nw/xinhua/00/chtb_0009 (2) 8 | nw/xinhua/00/chtb_0009 - 9 | nw/xinhua/00/chtb_0009 (2) 10 | nw/xinhua/00/chtb_0009 - 11 | nw/xinhua/00/chtb_0009 - 12 | nw/xinhua/00/chtb_0009 - 13 | nw/xinhua/00/chtb_0009 - 14 | nw/xinhua/00/chtb_0009 - 15 | nw/xinhua/00/chtb_0009 - 16 | nw/xinhua/00/chtb_0009 - 17 | nw/xinhua/00/chtb_0009 - 18 | nw/xinhua/00/chtb_0009 - 19 | nw/xinhua/00/chtb_0009 - 20 | nw/xinhua/00/chtb_0009 - 21 | nw/xinhua/00/chtb_0009 - 22 | nw/xinhua/00/chtb_0009 - 23 | nw/xinhua/00/chtb_0009 - 24 | nw/xinhua/00/chtb_0009 - 25 | nw/xinhua/00/chtb_0009 - 26 | nw/xinhua/00/chtb_0009 - 27 | nw/xinhua/00/chtb_0009 - 28 | nw/xinhua/00/chtb_0009 - 29 | nw/xinhua/00/chtb_0009 - 30 | 31 | #end document 32 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-F.key: 
-------------------------------------------------------------------------------- 1 | #begin document (nw/xinhua/00/chtb_0009); part 000 2 | nw/xinhua/00/chtb_0009 - 3 | nw/xinhua/00/chtb_0009 (1) 4 | nw/xinhua/00/chtb_0009 - 5 | nw/xinhua/00/chtb_0009 (1) 6 | nw/xinhua/00/chtb_0009 - 7 | nw/xinhua/00/chtb_0009 (1) 8 | nw/xinhua/00/chtb_0009 - 9 | nw/xinhua/00/chtb_0009 (1) 10 | nw/xinhua/00/chtb_0009 - 11 | nw/xinhua/00/chtb_0009 - 12 | nw/xinhua/00/chtb_0009 - 13 | nw/xinhua/00/chtb_0009 - 14 | nw/xinhua/00/chtb_0009 - 15 | nw/xinhua/00/chtb_0009 - 16 | nw/xinhua/00/chtb_0009 - 17 | nw/xinhua/00/chtb_0009 - 18 | nw/xinhua/00/chtb_0009 - 19 | nw/xinhua/00/chtb_0009 - 20 | nw/xinhua/00/chtb_0009 - 21 | nw/xinhua/00/chtb_0009 - 22 | nw/xinhua/00/chtb_0009 - 23 | nw/xinhua/00/chtb_0009 - 24 | nw/xinhua/00/chtb_0009 - 25 | nw/xinhua/00/chtb_0009 - 26 | nw/xinhua/00/chtb_0009 - 27 | nw/xinhua/00/chtb_0009 - 28 | nw/xinhua/00/chtb_0009 - 29 | nw/xinhua/00/chtb_0009 - 30 | 31 | #end document 32 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-G-1.response: -------------------------------------------------------------------------------- 1 | #begin document (nw/xinhua/00/chtb_0009); part 000 2 | nw/xinhua/00/chtb_0009 - 3 | nw/xinhua/00/chtb_0009 (1) 4 | nw/xinhua/00/chtb_0009 - 5 | nw/xinhua/00/chtb_0009 (1) 6 | nw/xinhua/00/chtb_0009 - 7 | nw/xinhua/00/chtb_0009 (1) 8 | nw/xinhua/00/chtb_0009 - 9 | nw/xinhua/00/chtb_0009 (1) 10 | nw/xinhua/00/chtb_0009 - 11 | nw/xinhua/00/chtb_0009 - 12 | nw/xinhua/00/chtb_0009 - 13 | nw/xinhua/00/chtb_0009 - 14 | nw/xinhua/00/chtb_0009 - 15 | nw/xinhua/00/chtb_0009 - 16 | nw/xinhua/00/chtb_0009 - 17 | nw/xinhua/00/chtb_0009 - 18 | nw/xinhua/00/chtb_0009 - 19 | nw/xinhua/00/chtb_0009 - 20 | nw/xinhua/00/chtb_0009 - 21 | nw/xinhua/00/chtb_0009 - 22 | nw/xinhua/00/chtb_0009 - 23 | nw/xinhua/00/chtb_0009 - 24 | nw/xinhua/00/chtb_0009 - 25 | nw/xinhua/00/chtb_0009 - 
26 | nw/xinhua/00/chtb_0009 - 27 | nw/xinhua/00/chtb_0009 - 28 | nw/xinhua/00/chtb_0009 - 29 | nw/xinhua/00/chtb_0009 - 30 | 31 | #end document 32 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-G.key: -------------------------------------------------------------------------------- 1 | #begin document (nw/xinhua/00/chtb_0009); part 000 2 | nw/xinhua/00/chtb_0009 - 3 | nw/xinhua/00/chtb_0009 (1) 4 | nw/xinhua/00/chtb_0009 - 5 | nw/xinhua/00/chtb_0009 (1) 6 | nw/xinhua/00/chtb_0009 - 7 | nw/xinhua/00/chtb_0009 (2) 8 | nw/xinhua/00/chtb_0009 - 9 | nw/xinhua/00/chtb_0009 (2) 10 | nw/xinhua/00/chtb_0009 - 11 | nw/xinhua/00/chtb_0009 - 12 | nw/xinhua/00/chtb_0009 - 13 | nw/xinhua/00/chtb_0009 - 14 | nw/xinhua/00/chtb_0009 - 15 | nw/xinhua/00/chtb_0009 - 16 | nw/xinhua/00/chtb_0009 - 17 | nw/xinhua/00/chtb_0009 - 18 | nw/xinhua/00/chtb_0009 - 19 | nw/xinhua/00/chtb_0009 - 20 | nw/xinhua/00/chtb_0009 - 21 | nw/xinhua/00/chtb_0009 - 22 | nw/xinhua/00/chtb_0009 - 23 | nw/xinhua/00/chtb_0009 - 24 | nw/xinhua/00/chtb_0009 - 25 | nw/xinhua/00/chtb_0009 - 26 | nw/xinhua/00/chtb_0009 - 27 | nw/xinhua/00/chtb_0009 - 28 | nw/xinhua/00/chtb_0009 - 29 | nw/xinhua/00/chtb_0009 - 30 | 31 | #end document 32 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-H-1.response: -------------------------------------------------------------------------------- 1 | #begin document (nw/xinhua/00/chtb_0009); part 000 2 | nw/xinhua/00/chtb_0009 - 3 | nw/xinhua/00/chtb_0009 (1) 4 | nw/xinhua/00/chtb_0009 - 5 | nw/xinhua/00/chtb_0009 (1) 6 | nw/xinhua/00/chtb_0009 - 7 | nw/xinhua/00/chtb_0009 (1) 8 | nw/xinhua/00/chtb_0009 - 9 | nw/xinhua/00/chtb_0009 (1) 10 | nw/xinhua/00/chtb_0009 - 11 | nw/xinhua/00/chtb_0009 - 12 | nw/xinhua/00/chtb_0009 - 13 | nw/xinhua/00/chtb_0009 - 14 | nw/xinhua/00/chtb_0009 - 15 | nw/xinhua/00/chtb_0009 - 16 | 
nw/xinhua/00/chtb_0009 - 17 | nw/xinhua/00/chtb_0009 - 18 | nw/xinhua/00/chtb_0009 - 19 | nw/xinhua/00/chtb_0009 - 20 | nw/xinhua/00/chtb_0009 - 21 | nw/xinhua/00/chtb_0009 - 22 | nw/xinhua/00/chtb_0009 - 23 | nw/xinhua/00/chtb_0009 - 24 | nw/xinhua/00/chtb_0009 - 25 | nw/xinhua/00/chtb_0009 - 26 | nw/xinhua/00/chtb_0009 - 27 | nw/xinhua/00/chtb_0009 - 28 | nw/xinhua/00/chtb_0009 - 29 | nw/xinhua/00/chtb_0009 - 30 | 31 | #end document 32 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-H.key: -------------------------------------------------------------------------------- 1 | #begin document (nw/xinhua/00/chtb_0009); part 000 2 | nw/xinhua/00/chtb_0009 - 3 | nw/xinhua/00/chtb_0009 (1) 4 | nw/xinhua/00/chtb_0009 - 5 | nw/xinhua/00/chtb_0009 (1) 6 | nw/xinhua/00/chtb_0009 - 7 | nw/xinhua/00/chtb_0009 (1) 8 | nw/xinhua/00/chtb_0009 - 9 | nw/xinhua/00/chtb_0009 (1) 10 | nw/xinhua/00/chtb_0009 - 11 | nw/xinhua/00/chtb_0009 - 12 | nw/xinhua/00/chtb_0009 - 13 | nw/xinhua/00/chtb_0009 - 14 | nw/xinhua/00/chtb_0009 - 15 | nw/xinhua/00/chtb_0009 - 16 | nw/xinhua/00/chtb_0009 - 17 | nw/xinhua/00/chtb_0009 - 18 | nw/xinhua/00/chtb_0009 - 19 | nw/xinhua/00/chtb_0009 - 20 | nw/xinhua/00/chtb_0009 - 21 | nw/xinhua/00/chtb_0009 - 22 | nw/xinhua/00/chtb_0009 - 23 | nw/xinhua/00/chtb_0009 - 24 | nw/xinhua/00/chtb_0009 - 25 | nw/xinhua/00/chtb_0009 - 26 | nw/xinhua/00/chtb_0009 - 27 | nw/xinhua/00/chtb_0009 - 28 | nw/xinhua/00/chtb_0009 - 29 | nw/xinhua/00/chtb_0009 - 30 | 31 | #end document 32 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-I-1.response: -------------------------------------------------------------------------------- 1 | #begin document (nw/xinhua/00/chtb_0009); part 000 2 | nw/xinhua/00/chtb_0009 - 3 | nw/xinhua/00/chtb_0009 (1) 4 | nw/xinhua/00/chtb_0009 - 5 | nw/xinhua/00/chtb_0009 (1) 6 | 
nw/xinhua/00/chtb_0009 - 7 | nw/xinhua/00/chtb_0009 (2) 8 | nw/xinhua/00/chtb_0009 - 9 | nw/xinhua/00/chtb_0009 (2) 10 | nw/xinhua/00/chtb_0009 - 11 | nw/xinhua/00/chtb_0009 - 12 | nw/xinhua/00/chtb_0009 - 13 | nw/xinhua/00/chtb_0009 - 14 | nw/xinhua/00/chtb_0009 - 15 | nw/xinhua/00/chtb_0009 - 16 | nw/xinhua/00/chtb_0009 - 17 | nw/xinhua/00/chtb_0009 - 18 | nw/xinhua/00/chtb_0009 - 19 | nw/xinhua/00/chtb_0009 - 20 | nw/xinhua/00/chtb_0009 - 21 | nw/xinhua/00/chtb_0009 - 22 | nw/xinhua/00/chtb_0009 - 23 | nw/xinhua/00/chtb_0009 - 24 | nw/xinhua/00/chtb_0009 - 25 | nw/xinhua/00/chtb_0009 - 26 | nw/xinhua/00/chtb_0009 - 27 | nw/xinhua/00/chtb_0009 - 28 | nw/xinhua/00/chtb_0009 - 29 | nw/xinhua/00/chtb_0009 - 30 | 31 | #end document 32 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-I.key: -------------------------------------------------------------------------------- 1 | #begin document (nw/xinhua/00/chtb_0009); part 000 2 | nw/xinhua/00/chtb_0009 - 3 | nw/xinhua/00/chtb_0009 (1) 4 | nw/xinhua/00/chtb_0009 - 5 | nw/xinhua/00/chtb_0009 (1) 6 | nw/xinhua/00/chtb_0009 - 7 | nw/xinhua/00/chtb_0009 (1) 8 | nw/xinhua/00/chtb_0009 - 9 | nw/xinhua/00/chtb_0009 (1) 10 | nw/xinhua/00/chtb_0009 - 11 | nw/xinhua/00/chtb_0009 - 12 | nw/xinhua/00/chtb_0009 - 13 | nw/xinhua/00/chtb_0009 - 14 | nw/xinhua/00/chtb_0009 - 15 | nw/xinhua/00/chtb_0009 - 16 | nw/xinhua/00/chtb_0009 - 17 | nw/xinhua/00/chtb_0009 - 18 | nw/xinhua/00/chtb_0009 - 19 | nw/xinhua/00/chtb_0009 - 20 | nw/xinhua/00/chtb_0009 - 21 | nw/xinhua/00/chtb_0009 - 22 | nw/xinhua/00/chtb_0009 - 23 | nw/xinhua/00/chtb_0009 - 24 | nw/xinhua/00/chtb_0009 - 25 | nw/xinhua/00/chtb_0009 - 26 | nw/xinhua/00/chtb_0009 - 27 | nw/xinhua/00/chtb_0009 - 28 | nw/xinhua/00/chtb_0009 - 29 | nw/xinhua/00/chtb_0009 - 30 | 31 | #end document 32 | -------------------------------------------------------------------------------- 
/neuralcoref/train/scorer/test/DataFiles/TC-J-1.response: -------------------------------------------------------------------------------- 1 | #begin document (nw/xinhua/00/chtb_0009); part 000 2 | nw/xinhua/00/chtb_0009 - 3 | nw/xinhua/00/chtb_0009 (1) 4 | nw/xinhua/00/chtb_0009 - 5 | nw/xinhua/00/chtb_0009 - 6 | nw/xinhua/00/chtb_0009 - 7 | nw/xinhua/00/chtb_0009 (1) 8 | nw/xinhua/00/chtb_0009 - 9 | nw/xinhua/00/chtb_0009 - 10 | nw/xinhua/00/chtb_0009 - 11 | nw/xinhua/00/chtb_0009 - 12 | nw/xinhua/00/chtb_0009 - 13 | nw/xinhua/00/chtb_0009 - 14 | nw/xinhua/00/chtb_0009 - 15 | nw/xinhua/00/chtb_0009 - 16 | nw/xinhua/00/chtb_0009 - 17 | nw/xinhua/00/chtb_0009 - 18 | nw/xinhua/00/chtb_0009 - 19 | nw/xinhua/00/chtb_0009 - 20 | nw/xinhua/00/chtb_0009 - 21 | nw/xinhua/00/chtb_0009 - 22 | nw/xinhua/00/chtb_0009 - 23 | nw/xinhua/00/chtb_0009 - 24 | nw/xinhua/00/chtb_0009 - 25 | nw/xinhua/00/chtb_0009 - 26 | nw/xinhua/00/chtb_0009 - 27 | nw/xinhua/00/chtb_0009 - 28 | nw/xinhua/00/chtb_0009 - 29 | nw/xinhua/00/chtb_0009 - 30 | 31 | #end document 32 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-J.key: -------------------------------------------------------------------------------- 1 | #begin document (nw/xinhua/00/chtb_0009); part 000 2 | nw/xinhua/00/chtb_0009 - 3 | nw/xinhua/00/chtb_0009 (1) 4 | nw/xinhua/00/chtb_0009 - 5 | nw/xinhua/00/chtb_0009 (1) 6 | nw/xinhua/00/chtb_0009 - 7 | nw/xinhua/00/chtb_0009 (1) 8 | nw/xinhua/00/chtb_0009 - 9 | nw/xinhua/00/chtb_0009 - 10 | nw/xinhua/00/chtb_0009 - 11 | nw/xinhua/00/chtb_0009 - 12 | nw/xinhua/00/chtb_0009 - 13 | nw/xinhua/00/chtb_0009 - 14 | nw/xinhua/00/chtb_0009 - 15 | nw/xinhua/00/chtb_0009 - 16 | nw/xinhua/00/chtb_0009 - 17 | nw/xinhua/00/chtb_0009 - 18 | nw/xinhua/00/chtb_0009 - 19 | nw/xinhua/00/chtb_0009 - 20 | nw/xinhua/00/chtb_0009 - 21 | nw/xinhua/00/chtb_0009 - 22 | nw/xinhua/00/chtb_0009 - 23 | nw/xinhua/00/chtb_0009 - 24 | 
nw/xinhua/00/chtb_0009 - 25 | nw/xinhua/00/chtb_0009 - 26 | nw/xinhua/00/chtb_0009 - 27 | nw/xinhua/00/chtb_0009 - 28 | nw/xinhua/00/chtb_0009 - 29 | nw/xinhua/00/chtb_0009 - 30 | 31 | #end document 32 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-K-1.response: -------------------------------------------------------------------------------- 1 | #begin document (nw/xinhua/00/chtb_0009); part 000 2 | nw/xinhua/00/chtb_0009 - 3 | nw/xinhua/00/chtb_0009 (1) 4 | nw/xinhua/00/chtb_0009 - 5 | nw/xinhua/00/chtb_0009 (1) 6 | nw/xinhua/00/chtb_0009 - 7 | nw/xinhua/00/chtb_0009 (1) 8 | nw/xinhua/00/chtb_0009 - 9 | nw/xinhua/00/chtb_0009 (2) 10 | nw/xinhua/00/chtb_0009 - 11 | nw/xinhua/00/chtb_0009 (2) 12 | nw/xinhua/00/chtb_0009 - 13 | nw/xinhua/00/chtb_0009 (2) 14 | nw/xinhua/00/chtb_0009 - 15 | nw/xinhua/00/chtb_0009 (3) 16 | nw/xinhua/00/chtb_0009 - 17 | nw/xinhua/00/chtb_0009 (3) 18 | nw/xinhua/00/chtb_0009 - 19 | nw/xinhua/00/chtb_0009 (3) 20 | nw/xinhua/00/chtb_0009 - 21 | nw/xinhua/00/chtb_0009 - 22 | nw/xinhua/00/chtb_0009 - 23 | nw/xinhua/00/chtb_0009 - 24 | nw/xinhua/00/chtb_0009 - 25 | nw/xinhua/00/chtb_0009 - 26 | nw/xinhua/00/chtb_0009 - 27 | nw/xinhua/00/chtb_0009 - 28 | nw/xinhua/00/chtb_0009 - 29 | nw/xinhua/00/chtb_0009 - 30 | 31 | #end document 32 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-K.key: -------------------------------------------------------------------------------- 1 | #begin document (nw/xinhua/00/chtb_0009); part 000 2 | nw/xinhua/00/chtb_0009 - 3 | nw/xinhua/00/chtb_0009 - 4 | nw/xinhua/00/chtb_0009 - 5 | nw/xinhua/00/chtb_0009 (1) 6 | nw/xinhua/00/chtb_0009 - 7 | nw/xinhua/00/chtb_0009 (1) 8 | nw/xinhua/00/chtb_0009 - 9 | nw/xinhua/00/chtb_0009 (1) 10 | nw/xinhua/00/chtb_0009 - 11 | nw/xinhua/00/chtb_0009 (1) 12 | nw/xinhua/00/chtb_0009 - 13 | nw/xinhua/00/chtb_0009 - 14 | 
nw/xinhua/00/chtb_0009 - 15 | nw/xinhua/00/chtb_0009 (1) 16 | nw/xinhua/00/chtb_0009 - 17 | nw/xinhua/00/chtb_0009 (1) 18 | nw/xinhua/00/chtb_0009 - 19 | nw/xinhua/00/chtb_0009 - 20 | nw/xinhua/00/chtb_0009 - 21 | nw/xinhua/00/chtb_0009 (1) 22 | nw/xinhua/00/chtb_0009 - 23 | nw/xinhua/00/chtb_0009 - 24 | nw/xinhua/00/chtb_0009 - 25 | nw/xinhua/00/chtb_0009 - 26 | nw/xinhua/00/chtb_0009 - 27 | nw/xinhua/00/chtb_0009 - 28 | nw/xinhua/00/chtb_0009 - 29 | nw/xinhua/00/chtb_0009 - 30 | 31 | #end document 32 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-L-1.response: -------------------------------------------------------------------------------- 1 | #begin document (nw/xinhua/00/chtb_0009); part 000 2 | nw/xinhua/00/chtb_0009 - 3 | nw/xinhua/00/chtb_0009 (1) 4 | nw/xinhua/00/chtb_0009 - 5 | nw/xinhua/00/chtb_0009 (1) 6 | nw/xinhua/00/chtb_0009 - 7 | nw/xinhua/00/chtb_0009 (2) 8 | nw/xinhua/00/chtb_0009 - 9 | nw/xinhua/00/chtb_0009 (2) 10 | nw/xinhua/00/chtb_0009 - 11 | nw/xinhua/00/chtb_0009 - 12 | nw/xinhua/00/chtb_0009 - 13 | nw/xinhua/00/chtb_0009 (3) 14 | nw/xinhua/00/chtb_0009 - 15 | nw/xinhua/00/chtb_0009 (3) 16 | nw/xinhua/00/chtb_0009 - 17 | nw/xinhua/00/chtb_0009 (3) 18 | nw/xinhua/00/chtb_0009 - 19 | nw/xinhua/00/chtb_0009 - 20 | nw/xinhua/00/chtb_0009 - 21 | nw/xinhua/00/chtb_0009 - 22 | nw/xinhua/00/chtb_0009 - 23 | nw/xinhua/00/chtb_0009 - 24 | nw/xinhua/00/chtb_0009 - 25 | nw/xinhua/00/chtb_0009 - 26 | nw/xinhua/00/chtb_0009 - 27 | nw/xinhua/00/chtb_0009 - 28 | nw/xinhua/00/chtb_0009 - 29 | nw/xinhua/00/chtb_0009 - 30 | 31 | #end document 32 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-L.key: -------------------------------------------------------------------------------- 1 | #begin document (nw/xinhua/00/chtb_0009); part 000 2 | nw/xinhua/00/chtb_0009 - 3 | nw/xinhua/00/chtb_0009 (1) 4 | 
nw/xinhua/00/chtb_0009 - 5 | nw/xinhua/00/chtb_0009 (1) 6 | nw/xinhua/00/chtb_0009 - 7 | nw/xinhua/00/chtb_0009 (1) 8 | nw/xinhua/00/chtb_0009 - 9 | nw/xinhua/00/chtb_0009 (2) 10 | nw/xinhua/00/chtb_0009 - 11 | nw/xinhua/00/chtb_0009 (2) 12 | nw/xinhua/00/chtb_0009 - 13 | nw/xinhua/00/chtb_0009 (2) 14 | nw/xinhua/00/chtb_0009 - 15 | nw/xinhua/00/chtb_0009 (2) 16 | nw/xinhua/00/chtb_0009 - 17 | nw/xinhua/00/chtb_0009 - 18 | nw/xinhua/00/chtb_0009 - 19 | nw/xinhua/00/chtb_0009 - 20 | nw/xinhua/00/chtb_0009 - 21 | nw/xinhua/00/chtb_0009 - 22 | nw/xinhua/00/chtb_0009 - 23 | nw/xinhua/00/chtb_0009 - 24 | nw/xinhua/00/chtb_0009 - 25 | nw/xinhua/00/chtb_0009 - 26 | nw/xinhua/00/chtb_0009 - 27 | nw/xinhua/00/chtb_0009 - 28 | nw/xinhua/00/chtb_0009 - 29 | nw/xinhua/00/chtb_0009 - 30 | 31 | #end document 32 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-M-1.response: -------------------------------------------------------------------------------- 1 | #begin document (LuoTestCase); 2 | test1 0 0 a1 (0 3 | test1 0 1 a2 0) 4 | test1 0 2 junk - 5 | test1 0 3 b1 (0 6 | test1 0 4 b2 - 7 | test1 0 5 b3 - 8 | test1 0 6 b4 0) 9 | test1 0 7 jnk - 10 | test1 0 8 . - 11 | 12 | test2 0 0 c (0) 13 | test2 0 1 jnk - 14 | test2 0 2 d1 (0 15 | test2 0 3 d2 0) 16 | test2 0 4 jnk - 17 | test2 0 5 e (0) 18 | test2 0 6 jnk - 19 | test2 0 7 f1 (0 20 | test2 0 8 f2 - 21 | test2 0 9 f3 0) 22 | test2 0 10 . - 23 | #end document 24 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-M-2.response: -------------------------------------------------------------------------------- 1 | #begin document (LuoTestCase); 2 | test1 0 0 a1 (0 3 | test1 0 1 a2 0) 4 | test1 0 2 junk - 5 | test1 0 3 b1 (1 6 | test1 0 4 b2 - 7 | test1 0 5 b3 - 8 | test1 0 6 b4 1) 9 | test1 0 7 jnk - 10 | test1 0 8 . 
- 11 | 12 | test2 0 0 c (2) 13 | test2 0 1 jnk - 14 | test2 0 2 d1 (3 15 | test2 0 3 d2 3) 16 | test2 0 4 jnk - 17 | test2 0 5 e (4) 18 | test2 0 6 jnk - 19 | test2 0 7 f1 (5 20 | test2 0 8 f2 - 21 | test2 0 9 f3 5) 22 | test2 0 10 . - 23 | #end document 24 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-M-3.response: -------------------------------------------------------------------------------- 1 | #begin document (LuoTestCase); 2 | test1 0 0 a1 (0 3 | test1 0 1 a2 0) 4 | test1 0 2 junk - 5 | test1 0 3 b1 (0 6 | test1 0 4 b2 - 7 | test1 0 5 b3 - 8 | test1 0 6 b4 0) 9 | test1 0 7 jnk - 10 | test1 0 8 . - 11 | 12 | test2 0 0 c (1) 13 | test2 0 1 jnk - 14 | test2 0 2 d1 (1 15 | test2 0 3 d2 1) 16 | test2 0 4 jnk - 17 | test2 0 5 e (1) 18 | test2 0 6 jnk - 19 | test2 0 7 f1 (2 20 | test2 0 8 f2 - 21 | test2 0 9 f3 2) 22 | test2 0 10 . - 23 | #end document 24 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-M-4.response: -------------------------------------------------------------------------------- 1 | #begin document (LuoTestCase); 2 | test1 0 0 a1 (0 3 | test1 0 1 a2 0) 4 | test1 0 2 junk - 5 | test1 0 3 b1 (0 6 | test1 0 4 b2 - 7 | test1 0 5 b3 - 8 | test1 0 6 b4 0) 9 | test1 0 7 jnk - 10 | test1 0 8 . - 11 | 12 | test2 0 0 c (0) 13 | test2 0 1 jnk (0) 14 | test2 0 2 d1 - 15 | test2 0 3 d2 - 16 | test2 0 4 jnk (0) 17 | test2 0 5 e - 18 | test2 0 6 jnk (0) 19 | test2 0 7 f1 - 20 | test2 0 8 f2 - 21 | test2 0 9 f3 - 22 | test2 0 10 . 
- 23 | #end document 24 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-M-5.response: -------------------------------------------------------------------------------- 1 | #begin document (LuoTestCase); 2 | test1 0 0 a1 (0 3 | test1 0 1 a2 0) 4 | test1 0 2 junk - 5 | test1 0 3 b1 (1 6 | test1 0 4 b2 - 7 | test1 0 5 b3 - 8 | test1 0 6 b4 1) 9 | test1 0 7 jnk - 10 | test1 0 8 . - 11 | 12 | test2 0 0 c (2) 13 | test2 0 1 jnk (3) 14 | test2 0 2 d1 - 15 | test2 0 3 d2 - 16 | test2 0 4 jnk (4) 17 | test2 0 5 e - 18 | test2 0 6 jnk (5) 19 | test2 0 7 f1 - 20 | test2 0 8 f2 - 21 | test2 0 9 f3 - 22 | test2 0 10 . - 23 | #end document 24 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-M-6.response: -------------------------------------------------------------------------------- 1 | #begin document (LuoTestCase); 2 | test1 0 0 a1 (0 3 | test1 0 1 a2 0) 4 | test1 0 2 junk - 5 | test1 0 3 b1 (0 6 | test1 0 4 b2 - 7 | test1 0 5 b3 - 8 | test1 0 6 b4 0) 9 | test1 0 7 jnk - 10 | test1 0 8 . - 11 | 12 | test2 0 0 c (1) 13 | test2 0 1 jnk (1) 14 | test2 0 2 d1 - 15 | test2 0 3 d2 - 16 | test2 0 4 jnk (1) 17 | test2 0 5 e - 18 | test2 0 6 jnk (2) 19 | test2 0 7 f1 - 20 | test2 0 8 f2 - 21 | test2 0 9 f3 - 22 | test2 0 10 . - 23 | #end document 24 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-M.key: -------------------------------------------------------------------------------- 1 | #begin document (LuoTestCase); 2 | test1 0 0 a1 (0 3 | test1 0 1 a2 0) 4 | test1 0 2 junk - 5 | test1 0 3 b1 (0 6 | test1 0 4 b2 - 7 | test1 0 5 b3 - 8 | test1 0 6 b4 0) 9 | test1 0 7 jnk - 10 | test1 0 8 . 
- 11 | 12 | test2 0 0 c (0) 13 | test2 0 1 jnk - 14 | test2 0 2 d1 (0 15 | test2 0 3 d2 0) 16 | test2 0 4 jnk - 17 | test2 0 5 e (0) 18 | test2 0 6 jnk - 19 | test2 0 7 f1 (0 20 | test2 0 8 f2 - 21 | test2 0 9 f3 0) 22 | test2 0 10 . - 23 | #end document 24 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-N-1.response: -------------------------------------------------------------------------------- 1 | #begin document (LuoTestCase); 2 | test1 0 0 a1 (0 3 | test1 0 1 a2 0) 4 | test1 0 2 junk - 5 | test1 0 3 b1 (1 6 | test1 0 4 b2 - 7 | test1 0 5 b3 - 8 | test1 0 6 b4 1) 9 | test1 0 7 jnk - 10 | test1 0 8 . - 11 | 12 | test2 0 0 c (2) 13 | test2 0 1 jnk - 14 | test2 0 2 d1 (3 15 | test2 0 3 d2 3) 16 | test2 0 4 jnk - 17 | test2 0 5 e (4) 18 | test2 0 6 jnk - 19 | test2 0 7 f1 (5 20 | test2 0 8 f2 - 21 | test2 0 9 f3 5) 22 | test2 0 10 . - 23 | #end document 24 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-N-2.response: -------------------------------------------------------------------------------- 1 | #begin document (LuoTestCase); 2 | test1 0 0 a1 (0 3 | test1 0 1 a2 0) 4 | test1 0 2 junk - 5 | test1 0 3 b1 (0 6 | test1 0 4 b2 - 7 | test1 0 5 b3 - 8 | test1 0 6 b4 0) 9 | test1 0 7 jnk - 10 | test1 0 8 . - 11 | 12 | test2 0 0 c (0) 13 | test2 0 1 jnk - 14 | test2 0 2 d1 (0 15 | test2 0 3 d2 0) 16 | test2 0 4 jnk - 17 | test2 0 5 e (0) 18 | test2 0 6 jnk - 19 | test2 0 7 f1 (0 20 | test2 0 8 f2 - 21 | test2 0 9 f3 0) 22 | test2 0 10 . 
- 23 | #end document 24 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-N-3.response: -------------------------------------------------------------------------------- 1 | #begin document (LuoTestCase); 2 | test1 0 0 a1 (0 3 | test1 0 1 a2 0) 4 | test1 0 2 junk - 5 | test1 0 3 b1 (0 6 | test1 0 4 b2 - 7 | test1 0 5 b3 - 8 | test1 0 6 b4 0) 9 | test1 0 7 jnk - 10 | test1 0 8 . - 11 | 12 | test2 0 0 c (1) 13 | test2 0 1 jnk - 14 | test2 0 2 d1 (1 15 | test2 0 3 d2 1) 16 | test2 0 4 jnk - 17 | test2 0 5 e (1) 18 | test2 0 6 jnk - 19 | test2 0 7 f1 (2 20 | test2 0 8 f2 - 21 | test2 0 9 f3 2) 22 | test2 0 10 . - 23 | #end document 24 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-N-4.response: -------------------------------------------------------------------------------- 1 | #begin document (LuoTestCase); 2 | test1 0 0 a1 (0 3 | test1 0 1 a2 0) 4 | test1 0 2 junk - 5 | test1 0 3 b1 (1 6 | test1 0 4 b2 - 7 | test1 0 5 b3 - 8 | test1 0 6 b4 1) 9 | test1 0 7 jnk - 10 | test1 0 8 . - 11 | 12 | test2 0 0 c (2) 13 | test2 0 1 jnk (3) 14 | test2 0 2 d1 - 15 | test2 0 3 d2 - 16 | test2 0 4 jnk (4) 17 | test2 0 5 e - 18 | test2 0 6 jnk (5) 19 | test2 0 7 f1 - 20 | test2 0 8 f2 - 21 | test2 0 9 f3 - 22 | test2 0 10 . - 23 | #end document 24 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-N-5.response: -------------------------------------------------------------------------------- 1 | #begin document (LuoTestCase); 2 | test1 0 0 a1 (0 3 | test1 0 1 a2 0) 4 | test1 0 2 junk - 5 | test1 0 3 b1 (0 6 | test1 0 4 b2 - 7 | test1 0 5 b3 - 8 | test1 0 6 b4 0) 9 | test1 0 7 jnk - 10 | test1 0 8 . 
- 11 | 12 | test2 0 0 c (0) 13 | test2 0 1 jnk (0) 14 | test2 0 2 d1 - 15 | test2 0 3 d2 - 16 | test2 0 4 jnk (0) 17 | test2 0 5 e - 18 | test2 0 6 jnk (0) 19 | test2 0 7 f1 - 20 | test2 0 8 f2 - 21 | test2 0 9 f3 - 22 | test2 0 10 . - 23 | #end document 24 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-N-6.response: -------------------------------------------------------------------------------- 1 | #begin document (LuoTestCase); 2 | test1 0 0 a1 (0 3 | test1 0 1 a2 0) 4 | test1 0 2 junk - 5 | test1 0 3 b1 (0 6 | test1 0 4 b2 - 7 | test1 0 5 b3 - 8 | test1 0 6 b4 0) 9 | test1 0 7 jnk - 10 | test1 0 8 . - 11 | 12 | test2 0 0 c (1) 13 | test2 0 1 jnk (1) 14 | test2 0 2 d1 - 15 | test2 0 3 d2 - 16 | test2 0 4 jnk (1) 17 | test2 0 5 e - 18 | test2 0 6 jnk (2) 19 | test2 0 7 f1 - 20 | test2 0 8 f2 - 21 | test2 0 9 f3 - 22 | test2 0 10 . - 23 | #end document 24 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/DataFiles/TC-N.key: -------------------------------------------------------------------------------- 1 | #begin document (LuoTestCase); 2 | test1 0 0 a1 (0 3 | test1 0 1 a2 0) 4 | test1 0 2 junk - 5 | test1 0 3 b1 (1 6 | test1 0 4 b2 - 7 | test1 0 5 b3 - 8 | test1 0 6 b4 1) 9 | test1 0 7 jnk - 10 | test1 0 8 . - 11 | 12 | test2 0 0 c (2) 13 | test2 0 1 jnk - 14 | test2 0 2 d1 (3 15 | test2 0 3 d2 3) 16 | test2 0 4 jnk - 17 | test2 0 5 e (4) 18 | test2 0 6 jnk - 19 | test2 0 7 f1 (5 20 | test2 0 8 f2 - 21 | test2 0 9 f3 5) 22 | test2 0 10 . - 23 | #end document 24 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer/test/test.pl: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl 2 | 3 | BEGIN { 4 | $d = $0; 5 | $d =~ s/\/[^\/][^\/]*$//g; 6 | push(@INC, $d); 7 | push(@INC, $d . 
"/../lib"); 8 | } 9 | 10 | use strict; 11 | use CorScorer; 12 | use CorefMetricTest; 13 | use CorefMetricTestConfig; 14 | 15 | my $error_tolerance = 1.e-4; 16 | my $script_dir = $0; 17 | $script_dir =~ s/\/[^\/][^\/]*$//g; 18 | 19 | foreach my $test_case (@CorefMetricTestConfig::TestCases) { 20 | my $id = $test_case->{'id'}; 21 | my @key_response_files = ($script_dir . "/" . $test_case->{'key_file'}, 22 | $script_dir . "/" . $test_case->{'response_file'}); 23 | print "\nTesting case ($id): keyFile=", $key_response_files[0], 24 | " responseFile=", $key_response_files[1], "\n"; 25 | my $expected_metrics = $test_case->{'expected_metrics'}; 26 | foreach my $metric_name (sort keys %$expected_metrics) { 27 | my $expected_values = $expected_metrics->{$metric_name}; 28 | *::SAVED_STDOUT = *STDOUT; 29 | *STDOUT = *::SUPRRES_STDOUT; 30 | my @actual_counts = &CorScorer::Score($metric_name, @key_response_files); 31 | # Compute R,P,and F1 from raw counts. 32 | my @actual_values = CorefMetricTest::ComputeScoreFromCounts(@actual_counts); 33 | *STDOUT = *::SAVED_STDOUT; 34 | my $diff = CorefMetricTest::DiffExpectedAndActual($expected_values, \@actual_values); 35 | printf " metric: %+10s", $metric_name; 36 | if ($diff < $error_tolerance) { 37 | print " => PASS\n"; 38 | } else { 39 | print " => FAIL\n"; 40 | print " Expected (recall, prec, F1) = (", join(" ", @$expected_values), ")\n"; 41 | print " Actual (recall, prec, F1) = (", join(" ", @actual_values), ")\n"; 42 | #exit(1); 43 | } 44 | } 45 | } 46 | 47 | -------------------------------------------------------------------------------- /neuralcoref/train/scorer_wrapper.pl: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl 2 | 3 | BEGIN { 4 | $d = $0; 5 | $d =~ s/\/[^\/][^\/]*$//g; 6 | 7 | if ($d eq $0) { 8 | unshift(@INC, "scorer/lib"); 9 | } 10 | else { 11 | unshift(@INC, $d . 
"/scorer/lib"); 12 | } 13 | } 14 | 15 | use strict; 16 | use CorScorer; 17 | 18 | my $metric = shift(@ARGV); 19 | if ($metric !~ /^(muc|bcub|ceafm|ceafe|blanc|all)/i) { 20 | print "Invalid metric\n"; 21 | exit; 22 | } 23 | 24 | if ($metric eq 'all') { 25 | foreach my $m ('muc', 'bcub', 'ceafm', 'ceafe', 'blanc') { 26 | # print "\nMETRIC $m:\n"; 27 | my ($acumNR, $acumDR, $acumNP, $acumDP, $identNR, $identDR, $identNP, $identDP) = &CorScorer::Score($m, @ARGV); 28 | print "$acumNR $acumDR $acumNP $acumDP\n$identNR $identDR $identNP $identDP"; 29 | } 30 | } 31 | else { 32 | my ($acumNR, $acumDR, $acumNP, $acumDP, $identNR, $identDR, $identNP, $identDP) = &CorScorer::Score($metric, @ARGV); 33 | print "$acumNR $acumDR $acumNP $acumDP\n$identNR $identDR $identNP $identDP"; 34 | } 35 | 36 | -------------------------------------------------------------------------------- /neuralcoref/train/training.md: -------------------------------------------------------------------------------- 1 | # How to train and modify the neural coreference model 2 | 3 | Please check our [detailed blog post](https://medium.com/huggingface/how-to-train-a-neural-coreference-model-neuralcoref-2-7bb30c1abdfe) together with these short notes. 4 | 5 | ## Install 6 | 7 | As always, we recommend creating a clean environment (conda or virtual env) to install and train the model. 8 | 9 | You will need to install [pyTorch](http://pytorch.org/), the neuralcoref package with the additional training requirements and download a language model for spacy. 10 | Currently this can be done (assuming an English language model) with 11 | 12 | ```bash 13 | conda install pytorch -c pytorch 14 | pip install -r ./train/training_requirements.txt -e . 15 | python -m spacy download en 16 | ``` 17 | 18 | ## Get the data 19 | 20 | The following assumes you want to train on English, Arabic or Chinese. 21 | If you want to train on another language, see the section [train on a new language](#train-on-a-new-language) below. 
22 | 23 | First, download the [OntoNotes 5.0 dataset](https://catalog.ldc.upenn.edu/LDC2013T19) from LDC. 24 | 25 | Then, download the [CoNLL-2012 skeleton files](http://conll.cemantix.org/2012/data.html) from the CoNLL 2012 shared task site, 26 | and combine these skeleton files with the OntoNotes files to get the `*._conll` text files which can be used as inputs for the training. 27 | 28 | This can be done by executing the script [compile_coref_data.sh](/neuralcoref/train/conll_processing_script/compile_coref_data.sh) 29 | or by following these steps: 30 | 31 | - From the [CoNLL 2012 download site](http://conll.cemantix.org/2012/download/), download and extract: 32 | - http://conll.cemantix.org/2012/download/conll-2012-train.v4.tar.gz 33 | - http://conll.cemantix.org/2012/download/conll-2012-development.v4.tar.gz 34 | - http://conll.cemantix.org/2012/download/test/conll-2012-test-key.tar.gz 35 | - http://conll.cemantix.org/2012/download/test/conll-2012-test-official.v9.tar.gz 36 | - http://conll.cemantix.org/2012/download/conll-2012-scripts.v3.tar.gz 37 | - http://conll.cemantix.org/download/reference-coreference-scorers.v8.01.tar.gz 38 | - Move `reference-coreference-scorers` into the folder `conll-2012/` and rename to `scorer` 39 | - If you are using Python 3.X, you have to edit the `conll-2012/v3/scripts/skeleton2conll.py` file 40 | - Change `except InvalidSexprException, e:` to `except InvalidSexprException as e:` 41 | - Change all `print` statements to `print()` 42 | - Create the `*._conll` text files by executing 43 | - `conll-2012/v3/scripts/skeleton2conll.sh -D path_to_ontonotes_folder/data/ conll-2012` (may take a little while) 44 | - This will create `*.v4_gold_conll` files in each subdirectory of the `conll-2012` `data` folder. 
45 | - Assemble the appropriate files into one large file each for training, development and testing 46 | - `my_lang` can be `english`, `arabic` or `chinese` 47 | - `cat conll-2012/v4/data/train/data/my_lang/annotations/*/*/*/*.v4_gold_conll >> train.my_lang.v4_gold_conll` 48 | - `cat conll-2012/v4/data/development/data/my_lang/annotations/*/*/*/*.v4_gold_conll >> dev.my_lang.v4_gold_conll` 49 | - `cat conll-2012/v4/data/test/data/my_lang/annotations/*/*/*/*.v4_gold_conll >> test.my_lang.v4_gold_conll` 50 | 51 | ## Prepare the data 52 | 53 | Once you have the set of `*.v4_gold_conll` files, move these files into separate (`train`, `test`, `dev`) subdirectories inside a new directory. You can use the already present `data` directory or create another directory anywhere you want. Now, you can prepare the training data by running 54 | [conllparser.py](/neuralcoref/train/conllparser.py) on each split of the data set (`train`, `test`, `dev`) as 55 | 56 | ```bash 57 | python -m neuralcoref.train.conllparser --path ./$path_to_data_directory/train/ 58 | python -m neuralcoref.train.conllparser --path ./$path_to_data_directory/test/ 59 | python -m neuralcoref.train.conllparser --path ./$path_to_data_directory/dev/ 60 | ``` 61 | 62 | Conllparser will: 63 | 64 | - parse the `*._conll` files using spaCy, 65 | - identify predicted mentions, 66 | - compute the mentions features (see our blog post), and 67 | - gather the mention features in a set of numpy arrays to be used as input for the neural net model. 68 | 69 | ## Train the model 70 | 71 | Once the files have been pre-processed 72 | (you should have a set of `*.npy` files in a sub-directory `/numpy` in each of your (`train`|`test`|`dev`) data folder), 73 | you can start the training process using [learn.py](/neuralcoref/train/learn.py), for example as 74 | 75 | ```bash 76 | python -m neuralcoref.train.learn --train ./data/train/ --eval ./data/dev/ 77 | ``` 78 | 79 | There are many parameters and options for the training. 
You can list them with the usual 80 | 81 | ```bash 82 | python -m neuralcoref.train.learn --help 83 | ``` 84 | 85 | You can follow the training by running [Tensorboard for pyTorch](https://github.com/lanpa/tensorboard-pytorch) 86 | (it requires a version of Tensorflow, any version will be fine). Run it with `tensorboard --logdir runs`. 87 | 88 | ## Some details on the training 89 | 90 | The model and the training are thoroughly described in our 91 | [very detailed blog post](https://medium.com/huggingface/how-to-train-a-neural-coreference-model-neuralcoref-2-7bb30c1abdfe). 92 | The training process is similar to the mention-ranking training described in 93 | [Clark and Manning (2016)](http://cs.stanford.edu/people/kevclark/resources/clark-manning-emnlp2016-deep.pdf), namely: 94 | 95 | - A first step of training uses a standard cross entropy loss on the mention pair labels, 96 | - A second step of training uses a cross entropy loss on the top pairs only, and 97 | - A third step of training uses a slack-rescaled ranking loss. 98 | 99 | With the default option, the training will switch from one step to the other as soon as the evaluation stops increasing. 100 | 101 | Training the model with the default hyper-parameters reaches a test loss of about 61.2 which is lower than the mention ranking test loss of 64.7 reported in [Clark and Manning (2016)](http://cs.stanford.edu/people/kevclark/resources/clark-manning-emnlp2016-deep.pdf). 102 | 103 | Some possible explanations: 104 | 105 | - Our mention extraction function is a simple rule-based function (in [document.py](/document.py)) that was not extensively tuned on the CoNLL dataset and as a result only identifies about 90% of the gold mentions in the CoNLL-2012 dataset (see the evaluation at the start of the training) thereby reducing the maximum possible score. 
Manually tuning a mention identification module can be a lengthy process that basically involves designing a lot of heuristics to prune spurious mentions while keeping a high recall (see for example the [rule-based mention extraction used in CoreNLP](http://www.aclweb.org/anthology/D10-1048)). An alternative is to train an end-to-end identification module as used in the AllenAI coreference module but this is a lot more complex (you have to learn a pruning function) and the focus of the neuralcoref project is to have a coreference module with a good trade-off between accuracy and simplicity/speed. 106 | - The hyper-parameters and the optimization procedure has not been fully tuned and it is likely possible to find better hyper-parameters and smarter ways to optimize. One possibility is to adjust the balance between the gradients backpropagated in the single-mention and the mentions-pair feedforward networks (see our [blog post](https://medium.com/huggingface/how-to-train-a-neural-coreference-model-neuralcoref-2-7bb30c1abdfe) for more details on the model architecture). Here again, we aimed for a balance between the accuracy and the training speed. As a result, the model trains in about 18h versus about a week for the original model of [Clark and Manning (2016)](http://cs.stanford.edu/people/kevclark/resources/clark-manning-emnlp2016-deep.pdf) and 2 days for the current state-of-the-art model of AllenAI. 107 | - Again for the sake of high throughput, the parse tree output by the [standard English model](https://spacy.io/models/en#en_core_web_sm) of spaCy 2 (that we used for these tests) are slightly less accurate than the carefully tuned CoreNLP parse trees (but they are way faster to compute!) and will lead to a slightly higher percentage of wrong parsing annotations. 
108 | - Eventually, it may also be interesting to use newer word-vectors like [ELMo](https://arxiv.org/abs/1802.05365) as they were shown to be able to increase the state-of-the-art coreference model F1 test measure by more than 3 percent. 109 | 110 | ## Train on a new language 111 | 112 | Training on a new language is now possible. However, do not expect it to be a plug-in operation as it involves finding a good annotated dataset and adapting the file-loading and mention-extraction functions to your file format and your language syntax (parse tree). 113 | 114 | To bootstrap your work, I detail here the general steps you should follow: 115 | 116 | - Find a corpus with coreference annotations (as always, the bigger, the better). 117 | - Check that spaCy [supports your language](https://spacy.io/models/) (i.e. is able to parse it). If not, you will have to find another parser that is able to parse your language and integrate it with the project (might involve quite large modifications to neuralcoref depending on the parser). 118 | - Find a set of pre-trained word vectors in your language (gloVe or others). 119 | - If your dataset does not follow the tabular `*_conll` file format (see [details on the CoNLL file format](http://conll.cemantix.org/2012/data.html) on the CoNLL website), you will have to tweak the `load_file` function in [conllparser.py](/conllparser.py) to adapt it to your file format. 120 | - Adapt the mention extraction function to your language parse trees (`extract_mentions_spans` in [document.py](/document.py)) to reach an acceptable identification of mentions (the function should output the list of all possible mentions in a document: pronouns, nouns, noun phrases and all the nested possible combinations). 121 | - Re-train the model and tune the hyper-parameters.
122 | -------------------------------------------------------------------------------- /neuralcoref/train/training_requirements.txt: -------------------------------------------------------------------------------- 1 | spacy 2 | torch>=1.3.0,<1.4.0 3 | tensorboardX -------------------------------------------------------------------------------- /neuralcoref/train/utils.py: -------------------------------------------------------------------------------- 1 | """Utils""" 2 | 3 | 4 | from concurrent.futures import ThreadPoolExecutor, as_completed 5 | import os 6 | import numpy as np 7 | from tqdm import tqdm 8 | 9 | PACKAGE_DIRECTORY = os.path.dirname(os.path.abspath(__file__)) 10 | BATCH_SIZE_PATH = os.path.join( 11 | PACKAGE_DIRECTORY, "test_batch_size.txt" 12 | ) # fernandes.txt")# 13 | 14 | SIZE_SPAN = 250 # size of the span vector (averaged word embeddings) 15 | SIZE_WORD = 8 # number of words in a mention (tuned embeddings) 16 | SIZE_EMBEDDING = 50 # size of the words embeddings 17 | SIZE_FP = 70 # number of features for a pair of mention 18 | SIZE_FP_COMPRESSED = ( 19 | 9 20 | ) # size of the features for a pair of mentions as stored in numpy arrays 21 | SIZE_FS = 24 # number of features of a single mention 22 | SIZE_FS_COMPRESSED = 6 # size of the features for a mention as stored in numpy arrays 23 | SIZE_GENRE = 7 # Size of the genre one-hot array 24 | SIZE_MENTION_EMBEDDING = ( 25 | SIZE_SPAN + SIZE_WORD * SIZE_EMBEDDING 26 | ) # A mention embeddings (span + words vectors) 27 | SIZE_SNGL_FEATS = SIZE_FS - SIZE_GENRE 28 | SIZE_PAIR_FEATS = SIZE_FP - SIZE_GENRE 29 | SIZE_SNGL_IN_NO_GENRE = SIZE_MENTION_EMBEDDING + SIZE_SNGL_FEATS 30 | SIZE_PAIR_IN_NO_GENRE = 2 * SIZE_MENTION_EMBEDDING + SIZE_PAIR_FEATS 31 | 32 | SIZE_PAIR_IN = ( 33 | 2 * SIZE_MENTION_EMBEDDING + SIZE_FP 34 | ) # Input to the mentions pair neural network 35 | SIZE_SINGLE_IN = ( 36 | SIZE_MENTION_EMBEDDING + SIZE_FS 37 | ) # Input to the single mention neural network 38 | 39 | DISTANCE_BINS = 
# Distance binning: distances 0-4 each get their own bin, then increasingly
# wide bins (3x bin 5, 8x bin 6, 16x bin 7, 32x bin 8) -- 64 entries total.
DISTANCE_BINS = list(range(5)) + [5] * 3 + [6] * 8 + [7] * 16 + [8] * 32
BINS_NUM = float(len(DISTANCE_BINS))
MAX_BINS = DISTANCE_BINS[-1] + 1


def encode_distance(x):
    """ Encode an integer or an array of integers as a (binned) one-hot numpy array.

    Args:
        x (int or np.ndarray): distance(s) to encode (assumed non-negative).
    Returns:
        np.ndarray: shape (11,) float32 for a scalar input, (len(x), 11) for an
            array input. Indices 0-8 are the one-hot distance bin, index 9
            flags an out-of-range distance, index 10 is the capped
            normalized distance.
    """

    def _encode_distance(d):
        """ Encode a single integer as a (binned) one-hot numpy array. """
        dist_vect = np.zeros((11,), dtype="float32")
        if d < len(DISTANCE_BINS):  # was a hard-coded 64 (== len(DISTANCE_BINS))
            dist_vect[DISTANCE_BINS[d]] = 1
        else:
            dist_vect[MAX_BINS] = 1  # was a hard-coded 9 (== MAX_BINS)
        dist_vect[10] = min(float(d), BINS_NUM) / BINS_NUM
        return dist_vect

    if isinstance(x, np.ndarray):
        arr_l = [_encode_distance(y)[np.newaxis, :] for y in x]
        out_arr = np.concatenate(arr_l)
    else:
        out_arr = _encode_distance(x)
    return out_arr


def parallel_process(array, function, n_jobs=16, use_kwargs=False, front_num=10):
    """
    A parallel version of the map function with a progress bar.

    Args:
        array (array-like): An array to iterate over.
        function (function): A python function to apply to the elements of array
        n_jobs (int, default=16): The number of threads to use
            (work is dispatched to a ThreadPoolExecutor, not separate processes)
        use_kwargs (boolean, default=False): Whether to consider the elements of array
            as dictionaries of keyword arguments to function
        front_num (int, default=10): The number of iterations to run serially before
            kicking off the parallel job. Useful for catching bugs
            (the docstring previously said default=3; the actual default is 10)
    Returns:
        [function(array[0]), function(array[1]), ...]
        NOTE: an exception raised by a worker is caught and appended to the
        output in place of its result, so one failing element does not abort
        the whole batch -- callers should check for Exception instances.
    """
    # We run the first few iterations serially to catch bugs
    if front_num > 0:
        front = [
            function(**a) if use_kwargs else function(a) for a in array[:front_num]
        ]
    else:
        front = []
    # If we set n_jobs to 1, just run a list comprehension. This is useful for benchmarking and debugging.
    if n_jobs == 1:
        return front + [
            function(**a) if use_kwargs else function(a)
            for a in tqdm(array[front_num:])
        ]
    # Assemble the workers
    with ThreadPoolExecutor(max_workers=n_jobs) as pool:
        # Pass the elements of array into function
        if use_kwargs:
            futures = [pool.submit(function, **a) for a in array[front_num:]]
        else:
            futures = [pool.submit(function, a) for a in array[front_num:]]
        out = []
        # Get the results from the futures, keeping worker exceptions as results.
        for future in futures:
            try:
                out.append(future.result())
            except Exception as e:
                out.append(e)
    return front + out
https://raw.githubusercontent.com/huggingface/neuralcoref/60338df6f9b0a44a6728b442193b7c66653b0731/neuralcoref/train/weights/pair_mentions_bias_layer_2.npy -------------------------------------------------------------------------------- /neuralcoref/train/weights/pair_mentions_bias_layer_3.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/neuralcoref/60338df6f9b0a44a6728b442193b7c66653b0731/neuralcoref/train/weights/pair_mentions_bias_layer_3.npy -------------------------------------------------------------------------------- /neuralcoref/train/weights/pair_mentions_bias_layer_4.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/neuralcoref/60338df6f9b0a44a6728b442193b7c66653b0731/neuralcoref/train/weights/pair_mentions_bias_layer_4.npy -------------------------------------------------------------------------------- /neuralcoref/train/weights/pair_mentions_weights_layer_0.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/neuralcoref/60338df6f9b0a44a6728b442193b7c66653b0731/neuralcoref/train/weights/pair_mentions_weights_layer_0.npy -------------------------------------------------------------------------------- /neuralcoref/train/weights/pair_mentions_weights_layer_1.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/neuralcoref/60338df6f9b0a44a6728b442193b7c66653b0731/neuralcoref/train/weights/pair_mentions_weights_layer_1.npy -------------------------------------------------------------------------------- /neuralcoref/train/weights/pair_mentions_weights_layer_2.npy: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/huggingface/neuralcoref/60338df6f9b0a44a6728b442193b7c66653b0731/neuralcoref/train/weights/pair_mentions_weights_layer_2.npy -------------------------------------------------------------------------------- /neuralcoref/train/weights/pair_mentions_weights_layer_3.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/neuralcoref/60338df6f9b0a44a6728b442193b7c66653b0731/neuralcoref/train/weights/pair_mentions_weights_layer_3.npy -------------------------------------------------------------------------------- /neuralcoref/train/weights/pair_mentions_weights_layer_4.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/neuralcoref/60338df6f9b0a44a6728b442193b7c66653b0731/neuralcoref/train/weights/pair_mentions_weights_layer_4.npy -------------------------------------------------------------------------------- /neuralcoref/train/weights/single_mention_bias_layer_0.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/neuralcoref/60338df6f9b0a44a6728b442193b7c66653b0731/neuralcoref/train/weights/single_mention_bias_layer_0.npy -------------------------------------------------------------------------------- /neuralcoref/train/weights/single_mention_bias_layer_1.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/neuralcoref/60338df6f9b0a44a6728b442193b7c66653b0731/neuralcoref/train/weights/single_mention_bias_layer_1.npy -------------------------------------------------------------------------------- /neuralcoref/train/weights/single_mention_bias_layer_2.npy: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/huggingface/neuralcoref/60338df6f9b0a44a6728b442193b7c66653b0731/neuralcoref/train/weights/single_mention_bias_layer_2.npy -------------------------------------------------------------------------------- /neuralcoref/train/weights/single_mention_bias_layer_3.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/neuralcoref/60338df6f9b0a44a6728b442193b7c66653b0731/neuralcoref/train/weights/single_mention_bias_layer_3.npy -------------------------------------------------------------------------------- /neuralcoref/train/weights/single_mention_bias_layer_4.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/neuralcoref/60338df6f9b0a44a6728b442193b7c66653b0731/neuralcoref/train/weights/single_mention_bias_layer_4.npy -------------------------------------------------------------------------------- /neuralcoref/train/weights/single_mention_weights_layer_0.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/neuralcoref/60338df6f9b0a44a6728b442193b7c66653b0731/neuralcoref/train/weights/single_mention_weights_layer_0.npy -------------------------------------------------------------------------------- /neuralcoref/train/weights/single_mention_weights_layer_1.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/neuralcoref/60338df6f9b0a44a6728b442193b7c66653b0731/neuralcoref/train/weights/single_mention_weights_layer_1.npy -------------------------------------------------------------------------------- /neuralcoref/train/weights/single_mention_weights_layer_2.npy: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/huggingface/neuralcoref/60338df6f9b0a44a6728b442193b7c66653b0731/neuralcoref/train/weights/single_mention_weights_layer_2.npy -------------------------------------------------------------------------------- /neuralcoref/train/weights/single_mention_weights_layer_3.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/neuralcoref/60338df6f9b0a44a6728b442193b7c66653b0731/neuralcoref/train/weights/single_mention_weights_layer_3.npy -------------------------------------------------------------------------------- /neuralcoref/train/weights/single_mention_weights_layer_4.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/neuralcoref/60338df6f9b0a44a6728b442193b7c66653b0731/neuralcoref/train/weights/single_mention_weights_layer_4.npy -------------------------------------------------------------------------------- /neuralcoref/train/weights/static_word_embeddings.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/neuralcoref/60338df6f9b0a44a6728b442193b7c66653b0731/neuralcoref/train/weights/static_word_embeddings.npy -------------------------------------------------------------------------------- /neuralcoref/train/weights/tuned_word_embeddings.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/neuralcoref/60338df6f9b0a44a6728b442193b7c66653b0731/neuralcoref/train/weights/tuned_word_embeddings.npy -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | spacy>=2.1.0,<3.0.0 2 | cython>=0.25 3 | pytest 4 | -------------------------------------------------------------------------------- /setup.py: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | Simple check list from AllenNLP repo: https://github.com/allenai/allennlp/blob/master/setup.py 4 | 5 | To create the package for pypi. 6 | 7 | 1. Change the version in __init__.py and setup.py. 8 | 9 | 2. Commit these changes with the message: "Release: VERSION" 10 | 11 | 3. Add a tag in git to mark the release: "git tag VERSION -m'Adds tag VERSION for pypi' " 12 | Push the tag to git: git push --tags origin master 13 | 14 | 4. Build both the sources and the wheel. Do not change anything in setup.py between 15 | creating the wheel and the source distribution (obviously). 16 | 17 | For the wheel, run: "python setup.py bdist_wheel" in the top level allennlp directory. 18 | (this will build a wheel for the python version you use to build it - make sure you use python 3.x). 19 | 20 | For the sources, run: "python setup.py sdist" 21 | You should now have a /dist directory with both .whl and .tar.gz source versions of allennlp. 22 | 23 | 5. Check that everything looks correct by uploading the package to the pypi test server: 24 | 25 | twine upload dist/* -r pypitest 26 | (pypi suggest using twine as other methods upload files via plaintext.) 27 | 28 | Check that you can install it in a virtualenv by running: 29 | pip install -i https://testpypi.python.org/pypi neuralcoref 30 | 31 | 6. Upload the final version to actual pypi: 32 | twine upload dist/* -r pypi 33 | 34 | 7. Copy the release notes from RELEASE.md to the tag in github once everything is looking hunky-dory. 
35 | 36 | """ 37 | import os 38 | import subprocess 39 | import sys 40 | import contextlib 41 | from distutils.command.build_ext import build_ext 42 | from distutils.sysconfig import get_python_inc 43 | import distutils.util 44 | from distutils import ccompiler, msvccompiler 45 | from setuptools import Extension, setup, find_packages 46 | 47 | def is_new_osx(): 48 | """Check whether we're on OSX >= 10.10""" 49 | name = distutils.util.get_platform() 50 | if sys.platform != "darwin": 51 | return False 52 | elif name.startswith("macosx-10"): 53 | minor_version = int(name.split("-")[1].split(".")[1]) 54 | if minor_version >= 7: 55 | return True 56 | else: 57 | return False 58 | else: 59 | return False 60 | 61 | 62 | PACKAGE_DATA = {'': ['*.pyx', '*.pxd'], 63 | '': ['*.h'],} 64 | 65 | 66 | PACKAGES = find_packages() 67 | 68 | 69 | MOD_NAMES = ['neuralcoref.neuralcoref'] 70 | 71 | 72 | 73 | COMPILE_OPTIONS = { 74 | "msvc": ["/Ox", "/EHsc"], 75 | "mingw32": ["-O2", "-Wno-strict-prototypes", "-Wno-unused-function"], 76 | "other": ["-O2", "-Wno-strict-prototypes", "-Wno-unused-function"], 77 | } 78 | 79 | 80 | LINK_OPTIONS = {"msvc": [], "mingw32": [], "other": []} 81 | 82 | 83 | if is_new_osx(): 84 | # On Mac, use libc++ because Apple deprecated use of 85 | # libstdc 86 | COMPILE_OPTIONS["other"].append("-stdlib=libc++") 87 | LINK_OPTIONS["other"].append("-lc++") 88 | # g++ (used by unix compiler on mac) links to libstdc++ as a default lib. 
89 | # See: https://stackoverflow.com/questions/1653047/avoid-linking-to-libstdc 90 | LINK_OPTIONS["other"].append("-nodefaultlibs") 91 | 92 | 93 | USE_OPENMP_DEFAULT = "0" if sys.platform != "darwin" else None 94 | if os.environ.get("USE_OPENMP", USE_OPENMP_DEFAULT) == "1": 95 | if sys.platform == "darwin": 96 | COMPILE_OPTIONS["other"].append("-fopenmp") 97 | LINK_OPTIONS["other"].append("-fopenmp") 98 | PACKAGE_DATA["spacy.platform.darwin.lib"] = ["*.dylib"] 99 | PACKAGES.append("spacy.platform.darwin.lib") 100 | 101 | elif sys.platform == "win32": 102 | COMPILE_OPTIONS["msvc"].append("/openmp") 103 | 104 | else: 105 | COMPILE_OPTIONS["other"].append("-fopenmp") 106 | LINK_OPTIONS["other"].append("-fopenmp") 107 | 108 | 109 | # By subclassing build_extensions we have the actual compiler that will be used which is really known only after finalize_options 110 | # http://stackoverflow.com/questions/724664/python-distutils-how-to-get-a-compiler-that-is-going-to-be-used 111 | class build_ext_options: 112 | def build_options(self): 113 | for e in self.extensions: 114 | e.extra_compile_args += COMPILE_OPTIONS.get( 115 | self.compiler.compiler_type, COMPILE_OPTIONS["other"] 116 | ) 117 | for e in self.extensions: 118 | e.extra_link_args += LINK_OPTIONS.get( 119 | self.compiler.compiler_type, LINK_OPTIONS["other"] 120 | ) 121 | 122 | 123 | class build_ext_subclass(build_ext, build_ext_options): 124 | def build_extensions(self): 125 | build_ext_options.build_options(self) 126 | build_ext.build_extensions(self) 127 | 128 | 129 | # def is_installed(requirement): 130 | # try: 131 | # pkg_resources.require(requirement) 132 | # except pkg_resources.ResolutionError: 133 | # return False 134 | # else: 135 | # return True 136 | 137 | # if not is_installed('numpy>=1.11.0') or not is_installed('spacy>=2.1.0'): 138 | # print(textwrap.dedent(""" 139 | # Error: requirements needs to be installed first. 
@contextlib.contextmanager
def chdir(new_dir):
    """Temporarily make *new_dir* the working directory.

    Also pushes *new_dir* onto the front of sys.path for the duration so
    freshly generated modules can be imported; both changes are reverted
    on exit, even if the body raises.
    """
    previous_dir = os.getcwd()
    try:
        os.chdir(new_dir)
        sys.path.insert(0, new_dir)
        yield
    finally:
        del sys.path[0]
        os.chdir(previous_dir)


def generate_cython(root, source):
    """Run bin/cythonize.py over *source*; raise if it exits non-zero."""
    print('Cythonizing sources')
    cythonize_script = os.path.join(root, 'bin', 'cythonize.py')
    exit_code = subprocess.call(
        [sys.executable, cythonize_script, source], env=os.environ
    )
    if exit_code != 0:
        raise RuntimeError('Running cythonize failed')


def is_source_release(path):
    """A source release (sdist) ships a PKG-INFO file; a git checkout does not."""
    return os.path.exists(os.path.join(path, 'PKG-INFO'))


def setup_package():
    """Cythonize if needed, assemble the C++ extensions, and invoke setup()."""
    root = os.path.abspath(os.path.dirname(__file__))
    with chdir(root):
        # sdists ship pre-generated .cpp files, so only cythonize in a checkout.
        if not is_source_release(root):
            generate_cython(root, 'neuralcoref')

        include_dirs = [
            get_python_inc(plat_specific=True),
            os.path.join(root, 'include'),
        ]

        # MSVC 9 (VS 2008) lacks stdint.h; add the bundled copy.
        if (ccompiler.new_compiler().compiler_type == 'msvc'
                and msvccompiler.get_build_version() == 9):
            include_dirs.append(os.path.join(root, 'include', 'msvc9'))

        ext_modules = []
        for mod_name in MOD_NAMES:
            mod_path = mod_name.replace('.', '/') + '.cpp'
            extra_link_args = []
            # ???
            # Imported from patch from @mikepb
            # See Issue #267. Running blind here...
            if sys.platform == 'darwin':
                parents = '/'.join('..' for _ in range(mod_name.count('.')))
                dylib_path = '@loader_path/%s/neuralcoref/platform/darwin/lib' % parents
                extra_link_args.append('-Wl,-rpath,%s' % dylib_path)
            ext_modules.append(
                Extension(
                    mod_name,
                    [mod_path],
                    language='c++',
                    include_dirs=include_dirs,
                    extra_link_args=extra_link_args,
                )
            )

        setup(
            name='neuralcoref',
            version='4.0',
            description="Coreference Resolution in spaCy with Neural Networks",
            url='https://github.com/huggingface/neuralcoref',
            author='Thomas Wolf',
            author_email='thomwolf@gmail.com',
            ext_modules=ext_modules,
            classifiers=[
                'Development Status :: 3 - Alpha',
                'Environment :: Console',
                'Intended Audience :: Developers',
                "Intended Audience :: Science/Research",
                "License :: OSI Approved :: MIT License",
                "Operating System :: POSIX :: Linux",
                "Operating System :: MacOS :: MacOS X",
                "Operating System :: Microsoft :: Windows",
                "Programming Language :: Cython",
                "Programming Language :: Python :: 3.6",
                "Programming Language :: Python :: 3.7",
                "Programming Language :: Python :: 3.8",
                "Topic :: Scientific/Engineering",
            ],
            install_requires=[
                "numpy>=1.15.0",
                "boto3",
                "requests>=2.13.0,<3.0.0",
                "spacy>=2.1.0,<3.0.0",
            ],
            setup_requires=['wheel', 'spacy>=2.1.0,<3.0.0'],
            python_requires=">=3.6",
            packages=PACKAGES,
            package_data=PACKAGE_DATA,
            keywords='NLP chatbots coreference resolution',
            license='MIT',
            zip_safe=False,
            platforms='any',
            cmdclass={"build_ext": build_ext_subclass},
        )


if __name__ == '__main__':
    setup_package()