├── .gitignore ├── LICENSE ├── README.md ├── analysis └── semeval │ ├── answer_keys_test.txt │ ├── semeval2010_task8_format_checker.pl │ └── semeval2010_task8_scorer-v1.2.pl ├── analysis_util.py ├── dataset_converter.py ├── datasets ├── __init__.py └── semeval_2010_task8.py ├── download-model.sh ├── logging_utils.py ├── loss.py ├── model_pytorch.py ├── opt.py ├── parameters_names.json ├── relation_extraction.py ├── requirements.txt ├── text_utils.py ├── train_utils.py └── utils.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Model, ipynb_checkpoints 2 | model 3 | models 4 | save 5 | log 6 | submission 7 | experiment/log/ 8 | 9 | .vscode 10 | 11 | # Byte-compiled / optimized / DLL files 12 | __pycache__/ 13 | *.py[cod] 14 | *$py.class 15 | 16 | # C extensions 17 | *.so 18 | 19 | # Distribution / packaging 20 | .Python 21 | build/ 22 | develop-eggs/ 23 | dist/ 24 | downloads/ 25 | eggs/ 26 | .eggs/ 27 | lib/ 28 | lib64/ 29 | parts/ 30 | sdist/ 31 | var/ 32 | wheels/ 33 | *.egg-info/ 34 | .installed.cfg 35 | *.egg 36 | MANIFEST 37 | 38 | # PyInstaller 39 | # Usually these files are written by a python script from a template 40 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
41 | *.manifest 42 | *.spec 43 | 44 | # Installer logs 45 | pip-log.txt 46 | pip-delete-this-directory.txt 47 | 48 | # Unit test / coverage reports 49 | htmlcov/ 50 | .tox/ 51 | .coverage 52 | .coverage.* 53 | .cache 54 | nosetests.xml 55 | coverage.xml 56 | *.cover 57 | .hypothesis/ 58 | .pytest_cache/ 59 | 60 | # Translations 61 | *.mo 62 | *.pot 63 | 64 | # Django stuff: 65 | *.log 66 | local_settings.py 67 | db.sqlite3 68 | 69 | # Flask stuff: 70 | instance/ 71 | .webassets-cache 72 | 73 | # Scrapy stuff: 74 | .scrapy 75 | 76 | # Sphinx documentation 77 | docs/_build/ 78 | 79 | # PyBuilder 80 | target/ 81 | 82 | # Jupyter Notebook 83 | .ipynb_checkpoints 84 | 85 | # pyenv 86 | .python-version 87 | 88 | # celery beat schedule file 89 | celerybeat-schedule 90 | 91 | # SageMath parsed files 92 | *.sage.py 93 | 94 | # Environments 95 | .env 96 | .venv 97 | env/ 98 | venv/ 99 | ENV/ 100 | env.bak/ 101 | venv.bak/ 102 | 103 | # Spyder project settings 104 | .spyderproject 105 | .spyproject 106 | 107 | # Rope project settings 108 | .ropeproject 109 | 110 | # mkdocs documentation 111 | /site 112 | 113 | # mypy 114 | .mypy_cache/cloze_data 115 | cloze_data/ 116 | 117 | # Data folder 118 | data/ 119 | logs/ 120 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 OpenAI 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 
13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Improving Relation Extraction by Pre-trained Language Representations 2 | 3 | This repository contains the code of our paper: 4 | [Improving Relation Extraction by Pre-trained Language Representations.](https://openreview.net/forum?id=BJgrxbqp67) 5 | Christoph Alt*, Marc Hübner*, Leonhard Hennig 6 | 7 | We fine-tune the pre-trained OpenAI GPT [1] to the task of relation extraction and show that it achieves state-of-the-art results on SemEval 2010 Task 8 and TACRED relation extraction datasets. 8 | 9 | Our code depends on huggingface's PyTorch reimplementation of the OpenAI GPT [2] - so thanks to them. 
10 | 11 | ## Installation 12 | 13 | First, clone the repository to your machine and install the requirements with the following command: 14 | 15 | ```bash 16 | pip install -r requirements.txt 17 | ``` 18 | 19 | We also need the weights of the pre-trained Transformer, which can be downloaded with the following command: 20 | ``` 21 | ./download-model.sh 22 | ``` 23 | 24 | The English spaCy model is required for sentence segmentation: 25 | ``` 26 | python -m spacy download en 27 | ``` 28 | 29 | ## Prepare the data 30 | 31 | We evaluate our model on [SemEval 2010 Task 8](https://drive.google.com/file/d/0B_jQiLugGTAkMDQ5ZjZiMTUtMzQ1Yy00YWNmLWJlZDYtOWY1ZDMwY2U4YjFk) and [TACRED](https://catalog.ldc.upenn.edu/LDC2018T24), which is available through LDC. 32 | 33 | Our model expects the input dataset to be in JSONL format. To convert a dataset run the following command: 34 | ```bash 35 | python dataset_converter.py --dataset= 36 | ``` 37 | 38 | ## Training 39 | E.g. for training on the TACRED dataset, run the following command: 40 | 41 | ```bash 42 | CUDA_VISIBLE_DEVICES=0 python relation_extraction.py train \ 43 | --write-model True \ 44 | --masking-mode grammar_and_ner \ 45 | --batch-size 8 \ 46 | --max-epochs 3 \ 47 | --lm-coef 0.5 \ 48 | --learning-rate 5.25e-5 \ 49 | --learning-rate-warmup 0.002 \ 50 | --clf-pdrop 0.1 \ 51 | --attn-pdrop 0.1 \ 52 | --word-pdrop 0.0 \ 53 | --dataset tacred \ 54 | --data-dir \ 55 | --seed=0 \ 56 | --log-dir ./logs/ 57 | ``` 58 | 59 | ## Evaluation 60 | ```bash 61 | CUDA_VISIBLE_DEVICES=0 python relation_extraction.py evaluate \ 62 | --dataset tacred \ 63 | --masking_mode grammar_and_ner \ 64 | --test_file ./data/tacred/test.jsonl \ 65 | --save_dir ./logs/ \ 66 | --model_file \ 67 | --batch_size 8 \ 68 | --log_dir ./logs/ 69 | ``` 70 | 71 | ## Trained Models 72 | 73 | The models we trained on SemEval and TACRED to produce our paper results can be found here: 74 | 75 | | Dataset | Masking Mode | P | R | F1 | Download | 76 | | -------- | 
--------------- | ---- | ---- | ---- | --------------------------------------------------------------------------- | 77 | | TACRED | grammar_and_ner | 70.0 | 65.0 | 67.4 | [Link](https://dfkide-my.sharepoint.com/:u:/g/personal/lehe02_dfki_de/EQ0zv4QicbVMoW6nBhnOkA8BV4Yzt7agM0hAfX07VQez4w?Web=0&Download=1) | 78 | | SemEval | None | 87.6 | 86.8 | 87.1 | [Link](https://dfkide-my.sharepoint.com/:u:/g/personal/lehe02_dfki_de/EZBQNU99Uz9MiOuP-RHAdaYB7w8jIW0mADM4f05tGbhwZg?Web=0&Download=1) | 79 | 80 | ### Download and extract model files 81 | 82 | First, download the archive corresponding to the model you want to evaluate (links in the table above). 83 | 84 | ```bash 85 | wget --content-disposition 86 | ``` 87 | 88 | Extract the model archive containing model.pt, text_encoder.pkl, and label_encoder.pkl. 89 | 90 | ```bash 91 | tar -xvzf 92 | ``` 93 | 94 | ### Run evaluation 95 | 96 | - `dataset`: dataset to evaluate, can be one of "semeval" or "tacred". 97 | - `test-file`: path to the JSONL test file used during evaluation 98 | - `log-dir`: directory to store the evaluation results and predictions 99 | - `save-dir`: directory containing the downloaded model files (model.pt, text_encoder.pkl, and label_encoder.pkl) 100 | - `masking-mode`: masking mode to use during evaluation, can be one of "None", "grammar_and_ner", "grammar", "ner" or "unk" (**caution:** must match the mode for training) 101 | 102 | For example, to evaluate the TACRED model with "grammar_and_ner" masking, run the following command: 103 | 104 | ```bash 105 | CUDA_VISIBLE_DEVICES=0 python relation_extraction.py evaluate \ 106 | --dataset tacred \ 107 | --test-file .//test.jsonl \ 108 | --log-dir \ 109 | --save-dir \ 110 | --masking_mode grammar_and_ner 111 | ``` 112 | 113 | ## Citations 114 | If you use our code in your research or find our repository useful, please consider citing our work. 
115 | 116 | ``` 117 | @InProceedings{alt_improving_2019, 118 | author = {Alt, Christoph and H\"{u}bner, Marc and Hennig, Leonhard}, 119 | title = {Improving Relation Extraction by Pre-trained Language Representations}, 120 | booktitle = {Proceedings of AKBC 2019}, 121 | year = {2019}, 122 | url = {https://openreview.net/forum?id=BJgrxbqp67}, 123 | } 124 | ``` 125 | 126 | ## License 127 | lm-transformer-re is released under the MIT license. See [LICENSE](LICENSE) for additional details. 128 | 129 | ## References 130 | 1. [Improving language understanding by generative pre-training](https://s3-us-west-2.amazonaws.com/openai-assets/research-covers/language-unsupervised/language_understanding_paper.pdf). Alec Radford, Karthik Narasimhan, Tim Salimans, and Ilya Sutskever. 131 | 2. [PyTorch implementation of OpenAI's Finetuned Transformer Language Model](https://github.com/huggingface/pytorch-openai-transformer-lm) 132 | -------------------------------------------------------------------------------- /analysis/semeval/answer_keys_test.txt: -------------------------------------------------------------------------------- 1 | 8001 Message-Topic(e1,e2) 2 | 8002 Product-Producer(e2,e1) 3 | 8003 Instrument-Agency(e2,e1) 4 | 8004 Entity-Destination(e1,e2) 5 | 8005 Cause-Effect(e2,e1) 6 | 8006 Component-Whole(e1,e2) 7 | 8007 Product-Producer(e1,e2) 8 | 8008 Member-Collection(e2,e1) 9 | 8009 Component-Whole(e1,e2) 10 | 8010 Message-Topic(e1,e2) 11 | 8011 Entity-Destination(e1,e2) 12 | 8012 Other 13 | 8013 Entity-Destination(e1,e2) 14 | 8014 Product-Producer(e1,e2) 15 | 8015 Entity-Origin(e1,e2) 16 | 8016 Entity-Origin(e1,e2) 17 | 8017 Entity-Destination(e1,e2) 18 | 8018 Other 19 | 8019 Member-Collection(e2,e1) 20 | 8020 Product-Producer(e1,e2) 21 | 8021 Message-Topic(e1,e2) 22 | 8022 Content-Container(e1,e2) 23 | 8023 Product-Producer(e1,e2) 24 | 8024 Other 25 | 8025 Entity-Origin(e2,e1) 26 | 8026 Product-Producer(e1,e2) 27 | 8027 Cause-Effect(e2,e1) 28 | 8028 Other 29 | 8029 
Other 30 | 8030 Entity-Origin(e1,e2) 31 | 8031 Cause-Effect(e1,e2) 32 | 8032 Message-Topic(e1,e2) 33 | 8033 Component-Whole(e1,e2) 34 | 8034 Product-Producer(e1,e2) 35 | 8035 Component-Whole(e1,e2) 36 | 8036 Component-Whole(e2,e1) 37 | 8037 Member-Collection(e2,e1) 38 | 8038 Content-Container(e2,e1) 39 | 8039 Member-Collection(e2,e1) 40 | 8040 Product-Producer(e1,e2) 41 | 8041 Cause-Effect(e1,e2) 42 | 8042 Component-Whole(e2,e1) 43 | 8043 Cause-Effect(e1,e2) 44 | 8044 Entity-Destination(e1,e2) 45 | 8045 Entity-Origin(e1,e2) 46 | 8046 Content-Container(e1,e2) 47 | 8047 Other 48 | 8048 Entity-Destination(e1,e2) 49 | 8049 Message-Topic(e1,e2) 50 | 8050 Other 51 | 8051 Entity-Destination(e1,e2) 52 | 8052 Other 53 | 8053 Member-Collection(e2,e1) 54 | 8054 Other 55 | 8055 Cause-Effect(e1,e2) 56 | 8056 Entity-Origin(e1,e2) 57 | 8057 Other 58 | 8058 Cause-Effect(e1,e2) 59 | 8059 Other 60 | 8060 Component-Whole(e2,e1) 61 | 8061 Entity-Origin(e2,e1) 62 | 8062 Product-Producer(e1,e2) 63 | 8063 Instrument-Agency(e2,e1) 64 | 8064 Component-Whole(e1,e2) 65 | 8065 Entity-Destination(e1,e2) 66 | 8066 Product-Producer(e2,e1) 67 | 8067 Other 68 | 8068 Other 69 | 8069 Message-Topic(e1,e2) 70 | 8070 Product-Producer(e1,e2) 71 | 8071 Other 72 | 8072 Entity-Origin(e1,e2) 73 | 8073 Cause-Effect(e2,e1) 74 | 8074 Entity-Origin(e1,e2) 75 | 8075 Other 76 | 8076 Product-Producer(e1,e2) 77 | 8077 Other 78 | 8078 Instrument-Agency(e2,e1) 79 | 8079 Entity-Destination(e1,e2) 80 | 8080 Product-Producer(e2,e1) 81 | 8081 Component-Whole(e1,e2) 82 | 8082 Component-Whole(e1,e2) 83 | 8083 Cause-Effect(e1,e2) 84 | 8084 Component-Whole(e1,e2) 85 | 8085 Message-Topic(e1,e2) 86 | 8086 Instrument-Agency(e2,e1) 87 | 8087 Message-Topic(e1,e2) 88 | 8088 Product-Producer(e2,e1) 89 | 8089 Entity-Origin(e2,e1) 90 | 8090 Message-Topic(e1,e2) 91 | 8091 Entity-Origin(e1,e2) 92 | 8092 Other 93 | 8093 Component-Whole(e1,e2) 94 | 8094 Component-Whole(e1,e2) 95 | 8095 Other 96 | 8096 Entity-Destination(e1,e2) 97 | 8097 
Message-Topic(e1,e2) 98 | 8098 Component-Whole(e1,e2) 99 | 8099 Entity-Destination(e1,e2) 100 | 8100 Message-Topic(e1,e2) 101 | 8101 Message-Topic(e1,e2) 102 | 8102 Component-Whole(e2,e1) 103 | 8103 Entity-Origin(e1,e2) 104 | 8104 Message-Topic(e1,e2) 105 | 8105 Cause-Effect(e2,e1) 106 | 8106 Other 107 | 8107 Cause-Effect(e2,e1) 108 | 8108 Cause-Effect(e1,e2) 109 | 8109 Component-Whole(e2,e1) 110 | 8110 Member-Collection(e2,e1) 111 | 8111 Other 112 | 8112 Content-Container(e1,e2) 113 | 8113 Other 114 | 8114 Product-Producer(e2,e1) 115 | 8115 Other 116 | 8116 Cause-Effect(e2,e1) 117 | 8117 Product-Producer(e1,e2) 118 | 8118 Cause-Effect(e1,e2) 119 | 8119 Member-Collection(e2,e1) 120 | 8120 Component-Whole(e2,e1) 121 | 8121 Entity-Destination(e1,e2) 122 | 8122 Instrument-Agency(e2,e1) 123 | 8123 Other 124 | 8124 Other 125 | 8125 Message-Topic(e1,e2) 126 | 8126 Entity-Origin(e2,e1) 127 | 8127 Entity-Origin(e2,e1) 128 | 8128 Other 129 | 8129 Component-Whole(e2,e1) 130 | 8130 Content-Container(e1,e2) 131 | 8131 Instrument-Agency(e1,e2) 132 | 8132 Message-Topic(e1,e2) 133 | 8133 Component-Whole(e1,e2) 134 | 8134 Other 135 | 8135 Content-Container(e1,e2) 136 | 8136 Instrument-Agency(e2,e1) 137 | 8137 Component-Whole(e1,e2) 138 | 8138 Member-Collection(e2,e1) 139 | 8139 Entity-Origin(e1,e2) 140 | 8140 Member-Collection(e2,e1) 141 | 8141 Instrument-Agency(e2,e1) 142 | 8142 Entity-Origin(e1,e2) 143 | 8143 Other 144 | 8144 Entity-Origin(e1,e2) 145 | 8145 Member-Collection(e2,e1) 146 | 8146 Instrument-Agency(e2,e1) 147 | 8147 Content-Container(e1,e2) 148 | 8148 Message-Topic(e2,e1) 149 | 8149 Other 150 | 8150 Product-Producer(e2,e1) 151 | 8151 Product-Producer(e1,e2) 152 | 8152 Member-Collection(e2,e1) 153 | 8153 Member-Collection(e2,e1) 154 | 8154 Message-Topic(e1,e2) 155 | 8155 Message-Topic(e1,e2) 156 | 8156 Product-Producer(e2,e1) 157 | 8157 Other 158 | 8158 Component-Whole(e1,e2) 159 | 8159 Cause-Effect(e1,e2) 160 | 8160 Message-Topic(e2,e1) 161 | 8161 
Message-Topic(e1,e2) 162 | 8162 Entity-Origin(e1,e2) 163 | 8163 Entity-Origin(e1,e2) 164 | 8164 Product-Producer(e2,e1) 165 | 8165 Entity-Destination(e1,e2) 166 | 8166 Content-Container(e1,e2) 167 | 8167 Member-Collection(e2,e1) 168 | 8168 Component-Whole(e2,e1) 169 | 8169 Entity-Origin(e1,e2) 170 | 8170 Instrument-Agency(e2,e1) 171 | 8171 Entity-Destination(e1,e2) 172 | 8172 Member-Collection(e1,e2) 173 | 8173 Other 174 | 8174 Other 175 | 8175 Cause-Effect(e2,e1) 176 | 8176 Product-Producer(e1,e2) 177 | 8177 Entity-Destination(e1,e2) 178 | 8178 Entity-Origin(e1,e2) 179 | 8179 Instrument-Agency(e2,e1) 180 | 8180 Message-Topic(e1,e2) 181 | 8181 Entity-Destination(e1,e2) 182 | 8182 Content-Container(e1,e2) 183 | 8183 Other 184 | 8184 Product-Producer(e2,e1) 185 | 8185 Other 186 | 8186 Member-Collection(e2,e1) 187 | 8187 Entity-Destination(e1,e2) 188 | 8188 Product-Producer(e1,e2) 189 | 8189 Message-Topic(e2,e1) 190 | 8190 Instrument-Agency(e2,e1) 191 | 8191 Cause-Effect(e1,e2) 192 | 8192 Other 193 | 8193 Message-Topic(e1,e2) 194 | 8194 Component-Whole(e2,e1) 195 | 8195 Message-Topic(e2,e1) 196 | 8196 Other 197 | 8197 Entity-Origin(e2,e1) 198 | 8198 Entity-Destination(e1,e2) 199 | 8199 Entity-Destination(e1,e2) 200 | 8200 Product-Producer(e1,e2) 201 | 8201 Component-Whole(e1,e2) 202 | 8202 Content-Container(e1,e2) 203 | 8203 Other 204 | 8204 Cause-Effect(e2,e1) 205 | 8205 Entity-Destination(e1,e2) 206 | 8206 Component-Whole(e1,e2) 207 | 8207 Component-Whole(e2,e1) 208 | 8208 Content-Container(e2,e1) 209 | 8209 Member-Collection(e2,e1) 210 | 8210 Member-Collection(e2,e1) 211 | 8211 Component-Whole(e1,e2) 212 | 8212 Entity-Origin(e1,e2) 213 | 8213 Content-Container(e1,e2) 214 | 8214 Instrument-Agency(e2,e1) 215 | 8215 Entity-Origin(e2,e1) 216 | 8216 Content-Container(e2,e1) 217 | 8217 Content-Container(e1,e2) 218 | 8218 Other 219 | 8219 Cause-Effect(e2,e1) 220 | 8220 Message-Topic(e1,e2) 221 | 8221 Content-Container(e1,e2) 222 | 8222 Entity-Origin(e1,e2) 223 | 8223 
Message-Topic(e1,e2) 224 | 8224 Message-Topic(e2,e1) 225 | 8225 Other 226 | 8226 Other 227 | 8227 Content-Container(e1,e2) 228 | 8228 Member-Collection(e2,e1) 229 | 8229 Product-Producer(e1,e2) 230 | 8230 Other 231 | 8231 Entity-Origin(e1,e2) 232 | 8232 Component-Whole(e2,e1) 233 | 8233 Message-Topic(e1,e2) 234 | 8234 Cause-Effect(e2,e1) 235 | 8235 Component-Whole(e1,e2) 236 | 8236 Cause-Effect(e2,e1) 237 | 8237 Other 238 | 8238 Component-Whole(e1,e2) 239 | 8239 Cause-Effect(e1,e2) 240 | 8240 Cause-Effect(e1,e2) 241 | 8241 Product-Producer(e1,e2) 242 | 8242 Entity-Destination(e1,e2) 243 | 8243 Component-Whole(e1,e2) 244 | 8244 Other 245 | 8245 Other 246 | 8246 Product-Producer(e2,e1) 247 | 8247 Content-Container(e1,e2) 248 | 8248 Component-Whole(e1,e2) 249 | 8249 Entity-Origin(e1,e2) 250 | 8250 Entity-Destination(e1,e2) 251 | 8251 Component-Whole(e1,e2) 252 | 8252 Entity-Origin(e1,e2) 253 | 8253 Cause-Effect(e1,e2) 254 | 8254 Component-Whole(e1,e2) 255 | 8255 Other 256 | 8256 Other 257 | 8257 Cause-Effect(e2,e1) 258 | 8258 Product-Producer(e1,e2) 259 | 8259 Component-Whole(e2,e1) 260 | 8260 Instrument-Agency(e2,e1) 261 | 8261 Message-Topic(e1,e2) 262 | 8262 Entity-Destination(e1,e2) 263 | 8263 Entity-Origin(e2,e1) 264 | 8264 Message-Topic(e2,e1) 265 | 8265 Cause-Effect(e2,e1) 266 | 8266 Entity-Destination(e1,e2) 267 | 8267 Message-Topic(e1,e2) 268 | 8268 Component-Whole(e2,e1) 269 | 8269 Other 270 | 8270 Entity-Destination(e1,e2) 271 | 8271 Other 272 | 8272 Other 273 | 8273 Message-Topic(e2,e1) 274 | 8274 Member-Collection(e2,e1) 275 | 8275 Other 276 | 8276 Entity-Destination(e1,e2) 277 | 8277 Message-Topic(e1,e2) 278 | 8278 Instrument-Agency(e2,e1) 279 | 8279 Product-Producer(e2,e1) 280 | 8280 Product-Producer(e1,e2) 281 | 8281 Member-Collection(e1,e2) 282 | 8282 Entity-Destination(e1,e2) 283 | 8283 Member-Collection(e2,e1) 284 | 8284 Other 285 | 8285 Message-Topic(e1,e2) 286 | 8286 Content-Container(e1,e2) 287 | 8287 Member-Collection(e2,e1) 288 | 8288 
Cause-Effect(e2,e1) 289 | 8289 Other 290 | 8290 Message-Topic(e1,e2) 291 | 8291 Content-Container(e1,e2) 292 | 8292 Message-Topic(e1,e2) 293 | 8293 Component-Whole(e1,e2) 294 | 8294 Other 295 | 8295 Entity-Origin(e1,e2) 296 | 8296 Entity-Origin(e1,e2) 297 | 8297 Entity-Destination(e1,e2) 298 | 8298 Entity-Destination(e1,e2) 299 | 8299 Entity-Destination(e1,e2) 300 | 8300 Product-Producer(e2,e1) 301 | 8301 Other 302 | 8302 Instrument-Agency(e2,e1) 303 | 8303 Component-Whole(e2,e1) 304 | 8304 Other 305 | 8305 Product-Producer(e2,e1) 306 | 8306 Message-Topic(e1,e2) 307 | 8307 Product-Producer(e1,e2) 308 | 8308 Other 309 | 8309 Message-Topic(e1,e2) 310 | 8310 Product-Producer(e2,e1) 311 | 8311 Other 312 | 8312 Cause-Effect(e2,e1) 313 | 8313 Message-Topic(e1,e2) 314 | 8314 Product-Producer(e1,e2) 315 | 8315 Message-Topic(e2,e1) 316 | 8316 Member-Collection(e2,e1) 317 | 8317 Content-Container(e1,e2) 318 | 8318 Content-Container(e1,e2) 319 | 8319 Entity-Destination(e1,e2) 320 | 8320 Instrument-Agency(e2,e1) 321 | 8321 Entity-Destination(e1,e2) 322 | 8322 Member-Collection(e2,e1) 323 | 8323 Member-Collection(e1,e2) 324 | 8324 Entity-Destination(e1,e2) 325 | 8325 Content-Container(e2,e1) 326 | 8326 Other 327 | 8327 Message-Topic(e2,e1) 328 | 8328 Message-Topic(e1,e2) 329 | 8329 Message-Topic(e1,e2) 330 | 8330 Product-Producer(e1,e2) 331 | 8331 Member-Collection(e2,e1) 332 | 8332 Message-Topic(e1,e2) 333 | 8333 Message-Topic(e2,e1) 334 | 8334 Cause-Effect(e2,e1) 335 | 8335 Member-Collection(e2,e1) 336 | 8336 Other 337 | 8337 Other 338 | 8338 Message-Topic(e1,e2) 339 | 8339 Other 340 | 8340 Content-Container(e1,e2) 341 | 8341 Message-Topic(e1,e2) 342 | 8342 Other 343 | 8343 Instrument-Agency(e2,e1) 344 | 8344 Entity-Destination(e1,e2) 345 | 8345 Content-Container(e1,e2) 346 | 8346 Content-Container(e2,e1) 347 | 8347 Other 348 | 8348 Other 349 | 8349 Member-Collection(e2,e1) 350 | 8350 Component-Whole(e2,e1) 351 | 8351 Content-Container(e1,e2) 352 | 8352 
Member-Collection(e2,e1) 353 | 8353 Message-Topic(e1,e2) 354 | 8354 Message-Topic(e2,e1) 355 | 8355 Content-Container(e1,e2) 356 | 8356 Other 357 | 8357 Cause-Effect(e1,e2) 358 | 8358 Instrument-Agency(e2,e1) 359 | 8359 Member-Collection(e2,e1) 360 | 8360 Component-Whole(e2,e1) 361 | 8361 Cause-Effect(e2,e1) 362 | 8362 Other 363 | 8363 Entity-Origin(e1,e2) 364 | 8364 Instrument-Agency(e2,e1) 365 | 8365 Product-Producer(e1,e2) 366 | 8366 Message-Topic(e1,e2) 367 | 8367 Entity-Destination(e1,e2) 368 | 8368 Entity-Destination(e1,e2) 369 | 8369 Member-Collection(e1,e2) 370 | 8370 Other 371 | 8371 Component-Whole(e1,e2) 372 | 8372 Other 373 | 8373 Cause-Effect(e2,e1) 374 | 8374 Product-Producer(e2,e1) 375 | 8375 Entity-Destination(e1,e2) 376 | 8376 Entity-Destination(e1,e2) 377 | 8377 Cause-Effect(e1,e2) 378 | 8378 Product-Producer(e2,e1) 379 | 8379 Other 380 | 8380 Other 381 | 8381 Instrument-Agency(e1,e2) 382 | 8382 Cause-Effect(e2,e1) 383 | 8383 Entity-Destination(e1,e2) 384 | 8384 Other 385 | 8385 Entity-Origin(e1,e2) 386 | 8386 Component-Whole(e2,e1) 387 | 8387 Product-Producer(e2,e1) 388 | 8388 Component-Whole(e1,e2) 389 | 8389 Message-Topic(e1,e2) 390 | 8390 Other 391 | 8391 Other 392 | 8392 Component-Whole(e2,e1) 393 | 8393 Entity-Origin(e1,e2) 394 | 8394 Entity-Origin(e1,e2) 395 | 8395 Component-Whole(e1,e2) 396 | 8396 Other 397 | 8397 Other 398 | 8398 Entity-Destination(e1,e2) 399 | 8399 Instrument-Agency(e2,e1) 400 | 8400 Other 401 | 8401 Entity-Destination(e1,e2) 402 | 8402 Cause-Effect(e2,e1) 403 | 8403 Cause-Effect(e2,e1) 404 | 8404 Cause-Effect(e2,e1) 405 | 8405 Cause-Effect(e2,e1) 406 | 8406 Component-Whole(e1,e2) 407 | 8407 Other 408 | 8408 Entity-Origin(e2,e1) 409 | 8409 Cause-Effect(e2,e1) 410 | 8410 Entity-Destination(e1,e2) 411 | 8411 Entity-Origin(e1,e2) 412 | 8412 Content-Container(e2,e1) 413 | 8413 Component-Whole(e1,e2) 414 | 8414 Entity-Destination(e1,e2) 415 | 8415 Member-Collection(e2,e1) 416 | 8416 Component-Whole(e2,e1) 417 | 8417 
Cause-Effect(e1,e2) 418 | 8418 Entity-Destination(e1,e2) 419 | 8419 Content-Container(e2,e1) 420 | 8420 Message-Topic(e1,e2) 421 | 8421 Component-Whole(e1,e2) 422 | 8422 Component-Whole(e2,e1) 423 | 8423 Entity-Destination(e1,e2) 424 | 8424 Instrument-Agency(e2,e1) 425 | 8425 Other 426 | 8426 Other 427 | 8427 Component-Whole(e1,e2) 428 | 8428 Product-Producer(e1,e2) 429 | 8429 Component-Whole(e1,e2) 430 | 8430 Entity-Origin(e1,e2) 431 | 8431 Component-Whole(e2,e1) 432 | 8432 Other 433 | 8433 Member-Collection(e2,e1) 434 | 8434 Other 435 | 8435 Other 436 | 8436 Other 437 | 8437 Message-Topic(e2,e1) 438 | 8438 Component-Whole(e2,e1) 439 | 8439 Cause-Effect(e2,e1) 440 | 8440 Message-Topic(e1,e2) 441 | 8441 Product-Producer(e2,e1) 442 | 8442 Component-Whole(e1,e2) 443 | 8443 Component-Whole(e1,e2) 444 | 8444 Component-Whole(e2,e1) 445 | 8445 Content-Container(e1,e2) 446 | 8446 Product-Producer(e1,e2) 447 | 8447 Other 448 | 8448 Other 449 | 8449 Entity-Origin(e1,e2) 450 | 8450 Other 451 | 8451 Other 452 | 8452 Member-Collection(e2,e1) 453 | 8453 Entity-Origin(e1,e2) 454 | 8454 Product-Producer(e2,e1) 455 | 8455 Cause-Effect(e2,e1) 456 | 8456 Entity-Destination(e1,e2) 457 | 8457 Entity-Destination(e1,e2) 458 | 8458 Product-Producer(e1,e2) 459 | 8459 Instrument-Agency(e2,e1) 460 | 8460 Entity-Destination(e1,e2) 461 | 8461 Other 462 | 8462 Product-Producer(e2,e1) 463 | 8463 Entity-Destination(e1,e2) 464 | 8464 Entity-Destination(e1,e2) 465 | 8465 Content-Container(e1,e2) 466 | 8466 Other 467 | 8467 Entity-Destination(e1,e2) 468 | 8468 Entity-Destination(e1,e2) 469 | 8469 Entity-Origin(e1,e2) 470 | 8470 Component-Whole(e1,e2) 471 | 8471 Cause-Effect(e1,e2) 472 | 8472 Component-Whole(e1,e2) 473 | 8473 Cause-Effect(e2,e1) 474 | 8474 Content-Container(e1,e2) 475 | 8475 Other 476 | 8476 Cause-Effect(e1,e2) 477 | 8477 Other 478 | 8478 Entity-Origin(e1,e2) 479 | 8479 Message-Topic(e1,e2) 480 | 8480 Message-Topic(e1,e2) 481 | 8481 Entity-Destination(e1,e2) 482 | 8482 Other 483 | 
8483 Product-Producer(e1,e2) 484 | 8484 Other 485 | 8485 Product-Producer(e1,e2) 486 | 8486 Cause-Effect(e1,e2) 487 | 8487 Other 488 | 8488 Product-Producer(e1,e2) 489 | 8489 Cause-Effect(e2,e1) 490 | 8490 Content-Container(e1,e2) 491 | 8491 Other 492 | 8492 Member-Collection(e2,e1) 493 | 8493 Cause-Effect(e2,e1) 494 | 8494 Cause-Effect(e2,e1) 495 | 8495 Message-Topic(e2,e1) 496 | 8496 Entity-Destination(e1,e2) 497 | 8497 Entity-Origin(e1,e2) 498 | 8498 Cause-Effect(e1,e2) 499 | 8499 Component-Whole(e1,e2) 500 | 8500 Cause-Effect(e1,e2) 501 | 8501 Message-Topic(e2,e1) 502 | 8502 Content-Container(e1,e2) 503 | 8503 Cause-Effect(e2,e1) 504 | 8504 Entity-Origin(e1,e2) 505 | 8505 Content-Container(e1,e2) 506 | 8506 Entity-Destination(e1,e2) 507 | 8507 Member-Collection(e2,e1) 508 | 8508 Other 509 | 8509 Cause-Effect(e2,e1) 510 | 8510 Other 511 | 8511 Instrument-Agency(e2,e1) 512 | 8512 Cause-Effect(e1,e2) 513 | 8513 Other 514 | 8514 Message-Topic(e1,e2) 515 | 8515 Other 516 | 8516 Other 517 | 8517 Entity-Origin(e1,e2) 518 | 8518 Entity-Origin(e2,e1) 519 | 8519 Product-Producer(e2,e1) 520 | 8520 Cause-Effect(e2,e1) 521 | 8521 Cause-Effect(e2,e1) 522 | 8522 Other 523 | 8523 Cause-Effect(e2,e1) 524 | 8524 Instrument-Agency(e2,e1) 525 | 8525 Entity-Origin(e1,e2) 526 | 8526 Entity-Destination(e1,e2) 527 | 8527 Component-Whole(e1,e2) 528 | 8528 Content-Container(e1,e2) 529 | 8529 Entity-Destination(e1,e2) 530 | 8530 Product-Producer(e2,e1) 531 | 8531 Component-Whole(e2,e1) 532 | 8532 Other 533 | 8533 Product-Producer(e1,e2) 534 | 8534 Cause-Effect(e1,e2) 535 | 8535 Cause-Effect(e2,e1) 536 | 8536 Cause-Effect(e1,e2) 537 | 8537 Other 538 | 8538 Member-Collection(e2,e1) 539 | 8539 Member-Collection(e1,e2) 540 | 8540 Other 541 | 8541 Product-Producer(e1,e2) 542 | 8542 Cause-Effect(e1,e2) 543 | 8543 Entity-Origin(e1,e2) 544 | 8544 Message-Topic(e1,e2) 545 | 8545 Instrument-Agency(e2,e1) 546 | 8546 Entity-Origin(e1,e2) 547 | 8547 Component-Whole(e1,e2) 548 | 8548 
Component-Whole(e2,e1) 549 | 8549 Component-Whole(e1,e2) 550 | 8550 Other 551 | 8551 Message-Topic(e2,e1) 552 | 8552 Entity-Destination(e1,e2) 553 | 8553 Message-Topic(e1,e2) 554 | 8554 Content-Container(e1,e2) 555 | 8555 Entity-Origin(e2,e1) 556 | 8556 Cause-Effect(e1,e2) 557 | 8557 Entity-Origin(e1,e2) 558 | 8558 Entity-Destination(e1,e2) 559 | 8559 Product-Producer(e2,e1) 560 | 8560 Other 561 | 8561 Component-Whole(e2,e1) 562 | 8562 Entity-Origin(e1,e2) 563 | 8563 Message-Topic(e1,e2) 564 | 8564 Message-Topic(e1,e2) 565 | 8565 Other 566 | 8566 Entity-Destination(e1,e2) 567 | 8567 Instrument-Agency(e2,e1) 568 | 8568 Other 569 | 8569 Entity-Origin(e1,e2) 570 | 8570 Member-Collection(e2,e1) 571 | 8571 Other 572 | 8572 Member-Collection(e2,e1) 573 | 8573 Other 574 | 8574 Component-Whole(e1,e2) 575 | 8575 Entity-Destination(e1,e2) 576 | 8576 Content-Container(e1,e2) 577 | 8577 Member-Collection(e2,e1) 578 | 8578 Member-Collection(e2,e1) 579 | 8579 Message-Topic(e1,e2) 580 | 8580 Message-Topic(e1,e2) 581 | 8581 Other 582 | 8582 Other 583 | 8583 Member-Collection(e2,e1) 584 | 8584 Component-Whole(e2,e1) 585 | 8585 Message-Topic(e2,e1) 586 | 8586 Component-Whole(e2,e1) 587 | 8587 Entity-Origin(e1,e2) 588 | 8588 Message-Topic(e1,e2) 589 | 8589 Message-Topic(e1,e2) 590 | 8590 Member-Collection(e2,e1) 591 | 8591 Cause-Effect(e2,e1) 592 | 8592 Other 593 | 8593 Product-Producer(e1,e2) 594 | 8594 Entity-Origin(e1,e2) 595 | 8595 Product-Producer(e1,e2) 596 | 8596 Cause-Effect(e1,e2) 597 | 8597 Message-Topic(e1,e2) 598 | 8598 Entity-Destination(e1,e2) 599 | 8599 Component-Whole(e2,e1) 600 | 8600 Member-Collection(e2,e1) 601 | 8601 Product-Producer(e1,e2) 602 | 8602 Cause-Effect(e2,e1) 603 | 8603 Cause-Effect(e2,e1) 604 | 8604 Message-Topic(e1,e2) 605 | 8605 Component-Whole(e1,e2) 606 | 8606 Entity-Destination(e1,e2) 607 | 8607 Other 608 | 8608 Cause-Effect(e2,e1) 609 | 8609 Component-Whole(e2,e1) 610 | 8610 Other 611 | 8611 Message-Topic(e1,e2) 612 | 8612 Entity-Origin(e1,e2) 
613 | 8613 Content-Container(e2,e1) 614 | 8614 Entity-Origin(e1,e2) 615 | 8615 Other 616 | 8616 Component-Whole(e1,e2) 617 | 8617 Entity-Origin(e1,e2) 618 | 8618 Other 619 | 8619 Entity-Destination(e1,e2) 620 | 8620 Entity-Origin(e1,e2) 621 | 8621 Cause-Effect(e2,e1) 622 | 8622 Component-Whole(e1,e2) 623 | 8623 Cause-Effect(e1,e2) 624 | 8624 Component-Whole(e1,e2) 625 | 8625 Message-Topic(e1,e2) 626 | 8626 Other 627 | 8627 Member-Collection(e2,e1) 628 | 8628 Other 629 | 8629 Message-Topic(e1,e2) 630 | 8630 Entity-Destination(e1,e2) 631 | 8631 Entity-Destination(e1,e2) 632 | 8632 Component-Whole(e2,e1) 633 | 8633 Cause-Effect(e1,e2) 634 | 8634 Instrument-Agency(e2,e1) 635 | 8635 Entity-Origin(e1,e2) 636 | 8636 Content-Container(e2,e1) 637 | 8637 Instrument-Agency(e2,e1) 638 | 8638 Member-Collection(e2,e1) 639 | 8639 Entity-Destination(e1,e2) 640 | 8640 Entity-Origin(e1,e2) 641 | 8641 Cause-Effect(e2,e1) 642 | 8642 Product-Producer(e2,e1) 643 | 8643 Entity-Destination(e1,e2) 644 | 8644 Product-Producer(e2,e1) 645 | 8645 Other 646 | 8646 Other 647 | 8647 Other 648 | 8648 Cause-Effect(e2,e1) 649 | 8649 Member-Collection(e1,e2) 650 | 8650 Message-Topic(e1,e2) 651 | 8651 Message-Topic(e1,e2) 652 | 8652 Other 653 | 8653 Entity-Origin(e1,e2) 654 | 8654 Content-Container(e1,e2) 655 | 8655 Cause-Effect(e1,e2) 656 | 8656 Member-Collection(e2,e1) 657 | 8657 Component-Whole(e1,e2) 658 | 8658 Message-Topic(e1,e2) 659 | 8659 Cause-Effect(e1,e2) 660 | 8660 Message-Topic(e1,e2) 661 | 8661 Product-Producer(e1,e2) 662 | 8662 Message-Topic(e2,e1) 663 | 8663 Entity-Destination(e1,e2) 664 | 8664 Product-Producer(e1,e2) 665 | 8665 Component-Whole(e2,e1) 666 | 8666 Component-Whole(e2,e1) 667 | 8667 Component-Whole(e1,e2) 668 | 8668 Other 669 | 8669 Member-Collection(e2,e1) 670 | 8670 Entity-Destination(e1,e2) 671 | 8671 Content-Container(e1,e2) 672 | 8672 Message-Topic(e1,e2) 673 | 8673 Product-Producer(e2,e1) 674 | 8674 Message-Topic(e1,e2) 675 | 8675 Component-Whole(e1,e2) 676 | 8676 
Message-Topic(e1,e2) 677 | 8677 Component-Whole(e2,e1) 678 | 8678 Other 679 | 8679 Component-Whole(e2,e1) 680 | 8680 Other 681 | 8681 Cause-Effect(e2,e1) 682 | 8682 Message-Topic(e1,e2) 683 | 8683 Member-Collection(e2,e1) 684 | 8684 Component-Whole(e2,e1) 685 | 8685 Content-Container(e1,e2) 686 | 8686 Member-Collection(e1,e2) 687 | 8687 Other 688 | 8688 Entity-Origin(e1,e2) 689 | 8689 Content-Container(e1,e2) 690 | 8690 Cause-Effect(e2,e1) 691 | 8691 Message-Topic(e1,e2) 692 | 8692 Component-Whole(e1,e2) 693 | 8693 Content-Container(e1,e2) 694 | 8694 Other 695 | 8695 Content-Container(e1,e2) 696 | 8696 Member-Collection(e1,e2) 697 | 8697 Other 698 | 8698 Entity-Destination(e1,e2) 699 | 8699 Entity-Origin(e1,e2) 700 | 8700 Product-Producer(e2,e1) 701 | 8701 Member-Collection(e2,e1) 702 | 8702 Component-Whole(e1,e2) 703 | 8703 Component-Whole(e2,e1) 704 | 8704 Entity-Origin(e2,e1) 705 | 8705 Cause-Effect(e1,e2) 706 | 8706 Other 707 | 8707 Content-Container(e2,e1) 708 | 8708 Cause-Effect(e2,e1) 709 | 8709 Entity-Origin(e1,e2) 710 | 8710 Entity-Destination(e1,e2) 711 | 8711 Message-Topic(e1,e2) 712 | 8712 Member-Collection(e1,e2) 713 | 8713 Member-Collection(e2,e1) 714 | 8714 Member-Collection(e2,e1) 715 | 8715 Content-Container(e1,e2) 716 | 8716 Other 717 | 8717 Product-Producer(e2,e1) 718 | 8718 Other 719 | 8719 Entity-Destination(e1,e2) 720 | 8720 Cause-Effect(e2,e1) 721 | 8721 Other 722 | 8722 Product-Producer(e2,e1) 723 | 8723 Product-Producer(e2,e1) 724 | 8724 Component-Whole(e2,e1) 725 | 8725 Message-Topic(e1,e2) 726 | 8726 Other 727 | 8727 Product-Producer(e2,e1) 728 | 8728 Content-Container(e1,e2) 729 | 8729 Member-Collection(e2,e1) 730 | 8730 Component-Whole(e1,e2) 731 | 8731 Cause-Effect(e2,e1) 732 | 8732 Instrument-Agency(e2,e1) 733 | 8733 Entity-Origin(e1,e2) 734 | 8734 Entity-Origin(e1,e2) 735 | 8735 Component-Whole(e1,e2) 736 | 8736 Cause-Effect(e1,e2) 737 | 8737 Instrument-Agency(e2,e1) 738 | 8738 Content-Container(e1,e2) 739 | 8739 Cause-Effect(e2,e1) 
740 | 8740 Cause-Effect(e1,e2) 741 | 8741 Member-Collection(e2,e1) 742 | 8742 Entity-Destination(e1,e2) 743 | 8743 Entity-Destination(e1,e2) 744 | 8744 Product-Producer(e2,e1) 745 | 8745 Cause-Effect(e2,e1) 746 | 8746 Component-Whole(e1,e2) 747 | 8747 Entity-Origin(e1,e2) 748 | 8748 Cause-Effect(e1,e2) 749 | 8749 Entity-Origin(e1,e2) 750 | 8750 Instrument-Agency(e2,e1) 751 | 8751 Member-Collection(e2,e1) 752 | 8752 Cause-Effect(e1,e2) 753 | 8753 Other 754 | 8754 Cause-Effect(e2,e1) 755 | 8755 Entity-Destination(e1,e2) 756 | 8756 Product-Producer(e1,e2) 757 | 8757 Entity-Destination(e1,e2) 758 | 8758 Entity-Destination(e1,e2) 759 | 8759 Other 760 | 8760 Entity-Destination(e1,e2) 761 | 8761 Entity-Origin(e1,e2) 762 | 8762 Entity-Origin(e1,e2) 763 | 8763 Other 764 | 8764 Cause-Effect(e1,e2) 765 | 8765 Product-Producer(e2,e1) 766 | 8766 Product-Producer(e1,e2) 767 | 8767 Message-Topic(e2,e1) 768 | 8768 Product-Producer(e1,e2) 769 | 8769 Product-Producer(e1,e2) 770 | 8770 Content-Container(e1,e2) 771 | 8771 Other 772 | 8772 Entity-Destination(e1,e2) 773 | 8773 Member-Collection(e2,e1) 774 | 8774 Cause-Effect(e2,e1) 775 | 8775 Cause-Effect(e2,e1) 776 | 8776 Component-Whole(e2,e1) 777 | 8777 Content-Container(e1,e2) 778 | 8778 Component-Whole(e2,e1) 779 | 8779 Component-Whole(e2,e1) 780 | 8780 Content-Container(e1,e2) 781 | 8781 Cause-Effect(e1,e2) 782 | 8782 Instrument-Agency(e2,e1) 783 | 8783 Product-Producer(e2,e1) 784 | 8784 Entity-Origin(e1,e2) 785 | 8785 Other 786 | 8786 Other 787 | 8787 Entity-Origin(e2,e1) 788 | 8788 Message-Topic(e1,e2) 789 | 8789 Message-Topic(e1,e2) 790 | 8790 Instrument-Agency(e2,e1) 791 | 8791 Entity-Destination(e1,e2) 792 | 8792 Other 793 | 8793 Entity-Destination(e1,e2) 794 | 8794 Other 795 | 8795 Member-Collection(e2,e1) 796 | 8796 Member-Collection(e2,e1) 797 | 8797 Product-Producer(e1,e2) 798 | 8798 Member-Collection(e2,e1) 799 | 8799 Entity-Origin(e1,e2) 800 | 8800 Entity-Destination(e1,e2) 801 | 8801 Other 802 | 8802 
Component-Whole(e2,e1) 803 | 8803 Member-Collection(e2,e1) 804 | 8804 Instrument-Agency(e2,e1) 805 | 8805 Entity-Origin(e2,e1) 806 | 8806 Content-Container(e1,e2) 807 | 8807 Component-Whole(e1,e2) 808 | 8808 Component-Whole(e1,e2) 809 | 8809 Other 810 | 8810 Entity-Origin(e2,e1) 811 | 8811 Instrument-Agency(e1,e2) 812 | 8812 Cause-Effect(e2,e1) 813 | 8813 Instrument-Agency(e2,e1) 814 | 8814 Member-Collection(e1,e2) 815 | 8815 Entity-Destination(e1,e2) 816 | 8816 Content-Container(e1,e2) 817 | 8817 Member-Collection(e2,e1) 818 | 8818 Other 819 | 8819 Component-Whole(e1,e2) 820 | 8820 Component-Whole(e1,e2) 821 | 8821 Product-Producer(e2,e1) 822 | 8822 Member-Collection(e2,e1) 823 | 8823 Instrument-Agency(e2,e1) 824 | 8824 Member-Collection(e2,e1) 825 | 8825 Entity-Destination(e1,e2) 826 | 8826 Message-Topic(e1,e2) 827 | 8827 Entity-Destination(e1,e2) 828 | 8828 Product-Producer(e2,e1) 829 | 8829 Cause-Effect(e1,e2) 830 | 8830 Message-Topic(e1,e2) 831 | 8831 Component-Whole(e1,e2) 832 | 8832 Entity-Origin(e1,e2) 833 | 8833 Content-Container(e1,e2) 834 | 8834 Entity-Origin(e1,e2) 835 | 8835 Instrument-Agency(e2,e1) 836 | 8836 Entity-Origin(e1,e2) 837 | 8837 Component-Whole(e2,e1) 838 | 8838 Instrument-Agency(e2,e1) 839 | 8839 Member-Collection(e2,e1) 840 | 8840 Product-Producer(e2,e1) 841 | 8841 Cause-Effect(e1,e2) 842 | 8842 Other 843 | 8843 Content-Container(e1,e2) 844 | 8844 Message-Topic(e1,e2) 845 | 8845 Other 846 | 8846 Entity-Destination(e1,e2) 847 | 8847 Other 848 | 8848 Message-Topic(e1,e2) 849 | 8849 Entity-Destination(e1,e2) 850 | 8850 Entity-Destination(e1,e2) 851 | 8851 Cause-Effect(e2,e1) 852 | 8852 Content-Container(e1,e2) 853 | 8853 Entity-Origin(e1,e2) 854 | 8854 Member-Collection(e2,e1) 855 | 8855 Cause-Effect(e2,e1) 856 | 8856 Content-Container(e1,e2) 857 | 8857 Cause-Effect(e2,e1) 858 | 8858 Cause-Effect(e1,e2) 859 | 8859 Cause-Effect(e2,e1) 860 | 8860 Other 861 | 8861 Message-Topic(e1,e2) 862 | 8862 Entity-Destination(e1,e2) 863 | 8863 Other 864 | 
8864 Component-Whole(e2,e1) 865 | 8865 Component-Whole(e1,e2) 866 | 8866 Other 867 | 8867 Entity-Destination(e1,e2) 868 | 8868 Component-Whole(e2,e1) 869 | 8869 Product-Producer(e1,e2) 870 | 8870 Entity-Destination(e1,e2) 871 | 8871 Member-Collection(e2,e1) 872 | 8872 Instrument-Agency(e1,e2) 873 | 8873 Component-Whole(e1,e2) 874 | 8874 Other 875 | 8875 Cause-Effect(e1,e2) 876 | 8876 Other 877 | 8877 Member-Collection(e1,e2) 878 | 8878 Entity-Origin(e1,e2) 879 | 8879 Cause-Effect(e2,e1) 880 | 8880 Entity-Origin(e1,e2) 881 | 8881 Content-Container(e1,e2) 882 | 8882 Entity-Origin(e2,e1) 883 | 8883 Product-Producer(e2,e1) 884 | 8884 Component-Whole(e2,e1) 885 | 8885 Cause-Effect(e2,e1) 886 | 8886 Entity-Origin(e1,e2) 887 | 8887 Message-Topic(e2,e1) 888 | 8888 Other 889 | 8889 Cause-Effect(e2,e1) 890 | 8890 Entity-Origin(e1,e2) 891 | 8891 Content-Container(e1,e2) 892 | 8892 Product-Producer(e1,e2) 893 | 8893 Component-Whole(e2,e1) 894 | 8894 Entity-Origin(e1,e2) 895 | 8895 Product-Producer(e1,e2) 896 | 8896 Other 897 | 8897 Member-Collection(e2,e1) 898 | 8898 Entity-Destination(e1,e2) 899 | 8899 Entity-Origin(e2,e1) 900 | 8900 Message-Topic(e1,e2) 901 | 8901 Message-Topic(e1,e2) 902 | 8902 Member-Collection(e2,e1) 903 | 8903 Entity-Destination(e1,e2) 904 | 8904 Instrument-Agency(e2,e1) 905 | 8905 Other 906 | 8906 Member-Collection(e2,e1) 907 | 8907 Entity-Origin(e2,e1) 908 | 8908 Message-Topic(e1,e2) 909 | 8909 Other 910 | 8910 Other 911 | 8911 Member-Collection(e1,e2) 912 | 8912 Message-Topic(e1,e2) 913 | 8913 Product-Producer(e2,e1) 914 | 8914 Cause-Effect(e2,e1) 915 | 8915 Component-Whole(e2,e1) 916 | 8916 Product-Producer(e2,e1) 917 | 8917 Other 918 | 8918 Instrument-Agency(e2,e1) 919 | 8919 Message-Topic(e2,e1) 920 | 8920 Product-Producer(e1,e2) 921 | 8921 Entity-Origin(e2,e1) 922 | 8922 Product-Producer(e1,e2) 923 | 8923 Component-Whole(e1,e2) 924 | 8924 Product-Producer(e1,e2) 925 | 8925 Other 926 | 8926 Component-Whole(e1,e2) 927 | 8927 Product-Producer(e2,e1) 
928 | 8928 Component-Whole(e2,e1) 929 | 8929 Component-Whole(e2,e1) 930 | 8930 Entity-Destination(e1,e2) 931 | 8931 Other 932 | 8932 Component-Whole(e1,e2) 933 | 8933 Other 934 | 8934 Member-Collection(e2,e1) 935 | 8935 Component-Whole(e1,e2) 936 | 8936 Component-Whole(e1,e2) 937 | 8937 Cause-Effect(e2,e1) 938 | 8938 Content-Container(e1,e2) 939 | 8939 Entity-Destination(e1,e2) 940 | 8940 Cause-Effect(e2,e1) 941 | 8941 Component-Whole(e2,e1) 942 | 8942 Other 943 | 8943 Product-Producer(e2,e1) 944 | 8944 Member-Collection(e2,e1) 945 | 8945 Other 946 | 8946 Entity-Destination(e1,e2) 947 | 8947 Instrument-Agency(e1,e2) 948 | 8948 Message-Topic(e1,e2) 949 | 8949 Cause-Effect(e1,e2) 950 | 8950 Content-Container(e1,e2) 951 | 8951 Component-Whole(e2,e1) 952 | 8952 Member-Collection(e2,e1) 953 | 8953 Cause-Effect(e2,e1) 954 | 8954 Cause-Effect(e1,e2) 955 | 8955 Product-Producer(e1,e2) 956 | 8956 Other 957 | 8957 Member-Collection(e2,e1) 958 | 8958 Instrument-Agency(e2,e1) 959 | 8959 Component-Whole(e1,e2) 960 | 8960 Entity-Destination(e1,e2) 961 | 8961 Other 962 | 8962 Component-Whole(e1,e2) 963 | 8963 Content-Container(e1,e2) 964 | 8964 Other 965 | 8965 Member-Collection(e2,e1) 966 | 8966 Member-Collection(e2,e1) 967 | 8967 Other 968 | 8968 Entity-Destination(e1,e2) 969 | 8969 Product-Producer(e1,e2) 970 | 8970 Instrument-Agency(e2,e1) 971 | 8971 Product-Producer(e2,e1) 972 | 8972 Cause-Effect(e2,e1) 973 | 8973 Entity-Destination(e1,e2) 974 | 8974 Cause-Effect(e2,e1) 975 | 8975 Message-Topic(e2,e1) 976 | 8976 Product-Producer(e1,e2) 977 | 8977 Instrument-Agency(e2,e1) 978 | 8978 Entity-Destination(e1,e2) 979 | 8979 Message-Topic(e1,e2) 980 | 8980 Message-Topic(e2,e1) 981 | 8981 Instrument-Agency(e2,e1) 982 | 8982 Instrument-Agency(e2,e1) 983 | 8983 Entity-Destination(e1,e2) 984 | 8984 Component-Whole(e1,e2) 985 | 8985 Message-Topic(e1,e2) 986 | 8986 Member-Collection(e2,e1) 987 | 8987 Cause-Effect(e2,e1) 988 | 8988 Product-Producer(e1,e2) 989 | 8989 Cause-Effect(e2,e1) 
990 | 8990 Entity-Destination(e1,e2) 991 | 8991 Other 992 | 8992 Cause-Effect(e2,e1) 993 | 8993 Message-Topic(e1,e2) 994 | 8994 Message-Topic(e2,e1) 995 | 8995 Other 996 | 8996 Content-Container(e2,e1) 997 | 8997 Instrument-Agency(e2,e1) 998 | 8998 Member-Collection(e2,e1) 999 | 8999 Message-Topic(e1,e2) 1000 | 9000 Content-Container(e1,e2) 1001 | 9001 Content-Container(e1,e2) 1002 | 9002 Other 1003 | 9003 Component-Whole(e1,e2) 1004 | 9004 Content-Container(e1,e2) 1005 | 9005 Cause-Effect(e2,e1) 1006 | 9006 Component-Whole(e1,e2) 1007 | 9007 Content-Container(e1,e2) 1008 | 9008 Member-Collection(e2,e1) 1009 | 9009 Other 1010 | 9010 Content-Container(e1,e2) 1011 | 9011 Product-Producer(e2,e1) 1012 | 9012 Cause-Effect(e1,e2) 1013 | 9013 Component-Whole(e1,e2) 1014 | 9014 Cause-Effect(e2,e1) 1015 | 9015 Cause-Effect(e2,e1) 1016 | 9016 Entity-Destination(e1,e2) 1017 | 9017 Entity-Origin(e1,e2) 1018 | 9018 Cause-Effect(e1,e2) 1019 | 9019 Other 1020 | 9020 Other 1021 | 9021 Member-Collection(e1,e2) 1022 | 9022 Other 1023 | 9023 Content-Container(e1,e2) 1024 | 9024 Content-Container(e1,e2) 1025 | 9025 Cause-Effect(e2,e1) 1026 | 9026 Entity-Origin(e1,e2) 1027 | 9027 Entity-Origin(e1,e2) 1028 | 9028 Other 1029 | 9029 Component-Whole(e2,e1) 1030 | 9030 Message-Topic(e2,e1) 1031 | 9031 Product-Producer(e2,e1) 1032 | 9032 Member-Collection(e1,e2) 1033 | 9033 Product-Producer(e2,e1) 1034 | 9034 Other 1035 | 9035 Content-Container(e1,e2) 1036 | 9036 Instrument-Agency(e1,e2) 1037 | 9037 Entity-Destination(e1,e2) 1038 | 9038 Entity-Destination(e1,e2) 1039 | 9039 Entity-Destination(e1,e2) 1040 | 9040 Component-Whole(e1,e2) 1041 | 9041 Entity-Origin(e1,e2) 1042 | 9042 Instrument-Agency(e2,e1) 1043 | 9043 Content-Container(e1,e2) 1044 | 9044 Content-Container(e2,e1) 1045 | 9045 Content-Container(e2,e1) 1046 | 9046 Product-Producer(e2,e1) 1047 | 9047 Product-Producer(e1,e2) 1048 | 9048 Entity-Destination(e1,e2) 1049 | 9049 Product-Producer(e1,e2) 1050 | 9050 Message-Topic(e1,e2) 1051 
| 9051 Entity-Origin(e2,e1) 1052 | 9052 Product-Producer(e2,e1) 1053 | 9053 Other 1054 | 9054 Other 1055 | 9055 Cause-Effect(e2,e1) 1056 | 9056 Product-Producer(e2,e1) 1057 | 9057 Cause-Effect(e2,e1) 1058 | 9058 Product-Producer(e2,e1) 1059 | 9059 Message-Topic(e1,e2) 1060 | 9060 Entity-Destination(e1,e2) 1061 | 9061 Entity-Destination(e1,e2) 1062 | 9062 Cause-Effect(e1,e2) 1063 | 9063 Message-Topic(e2,e1) 1064 | 9064 Member-Collection(e2,e1) 1065 | 9065 Entity-Origin(e1,e2) 1066 | 9066 Member-Collection(e2,e1) 1067 | 9067 Entity-Origin(e1,e2) 1068 | 9068 Cause-Effect(e1,e2) 1069 | 9069 Member-Collection(e2,e1) 1070 | 9070 Entity-Destination(e1,e2) 1071 | 9071 Product-Producer(e2,e1) 1072 | 9072 Other 1073 | 9073 Cause-Effect(e2,e1) 1074 | 9074 Other 1075 | 9075 Member-Collection(e2,e1) 1076 | 9076 Entity-Destination(e1,e2) 1077 | 9077 Other 1078 | 9078 Entity-Destination(e1,e2) 1079 | 9079 Member-Collection(e2,e1) 1080 | 9080 Other 1081 | 9081 Cause-Effect(e1,e2) 1082 | 9082 Content-Container(e1,e2) 1083 | 9083 Cause-Effect(e2,e1) 1084 | 9084 Member-Collection(e2,e1) 1085 | 9085 Product-Producer(e1,e2) 1086 | 9086 Component-Whole(e2,e1) 1087 | 9087 Cause-Effect(e1,e2) 1088 | 9088 Other 1089 | 9089 Product-Producer(e2,e1) 1090 | 9090 Component-Whole(e1,e2) 1091 | 9091 Entity-Destination(e1,e2) 1092 | 9092 Entity-Origin(e1,e2) 1093 | 9093 Other 1094 | 9094 Other 1095 | 9095 Message-Topic(e1,e2) 1096 | 9096 Product-Producer(e2,e1) 1097 | 9097 Instrument-Agency(e2,e1) 1098 | 9098 Product-Producer(e2,e1) 1099 | 9099 Other 1100 | 9100 Component-Whole(e1,e2) 1101 | 9101 Other 1102 | 9102 Member-Collection(e2,e1) 1103 | 9103 Content-Container(e1,e2) 1104 | 9104 Member-Collection(e2,e1) 1105 | 9105 Component-Whole(e2,e1) 1106 | 9106 Entity-Destination(e1,e2) 1107 | 9107 Entity-Destination(e1,e2) 1108 | 9108 Message-Topic(e1,e2) 1109 | 9109 Message-Topic(e1,e2) 1110 | 9110 Cause-Effect(e1,e2) 1111 | 9111 Cause-Effect(e2,e1) 1112 | 9112 Content-Container(e2,e1) 1113 | 9113 
Component-Whole(e1,e2) 1114 | 9114 Product-Producer(e2,e1) 1115 | 9115 Entity-Origin(e1,e2) 1116 | 9116 Instrument-Agency(e2,e1) 1117 | 9117 Entity-Destination(e1,e2) 1118 | 9118 Entity-Destination(e1,e2) 1119 | 9119 Cause-Effect(e1,e2) 1120 | 9120 Product-Producer(e1,e2) 1121 | 9121 Product-Producer(e1,e2) 1122 | 9122 Entity-Destination(e1,e2) 1123 | 9123 Entity-Origin(e1,e2) 1124 | 9124 Instrument-Agency(e2,e1) 1125 | 9125 Entity-Origin(e1,e2) 1126 | 9126 Member-Collection(e2,e1) 1127 | 9127 Entity-Origin(e1,e2) 1128 | 9128 Cause-Effect(e1,e2) 1129 | 9129 Content-Container(e1,e2) 1130 | 9130 Other 1131 | 9131 Cause-Effect(e1,e2) 1132 | 9132 Instrument-Agency(e2,e1) 1133 | 9133 Instrument-Agency(e2,e1) 1134 | 9134 Component-Whole(e1,e2) 1135 | 9135 Instrument-Agency(e2,e1) 1136 | 9136 Cause-Effect(e2,e1) 1137 | 9137 Other 1138 | 9138 Component-Whole(e2,e1) 1139 | 9139 Cause-Effect(e2,e1) 1140 | 9140 Entity-Destination(e1,e2) 1141 | 9141 Message-Topic(e1,e2) 1142 | 9142 Entity-Destination(e1,e2) 1143 | 9143 Member-Collection(e2,e1) 1144 | 9144 Product-Producer(e2,e1) 1145 | 9145 Message-Topic(e1,e2) 1146 | 9146 Cause-Effect(e2,e1) 1147 | 9147 Cause-Effect(e2,e1) 1148 | 9148 Cause-Effect(e2,e1) 1149 | 9149 Other 1150 | 9150 Entity-Origin(e1,e2) 1151 | 9151 Product-Producer(e1,e2) 1152 | 9152 Component-Whole(e2,e1) 1153 | 9153 Content-Container(e1,e2) 1154 | 9154 Other 1155 | 9155 Entity-Origin(e1,e2) 1156 | 9156 Other 1157 | 9157 Other 1158 | 9158 Content-Container(e1,e2) 1159 | 9159 Content-Container(e1,e2) 1160 | 9160 Member-Collection(e2,e1) 1161 | 9161 Cause-Effect(e1,e2) 1162 | 9162 Entity-Destination(e1,e2) 1163 | 9163 Cause-Effect(e1,e2) 1164 | 9164 Other 1165 | 9165 Message-Topic(e1,e2) 1166 | 9166 Component-Whole(e2,e1) 1167 | 9167 Cause-Effect(e2,e1) 1168 | 9168 Cause-Effect(e2,e1) 1169 | 9169 Message-Topic(e1,e2) 1170 | 9170 Other 1171 | 9171 Cause-Effect(e1,e2) 1172 | 9172 Cause-Effect(e1,e2) 1173 | 9173 Entity-Origin(e1,e2) 1174 | 9174 
Component-Whole(e2,e1) 1175 | 9175 Entity-Origin(e1,e2) 1176 | 9176 Entity-Origin(e1,e2) 1177 | 9177 Product-Producer(e2,e1) 1178 | 9178 Entity-Origin(e1,e2) 1179 | 9179 Cause-Effect(e1,e2) 1180 | 9180 Entity-Origin(e1,e2) 1181 | 9181 Cause-Effect(e2,e1) 1182 | 9182 Entity-Origin(e1,e2) 1183 | 9183 Component-Whole(e2,e1) 1184 | 9184 Content-Container(e2,e1) 1185 | 9185 Component-Whole(e2,e1) 1186 | 9186 Message-Topic(e1,e2) 1187 | 9187 Other 1188 | 9188 Entity-Origin(e2,e1) 1189 | 9189 Entity-Destination(e1,e2) 1190 | 9190 Cause-Effect(e2,e1) 1191 | 9191 Message-Topic(e1,e2) 1192 | 9192 Other 1193 | 9193 Other 1194 | 9194 Member-Collection(e1,e2) 1195 | 9195 Instrument-Agency(e1,e2) 1196 | 9196 Content-Container(e1,e2) 1197 | 9197 Entity-Destination(e1,e2) 1198 | 9198 Member-Collection(e2,e1) 1199 | 9199 Message-Topic(e1,e2) 1200 | 9200 Entity-Destination(e1,e2) 1201 | 9201 Entity-Origin(e1,e2) 1202 | 9202 Message-Topic(e1,e2) 1203 | 9203 Component-Whole(e1,e2) 1204 | 9204 Entity-Origin(e1,e2) 1205 | 9205 Instrument-Agency(e2,e1) 1206 | 9206 Entity-Origin(e2,e1) 1207 | 9207 Component-Whole(e1,e2) 1208 | 9208 Other 1209 | 9209 Entity-Origin(e1,e2) 1210 | 9210 Component-Whole(e1,e2) 1211 | 9211 Member-Collection(e2,e1) 1212 | 9212 Content-Container(e1,e2) 1213 | 9213 Cause-Effect(e1,e2) 1214 | 9214 Component-Whole(e2,e1) 1215 | 9215 Instrument-Agency(e2,e1) 1216 | 9216 Member-Collection(e2,e1) 1217 | 9217 Other 1218 | 9218 Entity-Destination(e1,e2) 1219 | 9219 Other 1220 | 9220 Entity-Origin(e1,e2) 1221 | 9221 Cause-Effect(e2,e1) 1222 | 9222 Entity-Destination(e1,e2) 1223 | 9223 Product-Producer(e1,e2) 1224 | 9224 Cause-Effect(e1,e2) 1225 | 9225 Entity-Origin(e1,e2) 1226 | 9226 Cause-Effect(e1,e2) 1227 | 9227 Other 1228 | 9228 Cause-Effect(e1,e2) 1229 | 9229 Member-Collection(e2,e1) 1230 | 9230 Component-Whole(e2,e1) 1231 | 9231 Entity-Destination(e1,e2) 1232 | 9232 Other 1233 | 9233 Member-Collection(e2,e1) 1234 | 9234 Cause-Effect(e2,e1) 1235 | 9235 Other 1236 | 
9236 Entity-Origin(e2,e1) 1237 | 9237 Component-Whole(e1,e2) 1238 | 9238 Component-Whole(e2,e1) 1239 | 9239 Product-Producer(e2,e1) 1240 | 9240 Entity-Origin(e1,e2) 1241 | 9241 Component-Whole(e2,e1) 1242 | 9242 Member-Collection(e2,e1) 1243 | 9243 Content-Container(e1,e2) 1244 | 9244 Entity-Destination(e1,e2) 1245 | 9245 Other 1246 | 9246 Other 1247 | 9247 Entity-Destination(e1,e2) 1248 | 9248 Other 1249 | 9249 Other 1250 | 9250 Component-Whole(e2,e1) 1251 | 9251 Other 1252 | 9252 Other 1253 | 9253 Product-Producer(e1,e2) 1254 | 9254 Member-Collection(e1,e2) 1255 | 9255 Content-Container(e1,e2) 1256 | 9256 Other 1257 | 9257 Component-Whole(e2,e1) 1258 | 9258 Message-Topic(e1,e2) 1259 | 9259 Cause-Effect(e2,e1) 1260 | 9260 Content-Container(e2,e1) 1261 | 9261 Message-Topic(e1,e2) 1262 | 9262 Member-Collection(e2,e1) 1263 | 9263 Member-Collection(e2,e1) 1264 | 9264 Component-Whole(e2,e1) 1265 | 9265 Component-Whole(e1,e2) 1266 | 9266 Entity-Origin(e1,e2) 1267 | 9267 Component-Whole(e2,e1) 1268 | 9268 Member-Collection(e2,e1) 1269 | 9269 Message-Topic(e2,e1) 1270 | 9270 Instrument-Agency(e2,e1) 1271 | 9271 Entity-Origin(e1,e2) 1272 | 9272 Component-Whole(e1,e2) 1273 | 9273 Content-Container(e2,e1) 1274 | 9274 Entity-Origin(e1,e2) 1275 | 9275 Entity-Destination(e1,e2) 1276 | 9276 Component-Whole(e1,e2) 1277 | 9277 Product-Producer(e2,e1) 1278 | 9278 Entity-Origin(e1,e2) 1279 | 9279 Entity-Origin(e1,e2) 1280 | 9280 Cause-Effect(e2,e1) 1281 | 9281 Other 1282 | 9282 Member-Collection(e2,e1) 1283 | 9283 Other 1284 | 9284 Instrument-Agency(e1,e2) 1285 | 9285 Content-Container(e2,e1) 1286 | 9286 Member-Collection(e1,e2) 1287 | 9287 Entity-Origin(e2,e1) 1288 | 9288 Component-Whole(e2,e1) 1289 | 9289 Cause-Effect(e2,e1) 1290 | 9290 Message-Topic(e1,e2) 1291 | 9291 Instrument-Agency(e2,e1) 1292 | 9292 Content-Container(e1,e2) 1293 | 9293 Component-Whole(e2,e1) 1294 | 9294 Member-Collection(e2,e1) 1295 | 9295 Entity-Destination(e1,e2) 1296 | 9296 Entity-Origin(e1,e2) 1297 | 
9297 Entity-Destination(e1,e2) 1298 | 9298 Message-Topic(e1,e2) 1299 | 9299 Entity-Origin(e1,e2) 1300 | 9300 Entity-Destination(e1,e2) 1301 | 9301 Other 1302 | 9302 Component-Whole(e1,e2) 1303 | 9303 Member-Collection(e2,e1) 1304 | 9304 Message-Topic(e2,e1) 1305 | 9305 Entity-Origin(e1,e2) 1306 | 9306 Entity-Destination(e1,e2) 1307 | 9307 Product-Producer(e1,e2) 1308 | 9308 Instrument-Agency(e2,e1) 1309 | 9309 Cause-Effect(e2,e1) 1310 | 9310 Other 1311 | 9311 Cause-Effect(e2,e1) 1312 | 9312 Other 1313 | 9313 Component-Whole(e2,e1) 1314 | 9314 Content-Container(e1,e2) 1315 | 9315 Message-Topic(e1,e2) 1316 | 9316 Component-Whole(e1,e2) 1317 | 9317 Instrument-Agency(e2,e1) 1318 | 9318 Entity-Destination(e1,e2) 1319 | 9319 Cause-Effect(e2,e1) 1320 | 9320 Other 1321 | 9321 Message-Topic(e1,e2) 1322 | 9322 Product-Producer(e2,e1) 1323 | 9323 Cause-Effect(e2,e1) 1324 | 9324 Content-Container(e1,e2) 1325 | 9325 Member-Collection(e2,e1) 1326 | 9326 Entity-Origin(e1,e2) 1327 | 9327 Message-Topic(e1,e2) 1328 | 9328 Cause-Effect(e1,e2) 1329 | 9329 Component-Whole(e2,e1) 1330 | 9330 Product-Producer(e2,e1) 1331 | 9331 Instrument-Agency(e2,e1) 1332 | 9332 Content-Container(e1,e2) 1333 | 9333 Component-Whole(e2,e1) 1334 | 9334 Content-Container(e2,e1) 1335 | 9335 Entity-Destination(e1,e2) 1336 | 9336 Member-Collection(e1,e2) 1337 | 9337 Component-Whole(e2,e1) 1338 | 9338 Entity-Destination(e1,e2) 1339 | 9339 Message-Topic(e1,e2) 1340 | 9340 Product-Producer(e2,e1) 1341 | 9341 Content-Container(e1,e2) 1342 | 9342 Cause-Effect(e1,e2) 1343 | 9343 Entity-Origin(e1,e2) 1344 | 9344 Member-Collection(e2,e1) 1345 | 9345 Content-Container(e2,e1) 1346 | 9346 Component-Whole(e1,e2) 1347 | 9347 Entity-Origin(e1,e2) 1348 | 9348 Product-Producer(e1,e2) 1349 | 9349 Instrument-Agency(e2,e1) 1350 | 9350 Other 1351 | 9351 Entity-Destination(e1,e2) 1352 | 9352 Cause-Effect(e1,e2) 1353 | 9353 Instrument-Agency(e1,e2) 1354 | 9354 Member-Collection(e2,e1) 1355 | 9355 Member-Collection(e2,e1) 1356 | 
9356 Instrument-Agency(e2,e1) 1357 | 9357 Cause-Effect(e2,e1) 1358 | 9358 Other 1359 | 9359 Entity-Origin(e1,e2) 1360 | 9360 Entity-Destination(e1,e2) 1361 | 9361 Component-Whole(e1,e2) 1362 | 9362 Member-Collection(e2,e1) 1363 | 9363 Message-Topic(e1,e2) 1364 | 9364 Message-Topic(e1,e2) 1365 | 9365 Content-Container(e2,e1) 1366 | 9366 Cause-Effect(e1,e2) 1367 | 9367 Product-Producer(e2,e1) 1368 | 9368 Member-Collection(e2,e1) 1369 | 9369 Other 1370 | 9370 Cause-Effect(e2,e1) 1371 | 9371 Message-Topic(e1,e2) 1372 | 9372 Cause-Effect(e2,e1) 1373 | 9373 Cause-Effect(e1,e2) 1374 | 9374 Entity-Destination(e1,e2) 1375 | 9375 Entity-Destination(e1,e2) 1376 | 9376 Component-Whole(e2,e1) 1377 | 9377 Instrument-Agency(e2,e1) 1378 | 9378 Cause-Effect(e2,e1) 1379 | 9379 Cause-Effect(e1,e2) 1380 | 9380 Entity-Destination(e1,e2) 1381 | 9381 Message-Topic(e1,e2) 1382 | 9382 Component-Whole(e1,e2) 1383 | 9383 Entity-Origin(e2,e1) 1384 | 9384 Instrument-Agency(e2,e1) 1385 | 9385 Content-Container(e2,e1) 1386 | 9386 Other 1387 | 9387 Component-Whole(e2,e1) 1388 | 9388 Other 1389 | 9389 Entity-Destination(e1,e2) 1390 | 9390 Entity-Origin(e1,e2) 1391 | 9391 Component-Whole(e2,e1) 1392 | 9392 Other 1393 | 9393 Component-Whole(e2,e1) 1394 | 9394 Cause-Effect(e2,e1) 1395 | 9395 Entity-Origin(e2,e1) 1396 | 9396 Other 1397 | 9397 Other 1398 | 9398 Instrument-Agency(e2,e1) 1399 | 9399 Entity-Destination(e1,e2) 1400 | 9400 Other 1401 | 9401 Message-Topic(e2,e1) 1402 | 9402 Other 1403 | 9403 Cause-Effect(e1,e2) 1404 | 9404 Component-Whole(e2,e1) 1405 | 9405 Component-Whole(e1,e2) 1406 | 9406 Other 1407 | 9407 Content-Container(e2,e1) 1408 | 9408 Other 1409 | 9409 Instrument-Agency(e2,e1) 1410 | 9410 Message-Topic(e1,e2) 1411 | 9411 Component-Whole(e2,e1) 1412 | 9412 Member-Collection(e2,e1) 1413 | 9413 Instrument-Agency(e2,e1) 1414 | 9414 Other 1415 | 9415 Cause-Effect(e1,e2) 1416 | 9416 Entity-Destination(e1,e2) 1417 | 9417 Other 1418 | 9418 Other 1419 | 9419 Component-Whole(e1,e2) 1420 | 
9420 Component-Whole(e2,e1) 1421 | 9421 Entity-Origin(e2,e1) 1422 | 9422 Product-Producer(e2,e1) 1423 | 9423 Member-Collection(e1,e2) 1424 | 9424 Other 1425 | 9425 Message-Topic(e1,e2) 1426 | 9426 Entity-Destination(e1,e2) 1427 | 9427 Cause-Effect(e2,e1) 1428 | 9428 Product-Producer(e1,e2) 1429 | 9429 Entity-Destination(e1,e2) 1430 | 9430 Message-Topic(e1,e2) 1431 | 9431 Other 1432 | 9432 Message-Topic(e1,e2) 1433 | 9433 Member-Collection(e1,e2) 1434 | 9434 Cause-Effect(e2,e1) 1435 | 9435 Instrument-Agency(e2,e1) 1436 | 9436 Content-Container(e1,e2) 1437 | 9437 Entity-Destination(e1,e2) 1438 | 9438 Cause-Effect(e1,e2) 1439 | 9439 Other 1440 | 9440 Entity-Origin(e1,e2) 1441 | 9441 Component-Whole(e1,e2) 1442 | 9442 Message-Topic(e1,e2) 1443 | 9443 Instrument-Agency(e2,e1) 1444 | 9444 Other 1445 | 9445 Component-Whole(e2,e1) 1446 | 9446 Member-Collection(e2,e1) 1447 | 9447 Content-Container(e1,e2) 1448 | 9448 Component-Whole(e2,e1) 1449 | 9449 Component-Whole(e2,e1) 1450 | 9450 Product-Producer(e1,e2) 1451 | 9451 Member-Collection(e2,e1) 1452 | 9452 Cause-Effect(e1,e2) 1453 | 9453 Entity-Origin(e1,e2) 1454 | 9454 Entity-Origin(e1,e2) 1455 | 9455 Member-Collection(e1,e2) 1456 | 9456 Message-Topic(e1,e2) 1457 | 9457 Instrument-Agency(e2,e1) 1458 | 9458 Product-Producer(e1,e2) 1459 | 9459 Other 1460 | 9460 Entity-Origin(e1,e2) 1461 | 9461 Other 1462 | 9462 Member-Collection(e2,e1) 1463 | 9463 Entity-Origin(e1,e2) 1464 | 9464 Cause-Effect(e2,e1) 1465 | 9465 Other 1466 | 9466 Product-Producer(e1,e2) 1467 | 9467 Cause-Effect(e1,e2) 1468 | 9468 Member-Collection(e2,e1) 1469 | 9469 Cause-Effect(e2,e1) 1470 | 9470 Message-Topic(e2,e1) 1471 | 9471 Content-Container(e1,e2) 1472 | 9472 Entity-Destination(e1,e2) 1473 | 9473 Entity-Origin(e1,e2) 1474 | 9474 Member-Collection(e1,e2) 1475 | 9475 Content-Container(e1,e2) 1476 | 9476 Message-Topic(e1,e2) 1477 | 9477 Instrument-Agency(e1,e2) 1478 | 9478 Member-Collection(e2,e1) 1479 | 9479 Component-Whole(e1,e2) 1480 | 9480 Other 1481 
| 9481 Product-Producer(e2,e1) 1482 | 9482 Cause-Effect(e1,e2) 1483 | 9483 Content-Container(e1,e2) 1484 | 9484 Component-Whole(e1,e2) 1485 | 9485 Component-Whole(e2,e1) 1486 | 9486 Instrument-Agency(e2,e1) 1487 | 9487 Instrument-Agency(e2,e1) 1488 | 9488 Instrument-Agency(e2,e1) 1489 | 9489 Cause-Effect(e2,e1) 1490 | 9490 Cause-Effect(e2,e1) 1491 | 9491 Instrument-Agency(e2,e1) 1492 | 9492 Other 1493 | 9493 Entity-Origin(e2,e1) 1494 | 9494 Cause-Effect(e1,e2) 1495 | 9495 Message-Topic(e1,e2) 1496 | 9496 Content-Container(e1,e2) 1497 | 9497 Component-Whole(e1,e2) 1498 | 9498 Message-Topic(e1,e2) 1499 | 9499 Message-Topic(e2,e1) 1500 | 9500 Content-Container(e1,e2) 1501 | 9501 Content-Container(e1,e2) 1502 | 9502 Entity-Origin(e1,e2) 1503 | 9503 Other 1504 | 9504 Message-Topic(e1,e2) 1505 | 9505 Other 1506 | 9506 Entity-Destination(e1,e2) 1507 | 9507 Other 1508 | 9508 Cause-Effect(e1,e2) 1509 | 9509 Member-Collection(e2,e1) 1510 | 9510 Other 1511 | 9511 Other 1512 | 9512 Instrument-Agency(e2,e1) 1513 | 9513 Product-Producer(e2,e1) 1514 | 9514 Entity-Origin(e1,e2) 1515 | 9515 Cause-Effect(e2,e1) 1516 | 9516 Other 1517 | 9517 Other 1518 | 9518 Member-Collection(e2,e1) 1519 | 9519 Cause-Effect(e2,e1) 1520 | 9520 Other 1521 | 9521 Message-Topic(e1,e2) 1522 | 9522 Content-Container(e1,e2) 1523 | 9523 Other 1524 | 9524 Cause-Effect(e1,e2) 1525 | 9525 Message-Topic(e1,e2) 1526 | 9526 Message-Topic(e1,e2) 1527 | 9527 Component-Whole(e1,e2) 1528 | 9528 Content-Container(e1,e2) 1529 | 9529 Entity-Origin(e1,e2) 1530 | 9530 Member-Collection(e2,e1) 1531 | 9531 Entity-Destination(e1,e2) 1532 | 9532 Other 1533 | 9533 Entity-Destination(e1,e2) 1534 | 9534 Content-Container(e1,e2) 1535 | 9535 Component-Whole(e1,e2) 1536 | 9536 Message-Topic(e1,e2) 1537 | 9537 Other 1538 | 9538 Message-Topic(e1,e2) 1539 | 9539 Entity-Destination(e1,e2) 1540 | 9540 Component-Whole(e1,e2) 1541 | 9541 Other 1542 | 9542 Cause-Effect(e1,e2) 1543 | 9543 Message-Topic(e1,e2) 1544 | 9544 
Entity-Destination(e1,e2) 1545 | 9545 Other 1546 | 9546 Other 1547 | 9547 Component-Whole(e2,e1) 1548 | 9548 Entity-Origin(e1,e2) 1549 | 9549 Other 1550 | 9550 Member-Collection(e2,e1) 1551 | 9551 Instrument-Agency(e2,e1) 1552 | 9552 Other 1553 | 9553 Product-Producer(e1,e2) 1554 | 9554 Entity-Destination(e1,e2) 1555 | 9555 Instrument-Agency(e2,e1) 1556 | 9556 Cause-Effect(e2,e1) 1557 | 9557 Component-Whole(e2,e1) 1558 | 9558 Other 1559 | 9559 Cause-Effect(e2,e1) 1560 | 9560 Entity-Origin(e1,e2) 1561 | 9561 Component-Whole(e1,e2) 1562 | 9562 Component-Whole(e2,e1) 1563 | 9563 Entity-Destination(e1,e2) 1564 | 9564 Message-Topic(e2,e1) 1565 | 9565 Component-Whole(e1,e2) 1566 | 9566 Message-Topic(e1,e2) 1567 | 9567 Message-Topic(e2,e1) 1568 | 9568 Entity-Destination(e1,e2) 1569 | 9569 Other 1570 | 9570 Member-Collection(e2,e1) 1571 | 9571 Entity-Origin(e1,e2) 1572 | 9572 Instrument-Agency(e2,e1) 1573 | 9573 Cause-Effect(e1,e2) 1574 | 9574 Other 1575 | 9575 Instrument-Agency(e1,e2) 1576 | 9576 Cause-Effect(e2,e1) 1577 | 9577 Other 1578 | 9578 Entity-Destination(e1,e2) 1579 | 9579 Component-Whole(e2,e1) 1580 | 9580 Component-Whole(e2,e1) 1581 | 9581 Entity-Destination(e1,e2) 1582 | 9582 Cause-Effect(e1,e2) 1583 | 9583 Component-Whole(e2,e1) 1584 | 9584 Member-Collection(e2,e1) 1585 | 9585 Entity-Destination(e1,e2) 1586 | 9586 Entity-Destination(e1,e2) 1587 | 9587 Product-Producer(e1,e2) 1588 | 9588 Other 1589 | 9589 Cause-Effect(e1,e2) 1590 | 9590 Instrument-Agency(e2,e1) 1591 | 9591 Entity-Origin(e2,e1) 1592 | 9592 Member-Collection(e2,e1) 1593 | 9593 Entity-Destination(e1,e2) 1594 | 9594 Instrument-Agency(e2,e1) 1595 | 9595 Member-Collection(e2,e1) 1596 | 9596 Message-Topic(e1,e2) 1597 | 9597 Cause-Effect(e2,e1) 1598 | 9598 Entity-Destination(e1,e2) 1599 | 9599 Other 1600 | 9600 Component-Whole(e1,e2) 1601 | 9601 Cause-Effect(e2,e1) 1602 | 9602 Member-Collection(e2,e1) 1603 | 9603 Component-Whole(e1,e2) 1604 | 9604 Content-Container(e2,e1) 1605 | 9605 
Instrument-Agency(e2,e1) 1606 | 9606 Other 1607 | 9607 Other 1608 | 9608 Member-Collection(e2,e1) 1609 | 9609 Content-Container(e2,e1) 1610 | 9610 Other 1611 | 9611 Entity-Origin(e1,e2) 1612 | 9612 Component-Whole(e2,e1) 1613 | 9613 Component-Whole(e1,e2) 1614 | 9614 Member-Collection(e1,e2) 1615 | 9615 Message-Topic(e1,e2) 1616 | 9616 Other 1617 | 9617 Component-Whole(e1,e2) 1618 | 9618 Cause-Effect(e1,e2) 1619 | 9619 Instrument-Agency(e2,e1) 1620 | 9620 Member-Collection(e2,e1) 1621 | 9621 Entity-Destination(e1,e2) 1622 | 9622 Message-Topic(e1,e2) 1623 | 9623 Other 1624 | 9624 Cause-Effect(e1,e2) 1625 | 9625 Component-Whole(e1,e2) 1626 | 9626 Entity-Origin(e2,e1) 1627 | 9627 Other 1628 | 9628 Instrument-Agency(e2,e1) 1629 | 9629 Message-Topic(e1,e2) 1630 | 9630 Other 1631 | 9631 Other 1632 | 9632 Component-Whole(e2,e1) 1633 | 9633 Entity-Destination(e1,e2) 1634 | 9634 Component-Whole(e2,e1) 1635 | 9635 Content-Container(e1,e2) 1636 | 9636 Component-Whole(e1,e2) 1637 | 9637 Entity-Destination(e1,e2) 1638 | 9638 Other 1639 | 9639 Other 1640 | 9640 Content-Container(e1,e2) 1641 | 9641 Other 1642 | 9642 Other 1643 | 9643 Other 1644 | 9644 Product-Producer(e1,e2) 1645 | 9645 Content-Container(e1,e2) 1646 | 9646 Other 1647 | 9647 Cause-Effect(e2,e1) 1648 | 9648 Cause-Effect(e2,e1) 1649 | 9649 Instrument-Agency(e1,e2) 1650 | 9650 Other 1651 | 9651 Member-Collection(e2,e1) 1652 | 9652 Other 1653 | 9653 Message-Topic(e2,e1) 1654 | 9654 Instrument-Agency(e1,e2) 1655 | 9655 Entity-Destination(e1,e2) 1656 | 9656 Entity-Origin(e1,e2) 1657 | 9657 Entity-Origin(e1,e2) 1658 | 9658 Other 1659 | 9659 Cause-Effect(e1,e2) 1660 | 9660 Member-Collection(e2,e1) 1661 | 9661 Message-Topic(e1,e2) 1662 | 9662 Content-Container(e1,e2) 1663 | 9663 Other 1664 | 9664 Member-Collection(e2,e1) 1665 | 9665 Entity-Destination(e1,e2) 1666 | 9666 Component-Whole(e1,e2) 1667 | 9667 Product-Producer(e2,e1) 1668 | 9668 Component-Whole(e1,e2) 1669 | 9669 Entity-Origin(e1,e2) 1670 | 9670 
Entity-Origin(e1,e2) 1671 | 9671 Component-Whole(e2,e1) 1672 | 9672 Component-Whole(e2,e1) 1673 | 9673 Cause-Effect(e2,e1) 1674 | 9674 Other 1675 | 9675 Message-Topic(e1,e2) 1676 | 9676 Entity-Destination(e1,e2) 1677 | 9677 Product-Producer(e1,e2) 1678 | 9678 Member-Collection(e2,e1) 1679 | 9679 Component-Whole(e1,e2) 1680 | 9680 Other 1681 | 9681 Member-Collection(e2,e1) 1682 | 9682 Cause-Effect(e1,e2) 1683 | 9683 Entity-Destination(e1,e2) 1684 | 9684 Cause-Effect(e2,e1) 1685 | 9685 Component-Whole(e1,e2) 1686 | 9686 Other 1687 | 9687 Instrument-Agency(e2,e1) 1688 | 9688 Cause-Effect(e1,e2) 1689 | 9689 Cause-Effect(e2,e1) 1690 | 9690 Cause-Effect(e2,e1) 1691 | 9691 Message-Topic(e1,e2) 1692 | 9692 Product-Producer(e1,e2) 1693 | 9693 Entity-Origin(e2,e1) 1694 | 9694 Content-Container(e1,e2) 1695 | 9695 Cause-Effect(e1,e2) 1696 | 9696 Instrument-Agency(e2,e1) 1697 | 9697 Component-Whole(e1,e2) 1698 | 9698 Cause-Effect(e1,e2) 1699 | 9699 Cause-Effect(e2,e1) 1700 | 9700 Other 1701 | 9701 Other 1702 | 9702 Member-Collection(e2,e1) 1703 | 9703 Cause-Effect(e2,e1) 1704 | 9704 Instrument-Agency(e2,e1) 1705 | 9705 Message-Topic(e1,e2) 1706 | 9706 Other 1707 | 9707 Component-Whole(e2,e1) 1708 | 9708 Cause-Effect(e2,e1) 1709 | 9709 Member-Collection(e1,e2) 1710 | 9710 Entity-Origin(e1,e2) 1711 | 9711 Entity-Origin(e1,e2) 1712 | 9712 Product-Producer(e2,e1) 1713 | 9713 Component-Whole(e1,e2) 1714 | 9714 Other 1715 | 9715 Component-Whole(e2,e1) 1716 | 9716 Product-Producer(e2,e1) 1717 | 9717 Member-Collection(e2,e1) 1718 | 9718 Other 1719 | 9719 Cause-Effect(e1,e2) 1720 | 9720 Cause-Effect(e2,e1) 1721 | 9721 Instrument-Agency(e2,e1) 1722 | 9722 Cause-Effect(e2,e1) 1723 | 9723 Component-Whole(e1,e2) 1724 | 9724 Entity-Origin(e1,e2) 1725 | 9725 Cause-Effect(e1,e2) 1726 | 9726 Other 1727 | 9727 Cause-Effect(e1,e2) 1728 | 9728 Message-Topic(e1,e2) 1729 | 9729 Instrument-Agency(e1,e2) 1730 | 9730 Message-Topic(e1,e2) 1731 | 9731 Cause-Effect(e1,e2) 1732 | 9732 Cause-Effect(e1,e2) 
1733 | 9733 Entity-Origin(e1,e2) 1734 | 9734 Message-Topic(e2,e1) 1735 | 9735 Message-Topic(e1,e2) 1736 | 9736 Component-Whole(e2,e1) 1737 | 9737 Component-Whole(e1,e2) 1738 | 9738 Other 1739 | 9739 Cause-Effect(e2,e1) 1740 | 9740 Cause-Effect(e2,e1) 1741 | 9741 Other 1742 | 9742 Message-Topic(e2,e1) 1743 | 9743 Component-Whole(e2,e1) 1744 | 9744 Message-Topic(e2,e1) 1745 | 9745 Member-Collection(e2,e1) 1746 | 9746 Member-Collection(e2,e1) 1747 | 9747 Other 1748 | 9748 Member-Collection(e2,e1) 1749 | 9749 Message-Topic(e1,e2) 1750 | 9750 Other 1751 | 9751 Member-Collection(e1,e2) 1752 | 9752 Entity-Origin(e1,e2) 1753 | 9753 Member-Collection(e2,e1) 1754 | 9754 Content-Container(e1,e2) 1755 | 9755 Entity-Origin(e2,e1) 1756 | 9756 Message-Topic(e1,e2) 1757 | 9757 Message-Topic(e1,e2) 1758 | 9758 Cause-Effect(e2,e1) 1759 | 9759 Component-Whole(e2,e1) 1760 | 9760 Content-Container(e1,e2) 1761 | 9761 Other 1762 | 9762 Other 1763 | 9763 Component-Whole(e2,e1) 1764 | 9764 Message-Topic(e1,e2) 1765 | 9765 Member-Collection(e2,e1) 1766 | 9766 Entity-Destination(e1,e2) 1767 | 9767 Entity-Origin(e1,e2) 1768 | 9768 Instrument-Agency(e2,e1) 1769 | 9769 Cause-Effect(e2,e1) 1770 | 9770 Entity-Origin(e1,e2) 1771 | 9771 Component-Whole(e1,e2) 1772 | 9772 Entity-Destination(e1,e2) 1773 | 9773 Component-Whole(e2,e1) 1774 | 9774 Entity-Destination(e1,e2) 1775 | 9775 Entity-Destination(e1,e2) 1776 | 9776 Other 1777 | 9777 Message-Topic(e1,e2) 1778 | 9778 Product-Producer(e2,e1) 1779 | 9779 Other 1780 | 9780 Cause-Effect(e2,e1) 1781 | 9781 Entity-Destination(e1,e2) 1782 | 9782 Other 1783 | 9783 Entity-Origin(e1,e2) 1784 | 9784 Product-Producer(e2,e1) 1785 | 9785 Product-Producer(e2,e1) 1786 | 9786 Content-Container(e1,e2) 1787 | 9787 Entity-Destination(e1,e2) 1788 | 9788 Message-Topic(e1,e2) 1789 | 9789 Other 1790 | 9790 Member-Collection(e2,e1) 1791 | 9791 Cause-Effect(e1,e2) 1792 | 9792 Entity-Destination(e1,e2) 1793 | 9793 Other 1794 | 9794 Component-Whole(e1,e2) 1795 | 9795 Other 
1796 | 9796 Other 1797 | 9797 Cause-Effect(e2,e1) 1798 | 9798 Entity-Destination(e1,e2) 1799 | 9799 Other 1800 | 9800 Message-Topic(e1,e2) 1801 | 9801 Entity-Origin(e1,e2) 1802 | 9802 Entity-Destination(e1,e2) 1803 | 9803 Other 1804 | 9804 Member-Collection(e2,e1) 1805 | 9805 Entity-Destination(e1,e2) 1806 | 9806 Content-Container(e1,e2) 1807 | 9807 Entity-Origin(e1,e2) 1808 | 9808 Other 1809 | 9809 Entity-Destination(e1,e2) 1810 | 9810 Content-Container(e1,e2) 1811 | 9811 Other 1812 | 9812 Cause-Effect(e1,e2) 1813 | 9813 Instrument-Agency(e1,e2) 1814 | 9814 Member-Collection(e2,e1) 1815 | 9815 Other 1816 | 9816 Instrument-Agency(e2,e1) 1817 | 9817 Message-Topic(e1,e2) 1818 | 9818 Member-Collection(e2,e1) 1819 | 9819 Cause-Effect(e2,e1) 1820 | 9820 Other 1821 | 9821 Product-Producer(e1,e2) 1822 | 9822 Product-Producer(e1,e2) 1823 | 9823 Entity-Origin(e2,e1) 1824 | 9824 Instrument-Agency(e2,e1) 1825 | 9825 Member-Collection(e2,e1) 1826 | 9826 Member-Collection(e2,e1) 1827 | 9827 Member-Collection(e2,e1) 1828 | 9828 Product-Producer(e1,e2) 1829 | 9829 Cause-Effect(e1,e2) 1830 | 9830 Entity-Origin(e1,e2) 1831 | 9831 Cause-Effect(e1,e2) 1832 | 9832 Entity-Origin(e1,e2) 1833 | 9833 Other 1834 | 9834 Component-Whole(e1,e2) 1835 | 9835 Content-Container(e1,e2) 1836 | 9836 Product-Producer(e2,e1) 1837 | 9837 Instrument-Agency(e1,e2) 1838 | 9838 Member-Collection(e2,e1) 1839 | 9839 Other 1840 | 9840 Message-Topic(e1,e2) 1841 | 9841 Member-Collection(e2,e1) 1842 | 9842 Other 1843 | 9843 Other 1844 | 9844 Entity-Origin(e1,e2) 1845 | 9845 Component-Whole(e2,e1) 1846 | 9846 Product-Producer(e2,e1) 1847 | 9847 Other 1848 | 9848 Cause-Effect(e2,e1) 1849 | 9849 Other 1850 | 9850 Product-Producer(e2,e1) 1851 | 9851 Member-Collection(e2,e1) 1852 | 9852 Entity-Origin(e1,e2) 1853 | 9853 Other 1854 | 9854 Member-Collection(e2,e1) 1855 | 9855 Entity-Destination(e1,e2) 1856 | 9856 Content-Container(e1,e2) 1857 | 9857 Component-Whole(e2,e1) 1858 | 9858 Product-Producer(e1,e2) 1859 | 9859 
Content-Container(e2,e1) 1860 | 9860 Entity-Origin(e1,e2) 1861 | 9861 Cause-Effect(e2,e1) 1862 | 9862 Entity-Origin(e1,e2) 1863 | 9863 Product-Producer(e2,e1) 1864 | 9864 Product-Producer(e2,e1) 1865 | 9865 Entity-Destination(e1,e2) 1866 | 9866 Member-Collection(e2,e1) 1867 | 9867 Other 1868 | 9868 Cause-Effect(e2,e1) 1869 | 9869 Other 1870 | 9870 Product-Producer(e1,e2) 1871 | 9871 Entity-Destination(e1,e2) 1872 | 9872 Other 1873 | 9873 Entity-Destination(e1,e2) 1874 | 9874 Entity-Destination(e1,e2) 1875 | 9875 Member-Collection(e1,e2) 1876 | 9876 Cause-Effect(e2,e1) 1877 | 9877 Other 1878 | 9878 Member-Collection(e2,e1) 1879 | 9879 Other 1880 | 9880 Content-Container(e1,e2) 1881 | 9881 Member-Collection(e2,e1) 1882 | 9882 Entity-Origin(e1,e2) 1883 | 9883 Entity-Destination(e1,e2) 1884 | 9884 Instrument-Agency(e2,e1) 1885 | 9885 Message-Topic(e1,e2) 1886 | 9886 Other 1887 | 9887 Member-Collection(e2,e1) 1888 | 9888 Member-Collection(e1,e2) 1889 | 9889 Instrument-Agency(e2,e1) 1890 | 9890 Member-Collection(e2,e1) 1891 | 9891 Member-Collection(e2,e1) 1892 | 9892 Other 1893 | 9893 Component-Whole(e2,e1) 1894 | 9894 Entity-Destination(e1,e2) 1895 | 9895 Product-Producer(e2,e1) 1896 | 9896 Content-Container(e1,e2) 1897 | 9897 Other 1898 | 9898 Entity-Destination(e1,e2) 1899 | 9899 Cause-Effect(e2,e1) 1900 | 9900 Entity-Destination(e1,e2) 1901 | 9901 Cause-Effect(e2,e1) 1902 | 9902 Cause-Effect(e1,e2) 1903 | 9903 Other 1904 | 9904 Entity-Origin(e1,e2) 1905 | 9905 Other 1906 | 9906 Component-Whole(e2,e1) 1907 | 9907 Product-Producer(e1,e2) 1908 | 9908 Other 1909 | 9909 Product-Producer(e2,e1) 1910 | 9910 Member-Collection(e1,e2) 1911 | 9911 Message-Topic(e1,e2) 1912 | 9912 Instrument-Agency(e1,e2) 1913 | 9913 Content-Container(e1,e2) 1914 | 9914 Content-Container(e1,e2) 1915 | 9915 Other 1916 | 9916 Other 1917 | 9917 Product-Producer(e2,e1) 1918 | 9918 Member-Collection(e2,e1) 1919 | 9919 Cause-Effect(e2,e1) 1920 | 9920 Product-Producer(e2,e1) 1921 | 9921 
Component-Whole(e1,e2) 1922 | 9922 Entity-Origin(e2,e1) 1923 | 9923 Member-Collection(e2,e1) 1924 | 9924 Other 1925 | 9925 Component-Whole(e1,e2) 1926 | 9926 Product-Producer(e2,e1) 1927 | 9927 Component-Whole(e1,e2) 1928 | 9928 Component-Whole(e1,e2) 1929 | 9929 Content-Container(e1,e2) 1930 | 9930 Other 1931 | 9931 Entity-Destination(e1,e2) 1932 | 9932 Content-Container(e1,e2) 1933 | 9933 Product-Producer(e2,e1) 1934 | 9934 Component-Whole(e1,e2) 1935 | 9935 Product-Producer(e2,e1) 1936 | 9936 Entity-Destination(e1,e2) 1937 | 9937 Member-Collection(e2,e1) 1938 | 9938 Member-Collection(e2,e1) 1939 | 9939 Entity-Destination(e1,e2) 1940 | 9940 Content-Container(e2,e1) 1941 | 9941 Entity-Destination(e1,e2) 1942 | 9942 Content-Container(e2,e1) 1943 | 9943 Other 1944 | 9944 Message-Topic(e1,e2) 1945 | 9945 Component-Whole(e2,e1) 1946 | 9946 Message-Topic(e2,e1) 1947 | 9947 Product-Producer(e2,e1) 1948 | 9948 Entity-Destination(e1,e2) 1949 | 9949 Entity-Origin(e1,e2) 1950 | 9950 Other 1951 | 9951 Message-Topic(e1,e2) 1952 | 9952 Entity-Destination(e1,e2) 1953 | 9953 Entity-Destination(e1,e2) 1954 | 9954 Entity-Origin(e1,e2) 1955 | 9955 Content-Container(e2,e1) 1956 | 9956 Cause-Effect(e2,e1) 1957 | 9957 Component-Whole(e2,e1) 1958 | 9958 Entity-Origin(e1,e2) 1959 | 9959 Instrument-Agency(e2,e1) 1960 | 9960 Member-Collection(e2,e1) 1961 | 9961 Product-Producer(e2,e1) 1962 | 9962 Entity-Origin(e1,e2) 1963 | 9963 Entity-Destination(e1,e2) 1964 | 9964 Entity-Destination(e1,e2) 1965 | 9965 Cause-Effect(e2,e1) 1966 | 9966 Other 1967 | 9967 Cause-Effect(e1,e2) 1968 | 9968 Message-Topic(e2,e1) 1969 | 9969 Entity-Destination(e1,e2) 1970 | 9970 Instrument-Agency(e2,e1) 1971 | 9971 Component-Whole(e2,e1) 1972 | 9972 Component-Whole(e1,e2) 1973 | 9973 Message-Topic(e1,e2) 1974 | 9974 Cause-Effect(e2,e1) 1975 | 9975 Cause-Effect(e2,e1) 1976 | 9976 Other 1977 | 9977 Product-Producer(e2,e1) 1978 | 9978 Other 1979 | 9979 Cause-Effect(e1,e2) 1980 | 9980 Component-Whole(e1,e2) 1981 | 
9981 Member-Collection(e2,e1) 1982 | 9982 Entity-Destination(e1,e2) 1983 | 9983 Content-Container(e1,e2) 1984 | 9984 Member-Collection(e2,e1) 1985 | 9985 Cause-Effect(e2,e1) 1986 | 9986 Other 1987 | 9987 Product-Producer(e2,e1) 1988 | 9988 Content-Container(e1,e2) 1989 | 9989 Other 1990 | 9990 Other 1991 | 9991 Message-Topic(e1,e2) 1992 | 9992 Component-Whole(e1,e2) 1993 | 9993 Content-Container(e1,e2) 1994 | 9994 Component-Whole(e1,e2) 1995 | 9995 Other 1996 | 9996 Message-Topic(e1,e2) 1997 | 9997 Component-Whole(e1,e2) 1998 | 9998 Entity-Origin(e1,e2) 1999 | 9999 Entity-Destination(e1,e2) 2000 | 10000 Instrument-Agency(e2,e1) 2001 | 10001 Instrument-Agency(e2,e1) 2002 | 10002 Message-Topic(e1,e2) 2003 | 10003 Cause-Effect(e2,e1) 2004 | 10004 Entity-Destination(e1,e2) 2005 | 10005 Instrument-Agency(e2,e1) 2006 | 10006 Member-Collection(e2,e1) 2007 | 10007 Entity-Origin(e1,e2) 2008 | 10008 Entity-Destination(e1,e2) 2009 | 10009 Cause-Effect(e1,e2) 2010 | 10010 Entity-Origin(e1,e2) 2011 | 10011 Other 2012 | 10012 Cause-Effect(e2,e1) 2013 | 10013 Member-Collection(e2,e1) 2014 | 10014 Entity-Destination(e1,e2) 2015 | 10015 Other 2016 | 10016 Content-Container(e1,e2) 2017 | 10017 Entity-Destination(e1,e2) 2018 | 10018 Entity-Origin(e1,e2) 2019 | 10019 Other 2020 | 10020 Entity-Destination(e1,e2) 2021 | 10021 Other 2022 | 10022 Other 2023 | 10023 Message-Topic(e1,e2) 2024 | 10024 Message-Topic(e1,e2) 2025 | 10025 Other 2026 | 10026 Instrument-Agency(e2,e1) 2027 | 10027 Entity-Destination(e1,e2) 2028 | 10028 Message-Topic(e1,e2) 2029 | 10029 Member-Collection(e2,e1) 2030 | 10030 Other 2031 | 10031 Member-Collection(e2,e1) 2032 | 10032 Member-Collection(e2,e1) 2033 | 10033 Other 2034 | 10034 Content-Container(e1,e2) 2035 | 10035 Component-Whole(e2,e1) 2036 | 10036 Other 2037 | 10037 Entity-Destination(e1,e2) 2038 | 10038 Cause-Effect(e2,e1) 2039 | 10039 Entity-Destination(e1,e2) 2040 | 10040 Cause-Effect(e2,e1) 2041 | 10041 Cause-Effect(e2,e1) 2042 | 10042 
Message-Topic(e2,e1) 2043 | 10043 Entity-Destination(e1,e2) 2044 | 10044 Component-Whole(e2,e1) 2045 | 10045 Component-Whole(e2,e1) 2046 | 10046 Entity-Destination(e1,e2) 2047 | 10047 Cause-Effect(e1,e2) 2048 | 10048 Instrument-Agency(e2,e1) 2049 | 10049 Message-Topic(e1,e2) 2050 | 10050 Content-Container(e2,e1) 2051 | 10051 Component-Whole(e2,e1) 2052 | 10052 Member-Collection(e2,e1) 2053 | 10053 Content-Container(e1,e2) 2054 | 10054 Cause-Effect(e2,e1) 2055 | 10055 Entity-Destination(e1,e2) 2056 | 10056 Entity-Destination(e1,e2) 2057 | 10057 Instrument-Agency(e2,e1) 2058 | 10058 Member-Collection(e1,e2) 2059 | 10059 Cause-Effect(e2,e1) 2060 | 10060 Other 2061 | 10061 Other 2062 | 10062 Content-Container(e1,e2) 2063 | 10063 Component-Whole(e2,e1) 2064 | 10064 Cause-Effect(e1,e2) 2065 | 10065 Content-Container(e1,e2) 2066 | 10066 Other 2067 | 10067 Entity-Origin(e1,e2) 2068 | 10068 Entity-Destination(e1,e2) 2069 | 10069 Other 2070 | 10070 Component-Whole(e1,e2) 2071 | 10071 Entity-Origin(e1,e2) 2072 | 10072 Content-Container(e2,e1) 2073 | 10073 Other 2074 | 10074 Entity-Origin(e1,e2) 2075 | 10075 Entity-Origin(e1,e2) 2076 | 10076 Product-Producer(e1,e2) 2077 | 10077 Entity-Destination(e1,e2) 2078 | 10078 Entity-Destination(e1,e2) 2079 | 10079 Product-Producer(e2,e1) 2080 | 10080 Entity-Origin(e2,e1) 2081 | 10081 Entity-Destination(e1,e2) 2082 | 10082 Entity-Origin(e1,e2) 2083 | 10083 Component-Whole(e1,e2) 2084 | 10084 Entity-Origin(e1,e2) 2085 | 10085 Entity-Destination(e1,e2) 2086 | 10086 Cause-Effect(e1,e2) 2087 | 10087 Entity-Destination(e1,e2) 2088 | 10088 Instrument-Agency(e2,e1) 2089 | 10089 Product-Producer(e2,e1) 2090 | 10090 Cause-Effect(e1,e2) 2091 | 10091 Entity-Origin(e2,e1) 2092 | 10092 Entity-Origin(e1,e2) 2093 | 10093 Other 2094 | 10094 Content-Container(e1,e2) 2095 | 10095 Entity-Destination(e1,e2) 2096 | 10096 Component-Whole(e2,e1) 2097 | 10097 Other 2098 | 10098 Message-Topic(e1,e2) 2099 | 10099 Entity-Destination(e1,e2) 2100 | 10100 
Entity-Destination(e1,e2) 2101 | 10101 Entity-Origin(e2,e1) 2102 | 10102 Cause-Effect(e1,e2) 2103 | 10103 Message-Topic(e1,e2) 2104 | 10104 Member-Collection(e2,e1) 2105 | 10105 Member-Collection(e2,e1) 2106 | 10106 Component-Whole(e2,e1) 2107 | 10107 Content-Container(e1,e2) 2108 | 10108 Message-Topic(e1,e2) 2109 | 10109 Other 2110 | 10110 Message-Topic(e1,e2) 2111 | 10111 Other 2112 | 10112 Other 2113 | 10113 Product-Producer(e2,e1) 2114 | 10114 Message-Topic(e2,e1) 2115 | 10115 Message-Topic(e1,e2) 2116 | 10116 Entity-Origin(e2,e1) 2117 | 10117 Product-Producer(e2,e1) 2118 | 10118 Cause-Effect(e1,e2) 2119 | 10119 Member-Collection(e2,e1) 2120 | 10120 Component-Whole(e2,e1) 2121 | 10121 Entity-Destination(e1,e2) 2122 | 10122 Entity-Origin(e1,e2) 2123 | 10123 Message-Topic(e1,e2) 2124 | 10124 Other 2125 | 10125 Other 2126 | 10126 Member-Collection(e2,e1) 2127 | 10127 Other 2128 | 10128 Instrument-Agency(e1,e2) 2129 | 10129 Other 2130 | 10130 Other 2131 | 10131 Product-Producer(e1,e2) 2132 | 10132 Component-Whole(e2,e1) 2133 | 10133 Instrument-Agency(e2,e1) 2134 | 10134 Cause-Effect(e2,e1) 2135 | 10135 Component-Whole(e2,e1) 2136 | 10136 Entity-Origin(e1,e2) 2137 | 10137 Message-Topic(e1,e2) 2138 | 10138 Entity-Origin(e1,e2) 2139 | 10139 Entity-Origin(e1,e2) 2140 | 10140 Product-Producer(e2,e1) 2141 | 10141 Other 2142 | 10142 Product-Producer(e2,e1) 2143 | 10143 Other 2144 | 10144 Instrument-Agency(e2,e1) 2145 | 10145 Instrument-Agency(e1,e2) 2146 | 10146 Product-Producer(e1,e2) 2147 | 10147 Component-Whole(e2,e1) 2148 | 10148 Product-Producer(e2,e1) 2149 | 10149 Instrument-Agency(e2,e1) 2150 | 10150 Component-Whole(e1,e2) 2151 | 10151 Product-Producer(e2,e1) 2152 | 10152 Instrument-Agency(e1,e2) 2153 | 10153 Product-Producer(e2,e1) 2154 | 10154 Member-Collection(e2,e1) 2155 | 10155 Message-Topic(e1,e2) 2156 | 10156 Cause-Effect(e1,e2) 2157 | 10157 Component-Whole(e1,e2) 2158 | 10158 Entity-Destination(e1,e2) 2159 | 10159 Other 2160 | 10160 Other 2161 | 10161 
Component-Whole(e1,e2) 2162 | 10162 Entity-Origin(e1,e2) 2163 | 10163 Entity-Origin(e2,e1) 2164 | 10164 Entity-Origin(e1,e2) 2165 | 10165 Entity-Destination(e1,e2) 2166 | 10166 Component-Whole(e1,e2) 2167 | 10167 Entity-Origin(e1,e2) 2168 | 10168 Content-Container(e1,e2) 2169 | 10169 Member-Collection(e2,e1) 2170 | 10170 Entity-Origin(e1,e2) 2171 | 10171 Content-Container(e2,e1) 2172 | 10172 Message-Topic(e2,e1) 2173 | 10173 Other 2174 | 10174 Member-Collection(e2,e1) 2175 | 10175 Entity-Destination(e1,e2) 2176 | 10176 Product-Producer(e2,e1) 2177 | 10177 Cause-Effect(e2,e1) 2178 | 10178 Entity-Destination(e1,e2) 2179 | 10179 Product-Producer(e1,e2) 2180 | 10180 Instrument-Agency(e2,e1) 2181 | 10181 Other 2182 | 10182 Cause-Effect(e2,e1) 2183 | 10183 Message-Topic(e1,e2) 2184 | 10184 Entity-Destination(e1,e2) 2185 | 10185 Entity-Origin(e1,e2) 2186 | 10186 Other 2187 | 10187 Entity-Destination(e1,e2) 2188 | 10188 Other 2189 | 10189 Message-Topic(e1,e2) 2190 | 10190 Product-Producer(e1,e2) 2191 | 10191 Entity-Destination(e1,e2) 2192 | 10192 Product-Producer(e2,e1) 2193 | 10193 Component-Whole(e1,e2) 2194 | 10194 Entity-Origin(e1,e2) 2195 | 10195 Instrument-Agency(e2,e1) 2196 | 10196 Other 2197 | 10197 Product-Producer(e1,e2) 2198 | 10198 Entity-Origin(e1,e2) 2199 | 10199 Entity-Origin(e1,e2) 2200 | 10200 Entity-Origin(e1,e2) 2201 | 10201 Instrument-Agency(e2,e1) 2202 | 10202 Entity-Destination(e1,e2) 2203 | 10203 Instrument-Agency(e2,e1) 2204 | 10204 Message-Topic(e1,e2) 2205 | 10205 Product-Producer(e2,e1) 2206 | 10206 Product-Producer(e2,e1) 2207 | 10207 Entity-Destination(e1,e2) 2208 | 10208 Component-Whole(e1,e2) 2209 | 10209 Cause-Effect(e2,e1) 2210 | 10210 Component-Whole(e2,e1) 2211 | 10211 Message-Topic(e1,e2) 2212 | 10212 Component-Whole(e1,e2) 2213 | 10213 Other 2214 | 10214 Component-Whole(e1,e2) 2215 | 10215 Entity-Origin(e1,e2) 2216 | 10216 Message-Topic(e1,e2) 2217 | 10217 Other 2218 | 10218 Entity-Origin(e2,e1) 2219 | 10219 Content-Container(e1,e2) 
2220 | 10220 Message-Topic(e1,e2) 2221 | 10221 Entity-Origin(e1,e2) 2222 | 10222 Entity-Origin(e1,e2) 2223 | 10223 Member-Collection(e2,e1) 2224 | 10224 Product-Producer(e2,e1) 2225 | 10225 Member-Collection(e2,e1) 2226 | 10226 Entity-Destination(e1,e2) 2227 | 10227 Content-Container(e1,e2) 2228 | 10228 Cause-Effect(e2,e1) 2229 | 10229 Member-Collection(e1,e2) 2230 | 10230 Cause-Effect(e2,e1) 2231 | 10231 Entity-Destination(e1,e2) 2232 | 10232 Content-Container(e1,e2) 2233 | 10233 Other 2234 | 10234 Product-Producer(e2,e1) 2235 | 10235 Instrument-Agency(e2,e1) 2236 | 10236 Message-Topic(e1,e2) 2237 | 10237 Product-Producer(e2,e1) 2238 | 10238 Member-Collection(e2,e1) 2239 | 10239 Member-Collection(e2,e1) 2240 | 10240 Entity-Destination(e1,e2) 2241 | 10241 Instrument-Agency(e2,e1) 2242 | 10242 Message-Topic(e2,e1) 2243 | 10243 Instrument-Agency(e2,e1) 2244 | 10244 Other 2245 | 10245 Entity-Destination(e1,e2) 2246 | 10246 Cause-Effect(e2,e1) 2247 | 10247 Message-Topic(e1,e2) 2248 | 10248 Content-Container(e2,e1) 2249 | 10249 Instrument-Agency(e2,e1) 2250 | 10250 Product-Producer(e1,e2) 2251 | 10251 Other 2252 | 10252 Instrument-Agency(e2,e1) 2253 | 10253 Message-Topic(e1,e2) 2254 | 10254 Cause-Effect(e2,e1) 2255 | 10255 Entity-Destination(e1,e2) 2256 | 10256 Content-Container(e2,e1) 2257 | 10257 Cause-Effect(e1,e2) 2258 | 10258 Cause-Effect(e2,e1) 2259 | 10259 Message-Topic(e2,e1) 2260 | 10260 Entity-Origin(e1,e2) 2261 | 10261 Other 2262 | 10262 Other 2263 | 10263 Entity-Destination(e1,e2) 2264 | 10264 Component-Whole(e2,e1) 2265 | 10265 Message-Topic(e1,e2) 2266 | 10266 Product-Producer(e2,e1) 2267 | 10267 Cause-Effect(e1,e2) 2268 | 10268 Member-Collection(e2,e1) 2269 | 10269 Message-Topic(e1,e2) 2270 | 10270 Product-Producer(e2,e1) 2271 | 10271 Entity-Origin(e1,e2) 2272 | 10272 Component-Whole(e1,e2) 2273 | 10273 Entity-Origin(e1,e2) 2274 | 10274 Component-Whole(e2,e1) 2275 | 10275 Cause-Effect(e1,e2) 2276 | 10276 Entity-Destination(e1,e2) 2277 | 10277 
Component-Whole(e1,e2) 2278 | 10278 Product-Producer(e1,e2) 2279 | 10279 Cause-Effect(e2,e1) 2280 | 10280 Entity-Destination(e1,e2) 2281 | 10281 Cause-Effect(e2,e1) 2282 | 10282 Other 2283 | 10283 Entity-Origin(e2,e1) 2284 | 10284 Entity-Destination(e1,e2) 2285 | 10285 Cause-Effect(e2,e1) 2286 | 10286 Content-Container(e1,e2) 2287 | 10287 Content-Container(e1,e2) 2288 | 10288 Component-Whole(e2,e1) 2289 | 10289 Member-Collection(e2,e1) 2290 | 10290 Content-Container(e1,e2) 2291 | 10291 Other 2292 | 10292 Message-Topic(e1,e2) 2293 | 10293 Entity-Destination(e1,e2) 2294 | 10294 Instrument-Agency(e1,e2) 2295 | 10295 Message-Topic(e2,e1) 2296 | 10296 Cause-Effect(e2,e1) 2297 | 10297 Entity-Origin(e1,e2) 2298 | 10298 Entity-Origin(e2,e1) 2299 | 10299 Entity-Origin(e1,e2) 2300 | 10300 Other 2301 | 10301 Member-Collection(e2,e1) 2302 | 10302 Message-Topic(e1,e2) 2303 | 10303 Entity-Destination(e1,e2) 2304 | 10304 Instrument-Agency(e2,e1) 2305 | 10305 Component-Whole(e1,e2) 2306 | 10306 Component-Whole(e2,e1) 2307 | 10307 Component-Whole(e1,e2) 2308 | 10308 Other 2309 | 10309 Message-Topic(e1,e2) 2310 | 10310 Message-Topic(e1,e2) 2311 | 10311 Component-Whole(e2,e1) 2312 | 10312 Content-Container(e2,e1) 2313 | 10313 Product-Producer(e1,e2) 2314 | 10314 Content-Container(e1,e2) 2315 | 10315 Component-Whole(e1,e2) 2316 | 10316 Content-Container(e1,e2) 2317 | 10317 Other 2318 | 10318 Other 2319 | 10319 Member-Collection(e2,e1) 2320 | 10320 Instrument-Agency(e2,e1) 2321 | 10321 Entity-Destination(e1,e2) 2322 | 10322 Component-Whole(e1,e2) 2323 | 10323 Other 2324 | 10324 Message-Topic(e1,e2) 2325 | 10325 Content-Container(e1,e2) 2326 | 10326 Other 2327 | 10327 Content-Container(e1,e2) 2328 | 10328 Product-Producer(e1,e2) 2329 | 10329 Instrument-Agency(e2,e1) 2330 | 10330 Entity-Destination(e1,e2) 2331 | 10331 Instrument-Agency(e2,e1) 2332 | 10332 Content-Container(e1,e2) 2333 | 10333 Other 2334 | 10334 Other 2335 | 10335 Cause-Effect(e2,e1) 2336 | 10336 Entity-Origin(e1,e2) 2337 
| 10337 Content-Container(e1,e2) 2338 | 10338 Entity-Origin(e1,e2) 2339 | 10339 Other 2340 | 10340 Entity-Origin(e1,e2) 2341 | 10341 Other 2342 | 10342 Entity-Destination(e1,e2) 2343 | 10343 Instrument-Agency(e2,e1) 2344 | 10344 Cause-Effect(e2,e1) 2345 | 10345 Component-Whole(e2,e1) 2346 | 10346 Instrument-Agency(e2,e1) 2347 | 10347 Content-Container(e2,e1) 2348 | 10348 Entity-Destination(e1,e2) 2349 | 10349 Member-Collection(e2,e1) 2350 | 10350 Cause-Effect(e1,e2) 2351 | 10351 Entity-Destination(e1,e2) 2352 | 10352 Message-Topic(e2,e1) 2353 | 10353 Product-Producer(e2,e1) 2354 | 10354 Entity-Destination(e1,e2) 2355 | 10355 Content-Container(e1,e2) 2356 | 10356 Entity-Origin(e1,e2) 2357 | 10357 Entity-Origin(e1,e2) 2358 | 10358 Component-Whole(e1,e2) 2359 | 10359 Other 2360 | 10360 Message-Topic(e1,e2) 2361 | 10361 Instrument-Agency(e1,e2) 2362 | 10362 Entity-Destination(e1,e2) 2363 | 10363 Entity-Destination(e1,e2) 2364 | 10364 Product-Producer(e2,e1) 2365 | 10365 Message-Topic(e1,e2) 2366 | 10366 Member-Collection(e2,e1) 2367 | 10367 Product-Producer(e2,e1) 2368 | 10368 Instrument-Agency(e2,e1) 2369 | 10369 Instrument-Agency(e2,e1) 2370 | 10370 Other 2371 | 10371 Product-Producer(e1,e2) 2372 | 10372 Product-Producer(e1,e2) 2373 | 10373 Cause-Effect(e1,e2) 2374 | 10374 Content-Container(e1,e2) 2375 | 10375 Member-Collection(e2,e1) 2376 | 10376 Entity-Destination(e1,e2) 2377 | 10377 Message-Topic(e1,e2) 2378 | 10378 Entity-Origin(e2,e1) 2379 | 10379 Cause-Effect(e1,e2) 2380 | 10380 Component-Whole(e2,e1) 2381 | 10381 Message-Topic(e1,e2) 2382 | 10382 Cause-Effect(e2,e1) 2383 | 10383 Cause-Effect(e1,e2) 2384 | 10384 Entity-Origin(e1,e2) 2385 | 10385 Instrument-Agency(e2,e1) 2386 | 10386 Component-Whole(e2,e1) 2387 | 10387 Component-Whole(e2,e1) 2388 | 10388 Product-Producer(e1,e2) 2389 | 10389 Component-Whole(e1,e2) 2390 | 10390 Other 2391 | 10391 Instrument-Agency(e2,e1) 2392 | 10392 Message-Topic(e1,e2) 2393 | 10393 Entity-Origin(e1,e2) 2394 | 10394 Other 2395 | 
10395 Message-Topic(e1,e2) 2396 | 10396 Cause-Effect(e2,e1) 2397 | 10397 Entity-Origin(e1,e2) 2398 | 10398 Cause-Effect(e1,e2) 2399 | 10399 Entity-Destination(e1,e2) 2400 | 10400 Component-Whole(e1,e2) 2401 | 10401 Member-Collection(e2,e1) 2402 | 10402 Other 2403 | 10403 Entity-Origin(e1,e2) 2404 | 10404 Member-Collection(e2,e1) 2405 | 10405 Entity-Destination(e1,e2) 2406 | 10406 Other 2407 | 10407 Product-Producer(e2,e1) 2408 | 10408 Member-Collection(e2,e1) 2409 | 10409 Product-Producer(e1,e2) 2410 | 10410 Other 2411 | 10411 Other 2412 | 10412 Product-Producer(e2,e1) 2413 | 10413 Entity-Destination(e1,e2) 2414 | 10414 Message-Topic(e2,e1) 2415 | 10415 Entity-Destination(e1,e2) 2416 | 10416 Member-Collection(e2,e1) 2417 | 10417 Cause-Effect(e2,e1) 2418 | 10418 Entity-Destination(e1,e2) 2419 | 10419 Cause-Effect(e2,e1) 2420 | 10420 Other 2421 | 10421 Entity-Destination(e1,e2) 2422 | 10422 Message-Topic(e1,e2) 2423 | 10423 Entity-Origin(e1,e2) 2424 | 10424 Instrument-Agency(e2,e1) 2425 | 10425 Cause-Effect(e2,e1) 2426 | 10426 Cause-Effect(e2,e1) 2427 | 10427 Other 2428 | 10428 Component-Whole(e1,e2) 2429 | 10429 Message-Topic(e1,e2) 2430 | 10430 Member-Collection(e2,e1) 2431 | 10431 Content-Container(e1,e2) 2432 | 10432 Content-Container(e1,e2) 2433 | 10433 Component-Whole(e2,e1) 2434 | 10434 Cause-Effect(e1,e2) 2435 | 10435 Component-Whole(e1,e2) 2436 | 10436 Entity-Destination(e1,e2) 2437 | 10437 Message-Topic(e1,e2) 2438 | 10438 Other 2439 | 10439 Other 2440 | 10440 Product-Producer(e1,e2) 2441 | 10441 Member-Collection(e1,e2) 2442 | 10442 Entity-Destination(e1,e2) 2443 | 10443 Content-Container(e1,e2) 2444 | 10444 Instrument-Agency(e2,e1) 2445 | 10445 Content-Container(e1,e2) 2446 | 10446 Entity-Destination(e1,e2) 2447 | 10447 Other 2448 | 10448 Product-Producer(e1,e2) 2449 | 10449 Member-Collection(e2,e1) 2450 | 10450 Other 2451 | 10451 Component-Whole(e2,e1) 2452 | 10452 Other 2453 | 10453 Entity-Destination(e1,e2) 2454 | 10454 Message-Topic(e1,e2) 2455 | 
10455 Product-Producer(e1,e2) 2456 | 10456 Entity-Destination(e1,e2) 2457 | 10457 Message-Topic(e1,e2) 2458 | 10458 Other 2459 | 10459 Other 2460 | 10460 Component-Whole(e2,e1) 2461 | 10461 Product-Producer(e2,e1) 2462 | 10462 Content-Container(e1,e2) 2463 | 10463 Entity-Destination(e1,e2) 2464 | 10464 Product-Producer(e2,e1) 2465 | 10465 Message-Topic(e1,e2) 2466 | 10466 Cause-Effect(e2,e1) 2467 | 10467 Entity-Destination(e1,e2) 2468 | 10468 Cause-Effect(e1,e2) 2469 | 10469 Component-Whole(e1,e2) 2470 | 10470 Content-Container(e1,e2) 2471 | 10471 Entity-Origin(e1,e2) 2472 | 10472 Message-Topic(e1,e2) 2473 | 10473 Product-Producer(e1,e2) 2474 | 10474 Entity-Origin(e1,e2) 2475 | 10475 Member-Collection(e2,e1) 2476 | 10476 Content-Container(e1,e2) 2477 | 10477 Content-Container(e1,e2) 2478 | 10478 Entity-Destination(e1,e2) 2479 | 10479 Content-Container(e1,e2) 2480 | 10480 Entity-Origin(e2,e1) 2481 | 10481 Cause-Effect(e2,e1) 2482 | 10482 Product-Producer(e1,e2) 2483 | 10483 Component-Whole(e1,e2) 2484 | 10484 Component-Whole(e1,e2) 2485 | 10485 Other 2486 | 10486 Message-Topic(e1,e2) 2487 | 10487 Other 2488 | 10488 Entity-Destination(e1,e2) 2489 | 10489 Component-Whole(e2,e1) 2490 | 10490 Entity-Origin(e2,e1) 2491 | 10491 Instrument-Agency(e2,e1) 2492 | 10492 Other 2493 | 10493 Cause-Effect(e1,e2) 2494 | 10494 Other 2495 | 10495 Content-Container(e2,e1) 2496 | 10496 Product-Producer(e2,e1) 2497 | 10497 Component-Whole(e2,e1) 2498 | 10498 Content-Container(e2,e1) 2499 | 10499 Other 2500 | 10500 Cause-Effect(e2,e1) 2501 | 10501 Cause-Effect(e2,e1) 2502 | 10502 Component-Whole(e1,e2) 2503 | 10503 Component-Whole(e1,e2) 2504 | 10504 Cause-Effect(e1,e2) 2505 | 10505 Cause-Effect(e1,e2) 2506 | 10506 Instrument-Agency(e2,e1) 2507 | 10507 Entity-Origin(e2,e1) 2508 | 10508 Product-Producer(e1,e2) 2509 | 10509 Entity-Destination(e1,e2) 2510 | 10510 Component-Whole(e1,e2) 2511 | 10511 Product-Producer(e1,e2) 2512 | 10512 Other 2513 | 10513 Other 2514 | 10514 
Entity-Origin(e1,e2) 2515 | 10515 Member-Collection(e2,e1) 2516 | 10516 Product-Producer(e2,e1) 2517 | 10517 Other 2518 | 10518 Message-Topic(e1,e2) 2519 | 10519 Entity-Destination(e1,e2) 2520 | 10520 Member-Collection(e2,e1) 2521 | 10521 Other 2522 | 10522 Other 2523 | 10523 Cause-Effect(e2,e1) 2524 | 10524 Cause-Effect(e2,e1) 2525 | 10525 Member-Collection(e2,e1) 2526 | 10526 Component-Whole(e1,e2) 2527 | 10527 Member-Collection(e2,e1) 2528 | 10528 Cause-Effect(e2,e1) 2529 | 10529 Component-Whole(e1,e2) 2530 | 10530 Content-Container(e1,e2) 2531 | 10531 Message-Topic(e2,e1) 2532 | 10532 Entity-Origin(e1,e2) 2533 | 10533 Message-Topic(e1,e2) 2534 | 10534 Other 2535 | 10535 Message-Topic(e1,e2) 2536 | 10536 Component-Whole(e1,e2) 2537 | 10537 Product-Producer(e2,e1) 2538 | 10538 Entity-Origin(e1,e2) 2539 | 10539 Product-Producer(e1,e2) 2540 | 10540 Entity-Destination(e1,e2) 2541 | 10541 Entity-Origin(e1,e2) 2542 | 10542 Component-Whole(e2,e1) 2543 | 10543 Entity-Origin(e1,e2) 2544 | 10544 Cause-Effect(e1,e2) 2545 | 10545 Cause-Effect(e2,e1) 2546 | 10546 Other 2547 | 10547 Component-Whole(e2,e1) 2548 | 10548 Component-Whole(e1,e2) 2549 | 10549 Product-Producer(e1,e2) 2550 | 10550 Instrument-Agency(e2,e1) 2551 | 10551 Cause-Effect(e2,e1) 2552 | 10552 Cause-Effect(e1,e2) 2553 | 10553 Product-Producer(e2,e1) 2554 | 10554 Product-Producer(e2,e1) 2555 | 10555 Content-Container(e1,e2) 2556 | 10556 Component-Whole(e2,e1) 2557 | 10557 Entity-Destination(e1,e2) 2558 | 10558 Message-Topic(e1,e2) 2559 | 10559 Entity-Destination(e1,e2) 2560 | 10560 Member-Collection(e2,e1) 2561 | 10561 Other 2562 | 10562 Other 2563 | 10563 Product-Producer(e2,e1) 2564 | 10564 Entity-Destination(e1,e2) 2565 | 10565 Product-Producer(e1,e2) 2566 | 10566 Entity-Destination(e1,e2) 2567 | 10567 Other 2568 | 10568 Other 2569 | 10569 Product-Producer(e1,e2) 2570 | 10570 Message-Topic(e1,e2) 2571 | 10571 Other 2572 | 10572 Other 2573 | 10573 Entity-Origin(e1,e2) 2574 | 10574 Other 2575 | 10575 
Content-Container(e1,e2) 2576 | 10576 Product-Producer(e2,e1) 2577 | 10577 Cause-Effect(e1,e2) 2578 | 10578 Cause-Effect(e2,e1) 2579 | 10579 Content-Container(e1,e2) 2580 | 10580 Member-Collection(e2,e1) 2581 | 10581 Component-Whole(e2,e1) 2582 | 10582 Member-Collection(e2,e1) 2583 | 10583 Instrument-Agency(e2,e1) 2584 | 10584 Cause-Effect(e1,e2) 2585 | 10585 Product-Producer(e1,e2) 2586 | 10586 Component-Whole(e1,e2) 2587 | 10587 Entity-Origin(e2,e1) 2588 | 10588 Member-Collection(e2,e1) 2589 | 10589 Other 2590 | 10590 Entity-Destination(e1,e2) 2591 | 10591 Component-Whole(e2,e1) 2592 | 10592 Component-Whole(e1,e2) 2593 | 10593 Other 2594 | 10594 Entity-Origin(e1,e2) 2595 | 10595 Other 2596 | 10596 Message-Topic(e1,e2) 2597 | 10597 Cause-Effect(e1,e2) 2598 | 10598 Other 2599 | 10599 Cause-Effect(e2,e1) 2600 | 10600 Product-Producer(e1,e2) 2601 | 10601 Other 2602 | 10602 Entity-Destination(e1,e2) 2603 | 10603 Other 2604 | 10604 Component-Whole(e2,e1) 2605 | 10605 Cause-Effect(e1,e2) 2606 | 10606 Cause-Effect(e1,e2) 2607 | 10607 Component-Whole(e2,e1) 2608 | 10608 Entity-Origin(e1,e2) 2609 | 10609 Instrument-Agency(e2,e1) 2610 | 10610 Other 2611 | 10611 Entity-Destination(e1,e2) 2612 | 10612 Other 2613 | 10613 Entity-Destination(e1,e2) 2614 | 10614 Cause-Effect(e2,e1) 2615 | 10615 Other 2616 | 10616 Message-Topic(e1,e2) 2617 | 10617 Entity-Destination(e1,e2) 2618 | 10618 Product-Producer(e2,e1) 2619 | 10619 Entity-Origin(e1,e2) 2620 | 10620 Other 2621 | 10621 Other 2622 | 10622 Cause-Effect(e1,e2) 2623 | 10623 Entity-Origin(e1,e2) 2624 | 10624 Content-Container(e1,e2) 2625 | 10625 Member-Collection(e2,e1) 2626 | 10626 Component-Whole(e2,e1) 2627 | 10627 Cause-Effect(e2,e1) 2628 | 10628 Message-Topic(e1,e2) 2629 | 10629 Cause-Effect(e1,e2) 2630 | 10630 Other 2631 | 10631 Content-Container(e1,e2) 2632 | 10632 Entity-Destination(e1,e2) 2633 | 10633 Entity-Destination(e1,e2) 2634 | 10634 Member-Collection(e2,e1) 2635 | 10635 Content-Container(e1,e2) 2636 | 10636 
Content-Container(e1,e2) 2637 | 10637 Cause-Effect(e2,e1) 2638 | 10638 Other 2639 | 10639 Cause-Effect(e2,e1) 2640 | 10640 Component-Whole(e1,e2) 2641 | 10641 Cause-Effect(e2,e1) 2642 | 10642 Cause-Effect(e1,e2) 2643 | 10643 Cause-Effect(e2,e1) 2644 | 10644 Entity-Origin(e1,e2) 2645 | 10645 Cause-Effect(e2,e1) 2646 | 10646 Cause-Effect(e2,e1) 2647 | 10647 Message-Topic(e2,e1) 2648 | 10648 Product-Producer(e1,e2) 2649 | 10649 Entity-Origin(e1,e2) 2650 | 10650 Cause-Effect(e2,e1) 2651 | 10651 Member-Collection(e2,e1) 2652 | 10652 Other 2653 | 10653 Message-Topic(e1,e2) 2654 | 10654 Other 2655 | 10655 Content-Container(e1,e2) 2656 | 10656 Entity-Origin(e1,e2) 2657 | 10657 Component-Whole(e1,e2) 2658 | 10658 Message-Topic(e1,e2) 2659 | 10659 Member-Collection(e1,e2) 2660 | 10660 Message-Topic(e1,e2) 2661 | 10661 Product-Producer(e2,e1) 2662 | 10662 Content-Container(e2,e1) 2663 | 10663 Content-Container(e1,e2) 2664 | 10664 Other 2665 | 10665 Component-Whole(e2,e1) 2666 | 10666 Entity-Origin(e1,e2) 2667 | 10667 Other 2668 | 10668 Member-Collection(e2,e1) 2669 | 10669 Message-Topic(e1,e2) 2670 | 10670 Content-Container(e1,e2) 2671 | 10671 Other 2672 | 10672 Other 2673 | 10673 Product-Producer(e1,e2) 2674 | 10674 Other 2675 | 10675 Entity-Destination(e1,e2) 2676 | 10676 Component-Whole(e2,e1) 2677 | 10677 Message-Topic(e2,e1) 2678 | 10678 Other 2679 | 10679 Product-Producer(e1,e2) 2680 | 10680 Instrument-Agency(e2,e1) 2681 | 10681 Entity-Destination(e1,e2) 2682 | 10682 Other 2683 | 10683 Entity-Destination(e1,e2) 2684 | 10684 Entity-Origin(e1,e2) 2685 | 10685 Product-Producer(e2,e1) 2686 | 10686 Component-Whole(e1,e2) 2687 | 10687 Other 2688 | 10688 Entity-Destination(e1,e2) 2689 | 10689 Component-Whole(e2,e1) 2690 | 10690 Entity-Origin(e1,e2) 2691 | 10691 Entity-Origin(e1,e2) 2692 | 10692 Cause-Effect(e1,e2) 2693 | 10693 Content-Container(e1,e2) 2694 | 10694 Entity-Destination(e1,e2) 2695 | 10695 Message-Topic(e2,e1) 2696 | 10696 Instrument-Agency(e2,e1) 2697 | 10697 
Message-Topic(e1,e2) 2698 | 10698 Other 2699 | 10699 Message-Topic(e2,e1) 2700 | 10700 Member-Collection(e2,e1) 2701 | 10701 Entity-Destination(e1,e2) 2702 | 10702 Instrument-Agency(e2,e1) 2703 | 10703 Cause-Effect(e1,e2) 2704 | 10704 Cause-Effect(e2,e1) 2705 | 10705 Entity-Destination(e1,e2) 2706 | 10706 Other 2707 | 10707 Component-Whole(e2,e1) 2708 | 10708 Entity-Destination(e1,e2) 2709 | 10709 Other 2710 | 10710 Member-Collection(e2,e1) 2711 | 10711 Entity-Origin(e1,e2) 2712 | 10712 Entity-Origin(e1,e2) 2713 | 10713 Instrument-Agency(e2,e1) 2714 | 10714 Product-Producer(e1,e2) 2715 | 10715 Component-Whole(e2,e1) 2716 | 10716 Product-Producer(e1,e2) 2717 | 10717 Entity-Destination(e2,e1) 2718 | -------------------------------------------------------------------------------- /analysis/semeval/semeval2010_task8_format_checker.pl: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl -w 2 | # 3 | # 4 | # Author: Preslav Nakov 5 | # nakov@comp.nus.edu.sg 6 | # National University of Singapore 7 | # 8 | # WHAT: This is an official output file format checker for SemEval-2010 Task #8. 9 | # 10 | # Use: 11 | # semeval2010_task8_format_checker.pl 12 | # 13 | # Examples: 14 | # semeval2010_task8_format_checker.pl proposed_answer1.txt 15 | # semeval2010_task8_format_checker.pl proposed_answer2.txt 16 | # semeval2010_task8_format_checker.pl proposed_answer3.txt 17 | # semeval2010_task8_format_checker.pl proposed_answer4.txt 18 | # 19 | # In the examples above, the first three files are OK, while the last one contains four errors. 20 | # And answer_key2.txt contains the true labels for the *training* dataset. 21 | # 22 | # Description: 23 | # The scorer takes as input a proposed classification file, 24 | # which should contain one prediction per line in the format " " 25 | # with a TAB as a separator, e.g., 26 | # 1 Component-Whole(e2,e1) 27 | # 2 Other 28 | # 3 Instrument-Agency(e2,e1) 29 | # ... 
30 | # The file does not have to be sorted in any way. 31 | # Repetitions of IDs are not allowed. 32 | # 33 | # In case of problems, the checker outputs the problemtic line and its number. 34 | # Finally, the total number of problems found is reported 35 | # or a message is output saying that the file format is OK. 36 | # 37 | # Participants are expected to check their output using this checker before submission. 38 | # 39 | # Last modified: March 10, 2010 40 | # 41 | # 42 | 43 | use strict; 44 | 45 | ############### 46 | ### I/O ### 47 | ############### 48 | 49 | if ($#ARGV != 0) { 50 | die "Usage:\nsemeval2010_task8_format_checker.pl \n"; 51 | } 52 | 53 | my $INPUT_FILE_NAME = $ARGV[0]; 54 | 55 | ################ 56 | ### MAIN ### 57 | ################ 58 | my %ids = (); 59 | 60 | my $errCnt = 0; 61 | open(INPUT, $INPUT_FILE_NAME) or die "Failed to open $INPUT_FILE_NAME for text reading.\n"; 62 | for (my $lineNo = 1; ; $lineNo++) { 63 | my ($id, $label) = &getIDandLabel($_); 64 | if ($id < 0) { 65 | s/[\n\r]*$//; 66 | print "Bad file format on line $lineNo: '$_'\n"; 67 | $errCnt++; 68 | } 69 | elsif (defined $ids{$id}) { 70 | s/[\n\r]*$//; 71 | print "Bad file format on line $lineNo (ID $id is already defined): '$_'\n"; 72 | $errCnt++; 73 | } 74 | $ids{$id}++; 75 | } 76 | close(INPUT) or die "Failed to close $INPUT_FILE_NAME.\n"; 77 | 78 | if (0 == $errCnt) { 79 | print "\n<<< The file format is OK.\n"; 80 | } 81 | else { 82 | print "\n<<< The format is INCORRECT: $errCnt problematic line(s) found!\n"; 83 | } 84 | 85 | 86 | ################ 87 | ### SUBS ### 88 | ################ 89 | 90 | sub getIDandLabel() { 91 | my $line = shift; 92 | 93 | return (-1,()) if ($line !~ /^([0-9]+)\t([^\r]+)\r?\n$/); 94 | my ($id, $label) = ($1, $2); 95 | 96 | return ($id, '_Other') if ($label eq 'Other'); 97 | 98 | return ($id, $label) 99 | if (($label eq 'Cause-Effect(e1,e2)') || ($label eq 'Cause-Effect(e2,e1)') || 100 | ($label eq 'Component-Whole(e1,e2)') || ($label eq 
'Component-Whole(e2,e1)') || 101 | ($label eq 'Content-Container(e1,e2)') || ($label eq 'Content-Container(e2,e1)') || 102 | ($label eq 'Entity-Destination(e1,e2)') || ($label eq 'Entity-Destination(e2,e1)') || 103 | ($label eq 'Entity-Origin(e1,e2)') || ($label eq 'Entity-Origin(e2,e1)') || 104 | ($label eq 'Instrument-Agency(e1,e2)') || ($label eq 'Instrument-Agency(e2,e1)') || 105 | ($label eq 'Member-Collection(e1,e2)') || ($label eq 'Member-Collection(e2,e1)') || 106 | ($label eq 'Message-Topic(e1,e2)') || ($label eq 'Message-Topic(e2,e1)') || 107 | ($label eq 'Product-Producer(e1,e2)') || ($label eq 'Product-Producer(e2,e1)')); 108 | 109 | return (-1, ()); 110 | } 111 | -------------------------------------------------------------------------------- /analysis/semeval/semeval2010_task8_scorer-v1.2.pl: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl -w 2 | # 3 | # 4 | # Author: Preslav Nakov 5 | # nakov@comp.nus.edu.sg 6 | # National University of Singapore 7 | # 8 | # WHAT: This is the official scorer for SemEval-2010 Task #8. 9 | # 10 | # 11 | # Last modified: March 22, 2010 12 | # 13 | # Current version: 1.2 14 | # 15 | # Revision history: 16 | # - Version 1.2 (fixed a bug in the precision for the scoring of (iii)) 17 | # - Version 1.1 (fixed a bug in the calculation of accuracy) 18 | # 19 | # 20 | # Use: 21 | # semeval2010_task8_scorer-v1.1.pl 22 | # 23 | # Example2: 24 | # semeval2010_task8_scorer-v1.1.pl proposed_answer1.txt answer_key1.txt > result_scores1.txt 25 | # semeval2010_task8_scorer-v1.1.pl proposed_answer2.txt answer_key2.txt > result_scores2.txt 26 | # semeval2010_task8_scorer-v1.1.pl proposed_answer3.txt answer_key3.txt > result_scores3.txt 27 | # 28 | # Description: 29 | # The scorer takes as input a proposed classification file and an answer key file. 
30 | # Both files should contain one prediction per line in the format " " 31 | # with a TAB as a separator, e.g., 32 | # 1 Component-Whole(e2,e1) 33 | # 2 Other 34 | # 3 Instrument-Agency(e2,e1) 35 | # ... 36 | # The files do not have to be sorted in any way and the first file can have predictions 37 | # for a subset of the IDs in the second file only, e.g., because hard examples have been skipped. 38 | # Repetitions of IDs are not allowed in either of the files. 39 | # 40 | # The scorer calculates and outputs the following statistics: 41 | # (1) confusion matrix, which shows 42 | # - the sums for each row/column: -SUM- 43 | # - the number of skipped examples: skip 44 | # - the number of examples with correct relation, but wrong directionality: xDIRx 45 | # - the number of examples in the answer key file: ACTUAL ( = -SUM- + skip + xDIRx ) 46 | # (2) accuracy and coverage 47 | # (3) precision (P), recall (R), and F1-score for each relation 48 | # (4) micro-averaged P, R, F1, where the calculations ignore the Other category. 49 | # (5) macro-averaged P, R, F1, where the calculations ignore the Other category. 50 | # 51 | # Note that in scores (4) and (5), skipped examples are equivalent to those classified as Other. 52 | # So are examples classified as relations that do not exist in the key file (which is probably not optimal). 53 | # 54 | # The scoring is done three times: 55 | # (i) as a (2*9+1)-way classification 56 | # (ii) as a (9+1)-way classification, with directionality ignored 57 | # (iii) as a (9+1)-way classification, with directionality taken into account. 58 | # 59 | # The official score is the macro-averaged F1-score for (iii). 
60 | # 61 | 62 | use strict; 63 | 64 | 65 | ############### 66 | ### I/O ### 67 | ############### 68 | 69 | if ($#ARGV != 1) { 70 | die "Usage:\nsemeval2010_task8_scorer.pl \n"; 71 | } 72 | 73 | my $PROPOSED_ANSWERS_FILE_NAME = $ARGV[0]; 74 | my $ANSWER_KEYS_FILE_NAME = $ARGV[1]; 75 | 76 | 77 | ################ 78 | ### MAIN ### 79 | ################ 80 | 81 | my (%confMatrix19way, %confMatrix10wayNoDir, %confMatrix10wayWithDir) = (); 82 | my (%idsProposed, %idsAnswer) = (); 83 | my (%allLabels19waylAnswer, %allLabels10wayAnswer) = (); 84 | my (%allLabels19wayProposed, %allLabels10wayNoDirProposed, %allLabels10wayWithDirProposed) = (); 85 | 86 | ### 1. Read the file contents 87 | my $totalProposed = &readFileIntoHash($PROPOSED_ANSWERS_FILE_NAME, \%idsProposed); 88 | my $totalAnswer = &readFileIntoHash($ANSWER_KEYS_FILE_NAME, \%idsAnswer); 89 | 90 | ### 2. Calculate the confusion matrices 91 | foreach my $id (keys %idsProposed) { 92 | 93 | ### 2.1. Unexpected IDs are not allowed 94 | die "File $PROPOSED_ANSWERS_FILE_NAME contains a bad ID: '$id'" 95 | if (!defined($idsAnswer{$id})); 96 | 97 | ### 2.2. Update the 19-way confusion matrix 98 | my $labelProposed = $idsProposed{$id}; 99 | my $labelAnswer = $idsAnswer{$id}; 100 | $confMatrix19way{$labelProposed}{$labelAnswer}++; 101 | $allLabels19wayProposed{$labelProposed}++; 102 | 103 | ### 2.3. Update the 10-way confusion matrix *without* direction 104 | my $labelProposedNoDir = $labelProposed; 105 | my $labelAnswerNoDir = $labelAnswer; 106 | $labelProposedNoDir =~ s/\(e[12],e[12]\)[\n\r]*$//; 107 | $labelAnswerNoDir =~ s/\(e[12],e[12]\)[\n\r]*$//; 108 | $confMatrix10wayNoDir{$labelProposedNoDir}{$labelAnswerNoDir}++; 109 | $allLabels10wayNoDirProposed{$labelProposedNoDir}++; 110 | 111 | ### 2.4. 
Update the 10-way confusion matrix *with* direction 112 | if ($labelProposed eq $labelAnswer) { ## both relation and direction match 113 | $confMatrix10wayWithDir{$labelProposedNoDir}{$labelAnswerNoDir}++; 114 | $allLabels10wayWithDirProposed{$labelProposedNoDir}++; 115 | } 116 | elsif ($labelProposedNoDir eq $labelAnswerNoDir) { ## the relations match, but the direction is wrong 117 | $confMatrix10wayWithDir{'WRONG_DIR'}{$labelAnswerNoDir}++; 118 | $allLabels10wayWithDirProposed{'WRONG_DIR'}++; 119 | } 120 | else { ### Wrong relation 121 | $confMatrix10wayWithDir{$labelProposedNoDir}{$labelAnswerNoDir}++; 122 | $allLabels10wayWithDirProposed{$labelProposedNoDir}++; 123 | } 124 | } 125 | 126 | ### 3. Calculate the ground truth distributions 127 | foreach my $id (keys %idsAnswer) { 128 | 129 | ### 3.1. Update the 19-way answer distribution 130 | my $labelAnswer = $idsAnswer{$id}; 131 | $allLabels19waylAnswer{$labelAnswer}++; 132 | 133 | ### 3.2. Update the 10-way answer distribution 134 | my $labelAnswerNoDir = $labelAnswer; 135 | $labelAnswerNoDir =~ s/\(e[12],e[12]\)[\n\r]*$//; 136 | $allLabels10wayAnswer{$labelAnswerNoDir}++; 137 | } 138 | 139 | ### 4. Check for proposed classes that are not contained in the answer key file: this may happen in cross-validation 140 | foreach my $labelProposed (sort keys %allLabels19wayProposed) { 141 | if (!defined($allLabels19waylAnswer{$labelProposed})) { 142 | print "!!!WARNING!!! The proposed file contains $allLabels19wayProposed{$labelProposed} label(s) of type '$labelProposed', which is NOT present in the key file.\n\n"; 143 | } 144 | } 145 | 146 | ### 4. 19-way evaluation with directionality 147 | print "<<< (2*9+1)-WAY EVALUATION (USING DIRECTIONALITY)>>>:\n\n"; 148 | &evaluate(\%confMatrix19way, \%allLabels19wayProposed, \%allLabels19waylAnswer, $totalProposed, $totalAnswer, 0); 149 | 150 | ### 5. 
Evaluate without directionality 151 | print "<<< (9+1)-WAY EVALUATION IGNORING DIRECTIONALITY >>>:\n\n"; 152 | &evaluate(\%confMatrix10wayNoDir, \%allLabels10wayNoDirProposed, \%allLabels10wayAnswer, $totalProposed, $totalAnswer, 0); 153 | 154 | ### 6. Evaluate without directionality 155 | print "<<< (9+1)-WAY EVALUATION TAKING DIRECTIONALITY INTO ACCOUNT -- OFFICIAL >>>:\n\n"; 156 | my $officialScore = &evaluate(\%confMatrix10wayWithDir, \%allLabels10wayWithDirProposed, \%allLabels10wayAnswer, $totalProposed, $totalAnswer, 1); 157 | 158 | ### 7. Output the official score 159 | printf "<<< The official score is (9+1)-way evaluation with directionality taken into account: macro-averaged F1 = %0.2f%s >>>\n", $officialScore, '%'; 160 | 161 | 162 | ################ 163 | ### SUBS ### 164 | ################ 165 | 166 | sub getIDandLabel() { 167 | my $line = shift; 168 | return (-1,()) if ($line !~ /^([0-9]+)\t([^\r]+)\r?\n$/); 169 | 170 | my ($id, $label) = ($1, $2); 171 | 172 | return ($id, '_Other') if ($label eq 'Other'); 173 | 174 | return ($id, $label) 175 | if (($label eq 'Cause-Effect(e1,e2)') || ($label eq 'Cause-Effect(e2,e1)') || 176 | ($label eq 'Component-Whole(e1,e2)') || ($label eq 'Component-Whole(e2,e1)') || 177 | ($label eq 'Content-Container(e1,e2)') || ($label eq 'Content-Container(e2,e1)') || 178 | ($label eq 'Entity-Destination(e1,e2)') || ($label eq 'Entity-Destination(e2,e1)') || 179 | ($label eq 'Entity-Origin(e1,e2)') || ($label eq 'Entity-Origin(e2,e1)') || 180 | ($label eq 'Instrument-Agency(e1,e2)') || ($label eq 'Instrument-Agency(e2,e1)') || 181 | ($label eq 'Member-Collection(e1,e2)') || ($label eq 'Member-Collection(e2,e1)') || 182 | ($label eq 'Message-Topic(e1,e2)') || ($label eq 'Message-Topic(e2,e1)') || 183 | ($label eq 'Product-Producer(e1,e2)') || ($label eq 'Product-Producer(e2,e1)')); 184 | 185 | return (-1, ()); 186 | } 187 | 188 | 189 | sub readFileIntoHash() { 190 | my ($fname, $ids) = @_; 191 | open(INPUT, $fname) or die 
"Failed to open $fname for text reading.\n";
192 | my $lineNo = 0;
# NOTE(review): restored the <INPUT> readline below; the HTML rendering of this
# dump stripped it, so "while ()" never read a single line from the file.
193 | while (<INPUT>) {
194 | $lineNo++;
195 | my ($id, $label) = &getIDandLabel($_);
196 | die "Bad file format on line $lineNo: '$_'\n" if ($id < 0);
197 | if (defined $$ids{$id}) {
198 | s/[\n\r]*$//;
199 | die "Bad file format on line $lineNo (ID $id is already defined): '$_'\n";
200 | }
201 | $$ids{$id} = $label;
202 | }
203 | close(INPUT) or die "Failed to close $fname.\n";
204 | return $lineNo;
205 | }
206 | 
207 | 
208 | # Prints the confusion matrix, coverage, accuracy, and per-relation / micro /
# macro P, R, F1 for one of the three evaluation settings; returns the macro F1.
sub evaluate() {
209 | my ($confMatrix, $allLabelsProposed, $allLabelsAnswer, $totalProposed, $totalAnswer, $useWrongDir) = @_;
210 | 
211 | ### 0. Create a merged list for the confusion matrix
212 | my @allLabels = ();
213 | &mergeLabelLists($allLabelsAnswer, $allLabelsProposed, \@allLabels);
214 | 
215 | ### 1. Print the confusion matrix heading
216 | print "Confusion matrix:\n";
217 | print " ";
218 | foreach my $label (@allLabels) {
219 | printf " %4s", &getShortRelName($label, $allLabelsAnswer);
220 | }
221 | print " <-- classified as\n";
222 | print " +";
223 | foreach my $label (@allLabels) {
224 | print "-----";
225 | }
226 | if ($useWrongDir) {
227 | print "+ -SUM- xDIRx skip ACTUAL\n";
228 | }
229 | else {
230 | print "+ -SUM- skip ACTUAL\n";
231 | }
232 | 
233 | ### 2. Print the rest of the confusion matrix
234 | my $freqCorrect = 0;
235 | my $ind = 1;
236 | my $otherSkipped = 0;
237 | foreach my $labelAnswer (sort keys %{$allLabelsAnswer}) {
238 | 
239 | ### 2.1. Output the short relation label
240 | printf " %4s |", &getShortRelName($labelAnswer, $allLabelsAnswer);
241 | 
242 | ### 2.2. Output a row of the confusion matrix
243 | my $sumProposed = 0;
244 | foreach my $labelProposed (@allLabels) {
245 | $$confMatrix{$labelProposed}{$labelAnswer} = 0
246 | if (!defined($$confMatrix{$labelProposed}{$labelAnswer}));
247 | printf "%4d ", $$confMatrix{$labelProposed}{$labelAnswer};
248 | $sumProposed += $$confMatrix{$labelProposed}{$labelAnswer};
249 | }
250 | 
251 | ### 2.3. 
Output the horizontal sums 252 | if ($useWrongDir) { 253 | my $ans = defined($$allLabelsAnswer{$labelAnswer}) ? $$allLabelsAnswer{$labelAnswer} : 0; 254 | $$confMatrix{'WRONG_DIR'}{$labelAnswer} = 0 if (!defined $$confMatrix{'WRONG_DIR'}{$labelAnswer}); 255 | printf "| %4d %4d %4d %6d\n", $sumProposed, $$confMatrix{'WRONG_DIR'}{$labelAnswer}, $ans - $sumProposed - $$confMatrix{'WRONG_DIR'}{$labelAnswer}, $ans; 256 | if ($labelAnswer eq '_Other') { 257 | $otherSkipped = $ans - $sumProposed - $$confMatrix{'WRONG_DIR'}{$labelAnswer}; 258 | } 259 | } 260 | else { 261 | my $ans = defined($$allLabelsAnswer{$labelAnswer}) ? $$allLabelsAnswer{$labelAnswer} : 0; 262 | printf "| %4d %4d %4d\n", $sumProposed, $ans - $sumProposed, $ans; 263 | if ($labelAnswer eq '_Other') { 264 | $otherSkipped = $ans - $sumProposed; 265 | } 266 | } 267 | 268 | $ind++; 269 | 270 | $$confMatrix{$labelAnswer}{$labelAnswer} = 0 271 | if (!defined($$confMatrix{$labelAnswer}{$labelAnswer})); 272 | $freqCorrect += $$confMatrix{$labelAnswer}{$labelAnswer}; 273 | } 274 | print " +"; 275 | foreach (@allLabels) { 276 | print "-----"; 277 | } 278 | print "+\n"; 279 | 280 | ### 3. Print the vertical sums 281 | print " -SUM- "; 282 | foreach my $labelProposed (@allLabels) { 283 | $$allLabelsProposed{$labelProposed} = 0 284 | if (!defined $$allLabelsProposed{$labelProposed}); 285 | printf "%4d ", $$allLabelsProposed{$labelProposed}; 286 | } 287 | if ($useWrongDir) { 288 | printf " %4d %4d %4d %6d\n\n", $totalProposed - $$allLabelsProposed{'WRONG_DIR'}, $$allLabelsProposed{'WRONG_DIR'}, $totalAnswer - $totalProposed, $totalAnswer; 289 | } 290 | else { 291 | printf " %4d %4d %4d\n\n", $totalProposed, $totalAnswer - $totalProposed, $totalAnswer; 292 | } 293 | 294 | ### 4. Output the coverage 295 | my $coverage = 100.0 * $totalProposed / $totalAnswer; 296 | printf "%s%d%s%d%s%5.2f%s", 'Coverage = ', $totalProposed, '/', $totalAnswer, ' = ', $coverage, "\%\n"; 297 | 298 | ### 5. 
Output the accuracy 299 | my $accuracy = 100.0 * $freqCorrect / $totalProposed; 300 | printf "%s%d%s%d%s%5.2f%s", 'Accuracy (calculated for the above confusion matrix) = ', $freqCorrect, '/', $totalProposed, ' = ', $accuracy, "\%\n"; 301 | 302 | ### 6. Output the accuracy considering all skipped to be wrong 303 | $accuracy = 100.0 * $freqCorrect / $totalAnswer; 304 | printf "%s%d%s%d%s%5.2f%s", 'Accuracy (considering all skipped examples as Wrong) = ', $freqCorrect, '/', $totalAnswer, ' = ', $accuracy, "\%\n"; 305 | 306 | ### 7. Calculate accuracy with all skipped examples considered Other 307 | my $accuracyWithOther = 100.0 * ($freqCorrect + $otherSkipped) / $totalAnswer; 308 | printf "%s%d%s%d%s%5.2f%s", 'Accuracy (considering all skipped examples as Other) = ', ($freqCorrect + $otherSkipped), '/', $totalAnswer, ' = ', $accuracyWithOther, "\%\n"; 309 | 310 | ### 8. Output P, R, F1 for each relation 311 | my ($macroP, $macroR, $macroF1) = (0, 0, 0); 312 | my ($microCorrect, $microProposed, $microAnswer) = (0, 0, 0); 313 | print "\nResults for the individual relations:\n"; 314 | foreach my $labelAnswer (sort keys %{$allLabelsAnswer}) { 315 | 316 | ### 8.1. Consider all wrong directionalities as wrong classification decisions 317 | my $wrongDirectionCnt = 0; 318 | if ($useWrongDir && defined $$confMatrix{'WRONG_DIR'}{$labelAnswer}) { 319 | $wrongDirectionCnt = $$confMatrix{'WRONG_DIR'}{$labelAnswer}; 320 | } 321 | 322 | ### 8.2. Prevent Perl complains about unintialized values 323 | if (!defined($$allLabelsProposed{$labelAnswer})) { 324 | $$allLabelsProposed{$labelAnswer} = 0; 325 | } 326 | 327 | ### 8.3. Calculate P/R/F1 328 | my $P = (0 == $$allLabelsProposed{$labelAnswer}) ? 0 329 | : 100.0 * $$confMatrix{$labelAnswer}{$labelAnswer} / ($$allLabelsProposed{$labelAnswer} + $wrongDirectionCnt); 330 | my $R = (0 == $$allLabelsAnswer{$labelAnswer}) ? 
0 331 | : 100.0 * $$confMatrix{$labelAnswer}{$labelAnswer} / $$allLabelsAnswer{$labelAnswer}; 332 | my $F1 = (0 == $P + $R) ? 0 : 2 * $P * $R / ($P + $R); 333 | 334 | ### 8.4. Output P/R/F1 335 | if ($useWrongDir) { 336 | printf "%25s%s%4d%s(%4d +%4d)%s%6.2f", $labelAnswer, 337 | " : P = ", $$confMatrix{$labelAnswer}{$labelAnswer}, '/', $$allLabelsProposed{$labelAnswer}, $wrongDirectionCnt, ' = ', $P; 338 | } 339 | else { 340 | printf "%25s%s%4d%s%4d%s%6.2f", $labelAnswer, 341 | " : P = ", $$confMatrix{$labelAnswer}{$labelAnswer}, '/', ($$allLabelsProposed{$labelAnswer} + $wrongDirectionCnt), ' = ', $P; 342 | } 343 | printf"%s%4d%s%4d%s%6.2f%s%6.2f%s\n", 344 | "% R = ", $$confMatrix{$labelAnswer}{$labelAnswer}, '/', $$allLabelsAnswer{$labelAnswer}, ' = ', $R, 345 | "% F1 = ", $F1, '%'; 346 | 347 | ### 8.5. Accumulate statistics for micro/macro-averaging 348 | if ($labelAnswer ne '_Other') { 349 | $macroP += $P; 350 | $macroR += $R; 351 | $macroF1 += $F1; 352 | $microCorrect += $$confMatrix{$labelAnswer}{$labelAnswer}; 353 | $microProposed += $$allLabelsProposed{$labelAnswer} + $wrongDirectionCnt; 354 | $microAnswer += $$allLabelsAnswer{$labelAnswer}; 355 | } 356 | } 357 | 358 | ### 9. Output the micro-averaged P, R, F1 359 | my $microP = (0 == $microProposed) ? 0 : 100.0 * $microCorrect / $microProposed; 360 | my $microR = (0 == $microAnswer) ? 0 : 100.0 * $microCorrect / $microAnswer; 361 | my $microF1 = (0 == $microP + $microR) ? 0 : 2.0 * $microP * $microR / ($microP + $microR); 362 | print "\nMicro-averaged result (excluding Other):\n"; 363 | printf "%s%4d%s%4d%s%6.2f%s%4d%s%4d%s%6.2f%s%6.2f%s\n", 364 | "P = ", $microCorrect, '/', $microProposed, ' = ', $microP, 365 | "% R = ", $microCorrect, '/', $microAnswer, ' = ', $microR, 366 | "% F1 = ", $microF1, '%'; 367 | 368 | ### 10. 
Output the macro-averaged P, R, F1 369 | my $distinctLabelsCnt = keys %{$allLabelsAnswer}; 370 | ## -1, if '_Other' exists 371 | $distinctLabelsCnt-- if (defined $$allLabelsAnswer{'_Other'}); 372 | 373 | $macroP /= $distinctLabelsCnt; # first divide by the number of non-Other categories 374 | $macroR /= $distinctLabelsCnt; 375 | $macroF1 /= $distinctLabelsCnt; 376 | print "\nMACRO-averaged result (excluding Other):\n"; 377 | printf "%s%6.2f%s%6.2f%s%6.2f%s\n\n\n\n", "P = ", $macroP, "%\tR = ", $macroR, "%\tF1 = ", $macroF1, '%'; 378 | 379 | ### 11. Return the official score 380 | return $macroF1; 381 | } 382 | 383 | 384 | sub getShortRelName() { 385 | my ($relName, $hashToCheck) = @_; 386 | return '_O_' if ($relName eq '_Other'); 387 | die "relName='$relName'" if ($relName !~ /^(.)[^\-]+\-(.)/); 388 | my $result = (defined $$hashToCheck{$relName}) ? "$1\-$2" : "*$1$2"; 389 | if ($relName =~ /\(e([12])/) { 390 | $result .= $1; 391 | } 392 | return $result; 393 | } 394 | 395 | sub mergeLabelLists() { 396 | my ($hash1, $hash2, $mergedList) = @_; 397 | foreach my $key (sort keys %{$hash1}) { 398 | push @{$mergedList}, $key if ($key ne 'WRONG_DIR'); 399 | } 400 | foreach my $key (sort keys %{$hash2}) { 401 | push @{$mergedList}, $key if (($key ne 'WRONG_DIR') && !defined($$hash1{$key})); 402 | } 403 | } 404 | -------------------------------------------------------------------------------- /analysis_util.py: -------------------------------------------------------------------------------- 1 | import re 2 | import json 3 | from os import listdir 4 | from os.path import join, isdir, exists, basename 5 | from subprocess import run, PIPE 6 | 7 | import pandas as pd 8 | from sklearn.preprocessing import label_binarize 9 | 10 | 11 | def read_log_file(experiment_dir, log_file_name='logs.jsonl'): 12 | logs = [] 13 | with open(join(experiment_dir, log_file_name), 'r') as f: 14 | for epoch, log in enumerate(f, start=1): 15 | epoch_log = json.loads(log) 16 | 17 | epoch_log['epoch'] 
= epoch 18 | 19 | dev_micro_f1 = epoch_log['dev_micro_f1'] 20 | epoch_log['dev_micro_f1'] = dev_micro_f1 * 100. 21 | 22 | dev_macro_f1 = epoch_log['dev_macro_f1'] 23 | epoch_log['dev_macro_f1'] = dev_macro_f1 * 100. 24 | 25 | logs.append(epoch_log) 26 | 27 | return logs 28 | 29 | 30 | def read_config_file(experiment_dir, config_file_name='config.jsonl'): 31 | with open(join(experiment_dir, config_file_name), 'r') as f: 32 | return json.loads(next(f)) 33 | 34 | 35 | def read_experiment_logs(experiments_dir, filter_empty_logs=True): 36 | def list_experiment_dirs(path): 37 | dirs = [join(path, d) for d in listdir(path) if isdir(join(path, d))] 38 | return [d for d in dirs if exists(join(d, 'logs.jsonl'))] 39 | 40 | experiment_dirs = list_experiment_dirs(experiments_dir) 41 | 42 | experiments = {} 43 | for experiment_dir in experiment_dirs: 44 | experiment_name = basename(experiment_dir) 45 | 46 | config = read_config_file(experiment_dir) 47 | logs = read_log_file(experiment_dir) 48 | 49 | if filter_empty_logs and not logs: 50 | continue 51 | 52 | experiments[experiment_name] = { 53 | 'experiment_dir': experiment_dir, 54 | 'config': config, 55 | 'logs': logs 56 | } 57 | 58 | return experiments 59 | 60 | 61 | def add_official_scorer_metrics(experiments, path_to_eval_script, path_to_test_answers): 62 | for experiment_name, experiment in experiments.items(): 63 | experiment_dir = experiment['experiment_dir'] 64 | config = experiment['config'] 65 | logs = experiment['logs'] 66 | 67 | if config['dataset'] == 'semeval_2010_task8': 68 | dev_id_labels_true_file = join(experiment_dir, 'dev_labels.txt') 69 | test_id_labels_true_file = path_to_test_answers 70 | 71 | for log in logs: 72 | epoch = log['epoch'] 73 | 74 | dev_id_labels_pred_file = join(experiment_dir, f'predictions/dev/predictions_epoch_{epoch}.txt') 75 | test_id_labels_pred_file = join(experiment_dir, f'predictions/test/predictions_epoch_{epoch}.txt') 76 | 77 | dev_precision_official, dev_recall_official, 
dev_f1_official = \ 78 | evaluate_semeval2010_task8(id_labels_true_file=dev_id_labels_true_file, 79 | id_labels_pred_file=dev_id_labels_pred_file, 80 | eval_script=path_to_eval_script) 81 | 82 | test_precision_official, test_recall_official, test_f1_official = \ 83 | evaluate_semeval2010_task8(id_labels_true_file=test_id_labels_true_file, 84 | id_labels_pred_file=test_id_labels_pred_file, 85 | eval_script=path_to_eval_script) 86 | 87 | log['dev_precision_official'] = dev_precision_official 88 | log['dev_recall_official'] = dev_recall_official 89 | log['dev_f1_official'] = dev_f1_official 90 | 91 | log['test_precision_official'] = test_precision_official 92 | log['test_recall_official'] = test_recall_official 93 | log['test_f1_official'] = test_f1_official 94 | 95 | return experiments 96 | 97 | 98 | PRECISION_REGEX = r'P =\s*([0-9]{1,2}\.[0-9]{2})%' 99 | RECALL_REGEX = r'R =\s*([0-9]{1,2}\.[0-9]{2})%' 100 | F1_REGEX = r'F1 =\s*([0-9]{1,2}\.[0-9]{2})%' 101 | 102 | OFFICIAL_RESULT_REGEX = r'\(9\+1\)-WAY EVALUATION TAKING DIRECTIONALITY INTO ACCOUNT -- OFFICIAL' 103 | RESULT_LINE_REGEX = r'MACRO-averaged result \(excluding Other\):\n((.*\n){1})' 104 | 105 | def evaluate_semeval2010_task8(id_labels_true_file, id_labels_pred_file, eval_script): 106 | p = run([eval_script, id_labels_true_file, id_labels_pred_file], stdout=PIPE, encoding='utf-8') 107 | report = p.stdout 108 | 109 | official_result_match = re.search(OFFICIAL_RESULT_REGEX, report) 110 | 111 | if official_result_match: 112 | result_start = official_result_match.span(0)[1] 113 | match = re.search(RESULT_LINE_REGEX, report[result_start:]) 114 | 115 | precision = None 116 | recall = None 117 | f1 = None 118 | if match: 119 | result_line = match.group(1) 120 | precision_match = re.search(PRECISION_REGEX, result_line) 121 | recall_match = re.search(RECALL_REGEX, result_line) 122 | f1_match = re.search(F1_REGEX, result_line) 123 | 124 | if precision_match: 125 | precision = float(precision_match.group(1)) 126 | if 
recall_match: 127 | recall = float(recall_match.group(1)) 128 | if f1_match: 129 | f1 = float(f1_match.group(1)) 130 | 131 | return precision, recall, f1 132 | 133 | 134 | def experiments_to_dataframe(experiments): 135 | all_logs = [] 136 | all_configs = [] 137 | for experiment_name, experiment in experiments.items(): 138 | config = experiment['config'] 139 | logs = experiment['logs'] 140 | 141 | config['experiment'] = experiment_name 142 | all_configs.append(config) 143 | 144 | for log in logs: 145 | log['experiment'] = experiment_name 146 | all_logs.extend(logs) 147 | 148 | return pd.DataFrame(all_logs), pd.DataFrame(all_configs) 149 | 150 | 151 | def load_experiments_df(log_dir): 152 | experiment_logs = read_experiment_logs(log_dir) 153 | df_logs, df_configs = experiments_to_dataframe(experiment_logs) 154 | experiments_df = df_configs.set_index('time').join(df_logs.set_index('experiment')).reset_index(drop=True) 155 | return experiments_df 156 | -------------------------------------------------------------------------------- /dataset_converter.py: -------------------------------------------------------------------------------- 1 | """ 2 | Converts various datasets into a jsonl format. 3 | The following datasets can be converted: 4 | Semeval 2010 Task 8: 5 | Paper: http://www.aclweb.org/anthology/S10-1006 6 | Download: http://www.kozareva.com/downloads.html 7 | KBP37: 8 | Paper: https://arxiv.org/abs/1508.01006 9 | Download: https://github.com/zhangdongxu/kbp37 10 | TACRED: 11 | Paper: https://nlp.stanford.edu/pubs/zhang2017tacred.pdf 12 | Download: LDC publication pending 13 | 14 | 15 | Exemplary conversion for the Semeval 2010 Task 8 Format: 16 | 9 "The lawsonite was contained in a platinum crucible and the counter-weight was a plastic crucible with metal pieces." 
17 | Content-Container(e1,e2) 18 | Comment: prototypical example 19 | 20 | JSONL output Format: 21 | { 22 | "id": "9", 23 | "tokens": ["The", "lawsonite", "was", "contained", "in", "a", "platinum", "crucible", "and", "the", "counter-weight", "was", "a", "plastic", "crucible", "with", "metal", "pieces", "."], 24 | "label": "Content-Container(e1,e2)", 25 | "entities": [[1, 2], [6, 8]] 26 | } 27 | """ 28 | 29 | import argparse 30 | import json 31 | import os 32 | from operator import itemgetter 33 | 34 | import numpy as np 35 | from sklearn.model_selection import train_test_split 36 | 37 | from datasets import SemEval2010Task8 38 | from utils import make_path 39 | 40 | SUPPORTED_DATASETS = ['semeval', 'kbp37', 'tacred'] 41 | 42 | 43 | class DatasetConverter: 44 | 45 | def __init__(self, dataset, dataset_dir, output_dir, subsample): 46 | 47 | self.dataset = dataset 48 | self.subsample = subsample 49 | 50 | if dataset == "semeval": 51 | self.input_train_file = os.path.join(dataset_dir, "SemEval2010_task8_training", "TRAIN_FILE.TXT") 52 | self.input_test_file = os.path.join(dataset_dir, "SemEval2010_task8_testing_keys", "TEST_FILE_FULL.TXT") 53 | self.input_dev_file = None 54 | elif dataset == "kbp37": 55 | self.input_train_file = os.path.join(args.dataset_dir, "train.txt") 56 | self.input_test_file = os.path.join(args.dataset_dir, "test.txt") 57 | self.input_dev_file = os.path.join(args.dataset_dir, "dev.txt") 58 | elif dataset == "tacred": 59 | path_to_json_files = os.path.join(dataset_dir, "data", "json") 60 | self.input_train_file = os.path.join(path_to_json_files, "train.json") 61 | self.input_test_file = os.path.join(path_to_json_files, "test.json") 62 | self.input_dev_file = os.path.join(path_to_json_files, "dev.json") 63 | else: 64 | raise RuntimeError("Only the following datasets are supported: " + ", ".join(SUPPORTED_DATASETS)) 65 | 66 | self.output_dir = output_dir 67 | 68 | assert os.path.exists(self.input_train_file), "Train file not found: 
{}".format(self.input_train_file) 69 | if not subsample: 70 | self.output_train_file = os.path.join(output_dir, "train.jsonl") 71 | else: 72 | self.masking_modes = [None, 'grammar', 'ner', 'grammar_and_ner', 'unk', 'unk_w_position'] 73 | 74 | assert os.path.exists(self.input_test_file), "Test file not found: {}".format(self.input_test_file) 75 | self.output_test_file = os.path.join(output_dir, "test.jsonl") 76 | 77 | if self.input_dev_file: 78 | assert os.path.exists(self.input_dev_file), "Test file not found: {}".format(self.input_dev_file) 79 | self.output_dev_file = os.path.join(output_dir, "dev.jsonl") 80 | else: 81 | self.output_dev_file = None 82 | 83 | self.glove_mapping = { 84 | '-LRB-': '(', 85 | '-RRB-': ')', 86 | '-LSB-': '[', 87 | '-RSB-': ']', 88 | '-LCB-': '{', 89 | '-RCB-': '}' 90 | } 91 | 92 | def run(self): 93 | print("Converting dataset to jsonl") 94 | os.makedirs(self.output_dir, exist_ok=True) 95 | 96 | if not self.subsample: 97 | self._run_normally() 98 | else: 99 | self._run_subsampling() 100 | 101 | def _run_normally(self): 102 | # Convert the dev and test set 103 | if self.dataset in ['semeval', 'kbp37']: 104 | self._convert_semeval_format_file(self.input_test_file, self.output_test_file) 105 | if self.output_dev_file: 106 | self._convert_semeval_format_file(self.input_dev_file, self.output_dev_file) 107 | elif self.dataset == 'tacred': 108 | self._convert_tacred_format_file(self.input_test_file, self.output_test_file) 109 | self._convert_tacred_format_file(self.input_dev_file, self.output_dev_file) 110 | else: 111 | raise RuntimeError("Unexpected dataset: " + self.dataset) 112 | 113 | if self.dataset in ['semeval', 'kbp37']: 114 | self._convert_semeval_format_file(self.input_train_file, self.output_train_file) 115 | elif self.dataset == 'tacred': 116 | self._convert_tacred_format_file(self.input_train_file, self.output_train_file) 117 | else: 118 | raise RuntimeError("Unexpected dataset: " + self.dataset) 119 | 120 | def 
_run_subsampling(self): 121 | train_examples = list(self._read_tacred_file(self.input_train_file)) 122 | train_labels = list(map(itemgetter('label'), train_examples)) 123 | dev_examples = list(self._read_tacred_file(self.input_dev_file)) 124 | test_examples = list(self._read_tacred_file(self.input_test_file)) 125 | 126 | for sample_ratio in np.linspace(.1, 1.0, 10): 127 | sampling_dir = os.path.join(self.output_dir, str(int(sample_ratio * 100))) 128 | subsampled_ids_file = os.path.join(sampling_dir, "sentence_ids") 129 | 130 | if self.dataset == 'tacred': 131 | if sample_ratio == 1.0: 132 | subsampled_examples = train_examples 133 | else: 134 | subsampled_examples, _ = train_test_split(train_examples, 135 | train_size=sample_ratio, 136 | stratify=train_labels) 137 | else: 138 | raise RuntimeError("Unsupported dataset: " + self.dataset) 139 | 140 | with open(make_path(subsampled_ids_file), 'w') as ids_file: 141 | for example in subsampled_examples: 142 | ids_file.write(str(example['id']) + "\n") 143 | 144 | for masking_mode in self.masking_modes: 145 | masking_mode_name = 'unmasked' if masking_mode is None else masking_mode 146 | masking_dir = os.path.join(sampling_dir, masking_mode_name) 147 | 148 | print("Creating train set with sampling ratio {:.1f} and masking mode {}" 149 | .format(sample_ratio, masking_mode_name)) 150 | output_train_file = os.path.join(masking_dir, "train.jsonl") 151 | 152 | if masking_mode is None: 153 | masked_examples = subsampled_examples 154 | else: 155 | masked_examples = [SemEval2010Task8.apply_masking_mode(example, masking_mode) 156 | for example in subsampled_examples] 157 | 158 | with open(make_path(output_train_file), 'w') as output_file: 159 | for example in masked_examples: 160 | output_file.write(json.dumps(example) + "\n") 161 | 162 | # Write dev set with different masking modes 163 | for masking_mode in self.masking_modes: 164 | masking_mode_name = 'unmasked' if masking_mode is None else masking_mode 165 | masking_dir = 
os.path.join(self.output_dir, masking_mode_name) 166 | 167 | print("Creating dev and test set with masking mode {}".format(masking_mode_name)) 168 | output_dev_file = os.path.join(masking_dir, "dev.jsonl") 169 | output_test_file = os.path.join(masking_dir, "test.jsonl") 170 | 171 | if masking_mode is None: 172 | masked_dev_examples = dev_examples 173 | masked_test_examples = test_examples 174 | else: 175 | masked_dev_examples = [SemEval2010Task8.apply_masking_mode(example, masking_mode) 176 | for example in dev_examples] 177 | masked_test_examples = [SemEval2010Task8.apply_masking_mode(example, masking_mode) 178 | for example in test_examples] 179 | 180 | with open(make_path(output_dev_file), 'w') as output_file: 181 | for example in masked_dev_examples: 182 | output_file.write(json.dumps(example) + "\n") 183 | 184 | with open(make_path(output_test_file), 'w') as output_file: 185 | for example in masked_test_examples: 186 | output_file.write(json.dumps(example) + "\n") 187 | 188 | def _convert_semeval_format_file(self, input_path, output_path, sample_ratio=None): 189 | with open(input_path, mode="r") as input_file, open(output_path, mode="w") as output_file: 190 | while True: 191 | tokens_line = input_file.readline() 192 | if not tokens_line: 193 | break 194 | 195 | (index, tokens_string) = tokens_line.split('\t', maxsplit=1) # separate index and tokens 196 | tokens_string = tokens_string.strip()[1:-1] # remove quotation marks 197 | tokens = self._split_tokens(tokens_string) 198 | 199 | tokens, first_args, second_args = self._parse_args(tokens) 200 | 201 | relation_label = input_file.readline().strip() # Remove trailing newline 202 | _ = input_file.readline() # Comment string 203 | _ = input_file.readline() # Empty line separator 204 | 205 | example = { 206 | "id": index, 207 | "tokens": tokens, 208 | "label": relation_label, 209 | "entities": [first_args, second_args] 210 | } 211 | 212 | output_file.write(json.dumps(example) + "\n") 213 | 214 | @staticmethod 215 | 
@staticmethod
def _split_tokens(tokens_string):
    """Whitespace-tokenize a sentence, first padding punctuation with spaces
    so that e.g. ``"dog."`` becomes the two tokens ``["dog", "."]``.
    """
    # NOTE(review): the original code called .replace("", " ") four times.
    # In Python, replacing the EMPTY string inserts a space between every
    # character and destroys tokenization. Those four were almost certainly
    # curly quotation marks lost during text extraction -- restored here as
    # the four common curly quotes. TODO confirm against the upstream
    # converter / dataset files.
    prepared_string = (
        tokens_string
        .replace(".", " . ")
        .replace("\u201c", " \u201c ")   # left double quote
        .replace("\u201d", " \u201d ")   # right double quote
        .replace("\u2018", " \u2018 ")   # left single quote
        .replace("\u2019", " \u2019 ")   # right single quote
        .replace(",", " , ")
        .replace("'", " ' ")
        .replace("!", " ! ")
        .replace("?", " ? ")
    )
    return [token.strip() for token in prepared_string.split(" ") if len(token.strip()) > 0]
def _read_tacred_file(self, input_file):
    """Yield jsonl-style examples converted from a TACRED json file.

    The input is a json array of examples. ``json.load`` is used instead of
    the original ``json.loads(input_file.readline())`` so that both
    single-line and pretty-printed (multi-line) json files are handled.

    Yields:
        dict with keys ``id``, ``tokens``, ``label``, ``entities``
        (pair of (start, end) token offsets, end exclusive), ``grammar``
        and ``type``.
    """
    with open(input_file, 'r') as input_file:
        input_examples = json.load(input_file)
        for input_example in input_examples:
            tokens = input_example['token']
            # TACRED spans are inclusive; convert to exclusive end offsets
            subj_offsets = (input_example['subj_start'], input_example['subj_end'] + 1)
            obj_offsets = (input_example['obj_start'], input_example['obj_end'] + 1)

            tokens = self.normalize_glove_tokens(tokens)

            output_example = {
                "id": input_example['id'],
                "tokens": tokens,
                "label": input_example['relation'],
                "entities": (subj_offsets, obj_offsets),
                "grammar": ('SUBJ', 'OBJ'),
                "type": (input_example['subj_type'], input_example['obj_type'])
            }

            yield output_example
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('dataset_dir', type=str, help="The root directory of the dataset")
    parser.add_argument('output_dir', type=str, help="An output directory of jsonl files")
    parser.add_argument('--dataset', type=str, default="semeval", help="Either semeval, kbp37 or tacred")
    # argparse applies %-formatting to help strings, so a literal percent
    # sign must be escaped as '%%' -- the original bare '%' made
    # `--help` raise a ValueError.
    parser.add_argument('--subsample', action='store_true',
                        help="Generate subsampled versions of the dataset with"
                             " splits from 10%% to 100%% in 10%% steps")

    args = parser.parse_args()
    print(args)
    main(args)
@staticmethod
def _subsample(sentences, entities, labels, ids, negative_label, subsampling_rate):
    """Randomly drop negative examples, keeping each with probability
    ``subsampling_rate``; positive examples are always kept.

    Returns the four parallel sequences re-zipped after filtering.
    """
    kept = [
        example
        for example in zip(sentences, entities, labels, ids)
        # short-circuit: random() is only drawn for negative examples
        if example[2] != negative_label or random() < subsampling_rate
    ]
    return zip(*kept)
@staticmethod
def _load_from_jsonl(path_to_file, is_test=True, masking_mode=None):
    """Load a jsonl dataset file into parallel lists.

    Args:
        path_to_file: path to a jsonl file with one example object per line.
        is_test: when True, labels are not collected (test files carry none).
        masking_mode: optional entity-masking mode applied to every example.

    Returns:
        (sentences, entities, labels, ids) -- labels is empty when is_test.
    """
    sentences = []
    entities = []
    labels = []
    ids = []
    with open(path_to_file) as f:
        # stream the file instead of materializing it with f.readlines()
        for line in f:
            if not line.strip():
                continue  # tolerate blank / trailing lines
            example = json.loads(line)

            if masking_mode is not None:
                example = SemEval2010Task8.apply_masking_mode(example, masking_mode)

            sentences.append(example['tokens'])
            entities.append(example['entities'])
            if not is_test:
                labels.append(example['label'])
            ids.append(example['id'])

    return sentences, entities, labels, ids
labels_train, ids_train, negative_label, subsampling_rate) 121 | 122 | if train_set_limit: 123 | train_set = list(zip(sentences_train, entities_train, labels_train, ids_train))[:train_set_limit] 124 | sentences_train, entities_train, labels_train, ids_train = zip(*train_set) 125 | 126 | if dev_set_limit: 127 | dev_set = list(zip(sentences_dev, entities_dev, labels_dev, ids_dev))[:dev_set_limit] 128 | sentences_dev, entities_dev, labels_dev, ids_dev = zip(*dev_set) 129 | 130 | if verbose: 131 | train_label_counter = Counter(labels_train) 132 | print() 133 | print("Train set size: {}".format(len(ids_train))) 134 | print("Train set distribution:") 135 | for (label, count) in train_label_counter.items(): 136 | print("{}: {}".format(label, count)) 137 | print() 138 | 139 | if dev_set_limit: 140 | print() 141 | print("Dev set size: {}".format(len(ids_dev))) 142 | print() 143 | 144 | if not skip_test_set: 145 | sentences_test, entities_test, labels_test, ids_test = \ 146 | SemEval2010Task8._load_from_jsonl(join(path_to_data, test_file), is_test=True, masking_mode=masking_mode) 147 | 148 | return (sentences_train, entities_train, labels_train, ids_train),\ 149 | (sentences_dev, entities_dev, labels_dev, ids_dev),\ 150 | (sentences_test, entities_test, labels_test, ids_test) 151 | else: 152 | return (sentences_train, entities_train, labels_train, ids_train), \ 153 | (sentences_dev, entities_dev, labels_dev, ids_dev) 154 | 155 | 156 | @staticmethod 157 | def encode(*splits, text_encoder, label_encoder): 158 | encoded_splits = [] 159 | for split in splits: 160 | fields = [] 161 | # encode sentence tokens 162 | fields.append(text_encoder.encode(split[0], special_tokens=SemEval2010Task8.MASKED_ENTITY_TOKENS)) 163 | 164 | # encode entities 165 | encoded_entities = [] 166 | for sentence, entities in zip(split[0], split[1]): 167 | encoded_entity = [] 168 | for start, end in entities: 169 | encoded_entity.append(text_encoder.encode([sentence[start: end]], 
special_tokens=SemEval2010Task8.MASKED_ENTITY_TOKENS)[0]) 170 | encoded_entities.append(encoded_entity) 171 | fields.append(encoded_entities) 172 | 173 | # encode labels, if present 174 | encoded_labels = [] 175 | for label in split[2]: 176 | if isinstance(label, str): 177 | encoded_labels.append(label_encoder.add_item(label)) 178 | else: 179 | encoded_labels.append(label) 180 | fields.append(np.asarray(encoded_labels, dtype=np.int32)) 181 | 182 | # pass through ids 183 | fields.append(split[3]) 184 | 185 | # Add a none value for entity ids of datasets, that are not evaluated on a bag-level 186 | fields.append(None) 187 | 188 | encoded_splits.append(fields) 189 | return encoded_splits 190 | 191 | def transform(*splits, text_encoder, max_length, n_ctx, format='entities_first'): 192 | # TODO: add different input format 193 | # TODO: maybe max_length should be different for sentence and entities 194 | 195 | def transform(sentences, entities): 196 | batch_size = len(sentences) 197 | 198 | batch_indices = np.zeros((batch_size, 1, n_ctx, 2), dtype=np.int32) 199 | batch_mask = np.zeros((batch_size, 1, n_ctx), dtype=np.float32) 200 | 201 | encoder = text_encoder.encoder 202 | start = encoder['_start_'] 203 | delimiter = encoder['_delimiter_'] 204 | delimiter2 = encoder['_delimiter2_'] 205 | clf_token = encoder['_classify_'] 206 | 207 | n_vocab = len(encoder) 208 | 209 | for i, (sentence, entities), in enumerate(zip(sentences, entities)): 210 | input_sentence = [start] 211 | 212 | for entity in entities: 213 | input_sentence.extend(entity[:max_length]) 214 | input_sentence.append(delimiter) 215 | 216 | input_sentence[-1] = delimiter2 217 | 218 | input_sentence = input_sentence + sentence[:max_length] + [clf_token] 219 | input_sentence_length = len(input_sentence) 220 | 221 | batch_indices[i, 0, :input_sentence_length, 0] = input_sentence 222 | batch_mask[i, 0, :input_sentence_length] = 1 223 | 224 | # Position information that is added to the input embeddings in the 
@staticmethod
def apply_masking_mode(example, masking_mode):
    """Return a copy of *example* with both entity spans replaced by mask tokens.

    Args:
        example: dict with 'tokens' and 'entities'; 'grammar' and 'type'
            fields are required by the corresponding masking modes.
        masking_mode: one of 'grammar', 'ner', 'grammar_and_ner', 'unk',
            'unk_w_position' (case-insensitive).

    Raises:
        ValueError: if the mode is unknown, or if the example lacks the
            field(s) the mode needs. (Resolves the old "that's kind of
            unsafe" TODO: previously a missing field caused a confusing
            NameError for an unbound local.)
    """
    masking_mode = masking_mode.lower()

    grammar_type = example.get('grammar')
    ner_type = example.get('type')

    if masking_mode == 'grammar':
        if grammar_type is None:
            raise ValueError("Masking mode 'grammar' requires a 'grammar' field in the example.")
        first_entity_replace, second_entity_replace = [f'_{g}_' for g in grammar_type]
    elif masking_mode == 'ner':
        if ner_type is None:
            raise ValueError("Masking mode 'ner' requires a 'type' field in the example.")
        first_entity_replace, second_entity_replace = [f'_{n}_' for n in ner_type]
    elif masking_mode == 'grammar_and_ner':
        if grammar_type is None or ner_type is None:
            raise ValueError("Masking mode 'grammar_and_ner' requires 'grammar' and 'type' fields in the example.")
        first_entity_replace, second_entity_replace = [f'{g}-{n}' for g, n in zip(grammar_type, ner_type)]
    elif masking_mode == 'unk':
        first_entity_replace, second_entity_replace = SemEval2010Task8.UNK_TYPES[0], SemEval2010Task8.UNK_TYPES[0]
    elif masking_mode == 'unk_w_position':
        first_entity_replace, second_entity_replace = SemEval2010Task8.UNK_POS_TYPES
    else:
        raise ValueError(f"Masking mode '{masking_mode}' not supported.")

    # copy so the caller's example dict is not mutated
    example = example.copy()
    example['tokens'], example['entities'] = SemEval2010Task8._mask_entities(
        example['tokens'], example['entities'], first_entity_replace, second_entity_replace)

    return example
@staticmethod
def _write_pred_file(path_to_file, labels_pred, ids, log_with_id=True):
    """Write one prediction per line to *path_to_file*.

    With ``log_with_id`` each line is ``<id>\t<prediction>``; otherwise
    it is just the prediction. Parent directories are created via
    ``make_path``.
    """
    with open(make_path(path_to_file), 'w') as pred_f:
        if log_with_id:
            lines = ('{}\t{}\n'.format(id_, prediction)
                     for id_, prediction in zip(ids, labels_pred))
        else:
            lines = ('{}\n'.format(prediction) for prediction in labels_pred)
        pred_f.writelines(lines)
def log_test_predictions(self, epoch, labels_pred, ids, log_with_id=True):
    """Write the test-set predictions for *epoch* under predictions/test/."""
    test_pred_file = join(self._base_path, 'predictions', 'test', 'predictions_epoch_{}.txt'.format(epoch))
    self._write_pred_file(test_pred_file, labels_pred, ids, log_with_id=log_with_id)

def log_test_pr_curve(self, epoch, entity_ids_test, labels_test, probs_test, negative_label_idx, label_encoder=None):
    """Compute and log a bag-level precision/recall curve for the test set.

    Mentions sharing the same (e1, e2) entity pair form a "bag"; mention
    probabilities are aggregated per bag with max. For every bag and every
    positive relation one scored prediction is created; predictions are
    ranked by score to trace the PR curve. Writes a metrics jsonl file
    (P/R AUC, Precision@{100,200,500}), a per-prediction tsv, and the raw
    curve arrays as .npy files under predictions/test/.

    NOTE(review): labels_test and probs_test are indexed with lists and
    probs_test is reduced with np.max(axis=0) -- they are assumed to be
    numpy arrays of shape (num_mentions,) and (num_mentions, num_labels)
    respectively; confirm against the caller.
    """
    # Bag identity is the concatenated entity-id pair.
    bag_ids = [e1 + '_' + e2 for e1, e2 in entity_ids_test]

    # Map each bag id to the set of mention indices belonging to it.
    bag_to_mention_mapping = defaultdict(set)
    for idx, bag_id in enumerate(bag_ids):
        bag_to_mention_mapping[bag_id].add(idx)

    num_relation_facts = 0
    Prediction = namedtuple('Prediction', ['score', 'is_correct', 'bag_id', 'predicted_label_idx', 'bag_label_idxs',
                                           'predicted_label', 'bag_labels', 'bag_size'])
    predictions = []
    for bag_id, mention_idxs in bag_to_mention_mapping.items():
        # Aggregate and count the labels per bag without the negative label
        bag_labels = set(labels_test[list(mention_idxs)])
        bag_labels.discard(negative_label_idx)
        num_relation_facts += len(bag_labels)
        bag_size = len(mention_idxs)

        # Use max to aggregate the mention probabilities in the bag
        mention_probs = probs_test[list(mention_idxs)]
        bag_probs = np.max(mention_probs, axis=0)

        # For each bag and positive relation create a prediction
        for relation_idx, relation_prob in enumerate(bag_probs):
            if relation_idx == negative_label_idx:
                continue

            # Human-readable label strings for the tsv output; a bag with no
            # positive labels is reported as 'NA'.
            if len(bag_labels) == 0:
                bag_labels_str = 'NA'
                bag_label_idxs_str = negative_label_idx
            else:
                if label_encoder:
                    decoded_bag_labels = [label_encoder.get_item_for_index(idx) for idx in bag_labels]
                    bag_labels_str = ', '.join(decoded_bag_labels)
                else:
                    bag_labels_str = ''

                bag_label_idxs_str = ', '.join([str(lbl) for lbl in bag_labels])

            if label_encoder:
                predicted_label_str = label_encoder.get_item_for_index(relation_idx)
            else:
                predicted_label_str = ""
            predicted_label_idx_str = str(relation_idx)

            # A prediction counts as correct if the relation truly holds in the bag.
            is_correct = relation_idx in bag_labels
            predictions.append(Prediction(score=relation_prob,
                                          is_correct=is_correct,
                                          bag_id=bag_id,
                                          predicted_label_idx=predicted_label_idx_str,
                                          bag_label_idxs=bag_label_idxs_str,
                                          predicted_label=predicted_label_str,
                                          bag_labels=bag_labels_str,
                                          bag_size=bag_size))

    # Rank all predictions by confidence, highest first.
    predictions = sorted(predictions, key=attrgetter('score'), reverse=True)

    # Walk down the ranking, accumulating precision/recall at every cutoff.
    correct = 0
    precision_values = []
    recall_values = []
    for idx, prediction in enumerate(predictions):
        if prediction.is_correct:
            correct += 1
        precision_values.append(correct / (idx+1))
        recall_values.append(correct / num_relation_facts)

    def precision_at(n):
        # Precision (in percent) within the top-n ranked predictions.
        return (sum([prediction.is_correct for prediction in predictions[:n]]) / n) * 100

    pr_metrics = {
        'P/R AUC': auc(x=recall_values, y=precision_values),
        'Precision@100': precision_at(100),
        'Precision@200': precision_at(200),
        'Precision@500': precision_at(500)
    }

    predictions_dir = join(self._base_path, 'predictions', 'test')
    pr_metrics_file_path = join(predictions_dir, 'pr_metrics_epoch_{}.jsonl'.format(epoch))
    with open(make_path(pr_metrics_file_path), 'w', encoding='utf-8') as pr_metrics_file:
        pr_metrics_file.write(json.dumps(pr_metrics) + '\n')

    # Full ranked prediction list as tsv, one row per (bag, relation) pair.
    pr_predictions_file = join(predictions_dir, 'predictions_pr_curve_epoch_{}.tsv'.format(epoch))
    with open(make_path(pr_predictions_file), 'w') as pr_pred_file:
        tuple_attrs = ['score', 'is_correct', 'bag_id', 'predicted_label_idx',
                       'bag_label_idxs', 'predicted_label', 'bag_labels', 'bag_size']
        pr_pred_file.write("\t".join(tuple_attrs) + "\n")
        for prediction in predictions:
            pred_values = attrgetter(*tuple_attrs)(prediction)
            pred_values = [str(val) for val in pred_values]
            pr_pred_file.write("\t".join(pred_values) + "\n")

    # Raw curve points (y = precision, x = recall) for later plotting.
    np.save(join(predictions_dir, 'pr_curve_y_epoch_{}.npy'.format(epoch)), precision_values)
    np.save(join(predictions_dir, 'pr_curve_x_epoch_{}.npy'.format(epoch)), recall_values)

def close(self):
    """Close the underlying jsonl log file."""
    self._log_file.close()
class ClassificationLossCompute:
    """A loss compute and train function for classification tasks.

    Combines a (optional) language-modeling auxiliary loss with the
    classification loss, runs the backward pass, and steps the optimizer
    when one is supplied.
    """

    def __init__(self, lm_criterion, clf_criterion, lm_coef, opt=None):
        # lm_criterion: per-token LM loss (used only when lm_logits are passed)
        # clf_criterion: per-example classification loss (reduction='none' expected,
        #   since the losses are masked/summed manually below)
        # lm_coef: weight of the LM auxiliary loss; <= 0 disables it
        # opt: optimizer to step after backward; None = backward only
        self.lm_criterion = lm_criterion
        self.clf_criterion = clf_criterion
        self.lm_coef = lm_coef
        self.opt = opt

    def __call__(self, X, Y, M, clf_logits, lm_logits=None, only_return_losses=False):
        # Language modeling loss; X and M are only consulted on this path.
        # (A dead commented-out 3-D variant of this block was removed; the
        # active code mirrors MultipleChoiceLossCompute's 4-D handling.)
        if lm_logits is not None:
            # shift targets left by one: predict token t+1 from position t
            x_shifted = X[:, :, 1:, 0].contiguous().view(-1)
            M = M.view(-1, M.size(2))
            lm_losses = self.lm_criterion(lm_logits, x_shifted)
            lm_losses = lm_losses.view(X.size(0) * X.size(1), X.size(2) - 1)
            lm_losses = lm_losses * M[:, 1:]  # mask out padding positions
            lm_losses = lm_losses.sum(1) / torch.sum(M[:, 1:], 1)
        # Classification loss
        clf_losses = self.clf_criterion(clf_logits, Y)
        if only_return_losses:
            return (clf_losses, lm_losses) if lm_logits is not None else clf_losses

        if self.lm_coef > 0 and lm_logits is not None:
            train_loss = clf_losses.sum() + self.lm_coef * lm_losses.sum()
        else:
            train_loss = clf_losses.sum()
        train_loss.backward()
        if self.opt is not None:
            self.opt.step()
            self.opt.zero_grad()
        return train_loss.item()
class WordDropout(torch.nn.Module):
    """
    Implementation of word dropout. Randomly drops out entire words
    (or characters) in embedding space: one Bernoulli draw per position
    along dim 1, broadcast over the remaining dims. Note there is no
    1/(1-p) rescaling, unlike standard dropout.
    """

    def __init__(self, dropout_rate=0.05):
        super(WordDropout, self).__init__()
        # probability of zeroing a whole position
        self.dropout_rate = dropout_rate

    def forward(self, x):
        # Identity in eval mode or when the rate is 0.
        if not self.training or not self.dropout_rate:
            return x

        # One keep/drop decision per position (dim 1), shared across the
        # other dims. The deprecated torch.autograd.Variable wrapper was
        # removed -- plain tensors carry autograd state natively, and this
        # mask never requires grad.
        mask = x.data.new(1, x.size(1), 1).bernoulli_(1 - self.dropout_rate)
        return mask.expand_as(x) * x
class Conv1D(nn.Module):
    """A 1x1 "convolution", i.e. a per-position affine projection
    from nx input features to nf output features (OpenAI GPT style).
    Only rf == 1 is supported; other receptive fields were used when
    the LM was originally trained and are not implemented here.
    """

    def __init__(self, nf, rf, nx):
        super(Conv1D, self).__init__()
        self.rf = rf
        self.nf = nf
        if rf != 1:
            raise NotImplementedError
        weight = torch.empty(nx, nf)
        nn.init.normal_(weight, std=0.02)
        self.w = Parameter(weight)
        self.b = Parameter(torch.zeros(nf))

    def forward(self, x):
        if self.rf != 1:
            raise NotImplementedError
        # flatten all leading dims, project, then restore the shape with
        # the last dim swapped to nf
        out_shape = x.size()[:-1] + (self.nf,)
        flat = x.view(-1, x.size(-1))
        projected = torch.addmm(self.b, flat, self.w)
        return projected.view(*out_shape)
class Block(nn.Module):
    """One transformer layer: self-attention and MLP sub-blocks, each
    followed by a residual connection and post-layer-norm (GPT style).
    """

    def __init__(self, n_ctx, cfg, scale=False):
        super(Block, self).__init__()
        embed_dim = cfg.n_embd
        self.attn = Attention(embed_dim, n_ctx, cfg, scale)
        self.ln_1 = LayerNorm(embed_dim)
        self.mlp = MLP(4 * embed_dim, cfg)
        self.ln_2 = LayerNorm(embed_dim)

    def forward(self, x):
        attn_out = self.attn(x)
        normed = self.ln_1(x + attn_out)
        return self.ln_2(normed + self.mlp(normed))
class LMHead(nn.Module):
    """Language-model head whose output projection is tied to the input
    embedding matrix of the given transformer."""

    def __init__(self, model, cfg):
        super(LMHead, self).__init__()
        self.n_embd = cfg.n_embd
        vocab_size, embd_size = model.embed.weight.shape
        self.decoder = nn.Linear(embd_size, vocab_size, bias=False)
        # Weight tying: the decoder shares the embedding parameters.
        self.decoder.weight = model.embed.weight

    def forward(self, h):
        # Drop the final position: each remaining position predicts the next
        # token, so the last one has no target.
        flattened = h[:, :-1].contiguous().view(-1, self.n_embd)
        return self.decoder(flattened)
class ClfHead(nn.Module):
    """Classification head for the transformer.

    Selects the hidden state at the position of the special ``clf_token``
    in each sequence, applies dropout, and maps it to ``n_class`` logits.

    TODO: test this class."""
    def __init__(self, clf_token, cfg, n_class):
        super(ClfHead, self).__init__()
        self.n_embd = cfg.n_embd
        self.clf_token = clf_token
        self.dropout = nn.Dropout(cfg.clf_pdrop)
        self.linear = nn.Linear(cfg.n_embd, n_class)

        nn.init.normal_(self.linear.weight, std=0.02)
        # BUG FIX: the original called nn.init.normal_(self.linear.bias, 0),
        # but normal_'s second positional argument is the *mean*, so std
        # stayed at 1.0 and the bias got large random values. Zero-init it.
        nn.init.zeros_(self.linear.bias)

    def forward(self, h, x):
        """h: hidden states, flattened to (n_tokens, n_embd); x: input where
        x[..., 0] holds the token ids used to locate the clf token."""
        clf_h = h.view(-1, self.n_embd)
        flat = x[..., 0].contiguous().view(-1)
        # Keep only the hidden states at classifier-token positions.
        clf_h = clf_h[flat == self.clf_token, :]
        clf_h = self.dropout(clf_h)
        clf_logits = self.linear(clf_h)

        return clf_logits
class DoubleHeadModel(nn.Module):
    """ Transformer with language model and task specific heads """

    def __init__(self, cfg, clf_token, task_head_type, vocab=40990, n_ctx=512):
        super(DoubleHeadModel, self).__init__()
        # Kept for serialization (see save_to_file / load_from_file).
        self.cfg = cfg
        self.clf_token = clf_token
        self.task_head_type = task_head_type
        self.vocab = vocab
        self.n_ctx = n_ctx

        self.transformer = TransformerModel(cfg, vocab=vocab, n_ctx=n_ctx)
        self.lm_head = LMHead(self.transformer, cfg)
        self.task_head = self._build_task_head(clf_token, cfg, task_head_type)

    def _build_task_head(self, clf_token, cfg, task_head_type):
        """Create the task-specific head for ``task_head_type``.

        Raises ValueError for unknown types. (The original duplicated the
        error message across two branches; it is built once here.)
        """
        if isinstance(task_head_type, str):
            if task_head_type == 'multiple_choice':
                return MultipleChoiceHead(clf_token, cfg)
            if task_head_type == 'similarity':
                return SimilarityHead(clf_token, cfg)
            if task_head_type == 'inference':
                # the three classes correspond to entailment, contradiction and neutral.
                return ClfHead(clf_token, cfg, 3)
        elif isinstance(task_head_type, collections.abc.Sequence) and len(task_head_type) == 2 and \
                task_head_type[0] == 'classification':
            return ClfHead(clf_token, cfg, task_head_type[1])
        raise ValueError("task_head_type is expected to be 'multiple_choice' "
                         "'similarity', 'inference' or ('classification', n_class) "
                         f"got {task_head_type}.")

    def forward(self, x):
        h = self.transformer(x)
        lm_logits = self.lm_head(h)
        task_logits = self.task_head(h, x)

        return lm_logits, task_logits

    def save_to_file(self, model_file: str):
        """
        Saves the current model to the provided file.
        :param model_file: the model file
        """
        model_state = {
            'state_dict': self.state_dict(),
            'cfg': dict(self.cfg),  # plain dict so loading does not need dotdict to unpickle
            'clf_token': self.clf_token,
            'task_head_type': self.task_head_type,
            'vocab': self.vocab,
            'n_ctx': self.n_ctx
        }
        torch.save(model_state, model_file, pickle_protocol=4)

    @classmethod
    def load_from_file(cls, model_file):
        """
        Loads the model from the given file.
        :param model_file: the model file
        :return: the loaded text classifier model
        """

        # ATTENTION: suppressing torch serialization warnings. This needs to be taken out once we sort out recursive
        # serialization of torch objects.
        # FIX: use catch_warnings() so the caller's warning filters are
        # restored afterwards; the original's filterwarnings("default")
        # clobbered whatever configuration the caller had set up.
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            if torch.cuda.is_available():
                state = torch.load(model_file)
            else:
                # FIX: map_location='cpu' remaps tensors saved on *any* CUDA
                # device; the original {'cuda:0': 'cpu'} only handled cuda:0.
                state = torch.load(model_file, map_location='cpu')

        model = DoubleHeadModel(
            cfg=dotdict(state['cfg']),
            clf_token=state['clf_token'],
            task_head_type=state['task_head_type'],
            vocab=state['vocab'],
            n_ctx=state['n_ctx']
        )

        model.load_state_dict(state['state_dict'])
        model.eval()
        return model
class dotdict(dict):
    """dot.notation access to dictionary attributes.

    NOTE: a missing attribute resolves to None (``dict.get`` semantics)
    instead of raising AttributeError; callers rely on this for optional
    config keys, so it must not be "fixed" to raise.
    """

    def __getattr__(self, name):
        # Only invoked when normal attribute lookup fails, i.e. for keys.
        return self.get(name)

    def __setattr__(self, name, value):
        self[name] = value

    def __delattr__(self, name):
        del self[name]


# Default hyper-parameters of the pre-trained OpenAI transformer.
DEFAULT_CONFIG = dotdict({
    'n_embd': 768,
    'n_head': 12,
    'n_layer': 12,
    'embd_pdrop': 0.1,
    'attn_pdrop': 0.1,
    'resid_pdrop': 0.1,
    'afn': 'gelu',
    'clf_pdrop': 0.1})
import math
import torch
from torch.optim import Optimizer
from torch.nn.utils import clip_grad_norm_


def warmup_cosine(x, warmup=0.002):
    """Linear warmup followed by cosine annealing over overall progress x.

    BUG FIX: the original called torch.cos on a plain Python float, which
    raises a TypeError the moment this schedule is used; math.cos is the
    correct scalar routine.
    """
    s = 1 if x <= warmup else 0
    return s*(x/warmup) + (1-s)*(0.5 * (1 + math.cos(math.pi * x)))


def warmup_constant(x, warmup=0.002):
    """Linear warmup, then a constant multiplier of 1."""
    s = 1 if x <= warmup else 0
    return s*(x/warmup) + (1-s)*1


def warmup_linear(x, warmup=0.002):
    """Linear warmup, then linear decay to 0 at the end of training."""
    s = 1 if x <= warmup else 0
    return (s*(x/warmup) + (1-s))*(1-x)


SCHEDULES = {
    'warmup_cosine': warmup_cosine,
    'warmup_constant': warmup_constant,
    'warmup_linear': warmup_linear,
}


class OpenAIAdam(Optimizer):
    """Implements Open AI version of Adam algorithm with weight decay fix.
    """
    def __init__(self, params, lr, schedule, warmup, t_total,
                 b1=0.9, b2=0.999, e=1e-8, l2=0,
                 vector_l2=False, max_grad_norm=-1, **kwargs):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if schedule not in SCHEDULES:
            raise ValueError("Invalid schedule parameter: {}".format(schedule))
        if not 0 <= warmup:
            raise ValueError("Invalid warmup: {}".format(warmup))
        if not 0.0 <= b1 < 1.0:
            raise ValueError("Invalid b1 parameter: {}".format(b1))
        if not 0.0 <= b2 < 1.0:
            raise ValueError("Invalid b2 parameter: {}".format(b2))
        if not 0.0 <= e:
            raise ValueError("Invalid epsilon value: {}".format(e))
        defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total,
                        b1=b1, b2=b2, e=e, l2=l2, vector_l2=vector_l2,
                        max_grad_norm=max_grad_norm)
        super(OpenAIAdam, self).__init__(params, defaults)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['b1'], group['b2']

                state['step'] += 1

                # Per-parameter gradient clipping (matches the OpenAI TF code)
                if group['max_grad_norm'] > 0:
                    clip_grad_norm_(p, group['max_grad_norm'])

                # Decay the first and second moment running average coefficient.
                # FIX: use the keyword alpha/value overloads; the positional
                # (scalar, tensor) forms are deprecated and removed in
                # current torch releases.
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                denom = exp_avg_sq.sqrt().add_(group['e'])

                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']

                schedule_fct = SCHEDULES[group['schedule']]
                lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])
                step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1

                p.data.addcdiv_(exp_avg, denom, value=-step_size)

                # Add weight decay at the end (fixed version): decoupled decay,
                # skipped for 1-d params unless vector_l2 is set.
                if (len(p.size()) > 1 or group['vector_l2']) and group['l2'] > 0:
                    p.data.add_(p.data, alpha=-lr_scheduled * group['l2'])

        return loss
"model/h0/attn/c_attn/b:0", "model/h0/attn/c_proj/w:0", "model/h0/attn/c_proj/b:0", "model/h0/ln_1/g:0", "model/h0/ln_1/b:0", "model/h0/mlp/c_fc/w:0", "model/h0/mlp/c_fc/b:0", "model/h0/mlp/c_proj/w:0", "model/h0/mlp/c_proj/b:0", "model/h0/ln_2/g:0", "model/h0/ln_2/b:0", "model/h1/attn/c_attn/w:0", "model/h1/attn/c_attn/b:0", "model/h1/attn/c_proj/w:0", "model/h1/attn/c_proj/b:0", "model/h1/ln_1/g:0", "model/h1/ln_1/b:0", "model/h1/mlp/c_fc/w:0", "model/h1/mlp/c_fc/b:0", "model/h1/mlp/c_proj/w:0", "model/h1/mlp/c_proj/b:0", "model/h1/ln_2/g:0", "model/h1/ln_2/b:0", "model/h2/attn/c_attn/w:0", "model/h2/attn/c_attn/b:0", "model/h2/attn/c_proj/w:0", "model/h2/attn/c_proj/b:0", "model/h2/ln_1/g:0", "model/h2/ln_1/b:0", "model/h2/mlp/c_fc/w:0", "model/h2/mlp/c_fc/b:0", "model/h2/mlp/c_proj/w:0", "model/h2/mlp/c_proj/b:0", "model/h2/ln_2/g:0", "model/h2/ln_2/b:0", "model/h3/attn/c_attn/w:0", "model/h3/attn/c_attn/b:0", "model/h3/attn/c_proj/w:0", "model/h3/attn/c_proj/b:0", "model/h3/ln_1/g:0", "model/h3/ln_1/b:0", "model/h3/mlp/c_fc/w:0", "model/h3/mlp/c_fc/b:0", "model/h3/mlp/c_proj/w:0", "model/h3/mlp/c_proj/b:0", "model/h3/ln_2/g:0", "model/h3/ln_2/b:0", "model/h4/attn/c_attn/w:0", "model/h4/attn/c_attn/b:0", "model/h4/attn/c_proj/w:0", "model/h4/attn/c_proj/b:0", "model/h4/ln_1/g:0", "model/h4/ln_1/b:0", "model/h4/mlp/c_fc/w:0", "model/h4/mlp/c_fc/b:0", "model/h4/mlp/c_proj/w:0", "model/h4/mlp/c_proj/b:0", "model/h4/ln_2/g:0", "model/h4/ln_2/b:0", "model/h5/attn/c_attn/w:0", "model/h5/attn/c_attn/b:0", "model/h5/attn/c_proj/w:0", "model/h5/attn/c_proj/b:0", "model/h5/ln_1/g:0", "model/h5/ln_1/b:0", "model/h5/mlp/c_fc/w:0", "model/h5/mlp/c_fc/b:0", "model/h5/mlp/c_proj/w:0", "model/h5/mlp/c_proj/b:0", "model/h5/ln_2/g:0", "model/h5/ln_2/b:0", "model/h6/attn/c_attn/w:0", "model/h6/attn/c_attn/b:0", "model/h6/attn/c_proj/w:0", "model/h6/attn/c_proj/b:0", "model/h6/ln_1/g:0", "model/h6/ln_1/b:0", "model/h6/mlp/c_fc/w:0", "model/h6/mlp/c_fc/b:0", 
"model/h6/mlp/c_proj/w:0", "model/h6/mlp/c_proj/b:0", "model/h6/ln_2/g:0", "model/h6/ln_2/b:0", "model/h7/attn/c_attn/w:0", "model/h7/attn/c_attn/b:0", "model/h7/attn/c_proj/w:0", "model/h7/attn/c_proj/b:0", "model/h7/ln_1/g:0", "model/h7/ln_1/b:0", "model/h7/mlp/c_fc/w:0", "model/h7/mlp/c_fc/b:0", "model/h7/mlp/c_proj/w:0", "model/h7/mlp/c_proj/b:0", "model/h7/ln_2/g:0", "model/h7/ln_2/b:0", "model/h8/attn/c_attn/w:0", "model/h8/attn/c_attn/b:0", "model/h8/attn/c_proj/w:0", "model/h8/attn/c_proj/b:0", "model/h8/ln_1/g:0", "model/h8/ln_1/b:0", "model/h8/mlp/c_fc/w:0", "model/h8/mlp/c_fc/b:0", "model/h8/mlp/c_proj/w:0", "model/h8/mlp/c_proj/b:0", "model/h8/ln_2/g:0", "model/h8/ln_2/b:0", "model/h9/attn/c_attn/w:0", "model/h9/attn/c_attn/b:0", "model/h9/attn/c_proj/w:0", "model/h9/attn/c_proj/b:0", "model/h9/ln_1/g:0", "model/h9/ln_1/b:0", "model/h9/mlp/c_fc/w:0", "model/h9/mlp/c_fc/b:0", "model/h9/mlp/c_proj/w:0", "model/h9/mlp/c_proj/b:0", "model/h9/ln_2/g:0", "model/h9/ln_2/b:0", "model/h10/attn/c_attn/w:0", "model/h10/attn/c_attn/b:0", "model/h10/attn/c_proj/w:0", "model/h10/attn/c_proj/b:0", "model/h10/ln_1/g:0", "model/h10/ln_1/b:0", "model/h10/mlp/c_fc/w:0", "model/h10/mlp/c_fc/b:0", "model/h10/mlp/c_proj/w:0", "model/h10/mlp/c_proj/b:0", "model/h10/ln_2/g:0", "model/h10/ln_2/b:0", "model/h11/attn/c_attn/w:0", "model/h11/attn/c_attn/b:0", "model/h11/attn/c_proj/w:0", "model/h11/attn/c_proj/b:0", "model/h11/ln_1/g:0", "model/h11/ln_1/b:0", "model/h11/mlp/c_fc/w:0", "model/h11/mlp/c_fc/b:0", "model/h11/mlp/c_proj/w:0", "model/h11/mlp/c_proj/b:0", "model/h11/ln_2/g:0", "model/h11/ln_2/b:0", "model/clf/w:0", "model/clf/b:0"] -------------------------------------------------------------------------------- /relation_extraction.py: -------------------------------------------------------------------------------- 1 | from os.path import dirname, join 2 | 3 | from os import path 4 | 5 | import fire 6 | import random 7 | import torch 8 | 9 | import numpy as np 10 | 11 | 
from tempfile import NamedTemporaryFile 12 | from torch import nn 13 | 14 | from sklearn.utils import shuffle 15 | from sklearn.metrics import accuracy_score, f1_score, precision_recall_fscore_support, confusion_matrix, classification_report 16 | 17 | from model_pytorch import DoubleHeadModel, load_openai_pretrained_model, dotdict 18 | from loss import ClassificationLossCompute 19 | from opt import OpenAIAdam 20 | from datasets import SemEval2010Task8 21 | from text_utils import TextEncoder, LabelEncoder 22 | from train_utils import predict, iter_data, iter_apply, persist_model, load_model 23 | from logging_utils import ResultLogger 24 | from analysis_util import evaluate_semeval2010_task8 25 | 26 | 27 | def _remove_label_direction(label): 28 | direction_suffix_start = label.find('(') 29 | if direction_suffix_start != -1: 30 | return label[:direction_suffix_start] 31 | else: 32 | return label 33 | 34 | 35 | def _get_max_label_length(labels): 36 | return max([len(label) for label in labels]) 37 | 38 | 39 | def _print_labeled_confusion_matrix(labels, labels_dev, labels_pred_dev): 40 | conf_matrix = confusion_matrix(labels_dev, labels_pred_dev, labels=labels) 41 | conf_matrix_str = np.array2string(conf_matrix, max_line_width=120, threshold=999999) 42 | 43 | max_label_length = _get_max_label_length(labels) 44 | 45 | for (label, matrix_row) in zip(labels, conf_matrix_str.splitlines()): 46 | n_whitespaces = (max_label_length - len(label)) + 1 47 | print(label + (n_whitespaces * ' ') + matrix_row) 48 | 49 | 50 | def _print_undirected_classifcation_scores(labels, negative_label, labels_dev, labels_pred_dev): 51 | undirected_labels = list(set([_remove_label_direction(label) for label in labels if label != ''])) 52 | 53 | tp_counts = dict() 54 | fp_counts = dict() 55 | tn_counts = dict() 56 | fn_counts = dict() 57 | 58 | for example_idx in range(len(labels_dev)): 59 | true_label = labels_dev[example_idx] 60 | pred_label = labels_pred_dev[example_idx] 61 | 62 | 
undirected_true_label = _remove_label_direction(true_label) 63 | undirected_pred_label = _remove_label_direction(pred_label) 64 | 65 | for undirected_label in undirected_labels: 66 | # for this label the example is supposed to be a true positive 67 | if undirected_label == undirected_true_label: 68 | if pred_label == true_label: 69 | tp_counts[undirected_label] = tp_counts.get(undirected_label, 0) + 1 70 | else: 71 | fn_counts[undirected_label] = fn_counts.get(undirected_label, 0) + 1 72 | 73 | # for this label the example is supposed to be a true negative 74 | else: 75 | if undirected_pred_label != undirected_label: 76 | tn_counts[undirected_label] = tn_counts.get(undirected_label, 0) + 1 77 | else: 78 | fp_counts[undirected_label] = fp_counts.get(undirected_label, 0) + 1 79 | 80 | macro_f1_scores = [] 81 | macro_f1_scores_wo_negative = [] 82 | 83 | print() 84 | max_label_length = _get_max_label_length(undirected_labels) 85 | print(max_label_length * ' ' + ' P R F1') 86 | for undirected_label in undirected_labels: 87 | tps = tp_counts.get(undirected_label, 0) 88 | fps = fp_counts.get(undirected_label, 0) 89 | fns = fn_counts.get(undirected_label, 0) 90 | 91 | precision_denominator = tps + fps 92 | recall_denominator = tps + fns 93 | if precision_denominator == 0 or recall_denominator == 0: 94 | print("Skipping %s: division by zero, assuming f1 of 0" % undirected_label) 95 | macro_f1_scores.append(0) 96 | if undirected_label != negative_label: 97 | macro_f1_scores_wo_negative.append(0) 98 | continue 99 | 100 | precision = tps / precision_denominator 101 | recall = tps / recall_denominator 102 | 103 | f1_denominator = precision + recall 104 | if f1_denominator == 0: 105 | print("Skipping %s: division by zero, assuming f1 of 0" % undirected_label) 106 | macro_f1_scores.append(0) 107 | if undirected_label != negative_label: 108 | macro_f1_scores_wo_negative.append(0) 109 | continue 110 | 111 | f1 = 2 * (precision * recall) / f1_denominator 112 | 113 | label_padding = 
(max_label_length - len(undirected_label) - 1) * ' ' 114 | print("{}{:6.2f}{:6.2f}{:6.2f}".format(undirected_label + ':' + label_padding, precision, recall, f1)) 115 | 116 | macro_f1_scores.append(f1) 117 | if undirected_label != negative_label: 118 | macro_f1_scores_wo_negative.append(f1) 119 | 120 | print() 121 | print("Per relation macro f1: {:.2f}".format(np.mean(macro_f1_scores))) 122 | print("Per relation macro f1 excluding negative relation: {:.2f}".format(np.mean(macro_f1_scores_wo_negative))) 123 | print() 124 | 125 | 126 | def _print_classification_details(label_encoder, label_idxs_dev, label_idxs_pred_dev, negative_label): 127 | labels = label_encoder.get_items() 128 | labels_dev = [label_encoder.get_item_for_index(index) for index in label_idxs_dev] 129 | labels_pred_dev = [label_encoder.get_item_for_index(index) for index in label_idxs_pred_dev] 130 | 131 | print(classification_report(labels_dev, labels_pred_dev)) 132 | _print_labeled_confusion_matrix(labels, labels_dev, labels_pred_dev) 133 | _print_undirected_classifcation_scores(labels, negative_label, labels_dev, labels_pred_dev) 134 | 135 | 136 | def run_epoch(model, train, dev, test, compute_loss_fct, batch_size, device, epoch, label_encoder, logger, 137 | negative_label, log_with_id=True, verbose=False): 138 | print('-' * 100) 139 | 140 | indices_train, mask_train, labels_train, _, _ = train 141 | 142 | n_batches = len(indices_train) // batch_size 143 | 144 | current_loss: float = 0 145 | seen_sentences = 0 146 | modulo = max(1, int(n_batches / 10)) 147 | 148 | positive_labels = set(label_encoder.get_items()) 149 | positive_labels.discard(negative_label) 150 | positive_labels = [label_encoder.get_idx_for_item(label) for label in positive_labels] 151 | 152 | epoch_labels_pred_train = [] 153 | epoch_labels_train = [] 154 | 155 | # TODO: refactor! 
156 | for batch_no, (batch_indices, batch_mask, batch_labels) in enumerate(iter_data( 157 | *shuffle(indices_train, mask_train, labels_train, random_state=np.random), 158 | batch_size=batch_size, truncate=True, verbose=True)): 159 | 160 | model.train() 161 | 162 | x = torch.tensor(batch_indices, dtype=torch.long).to(device) 163 | y = torch.tensor(batch_labels, dtype=torch.long).to(device) 164 | mask = torch.tensor(batch_mask).to(device) 165 | 166 | lm_logits, clf_logits = model(x) 167 | loss = compute_loss_fct(x, y, mask, clf_logits, lm_logits) 168 | 169 | epoch_labels_pred_train.extend(np.argmax(clf_logits.detach().cpu(), 1)) 170 | epoch_labels_train.extend(batch_labels) 171 | 172 | seen_sentences += len(batch_indices) 173 | current_loss += loss 174 | 175 | if batch_no % modulo == 0: 176 | train_acc = accuracy_score(epoch_labels_train, epoch_labels_pred_train) * 100 177 | train_micro_f1 = f1_score(epoch_labels_train, epoch_labels_pred_train, average='micro', labels=positive_labels) 178 | train_macro_f1 = f1_score(epoch_labels_train, epoch_labels_pred_train, average='macro', labels=positive_labels) 179 | print("epoch {0} - iter {1}/{2} - loss {3:.8f} - acc {4:.2f} - micro f1 {5:.2f} - macro f1 {6:.2f}" 180 | .format(epoch, batch_no, n_batches, current_loss / seen_sentences, train_acc, train_micro_f1, train_macro_f1)) 181 | 182 | current_loss /= len(indices_train) 183 | 184 | # IMPORTANT: Switch to eval mode 185 | model.eval() 186 | 187 | indices_dev, mask_dev, labels_dev, ids_dev, _ = dev 188 | 189 | print('-' * 100) 190 | dev_logits, dev_loss = iter_apply(indices_dev, mask_dev, labels_dev, model, compute_loss_fct, device, batch_size) 191 | 192 | avg_dev_loss = dev_loss / len(indices_dev) 193 | 194 | label_pred_dev = np.argmax(dev_logits, 1) 195 | 196 | dev_accuracy = accuracy_score(labels_dev, label_pred_dev) * 100. 
197 | dev_micro_f1 = f1_score(labels_dev, label_pred_dev, average='micro', labels=positive_labels) 198 | dev_macro_f1 = f1_score(labels_dev, label_pred_dev, average='macro', labels=positive_labels) 199 | 200 | if verbose: 201 | _print_classification_details(label_encoder, labels_dev, label_pred_dev, negative_label) 202 | 203 | print('EVALUATION: cost: {} | acc: {} | micro f1: {} | macro f1: {}'.format( 204 | dev_loss / len(indices_dev), dev_accuracy, dev_micro_f1, dev_macro_f1)) 205 | 206 | # save predictions on test dataset per epoch 207 | 208 | logger.log(train_loss=current_loss, 209 | dev_loss=avg_dev_loss, 210 | dev_accuracy=dev_accuracy, 211 | dev_micro_f1=dev_micro_f1, 212 | dev_macro_f1=dev_macro_f1) 213 | 214 | label_idxs_pred_dev, _ = predict(indices_dev, model, device, batch_size) 215 | labels_pred_dev = [label_encoder.get_item_for_index(label_index) for label_index in label_idxs_pred_dev] 216 | logger.log_dev_predictions(epoch, labels_pred_dev, ids_dev, log_with_id=log_with_id) 217 | 218 | if test is not None: 219 | indices_test, _, labels_test, ids_test, entity_ids_test = test 220 | 221 | log_pr_curve = len(labels_test) > 0 and entity_ids_test is not None 222 | 223 | label_idxs_pred_test, probs_test = predict(indices_test, model, device, batch_size, 224 | compute_probs=log_pr_curve) 225 | 226 | labels_pred_test = [label_encoder.get_item_for_index(label_index) for label_index in label_idxs_pred_test] 227 | logger.log_test_predictions(epoch, labels_pred_test, ids_test, log_with_id=log_with_id) 228 | 229 | if log_pr_curve: 230 | negative_label_idx = label_encoder.get_idx_for_item(negative_label) 231 | logger.log_test_pr_curve(epoch, entity_ids_test, labels_test, probs_test, negative_label_idx, label_encoder) 232 | 233 | return avg_dev_loss, dev_micro_f1, dev_macro_f1 234 | 235 | 236 | def train(dataset, data_dir, log_dir, max_grad_norm=1, learning_rate=6.25e-5, learning_rate_warmup=0.002, 237 | n_ctx=512, n_embd=768, n_head=12, n_layer=12, embd_pdrop=.1, 
lm_coef=.5, 238 | attn_pdrop=.1, resid_pdrop=.1, clf_pdrop=.1, word_pdrop=.0, l2=0.01, vector_l2=True, 239 | optimizer='adam', afn='gelu', learning_rate_schedule='warmup_linear', 240 | encoder_path='model/encoder_bpe_40000.json', bpe_path='model/vocab_40000.bpe', n_transfer=12, 241 | beta1=.9, beta2=.999, e=1e-8, batch_size=8, max_epochs=3, dev_size=.1, seed=0, load_pre_trained=True, 242 | subsampling_rate=1.0, train_set_limit=None, dev_file=None, dev_set_limit=None, skip_test_set=False, 243 | verbose_fetcher=False, verbose_training=False, masking_mode=None, write_model=True): 244 | 245 | cfg = dotdict(locals().items()) 246 | print(cfg) 247 | 248 | logger = ResultLogger(log_dir, **cfg) 249 | 250 | random.seed(seed) 251 | np.random.seed(seed) 252 | torch.manual_seed(seed) 253 | torch.cuda.manual_seed_all(seed) 254 | 255 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 256 | n_gpu = torch.cuda.device_count() 257 | print('Device: {} | n_gpu: {}'.format(device, n_gpu)) 258 | 259 | # create / load encoders for text and labels 260 | text_encoder = TextEncoder(encoder_path, bpe_path) 261 | label_encoder = LabelEncoder(add_unk=False) 262 | 263 | if dataset == 'semeval_2010_task8': 264 | predefined_dev_set = False 265 | negative_label = 'Other' 266 | log_with_id = True 267 | elif dataset == 'tacred': 268 | predefined_dev_set = True 269 | dev_size = None 270 | negative_label = 'no_relation' 271 | log_with_id = False 272 | else: 273 | raise ValueError("Dataset '{}' not supported.".format(dataset)) 274 | 275 | encoder = text_encoder.encoder 276 | encoder['_start_'] = len(encoder) 277 | encoder['_delimiter_'] = len(encoder) 278 | encoder['_delimiter2_'] = len(encoder) 279 | encoder['_classify_'] = len(encoder) 280 | n_special = 4 281 | 282 | if dataset == 'tacred': 283 | for t in SemEval2010Task8.MASKED_ENTITY_TOKENS: 284 | text_encoder.encoder[t] = len(text_encoder.encoder) 285 | n_special += 1 286 | 287 | # TODO: improve (as a sentence is generally much 
longer than the two entities) 288 | # the input has 3 parts (entity 1, entity 2, sentence) and special tokens 289 | # all together should not exceed the context length 290 | max_len = (n_ctx - n_special - 1) // 3 291 | 292 | if dataset == 'semeval_2010_task8' or dataset == 'tacred': 293 | corpus = SemEval2010Task8.fetch(data_dir, dev_size, seed, 294 | negative_label=negative_label, 295 | subsampling_rate=subsampling_rate, 296 | train_set_limit=train_set_limit, 297 | dev_set_limit=dev_set_limit, 298 | skip_test_set=skip_test_set, 299 | predefined_dev_set=predefined_dev_set, 300 | verbose=verbose_fetcher, 301 | masking_mode=masking_mode, 302 | dev_file=dev_file) 303 | 304 | corpus = SemEval2010Task8.encode(*corpus, text_encoder=text_encoder, label_encoder=label_encoder) 305 | n_ctx = min(SemEval2010Task8.max_length(*corpus, max_len=max_len) + n_special + 1, n_ctx) 306 | transformed_corpus = SemEval2010Task8.transform(*corpus, text_encoder=text_encoder, max_length=max_len, n_ctx=n_ctx) 307 | else: 308 | raise ValueError("Dataset '{}' not supported.".format(dataset)) 309 | 310 | if not skip_test_set: 311 | train, dev, test = transformed_corpus 312 | else: 313 | train, dev = transformed_corpus 314 | test = None 315 | 316 | _, _, labels_dev, ids_dev, _ = dev 317 | 318 | logger.log_dev_labels( 319 | labels_dev=[label_encoder.get_item_for_index(label) for label in labels_dev], 320 | ids=ids_dev) 321 | 322 | batch_size_train = batch_size * max(n_gpu, 1) 323 | n_updates_total = (len(train[0]) // batch_size_train) * max_epochs 324 | 325 | clf_token = text_encoder.encoder['_classify_'] 326 | vocab = len(text_encoder.encoder) + n_ctx 327 | n_class = len(label_encoder) 328 | dh_model = DoubleHeadModel(cfg, clf_token, ('classification', n_class), vocab, n_ctx) 329 | 330 | criterion = nn.CrossEntropyLoss(reduce=False) 331 | model_opt = OpenAIAdam(dh_model.parameters(), 332 | lr=learning_rate, 333 | schedule=learning_rate_schedule, 334 | warmup=learning_rate_warmup, 335 | 
t_total=n_updates_total, 336 | b1=beta1, 337 | b2=beta2, 338 | e=e, 339 | l2=l2, 340 | vector_l2=vector_l2, 341 | max_grad_norm=max_grad_norm) 342 | 343 | compute_loss_fct = ClassificationLossCompute(criterion, 344 | criterion, 345 | lm_coef, 346 | model_opt) 347 | 348 | if load_pre_trained: 349 | load_openai_pretrained_model(dh_model.transformer, n_ctx=n_ctx, n_special=n_special, n_transfer=n_transfer) 350 | 351 | dh_model.to(device) 352 | dh_model = nn.DataParallel(dh_model) 353 | 354 | if write_model: 355 | model_dir = path.join(logger.get_base_dir(), 'models') 356 | persist_model(model_dir, dh_model, text_encoder, label_encoder) 357 | 358 | # run training! 359 | best_f1 = 0. 360 | for epoch in range(1, max_epochs + 1): 361 | dev_loss, _, dev_macro_f1 = run_epoch(dh_model, train, dev, test, compute_loss_fct, batch_size, device, epoch, 362 | label_encoder, logger, negative_label, 363 | log_with_id=log_with_id, verbose=verbose_training) 364 | if dev_macro_f1 > best_f1: 365 | best_f1 = dev_macro_f1 366 | 367 | if write_model: 368 | print(f'Saving model at epoch {epoch}. 
def evaluate(dataset, test_file, log_dir, save_dir, model_file='model.pt', batch_size=8, masking_mode=None):
    """Evaluate a persisted relation-extraction model on a test file.

    :param dataset: dataset name, 'semeval_2010_task8' or 'tacred'
    :param test_file: path to the test set in jsonl format
    :param log_dir: directory the ResultLogger writes to
    :param save_dir: directory containing the persisted model and encoders
    :param model_file: model file name inside save_dir
    :param batch_size: prediction batch size
    :param masking_mode: entity masking mode passed to the dataset loader
    :raises ValueError: if ``dataset`` is not supported
    """
    cfg = dotdict(locals().items())
    print(cfg)

    logger = ResultLogger(log_dir, **cfg)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model, text_encoder, label_encoder = load_model(save_dir, model_file=model_file)

    model = model.to(device)

    # Resolve the dataset's negative ("no relation") label once. This also
    # validates the dataset name up front, replacing the two duplicated
    # if/else ladders that each raised the same ValueError.
    negative_labels = {
        'semeval_2010_task8': 'Other',
        'tacred': 'no_relation',
    }
    if dataset not in negative_labels:
        raise ValueError("Dataset '{}' not supported.".format(dataset))
    negative_label = negative_labels[dataset]

    n_ctx = model.n_ctx
    # Input is built from 3 parts (entity 1, entity 2, sentence); cap each.
    max_len = 512 // 3

    test = SemEval2010Task8._load_from_jsonl(test_file, is_test=False, masking_mode=masking_mode)
    test = SemEval2010Task8.encode(test, text_encoder=text_encoder, label_encoder=label_encoder)
    test = SemEval2010Task8.transform(*test, text_encoder=text_encoder, max_length=max_len, n_ctx=n_ctx)[0]

    indices_test, _, label_idxs_test, ids_test, entity_ids_test = test

    # PR curves require per-example entity ids; only log them when available.
    log_pr_curve = entity_ids_test is not None

    label_idxs_pred, probs_test = predict(indices_test, model, device, batch_size, compute_probs=log_pr_curve)
    labels_pred_test = [label_encoder.get_item_for_index(label_index) for label_index in label_idxs_pred]
    logger.log_test_predictions(0, labels_pred_test, ids_test)

    test_accuracy = accuracy_score(label_idxs_test, label_idxs_pred) * 100.

    if dataset == 'semeval_2010_task8':
        # The official SemEval scorer is an external Perl script that reads
        # gold and predicted (id, label) pairs from files.
        id_labels_true = [(id_, label_encoder.get_item_for_index(label_index))
                          for id_, label_index in zip(ids_test, label_idxs_test)]
        id_labels_pred = list(zip(ids_test, labels_pred_test))

        input_files = []
        try:
            for id_labels in [id_labels_true, id_labels_pred]:
                tmp_file = NamedTemporaryFile(delete=True)
                input_files.append(tmp_file)
                with open(tmp_file.name, 'w') as f:
                    for id_, label in id_labels:
                        f.write('{}\t{}\n'.format(id_, label))
                tmp_file.file.close()

            path_to_eval_script = path.join(path.dirname(path.realpath(__file__)),
                                            'analysis/semeval/semeval2010_task8_scorer-v1.2.pl')

            test_f1 = evaluate_semeval2010_task8(id_labels_true_file=input_files[0].name,
                                                 id_labels_pred_file=input_files[1].name,
                                                 eval_script=path_to_eval_script)
        finally:
            # Close the wrappers so the temp files are unlinked deterministically
            # (previously they were only removed when garbage-collected).
            for tmp_file in input_files:
                tmp_file.close()

        print(f'TEST: ACC: {test_accuracy} | F1: {test_f1}')

    else:
        # micro-averaged P/R/F1 over all labels except the negative one
        labels = list(sorted(set(label_idxs_test)))
        labels.remove(label_encoder.get_idx_for_item(negative_label))

        test_precision, test_recall, test_f1, _ = precision_recall_fscore_support(
            label_idxs_test, label_idxs_pred, average='micro', labels=labels)
        print(f'TEST: ACC: {test_accuracy} | P: {test_precision} | R: {test_recall} | F1: {test_f1}')

    if log_pr_curve:
        negative_label_idx = label_encoder.get_idx_for_item(negative_label)
        logger.log_test_pr_curve(0, entity_ids_test, label_idxs_test, probs_test, negative_label_idx, label_encoder)

    logger.close()


if __name__ == '__main__':
    fire.Fire({
        'train': train,
        'evaluate': evaluate
    })
| pandas 11 | regex==2017.4.5 12 | dill 13 | -------------------------------------------------------------------------------- /text_utils.py: -------------------------------------------------------------------------------- 1 | import re 2 | import ftfy 3 | import json 4 | import spacy 5 | 6 | from tqdm import tqdm 7 | from typing import List, Dict 8 | 9 | from spacy.tokens import Doc 10 | 11 | 12 | def get_pairs(word): 13 | """ 14 | Return set of symbol pairs in a word. 15 | word is represented as tuple of symbols (symbols being variable-length strings) 16 | """ 17 | pairs = set() 18 | prev_char = word[0] 19 | for char in word[1:]: 20 | pairs.add((prev_char, char)) 21 | prev_char = char 22 | return pairs 23 | 24 | def text_standardize(text): 25 | """ 26 | fixes some issues the spacy tokenizer had on books corpus 27 | also does some whitespace standardization 28 | """ 29 | text = text.replace('—', '-') 30 | text = text.replace('–', '-') 31 | text = text.replace('―', '-') 32 | text = text.replace('…', '...') 33 | text = text.replace('´', "'") 34 | text = re.sub(r'''(-+|~+|!+|"+|;+|\?+|\++|,+|\)+|\(+|\\+|\/+|\*+|\[+|\]+|}+|{+|\|+|_+)''', r' \1 ', text) 35 | text = re.sub(r'\s*\n\s*', ' \n ', text) 36 | text = re.sub(r'[^\S\n]+', ' ', text) 37 | return text.strip() 38 | 39 | class TextEncoder(object): 40 | """ 41 | mostly a wrapper for a public python bpe tokenizer 42 | """ 43 | 44 | def __init__(self, encoder_path, bpe_path): 45 | self.nlp = spacy.load('en', disable=['parser', 'tagger', 'ner', 'textcat']) 46 | self.encoder = json.load(open(encoder_path)) 47 | self.decoder = {v:k for k,v in self.encoder.items()} 48 | merges = open(bpe_path, encoding='utf-8').read().split('\n')[1:-1] 49 | merges = [tuple(merge.split()) for merge in merges] 50 | self.bpe_ranks = dict(zip(merges, range(len(merges)))) 51 | self.cache = {} 52 | 53 | def bpe(self, token): 54 | word = tuple(token[:-1]) + ( token[-1] + '',) 55 | if token in self.cache: 56 | return self.cache[token] 57 | pairs 
= get_pairs(word) 58 | 59 | if not pairs: 60 | return token+'' 61 | 62 | while True: 63 | bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf'))) 64 | if bigram not in self.bpe_ranks: 65 | break 66 | first, second = bigram 67 | new_word = [] 68 | i = 0 69 | while i < len(word): 70 | try: 71 | j = word.index(first, i) 72 | new_word.extend(word[i:j]) 73 | i = j 74 | except: 75 | new_word.extend(word[i:]) 76 | break 77 | 78 | if word[i] == first and i < len(word)-1 and word[i+1] == second: 79 | new_word.append(first+second) 80 | i += 2 81 | else: 82 | new_word.append(word[i]) 83 | i += 1 84 | new_word = tuple(new_word) 85 | word = new_word 86 | if len(word) == 1: 87 | break 88 | else: 89 | pairs = get_pairs(word) 90 | word = ' '.join(word) 91 | if word == '\n ': 92 | word = '\n' 93 | self.cache[token] = word 94 | return word 95 | 96 | def encode(self, texts, verbose=True, use_tokenizer=False, special_tokens=None): 97 | texts_tokens = [] 98 | if verbose: 99 | for text in tqdm(texts, ncols=80, leave=False): 100 | if use_tokenizer: 101 | text = self.nlp(text_standardize(ftfy.fix_text(text))) 102 | else: 103 | words = [] 104 | for token in text: 105 | if special_tokens is not None and token.lower() in special_tokens: 106 | words.append(token) 107 | else: 108 | words.append(text_standardize(ftfy.fix_text(token))) 109 | text = Doc(self.nlp.vocab, words=words) 110 | 111 | text_tokens = [] 112 | for token in text: 113 | if special_tokens is not None and token.text.lower() in special_tokens: 114 | text_tokens.append(self.encoder.get(token.text.lower(), 0)) 115 | else: 116 | text_tokens.extend([self.encoder.get(t, 0) for t in self.bpe(token.text.lower()).split(' ')]) 117 | texts_tokens.append(text_tokens) 118 | else: 119 | for text in texts: 120 | if use_tokenizer: 121 | text = self.nlp(text_standardize(ftfy.fix_text(text))) 122 | else: 123 | words = [] 124 | for token in text: 125 | if special_tokens is not None and token.lower() in special_tokens: 126 | 
words.append(token) 127 | else: 128 | words.append(text_standardize(ftfy.fix_text(token))) 129 | text = Doc(self.nlp.vocab, words=words) 130 | text_tokens = [] 131 | for token in text: 132 | if special_tokens is not None and token.text.lower() in special_tokens: 133 | text_tokens.append(self.encoder.get(token.text.lower(), 0)) 134 | else: 135 | text_tokens.extend([self.encoder.get(t, 0) for t in self.bpe(token.text.lower()).split(' ')]) 136 | texts_tokens.append(text_tokens) 137 | return texts_tokens 138 | 139 | 140 | class Dictionary: 141 | """ 142 | This class holds a dictionary that maps strings to IDs, used to generate one-hot encodings of strings. 143 | """ 144 | 145 | def __init__(self, add_unk=True): 146 | # init dictionaries 147 | self.item2idx: Dict[str, int] = {} 148 | self.idx2item: List[str] = [] 149 | 150 | # in order to deal with unknown tokens, add 151 | if add_unk: 152 | self.add_item('') 153 | 154 | def add_item(self, item: str) -> int: 155 | """ 156 | add string - if already in dictionary returns its ID. if not in dictionary, it will get a new ID. 
157 | :param item: a string for which to assign an id 158 | :return: ID of string 159 | """ 160 | item = item.encode('utf-8') 161 | if item not in self.item2idx: 162 | self.idx2item.append(item) 163 | self.item2idx[item] = len(self.idx2item) - 1 164 | return self.item2idx[item] 165 | 166 | def get_idx_for_item(self, item: str) -> int: 167 | """ 168 | returns the ID of the string, otherwise 0 169 | :param item: string for which ID is requested 170 | :return: ID of string, otherwise 0 171 | """ 172 | item = item.encode('utf-8') 173 | if item in self.item2idx.keys(): 174 | return self.item2idx[item] 175 | else: 176 | return 0 177 | 178 | def get_items(self) -> List[str]: 179 | items = [] 180 | for item in self.idx2item: 181 | items.append(item.decode('UTF-8')) 182 | return items 183 | 184 | def __len__(self) -> int: 185 | return len(self.idx2item) 186 | 187 | def get_item_for_index(self, idx): 188 | return self.idx2item[idx].decode('UTF-8') 189 | 190 | def save(self, savefile): 191 | import pickle 192 | with open(savefile, 'wb') as f: 193 | mappings = { 194 | 'idx2item': self.idx2item, 195 | 'item2idx': self.item2idx 196 | } 197 | pickle.dump(mappings, f) 198 | 199 | @classmethod 200 | def load_from_file(cls, filename: str): 201 | import pickle 202 | dictionary: Dictionary = Dictionary() 203 | with open(filename, 'rb') as f: 204 | mappings = pickle.load(f, encoding='latin1') 205 | idx2item = mappings['idx2item'] 206 | item2idx = mappings['item2idx'] 207 | dictionary.item2idx = item2idx 208 | dictionary.idx2item = idx2item 209 | return dictionary 210 | 211 | @classmethod 212 | def load(cls, name: str): 213 | return Dictionary.load_from_file(name) 214 | 215 | 216 | class LabelEncoder(Dictionary): 217 | pass 218 | -------------------------------------------------------------------------------- /train_utils.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | import torch 4 | import pickle 5 | 6 | import 
numpy as np 7 | 8 | from os.path import join 9 | 10 | from torch.nn.functional import softmax 11 | from tqdm import tqdm 12 | from utils import make_path 13 | from model_pytorch import DoubleHeadModel 14 | 15 | 16 | def iter_data(*datas, batch_size=128, truncate=False, verbose=False, max_batches=float("inf")): 17 | n_samples = len(datas[0]) 18 | if truncate: 19 | n_samples = (n_samples // batch_size) * batch_size 20 | n_samples = min(n_samples, max_batches * batch_size) 21 | 22 | n_batches = 0 23 | if verbose: 24 | f = sys.stderr 25 | else: 26 | f = open(os.devnull, 'w') 27 | for i in tqdm(range(0, n_samples, batch_size), total=n_samples // batch_size, file=f, ncols=80, leave=False): 28 | if n_batches >= max_batches: raise StopIteration 29 | if len(datas) == 1: 30 | yield datas[0][i: i + batch_size] 31 | else: 32 | yield (d[i: i + batch_size] for d in datas) 33 | n_batches += 1 34 | 35 | 36 | def iter_apply(X, M, Y, model, loss_fct, device, batch_size): 37 | logits = [] 38 | cost = 0 39 | with torch.no_grad(): 40 | model.eval() 41 | for x, m, y in iter_data(X, M, Y, batch_size=batch_size, truncate=False, verbose=True): 42 | n = len(x) 43 | x = torch.tensor(x, dtype=torch.long).to(device) 44 | y = torch.tensor(y, dtype=torch.long).to(device) 45 | m = torch.tensor(m).to(device) 46 | _, clf_logits = model(x) 47 | #clf_logits *= n 48 | clf_losses = loss_fct(x, y, m, clf_logits, only_return_losses=True) 49 | clf_losses *= n 50 | logits.append(clf_logits.to("cpu").numpy()) 51 | cost += clf_losses.sum().item() 52 | logits = np.concatenate(logits, 0) 53 | return logits, cost 54 | 55 | 56 | def iter_predict(X, model, device, batch_size, compute_probs=False): 57 | logits = [] 58 | probs = [] 59 | with torch.no_grad(): 60 | model.eval() 61 | for x in iter_data(X, batch_size=batch_size, truncate=False, verbose=True): 62 | x = torch.tensor(x, dtype=torch.long).to(device) 63 | _, clf_logits = model(x) 64 | if compute_probs: 65 | probs.append(softmax(clf_logits, 
dim=1).to("cpu").numpy()) 66 | logits.append(clf_logits.to("cpu").numpy()) 67 | logits = np.concatenate(logits, 0) 68 | 69 | if compute_probs: 70 | probs = np.concatenate(probs, 0) 71 | return logits, probs 72 | else: 73 | return logits, None 74 | 75 | 76 | def predict(X, model, device, batch_size, compute_probs=False): 77 | pred_fn = lambda x: np.argmax(x, 1) 78 | logits, probs = iter_predict(X, model, device, batch_size, compute_probs=compute_probs) 79 | predictions = pred_fn(logits) 80 | 81 | return predictions, probs 82 | 83 | 84 | def persist_model(save_dir, model, text_encoder, label_encoder, model_name='model.pt'): 85 | model.module.save_to_file(make_path(join(save_dir, model_name))) 86 | with open(join(save_dir, 'text_encoder.pkl'), 'wb') as f: 87 | pickle.dump(text_encoder, f) 88 | with open(join(save_dir, 'label_encoder.pkl'), 'wb') as f: 89 | pickle.dump(label_encoder, f) 90 | 91 | 92 | def load_model(save_dir, model_file='model.pt', text_encoder_file='text_encoder.pkl', 93 | label_encoder_file='label_encoder.pkl'): 94 | 95 | model = DoubleHeadModel.load_from_file(join(save_dir, model_file)) 96 | with open(join(save_dir, text_encoder_file), 'rb') as f: 97 | text_encoder = pickle.load(f) 98 | with open(join(save_dir, label_encoder_file), 'rb') as f: 99 | label_encoder = pickle.load(f) 100 | 101 | return model, text_encoder, label_encoder 102 | -------------------------------------------------------------------------------- /utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import time 4 | 5 | from functools import partial 6 | import numpy as np 7 | 8 | 9 | def stsb_label_encoding(labels, nclass=6): 10 | """ 11 | Label encoding from Tree LSTM paper (Tai, Socher, Manning) 12 | """ 13 | Y = np.zeros((len(labels), nclass)).astype(np.float32) 14 | for j, y in enumerate(labels): 15 | for i in range(nclass): 16 | if i == np.floor(y) + 1: 17 | Y[j,i] = y - np.floor(y) 18 | if i == 
np.floor(y): 19 | Y[j,i] = np.floor(y) - y + 1 20 | return Y 21 | 22 | def np_softmax(x, t=1): 23 | x = x/t 24 | x = x - np.max(x, axis=-1, keepdims=True) 25 | ex = np.exp(x) 26 | return ex/np.sum(ex, axis=-1, keepdims=True) 27 | 28 | def make_path(f): 29 | d = os.path.dirname(f) 30 | if d and not os.path.exists(d): 31 | os.makedirs(d) 32 | return f 33 | 34 | def _identity_init(shape, dtype, partition_info, scale): 35 | n = shape[-1] 36 | w = np.eye(n)*scale 37 | if len([s for s in shape if s != 1]) == 2: 38 | w = w.reshape(shape) 39 | return w.astype(np.float32) 40 | 41 | def identity_init(scale=1.0): 42 | return partial(_identity_init, scale=scale) 43 | 44 | def _np_init(shape, dtype, partition_info, w): 45 | return w 46 | 47 | def np_init(w): 48 | return partial(_np_init, w=w) 49 | 50 | class ResultLogger(object): 51 | def __init__(self, path, *args, **kwargs): 52 | if 'time' not in kwargs: 53 | kwargs['time'] = time.time() 54 | self.f_log = open(make_path(path), 'w') 55 | self.f_log.write(json.dumps(kwargs)+'\n') 56 | 57 | def log(self, **kwargs): 58 | if 'time' not in kwargs: 59 | kwargs['time'] = time.time() 60 | self.f_log.write(json.dumps(kwargs)+'\n') 61 | self.f_log.flush() 62 | 63 | def close(self): 64 | self.f_log.close() 65 | 66 | def flatten(outer): 67 | return [el for inner in outer for el in inner] 68 | 69 | def remove_none(l): 70 | return [e for e in l if e is not None] 71 | --------------------------------------------------------------------------------