├── .gitignore
├── DocSum.ipynb
├── LICENSE
├── README.md
├── bart_sum.py
├── cmd_summarizer.py
├── docsum.png
├── environment.yml
├── main.py
├── presumm
│   ├── __init__.py
│   ├── configuration_bertabs.py
│   ├── modeling_bertabs.py
│   ├── presumm.py
│   ├── run_summarization.py
│   └── utils_summarization.py
└── xml_processor.py

/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies.
92 | #Pipfile.lock 93 | 94 | # celery beat schedule file 95 | celerybeat-schedule 96 | 97 | # SageMath parsed files 98 | *.sage.py 99 | 100 | # Environments 101 | .env 102 | .venv 103 | env/ 104 | venv/ 105 | ENV/ 106 | env.bak/ 107 | venv.bak/ 108 | 109 | # Spyder project settings 110 | .spyderproject 111 | .spyproject 112 | 113 | # Rope project settings 114 | .ropeproject 115 | 116 | # mkdocs documentation 117 | /site 118 | 119 | # mypy 120 | .mypy_cache/ 121 | .dmypy.json 122 | dmypy.json 123 | 124 | # Pyre type checker 125 | .pyre/ 126 | 127 | # Custom 128 | *.xml 129 | *.pdf 130 | *.txt 131 | .vscode -------------------------------------------------------------------------------- /DocSum.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "nbformat": 4, 3 | "nbformat_minor": 0, 4 | "metadata": { 5 | "colab": { 6 | "name": "DocSum.ipynb", 7 | "provenance": [] 8 | }, 9 | "kernelspec": { 10 | "name": "python3", 11 | "display_name": "Python 3" 12 | }, 13 | "accelerator": "GPU" 14 | }, 15 | "cells": [ 16 | { 17 | "cell_type": "code", 18 | "metadata": { 19 | "id": "35yqmsUSa1Zy", 20 | "colab_type": "code", 21 | "colab": { 22 | "base_uri": "https://localhost:8080/", 23 | "height": 34 24 | }, 25 | "outputId": "5da69398-16f5-4310-e13b-34aad5018141" 26 | }, 27 | "source": [ 28 | "!nvidia-smi -L" 29 | ], 30 | "execution_count": 1, 31 | "outputs": [ 32 | { 33 | "output_type": "stream", 34 | "text": [ 35 | "GPU 0: Tesla K80 (UUID: GPU-8bb91a64-3a0a-bc15-fdfd-9d11f8c3013f)\n" 36 | ], 37 | "name": "stdout" 38 | } 39 | ] 40 | }, 41 | { 42 | "cell_type": "code", 43 | "metadata": { 44 | "id": "NLJ6mALYWexB", 45 | "colab_type": "code", 46 | "colab": { 47 | "base_uri": "https://localhost:8080/", 48 | "height": 766 49 | }, 50 | "outputId": "c245b577-cc1c-433e-bb46-72c66c77134c" 51 | }, 52 | "source": [ 53 | "!pip install torch tqdm unidecode regex requests appdirs gdown transformers" 54 | ], 55 | "execution_count": 2, 56 | "outputs": [ 57 | { 58 | "output_type": "stream", 59 | "text": [ 60 | "Requirement already satisfied: torch in /usr/local/lib/python3.6/dist-packages (1.5.1+cu101)\n", 61 | "Requirement already satisfied: tqdm in /usr/local/lib/python3.6/dist-packages (4.41.1)\n", 62 | "Collecting unidecode\n", 63 | "\u001b[?25l Downloading https://files.pythonhosted.org/packages/d0/42/d9edfed04228bacea2d824904cae367ee9efd05e6cce7ceaaedd0b0ad964/Unidecode-1.1.1-py2.py3-none-any.whl (238kB)\n", 64 | "\u001b[K |████████████████████████████████| 245kB 2.9MB/s \n", 65 | "\u001b[?25hRequirement already satisfied: regex in /usr/local/lib/python3.6/dist-packages (2019.12.20)\n", 66 | "Requirement already satisfied: requests in /usr/local/lib/python3.6/dist-packages (2.23.0)\n", 67 | "Collecting appdirs\n", 68 | " Downloading https://files.pythonhosted.org/packages/3b/00/2344469e2084fb287c2e0b57b72910309874c3245463acd6cf5e3db69324/appdirs-1.4.4-py2.py3-none-any.whl\n", 69 | "Requirement already satisfied: gdown in /usr/local/lib/python3.6/dist-packages (3.6.4)\n", 70 | "Collecting transformers\n", 71 | "\u001b[?25l Downloading https://files.pythonhosted.org/packages/9c/35/1c3f6e62d81f5f0daff1384e6d5e6c5758682a8357ebc765ece2b9def62b/transformers-3.0.0-py3-none-any.whl (754kB)\n", 72 | "\u001b[K |████████████████████████████████| 757kB 12.5MB/s \n", 73 | "\u001b[?25hRequirement already satisfied: future in /usr/local/lib/python3.6/dist-packages (from torch) (0.16.0)\n", 74 | "Requirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from torch) 
(1.18.5)\n", 75 | "Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests) (3.0.4)\n", 76 | "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests) (2020.6.20)\n", 77 | "Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests) (2.9)\n", 78 | "Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests) (1.24.3)\n", 79 | "Requirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from gdown) (1.12.0)\n", 80 | "Collecting tokenizers==0.8.0-rc4\n", 81 | "\u001b[?25l Downloading https://files.pythonhosted.org/packages/e8/bd/e5abec46af977c8a1375c1dca7cb1e5b3ec392ef279067af7f6bc50491a0/tokenizers-0.8.0rc4-cp36-cp36m-manylinux1_x86_64.whl (3.0MB)\n", 82 | "\u001b[K |████████████████████████████████| 3.0MB 15.6MB/s \n", 83 | "\u001b[?25hRequirement already satisfied: dataclasses; python_version < \"3.7\" in /usr/local/lib/python3.6/dist-packages (from transformers) (0.7)\n", 84 | "Requirement already satisfied: packaging in /usr/local/lib/python3.6/dist-packages (from transformers) (20.4)\n", 85 | "Collecting sacremoses\n", 86 | "\u001b[?25l Downloading https://files.pythonhosted.org/packages/7d/34/09d19aff26edcc8eb2a01bed8e98f13a1537005d31e95233fd48216eed10/sacremoses-0.0.43.tar.gz (883kB)\n", 87 | "\u001b[K |████████████████████████████████| 890kB 31.5MB/s \n", 88 | "\u001b[?25hCollecting sentencepiece\n", 89 | "\u001b[?25l Downloading https://files.pythonhosted.org/packages/d4/a4/d0a884c4300004a78cca907a6ff9a5e9fe4f090f5d95ab341c53d28cbc58/sentencepiece-0.1.91-cp36-cp36m-manylinux1_x86_64.whl (1.1MB)\n", 90 | "\u001b[K |████████████████████████████████| 1.1MB 35.9MB/s \n", 91 | "\u001b[?25hRequirement already satisfied: filelock in /usr/local/lib/python3.6/dist-packages (from transformers) (3.0.12)\n", 92 | "Requirement already satisfied: pyparsing>=2.0.2 in /usr/local/lib/python3.6/dist-packages (from packaging->transformers) (2.4.7)\n", 93 | "Requirement already satisfied: click in /usr/local/lib/python3.6/dist-packages (from sacremoses->transformers) (7.1.2)\n", 94 | "Requirement already satisfied: joblib in /usr/local/lib/python3.6/dist-packages (from sacremoses->transformers) (0.15.1)\n", 95 | "Building wheels for collected packages: sacremoses\n", 96 | " Building wheel for sacremoses (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n", 97 | " Created wheel for sacremoses: filename=sacremoses-0.0.43-cp36-none-any.whl size=893260 sha256=32d8ee3cd0705aaff0d466792d36f848be2496f9b961d75e51cc3b5e4c25796e\n", 98 | " Stored in directory: /root/.cache/pip/wheels/29/3c/fd/7ce5c3f0666dab31a50123635e6fb5e19ceb42ce38d4e58f45\n", 99 | "Successfully built sacremoses\n", 100 | "Installing collected packages: unidecode, appdirs, tokenizers, sacremoses, sentencepiece, transformers\n", 101 | "Successfully installed appdirs-1.4.4 sacremoses-0.0.43 sentencepiece-0.1.91 tokenizers-0.8.0rc4 transformers-3.0.0 unidecode-1.1.1\n" 102 | ], 103 | "name": "stdout" 104 | } 105 | ] 106 | }, 107 | { 108 | "cell_type": "code", 109 | "metadata": { 110 | "id": "SAkPHsIkVzij", 111 | "colab_type": "code", 112 | "colab": { 113 | "base_uri": "https://localhost:8080/", 114 | "height": 610 115 | }, 116 | "outputId": "8f637070-485d-4c6b-a2ef-1f5ca1afd930" 117 | }, 118 | "source": [ 119 | "!sudo apt install poppler-utils\n", 120 | "!git clone https://github.com/HHousen/docsum.git\n", 121 | "%cd docsum" 122 | ], 123 | "execution_count": 3, 124 | "outputs": [ 125 | { 126 | "output_type": "stream", 127 | "text": [ 128 | "Reading package lists... Done\n", 129 | "Building dependency tree \n", 130 | "Reading state information... Done\n", 131 | "The following package was automatically installed and is no longer required:\n", 132 | " libnvidia-common-440\n", 133 | "Use 'sudo apt autoremove' to remove it.\n", 134 | "The following NEW packages will be installed:\n", 135 | " poppler-utils\n", 136 | "0 upgraded, 1 newly installed, 0 to remove and 33 not upgraded.\n", 137 | "Need to get 154 kB of archives.\n", 138 | "After this operation, 613 kB of additional disk space will be used.\n", 139 | "Get:1 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 poppler-utils amd64 0.62.0-2ubuntu2.10 [154 kB]\n", 140 | "Fetched 154 kB in 1s (285 kB/s)\n", 141 | "debconf: unable to initialize frontend: Dialog\n", 142 | "debconf: (No usable dialog-like program is installed, so the dialog based frontend cannot be used. at /usr/share/perl5/Debconf/FrontEnd/Dialog.pm line 76, <> line 1.)\n", 143 | "debconf: falling back to frontend: Readline\n", 144 | "debconf: unable to initialize frontend: Readline\n", 145 | "debconf: (This frontend requires a controlling tty.)\n", 146 | "debconf: falling back to frontend: Teletype\n", 147 | "dpkg-preconfigure: unable to re-open stdin: \n", 148 | "Selecting previously unselected package poppler-utils.\n", 149 | "(Reading database ... 
144379 files and directories currently installed.)\n", 150 | "Preparing to unpack .../poppler-utils_0.62.0-2ubuntu2.10_amd64.deb ...\n", 151 | "Unpacking poppler-utils (0.62.0-2ubuntu2.10) ...\n", 152 | "Setting up poppler-utils (0.62.0-2ubuntu2.10) ...\n", 153 | "Processing triggers for man-db (2.8.3-2ubuntu0.1) ...\n", 154 | "Cloning into 'docsum'...\n", 155 | "remote: Enumerating objects: 78, done.\u001b[K\n", 156 | "remote: Counting objects: 100% (78/78), done.\u001b[K\n", 157 | "remote: Compressing objects: 100% (50/50), done.\u001b[K\n", 158 | "remote: Total 78 (delta 43), reused 62 (delta 27), pack-reused 0\u001b[K\n", 159 | "Unpacking objects: 100% (78/78), done.\n", 160 | "/content/docsum\n" 161 | ], 162 | "name": "stdout" 163 | } 164 | ] 165 | }, 166 | { 167 | "cell_type": "code", 168 | "metadata": { 169 | "id": "0dJpH3-wV2cU", 170 | "colab_type": "code", 171 | "colab": { 172 | "base_uri": "https://localhost:8080/", 173 | "height": 1000 174 | }, 175 | "outputId": "a37c84bd-bbaa-49cd-b782-aa53480e254b" 176 | }, 177 | "source": [ 178 | "!python cmd_summarizer.py -m bart --text \"Alice was beginning to get very tired of sitting by her sister on the bank, and of having nothing to do: once or twice she had peeped into the book her sister was reading, but it had no pictures or conversations in it, “and what is the use of a book,” thought Alice “without pictures or conversations?” So she was considering in her own mind (as well as she could, for the hot day made her feel very sleepy and stupid), whether the pleasure of making a daisy-chain would be worth the trouble of getting up and picking the daisies, when suddenly a White Rabbit with pink eyes ran close by her. There was nothing so very remarkable in that; nor did Alice think it so very much out of the way to hear the Rabbit say to itself, “Oh dear! Oh dear! I shall be late!” (when she thought it over afterwards, it occurred to her that she ought to have wondered at this, but at the time it all seemed quite natural); but when the Rabbit actually took a watch out of its waistcoat-pocket, and looked at it, and then hurried on, Alice started to her feet, for it flashed across her mind that she had never before seen a rabbit with either a waistcoat-pocket, or a watch to take out of it, and burning with curiosity, she ran across the field after it, and fortunately was just in time to see it pop down a large rabbit-hole under the hedge. In another moment down went Alice after it, never once considering how in the world she was to get out again. The rabbit-hole went straight on like a tunnel for some way, and then dipped suddenly down, so suddenly that Alice had not a moment to think about stopping herself before she found herself falling down a very deep well. Either the well was very deep, or she fell very slowly, for she had plenty of time as she went down to look about her and to wonder what was going to happen next. First, she tried to look down and make out what she was coming to, but it was too dark to see anything; then she looked at the sides of the well, and noticed that they were filled with cupboards and book-shelves; here and there she saw maps and pictures hung upon pegs. She took down a jar from one of the shelves as she passed; it was labelled “ORANGE MARMALADE”, but to her great disappointment it was empty: she did not like to drop the jar for fear of killing somebody underneath, so managed to put it into one of the cupboards as she fell past it. 
“Well!” thought Alice to herself, “after such a fall as this, I shall think nothing of tumbling down stairs! How brave they’ll all think me at home! Why, I wouldn’t say anything about it, even if I fell off the top of the house!” (Which was very likely true.)\"" 179 | ], 180 | "execution_count": 4, 181 | "outputs": [ 182 | { 183 | "output_type": "stream", 184 | "text": [ 185 | "2020-06-29 19:20:45.759234: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudart.so.10.1\n", 186 | "2020-06-29 19:20:48,017|__main__|INFO> Loading Model\n", 187 | "2020-06-29 19:20:48,205|filelock|INFO> Lock 140437094268264 acquired on /root/.cache/torch/transformers/5f0de1d2bbb8eb1a3b69656622293b3328b06b701663a9d4109359751cb4e739.5e72c6158467741b29afbcad014cd97414f17a191d39253eef90d7bfe969cc1f.lock\n", 188 | "2020-06-29 19:20:48,205|transformers.file_utils|INFO> https://s3.amazonaws.com/models.huggingface.co/bert/facebook/bart-large-cnn/config.json not found in cache or force_download set to True, downloading to /root/.cache/torch/transformers/tmpnbq7mikg\n", 189 | "Downloading: 100% 1.30k/1.30k [00:00<00:00, 1.01MB/s]\n", 190 | "2020-06-29 19:20:48,298|transformers.file_utils|INFO> storing https://s3.amazonaws.com/models.huggingface.co/bert/facebook/bart-large-cnn/config.json in cache at /root/.cache/torch/transformers/5f0de1d2bbb8eb1a3b69656622293b3328b06b701663a9d4109359751cb4e739.5e72c6158467741b29afbcad014cd97414f17a191d39253eef90d7bfe969cc1f\n", 191 | "2020-06-29 19:20:48,298|transformers.file_utils|INFO> creating metadata file for /root/.cache/torch/transformers/5f0de1d2bbb8eb1a3b69656622293b3328b06b701663a9d4109359751cb4e739.5e72c6158467741b29afbcad014cd97414f17a191d39253eef90d7bfe969cc1f\n", 192 | "2020-06-29 19:20:48,299|filelock|INFO> Lock 140437094268264 released on /root/.cache/torch/transformers/5f0de1d2bbb8eb1a3b69656622293b3328b06b701663a9d4109359751cb4e739.5e72c6158467741b29afbcad014cd97414f17a191d39253eef90d7bfe969cc1f.lock\n", 193 | "2020-06-29 19:20:48,299|transformers.configuration_utils|INFO> loading configuration file https://s3.amazonaws.com/models.huggingface.co/bert/facebook/bart-large-cnn/config.json from cache at /root/.cache/torch/transformers/5f0de1d2bbb8eb1a3b69656622293b3328b06b701663a9d4109359751cb4e739.5e72c6158467741b29afbcad014cd97414f17a191d39253eef90d7bfe969cc1f\n", 194 | "2020-06-29 19:20:48,300|transformers.configuration_utils|INFO> Model config BartConfig {\n", 195 | " \"_num_labels\": 3,\n", 196 | " \"activation_dropout\": 0.0,\n", 197 | " \"activation_function\": \"gelu\",\n", 198 | " \"add_bias_logits\": false,\n", 199 | " \"add_final_layer_norm\": false,\n", 200 | " \"attention_dropout\": 0.0,\n", 201 | " \"bos_token_id\": 0,\n", 202 | " \"classif_dropout\": 0.0,\n", 203 | " \"d_model\": 1024,\n", 204 | " \"decoder_attention_heads\": 16,\n", 205 | " \"decoder_ffn_dim\": 4096,\n", 206 | " \"decoder_layerdrop\": 0.0,\n", 207 | " \"decoder_layers\": 12,\n", 208 | " \"decoder_start_token_id\": 2,\n", 209 | " \"dropout\": 0.1,\n", 210 | " \"early_stopping\": true,\n", 211 | " \"encoder_attention_heads\": 16,\n", 212 | " \"encoder_ffn_dim\": 4096,\n", 213 | " \"encoder_layerdrop\": 0.0,\n", 214 | " \"encoder_layers\": 12,\n", 215 | " \"eos_token_id\": 2,\n", 216 | " \"extra_pos_embeddings\": 2,\n", 217 | " \"id2label\": {\n", 218 | " \"0\": \"LABEL_0\",\n", 219 | " \"1\": \"LABEL_1\",\n", 220 | " \"2\": \"LABEL_2\"\n", 221 | " },\n", 222 | " \"init_std\": 0.02,\n", 223 | " \"is_encoder_decoder\": true,\n", 224 | " 
\"label2id\": {\n", 225 | " \"LABEL_0\": 0,\n", 226 | " \"LABEL_1\": 1,\n", 227 | " \"LABEL_2\": 2\n", 228 | " },\n", 229 | " \"length_penalty\": 2.0,\n", 230 | " \"max_length\": 142,\n", 231 | " \"max_position_embeddings\": 1024,\n", 232 | " \"min_length\": 56,\n", 233 | " \"model_type\": \"bart\",\n", 234 | " \"no_repeat_ngram_size\": 3,\n", 235 | " \"normalize_before\": false,\n", 236 | " \"normalize_embedding\": true,\n", 237 | " \"num_beams\": 4,\n", 238 | " \"num_hidden_layers\": 12,\n", 239 | " \"output_past\": true,\n", 240 | " \"pad_token_id\": 1,\n", 241 | " \"prefix\": \" \",\n", 242 | " \"scale_embedding\": false,\n", 243 | " \"static_position_embeddings\": false,\n", 244 | " \"task_specific_params\": {\n", 245 | " \"summarization\": {\n", 246 | " \"early_stopping\": true,\n", 247 | " \"length_penalty\": 2.0,\n", 248 | " \"max_length\": 142,\n", 249 | " \"min_length\": 56,\n", 250 | " \"no_repeat_ngram_size\": 3,\n", 251 | " \"num_beams\": 4\n", 252 | " }\n", 253 | " },\n", 254 | " \"vocab_size\": 50264\n", 255 | "}\n", 256 | "\n", 257 | "2020-06-29 19:20:48,642|filelock|INFO> Lock 140437092302520 acquired on /root/.cache/torch/transformers/579dd21941940697e1fe35c8963e41bebe3260ff761dc99fe01f2d8f9a699996.73d71f0899e4bd27603a3503868c9f8cf938416df2de374c864a8c3af18f981d.lock\n", 258 | "2020-06-29 19:20:48,642|transformers.file_utils|INFO> https://cdn.huggingface.co/facebook/bart-large-cnn/pytorch_model.bin not found in cache or force_download set to True, downloading to /root/.cache/torch/transformers/tmp58sahtu_\n", 259 | "Downloading: 100% 1.63G/1.63G [00:29<00:00, 55.5MB/s]\n", 260 | "2020-06-29 19:21:18,135|transformers.file_utils|INFO> storing https://cdn.huggingface.co/facebook/bart-large-cnn/pytorch_model.bin in cache at /root/.cache/torch/transformers/579dd21941940697e1fe35c8963e41bebe3260ff761dc99fe01f2d8f9a699996.73d71f0899e4bd27603a3503868c9f8cf938416df2de374c864a8c3af18f981d\n", 261 | "2020-06-29 19:21:18,136|transformers.file_utils|INFO> creating metadata file for /root/.cache/torch/transformers/579dd21941940697e1fe35c8963e41bebe3260ff761dc99fe01f2d8f9a699996.73d71f0899e4bd27603a3503868c9f8cf938416df2de374c864a8c3af18f981d\n", 262 | "2020-06-29 19:21:18,136|filelock|INFO> Lock 140437092302520 released on /root/.cache/torch/transformers/579dd21941940697e1fe35c8963e41bebe3260ff761dc99fe01f2d8f9a699996.73d71f0899e4bd27603a3503868c9f8cf938416df2de374c864a8c3af18f981d.lock\n", 263 | "2020-06-29 19:21:18,136|transformers.modeling_utils|INFO> loading weights file https://cdn.huggingface.co/facebook/bart-large-cnn/pytorch_model.bin from cache at /root/.cache/torch/transformers/579dd21941940697e1fe35c8963e41bebe3260ff761dc99fe01f2d8f9a699996.73d71f0899e4bd27603a3503868c9f8cf938416df2de374c864a8c3af18f981d\n", 264 | "2020-06-29 19:21:29,667|transformers.modeling_utils|INFO> All model checkpoint weights were used when initializing BartForConditionalGeneration.\n", 265 | "\n", 266 | "2020-06-29 19:21:29,667|transformers.modeling_utils|WARNING> Some weights of BartForConditionalGeneration were not initialized from the model checkpoint at facebook/bart-large-cnn and are newly initialized: ['final_logits_bias']\n", 267 | "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n", 268 | "2020-06-29 19:21:30,192|filelock|INFO> Lock 140437093505904 acquired on /root/.cache/torch/transformers/1ae1f5b6e2b22b25ccc04c000bb79ca847aa226d0761536b011cf7e5868f0655.ef00af9e673c7160b4d41cfda1f48c5f4cba57d5142754525572a846a1ab1b9b.lock\n", 
269 | "2020-06-29 19:21:30,192|transformers.file_utils|INFO> https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-vocab.json not found in cache or force_download set to True, downloading to /root/.cache/torch/transformers/tmpy9c15uft\n", 270 | "Downloading: 100% 899k/899k [00:00<00:00, 12.4MB/s]\n", 271 | "2020-06-29 19:21:30,350|transformers.file_utils|INFO> storing https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-vocab.json in cache at /root/.cache/torch/transformers/1ae1f5b6e2b22b25ccc04c000bb79ca847aa226d0761536b011cf7e5868f0655.ef00af9e673c7160b4d41cfda1f48c5f4cba57d5142754525572a846a1ab1b9b\n", 272 | "2020-06-29 19:21:30,350|transformers.file_utils|INFO> creating metadata file for /root/.cache/torch/transformers/1ae1f5b6e2b22b25ccc04c000bb79ca847aa226d0761536b011cf7e5868f0655.ef00af9e673c7160b4d41cfda1f48c5f4cba57d5142754525572a846a1ab1b9b\n", 273 | "2020-06-29 19:21:30,351|filelock|INFO> Lock 140437093505904 released on /root/.cache/torch/transformers/1ae1f5b6e2b22b25ccc04c000bb79ca847aa226d0761536b011cf7e5868f0655.ef00af9e673c7160b4d41cfda1f48c5f4cba57d5142754525572a846a1ab1b9b.lock\n", 274 | "2020-06-29 19:21:30,423|filelock|INFO> Lock 140437093505904 acquired on /root/.cache/torch/transformers/f8f83199a6270d582d6245dc100e99c4155de81c9745c6248077018fe01abcfb.70bec105b4158ed9a1747fea67a43f5dee97855c64d62b6ec3742f4cfdb5feda.lock\n", 275 | "2020-06-29 19:21:30,424|transformers.file_utils|INFO> https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-merges.txt not found in cache or force_download set to True, downloading to /root/.cache/torch/transformers/tmp2ueq8f74\n", 276 | "Downloading: 100% 456k/456k [00:00<00:00, 8.24MB/s]\n", 277 | "2020-06-29 19:21:30,587|transformers.file_utils|INFO> storing https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-merges.txt in cache at /root/.cache/torch/transformers/f8f83199a6270d582d6245dc100e99c4155de81c9745c6248077018fe01abcfb.70bec105b4158ed9a1747fea67a43f5dee97855c64d62b6ec3742f4cfdb5feda\n", 278 | "2020-06-29 19:21:30,587|transformers.file_utils|INFO> creating metadata file for /root/.cache/torch/transformers/f8f83199a6270d582d6245dc100e99c4155de81c9745c6248077018fe01abcfb.70bec105b4158ed9a1747fea67a43f5dee97855c64d62b6ec3742f4cfdb5feda\n", 279 | "2020-06-29 19:21:30,587|filelock|INFO> Lock 140437093505904 released on /root/.cache/torch/transformers/f8f83199a6270d582d6245dc100e99c4155de81c9745c6248077018fe01abcfb.70bec105b4158ed9a1747fea67a43f5dee97855c64d62b6ec3742f4cfdb5feda.lock\n", 280 | "2020-06-29 19:21:30,587|transformers.tokenization_utils_base|INFO> loading file https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-vocab.json from cache at /root/.cache/torch/transformers/1ae1f5b6e2b22b25ccc04c000bb79ca847aa226d0761536b011cf7e5868f0655.ef00af9e673c7160b4d41cfda1f48c5f4cba57d5142754525572a846a1ab1b9b\n", 281 | "2020-06-29 19:21:30,588|transformers.tokenization_utils_base|INFO> loading file https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-merges.txt from cache at /root/.cache/torch/transformers/f8f83199a6270d582d6245dc100e99c4155de81c9745c6248077018fe01abcfb.70bec105b4158ed9a1747fea67a43f5dee97855c64d62b6ec3742f4cfdb5feda\n", 282 | "2020-06-29 19:21:30,671|__main__|INFO> Document Created\n", 283 | "2020-06-29 19:21:30,671|__main__|INFO> Document Length: 503\n", 284 | "2020-06-29 19:21:30,671|__main__|INFO> min_len: 83\n", 285 | "2020-06-29 19:21:30,671|__main__|INFO> max_len_b: 283\n", 286 | "2020-06-29 
19:21:30,671|transformers.tokenization_utils_base|WARNING> Truncation was not explicitely activated but `max_length` is provided a specific value, please use `truncation=True` to explicitely truncate examples to max length. Defaulting to 'only_first' truncation strategy. If you encode pairs of sequences (GLUE-style) with the tokenizer you may want to check this is the right behavior.\n" 287 | ], 288 | "name": "stdout" 289 | } 290 | ] 291 | }, 292 | { 293 | "cell_type": "code", 294 | "metadata": { 295 | "id": "Jn1hfM5IYRGL", 296 | "colab_type": "code", 297 | "colab": { 298 | "base_uri": "https://localhost:8080/", 299 | "height": 89 300 | }, 301 | "outputId": "465db828-ebc7-4a62-8c28-67fb59fb9bc0" 302 | }, 303 | "source": [ 304 | "!cat summarized.txt" 305 | ], 306 | "execution_count": 5, 307 | "outputs": [ 308 | { 309 | "output_type": "stream", 310 | "text": [ 311 | "\n", 312 | "2020-06-29 19:21:59.852838:\n", 313 | "Alice fell down a rabbit-hole after a White Rabbit with pink eyes. She had never before seen a rabbit with a waistcoat-pocket, or a watch to take out of it. The sides of the well were filled with cupboards and book-shelves. She took down a jar from one of the shelves as she passed; it was labelled “ORANGE MARMALADE”\n" 314 | ], 315 | "name": "stdout" 316 | } 317 | ] 318 | }, 319 | { 320 | "cell_type": "code", 321 | "metadata": { 322 | "id": "cAuS8p4dgxHA", 323 | "colab_type": "code", 324 | "colab": {} 325 | }, 326 | "source": [ 327 | "" 328 | ], 329 | "execution_count": 5, 330 | "outputs": [] 331 | } 332 | ] 333 | } -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 3, 29 June 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | Preamble 9 | 10 | The GNU General Public License is a free, copyleft license for 11 | software and other kinds of works. 12 | 13 | The licenses for most software and other practical works are designed 14 | to take away your freedom to share and change the works. By contrast, 15 | the GNU General Public License is intended to guarantee your freedom to 16 | share and change all versions of a program--to make sure it remains free 17 | software for all its users. We, the Free Software Foundation, use the 18 | GNU General Public License for most of our software; it applies also to 19 | any other work released this way by its authors. You can apply it to 20 | your programs, too. 21 | 22 | When we speak of free software, we are referring to freedom, not 23 | price. Our General Public Licenses are designed to make sure that you 24 | have the freedom to distribute copies of free software (and charge for 25 | them if you wish), that you receive source code or can get it if you 26 | want it, that you can change the software or use pieces of it in new 27 | free programs, and that you know you can do these things. 28 | 29 | To protect your rights, we need to prevent others from denying you 30 | these rights or asking you to surrender the rights. Therefore, you have 31 | certain responsibilities if you distribute copies of the software, or if 32 | you modify it: responsibilities to respect the freedom of others. 
33 | 34 | For example, if you distribute copies of such a program, whether 35 | gratis or for a fee, you must pass on to the recipients the same 36 | freedoms that you received. You must make sure that they, too, receive 37 | or can get the source code. And you must show them these terms so they 38 | know their rights. 39 | 40 | Developers that use the GNU GPL protect your rights with two steps: 41 | (1) assert copyright on the software, and (2) offer you this License 42 | giving you legal permission to copy, distribute and/or modify it. 43 | 44 | For the developers' and authors' protection, the GPL clearly explains 45 | that there is no warranty for this free software. For both users' and 46 | authors' sake, the GPL requires that modified versions be marked as 47 | changed, so that their problems will not be attributed erroneously to 48 | authors of previous versions. 49 | 50 | Some devices are designed to deny users access to install or run 51 | modified versions of the software inside them, although the manufacturer 52 | can do so. This is fundamentally incompatible with the aim of 53 | protecting users' freedom to change the software. The systematic 54 | pattern of such abuse occurs in the area of products for individuals to 55 | use, which is precisely where it is most unacceptable. Therefore, we 56 | have designed this version of the GPL to prohibit the practice for those 57 | products. If such problems arise substantially in other domains, we 58 | stand ready to extend this provision to those domains in future versions 59 | of the GPL, as needed to protect the freedom of users. 60 | 61 | Finally, every program is threatened constantly by software patents. 62 | States should not allow patents to restrict development and use of 63 | software on general-purpose computers, but in those that do, we wish to 64 | avoid the special danger that patents applied to a free program could 65 | make it effectively proprietary. To prevent this, the GPL assures that 66 | patents cannot be used to render the program non-free. 67 | 68 | The precise terms and conditions for copying, distribution and 69 | modification follow. 70 | 71 | TERMS AND CONDITIONS 72 | 73 | 0. Definitions. 74 | 75 | "This License" refers to version 3 of the GNU General Public License. 76 | 77 | "Copyright" also means copyright-like laws that apply to other kinds of 78 | works, such as semiconductor masks. 79 | 80 | "The Program" refers to any copyrightable work licensed under this 81 | License. Each licensee is addressed as "you". "Licensees" and 82 | "recipients" may be individuals or organizations. 83 | 84 | To "modify" a work means to copy from or adapt all or part of the work 85 | in a fashion requiring copyright permission, other than the making of an 86 | exact copy. The resulting work is called a "modified version" of the 87 | earlier work or a work "based on" the earlier work. 88 | 89 | A "covered work" means either the unmodified Program or a work based 90 | on the Program. 91 | 92 | To "propagate" a work means to do anything with it that, without 93 | permission, would make you directly or secondarily liable for 94 | infringement under applicable copyright law, except executing it on a 95 | computer or modifying a private copy. Propagation includes copying, 96 | distribution (with or without modification), making available to the 97 | public, and in some countries other activities as well. 98 | 99 | To "convey" a work means any kind of propagation that enables other 100 | parties to make or receive copies. 
Mere interaction with a user through 101 | a computer network, with no transfer of a copy, is not conveying. 102 | 103 | An interactive user interface displays "Appropriate Legal Notices" 104 | to the extent that it includes a convenient and prominently visible 105 | feature that (1) displays an appropriate copyright notice, and (2) 106 | tells the user that there is no warranty for the work (except to the 107 | extent that warranties are provided), that licensees may convey the 108 | work under this License, and how to view a copy of this License. If 109 | the interface presents a list of user commands or options, such as a 110 | menu, a prominent item in the list meets this criterion. 111 | 112 | 1. Source Code. 113 | 114 | The "source code" for a work means the preferred form of the work 115 | for making modifications to it. "Object code" means any non-source 116 | form of a work. 117 | 118 | A "Standard Interface" means an interface that either is an official 119 | standard defined by a recognized standards body, or, in the case of 120 | interfaces specified for a particular programming language, one that 121 | is widely used among developers working in that language. 122 | 123 | The "System Libraries" of an executable work include anything, other 124 | than the work as a whole, that (a) is included in the normal form of 125 | packaging a Major Component, but which is not part of that Major 126 | Component, and (b) serves only to enable use of the work with that 127 | Major Component, or to implement a Standard Interface for which an 128 | implementation is available to the public in source code form. A 129 | "Major Component", in this context, means a major essential component 130 | (kernel, window system, and so on) of the specific operating system 131 | (if any) on which the executable work runs, or a compiler used to 132 | produce the work, or an object code interpreter used to run it. 133 | 134 | The "Corresponding Source" for a work in object code form means all 135 | the source code needed to generate, install, and (for an executable 136 | work) run the object code and to modify the work, including scripts to 137 | control those activities. However, it does not include the work's 138 | System Libraries, or general-purpose tools or generally available free 139 | programs which are used unmodified in performing those activities but 140 | which are not part of the work. For example, Corresponding Source 141 | includes interface definition files associated with source files for 142 | the work, and the source code for shared libraries and dynamically 143 | linked subprograms that the work is specifically designed to require, 144 | such as by intimate data communication or control flow between those 145 | subprograms and other parts of the work. 146 | 147 | The Corresponding Source need not include anything that users 148 | can regenerate automatically from other parts of the Corresponding 149 | Source. 150 | 151 | The Corresponding Source for a work in source code form is that 152 | same work. 153 | 154 | 2. Basic Permissions. 155 | 156 | All rights granted under this License are granted for the term of 157 | copyright on the Program, and are irrevocable provided the stated 158 | conditions are met. This License explicitly affirms your unlimited 159 | permission to run the unmodified Program. The output from running a 160 | covered work is covered by this License only if the output, given its 161 | content, constitutes a covered work. 
This License acknowledges your 162 | rights of fair use or other equivalent, as provided by copyright law. 163 | 164 | You may make, run and propagate covered works that you do not 165 | convey, without conditions so long as your license otherwise remains 166 | in force. You may convey covered works to others for the sole purpose 167 | of having them make modifications exclusively for you, or provide you 168 | with facilities for running those works, provided that you comply with 169 | the terms of this License in conveying all material for which you do 170 | not control copyright. Those thus making or running the covered works 171 | for you must do so exclusively on your behalf, under your direction 172 | and control, on terms that prohibit them from making any copies of 173 | your copyrighted material outside their relationship with you. 174 | 175 | Conveying under any other circumstances is permitted solely under 176 | the conditions stated below. Sublicensing is not allowed; section 10 177 | makes it unnecessary. 178 | 179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 180 | 181 | No covered work shall be deemed part of an effective technological 182 | measure under any applicable law fulfilling obligations under article 183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 184 | similar laws prohibiting or restricting circumvention of such 185 | measures. 186 | 187 | When you convey a covered work, you waive any legal power to forbid 188 | circumvention of technological measures to the extent such circumvention 189 | is effected by exercising rights under this License with respect to 190 | the covered work, and you disclaim any intention to limit operation or 191 | modification of the work as a means of enforcing, against the work's 192 | users, your or third parties' legal rights to forbid circumvention of 193 | technological measures. 194 | 195 | 4. Conveying Verbatim Copies. 196 | 197 | You may convey verbatim copies of the Program's source code as you 198 | receive it, in any medium, provided that you conspicuously and 199 | appropriately publish on each copy an appropriate copyright notice; 200 | keep intact all notices stating that this License and any 201 | non-permissive terms added in accord with section 7 apply to the code; 202 | keep intact all notices of the absence of any warranty; and give all 203 | recipients a copy of this License along with the Program. 204 | 205 | You may charge any price or no price for each copy that you convey, 206 | and you may offer support or warranty protection for a fee. 207 | 208 | 5. Conveying Modified Source Versions. 209 | 210 | You may convey a work based on the Program, or the modifications to 211 | produce it from the Program, in the form of source code under the 212 | terms of section 4, provided that you also meet all of these conditions: 213 | 214 | a) The work must carry prominent notices stating that you modified 215 | it, and giving a relevant date. 216 | 217 | b) The work must carry prominent notices stating that it is 218 | released under this License and any conditions added under section 219 | 7. This requirement modifies the requirement in section 4 to 220 | "keep intact all notices". 221 | 222 | c) You must license the entire work, as a whole, under this 223 | License to anyone who comes into possession of a copy. 
This 224 | License will therefore apply, along with any applicable section 7 225 | additional terms, to the whole of the work, and all its parts, 226 | regardless of how they are packaged. This License gives no 227 | permission to license the work in any other way, but it does not 228 | invalidate such permission if you have separately received it. 229 | 230 | d) If the work has interactive user interfaces, each must display 231 | Appropriate Legal Notices; however, if the Program has interactive 232 | interfaces that do not display Appropriate Legal Notices, your 233 | work need not make them do so. 234 | 235 | A compilation of a covered work with other separate and independent 236 | works, which are not by their nature extensions of the covered work, 237 | and which are not combined with it such as to form a larger program, 238 | in or on a volume of a storage or distribution medium, is called an 239 | "aggregate" if the compilation and its resulting copyright are not 240 | used to limit the access or legal rights of the compilation's users 241 | beyond what the individual works permit. Inclusion of a covered work 242 | in an aggregate does not cause this License to apply to the other 243 | parts of the aggregate. 244 | 245 | 6. Conveying Non-Source Forms. 246 | 247 | You may convey a covered work in object code form under the terms 248 | of sections 4 and 5, provided that you also convey the 249 | machine-readable Corresponding Source under the terms of this License, 250 | in one of these ways: 251 | 252 | a) Convey the object code in, or embodied in, a physical product 253 | (including a physical distribution medium), accompanied by the 254 | Corresponding Source fixed on a durable physical medium 255 | customarily used for software interchange. 256 | 257 | b) Convey the object code in, or embodied in, a physical product 258 | (including a physical distribution medium), accompanied by a 259 | written offer, valid for at least three years and valid for as 260 | long as you offer spare parts or customer support for that product 261 | model, to give anyone who possesses the object code either (1) a 262 | copy of the Corresponding Source for all the software in the 263 | product that is covered by this License, on a durable physical 264 | medium customarily used for software interchange, for a price no 265 | more than your reasonable cost of physically performing this 266 | conveying of source, or (2) access to copy the 267 | Corresponding Source from a network server at no charge. 268 | 269 | c) Convey individual copies of the object code with a copy of the 270 | written offer to provide the Corresponding Source. This 271 | alternative is allowed only occasionally and noncommercially, and 272 | only if you received the object code with such an offer, in accord 273 | with subsection 6b. 274 | 275 | d) Convey the object code by offering access from a designated 276 | place (gratis or for a charge), and offer equivalent access to the 277 | Corresponding Source in the same way through the same place at no 278 | further charge. You need not require recipients to copy the 279 | Corresponding Source along with the object code. If the place to 280 | copy the object code is a network server, the Corresponding Source 281 | may be on a different server (operated by you or a third party) 282 | that supports equivalent copying facilities, provided you maintain 283 | clear directions next to the object code saying where to find the 284 | Corresponding Source. 
Regardless of what server hosts the 285 | Corresponding Source, you remain obligated to ensure that it is 286 | available for as long as needed to satisfy these requirements. 287 | 288 | e) Convey the object code using peer-to-peer transmission, provided 289 | you inform other peers where the object code and Corresponding 290 | Source of the work are being offered to the general public at no 291 | charge under subsection 6d. 292 | 293 | A separable portion of the object code, whose source code is excluded 294 | from the Corresponding Source as a System Library, need not be 295 | included in conveying the object code work. 296 | 297 | A "User Product" is either (1) a "consumer product", which means any 298 | tangible personal property which is normally used for personal, family, 299 | or household purposes, or (2) anything designed or sold for incorporation 300 | into a dwelling. In determining whether a product is a consumer product, 301 | doubtful cases shall be resolved in favor of coverage. For a particular 302 | product received by a particular user, "normally used" refers to a 303 | typical or common use of that class of product, regardless of the status 304 | of the particular user or of the way in which the particular user 305 | actually uses, or expects or is expected to use, the product. A product 306 | is a consumer product regardless of whether the product has substantial 307 | commercial, industrial or non-consumer uses, unless such uses represent 308 | the only significant mode of use of the product. 309 | 310 | "Installation Information" for a User Product means any methods, 311 | procedures, authorization keys, or other information required to install 312 | and execute modified versions of a covered work in that User Product from 313 | a modified version of its Corresponding Source. The information must 314 | suffice to ensure that the continued functioning of the modified object 315 | code is in no case prevented or interfered with solely because 316 | modification has been made. 317 | 318 | If you convey an object code work under this section in, or with, or 319 | specifically for use in, a User Product, and the conveying occurs as 320 | part of a transaction in which the right of possession and use of the 321 | User Product is transferred to the recipient in perpetuity or for a 322 | fixed term (regardless of how the transaction is characterized), the 323 | Corresponding Source conveyed under this section must be accompanied 324 | by the Installation Information. But this requirement does not apply 325 | if neither you nor any third party retains the ability to install 326 | modified object code on the User Product (for example, the work has 327 | been installed in ROM). 328 | 329 | The requirement to provide Installation Information does not include a 330 | requirement to continue to provide support service, warranty, or updates 331 | for a work that has been modified or installed by the recipient, or for 332 | the User Product in which it has been modified or installed. Access to a 333 | network may be denied when the modification itself materially and 334 | adversely affects the operation of the network or violates the rules and 335 | protocols for communication across the network. 
336 | 337 | Corresponding Source conveyed, and Installation Information provided, 338 | in accord with this section must be in a format that is publicly 339 | documented (and with an implementation available to the public in 340 | source code form), and must require no special password or key for 341 | unpacking, reading or copying. 342 | 343 | 7. Additional Terms. 344 | 345 | "Additional permissions" are terms that supplement the terms of this 346 | License by making exceptions from one or more of its conditions. 347 | Additional permissions that are applicable to the entire Program shall 348 | be treated as though they were included in this License, to the extent 349 | that they are valid under applicable law. If additional permissions 350 | apply only to part of the Program, that part may be used separately 351 | under those permissions, but the entire Program remains governed by 352 | this License without regard to the additional permissions. 353 | 354 | When you convey a copy of a covered work, you may at your option 355 | remove any additional permissions from that copy, or from any part of 356 | it. (Additional permissions may be written to require their own 357 | removal in certain cases when you modify the work.) You may place 358 | additional permissions on material, added by you to a covered work, 359 | for which you have or can give appropriate copyright permission. 360 | 361 | Notwithstanding any other provision of this License, for material you 362 | add to a covered work, you may (if authorized by the copyright holders of 363 | that material) supplement the terms of this License with terms: 364 | 365 | a) Disclaiming warranty or limiting liability differently from the 366 | terms of sections 15 and 16 of this License; or 367 | 368 | b) Requiring preservation of specified reasonable legal notices or 369 | author attributions in that material or in the Appropriate Legal 370 | Notices displayed by works containing it; or 371 | 372 | c) Prohibiting misrepresentation of the origin of that material, or 373 | requiring that modified versions of such material be marked in 374 | reasonable ways as different from the original version; or 375 | 376 | d) Limiting the use for publicity purposes of names of licensors or 377 | authors of the material; or 378 | 379 | e) Declining to grant rights under trademark law for use of some 380 | trade names, trademarks, or service marks; or 381 | 382 | f) Requiring indemnification of licensors and authors of that 383 | material by anyone who conveys the material (or modified versions of 384 | it) with contractual assumptions of liability to the recipient, for 385 | any liability that these contractual assumptions directly impose on 386 | those licensors and authors. 387 | 388 | All other non-permissive additional terms are considered "further 389 | restrictions" within the meaning of section 10. If the Program as you 390 | received it, or any part of it, contains a notice stating that it is 391 | governed by this License along with a term that is a further 392 | restriction, you may remove that term. If a license document contains 393 | a further restriction but permits relicensing or conveying under this 394 | License, you may add to a covered work material governed by the terms 395 | of that license document, provided that the further restriction does 396 | not survive such relicensing or conveying. 
397 | 398 | If you add terms to a covered work in accord with this section, you 399 | must place, in the relevant source files, a statement of the 400 | additional terms that apply to those files, or a notice indicating 401 | where to find the applicable terms. 402 | 403 | Additional terms, permissive or non-permissive, may be stated in the 404 | form of a separately written license, or stated as exceptions; 405 | the above requirements apply either way. 406 | 407 | 8. Termination. 408 | 409 | You may not propagate or modify a covered work except as expressly 410 | provided under this License. Any attempt otherwise to propagate or 411 | modify it is void, and will automatically terminate your rights under 412 | this License (including any patent licenses granted under the third 413 | paragraph of section 11). 414 | 415 | However, if you cease all violation of this License, then your 416 | license from a particular copyright holder is reinstated (a) 417 | provisionally, unless and until the copyright holder explicitly and 418 | finally terminates your license, and (b) permanently, if the copyright 419 | holder fails to notify you of the violation by some reasonable means 420 | prior to 60 days after the cessation. 421 | 422 | Moreover, your license from a particular copyright holder is 423 | reinstated permanently if the copyright holder notifies you of the 424 | violation by some reasonable means, this is the first time you have 425 | received notice of violation of this License (for any work) from that 426 | copyright holder, and you cure the violation prior to 30 days after 427 | your receipt of the notice. 428 | 429 | Termination of your rights under this section does not terminate the 430 | licenses of parties who have received copies or rights from you under 431 | this License. If your rights have been terminated and not permanently 432 | reinstated, you do not qualify to receive new licenses for the same 433 | material under section 10. 434 | 435 | 9. Acceptance Not Required for Having Copies. 436 | 437 | You are not required to accept this License in order to receive or 438 | run a copy of the Program. Ancillary propagation of a covered work 439 | occurring solely as a consequence of using peer-to-peer transmission 440 | to receive a copy likewise does not require acceptance. However, 441 | nothing other than this License grants you permission to propagate or 442 | modify any covered work. These actions infringe copyright if you do 443 | not accept this License. Therefore, by modifying or propagating a 444 | covered work, you indicate your acceptance of this License to do so. 445 | 446 | 10. Automatic Licensing of Downstream Recipients. 447 | 448 | Each time you convey a covered work, the recipient automatically 449 | receives a license from the original licensors, to run, modify and 450 | propagate that work, subject to this License. You are not responsible 451 | for enforcing compliance by third parties with this License. 452 | 453 | An "entity transaction" is a transaction transferring control of an 454 | organization, or substantially all assets of one, or subdividing an 455 | organization, or merging organizations. 
If propagation of a covered 456 | work results from an entity transaction, each party to that 457 | transaction who receives a copy of the work also receives whatever 458 | licenses to the work the party's predecessor in interest had or could 459 | give under the previous paragraph, plus a right to possession of the 460 | Corresponding Source of the work from the predecessor in interest, if 461 | the predecessor has it or can get it with reasonable efforts. 462 | 463 | You may not impose any further restrictions on the exercise of the 464 | rights granted or affirmed under this License. For example, you may 465 | not impose a license fee, royalty, or other charge for exercise of 466 | rights granted under this License, and you may not initiate litigation 467 | (including a cross-claim or counterclaim in a lawsuit) alleging that 468 | any patent claim is infringed by making, using, selling, offering for 469 | sale, or importing the Program or any portion of it. 470 | 471 | 11. Patents. 472 | 473 | A "contributor" is a copyright holder who authorizes use under this 474 | License of the Program or a work on which the Program is based. The 475 | work thus licensed is called the contributor's "contributor version". 476 | 477 | A contributor's "essential patent claims" are all patent claims 478 | owned or controlled by the contributor, whether already acquired or 479 | hereafter acquired, that would be infringed by some manner, permitted 480 | by this License, of making, using, or selling its contributor version, 481 | but do not include claims that would be infringed only as a 482 | consequence of further modification of the contributor version. For 483 | purposes of this definition, "control" includes the right to grant 484 | patent sublicenses in a manner consistent with the requirements of 485 | this License. 486 | 487 | Each contributor grants you a non-exclusive, worldwide, royalty-free 488 | patent license under the contributor's essential patent claims, to 489 | make, use, sell, offer for sale, import and otherwise run, modify and 490 | propagate the contents of its contributor version. 491 | 492 | In the following three paragraphs, a "patent license" is any express 493 | agreement or commitment, however denominated, not to enforce a patent 494 | (such as an express permission to practice a patent or covenant not to 495 | sue for patent infringement). To "grant" such a patent license to a 496 | party means to make such an agreement or commitment not to enforce a 497 | patent against the party. 498 | 499 | If you convey a covered work, knowingly relying on a patent license, 500 | and the Corresponding Source of the work is not available for anyone 501 | to copy, free of charge and under the terms of this License, through a 502 | publicly available network server or other readily accessible means, 503 | then you must either (1) cause the Corresponding Source to be so 504 | available, or (2) arrange to deprive yourself of the benefit of the 505 | patent license for this particular work, or (3) arrange, in a manner 506 | consistent with the requirements of this License, to extend the patent 507 | license to downstream recipients. "Knowingly relying" means you have 508 | actual knowledge that, but for the patent license, your conveying the 509 | covered work in a country, or your recipient's use of the covered work 510 | in a country, would infringe one or more identifiable patents in that 511 | country that you have reason to believe are valid. 
512 | 513 | If, pursuant to or in connection with a single transaction or 514 | arrangement, you convey, or propagate by procuring conveyance of, a 515 | covered work, and grant a patent license to some of the parties 516 | receiving the covered work authorizing them to use, propagate, modify 517 | or convey a specific copy of the covered work, then the patent license 518 | you grant is automatically extended to all recipients of the covered 519 | work and works based on it. 520 | 521 | A patent license is "discriminatory" if it does not include within 522 | the scope of its coverage, prohibits the exercise of, or is 523 | conditioned on the non-exercise of one or more of the rights that are 524 | specifically granted under this License. You may not convey a covered 525 | work if you are a party to an arrangement with a third party that is 526 | in the business of distributing software, under which you make payment 527 | to the third party based on the extent of your activity of conveying 528 | the work, and under which the third party grants, to any of the 529 | parties who would receive the covered work from you, a discriminatory 530 | patent license (a) in connection with copies of the covered work 531 | conveyed by you (or copies made from those copies), or (b) primarily 532 | for and in connection with specific products or compilations that 533 | contain the covered work, unless you entered into that arrangement, 534 | or that patent license was granted, prior to 28 March 2007. 535 | 536 | Nothing in this License shall be construed as excluding or limiting 537 | any implied license or other defenses to infringement that may 538 | otherwise be available to you under applicable patent law. 539 | 540 | 12. No Surrender of Others' Freedom. 541 | 542 | If conditions are imposed on you (whether by court order, agreement or 543 | otherwise) that contradict the conditions of this License, they do not 544 | excuse you from the conditions of this License. If you cannot convey a 545 | covered work so as to satisfy simultaneously your obligations under this 546 | License and any other pertinent obligations, then as a consequence you may 547 | not convey it at all. For example, if you agree to terms that obligate you 548 | to collect a royalty for further conveying from those to whom you convey 549 | the Program, the only way you could satisfy both those terms and this 550 | License would be to refrain entirely from conveying the Program. 551 | 552 | 13. Use with the GNU Affero General Public License. 553 | 554 | Notwithstanding any other provision of this License, you have 555 | permission to link or combine any covered work with a work licensed 556 | under version 3 of the GNU Affero General Public License into a single 557 | combined work, and to convey the resulting work. The terms of this 558 | License will continue to apply to the part which is the covered work, 559 | but the special requirements of the GNU Affero General Public License, 560 | section 13, concerning interaction through a network will apply to the 561 | combination as such. 562 | 563 | 14. Revised Versions of this License. 564 | 565 | The Free Software Foundation may publish revised and/or new versions of 566 | the GNU General Public License from time to time. Such new versions will 567 | be similar in spirit to the present version, but may differ in detail to 568 | address new problems or concerns. 569 | 570 | Each version is given a distinguishing version number. 
If the 571 | Program specifies that a certain numbered version of the GNU General 572 | Public License "or any later version" applies to it, you have the 573 | option of following the terms and conditions either of that numbered 574 | version or of any later version published by the Free Software 575 | Foundation. If the Program does not specify a version number of the 576 | GNU General Public License, you may choose any version ever published 577 | by the Free Software Foundation. 578 | 579 | If the Program specifies that a proxy can decide which future 580 | versions of the GNU General Public License can be used, that proxy's 581 | public statement of acceptance of a version permanently authorizes you 582 | to choose that version for the Program. 583 | 584 | Later license versions may give you additional or different 585 | permissions. However, no additional obligations are imposed on any 586 | author or copyright holder as a result of your choosing to follow a 587 | later version. 588 | 589 | 15. Disclaimer of Warranty. 590 | 591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY 592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT 593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY 594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, 595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM 597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF 598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 599 | 600 | 16. Limitation of Liability. 601 | 602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS 604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY 605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE 606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF 607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD 608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), 609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF 610 | SUCH DAMAGES. 611 | 612 | 17. Interpretation of Sections 15 and 16. 613 | 614 | If the disclaimer of warranty and limitation of liability provided 615 | above cannot be given local legal effect according to their terms, 616 | reviewing courts shall apply local law that most closely approximates 617 | an absolute waiver of all civil liability in connection with the 618 | Program, unless a warranty or assumption of liability accompanies a 619 | copy of the Program in return for a fee. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ![DocSum Logo](docsum.png) 2 | # DocSum 3 | > A tool to automatically summarize documents (or plain text) using either the BART or PreSumm Machine Learning Model. 
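
For example, once the environment is set up (see the Installation section below), an end-to-end run could look like this sketch (the font IDs `5`, `23`, and `11` are illustrative; find the right ones for your document as explained in the PDF Structure section):

```bash
# Convert the PDF to XML, then summarize the body text with BART
pdftohtml book.pdf -i -s -c -xml book.xml
python main.py book.xml -m bart -cf 5 -bhf 23 -bf 11
```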
4 | 5 | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/hhousen/docsum/blob/master/DocSum.ipynb) 6 | 7 | **BART** ([BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension](https://arxiv.org/pdf/1910.13461.pdf)) is the state-of-the-art in text summarization as of 02/02/2020. It is a "sequence-to-sequence model trained with denoising as pretraining objective" ([Documentation & Examples](https://github.com/pytorch/fairseq/blob/master/examples/bart/README.md)). 8 | 9 | **PreSumm** ([Text Summarization with Pretrained Encoders](https://arxiv.org/pdf/1908.08345.pdf)) applies BERT (Bidirectional Encoder Representations from Transformers) to text summarization by using "a novel document-level encoder based on BERT which is able to express the semantics of a document and obtain representations for its sentences." BERT represented "the latest incarnation of pretrained language models which have recently advanced a wide range of natural language processing tasks" at the time of writing ([Documentation & Examples](https://github.com/nlpyang/PreSumm)). 10 | 11 | ## Tasks 12 | 13 | 1. Convert a PDF to XML and then interpret that XML file using the `font` property of each `text` element using [main.py](main.py). Utilizes the [xml.etree.elementtree](https://docs.python.org/3/library/xml.etree.elementtree.html) python library. 14 | 2. Summarize raw text input using [cmd_summarizer.py](cmd_summarizer.py). You can run this in Google Colaboratory by clicking this button: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/hhousen/docsum/blob/master/DocSum.ipynb) 15 | 3. Summarize multiple text files using [presumm/run_summarization.py](presumm/run_summarization.py) 16 | 17 | ## Getting Started 18 | These instructions will get you a copy of the project up and running on your local machine. 19 | 20 | ### Prerequisites 21 | * [Python](https://www.python.org/) 22 | * [Git](https://git-scm.com/) 23 | * [Conda](https://docs.conda.io/projects/conda/en/latest/user-guide/install/) 24 | 25 | ### Installation 26 | 27 | ```bash 28 | sudo apt install poppler-utils 29 | git clone https://github.com/HHousen/docsum.git 30 | cd docsum 31 | conda env create --file environment.yml 32 | conda activate docsum 33 | ``` 34 | 35 | ### To convert PDF to XML 36 | 37 | ``` 38 | pdftohtml input.pdf -i -s -c -xml output.xml 39 | ``` 40 | 41 | ## Project Structure 42 | ```bash 43 | DocSum 44 | ├── bart_sum.py 45 | ├── cmd_summarizer.py 46 | ├── docsum.png 47 | ├── environment.yml 48 | ├── LICENSE 49 | ├── main.py 50 | ├── presumm 51 | │ ├── configuration_bertabs.py 52 | │ ├── __init__.py 53 | │ ├── modeling_bertabs.py 54 | │ ├── presumm.py 55 | │ ├── run_summarization.py 56 | │ └── utils_summarization.py 57 | ├── README.md 58 | └── xml_processor.py 59 | ``` 60 | 61 | ## Usage 62 | Output of `python main.py --help`: 63 | ``` 64 | usage: main.py [-h] [-t {pdf,xml}] [-m {bart,presumm}] [--bart_checkpoint PATH] [--bart_state_dict_key PATH] [--bart_fairseq] -cf N [N ...] 65 | -bhf N [N ...] -bf N [N ...] 
[-ns] [--output_xml_path PATH] [-l {DEBUG,INFO,WARNING,ERROR,CRITICAL}]
66 |                PATH
67 | 
68 | Summarization of PDFs using BART or PreSumm
69 | 
70 | positional arguments:
71 |   PATH                  path to input file
72 | 
73 | optional arguments:
74 |   -h, --help            show this help message and exit
75 |   -t {pdf,xml}, --file_type {pdf,xml}
76 |                         type of file to summarize
77 |   -m {bart,presumm}, --model {bart,presumm}
78 |                         machine learning model choice
79 |   --bart_checkpoint PATH
80 |                         [BART Only] Path to optional checkpoint. Semsim is a better model but will use more memory and is an additional 5GB
81 |                         download. (default: none, recommended: semsim)
82 |   --bart_state_dict_key PATH
83 |                         [BART Only] model state_dict key to load from pickle file specified with --bart_checkpoint (default: "model")
84 |   --bart_fairseq        [BART Only] Use fairseq model from torch hub instead of huggingface transformers library models. Cannot use
85 |                         --bart_checkpoint if this option is supplied.
86 |   -cf N [N ...], --chapter_heading_font N [N ...]
87 |                         font of chapter titles
88 |   -bhf N [N ...], --body_heading_font N [N ...]
89 |                         font of headings within chapter
90 |   -bf N [N ...], --body_font N [N ...]
91 |                         font of body (the text you want to summarize)
92 |   -ns, --no_summarize   do not run the summarization step
93 |   --output_xml_path PATH
94 |                         path to output XML file if `file_type` is `pdf`
95 |   -l {DEBUG,INFO,WARNING,ERROR,CRITICAL}, --log {DEBUG,INFO,WARNING,ERROR,CRITICAL}
96 |                         Set the logging level (default: 'INFO').
97 | ```
98 | 
99 | Output of `python cmd_summarizer.py --help`
100 | 
101 | ```
102 | usage: cmd_summarizer.py [-h] -m {bart,presumm} [--bart_checkpoint PATH] [--bart_state_dict_key PATH] [--bart_fairseq]
103 |                          [-l {DEBUG,INFO,WARNING,ERROR,CRITICAL}]
104 | 
105 | Summarization of text using CMD prompt
106 | 
107 | optional arguments:
108 |   -h, --help            show this help message and exit
109 |   -m {bart,presumm}, --model {bart,presumm}
110 |                         machine learning model choice
111 |   --bart_checkpoint PATH
112 |                         [BART Only] Path to optional checkpoint. Semsim is a better model but will use more memory and is an additional 5GB
113 |                         download. (default: none, recommended: semsim)
114 |   --bart_state_dict_key PATH
115 |                         [BART Only] model state_dict key to load from pickle file specified with --bart_checkpoint (default: "model")
116 |   --bart_fairseq        [BART Only] Use fairseq model from torch hub instead of huggingface transformers library models. Cannot use
117 |                         --bart_checkpoint if this option is supplied.
118 |   -l {DEBUG,INFO,WARNING,ERROR,CRITICAL}, --log {DEBUG,INFO,WARNING,ERROR,CRITICAL}
119 |                         Set the logging level (default: 'INFO').
120 | ```
121 | 
122 | Output of `python -m presumm.run_summarization --help`
123 | ```
124 | usage: run_summarization.py [-h] --documents_dir DOCUMENTS_DIR [--summaries_output_dir SUMMARIES_OUTPUT_DIR] [--compute_rouge COMPUTE_ROUGE]
125 |                             [--no_cuda NO_CUDA] [--batch_size BATCH_SIZE] [--min_length MIN_LENGTH] [--max_length MAX_LENGTH]
126 |                             [--beam_size BEAM_SIZE] [--alpha ALPHA] [--block_trigram BLOCK_TRIGRAM]
127 | 
128 | optional arguments:
129 |   -h, --help            show this help message and exit
130 |   --documents_dir DOCUMENTS_DIR
131 |                         The folder where the documents to summarize are located.
132 |   --summaries_output_dir SUMMARIES_OUTPUT_DIR
133 |                         The folder in which the summaries should be written. Defaults to the folder where the documents are located.
134 |   --compute_rouge COMPUTE_ROUGE
135 |                         Compute the ROUGE metrics during evaluation. Only available for the CNN/DailyMail dataset.
136 |   --no_cuda NO_CUDA     Whether to force the execution on CPU.
137 |   --batch_size BATCH_SIZE
138 |                         Batch size per GPU/CPU for training.
139 |   --min_length MIN_LENGTH
140 |                         Minimum number of tokens for the summaries.
141 |   --max_length MAX_LENGTH
142 |                         Maximum number of tokens for the summaries.
143 |   --beam_size BEAM_SIZE
144 |                         The number of beams to start with for each example.
145 |   --alpha ALPHA         The value of alpha for the length penalty in the beam search.
146 |   --block_trigram BLOCK_TRIGRAM
147 |                         Whether to block the existence of repeating trigrams in the text generated by beam search.
148 | ```
149 | 
150 | ### Notes
151 | 
152 | * `--file_type pdf` is only available on Linux and requires `poppler-utils` to be installed
153 | 
154 | ## PDF Structure
155 | 
156 | PDFs must be formatted in a specific way for this program to function. This program works with two levels of headings: `chapter` headings and `body` headings. `Chapter headings` contain many `body headings`, and each body heading contains many lines of `body text`. If your PDF file is organized in this way and you can find unique font styles in the XML representation, then this program should work.
157 | 
158 | Sometimes italics or other stylistic fonts may be represented by separate font numbers. If this is the case, simply run the command and pass in multiple font styles: `python main.py book.xml -cf 5 50 -bhf 23 34 60 -bf 11 132`.
159 | 
160 | ## Meta
161 | 
162 | Hayden Housen – [haydenhousen.com](https://haydenhousen.com)
163 | 
164 | Distributed under the GPLv3 license. See the [LICENSE](LICENSE) for more information.
165 | 
166 | 
167 | 
168 | PreSumm code extensively borrowed from the [Hugging Face Transformers Library](https://github.com/huggingface/transformers/tree/master/examples/summarization).
169 | 
170 | ## Contributing
171 | 
172 | All pull requests are welcome.
173 | 
174 | **Questions? Comments? Issues? Don't hesitate to open an [issue](https://github.com/HHousen/docsum/issues/new) and briefly describe what you are experiencing (with any error logs if necessary). Thanks.**
175 | 
176 | 1. Fork it ()
177 | 2. Create your feature branch (`git checkout -b feature/fooBar`)
178 | 3. Commit your changes (`git commit -am 'Add some fooBar'`)
179 | 4. Push to the branch (`git push origin feature/fooBar`)
180 | 5. Create a new Pull Request
181 | 
182 | ## To Do
183 | 
184 | * [ ] Make DocSum more robust to different PDF types (multi-layered headings)
185 | * [ ] Implement other summarization techniques
186 | * [ ] Implement automatic header detection ([Possibly this paper](https://arxiv.org/pdf/1809.01477.pdf))
--------------------------------------------------------------------------------
/bart_sum.py:
--------------------------------------------------------------------------------
1 | import os
2 | from pathlib import Path
3 | import appdirs
4 | import gdown
5 | import torch
6 | import logging
7 | from transformers import BartTokenizer, BartForConditionalGeneration, BartConfig
8 | 
9 | class BartSumSummarizer():
10 |     def __init__(self, device=None, checkpoint=None, state_dict_key='model', pretrained="facebook/bart-large-cnn", hg_transformers=True):
11 |         if not hg_transformers and checkpoint:
12 |             raise Exception("hg_transformers must be set to True in order to load from checkpoint")
13 | 
14 |         if not device:
15 |             device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
16 | 
17 |         # huggingface uses dashes and fairseq/torchhub uses dots (periods)
18 |         if pretrained:
19 |             if hg_transformers:
20 |                 pretrained = pretrained.replace(".", "-")
21 |             else:
22 |                 # only use the part after the "/"
23 |                 pretrained = pretrained.split("/")[-1].replace("-", ".")
24 | 
25 | 
26 |         if checkpoint is not None and "semsim" in checkpoint:
27 |             cache_dir = appdirs.user_cache_dir("DocSum", "HHousen")
28 |             output_file_path = os.path.join(cache_dir, "bart_semsim.pt")
29 |             if not os.path.isfile(output_file_path):
30 |                 if not os.path.exists(cache_dir):
31 |                     os.makedirs(cache_dir)
32 |                 gdown.download("https://drive.google.com/uc?id=1CNgK6ZkaqUD239h_6GkLmfUOGgryc2v9", output_file_path)
33 |             checkpoint = output_file_path
34 | 
35 |         if checkpoint:
36 |             loaded_checkpoint = torch.load(checkpoint)
37 |             model_state_dict = loaded_checkpoint[state_dict_key]
38 | 
39 |             bart = BartForConditionalGeneration.from_pretrained(pretrained, state_dict=model_state_dict)
40 |             tokenizer = BartTokenizer.from_pretrained(pretrained)  # the tokenizer holds no model weights, so no state_dict here
41 |             self.tokenizer = tokenizer
42 |         else:
43 |             if hg_transformers:
44 |                 bart = BartForConditionalGeneration.from_pretrained(pretrained)
45 |                 tokenizer = BartTokenizer.from_pretrained(pretrained)
46 |                 self.tokenizer = tokenizer
47 |             else:
48 |                 bart = torch.hub.load('pytorch/fairseq', pretrained)
49 |                 bart.to(device)
50 |                 bart.eval()
51 |                 bart.half()
52 | 
53 |         self.logger = logging.getLogger(__name__)
54 |         self.hg_transformers = hg_transformers
55 |         self.bart = bart
56 | 
57 |     def __call__(self, *args, **kwargs):
58 |         return self.summarize_string(*args, **kwargs)
59 | 
60 |     def summarize_string(self, source_line, min_length=55, max_length=140):
61 |         """Summarize a single document"""
62 |         self.logger.debug("min_length: " + str(min_length) + " - max_length: " + str(max_length))
63 | 
64 |         source_line = [source_line]
65 | 
66 |         if self.hg_transformers:
67 |             inputs = self.tokenizer.batch_encode_plus(source_line, max_length=1024, return_tensors='pt')
68 |             # Generate Summary
69 |             summary_ids = self.bart.generate(inputs['input_ids'], attention_mask=inputs['attention_mask'], num_beams=4, min_length=min_length, max_length=max_length)
70 | 
71 |             return [self.tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids][0]
72 |         else:
73 |             with torch.no_grad():
74 |                 # beam = beam size
75 |                 # lenpen = length penalty: <1.0 favors shorter, >1.0 favors longer sentences
76 |                 # max_len_a & max_len_b = generate sequences of maximum length ax + b, where x is the source length
77 |                 # min_len = minimum generation length
78 |                 # no_repeat_ngram_size = ngram blocking such that this size ngram cannot be repeated in the generation
79 |                 # https://fairseq.readthedocs.io/en/latest/command_line_tools.html
80 |                 # print("max_len_b " + str(max_len_b) + " min_len " + str(min_len))
81 |                 hypotheses = self.bart.sample(source_line, beam=4, lenpen=2.0, max_len_a=0, max_len_b=max_length, min_length=min_length, no_repeat_ngram_size=3)
82 |                 return hypotheses[0]
83 | 
--------------------------------------------------------------------------------
/cmd_summarizer.py:
--------------------------------------------------------------------------------
1 | import datetime
2 | import argparse
3 | import bart_sum
4 | import logging
5 | import presumm.presumm as presumm
6 | 
7 | logger = logging.getLogger(__name__)
8 | 
9 | def do_summarize(contents):
10 |     document = str(contents)
11 |     logger.info("Document Created")
12 | 
13 | 
14 |     doc_length = len(document.split())
15 |     logger.info("Document Length: " + str(doc_length))
16 | 
17 |     min_length = int(doc_length/6)
18 |     logger.info("min_length: " + str(min_length))
19 |     max_length = min_length+200
20 |     logger.info("max_length: " + str(max_length))
21 | 
22 |     transcript_summarized = summarizer.summarize_string(document, min_length=min_length, max_length=max_length)
23 |     with open("summarized.txt", 'a+') as file:
24 |         file.write("\n" + str(datetime.datetime.now()) + ":\n")
25 |         file.write(transcript_summarized + "\n")
26 | 
27 | parser = argparse.ArgumentParser(description='Summarization of text using CMD prompt')
28 | parser.add_argument('-m', '--model', choices=["bart", "presumm"], required=True,
29 |                     help='machine learning model choice')
30 | parser.add_argument('--bart_checkpoint', default=None, type=str, metavar='PATH',
31 |                     help='[BART Only] Path to optional checkpoint. Semsim is a better model but will use more memory and is an additional 5GB download. (default: none, recommended: semsim)')
32 | parser.add_argument('--bart_state_dict_key', default='model', type=str, metavar='PATH',
33 |                     help='[BART Only] model state_dict key to load from pickle file specified with --bart_checkpoint (default: "model")')
34 | parser.add_argument('--bart_fairseq', action='store_true',
35 |                     help='[BART Only] Use fairseq model from torch hub instead of huggingface transformers library models. Cannot use --bart_checkpoint if this option is supplied.')
36 | parser.add_argument('--text', default=None, type=str,
37 |                     help='Optional text to summarize if you cannot paste it using an interactive shell.')
38 | parser.add_argument("-l", "--log", dest="logLevel", default='INFO',
39 |                     choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
40 |                     help="Set the logging level (default: 'INFO').")
41 | args = parser.parse_args()
42 | 
43 | logging.basicConfig(format="%(asctime)s|%(name)s|%(levelname)s> %(message)s", level=logging.getLevelName(args.logLevel))
44 | 
45 | logger.info("Loading Model")
46 | if args.model == "bart":
47 |     summarizer = bart_sum.BartSumSummarizer(checkpoint=args.bart_checkpoint,
48 |                                             state_dict_key=args.bart_state_dict_key,
49 |                                             hg_transformers=(not args.bart_fairseq))
50 | elif args.model == "presumm":
51 |     summarizer = presumm.PreSummSummarizer()
52 | 
53 | if args.text:
54 |     do_summarize(args.text)
55 | else:
56 |     try:
57 |         while True:
58 |             print("Enter/Paste your content. Ctrl-D or Ctrl-Z (Windows) to save it. Ctrl-C to exit.")
59 |             contents = ""
60 |             while True:
61 |                 try:
62 |                     line = input()
63 |                 except EOFError:
64 |                     break
65 |                 contents += (line.strip() + " ")
66 | 
67 |             do_summarize(contents)
68 | 
69 |     except KeyboardInterrupt:
70 |         print("Exiting...")
--------------------------------------------------------------------------------
/docsum.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HHousen/DocSum/47c4d91a094bd6c0ede3eb575eb80b245e6f150e/docsum.png
--------------------------------------------------------------------------------
/environment.yml:
--------------------------------------------------------------------------------
1 | name: docsum
2 | channels:
3 |   - conda-forge
4 |   - pytorch
5 | dependencies:
6 |   - pytorch
7 |   - tqdm
8 |   - unidecode
9 |   - regex
10 |   - requests
11 |   - appdirs
12 |   - gdown
13 |   - pip
14 |   - pip:
15 |     - transformers==3.0.2
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | import bart_sum
2 | import presumm.presumm as presumm
3 | import os
4 | import xml_processor
5 | import argparse
6 | import logging
7 | from tqdm import tqdm
8 | 
9 | parser = argparse.ArgumentParser(description='Summarization of PDFs using BART or PreSumm')
10 | parser.add_argument('file_path', metavar='PATH',
11 |                     help='path to input file')
12 | parser.add_argument('-t', '--file_type', default="xml", choices=["pdf", "xml"],
13 |                     help='type of file to summarize')
14 | parser.add_argument('-m', '--model', default="bart", choices=["bart", "presumm"],
15 |                     help='machine learning model choice')
16 | parser.add_argument('--bart_checkpoint', default=None, type=str, metavar='PATH',
17 |                     help='[BART Only] Path to optional checkpoint. Semsim is a better model but will use more memory and is an additional 5GB download. (default: none, recommended: semsim)')
18 | parser.add_argument('--bart_state_dict_key', default='model', type=str, metavar='PATH',
19 |                     help='[BART Only] model state_dict key to load from pickle file specified with --bart_checkpoint (default: "model")')
20 | parser.add_argument('--bart_fairseq', action='store_true',
21 |                     help='[BART Only] Use fairseq model from torch hub instead of huggingface transformers library models. Cannot use --bart_checkpoint if this option is supplied.')
22 | parser.add_argument('-cf', '--chapter_heading_font', nargs='+', default=0, type=int, metavar='N', required=True,
23 |                     help='font of chapter titles')
24 | parser.add_argument('-bhf', '--body_heading_font', nargs='+', default=0, type=int, metavar='N', required=True,
25 |                     help='font of headings within chapter')
26 | parser.add_argument('-bf', '--body_font', nargs='+', default=0, type=int, metavar='N', required=True,
27 |                     help='font of body (the text you want to summarize)')
28 | parser.add_argument('-ns', '--no_summarize', action='store_true',
29 |                     help='do not run the summarization step')
30 | parser.add_argument('--output_xml_path', metavar='PATH',
31 |                     help='path to output XML file if `file_type` is `pdf`')
32 | parser.add_argument("-l", "--log", dest="logLevel", default='INFO',
33 |                     choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
34 |                     help="Set the logging level (default: 'INFO').")
35 | args = parser.parse_args()
36 | 
37 | logging.basicConfig(format="%(asctime)s|%(name)s|%(levelname)s> %(message)s", level=logging.getLevelName(args.logLevel))
38 | 
39 | if args.file_type == "pdf":
40 |     if not args.output_xml_path:
41 |         args.output_xml_path = "output.xml"
42 |     os.system('pdftohtml ' + args.file_path + '.pdf -i -s -c -xml ' + args.output_xml_path)
43 |     args.file_path = args.output_xml_path
44 | 
45 | args.chapter_heading_font = [str(i) for i in args.chapter_heading_font]
46 | args.body_heading_font = [str(i) for i in args.body_heading_font]
47 | args.body_font = [str(i) for i in args.body_font]
48 | 
49 | xml_root = xml_processor.parse_xml(args.file_path)
50 | chapter_start_pages = xml_processor.get_chapter_page_numbers(xml_root, fonts=args.chapter_heading_font)
51 | book = xml_processor.process(xml_root, chapter_start_pages, heading_fonts=args.body_heading_font, body_fonts=args.body_font)
52 | 
53 | # Summarize each section of the `book` list
54 | if not args.no_summarize:
55 |     if args.model == "bart":
56 |         summarizer = bart_sum.BartSumSummarizer(checkpoint=args.bart_checkpoint,
57 |                                                 state_dict_key=args.bart_state_dict_key,
58 |                                                 hg_transformers=(not args.bart_fairseq))
59 |     elif args.model == "presumm":
60 |         summarizer = presumm.PreSummSummarizer()
61 | 
62 |     for chapter, content in tqdm(enumerate(book), total=len(book), desc="Chapter"):
63 |         for heading in tqdm(content, desc="Heading"):
64 |             document = content[heading]
65 |             doc_length = len(document.split())
66 |             min_length = int(doc_length/6)
67 |             max_length = min_length+200
68 |             content[heading] = summarizer.summarize_string(document, min_length=min_length, max_length=max_length)
69 | 
70 | # Save to file
71 | with open("output.txt", "w") as file:
72 |     for chapter, content in enumerate(book):
73 |         file.write("Chapter " + str(chapter) + "\n" + "---------------------------\n")
74 |         for heading in content:
75 |             file.write(heading + "\n")
76 |             file.write(content[heading] + "\n\n")
--------------------------------------------------------------------------------
/presumm/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HHousen/DocSum/47c4d91a094bd6c0ede3eb575eb80b245e6f150e/presumm/__init__.py
--------------------------------------------------------------------------------
/presumm/configuration_bertabs.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | # Copyright 2019 The HuggingFace Inc. team.
3 | # Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | #     http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | """ BertAbs configuration """
17 | import logging
18 | 
19 | from transformers import PretrainedConfig
20 | 
21 | 
22 | logger = logging.getLogger(__name__)
23 | 
24 | 
25 | BERTABS_FINETUNED_CONFIG_MAP = {
26 |     "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
27 | }
28 | 
29 | 
30 | class BertAbsConfig(PretrainedConfig):
31 |     r"""Class to store the configuration of the BertAbs model.
32 | 
33 |     Arguments:
34 |         vocab_size: int
35 |             Number of tokens in the vocabulary.
36 |         max_pos: int
37 |             The maximum sequence length that this model will be used with.
38 |         enc_layers: int
39 |             The number of hidden layers in the Transformer encoder.
40 |         enc_hidden_size: int
41 |             The size of the encoder's layers.
42 |         enc_heads: int
43 |             The number of attention heads for each attention layer in the encoder.
44 |         enc_ff_size: int
45 |             The size of the encoder's feed-forward layers.
46 |         enc_dropout: int
47 |             The dropout probability for all fully connected layers in the
48 |             embeddings, layers, pooler and also the attention probabilities in
49 |             the encoder.
50 |         dec_layers: int
51 |             The number of hidden layers in the decoder.
52 |         dec_hidden_size: int
53 |             The size of the decoder's layers.
54 |         dec_heads: int
55 |             The number of attention heads for each attention layer in the decoder.
56 |         dec_ff_size: int
57 |             The size of the decoder's feed-forward layers.
58 |         dec_dropout: int
59 |             The dropout probability for all fully connected layers in the
60 |             embeddings, layers, pooler and also the attention probabilities in
61 |             the decoder.
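
        Example (a minimal sketch; the values shown simply mirror the
        defaults in ``__init__`` below)::

            config = BertAbsConfig(vocab_size=30522, max_pos=512, enc_layers=6, dec_layers=6)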
62 | """ 63 | 64 | model_type = "bertabs" 65 | 66 | def __init__( 67 | self, 68 | vocab_size=30522, 69 | max_pos=512, 70 | enc_layers=6, 71 | enc_hidden_size=512, 72 | enc_heads=8, 73 | enc_ff_size=512, 74 | enc_dropout=0.2, 75 | dec_layers=6, 76 | dec_hidden_size=768, 77 | dec_heads=8, 78 | dec_ff_size=2048, 79 | dec_dropout=0.2, 80 | **kwargs, 81 | ): 82 | super().__init__(**kwargs) 83 | 84 | self.vocab_size = vocab_size 85 | self.max_pos = max_pos 86 | 87 | self.enc_layers = enc_layers 88 | self.enc_hidden_size = enc_hidden_size 89 | self.enc_heads = enc_heads 90 | self.enc_ff_size = enc_ff_size 91 | self.enc_dropout = enc_dropout 92 | 93 | self.dec_layers = dec_layers 94 | self.dec_hidden_size = dec_hidden_size 95 | self.dec_heads = dec_heads 96 | self.dec_ff_size = dec_ff_size 97 | self.dec_dropout = dec_dropout 98 | -------------------------------------------------------------------------------- /presumm/modeling_bertabs.py: -------------------------------------------------------------------------------- 1 | # MIT License 2 | 3 | # Copyright (c) 2019 Yang Liu and the HuggingFace team 4 | 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy 6 | # of this software and associated documentation files (the "Software"), to deal 7 | # in the Software without restriction, including without limitation the rights 8 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | # copies of the Software, and to permit persons to whom the Software is 10 | # furnished to do so, subject to the following conditions: 11 | 12 | # The above copyright notice and this permission notice shall be included in all 13 | # copies or substantial portions of the Software. 14 | 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | # SOFTWARE. 22 | import copy 23 | import math 24 | 25 | import numpy as np 26 | import torch 27 | from torch import nn 28 | from torch.nn.init import xavier_uniform_ 29 | 30 | from .configuration_bertabs import BertAbsConfig 31 | from transformers import BertConfig, BertModel, PreTrainedModel 32 | 33 | 34 | MAX_SIZE = 5000 35 | 36 | BERTABS_FINETUNED_MODEL_ARCHIVE_LIST = [ 37 | "remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization", 38 | ] 39 | 40 | 41 | class BertAbsPreTrainedModel(PreTrainedModel): 42 | config_class = BertAbsConfig 43 | load_tf_weights = False 44 | base_model_prefix = "bert" 45 | 46 | 47 | class BertAbs(BertAbsPreTrainedModel): 48 | def __init__(self, args, checkpoint=None, bert_extractive_checkpoint=None): 49 | super().__init__(args) 50 | self.args = args 51 | self.bert = Bert() 52 | 53 | # If pre-trained weights are passed for Bert, load these. 
54 | load_bert_pretrained_extractive = True if bert_extractive_checkpoint else False 55 | if load_bert_pretrained_extractive: 56 | self.bert.model.load_state_dict( 57 | dict([(n[11:], p) for n, p in bert_extractive_checkpoint.items() if n.startswith("bert.model")]), 58 | strict=True, 59 | ) 60 | 61 | self.vocab_size = self.bert.model.config.vocab_size 62 | 63 | if args.max_pos > 512: 64 | my_pos_embeddings = nn.Embedding(args.max_pos, self.bert.model.config.hidden_size) 65 | my_pos_embeddings.weight.data[:512] = self.bert.model.embeddings.position_embeddings.weight.data 66 | my_pos_embeddings.weight.data[512:] = self.bert.model.embeddings.position_embeddings.weight.data[-1][ 67 | None, : 68 | ].repeat(args.max_pos - 512, 1) 69 | self.bert.model.embeddings.position_embeddings = my_pos_embeddings 70 | tgt_embeddings = nn.Embedding(self.vocab_size, self.bert.model.config.hidden_size, padding_idx=0) 71 | 72 | tgt_embeddings.weight = copy.deepcopy(self.bert.model.embeddings.word_embeddings.weight) 73 | 74 | self.decoder = TransformerDecoder( 75 | self.args.dec_layers, 76 | self.args.dec_hidden_size, 77 | heads=self.args.dec_heads, 78 | d_ff=self.args.dec_ff_size, 79 | dropout=self.args.dec_dropout, 80 | embeddings=tgt_embeddings, 81 | vocab_size=self.vocab_size, 82 | ) 83 | 84 | gen_func = nn.LogSoftmax(dim=-1) 85 | self.generator = nn.Sequential(nn.Linear(args.dec_hidden_size, args.vocab_size), gen_func) 86 | self.generator[0].weight = self.decoder.embeddings.weight 87 | 88 | load_from_checkpoints = False if checkpoint is None else True 89 | if load_from_checkpoints: 90 | self.load_state_dict(checkpoint) 91 | 92 | def init_weights(self): 93 | for module in self.decoder.modules(): 94 | if isinstance(module, (nn.Linear, nn.Embedding)): 95 | module.weight.data.normal_(mean=0.0, std=0.02) 96 | elif isinstance(module, nn.LayerNorm): 97 | module.bias.data.zero_() 98 | module.weight.data.fill_(1.0) 99 | if isinstance(module, nn.Linear) and module.bias is not None: 100 | module.bias.data.zero_() 101 | for p in self.generator.parameters(): 102 | if p.dim() > 1: 103 | xavier_uniform_(p) 104 | else: 105 | p.data.zero_() 106 | 107 | def forward( 108 | self, 109 | encoder_input_ids, 110 | decoder_input_ids, 111 | token_type_ids, 112 | encoder_attention_mask, 113 | decoder_attention_mask, 114 | ): 115 | encoder_output = self.bert( 116 | input_ids=encoder_input_ids, 117 | token_type_ids=token_type_ids, 118 | attention_mask=encoder_attention_mask, 119 | ) 120 | encoder_hidden_states = encoder_output[0] 121 | dec_state = self.decoder.init_decoder_state(encoder_input_ids, encoder_hidden_states) 122 | decoder_outputs, _ = self.decoder(decoder_input_ids[:, :-1], encoder_hidden_states, dec_state) 123 | return decoder_outputs 124 | 125 | 126 | class Bert(nn.Module): 127 | """This class is not really necessary and should probably disappear.""" 128 | 129 | def __init__(self): 130 | super().__init__() 131 | config = BertConfig.from_pretrained("bert-base-uncased") 132 | self.model = BertModel(config) 133 | 134 | def forward(self, input_ids, attention_mask=None, token_type_ids=None, **kwargs): 135 | self.eval() 136 | with torch.no_grad(): 137 | encoder_outputs, _ = self.model( 138 | input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask, **kwargs 139 | ) 140 | return encoder_outputs 141 | 142 | 143 | class TransformerDecoder(nn.Module): 144 | """ 145 | The Transformer decoder from "Attention is All You Need". 146 | 147 | Args: 148 | num_layers (int): number of encoder layers. 
149 | d_model (int): size of the model 150 | heads (int): number of heads 151 | d_ff (int): size of the inner FF layer 152 | dropout (float): dropout parameters 153 | embeddings (:obj:`onmt.modules.Embeddings`): 154 | embeddings to use, should have positional encodings 155 | attn_type (str): if using a separate copy attention 156 | """ 157 | 158 | def __init__(self, num_layers, d_model, heads, d_ff, dropout, embeddings, vocab_size): 159 | super().__init__() 160 | 161 | # Basic attributes. 162 | self.decoder_type = "transformer" 163 | self.num_layers = num_layers 164 | self.embeddings = embeddings 165 | self.pos_emb = PositionalEncoding(dropout, self.embeddings.embedding_dim) 166 | 167 | # Build TransformerDecoder. 168 | self.transformer_layers = nn.ModuleList( 169 | [TransformerDecoderLayer(d_model, heads, d_ff, dropout) for _ in range(num_layers)] 170 | ) 171 | 172 | self.layer_norm = nn.LayerNorm(d_model, eps=1e-6) 173 | 174 | # forward(input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask) 175 | # def forward(self, input_ids, state, attention_mask=None, memory_lengths=None, 176 | # step=None, cache=None, encoder_attention_mask=None, encoder_hidden_states=None, memory_masks=None): 177 | def forward( 178 | self, 179 | input_ids, 180 | encoder_hidden_states=None, 181 | state=None, 182 | attention_mask=None, 183 | memory_lengths=None, 184 | step=None, 185 | cache=None, 186 | encoder_attention_mask=None, 187 | ): 188 | """ 189 | See :obj:`onmt.modules.RNNDecoderBase.forward()` 190 | memory_bank = encoder_hidden_states 191 | """ 192 | # Name conversion 193 | tgt = input_ids 194 | memory_bank = encoder_hidden_states 195 | memory_mask = encoder_attention_mask 196 | 197 | # src_words = state.src 198 | src_words = state.src 199 | src_batch, src_len = src_words.size() 200 | 201 | padding_idx = self.embeddings.padding_idx 202 | 203 | # Decoder padding mask 204 | tgt_words = tgt 205 | tgt_batch, tgt_len = tgt_words.size() 206 | tgt_pad_mask = tgt_words.data.eq(padding_idx).unsqueeze(1).expand(tgt_batch, tgt_len, tgt_len) 207 | 208 | # Encoder padding mask 209 | if memory_mask is not None: 210 | src_len = memory_mask.size(-1) 211 | src_pad_mask = memory_mask.expand(src_batch, tgt_len, src_len) 212 | else: 213 | src_pad_mask = src_words.data.eq(padding_idx).unsqueeze(1).expand(src_batch, tgt_len, src_len) 214 | 215 | # Pass through the embeddings 216 | emb = self.embeddings(input_ids) 217 | output = self.pos_emb(emb, step) 218 | assert emb.dim() == 3 # len x batch x embedding_dim 219 | 220 | if state.cache is None: 221 | saved_inputs = [] 222 | 223 | for i in range(self.num_layers): 224 | prev_layer_input = None 225 | if state.cache is None: 226 | if state.previous_input is not None: 227 | prev_layer_input = state.previous_layer_inputs[i] 228 | 229 | output, all_input = self.transformer_layers[i]( 230 | output, 231 | memory_bank, 232 | src_pad_mask, 233 | tgt_pad_mask, 234 | previous_input=prev_layer_input, 235 | layer_cache=state.cache["layer_{}".format(i)] if state.cache is not None else None, 236 | step=step, 237 | ) 238 | if state.cache is None: 239 | saved_inputs.append(all_input) 240 | 241 | if state.cache is None: 242 | saved_inputs = torch.stack(saved_inputs) 243 | 244 | output = self.layer_norm(output) 245 | 246 | if state.cache is None: 247 | state = state.update_state(tgt, saved_inputs) 248 | 249 | # Decoders in transformers return a tuple. Beam search will fail 250 | # if we don't follow this convention. 
251 | return output, state # , state 252 | 253 | def init_decoder_state(self, src, memory_bank, with_cache=False): 254 | """ Init decoder state """ 255 | state = TransformerDecoderState(src) 256 | if with_cache: 257 | state._init_cache(memory_bank, self.num_layers) 258 | return state 259 | 260 | 261 | class PositionalEncoding(nn.Module): 262 | def __init__(self, dropout, dim, max_len=5000): 263 | pe = torch.zeros(max_len, dim) 264 | position = torch.arange(0, max_len).unsqueeze(1) 265 | div_term = torch.exp((torch.arange(0, dim, 2, dtype=torch.float) * -(math.log(10000.0) / dim))) 266 | pe[:, 0::2] = torch.sin(position.float() * div_term) 267 | pe[:, 1::2] = torch.cos(position.float() * div_term) 268 | pe = pe.unsqueeze(0) 269 | super().__init__() 270 | self.register_buffer("pe", pe) 271 | self.dropout = nn.Dropout(p=dropout) 272 | self.dim = dim 273 | 274 | def forward(self, emb, step=None): 275 | emb = emb * math.sqrt(self.dim) 276 | if step: 277 | emb = emb + self.pe[:, step][:, None, :] 278 | 279 | else: 280 | emb = emb + self.pe[:, : emb.size(1)] 281 | emb = self.dropout(emb) 282 | return emb 283 | 284 | def get_emb(self, emb): 285 | return self.pe[:, : emb.size(1)] 286 | 287 | 288 | class TransformerDecoderLayer(nn.Module): 289 | """ 290 | Args: 291 | d_model (int): the dimension of keys/values/queries in 292 | MultiHeadedAttention, also the input size of 293 | the first-layer of the PositionwiseFeedForward. 294 | heads (int): the number of heads for MultiHeadedAttention. 295 | d_ff (int): the second-layer of the PositionwiseFeedForward. 296 | dropout (float): dropout probability(0-1.0). 297 | self_attn_type (string): type of self-attention scaled-dot, average 298 | """ 299 | 300 | def __init__(self, d_model, heads, d_ff, dropout): 301 | super().__init__() 302 | 303 | self.self_attn = MultiHeadedAttention(heads, d_model, dropout=dropout) 304 | 305 | self.context_attn = MultiHeadedAttention(heads, d_model, dropout=dropout) 306 | self.feed_forward = PositionwiseFeedForward(d_model, d_ff, dropout) 307 | self.layer_norm_1 = nn.LayerNorm(d_model, eps=1e-6) 308 | self.layer_norm_2 = nn.LayerNorm(d_model, eps=1e-6) 309 | self.drop = nn.Dropout(dropout) 310 | mask = self._get_attn_subsequent_mask(MAX_SIZE) 311 | # Register self.mask as a saved_state in TransformerDecoderLayer, so 312 | # it gets TransformerDecoderLayer's cuda behavior automatically. 
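        # (As a registered buffer, `mask` is saved with the module and moved by
        # .to()/.cuda(), but it is not a trainable parameter.)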
313 | self.register_buffer("mask", mask) 314 | 315 | def forward( 316 | self, 317 | inputs, 318 | memory_bank, 319 | src_pad_mask, 320 | tgt_pad_mask, 321 | previous_input=None, 322 | layer_cache=None, 323 | step=None, 324 | ): 325 | """ 326 | Args: 327 | inputs (`FloatTensor`): `[batch_size x 1 x model_dim]` 328 | memory_bank (`FloatTensor`): `[batch_size x src_len x model_dim]` 329 | src_pad_mask (`LongTensor`): `[batch_size x 1 x src_len]` 330 | tgt_pad_mask (`LongTensor`): `[batch_size x 1 x 1]` 331 | 332 | Returns: 333 | (`FloatTensor`, `FloatTensor`, `FloatTensor`): 334 | 335 | * output `[batch_size x 1 x model_dim]` 336 | * attn `[batch_size x 1 x src_len]` 337 | * all_input `[batch_size x current_step x model_dim]` 338 | 339 | """ 340 | dec_mask = torch.gt(tgt_pad_mask + self.mask[:, : tgt_pad_mask.size(1), : tgt_pad_mask.size(1)], 0) 341 | input_norm = self.layer_norm_1(inputs) 342 | all_input = input_norm 343 | if previous_input is not None: 344 | all_input = torch.cat((previous_input, input_norm), dim=1) 345 | dec_mask = None 346 | 347 | query = self.self_attn( 348 | all_input, 349 | all_input, 350 | input_norm, 351 | mask=dec_mask, 352 | layer_cache=layer_cache, 353 | type="self", 354 | ) 355 | 356 | query = self.drop(query) + inputs 357 | 358 | query_norm = self.layer_norm_2(query) 359 | mid = self.context_attn( 360 | memory_bank, 361 | memory_bank, 362 | query_norm, 363 | mask=src_pad_mask, 364 | layer_cache=layer_cache, 365 | type="context", 366 | ) 367 | output = self.feed_forward(self.drop(mid) + query) 368 | 369 | return output, all_input 370 | # return output 371 | 372 | def _get_attn_subsequent_mask(self, size): 373 | """ 374 | Get an attention mask to avoid using the subsequent info. 375 | 376 | Args: 377 | size: int 378 | 379 | Returns: 380 | (`LongTensor`): 381 | 382 | * subsequent_mask `[1 x size x size]` 383 | """ 384 | attn_shape = (1, size, size) 385 | subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype("uint8") 386 | subsequent_mask = torch.from_numpy(subsequent_mask) 387 | return subsequent_mask 388 | 389 | 390 | class MultiHeadedAttention(nn.Module): 391 | """ 392 | Multi-Head Attention module from 393 | "Attention is All You Need" 394 | :cite:`DBLP:journals/corr/VaswaniSPUJGKP17`. 395 | 396 | Similar to standard `dot` attention but uses 397 | multiple attention distributions simulataneously 398 | to select relevant items. 399 | 400 | .. mermaid:: 401 | 402 | graph BT 403 | A[key] 404 | B[value] 405 | C[query] 406 | O[output] 407 | subgraph Attn 408 | D[Attn 1] 409 | E[Attn 2] 410 | F[Attn N] 411 | end 412 | A --> D 413 | C --> D 414 | A --> E 415 | C --> E 416 | A --> F 417 | C --> F 418 | D --> O 419 | E --> O 420 | F --> O 421 | B --> O 422 | 423 | Also includes several additional tricks. 
424 | 425 | Args: 426 | head_count (int): number of parallel heads 427 | model_dim (int): the dimension of keys/values/queries, 428 | must be divisible by head_count 429 | dropout (float): dropout parameter 430 | """ 431 | 432 | def __init__(self, head_count, model_dim, dropout=0.1, use_final_linear=True): 433 | assert model_dim % head_count == 0 434 | self.dim_per_head = model_dim // head_count 435 | self.model_dim = model_dim 436 | 437 | super().__init__() 438 | self.head_count = head_count 439 | 440 | self.linear_keys = nn.Linear(model_dim, head_count * self.dim_per_head) 441 | self.linear_values = nn.Linear(model_dim, head_count * self.dim_per_head) 442 | self.linear_query = nn.Linear(model_dim, head_count * self.dim_per_head) 443 | self.softmax = nn.Softmax(dim=-1) 444 | self.dropout = nn.Dropout(dropout) 445 | self.use_final_linear = use_final_linear 446 | if self.use_final_linear: 447 | self.final_linear = nn.Linear(model_dim, model_dim) 448 | 449 | def forward( 450 | self, 451 | key, 452 | value, 453 | query, 454 | mask=None, 455 | layer_cache=None, 456 | type=None, 457 | predefined_graph_1=None, 458 | ): 459 | """ 460 | Compute the context vector and the attention vectors. 461 | 462 | Args: 463 | key (`FloatTensor`): set of `key_len` 464 | key vectors `[batch, key_len, dim]` 465 | value (`FloatTensor`): set of `key_len` 466 | value vectors `[batch, key_len, dim]` 467 | query (`FloatTensor`): set of `query_len` 468 | query vectors `[batch, query_len, dim]` 469 | mask: binary mask indicating which keys have 470 | non-zero attention `[batch, query_len, key_len]` 471 | Returns: 472 | (`FloatTensor`, `FloatTensor`) : 473 | 474 | * output context vectors `[batch, query_len, dim]` 475 | * one of the attention vectors `[batch, query_len, key_len]` 476 | """ 477 | batch_size = key.size(0) 478 | dim_per_head = self.dim_per_head 479 | head_count = self.head_count 480 | 481 | def shape(x): 482 | """ projection """ 483 | return x.view(batch_size, -1, head_count, dim_per_head).transpose(1, 2) 484 | 485 | def unshape(x): 486 | """ compute context """ 487 | return x.transpose(1, 2).contiguous().view(batch_size, -1, head_count * dim_per_head) 488 | 489 | # 1) Project key, value, and query. 
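        # When a layer_cache is supplied (incremental decoding), "self" attention
        # projects only the new query positions and appends their keys/values to
        # the cache, while "context" attention projects the encoder memory once
        # and reuses it on every later step.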
490 | if layer_cache is not None: 491 | if type == "self": 492 | query, key, value = ( 493 | self.linear_query(query), 494 | self.linear_keys(query), 495 | self.linear_values(query), 496 | ) 497 | 498 | key = shape(key) 499 | value = shape(value) 500 | 501 | if layer_cache is not None: 502 | device = key.device 503 | if layer_cache["self_keys"] is not None: 504 | key = torch.cat((layer_cache["self_keys"].to(device), key), dim=2) 505 | if layer_cache["self_values"] is not None: 506 | value = torch.cat((layer_cache["self_values"].to(device), value), dim=2) 507 | layer_cache["self_keys"] = key 508 | layer_cache["self_values"] = value 509 | elif type == "context": 510 | query = self.linear_query(query) 511 | if layer_cache is not None: 512 | if layer_cache["memory_keys"] is None: 513 | key, value = self.linear_keys(key), self.linear_values(value) 514 | key = shape(key) 515 | value = shape(value) 516 | else: 517 | key, value = ( 518 | layer_cache["memory_keys"], 519 | layer_cache["memory_values"], 520 | ) 521 | layer_cache["memory_keys"] = key 522 | layer_cache["memory_values"] = value 523 | else: 524 | key, value = self.linear_keys(key), self.linear_values(value) 525 | key = shape(key) 526 | value = shape(value) 527 | else: 528 | key = self.linear_keys(key) 529 | value = self.linear_values(value) 530 | query = self.linear_query(query) 531 | key = shape(key) 532 | value = shape(value) 533 | 534 | query = shape(query) 535 | 536 | # 2) Calculate and scale scores. 537 | query = query / math.sqrt(dim_per_head) 538 | scores = torch.matmul(query, key.transpose(2, 3)) 539 | 540 | if mask is not None: 541 | mask = mask.unsqueeze(1).expand_as(scores) 542 | scores = scores.masked_fill(mask, -1e18) 543 | 544 | # 3) Apply attention dropout and compute context vectors. 545 | 546 | attn = self.softmax(scores) 547 | 548 | if predefined_graph_1 is not None: 549 | attn_masked = attn[:, -1] * predefined_graph_1 550 | attn_masked = attn_masked / (torch.sum(attn_masked, 2).unsqueeze(2) + 1e-9) 551 | 552 | attn = torch.cat([attn[:, :-1], attn_masked.unsqueeze(1)], 1) 553 | 554 | drop_attn = self.dropout(attn) 555 | if self.use_final_linear: 556 | context = unshape(torch.matmul(drop_attn, value)) 557 | output = self.final_linear(context) 558 | return output 559 | else: 560 | context = torch.matmul(drop_attn, value) 561 | return context 562 | 563 | 564 | class DecoderState(object): 565 | """Interface for grouping together the current state of a recurrent 566 | decoder. In the simplest case just represents the hidden state of 567 | the model. But can also be used for implementing various forms of 568 | input_feeding and non-recurrent models. 569 | 570 | Modules need to implement this to utilize beam search decoding. 
571 | """ 572 | 573 | def detach(self): 574 | """ Need to document this """ 575 | self.hidden = tuple([_.detach() for _ in self.hidden]) 576 | self.input_feed = self.input_feed.detach() 577 | 578 | def beam_update(self, idx, positions, beam_size): 579 | """ Need to document this """ 580 | for e in self._all: 581 | sizes = e.size() 582 | br = sizes[1] 583 | if len(sizes) == 3: 584 | sent_states = e.view(sizes[0], beam_size, br // beam_size, sizes[2])[:, :, idx] 585 | else: 586 | sent_states = e.view(sizes[0], beam_size, br // beam_size, sizes[2], sizes[3])[:, :, idx] 587 | 588 | sent_states.data.copy_(sent_states.data.index_select(1, positions)) 589 | 590 | def map_batch_fn(self, fn): 591 | raise NotImplementedError() 592 | 593 | 594 | class TransformerDecoderState(DecoderState): 595 | """ Transformer Decoder state base class """ 596 | 597 | def __init__(self, src): 598 | """ 599 | Args: 600 | src (FloatTensor): a sequence of source words tensors 601 | with optional feature tensors, of size (len x batch). 602 | """ 603 | self.src = src 604 | self.previous_input = None 605 | self.previous_layer_inputs = None 606 | self.cache = None 607 | 608 | @property 609 | def _all(self): 610 | """ 611 | Contains attributes that need to be updated in self.beam_update(). 612 | """ 613 | if self.previous_input is not None and self.previous_layer_inputs is not None: 614 | return (self.previous_input, self.previous_layer_inputs, self.src) 615 | else: 616 | return (self.src,) 617 | 618 | def detach(self): 619 | if self.previous_input is not None: 620 | self.previous_input = self.previous_input.detach() 621 | if self.previous_layer_inputs is not None: 622 | self.previous_layer_inputs = self.previous_layer_inputs.detach() 623 | self.src = self.src.detach() 624 | 625 | def update_state(self, new_input, previous_layer_inputs): 626 | state = TransformerDecoderState(self.src) 627 | state.previous_input = new_input 628 | state.previous_layer_inputs = previous_layer_inputs 629 | return state 630 | 631 | def _init_cache(self, memory_bank, num_layers): 632 | self.cache = {} 633 | 634 | for l in range(num_layers): 635 | layer_cache = {"memory_keys": None, "memory_values": None} 636 | layer_cache["self_keys"] = None 637 | layer_cache["self_values"] = None 638 | self.cache["layer_{}".format(l)] = layer_cache 639 | 640 | def repeat_beam_size_times(self, beam_size): 641 | """ Repeat beam_size times along batch dimension. """ 642 | self.src = self.src.data.repeat(1, beam_size, 1) 643 | 644 | def map_batch_fn(self, fn): 645 | def _recursive_map(struct, batch_dim=0): 646 | for k, v in struct.items(): 647 | if v is not None: 648 | if isinstance(v, dict): 649 | _recursive_map(v) 650 | else: 651 | struct[k] = fn(v, batch_dim) 652 | 653 | self.src = fn(self.src, 0) 654 | if self.cache is not None: 655 | _recursive_map(self.cache) 656 | 657 | 658 | def gelu(x): 659 | return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) 660 | 661 | 662 | class PositionwiseFeedForward(nn.Module): 663 | """ A two-layer Feed-Forward-Network with residual layer norm. 664 | 665 | Args: 666 | d_model (int): the size of input for the first-layer of the FFN. 667 | d_ff (int): the hidden layer size of the second-layer 668 | of the FNN. 669 | dropout (float): dropout probability in :math:`[0, 1)`. 
670 | """ 671 | 672 | def __init__(self, d_model, d_ff, dropout=0.1): 673 | super().__init__() 674 | self.w_1 = nn.Linear(d_model, d_ff) 675 | self.w_2 = nn.Linear(d_ff, d_model) 676 | self.layer_norm = nn.LayerNorm(d_model, eps=1e-6) 677 | self.actv = gelu 678 | self.dropout_1 = nn.Dropout(dropout) 679 | self.dropout_2 = nn.Dropout(dropout) 680 | 681 | def forward(self, x): 682 | inter = self.dropout_1(self.actv(self.w_1(self.layer_norm(x)))) 683 | output = self.dropout_2(self.w_2(inter)) 684 | return output + x 685 | 686 | 687 | # 688 | # TRANSLATOR 689 | # The following code is used to generate summaries using the 690 | # pre-trained weights and beam search. 691 | # 692 | 693 | 694 | def build_predictor(args, tokenizer, symbols, model, logger=None): 695 | # we should be able to refactor the global scorer a lot 696 | scorer = GNMTGlobalScorer(args["alpha"], length_penalty="wu") 697 | translator = Translator(args, model, tokenizer, symbols, global_scorer=scorer, logger=logger) 698 | return translator 699 | 700 | 701 | class GNMTGlobalScorer(object): 702 | """ 703 | NMT re-ranking score from 704 | "Google's Neural Machine Translation System" :cite:`wu2016google` 705 | 706 | Args: 707 | alpha (float): length parameter 708 | beta (float): coverage parameter 709 | """ 710 | 711 | def __init__(self, alpha, length_penalty): 712 | self.alpha = alpha 713 | penalty_builder = PenaltyBuilder(length_penalty) 714 | self.length_penalty = penalty_builder.length_penalty() 715 | 716 | def score(self, beam, logprobs): 717 | """ 718 | Rescores a prediction based on penalty functions 719 | """ 720 | normalized_probs = self.length_penalty(beam, logprobs, self.alpha) 721 | return normalized_probs 722 | 723 | 724 | class PenaltyBuilder(object): 725 | """ 726 | Returns the Length and Coverage Penalty function for Beam Search. 727 | 728 | Args: 729 | length_pen (str): option name of length pen 730 | cov_pen (str): option name of cov pen 731 | """ 732 | 733 | def __init__(self, length_pen): 734 | self.length_pen = length_pen 735 | 736 | def length_penalty(self): 737 | if self.length_pen == "wu": 738 | return self.length_wu 739 | elif self.length_pen == "avg": 740 | return self.length_average 741 | else: 742 | return self.length_none 743 | 744 | """ 745 | Below are all the different penalty terms implemented so far 746 | """ 747 | 748 | def length_wu(self, beam, logprobs, alpha=0.0): 749 | """ 750 | NMT length re-ranking score from 751 | "Google's Neural Machine Translation System" :cite:`wu2016google`. 752 | """ 753 | 754 | modifier = ((5 + len(beam.next_ys)) ** alpha) / ((5 + 1) ** alpha) 755 | return logprobs / modifier 756 | 757 | def length_average(self, beam, logprobs, alpha=0.0): 758 | """ 759 | Returns the average probability of tokens in a sequence. 760 | """ 761 | return logprobs / len(beam.next_ys) 762 | 763 | def length_none(self, beam, logprobs, alpha=0.0, beta=0.0): 764 | """ 765 | Returns unmodified scores. 766 | """ 767 | return logprobs 768 | 769 | 770 | class Translator(object): 771 | """ 772 | Uses a model to translate a batch of sentences. 
773 | 774 | Args: 775 | model (:obj:`onmt.modules.NMTModel`): 776 | NMT model to use for translation 777 | fields (dict of Fields): data fields 778 | beam_size (int): size of beam to use 779 | n_best (int): number of translations produced 780 | max_length (int): maximum length output to produce 781 | global_scores (:obj:`GlobalScorer`): 782 | object to rescore final translations 783 | copy_attn (bool): use copy attention during translation 784 | beam_trace (bool): trace beam search for debugging 785 | logger(logging.Logger): logger. 786 | """ 787 | 788 | def __init__(self, args, model, vocab, symbols, global_scorer=None, logger=None): 789 | self.logger = logger 790 | 791 | self.args = args 792 | self.model = model 793 | self.generator = self.model.generator 794 | self.vocab = vocab 795 | self.symbols = symbols 796 | self.start_token = symbols["BOS"] 797 | self.end_token = symbols["EOS"] 798 | 799 | self.global_scorer = global_scorer 800 | self.beam_size = args["beam_size"] 801 | self.min_length = args["min_length"] 802 | self.max_length = args["max_length"] 803 | 804 | def translate(self, batch, step, attn_debug=False): 805 | """Generates summaries from one batch of data.""" 806 | self.model.eval() 807 | with torch.no_grad(): 808 | batch_data = self.translate_batch(batch) 809 | translations = self.from_batch(batch_data) 810 | return translations 811 | 812 | def translate_batch(self, batch, fast=False): 813 | """ 814 | Translate a batch of sentences. 815 | 816 | Mostly a wrapper around :obj:`Beam`. 817 | 818 | Args: 819 | batch (:obj:`Batch`): a batch from a dataset object 820 | fast (bool): enables fast beam search (may not support all features) 821 | """ 822 | with torch.no_grad(): 823 | return self._fast_translate_batch(batch, self.max_length, min_length=self.min_length) 824 | 825 | # Where the beam search lives 826 | # I have no idea why it is being called from the method above 827 | def _fast_translate_batch(self, batch, max_length, min_length=0): 828 | """Beam Search using the encoder inputs contained in `batch`.""" 829 | 830 | # The batch object is funny 831 | # Instead of just looking at the size of the arguments we encapsulate 832 | # a size argument. 833 | # Where is it defined? 834 | beam_size = self.beam_size 835 | batch_size = batch.batch_size 836 | src = batch.src 837 | segs = batch.segs 838 | mask_src = batch.mask_src 839 | 840 | src_features = self.model.bert(src, segs, mask_src) 841 | dec_states = self.model.decoder.init_decoder_state(src, src_features, with_cache=True) 842 | device = src_features.device 843 | 844 | # Tile states and memory beam_size times. 845 | dec_states.map_batch_fn(lambda state, dim: tile(state, beam_size, dim=dim)) 846 | src_features = tile(src_features, beam_size, dim=0) 847 | batch_offset = torch.arange(batch_size, dtype=torch.long, device=device) 848 | beam_offset = torch.arange(0, batch_size * beam_size, step=beam_size, dtype=torch.long, device=device) 849 | alive_seq = torch.full([batch_size * beam_size, 1], self.start_token, dtype=torch.long, device=device) 850 | 851 | # Give full probability to the first beam on the first step. 852 | topk_log_probs = torch.tensor([0.0] + [float("-inf")] * (beam_size - 1), device=device).repeat(batch_size) 853 | 854 | # Structure that holds finished hypotheses. 
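        # hypotheses[b] collects (score, token_ids) tuples for batch element b as
        # beams emit the end token; once the top beam for b finishes, the highest
        # scoring tuple becomes that element's prediction in `results`.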
855 | hypotheses = [[] for _ in range(batch_size)] # noqa: F812 856 | 857 | results = {} 858 | results["predictions"] = [[] for _ in range(batch_size)] # noqa: F812 859 | results["scores"] = [[] for _ in range(batch_size)] # noqa: F812 860 | results["gold_score"] = [0] * batch_size 861 | results["batch"] = batch 862 | 863 | for step in range(max_length): 864 | decoder_input = alive_seq[:, -1].view(1, -1) 865 | 866 | # Decoder forward. 867 | decoder_input = decoder_input.transpose(0, 1) 868 | 869 | dec_out, dec_states = self.model.decoder(decoder_input, src_features, dec_states, step=step) 870 | 871 | # Generator forward. 872 | log_probs = self.generator(dec_out.transpose(0, 1).squeeze(0)) 873 | vocab_size = log_probs.size(-1) 874 | 875 | if step < min_length: 876 | log_probs[:, self.end_token] = -1e20 877 | 878 | # Multiply probs by the beam probability. 879 | log_probs += topk_log_probs.view(-1).unsqueeze(1) 880 | 881 | alpha = self.global_scorer.alpha 882 | length_penalty = ((5.0 + (step + 1)) / 6.0) ** alpha 883 | 884 | # Flatten probs into a list of possibilities. 885 | curr_scores = log_probs / length_penalty 886 | 887 | if self.args['block_trigram']: 888 | cur_len = alive_seq.size(1) 889 | if cur_len > 3: 890 | for i in range(alive_seq.size(0)): 891 | fail = False 892 | words = [int(w) for w in alive_seq[i]] 893 | words = [self.vocab.ids_to_tokens[w] for w in words] 894 | words = " ".join(words).replace(" ##", "").split() 895 | if len(words) <= 3: 896 | continue 897 | trigrams = [(words[i - 1], words[i], words[i + 1]) for i in range(1, len(words) - 1)] 898 | trigram = tuple(trigrams[-1]) 899 | if trigram in trigrams[:-1]: 900 | fail = True 901 | if fail: 902 | curr_scores[i] = -10e20 903 | 904 | curr_scores = curr_scores.reshape(-1, beam_size * vocab_size) 905 | topk_scores, topk_ids = curr_scores.topk(beam_size, dim=-1) 906 | 907 | # Recover log probs. 908 | topk_log_probs = topk_scores * length_penalty 909 | 910 | # Resolve beam origin and true word ids. 911 | topk_beam_index = topk_ids.floor_divide(vocab_size) 912 | topk_ids = topk_ids.fmod(vocab_size) 913 | 914 | # Map beam_index to batch_index in the flat representation. 915 | batch_index = topk_beam_index + beam_offset[: topk_beam_index.size(0)].unsqueeze(1) 916 | select_indices = batch_index.view(-1) 917 | 918 | # Append last prediction. 919 | alive_seq = torch.cat([alive_seq.index_select(0, select_indices), topk_ids.view(-1, 1)], -1) 920 | 921 | is_finished = topk_ids.eq(self.end_token) 922 | if step + 1 == max_length: 923 | is_finished.fill_(1) 924 | # End condition is top beam is finished. 925 | end_condition = is_finished[:, 0].eq(1) 926 | # Save finished hypotheses. 927 | if is_finished.any(): 928 | predictions = alive_seq.view(-1, beam_size, alive_seq.size(-1)) 929 | for i in range(is_finished.size(0)): 930 | b = batch_offset[i] 931 | if end_condition[i]: 932 | is_finished[i].fill_(1) 933 | finished_hyp = is_finished[i].nonzero(as_tuple=False).view(-1) 934 | # Store finished hypotheses for this batch. 935 | for j in finished_hyp: 936 | hypotheses[b].append((topk_scores[i, j], predictions[i, j, 1:])) 937 | # If the batch reached the end, save the n_best hypotheses. 938 | if end_condition[i]: 939 | best_hyp = sorted(hypotheses[b], key=lambda x: x[0], reverse=True) 940 | score, pred = best_hyp[0] 941 | 942 | results["scores"][b].append(score) 943 | results["predictions"][b].append(pred) 944 | non_finished = end_condition.eq(0).nonzero(as_tuple=False).view(-1) 945 | # If all sentences are translated, no need to go further. 
946 | if len(non_finished) == 0: 947 | break 948 | # Remove finished batches for the next step. 949 | topk_log_probs = topk_log_probs.index_select(0, non_finished) 950 | batch_index = batch_index.index_select(0, non_finished) 951 | batch_offset = batch_offset.index_select(0, non_finished) 952 | alive_seq = predictions.index_select(0, non_finished).view(-1, alive_seq.size(-1)) 953 | # Reorder states. 954 | select_indices = batch_index.view(-1) 955 | src_features = src_features.index_select(0, select_indices) 956 | dec_states.map_batch_fn(lambda state, dim: state.index_select(dim, select_indices)) 957 | 958 | return results 959 | 960 | def from_batch(self, translation_batch): 961 | batch = translation_batch["batch"] 962 | assert len(translation_batch["gold_score"]) == len(translation_batch["predictions"]) 963 | batch_size = batch.batch_size 964 | 965 | preds, _, _, tgt_str, src = ( 966 | translation_batch["predictions"], 967 | translation_batch["scores"], 968 | translation_batch["gold_score"], 969 | batch.tgt_str, 970 | batch.src, 971 | ) 972 | 973 | translations = [] 974 | for b in range(batch_size): 975 | pred_sents = self.vocab.convert_ids_to_tokens([int(n) for n in preds[b][0]]) 976 | pred_sents = " ".join(pred_sents).replace(" ##", "") 977 | gold_sent = " ".join(tgt_str[b].split()) 978 | raw_src = [self.vocab.ids_to_tokens[int(t)] for t in src[b]][:500] 979 | raw_src = " ".join(raw_src) 980 | translation = (pred_sents, gold_sent, raw_src) 981 | translations.append(translation) 982 | 983 | return translations 984 | 985 | 986 | def tile(x, count, dim=0): 987 | """ 988 | Tiles x on dimension dim count times. 989 | """ 990 | perm = list(range(len(x.size()))) 991 | if dim != 0: 992 | perm[0], perm[dim] = perm[dim], perm[0] 993 | x = x.permute(perm).contiguous() 994 | out_size = list(x.size()) 995 | out_size[0] *= count 996 | batch = x.size(0) 997 | x = x.view(batch, -1).transpose(0, 1).repeat(count, 1).transpose(0, 1).contiguous().view(*out_size) 998 | if dim != 0: 999 | x = x.permute(perm).contiguous() 1000 | return x 1001 | 1002 | 1003 | # 1004 | # Optimizer for training. We keep this here in case we want to add 1005 | # a finetuning script. 1006 | # 1007 | 1008 | 1009 | class BertSumOptimizer(object): 1010 | """Specific optimizer for BertSum. 1011 | 1012 | As described in [1], the authors fine-tune BertSum for abstractive 1013 | summarization using two Adam Optimizers with different warm-up steps and 1014 | learning rate. They also use a custom learning rate scheduler. 1015 | 1016 | [1] Liu, Yang, and Mirella Lapata. "Text summarization with pretrained encoders." 1017 | arXiv preprint arXiv:1908.08345 (2019). 
1018 | """ 1019 | 1020 | def __init__(self, model, lr, warmup_steps, beta_1=0.99, beta_2=0.999, eps=1e-8): 1021 | self.encoder = model.encoder 1022 | self.decoder = model.decoder 1023 | self.lr = lr 1024 | self.warmup_steps = warmup_steps 1025 | 1026 | self.optimizers = { 1027 | "encoder": torch.optim.Adam( 1028 | model.encoder.parameters(), 1029 | lr=lr["encoder"], 1030 | betas=(beta_1, beta_2), 1031 | eps=eps, 1032 | ), 1033 | "decoder": torch.optim.Adam( 1034 | model.decoder.parameters(), 1035 | lr=lr["decoder"], 1036 | betas=(beta_1, beta_2), 1037 | eps=eps, 1038 | ), 1039 | } 1040 | 1041 | self._step = 0 1042 | self.current_learning_rates = {} 1043 | 1044 | def _update_rate(self, stack): 1045 | return self.lr[stack] * min(self._step ** (-0.5), self._step * self.warmup_steps[stack] ** (-1.5)) 1046 | 1047 | def zero_grad(self): 1048 | self.optimizer_decoder.zero_grad() 1049 | self.optimizer_encoder.zero_grad() 1050 | 1051 | def step(self): 1052 | self._step += 1 1053 | for stack, optimizer in self.optimizers.items(): 1054 | new_rate = self._update_rate(stack) 1055 | for param_group in optimizer.param_groups: 1056 | param_group["lr"] = new_rate 1057 | optimizer.step() 1058 | self.current_learning_rates[stack] = new_rate 1059 | -------------------------------------------------------------------------------- /presumm/presumm.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import logging 4 | from collections import namedtuple 5 | from tqdm import tqdm 6 | 7 | import torch 8 | from torch.utils.data import DataLoader, SequentialSampler 9 | 10 | from .modeling_bertabs import BertAbs, build_predictor 11 | from transformers import BertTokenizer 12 | from .utils_summarization import ( 13 | SummarizationDataset, 14 | process_story, 15 | build_mask, 16 | compute_token_type_ids, 17 | encode_for_summarization, 18 | fit_to_block_size, 19 | ) 20 | 21 | 22 | class PreSummSummarizer(): 23 | def __init__(self, batch_size=4, device=None): 24 | if not device: 25 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 26 | 27 | tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", do_lower_case=True) 28 | model = BertAbs.from_pretrained("remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization") 29 | model.to(device) 30 | model.eval() 31 | 32 | symbols = { 33 | "BOS": tokenizer.vocab["[unused0]"], 34 | "EOS": tokenizer.vocab["[unused1]"], 35 | "PAD": tokenizer.vocab["[PAD]"], 36 | } 37 | 38 | self.Batch = namedtuple("Batch", ["document_names", "batch_size", "src", "segs", "mask_src", "tgt_str"]) 39 | 40 | self.logger = logging.getLogger(__name__) 41 | self.tokenizer = tokenizer 42 | self.model = model 43 | self.symbols = symbols 44 | self.batch_size = batch_size 45 | self.device = device 46 | 47 | def __call__(self, *args, **kwargs): 48 | return self.summarize_string(*args, **kwargs) 49 | 50 | def collate(self, data, tokenizer, block_size, device): 51 | """ Collate formats the data passed to the data loader. 52 | 53 | In particular we tokenize the data batch after batch to avoid keeping them 54 | all in memory. We output the data as a namedtuple to fit the original BertAbs's 55 | API. 
56 | """ 57 | data = [x for x in data if not len(x[1]) == 0] # remove empty_files 58 | names = [name for name, _, _ in data] 59 | summaries = [" ".join(summary_list) for _, _, summary_list in data] 60 | 61 | encoded_text = [encode_for_summarization(story, summary, tokenizer) for _, story, summary in data] 62 | encoded_stories = torch.tensor( 63 | [fit_to_block_size(story, block_size, tokenizer.pad_token_id) for story, _ in encoded_text] 64 | ) 65 | encoder_token_type_ids = compute_token_type_ids(encoded_stories, tokenizer.cls_token_id) 66 | encoder_mask = build_mask(encoded_stories, tokenizer.pad_token_id) 67 | 68 | batch = self.Batch( 69 | document_names=names, 70 | batch_size=len(encoded_stories), 71 | src=encoded_stories.to(device), 72 | segs=encoder_token_type_ids.to(device), 73 | mask_src=encoder_mask.to(device), 74 | tgt_str=summaries, 75 | ) 76 | 77 | return batch 78 | 79 | @staticmethod 80 | def format_summary(translation): 81 | """ Transforms the output of the `from_batch` function 82 | into nicely formatted summaries. 83 | """ 84 | raw_summary, _, _ = translation 85 | summary = ( 86 | raw_summary.replace("[unused0]", "") 87 | .replace("[unused3]", "") 88 | .replace("[PAD]", "") 89 | .replace("[unused1]", "") 90 | .replace(r" +", " ") 91 | .replace(" [unused2] ", ". ") 92 | .replace("[unused2]", "") 93 | .strip() 94 | ) 95 | 96 | return summary 97 | 98 | @staticmethod 99 | def save_summaries(summaries, path, original_document_name): 100 | """ Write the summaries in files that are prefixed by the original 101 | files' name with the `_summary` appended. 102 | 103 | Attributes: 104 | original_document_names: List[string] 105 | Name of the document that was summarized. 106 | path: string 107 | Path were the summaries will be written 108 | summaries: List[string] 109 | The summaries that we produced. 110 | """ 111 | for summary, document_name in zip(summaries, original_document_name): 112 | # Prepare the summary file's name 113 | if "." in document_name: 114 | bare_document_name = ".".join(document_name.split(".")[:-1]) 115 | extension = document_name.split(".")[-1] 116 | name = bare_document_name + "_summary." 
117 | else:
118 | name = document_name + "_summary"
119 |
120 | file_path = os.path.join(path, name)
121 | with open(file_path, "w") as output:
122 | output.write(summary)
123 |
124 | def summarize_folder(self, documents_dir, summaries_output_dir, max_length=200,
125 | min_length=50, beam_size=5, alpha=0.95, block_trigram=True):
126 | args = {
127 | "max_length": max_length,
128 | "min_length": min_length,
129 | "beam_size": beam_size,
130 | "alpha": alpha,
131 | "block_trigram": block_trigram,
132 | }
133 |
134 | predictor = build_predictor(args, self.tokenizer, self.symbols, self.model)
135 |
136 | data_iterator = self.build_data_iterator(documents_dir)
137 | for batch in tqdm(data_iterator):
138 | translations = predictor.translate(batch, -1)
139 | summaries = [self.format_summary(t) for t in translations]
140 | self.save_summaries(summaries, summaries_output_dir, batch.document_names)
141 |
142 | def summarize_string(self, input_string, max_length=200, min_length=50,
143 | beam_size=5, alpha=0.95, block_trigram=True):
144 | self.logger.debug(f"min_length: {min_length} - max_length: {max_length} - beam_size: {beam_size} - alpha: {alpha} - block_trigram: {block_trigram}")
145 |
146 | args = {
147 | "max_length": max_length,
148 | "min_length": min_length,
149 | "beam_size": beam_size,
150 | "alpha": alpha,
151 | "block_trigram": block_trigram,
152 | }
153 |
154 | predictor = build_predictor(args, self.tokenizer, self.symbols, self.model)
155 |
156 | story, summary = process_story(input_string)
157 | batch = self.collate([["useless_name", story, summary]], self.tokenizer, block_size=512, device=self.device)
158 | translations = predictor.translate(batch, -1)
159 | summaries = [self.format_summary(t) for t in translations]
160 | return summaries[0]
161 |
162 | def build_data_iterator(self, documents_dir):
163 | dataset = SummarizationDataset(documents_dir)
164 | sampler = SequentialSampler(dataset)
165 |
166 | def collate_fn(data):
167 | return self.collate(data, self.tokenizer, block_size=512, device=self.device)
168 |
169 | iterator = DataLoader(dataset, sampler=sampler, batch_size=self.batch_size, collate_fn=collate_fn)
170 |
171 | return iterator
172 |
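# Example usage (illustrative; "article.txt" is a placeholder, and the
# pretrained weights are downloaded on first instantiation):
#
#     from presumm.presumm import PreSummSummarizer
#
#     summarizer = PreSummSummarizer(batch_size=4)
#     with open("article.txt") as f:
#         print(summarizer.summarize_string(f.read(), max_length=200))
#
#     # Or summarize a whole folder, writing *_summary files next to the inputs:
#     summarizer.summarize_folder("documents/", "summaries/")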
--------------------------------------------------------------------------------
/presumm/run_summarization.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python3
2 | import argparse
3 | import logging
4 | import os
5 | import sys
6 | from collections import namedtuple
7 |
8 | import torch
9 | from torch.utils.data import DataLoader, SequentialSampler
10 | from tqdm import tqdm
11 |
12 | from .modeling_bertabs import BertAbs, build_predictor
13 | from transformers import BertTokenizer
14 | from .utils_summarization import (
15 | SummarizationDataset,
16 | build_mask,
17 | compute_token_type_ids,
18 | encode_for_summarization,
19 | fit_to_block_size,
20 | )
21 |
22 |
23 | logger = logging.getLogger(__name__)
24 | logging.basicConfig(stream=sys.stdout, level=logging.INFO)
25 |
26 |
27 | Batch = namedtuple("Batch", ["document_names", "batch_size", "src", "segs", "mask_src", "tgt_str"])
28 |
29 |
30 | def evaluate(args):
31 | tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", do_lower_case=True)
32 | model = BertAbs.from_pretrained("remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization")  # same checkpoint as presumm.py
33 | model.to(args.device)
34 | model.eval()
35 |
36 | symbols = {
37 | "BOS": tokenizer.vocab["[unused0]"],
38 | "EOS": tokenizer.vocab["[unused1]"],
39 | "PAD": tokenizer.vocab["[PAD]"],
40 | }
41 |
42 | if args.compute_rouge:
43 | reference_summaries = []
44 | generated_summaries = []
45 |
46 | import rouge
47 | import nltk
48 |
49 | nltk.download("punkt")
50 | rouge_evaluator = rouge.Rouge(
51 | metrics=["rouge-n", "rouge-l"],
52 | max_n=2,
53 | limit_length=True,
54 | length_limit=args.beam_size,  # NOTE: caps ROUGE inputs at `beam_size` words; `args.max_length` may have been intended
55 | length_limit_type="words",
56 | apply_avg=True,
57 | apply_best=False,
58 | alpha=0.5,  # Default F1_score
59 | weight_factor=1.2,
60 | stemming=True,
61 | )
62 |
63 | # these (unused) arguments are defined to keep the compatibility
64 | # with the legacy code and will be deleted in a future iteration.
65 | args.result_path = ""
66 | args.temp_dir = ""
67 |
68 | data_iterator = build_data_iterator(args, tokenizer)
69 | predictor = build_predictor(vars(args), tokenizer, symbols, model)  # build_predictor expects dict-style access to the settings
70 |
71 | logger.info("***** Running evaluation *****")
72 | logger.info(" Number examples = %d", len(data_iterator.dataset))
73 | logger.info(" Batch size = %d", args.batch_size)
74 | logger.info("")
75 | logger.info("***** Beam Search parameters *****")
76 | logger.info(" Beam size = %d", args.beam_size)
77 | logger.info(" Minimum length = %d", args.min_length)
78 | logger.info(" Maximum length = %d", args.max_length)
79 | logger.info(" Alpha (length penalty) = %.2f", args.alpha)
80 | logger.info(" Trigrams %s be blocked", ("will" if args.block_trigram else "will NOT"))
81 |
82 | for batch in tqdm(data_iterator):
83 | batch_data = predictor.translate_batch(batch)
84 | translations = predictor.from_batch(batch_data)
85 | summaries = [format_summary(t) for t in translations]
86 | save_summaries(summaries, args.summaries_output_dir, batch.document_names)
87 |
88 | if args.compute_rouge:
89 | reference_summaries += batch.tgt_str
90 | generated_summaries += summaries
91 |
92 | if args.compute_rouge:
93 | scores = rouge_evaluator.get_scores(generated_summaries, reference_summaries)
94 | str_scores = format_rouge_scores(scores)
95 | save_rouge_scores(str_scores)
96 | print(str_scores)
97 |
98 |
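# For reference, with `apply_avg=True` the evaluator's `get_scores` returns one
# averaged entry per metric, shaped the way `format_rouge_scores` below consumes
# it (numbers illustrative):
#
#     {"rouge-1": {"f": 0.41, "p": 0.44, "r": 0.40},
#      "rouge-2": {"f": 0.19, "p": 0.20, "r": 0.18},
#      "rouge-l": {"f": 0.38, "p": 0.40, "r": 0.36}}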
99 | def save_summaries(summaries, path, original_document_name):
100 | """ Write each summary to a file named after the original document,
101 | with `_summary` appended before the extension.
102 |
103 | Args:
104 | summaries: List[string]
105 | The summaries that we produced.
106 | path: string
107 | Path where the summaries will be written.
108 | original_document_name: List[string]
109 | Names of the documents that were summarized.
110 | """
111 | for summary, document_name in zip(summaries, original_document_name):
112 | # Prepare the summary file's name
113 | if "." in document_name:
114 | bare_document_name = ".".join(document_name.split(".")[:-1])
115 | extension = document_name.split(".")[-1]
116 | name = bare_document_name + "_summary." + extension
117 | else:
118 | name = document_name + "_summary"
119 |
120 | file_path = os.path.join(path, name)
121 | with open(file_path, "w") as output:
122 | output.write(summary)
123 |
124 |
125 | def format_summary(translation):
126 | """ Transforms the output of the `from_batch` function
127 | into nicely formatted summaries.
128 | """
129 | raw_summary, _, _ = translation
130 | summary = (
131 | raw_summary.replace("[unused0]", "")
132 | .replace("[unused3]", "")
133 | .replace("[PAD]", "")
134 | .replace("[unused1]", "")
135 | .replace(" [unused2] ", ". ")
136 | .replace("[unused2]", "")
137 | .strip()
138 | )
139 | summary = " ".join(summary.split())  # collapse repeated whitespace left by the replacements
140 |
141 | return summary
142 |
143 |
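# For instance (an illustrative raw decode, not real model output; [unused0]
# and [unused1] are the BOS/EOS symbols defined in `evaluate`, and
# " [unused2] " is rewritten as ". " to mark a sentence boundary):
#
#     format_summary(("the cat sat [unused2] it was hungry [unused1] [PAD]", "", ""))
#     # -> "the cat sat. it was hungry"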
207 | """ 208 | data = [x for x in data if not len(x[1]) == 0] # remove empty_files 209 | names = [name for name, _, _ in data] 210 | summaries = [" ".join(summary_list) for _, _, summary_list in data] 211 | 212 | encoded_text = [encode_for_summarization(story, summary, tokenizer) for _, story, summary in data] 213 | encoded_stories = torch.tensor( 214 | [fit_to_block_size(story, block_size, tokenizer.pad_token_id) for story, _ in encoded_text] 215 | ) 216 | encoder_token_type_ids = compute_token_type_ids(encoded_stories, tokenizer.cls_token_id) 217 | encoder_mask = build_mask(encoded_stories, tokenizer.pad_token_id) 218 | 219 | batch = Batch( 220 | document_names=names, 221 | batch_size=len(encoded_stories), 222 | src=encoded_stories.to(device), 223 | segs=encoder_token_type_ids.to(device), 224 | mask_src=encoder_mask.to(device), 225 | tgt_str=summaries, 226 | ) 227 | 228 | return batch 229 | 230 | 231 | def decode_summary(summary_tokens, tokenizer): 232 | """ Decode the summary and return it in a format 233 | suitable for evaluation. 234 | """ 235 | summary_tokens = summary_tokens.to("cpu").numpy() 236 | summary = tokenizer.decode(summary_tokens) 237 | sentences = summary.split(".") 238 | sentences = [s + "." for s in sentences] 239 | return sentences 240 | 241 | 242 | def main(): 243 | """ The main function defines the interface with the users. 244 | """ 245 | parser = argparse.ArgumentParser() 246 | parser.add_argument( 247 | "--documents_dir", 248 | default=None, 249 | type=str, 250 | required=True, 251 | help="The folder where the documents to summarize are located.", 252 | ) 253 | parser.add_argument( 254 | "--summaries_output_dir", 255 | default=None, 256 | type=str, 257 | required=False, 258 | help="The folder in wich the summaries should be written. Defaults to the folder where the documents are", 259 | ) 260 | parser.add_argument( 261 | "--compute_rouge", 262 | default=False, 263 | type=bool, 264 | required=False, 265 | help="Compute the ROUGE metrics during evaluation. 
242 | def main():
243 | """ Command-line interface: parse the arguments and run the evaluation.
244 | """
245 | parser = argparse.ArgumentParser()
246 | parser.add_argument(
247 | "--documents_dir",
248 | default=None,
249 | type=str,
250 | required=True,
251 | help="The folder where the documents to summarize are located.",
252 | )
253 | parser.add_argument(
254 | "--summaries_output_dir",
255 | default=None,
256 | type=str,
257 | required=False,
258 | help="The folder in which the summaries should be written. Defaults to the folder where the documents are.",
259 | )
260 | parser.add_argument(
261 | "--compute_rouge",
262 | # `action="store_true"` rather than `type=bool`: argparse treats any non-empty value, even "False", as True
263 | action="store_true",
264 | required=False,
265 | help="Compute the ROUGE metrics during evaluation. Only available for the CNN/DailyMail dataset.",
266 | )
267 | # EVALUATION options
268 | parser.add_argument(
269 | "--no_cuda", action="store_true", help="Whether to force the execution on CPU.",
270 | )
271 | parser.add_argument(
272 | "--batch_size", default=4, type=int, help="Batch size per GPU/CPU for evaluation.",
273 | )
274 | # BEAM SEARCH arguments
275 | parser.add_argument(
276 | "--min_length", default=50, type=int, help="Minimum number of tokens for the summaries.",
277 | )
278 | parser.add_argument(
279 | "--max_length", default=200, type=int, help="Maximum number of tokens for the summaries.",
280 | )
281 | parser.add_argument(
282 | "--beam_size", default=5, type=int, help="The number of beams to start with for each example.",
283 | )
284 | parser.add_argument(
285 | "--alpha", default=0.95, type=float, help="The value of alpha for the length penalty in the beam search.",
286 | )
287 | parser.add_argument(
288 | "--no_block_trigram",
289 | dest="block_trigram",
290 | action="store_false",
291 | help="Disable the blocking of repeating trigrams in the text generated by beam search (on by default).",
292 | )
293 | args = parser.parse_args()
294 |
295 | # Select device (distributed execution is not available)
296 | args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
297 |
298 | # Check the existence of directories
299 | if not args.summaries_output_dir:
300 | args.summaries_output_dir = args.documents_dir
301 |
302 | if not documents_dir_is_valid(args.documents_dir):
303 | raise FileNotFoundError(
304 | "We could not find the directory you specified for the documents to summarize, or it was empty. Please specify a valid path."
305 | )
306 | os.makedirs(args.summaries_output_dir, exist_ok=True)
307 |
308 | evaluate(args)
309 |
310 |
311 | def documents_dir_is_valid(path):
312 | if not os.path.exists(path):
313 | return False
314 |
315 | file_list = os.listdir(path)
316 | if len(file_list) == 0:
317 | return False
318 |
319 | return True
320 |
321 |
322 | if __name__ == "__main__":
323 | main()
324 |
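# Example invocation (hypothetical paths):
#
#     python -m presumm.run_summarization \
#         --documents_dir /path/to/stories \
#         --summaries_output_dir /path/to/summaries \
#         --batch_size 4 --min_length 50 --max_length 200 \
#         --beam_size 5 --alpha 0.95
#
# Add --compute_rouge when evaluating on CNN/DailyMail to also report ROUGE,
# and --no_block_trigram to disable trigram blocking during beam search.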
36 | """ 37 | assert os.path.isdir(path) 38 | 39 | self.documents = [] 40 | story_filenames_list = os.listdir(path) 41 | for story_filename in story_filenames_list: 42 | if "summary" in story_filename: 43 | continue 44 | path_to_story = os.path.join(path, story_filename) 45 | if not os.path.isfile(path_to_story): 46 | continue 47 | self.documents.append(path_to_story) 48 | 49 | def __len__(self): 50 | """ Returns the number of documents. """ 51 | return len(self.documents) 52 | 53 | def __getitem__(self, idx): 54 | document_path = self.documents[idx] 55 | document_name = document_path.split("/")[-1] 56 | with open(document_path, encoding="utf-8") as source: 57 | raw_story = source.read() 58 | story_lines, summary_lines = process_story(raw_story) 59 | return document_name, story_lines, summary_lines 60 | 61 | 62 | def process_story(raw_story): 63 | """ Extract the story and summary from a story file. 64 | 65 | Attributes: 66 | raw_story (str): content of the story file as an utf-8 encoded string. 67 | 68 | Raises: 69 | IndexError: If the story is empty or contains no highlights. 70 | """ 71 | nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")])) 72 | 73 | # for some unknown reason some lines miss a period, add it 74 | nonempty_lines = [_add_missing_period(line) for line in nonempty_lines] 75 | 76 | # gather article lines 77 | story_lines = [] 78 | lines = deque(nonempty_lines) 79 | while True: 80 | try: 81 | element = lines.popleft() 82 | if element.startswith("@highlight"): 83 | break 84 | story_lines.append(element) 85 | except IndexError: 86 | # if "@highlight" is absent from the file we pop 87 | # all elements until there is None, raising an exception. 88 | return story_lines, [] 89 | 90 | # gather summary lines 91 | summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines)) 92 | 93 | return story_lines, summary_lines 94 | 95 | 96 | def _add_missing_period(line): 97 | END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u2019", ")"] 98 | if line.startswith("@highlight"): 99 | return line 100 | if line[-1] in END_TOKENS: 101 | return line 102 | return line + "." 103 | 104 | 105 | # -------------------------- 106 | # Encoding and preprocessing 107 | # -------------------------- 108 | 109 | 110 | def fit_to_block_size(sequence, block_size, pad_token_id): 111 | """ Adapt the source and target sequences' lengths to the block size. 112 | If the sequence is shorter we append padding token to the right of the sequence. 113 | """ 114 | if len(sequence) > block_size: 115 | return sequence[:block_size] 116 | else: 117 | sequence.extend([pad_token_id] * (block_size - len(sequence))) 118 | return sequence 119 | 120 | 121 | def build_mask(sequence, pad_token_id): 122 | """ Builds the mask. The attention mechanism will only attend to positions 123 | with value 1. """ 124 | mask = torch.ones_like(sequence) 125 | idx_pad_tokens = sequence == pad_token_id 126 | mask[idx_pad_tokens] = 0 127 | return mask 128 | 129 | 130 | def encode_for_summarization(story_lines, summary_lines, tokenizer): 131 | """ Encode the story and summary lines, and join them 132 | as specified in [1] by using `[SEP] [CLS]` tokens to separate 133 | sentences. 
134 | """ 135 | story_lines_token_ids = [tokenizer.encode(line) for line in story_lines] 136 | story_token_ids = [token for sentence in story_lines_token_ids for token in sentence] 137 | summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines] 138 | summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence] 139 | 140 | return story_token_ids, summary_token_ids 141 | 142 | 143 | def compute_token_type_ids(batch, separator_token_id): 144 | """ Segment embeddings as described in [1] 145 | 146 | The values {0,1} were found in the repository [2]. 147 | 148 | Attributes: 149 | batch: torch.Tensor, size [batch_size, block_size] 150 | Batch of input. 151 | separator_token_id: int 152 | The value of the token that separates the segments. 153 | 154 | [1] Liu, Yang, and Mirella Lapata. "Text summarization with pretrained encoders." 155 | arXiv preprint arXiv:1908.08345 (2019). 156 | [2] https://github.com/nlpyang/PreSumm (/src/prepro/data_builder.py, commit fac1217) 157 | """ 158 | batch_embeddings = [] 159 | for sequence in batch: 160 | sentence_num = -1 161 | embeddings = [] 162 | for s in sequence: 163 | if s == separator_token_id: 164 | sentence_num += 1 165 | embeddings.append(sentence_num % 2) 166 | batch_embeddings.append(embeddings) 167 | return torch.tensor(batch_embeddings) 168 | -------------------------------------------------------------------------------- /xml_processor.py: -------------------------------------------------------------------------------- 1 | from collections import OrderedDict 2 | from unidecode import unidecode 3 | import xml.etree.ElementTree as ET 4 | from tqdm import tqdm 5 | 6 | def parse_xml(xml_path): 7 | """Obtain representation of XML file""" 8 | xml_root = ET.parse(xml_path).getroot() 9 | return xml_root 10 | 11 | def get_chapter_page_numbers(xml_root, fonts, closeness=3): 12 | """ 13 | Create list of chapter page numbers. 
14 | `closeness` is the minimum page gap between chapter starts; of any closer pair, only the later page is kept
15 | """
16 | chapter_start_pages = list()
17 | for page in xml_root:
18 | page_num = int(page.attrib['number'])
19 | for item in page:
20 | if item.tag == "text":
21 | if item.attrib['font'] in fonts:  # chapter detection
22 | chapter_start_pages.append(page_num)
23 | break
24 |
25 | # Keep only the later page of any pair of start pages closer together than `closeness`
26 | cleaned_pages = list()
27 | for page_number in chapter_start_pages:
28 | if cleaned_pages and cleaned_pages[-1] + closeness > page_number:
29 | cleaned_pages.pop()
30 | cleaned_pages.append(page_number)
31 |
32 | return cleaned_pages
33 |
34 | def process(xml_root, chapter_start_pages, heading_fonts, body_fonts):
35 | content = OrderedDict()
36 | heading = ""
37 | first_body = True
38 | book = list()
39 | last_chapter_num = 1
40 | for page in tqdm(xml_root, desc="Page"):
41 | current_page_num = int(page.attrib['number'])
42 | # Get current chapter based on page number
43 | for idx, page_number in enumerate(chapter_start_pages):
44 | # Find the first chapter start page that comes after the current page; its index gives the chapter
45 | if current_page_num + 1 <= page_number:
46 | chapter_num = idx + 1
47 | break
48 | else:
49 | chapter_num = 0  # at or past the last chapter start page
50 |
51 | # If the chapter number has changed since the last page then save content and reset
52 | if last_chapter_num != chapter_num:
53 | # print("last_chapter_num: " + str(last_chapter_num) + " chapter_num: " + str(chapter_num))
54 | book.append(content)
55 | content = OrderedDict()
56 | first_body = True
57 |
58 | # Set last chapter number to the current chapter number
59 | last_chapter_num = chapter_num
60 |
61 | for item in page:
62 | if item.tag == "text":
63 | # If item is a heading
64 | if item.attrib['font'] in heading_fonts:
65 | first_body = True
66 | heading += item[0].text
67 | # If item is body text
68 | if item.attrib['font'] in body_fonts:
69 | # If this is the first body after the heading then set the `current_heading` and initialize the `content` section
70 | if first_body:
71 | current_heading = heading.replace('\n', ' ').strip()
72 | if current_heading == "":
73 | current_heading = "Unknown"
74 | current_heading = unidecode(current_heading)
75 | content[current_heading] = ""
76 | heading = ""
77 | first_body = False
78 |
79 | # convert unicode to ascii
80 | text = unidecode(item.text)
81 | # strip whitespace and replace newlines
82 | text = text.strip().replace('\n', ' ').replace('\r', ' ')
83 | # a trailing dash means a word was split across lines: drop it and join directly
84 | if text[-1:] == "-":
85 | text = text[:-1]
86 | else:
87 | text += " "  # add a space after each full line
88 |
89 | # Store line of text in `content` dictionary under `current_heading`
90 | content[current_heading] += text
91 | book.append(content)  # save the final chapter, which never triggers the chapter-change branch above
92 | return book
93 |
94 |
--------------------------------------------------------------------------------
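# Example usage of xml_processor (hypothetical file name and font IDs; the XML
# layout assumed here, <page number=...> elements containing <text font=...>
# children, matches what `pdftohtml -xml` produces):
#
#     root = parse_xml("book.xml")
#     chapter_pages = get_chapter_page_numbers(root, fonts=["4"], closeness=3)
#     book = process(root, chapter_pages, heading_fonts=["4"], body_fonts=["0", "1"])
#     # `book` is a list with one OrderedDict per chapter, mapping each detected
#     # heading to the body text gathered under it.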