├── .gitignore ├── LICENSE ├── Makefile ├── README.md ├── data ├── feature_extraction_pipeline_benchmark.png ├── qa_pipeline_benchmark.png └── social_preview.jpeg ├── notebooks └── benchmark_pipelines.ipynb ├── onnx_transformers ├── __init__.py └── pipelines.py ├── setup.cfg ├── setup.py └── tests ├── __init__.py └── test_pipelines_onnx.py /.gitignore: -------------------------------------------------------------------------------- 1 | # onnx graph cache 2 | .onnx 3 | 4 | # Byte-compiled / optimized / DLL files 5 | __pycache__/ 6 | *.py[cod] 7 | *$py.class 8 | 9 | # C extensions 10 | *.so 11 | 12 | # Distribution / packaging 13 | .Python 14 | build/ 15 | develop-eggs/ 16 | dist/ 17 | downloads/ 18 | eggs/ 19 | .eggs/ 20 | lib/ 21 | lib64/ 22 | parts/ 23 | sdist/ 24 | var/ 25 | wheels/ 26 | pip-wheel-metadata/ 27 | share/python-wheels/ 28 | *.egg-info/ 29 | .installed.cfg 30 | *.egg 31 | MANIFEST 32 | 33 | # PyInstaller 34 | # Usually these files are written by a python script from a template 35 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 36 | *.manifest 37 | *.spec 38 | 39 | # Installer logs 40 | pip-log.txt 41 | pip-delete-this-directory.txt 42 | 43 | # Unit test / coverage reports 44 | htmlcov/ 45 | .tox/ 46 | .nox/ 47 | .coverage 48 | .coverage.* 49 | .cache 50 | nosetests.xml 51 | coverage.xml 52 | *.cover 53 | *.py,cover 54 | .hypothesis/ 55 | .pytest_cache/ 56 | 57 | # Translations 58 | *.mo 59 | *.pot 60 | 61 | # Django stuff: 62 | *.log 63 | local_settings.py 64 | db.sqlite3 65 | db.sqlite3-journal 66 | 67 | # Flask stuff: 68 | instance/ 69 | .webassets-cache 70 | 71 | # Scrapy stuff: 72 | .scrapy 73 | 74 | # Sphinx documentation 75 | docs/_build/ 76 | 77 | # PyBuilder 78 | target/ 79 | 80 | # Jupyter Notebook 81 | .ipynb_checkpoints 82 | 83 | # IPython 84 | profile_default/ 85 | ipython_config.py 86 | 87 | # pyenv 88 | .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 98 | __pypackages__/ 99 | 100 | # Celery stuff 101 | celerybeat-schedule 102 | celerybeat.pid 103 | 104 | # SageMath parsed files 105 | *.sage.py 106 | 107 | # Environments 108 | .env 109 | .venv 110 | env/ 111 | venv/ 112 | ENV/ 113 | env.bak/ 114 | venv.bak/ 115 | 116 | # Spyder project settings 117 | .spyderproject 118 | .spyproject 119 | 120 | # Rope project settings 121 | .ropeproject 122 | 123 | # mkdocs documentation 124 | /site 125 | 126 | # mypy 127 | .mypy_cache/ 128 | .dmypy.json 129 | dmypy.json 130 | 131 | # Pyre type checker 132 | .pyre/ 133 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 
14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 
134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | .PHONY: quality style test test-examples docs
2 |
3 | # Check that source code meets quality standards
4 |
5 | quality:
6 | 	black --check --line-length 119 --target-version py35 tests onnx_transformers
7 | 	isort --check-only tests onnx_transformers
8 | 	flake8 tests onnx_transformers
9 |
10 | # Format source code automatically
11 |
12 | style:
13 | 	black --line-length 119 --target-version py35 tests onnx_transformers
14 | 	isort tests onnx_transformers
15 |
16 | # Run tests for the library
17 |
18 | test:
19 | 	python -m pytest -n auto --dist=loadfile -s -v ./tests/
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # onnx_transformers
2 |
3 | ![onnx_transformers](https://github.com/patil-suraj/onnx_transformers/blob/master/data/social_preview.jpeg?raw=True)
4 |
5 | Accelerated NLP pipelines for fast inference 🚀 on CPU. Built with 🤗Transformers and ONNX runtime.
6 |
7 | ## Installation:
8 |
9 | ```bash
10 | pip install git+https://github.com/patil-suraj/onnx_transformers
11 | ```
12 |
13 | ## Usage:
14 |
15 | > *NOTE*: This is an experimental project and has only been tested with PyTorch.
16 |
17 | The pipeline API is similar to the transformers [pipeline](https://huggingface.co/transformers/main_classes/pipelines.html), with just a few differences, which are explained below.
18 |
19 | Just provide the path/URL to the model; it will download the model from the [hub](https://huggingface.co/models) if needed, automatically create the onnx graph, and run inference.
20 |
21 | ```python
22 | from onnx_transformers import pipeline
23 |
24 | # Initialize a pipeline by passing the task name and
25 | # setting onnx to True (the default value is also True)
26 | >>> nlp = pipeline("sentiment-analysis", onnx=True)
27 | >>> nlp("Transformers and onnx runtime is an awesome combo!")
28 | [{'label': 'POSITIVE', 'score': 0.999721109867096}]
29 | ```
30 |
31 | Or provide a different model using the `model` argument.
32 |
33 | ```python
34 | from onnx_transformers import pipeline
35 |
36 | >>> nlp = pipeline("question-answering", model="deepset/roberta-base-squad2", onnx=True)
37 | >>> nlp({
38 |   "question": "What is ONNX Runtime ?",
39 |   "context": "ONNX Runtime is a highly performant single inference engine for multiple platforms and hardware"
40 | })
41 | {'answer': 'highly performant single inference engine for multiple platforms and hardware', 'end': 94, 'score': 0.751201868057251, 'start': 18}
42 | ```
43 |
44 | Set `onnx` to `False` for standard torch inference.
45 |
46 | You can create `Pipeline` objects for the following downstream tasks:
47 |
48 | - `feature-extraction`: Generates a tensor representation for the input sequence.
49 | - `ner`: Generates a named-entity mapping for each word in the input sequence.
50 | - `sentiment-analysis`: Gives the polarity (positive / negative) of the whole input sequence. Can be used with any text classification model.
51 | - `question-answering`: Given some context and a question referring to that context, extracts the answer to the question from the context.
52 | - `zero-shot-classification`: Classifies the input sequence against an arbitrary list of candidate labels supplied at call time, without task-specific fine-tuning (see the sketch after this list).
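A minimal zero-shot sketch, since that task has no snippet above. It assumes the call signature mirrors the upstream transformers zero-shot pipeline (a sequence plus `candidate_labels`); the example text and labels are made up for illustration.

```python
from onnx_transformers import pipeline

# Sketch only: assumes the same call convention as the upstream transformers
# zero-shot pipeline. Defaults to roberta-large-mnli here (see note below).
nlp = pipeline("zero-shot-classification", onnx=True)

result = nlp(
    "ONNX Runtime cuts CPU inference latency significantly",
    candidate_labels=["machine learning", "cooking", "sports"],
)
print(result["labels"][0], result["scores"][0])  # labels come back sorted, best first
```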
53 |
54 |
55 | Calling the pipeline for the first time loads the model, creates the onnx graph, and caches it for future use. Because of this, the first load takes some time. Subsequent calls for the same model load the onnx graph automatically from the cache.
56 |
57 | The key difference between the HF pipeline and onnx_transformers is that the `model` parameter should always be a `string` (a path or URL to the saved model). Also, the `zero-shot-classification` pipeline here uses `roberta-large-mnli` as the default model instead of `facebook/bart-large-mnli`, as BART is not yet tested with onnx runtime.
58 |
59 |
60 | ## Benchmarks
61 |
62 | > Note: For some reason, onnx is slow on Colab notebooks, so you won't notice any speed-up there. Benchmark it on your own hardware.
63 |
64 | For detailed benchmarks and other information, refer to this blog post and notebook:
65 | - [Accelerate your NLP pipelines using Hugging Face Transformers and ONNX Runtime](https://medium.com/microsoftazure/accelerate-your-nlp-pipelines-using-hugging-face-transformers-and-onnx-runtime-2443578f4333)
66 | - [Exporting 🤗 transformers model to ONNX](https://github.com/huggingface/transformers/blob/master/notebooks/04-onnx-export.ipynb)
67 |
68 | To benchmark the pipelines in this repo, see the [benchmark_pipelines](https://github.com/patil-suraj/onnx_transformers/blob/master/notebooks/benchmark_pipelines.ipynb) notebook, or try the minimal timing loop sketched below.
69 | > (Note: These are not yet comprehensive benchmarks.)
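The timing loop referenced above, as a quick do-it-yourself check. This is a sketch, not part of the library: the helper name `avg_latency`, the run count, and the sample sentence are arbitrary choices, and absolute numbers will vary with your hardware.

```python
import time

from onnx_transformers import pipeline


def avg_latency(nlp, n_runs=100):
    # Warm-up call so one-time costs (model load, onnx graph creation
    # and caching) don't pollute the measurement
    nlp("My name is BERT")
    start = time.perf_counter()
    for _ in range(n_runs):
        nlp("My name is BERT")
    return (time.perf_counter() - start) / n_runs


for use_onnx in (False, True):
    nlp = pipeline("feature-extraction", onnx=use_onnx)
    print(f"onnx={use_onnx}: {avg_latency(nlp) * 1000:.2f} ms per call")
```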
70 | 71 | **Benchmark `feature-extraction` pipeline** 72 | 73 | ![](https://github.com/patil-suraj/onnx_transformers/blob/master/data/feature_extraction_pipeline_benchmark.png?raw=True) 74 | 75 | 76 | **Benchmark `question-answering` pipeline** 77 | 78 | ![](https://github.com/patil-suraj/onnx_transformers/blob/master/data/qa_pipeline_benchmark.png?raw=True) 79 | -------------------------------------------------------------------------------- /data/feature_extraction_pipeline_benchmark.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/patil-suraj/onnx_transformers/463dcfc9b7d037dedd85b1637fe44eeb58f4a5a3/data/feature_extraction_pipeline_benchmark.png -------------------------------------------------------------------------------- /data/qa_pipeline_benchmark.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/patil-suraj/onnx_transformers/463dcfc9b7d037dedd85b1637fe44eeb58f4a5a3/data/qa_pipeline_benchmark.png -------------------------------------------------------------------------------- /data/social_preview.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/patil-suraj/onnx_transformers/463dcfc9b7d037dedd85b1637fe44eeb58f4a5a3/data/social_preview.jpeg -------------------------------------------------------------------------------- /notebooks/benchmark_pipelines.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [ 8 | { 9 | "name": "stdout", 10 | "output_type": "stream", 11 | "text": [ 12 | "Cloning into 'onnx_transformers'...\n", 13 | "remote: Enumerating objects: 105, done.\u001b[K\n", 14 | "remote: Counting objects: 100% (105/105), done.\u001b[K\n", 15 | "remote: Compressing objects: 100% (65/65), done.\u001b[K\n", 16 | "remote: Total 105 (delta 48), reused 92 (delta 37), pack-reused 0\u001b[K\n", 17 | "Receiving objects: 100% (105/105), 34.52 KiB | 415.00 KiB/s, done.\n", 18 | "Resolving deltas: 100% (48/48), done.\n" 19 | ] 20 | } 21 | ], 22 | "source": [ 23 | "!pip install -qqq --upgrade torch==1.6.0+cpu torchvision==0.7.0+cpu -f https://download.pytorch.org/whl/torch_stable.html\n", 24 | "!git clone https://github.com/patil-suraj/onnx_transformers.git\n", 25 | "!pip install -U -qqq -e ./onnx_transformers" 26 | ] 27 | }, 28 | { 29 | "cell_type": "code", 30 | "execution_count": 2, 31 | "metadata": {}, 32 | "outputs": [ 33 | { 34 | "name": "stdout", 35 | "output_type": "stream", 36 | "text": [ 37 | "Architecture: x86_64\n", 38 | "CPU op-mode(s): 32-bit, 64-bit\n", 39 | "Byte Order: Little Endian\n", 40 | "CPU(s): 4\n", 41 | "On-line CPU(s) list: 0-3\n", 42 | "Thread(s) per core: 2\n", 43 | "Core(s) per socket: 2\n", 44 | "Socket(s): 1\n", 45 | "NUMA node(s): 1\n", 46 | "Vendor ID: AuthenticAMD\n", 47 | "CPU family: 23\n", 48 | "Model: 1\n", 49 | "Model name: AMD EPYC 7571\n", 50 | "Stepping: 2\n", 51 | "CPU MHz: 2541.395\n", 52 | "BogoMIPS: 4399.98\n", 53 | "Hypervisor vendor: KVM\n", 54 | "Virtualization type: full\n", 55 | "L1d cache: 32K\n", 56 | "L1i cache: 64K\n", 57 | "L2 cache: 512K\n", 58 | "L3 cache: 8192K\n", 59 | "NUMA node0 CPU(s): 0-3\n", 60 | "Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc rep_good nopl 
nonstop_tsc cpuid extd_apicid aperfmperf tsc_known_freq pni pclmulqdq ssse3 fma cx16 sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch topoext vmmcall fsgsbase bmi1 avx2 smep bmi2 rdseed adx smap clflushopt sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save\n" 61 | ] 62 | } 63 | ], 64 | "source": [ 65 | "!lscpu" 66 | ] 67 | }, 68 | { 69 | "cell_type": "markdown", 70 | "metadata": {}, 71 | "source": [ 72 | "Restart notebook after installation" 73 | ] 74 | }, 75 | { 76 | "cell_type": "code", 77 | "execution_count": 1, 78 | "metadata": {}, 79 | "outputs": [], 80 | "source": [ 81 | "from onnx_transformers import pipeline" 82 | ] 83 | }, 84 | { 85 | "cell_type": "code", 86 | "execution_count": 2, 87 | "metadata": {}, 88 | "outputs": [ 89 | { 90 | "name": "stdout", 91 | "output_type": "stream", 92 | "text": [ 93 | "/notebooks/onnx_tests/onnx_transformers/.onnx/distilbert-base-cased/distilbert-base-cased.onnx.input_names.json\n", 94 | "Creating folder /notebooks/onnx_tests/onnx_transformers/.onnx/distilbert-base-cased\n", 95 | "Using framework PyTorch: 1.6.0+cpu\n", 96 | "Found input input_ids with shape: {0: 'batch', 1: 'sequence'}\n", 97 | "Found input attention_mask with shape: {0: 'batch', 1: 'sequence'}\n", 98 | "Found output output_0 with shape: {0: 'batch', 1: 'sequence'}\n", 99 | "Ensuring inputs are in correct order\n", 100 | "head_mask is not present in the generated input list.\n", 101 | "Generated inputs order: ['input_ids', 'attention_mask']\n" 102 | ] 103 | }, 104 | { 105 | "name": "stderr", 106 | "output_type": "stream", 107 | "text": [ 108 | "/usr/local/lib/python3.7/dist-packages/transformers/modeling_utils.py:1570: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. 
This means that the trace might not generalize to other inputs!\n", 109 | " input_tensor.shape == tensor_shape for input_tensor in input_tensors\n" 110 | ] 111 | }, 112 | { 113 | "name": "stdout", 114 | "output_type": "stream", 115 | "text": [ 116 | "Found input input_ids with shape: {0: 'batch', 1: 'sequence'}\n", 117 | "Found input attention_mask with shape: {0: 'batch', 1: 'sequence'}\n", 118 | "Found output output_0 with shape: {0: 'batch', 1: 'sequence'}\n" 119 | ] 120 | } 121 | ], 122 | "source": [ 123 | "# load onnx pipeline\n", 124 | "nlp_onnx = pipeline(\"feature-extraction\", onnx=True)" 125 | ] 126 | }, 127 | { 128 | "cell_type": "code", 129 | "execution_count": 3, 130 | "metadata": {}, 131 | "outputs": [], 132 | "source": [ 133 | "# load torch pipeline\n", 134 | "nlp_torch = pipeline(\"feature-extraction\", onnx=False)" 135 | ] 136 | }, 137 | { 138 | "cell_type": "code", 139 | "execution_count": 4, 140 | "metadata": {}, 141 | "outputs": [], 142 | "source": [ 143 | "from contextlib import contextmanager\n", 144 | "from dataclasses import dataclass\n", 145 | "from time import time\n", 146 | "from tqdm import trange\n", 147 | "\n", 148 | "@contextmanager\n", 149 | "def track_infer_time(buffer: [int]):\n", 150 | " start = time()\n", 151 | " yield\n", 152 | " end = time()\n", 153 | "\n", 154 | " buffer.append(end - start)\n", 155 | "\n", 156 | "\n", 157 | "@dataclass\n", 158 | "class OnnxInferenceResult:\n", 159 | " model_inference_time: [int] \n", 160 | " optimized_model_path: str" 161 | ] 162 | }, 163 | { 164 | "cell_type": "code", 165 | "execution_count": 5, 166 | "metadata": {}, 167 | "outputs": [ 168 | { 169 | "name": "stderr", 170 | "output_type": "stream", 171 | "text": [ 172 | "Tracking inference time for Pytorch CPU: 100%|██████████| 100/100 [00:04<00:00, 22.69it/s]\n", 173 | "Tracking inference time for ONNX CPU: 100%|██████████| 100/100 [00:02<00:00, 37.85it/s]\n" 174 | ] 175 | } 176 | ], 177 | "source": [ 178 | "pipelines = [(\"Pytorch CPU\", nlp_torch), (\"ONNX CPU\", nlp_onnx)]\n", 179 | "results = {}\n", 180 | "for label, pipeline_ in pipelines:\n", 181 | " # Compute \n", 182 | " time_buffer = []\n", 183 | " for _ in trange(100, desc=f\"Tracking inference time for {label}\"):\n", 184 | " with track_infer_time(time_buffer):\n", 185 | " pipeline_(\"My name is BERT\")\n", 186 | "\n", 187 | " # Store the result\n", 188 | " results[label] = OnnxInferenceResult(\n", 189 | " time_buffer, \n", 190 | " None\n", 191 | " )" 192 | ] 193 | }, 194 | { 195 | "cell_type": "code", 196 | "execution_count": 6, 197 | "metadata": {}, 198 | "outputs": [], 199 | "source": [ 200 | "%matplotlib inline\n", 201 | "\n", 202 | "import matplotlib\n", 203 | "import matplotlib.pyplot as plt\n", 204 | "import seaborn as sns\n", 205 | "sns.set()\n", 206 | "import numpy as np\n", 207 | "import os\n", 208 | "\n", 209 | "\n", 210 | "def plot_benchmark(results):\n", 211 | " # Compute average inference time + std\n", 212 | " time_results = {k: np.mean(v.model_inference_time) * 1e3 for k, v in results.items()}\n", 213 | " time_results_std = np.std([v.model_inference_time for v in results.values()]) * 1000\n", 214 | "\n", 215 | " plt.rcdefaults()\n", 216 | " fig, ax = plt.subplots(figsize=(16, 12))\n", 217 | " ax.set_ylabel(\"Avg Inference time (ms)\")\n", 218 | " ax.set_title(\"Average inference time (ms) for each provider\")\n", 219 | " ax.bar(time_results.keys(), time_results.values(), yerr=time_results_std)\n", 220 | " plt.show()" 221 | ] 222 | }, 223 | { 224 | "cell_type": "code", 225 | 
"execution_count": 7, 226 | "metadata": {}, 227 | "outputs": [ 228 | { 229 | "data": { 230 | "image/png": "iVBORw0KGgoAAAANSUhEUgAABRoAAAPeCAYAAABjjKazAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAAPYQAAD2EBqD+naQAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+WH4yJAAAgAElEQVR4nOzdeZRU1b347W83QzdTgwwCAQQBFcQhhtwgDqAIKoJoRBk0BgdCchUiGOLVeCMSUYwaxRhxiF6c2gAqEDRXRSMSB5wwKjFR0QtKQAFRQAQR6fr94dv1UnQz7iYN5nnW6rW6dp2qs+tUVS/5eIa8TCaTCQAAAACABPmVPQEAAAAAYPcnNAIAAAAAyYRGAAAAACCZ0AgAAAAAJBMaAQAAAIBkQiMAAAAAkExoBAAAAACSCY0AAAAAQDKhEQAAAABIJjQCALuVVq1axVlnnbXDj7/22mujdevWUaVKlfj2t79dcRP7Bnn66acjLy8vnn766cqeSo7Vq1fHnnvuGcXFxf/S9V588cXRqVOn7XrMN/1zVvoZefDBByt7KhXmrrvuiry8vFiwYMFWl039OwQA31RCIwDsgsaPHx95eXnbHTfYshkzZsRFF10Uhx9+eEyYMCGuuuqqyp5SpRo/fnzcddddlT2NbXbjjTdGnTp1YsCAAf/S9Q4fPjxef/31mD59+jYt73MGAPy7qlrZEwAAyiouLo5WrVrFSy+9FO+++260bdu2sqe0y3j77bcjP3/H/l/pU089Ffn5+XHnnXdG9erVK3hmu5/x48dHw4YNy+yZ1aVLl1i7du0utY3Wr18fN954Y4wYMSKqVKnyL113kyZN4qSTTorrrrsu+vTps9Xlfc52T2eeeWYMGDAgCgoKKnsqALDbskcjAOxi5s+fH88//3xcf/310ahRo3/5YaIRESUlJfHFF1/8y9e7LQoKCqJatWo79NilS5dGjRo1KjT+rFmzpsKea1eRn58fhYWFOxx0d4ZHHnkkli1bFv369auU9ffr1y+effbZ+L//+7+tLlvRn7NMJhNr166tkOf6JthZf5+qVKkShYWFkZeXV+HPvSVfffVVfPnll//SdQLAzrLr/NcjABARX+/NuMcee0SvXr3i1FNPzQmN69evj/r168fZZ59d5nGrVq2KwsLCGDlyZHZs3bp1MWrUqGjbtm0UFBREixYt4qKLLop169blPDYvLy+GDh0axcXF0aFDhygoKIjHHnssIiKuu+66OOyww6JBgwZRo0aN6NixY7nnZVu7dm389Kc/jYYNG0adOnWiT58+sWjRosjLy4vLL788Z9lFixbFOeecE40bN46CgoLo0KFD/M///M82bZ9Nz41Wel615557Li688MJo1KhR1KpVK77//e/HsmXLcl7jhAkT4vPPP4+8vLzIy8vLOWz4vvvui44dO0aNGjWifv36MWDAgFi4cGHOuo866qg44IADYs6cOdGlS5eoWbNm/OIXv9ihbT1t2rQ44IADsq+/dHtvup3OPffc+Na3vhUFBQWx9957x3/+53/mRIkVK1bE8OHDo0WLFlFQUBBt27aNX//611FSUrLV7fjmm2/GrFmzstvjqKOOiojyz9FY+trfeOON6Nq1a9SsWTPatm2b/SzMmjUrOnXqFDVq1Ij99tsvnnzyyXJfz46+79OmTYtWrVpFmzZtcsbPOuusqF27dnzwwQfRu3fvqF27djRr1ixuvvnmiIiYO3dudOvWLWrVqhUtW7aM+++/P+fx69evj9GjR8c+++wThYWF0aBBgzjiiCPiiSeeyFmue/fuERHxxz/+cYvz3NLn7Kuvvoorrrgi2rRpEwUFBdGqVav4xS9+UeYz0qpVq+jdu3c8/vjj8d3vfjdq1KgRt9122xbX++KLL8bxxx8fdevWjZo1a0bXrl3jueeey1nm/fffj/POOy/222+/qFGjRjRo0CBOO+20cs9JuGLFihgxYkS0atUqCgoKonnz5vHDH/4wPv7445zlSkpK4sorr4zmzZtHYWFhHHPMMfHuu+9uca4REZdffnnk5eXFW2+9Ff369YuioqJo0KBBXHDBBWUi4pb+Pv31r3+Nnj17RlFRUdSuXTuOOeaYeOGFF7KPfeWVVyIvLy/uvvvuMnN4/PHHIy8vLx555JGIKP8cjZlMJsaMGRPNmzePmjVrxtFHHx1vvvlmua9pW76LCxYsiLy8vLjuuuti3Lhx2c/C3//+961uMwDYHTh0GgB2McXFxXHKKadE9erVY+DAgXHLLbfEyy+/HP/xH/8R1apVi+9///sxZcqUuO2223L2mJo2bVqsW7cue/66kpKS6NOnTzz77LMxZMiQaN++fcydOzduuOGGeOedd2LatGk5633qqadi8uTJMXTo0GjYsGG0atUqIr4+L16fPn3ijDPOiC+//DImTpwYp512WjzyyCPRq1ev7OPPOuusmDx5cpx55plx6KGHxqxZs3LuL7VkyZI49NBDs/GgUaNG8eijj8a5554bq1atiuHDh+/Qdhs2bFjsscceMWrUqFiwYEGMGzcuhg4dGpMmTYqIiHvvvTduv/32eOmll+KOO+6IiIjDDjssIiKuvPLK+OUvfxn9+vWLwYMHx7Jly+Kmm26KLl26xF//+teoV69edj3Lly+Pnj17xoABA+IHP/hBNG7ceLu39bPPPhtTpkyJ8847L+rUqRO//e1vo2/fvvHBBx9EgwYNIiJi8eLF8b3vfS9WrFgRQ4YMiXbt2sWiRYviwQcfjDVr1kT16tVjzZo10bVr11i0aFH8+Mc/jr322iuef/75uOSSS+LDDz+McePGbXZ7jRs3LoYNGxa1a9eOSy+9NCIiGjduvMVt/Omnn0bv3r1jwIABcdppp8Utt9wSAwYMiOLi4hg+fHj85Cc/idNPPz2uvfbaOPXUU2PhwoVRp06diEh/359//vn4zne+U+59GzZsiJ49e0aXLl3immuuieLi4hg6dGjUqlUrLr300jjjjDPilFNOiVtvvTV++MMfRufOnWPvvfeOiK+D19ixY2Pw4MHxve99L1atWhWvvPJKvPrqq9GjR4/sOurWrRtt2rSJ5557LkaMGLHZeW7pczZ48OC4++6749RTT42f/exn8eKLL8bYsWPjH//4R0ydOjXned5+++0YOHBg/PjHP44f/ehHsd9++212nU899VT07NkzOnbsGKNGjYr8/PyYMGFCdOvWLZ555pn43ve+FxERL7/8cjz//PMxYMCAaN68eSxYsCBuueWWOOqoo+Lvf/971KxZMyK+vujOkUceGf/4xz/inHPOie985zvx8ccfx/Tp0+Of//xnNGzYMLvuq6++OvLz82PkyJGxcuXKuOaaa+KMM86IF198cbPz3Vi/fv2iVatWMXbs2HjhhRfit7/9bXz66adxzz33lHmNm/59evPNN+PI
I4+MoqKiuOiii6JatWpx2223xVFHHZUN39/97nejdevWMXny5Bg0aFDOc06aNCn22GOPOO644zY7v8suuyzGjBkTJ5xwQpxwwgnx6quvxrHHHltmD8Tt/S5OmDAhvvjiixgyZEgUFBRE/fr1t2l7AcAuLwMA7DJeeeWVTERknnjiiUwmk8mUlJRkmjdvnrnggguyyzz++OOZiMg8/PDDOY894YQTMq1bt87evvfeezP5+fmZZ555Jme5W2+9NRMRmeeeey47FhGZ/Pz8zJtvvllmTmvWrMm5/eWXX2YOOOCATLdu3bJjc+bMyUREZvjw4TnLnnXWWZmIyIwaNSo7du6552aaNm2a+fjjj3OWHTBgQKZu3bpl1repli1bZgYNGpS9PWHChExEZLp3754pKSnJjo8YMSJTpUqVzIoVK7JjgwYNytSqVSvn+RYsWJCpUqVK5sorr8wZnzt3bqZq1ao54127ds1ERObWW2/NWXZ7t3X16tUz7777bnbs9ddfz0RE5qabbsqO/fCHP8zk5+dnXn755TLboPR1XnHFFZlatWpl3nnnnZz7L7744kyVKlUyH3zwQZnHbqxDhw6Zrl27lhmfOXNmJiIyM2fOLPPa77///uzYW2+9lf3svPDCC9nx0s/ohAkTsmMp7/v69eszeXl5mZ/97Gdl7hs0aFAmIjJXXXVVduzTTz/N1KhRI5OXl5eZOHFimflu/Hk8+OCDM7169drsujd27LHHZtq3b7/V5cr7nL322muZiMgMHjw4Z3zkyJGZiMg89dRT2bGWLVtmIiLz2GOPbXVdJSUlmX322Sdz3HHH5Xz+16xZk9l7770zPXr0yBnb1OzZszMRkbnnnnuyY5dddlkmIjJTpkwpd32ZzP//GWnfvn1m3bp12ftvvPHGTERk5s6du8V5jxo1KhMRmT59+uSMn3feeZmIyLz++uvZsc39fTr55JMz1atXz7z33nvZscWLF2fq1KmT6dKlS3bskksuyVSrVi3zySefZMfWrVuXqVevXuacc87JjpX+LZk/f34mk8lkli5dmqlevXqmV69eOdv2F7/4RSYicv4Obet3cf78+ZmIyBQVFWWWLl26xW0EALsjh04DwC6kuLg4GjduHEcffXREfH3IYP/+/WPixImxYcOGiIjo1q1bNGzYMLunXsTXe5o98cQT0b9//+zYAw88EO3bt4927drFxx9/nP3p1q1bRETMnDkzZ91du3aN/fffv8ycatSokbOelStXxpFHHhmvvvpqdrz0MMbzzjsv57HDhg3LuZ3JZOKhhx6KE088MTKZTM68jjvuuFi5cmXO826PIUOG5Jxb7cgjj4wNGzbE+++/v8XHTZkyJUpKSqJfv34582nSpEnss88+ZbZTQUFBmUPXt3dbd+/ePecQ4IMOOiiKioqy5/8rKSmJadOmxYknnhjf/e53y8y59HU+8MADceSRR8Yee+yRs97u3bvHhg0b4i9/+cvWNtt2qV27ds4Vn/fbb7+oV69etG/fPucK6aW/l76e1Pf9k08+iUwmE3vsscdmlxk8eHD293r16sV+++0XtWrVyjmnY+l8Nz7PYr169eLNN9+MefPmbfX1l27nHfG///u/ERFx4YUX5oz/7Gc/i4iIP/3pTznje++99xb3tCv12muvxbx58+L000+P5cuXZ7fr559/Hsccc0z85S9/yR66u/F3ef369bF8+fJo27Zt1KtXL2f7P/TQQ3HwwQfH97///TLr2/T8hWeffXbOntVHHnlkRMQ2ncsyIuL888/PuV36N6N0e5Xa9O/Thg0bYsaMGXHyySdH69ats+NNmzaN008/PZ599tlYtWpVRET0798/1q9fH1OmTMkuN2PGjFixYkXO38xNPfnkk/Hll1/GsGHDcl53eXvfbu93sW/fvtGoUaPNrhsAdlcOnQaAXcSGDRti4sSJcfTRR8f8+fOz4506dYrf/OY38ec//zmOPfbYqFq1avTt2zfuv//+WLduXRQUFMSUKVNi/fr1Of9onjdvXvzjH//Y7D9mly5dmnO79FDSTT3yyCMxZsyYeO2113LOJbfxP7zff//9yM/PL/Mcm14te9myZbFixYq4/fbb4/bbb9+meW2rvfbaK+d2aZT69NNPt/i4efPmRSaTiX322afc+ze98EyzZs3KXORje7f1pnMtnW/pXJctWxarVq2KAw44YKtzf+ONN7Z5vamaN29eJjTVrVs3WrRoUWYsInJeT0W875lMptzxwsLCMtugbt26m53vxp+JX/3qV3HSSSfFvvvuGwcccEAcf/zxceaZZ8ZBBx1U7vp39EIhpd+RTb8TTZo0iXr16pUJ4pv7Pm6qNJBueljwxlauXBl77LFHrF27NsaOHRsTJkyIRYsW5WzPlStXZn9/7733om/fvtu0/h393pXa9HvXpk2byM/PL3PeyE23x7Jly2LNmjXlHlLevn37KCkpiYULF0aHDh3i4IMPjnbt2sWkSZPi3HPPjYivD5tu2LBh9n8GlKf0Pdl0jo0aNSoTvbf3u7it7y8A7G6ERgDYRTz11FPx4YcfxsSJE2PixIll7i8uLo5jjz02IiIGDBgQt912Wzz66KNx8sknx+TJk6Ndu3Zx8MEHZ5cvKSmJAw88MK6//vpy17dpHNp4b6dSzzzzTPTp0ye6dOkS48ePj6ZNm0a1atViwoQJZS6qsS1K96z6wQ9+sNkwUl7g2RZVqlQpd3xzcWrjOeXl5cWjjz5a7nPUrl0753Z522l7t/WOzrW89fbo0SMuuuiicu/fd999t+v5tmZz897a60l93+vXrx95eXmbjVc7Oq+IiC5dusR7770Xf/zjH2PGjBlxxx13xA033BC33nprzl6SEV/Hs43PT7gjtjVUlvc5K0/ptr322mvj29/+drnLlH6Ghw0bFhMmTIjhw4dH586do27dupGXlxcDBgzY6sWDNqeiPsulNrd9tnV7bE7//v3jyiuvjI8//jjq1KkT06dPj4EDB0bVqhXzz6Ht/S6mvh4A2FUJjQCwiyguLo4999wze7XcjU2ZMiWmTp0at956a9SoUSO6dOkSTZs2jUmTJsURRxwRTz31VPaCHqXatGkTr7/+ehxzzDE7vBfWQw89FIWFhfH4449HQUFBdnzChAk5y7Vs2TJKSkpi/vz5OXv/bHr12UaNGkWdOnViw4YN2av4VrY2bdpEJpOJvffee4fDXEVs6401atQoioqK4m9/+9tW17t69eod3pYVMddtkfq+V61aNdq0aZOzp29FKr2S+9lnnx2rV6+OLl26xOWXX14mNM6fPz8n5m+P0u/IvHnzon379tnxJUuWxIoVK6Jly5Y79Lylh+AXFRVtdds++OCDMWjQoPjNb36THfviiy9ixYoVZZ5za5+9ijJv3rycvfvefffdKCkpyV6ManMaNWoUNWvWjLfffrvMfW+99Vbk5+fnBP7+/fvH6NGj46GHHorGjRvHqlWrck4DUJ7S92TevHk5h2cvW7asTPRO/S4CwDeFczQCwC5g7dq1MWXKlOjdu3eceuqpZX6GDh0an33
2WUyfPj0iIvLz8+PUU0+Nhx9+OO6999746quvypxrrF+/frFo0aL4/e9/X+76Pv/8863Oq0qVKpGXl5c9P2RExIIFC8pcRbn0XHLjx4/PGb/pppvKPF/fvn3joYceKjdkLFu2bKtzqminnHJKVKlSJUaPHl1mL6xMJhPLly/f6nNUxLbeWH5+fpx88snx8MMPxyuvvFLm/tJ59uvXL2bPnh2PP/54mWVWrFgRX3311RbXU6tWrTKRaWeoiPe9c+fO5W6LVJu+v7Vr1462bdvmnCYg4utDi997773sFaS31wknnBARUebqw6V7wZZ3hfZt0bFjx2jTpk1cd911sXr16jL3b7xtq1SpUuYzftNNN+V8vyO+Pn/g66+/XuZK2BE7vqfi5mz6P1ZK/2b07Nlzi4+rUqVKHHvssfHHP/4x5zDrJUuWxP333x9HHHFEFBUVZcfbt28fBx54YEyaNCkmTZoUTZs2jS5dumxxHd27d49q1arFTTfdlPO6y7uae+p3EQC+KezRCAC7gOnTp8dnn30Wffr0Kff+Qw89NBo1ahTFxcXZoNi/f/+46aabYtSoUXHggQfm7CUVEXHmmWfG5MmT4yc/+UnMnDkzDj/88NiwYUO89dZbMXny5Hj88cfLvdDIxnr16hXXX399HH/88XH66afH0qVL4+abb462bdvGG2+8kV2uY8eO0bdv3xg3blwsX748Dj300Jg1a1a88847EZG759zVV18dM2fOjE6dOsWPfvSj2H///eOTTz6JV199NZ588sn45JNPdmgb7qg2bdrEmDFj4pJLLokFCxbEySefHHXq1In58+fH1KlTY8iQITFy5MgtPkdFbOtNXXXVVTFjxozo2rVrDBkyJNq3bx8ffvhhPPDAA/Hss89GvXr14uc//3lMnz49evfuHWeddVZ07NgxPv/885g7d248+OCDsWDBgi0e6tuxY8e45ZZbYsyYMdG2bdvYc889t3jOuhSp7/tJJ50U9957b7zzzjsVekj4/vvvH0cddVR07Ngx6tevH6+88ko8+OCDMXTo0JzlnnzyychkMnHSSSft0HoOPvjgGDRoUNx+++2xYsWK6Nq1a7z00ktx9913x8knn5y9ANT2ys/PjzvuuCN69uwZHTp0iLPPPjuaNWsWixYtipkzZ0ZRUVE8/PDDERHRu3fvuPfee6Nu3bqx//77x+zZs+PJJ5+MBg0a5Dznz3/+83jwwQfjtNNOi3POOSc6duwYn3zySUyfPj1uvfXWHd6rszzz58+PPn36xPHHHx+zZ8+O++67L04//fRtWseYMWPiiSeeiCOOOCLOO++8qFq1atx2222xbt26uOaaa8os379//7jsssuisLAwzj333MjP3/I+F40aNYqRI0fG2LFjo3fv3nHCCSfEX//613j00UfLfK9Sv4sA8E0hNALALqC4uDgKCwujR48e5d6fn58fvXr1iuLi4li+fHk0aNAgDjvssGjRokUsXLiw3Cun5ufnx7Rp0+KGG26Ie+65J6ZOnRo1a9aM1q1bxwUXXLBNsaZbt25x5513xtVXXx3Dhw+PvffeO37961/HggULckJjRMQ999wTTZo0iT/84Q8xderU6N69e0yaNCn222+/KCwszC7XuHHjeOmll+JXv/pVTJkyJcaPHx8NGjSIDh06xK9//evt3HIV4+KLL4599903brjhhhg9enREfH1exWOPPXaz8XdjFbGtN9WsWbN48cUX45e//GUUFxfHqlWrolmzZtGzZ8+oWbNmRETUrFkzZs2aFVdddVU88MADcc8990RRUVHsu+++MXr06OxFWTbnsssui/fffz+uueaa+Oyzz6Jr1647LTSmvu8nnnhiNGzYMCZPnhz//d//XWHz+ulPfxrTp0+PGTNmxLp166Jly5YxZsyY+PnPf56z3AMPPBBHHHFEztXCt9cdd9wRrVu3jrvuuiumTp0aTZo0iUsuuSRGjRqV9BqOOuqomD17dlxxxRXxu9/9LlavXh1NmjSJTp06xY9//OPscjfeeGNUqVIliouL44svvojDDz88nnzyyTJXt65du3Y888wzMWrUqJg6dWrcfffdseeee8YxxxwTzZs3T5rrpiZNmhSXXXZZXHzxxVG1atUYOnRoXHvttdv02A4dOsQzzzwTl1xySYwdOzZKSkqiU6dOcd999+VcBb1U//7947//+79jzZo1W7za9MbGjBkThYWFceutt2ZD+YwZM8rsgZr6XQSAb4q8TEUf/wAA8P957bXX4pBDDon77rsvzjjjjMqeDru5K664IiZMmBDz5s3b7EVIdoaPPvoo9t5775g4ceIO79FIrssvvzxGjx4dy5Yts6cfAHyDOEcjAFAh1q5dW2Zs3LhxkZ+fv9VzocG2GDFiRKxevbrcq7LvTOPGjYsDDzxQZAQA2AqHTgMAFeKaa66JOXPmxNFHHx1Vq1aNRx99NB599NEYMmRIztVfYUfVrl07li5d+i9f79VXX/0vXycAwO5IaAQAKsRhhx0WTzzxRFxxxRWxevXq2GuvveLyyy+PSy+9tLKnBgAA/As4RyMAAAAAkMw5GgEAAACAZEIjAAAAAJDsG3+OxpKSkli8eHHUqVMn8vLyKns6AAAAALBbyWQy8dlnn8W3vvWtyM/f/H6L3/jQuHjxYle6BAAAAIBECxcujObNm2/2/m98aKxTp05EfL0hioqKKnk2AAAAALB7WbVqVbRo0SLb2TbnGx8aSw+XLioqEhoBAAAAYAdt7bSELgYDAAAAACQTGgEAAACAZEIjAAAAAJBMaAQAAAAAkgmNAAAAAEAyoREAAAAASCY0AgAAAADJhEYAAAAAIJnQCAAAAAAkExoBAAAAgGRCIwAAAACQTGgEAAAAAJIJjQAAAABAMqERAAAAAEgmNAIAAAAAyYRGAAAAACCZ0AgAAAAAJBMaAQAAAIBkQiMAAAAAkExoBAAAAACSCY0AAAAAQDKhEQAAAABIJjQCAAAAAMmERgAAAAAgmdAIAAAAACQTGgEAAACAZEIjAAAAAJBMaAQAAAAAkgmNAAAAAEAyoREAAAAASCY0AgAAAADJhEYAAAAAIJnQCAAAAAAkExoBAAAAgGRCIwAAAACQTGgEAAAAAJIJjQAAAABAMqERAAAAAEgmNAIAAAAAyYRGAAAAACCZ0AgAAAAAJBMaAXZxa9asiWbNmkWzZs1izZo1lT0dAAAAKJfQCAAAAAAkExoBAAAAgGRCIwAAAACQTGgEAAAAAJIJjQAAAABAMqERAAAAAEgmNAIAAAAAyYRGAAAAACCZ0AgAAAAAJBMaAQAAAIBkQiMAAAAAkExoBAAAAACSCY0AAAAAQDKhEQAAAABIJjQCAAAAAMmERgAAAAAgmdAIAAAAACQTGgEAAACAZEIjAAAAAJBMaAQAAAAAkgmNAAAAAEAyoREAAAAASCY0AgAAAADJhEYAAAAAIJnQCAAAAAAkExoBAAAAgGRCIwAAAACQTGgEAAAAAJIJjQAAAABAMqERAAAAAEgmNAIAAAAAyYRGAAAAACCZ0AgAAAAAJBMaAQ
AAAIBkQiMAAAAAkExoBAAAAACSCY0AAAAAQDKhEQAAAABIJjQCAAAAAMmERgAAAAAgmdAIAAAAACQTGgEAAACAZEIjAAAAAJBMaAQAAAAAkgmNAAAAAEAyoREAAAAASCY0AgAAAADJhEYAAAAAIJnQCAAAAAAkExoBAAAAgGRCIwAAAACQrFJD4+WXXx55eXk5P+3atcve/8UXX8T5558fDRo0iNq1a0ffvn1jyZIllThjAAAAAKA8lb5HY4cOHeLDDz/M/jz77LPZ+0aMGBEPP/xwPPDAAzFr1qxYvHhxnHLKKZU4WwAAAACgPFUrfQJVq0aTJk3KjK9cuTLuvPPOuP/++6Nbt24RETFhwoRo3759vPDCC3HooYf+q6cKAAAAAGxGpe/ROG/evPjWt74VrVu3jjPOOCM++OCDiIiYM2dOrF+/Prp3755dtl27drHXXnvF7NmzN/t869ati1WrVuX8AAAAAAA7V6WGxk6dOsVdd90Vjz32WNxyyy0xf/78OPLII+Ozzz6Ljz76KKpXrx716tXLeUzjxo3jo48+2uxzjsGlAccAACAASURBVB07NurWrZv9adGixc5+GQAAAADwb69SD53u2bNn9veDDjooOnXqFC1btozJkydHjRo1dug5L7nkkrjwwguzt1etWiU2AgAAAMBOVumHTm+sXr16se+++8a7774bTZo0iS+//DJWrFiRs8ySJUvKPadjqYKCgigqKsr5AQAAAAB2rl0qNK5evTree++9aNq0aXTs2DGqVasWf/7zn7P3v/322/HBBx9E586dK3GWAAAAAMCmKvXQ6ZEjR8aJJ54YLVu2jMWLF8eoUaOiSpUqMXDgwKhbt26ce+65ceGFF0b9+vWjqKgohg0bFp07d3bFaQAAAADYxVRqaPznP/8ZAwcOjOXLl0ejRo3iiCOOiBdeeCEaNWoUERE33HBD5OfnR9++fWPdunVx3HHHxfjx4ytzygAAAABAOfIymUymsiexM61atSrq1q0bK1eudL5GYLe0Zs2a2GeffSIiYt68eVGzZs1KnhEAAAD/Tra1r+1S52gEAAAAAHZPQiMAAAAAkExoBAAAAACSCY0AAAAAQDKhEQAAAABIJjQCAAAAAMmERgAAAAAgmdAIAAAAACQTGgEAAACAZEIjAAAAAJBMaAQAAAAAkgmNAAAAAEAyoREAAAAASCY0AgAAAADJhEYAAAAAIJnQCAAAAAAkExoBAAAAgGRCIwAAAACQTGgEAAAAAJIJjQAAAABAMqERAAAAAEgmNAIAAAAAyYRGAAAAACCZ0AgAAAAAJBMaAQAAAIBkQiMAAAAAkExoBAAAAACSCY0AAAAAQDKhEQAAAABIJjQCAAAAAMmERgAAAAAgmdAIAAAAACQTGgEAAACAZEIjAAAAAJBMaAQAAAAAkgmNAAAAAEAyoREAAAAASCY0AgAAAADJhEYAAAAAIJnQCAAAAAAkExoBAAAAgGRCIwAAAACQTGgEAAAAAJIJjQAAAABAMqERAAAAAEgmNAIAAAAAyYRGAAAAACCZ0AgAAAAAJBMaAQAAAIBkQiMAAAAAkExoBAAAAACSCY0AAAAAQDKhEQAAAABIJjQCAAAAAMmERgAAAAAgmdAIAAAAACQTGgEAAACAZEIjAAAAAJBMaAQAAAAAkgmNAAAAAEAyoREAAAAASCY0AgAAAADJhEYAAAAAIJnQCAAAAAAkExoBAAAAgGRCIwAAAACQTGgEAAAAAJIJjQAAAABAMqERAAAAAEgmNAIAAAAAyYRGAAAAACCZ0AgAAAAAJBMaAQAAAIBkQiMAAAAAkExoBAAAAACSCY0AAAAAQDKhEQAAAABIJjQCAAAAAMmERgAAAAAgmdAIAAAAACQTGgEAAACAZEIjAAAAAJBMaAQAAAAAkgmNAAAAAEAyoREAAAAASCY0AgAAAADJhEYAAAAAIJnQCAAAAAAkExoBAAAAgGRCIwAAAACQTGgEAAAAAJIJjQAAAABAMqERAAAAAEgmNAIAAAAAyYRGAAAAACCZ0AgAAAAAJBMaAQAAAIBkVSt7AqRrdfGfKnsKwE6UWb8u+3v7Xz4WedUKKnE2wM604OpelT0FAADYYfZoBAAAAACSCY0AAAAAQDKhEQAAAABIJjQCAAAAAMmERgAAAAAgmdAIAAAAACQTGgEAAACAZEIjAAAAAJBMaAQAAAAAkgmNAAAAAEAyoREAAAAASCY0AgAAAADJhEYAAAAAIJnQCAAAAAAkExoBAAAAgGRCIwAAAACQTGgEAAAAAJIJjQAAAABAMqERAAAAAEgmNAIAAAAAyYRGAAAAACCZ0AgAAAAAJBMaAQAAAIBkQiMAAAAAkExoBAAAAACSCY0AAAAAQDKhEQAAAABIJjQCAAAAAMmERgAAAAAgmdAIAAAAACQTGgEAAACAZEIjAAAAAJBMaAQAAAAAkgmNAAAAAEAyoREAAAAASCY0AgAAAADJhEYAAAAAIJnQCAAAAAAkExoBAAAAgGRCIwAAAACQTGgEAAAAAJIJjQAAAABAMqERAAAAAEgmNAIAAAAAyYRGAAAAACCZ0AgAAAAAJBMaAQAAAIBkQiMAAAAAkExoBAAAAACSCY0AAAAAQDKhEQAAAABIJjQCAAAAAMmERgAAAAAgmdAIAAAAACQTGgEAAACAZLtMaLz66qsjLy8vhg8fnh374osv4vzzz48GDRpE7dq1o2/fvrFkyZJKnCUAAAAAUJ5dIjS+/PLLcdttt8VBBx2UMz5ixIh4+OGH44EHHohZs2bF4sWL45RTTqmkWQIAAAAAm1PpoXH16tVxxhlnxO9///vYY489suMrV66MO++8M66//vro1q1bdOzYMSZMmBDPP/98vPDCC5U4YwAAAABgU5UeGs8///zo1atXdO/ePWd8zpw5sX79+pzxdu3axV577RWzZ8/+V08TAAAAANiCqpW58okTJ8arr74aL7/8cpn7Pvroo6hevXrUq1cvZ7xx48bx0UcfbfY5161bF+vWrcveXrVqVcVNGAAAAAAoV6Xt0bhw4cK44IILori4OAoLCyvseceOHRt169bN/rRo0aLCnhsAAAAAKF+lhcY5c+bE0qVL4zvf+U5UrVo1qlatGrNmzYrf/va3UbVq1WjcuHF8+eWXsWLFipzHLVmyJJo0abLZ573kkkti5cqV2Z+FCxfu7JcCAAAAAP/2Ku3Q6WOOOSbmzp2bM3b22WdHu3bt4r/+67+iRYsWUa1atfjzn/8cffv2jYiIt99+Oz744IPo3LnzZp+3oKAgCgoKdurcAQAAAIBclRYa69SpEwcccEDOWK1ataJBgwbZ8XPPPTcuvPDCqF+/fhQVFcWwYcOic+fOceihh1bGlAEAAACAzajUi8FszQ033BD5+fnRt2/fWLduXRx33HExfvz4yp4WAAAAALCJXSo0Pv300zm3CwsL4+abb46bb765ciYEAAAAAGyTSrsYDAAAAADwzSE0AgAAAADJhEYAAAAAIJnQCAAAAAAkExoBAAAAgGRCIwAAAACQTGgEAAAAAJIJjQAAAABAM
qERAAAAAEgmNAIAAAAAyYRGAAAAACCZ0AgAAAAAJBMaAQAAAIBkQiMAAAAAkExoBAAAAACSCY0AAAAAQDKhEQAAAABIJjQCAAAAAMmERgAAAAAgmdAIAAAAACQTGgEAAACAZEIjAAAAAJBMaAQAAAAAkgmNAAAAAEAyoREAAAAASCY0AgAAAADJhEYAAAAAIJnQCAAAAAAkExoBAAAAgGRCIwAAAACQTGgEAAAAAJIJjQAAAABAMqERAAAAAEgmNAIAAAAAyYRGAAAAACCZ0AgAAAAAJBMaAQAAAIBkQiMAAAAAkExoBAAAAACSCY0AAAAAQDKhEQAAAABIJjQCAAAAAMmERgAAAAAgmdAIAAAAACQTGgEAAACAZEIjAAAAAJBMaAQAAAAAkgmNAAAAAEAyoREAAAAASCY0AgAAAADJhEYAAAAAIJnQCAAAAAAkExoBAAAAgGRCIwAAAACQTGgEAAAAAJIJjQAAAABAMqERAAAAAEgmNAIAAAAAyYRGAAAAACCZ0AgAAAAAJBMaAQAAAIBkQiMAAAAAkExoBAAAAACSCY0AAAAAQDKhEQAAAABIJjQCAAAAAMmERgAAAAAgmdAIAAAAACQTGgEAAACAZEIjAAAAAJBMaAQAAICdYM2aNdGsWbNo1qxZrFmzprKnA7DTCY0AAAAAQDKhEQAAAABIJjQCAAAAAMmERgAAAAAgmdAIAAAAACQTGgEAAACAZEIjAAAAAJBMaAQAAAAAkgmNAAAAAEAyoREAAAAASCY0AgAAAADJhEYAAAAAIJnQCAAAAAAkExoBAAAAgGRCIwAAAACQTGgEAAAAAJIJjQAAAABAMqERAAAAAEgmNAIAAAAAyYRGAAAAACCZ0AgAAAAAJKta2RMAYMvyqhVEtTNvr+xpAAAAwBbZoxEAAAAASCY0AgAAAADJhEYAAAAAIJnQCAAAAAAkExoBAAAAgGRCIwAAAACQTGgEAAAAAJIJjQAAAABAMqERAAAAAEgmNAIAAAAAyYRGAAAAACCZ0AgAAAAAJBMaAQAAAIBkQiMAAAAAkExoBAAAAACSCY0AAAAAQDKhEQAAAABIVnV7Fl6xYkVMnTo1nnnmmXj//fdjzZo10ahRozjkkEPiuOOOi8MOO2xnzRMAAAAA2IVt0x6NixcvjsGDB0fTpk1jzJgxsXbt2vj2t78dxxxzTDRv3jxmzpwZPXr0iP333z8mTZq0s+cMAAAAAOxitmmPxkMOOSQGDRoUc+bMif3337/cZdauXRvTpk2LcePGxcKFC2PkyJEVOlEAAAAAYNe1TaHx73//ezRo0GCLy9SoUSMGDhwYAwcOjOXLl1fI5AAAAACA3cM2HTq9tciYujwAAAAAsHvb7qtO33333fGnP/0pe/uiiy6KevXqxWGHHRbvv/9+hU4OAAAAANg9bHdovOqqq6JGjRoRETF79uy4+eab45prromGDRvGiBEjKnyCAAAAAMCub5vO0bixhQsXRtu2bSMiYtq0adG3b98YMmRIHH744XHUUUdV9PwAAAAAgN3Adu/RWLt27ezFXmbMmBE9evSIiIjCwsJYu3Ztxc4OAAAAANgtbPcejT169IjBgwfHIYccEu+8806ccMIJERHx5ptvRqtWrSp6fgAAAADAbmC792i8+eabo3PnzrFs2bJ46KGHsleYnjNnTgwcOLDCJwgAAAAA7Pq2e4/GevXqxe9+97sy46NHj66QCQEAAAAAu5/tDo0REV988UW88cYbsXTp0igpKcmO5+XlxYknnlhhkwMAAAAAdg/bHRofe+yxOPPMM7MXhNlYXl5ebNiwoUImBgAAAADsPrb7HI3Dhg2Lfv36xYcffhglJSU5PyIjAAAAAPx72u7QuGTJkrjwwgujcePGO2M+AAAAAMBuaLtD46mnnhpPP/30TpgKAAAAALC72u5zNP7ud7+L0047LZ555pk48MADo1q1ajn3//SnP62wyQEAAAAAu4ftDo1/+MMfYsaMGVFYWBhPP/105OXlZe/Ly8sTGgEAAADg39B2h8ZLL700Ro8eHRdffHHk52/3kdcAAAAAwDfQdpfCL7/8Mvr37y8yAgAAAABZ210LBw0aFJMmTdoZcwEAAAAAdlPbfej0hg0b4pprronHH388DjrooDIXg7n++usrbHIAAAAAwO5hu0Pj3Llz45BDDomIiL/97W859218YRgAAAAA4N/HdofGmTNn7ox5AAAAAAC7MVd0AQAAAACSbVNo/MlPfhL//Oc/t+kJJ02aFMXFxUmTgv/H3r2HaV0X+P9/3RxmEIFBNEESEpOV8JAulhJ7gSmKh8xT66G18LCWXmgmWiuXCmVearomW6uSWLLbqrhqmmurruIRTyV47GCSmiKnMGE45GjM/P74fZ2rCTVu3zPO3Pl4XNdcF/fn/tz3vOCv+3py3/cHAAAAgNqyQR+d/tCHPpTtttsuY8aMyQEHHJBddtklgwcPTq9evfLaa6/ll7/8ZebOnZvZs2dn8ODBueKKKzp6NwAAAADQhWxQaPzWt76Vk046KVdeeWUuu+yy/PKXv2xzf9++fTN+/PhcccUV2WeffTpkKAAAAADQdW3wxWAGDhyYM888M2eeeWZee+21vPTSS/njH/+YzTbbLB/96EddcRoAAAAAPsCqvup0kmyyySbZZJNN2nsLAAAAAFCjXHUaAAAAACgmNAIAAAAAxYRGAAAAAKCY0AgAAAAAFHtPofFPf/pT7rrrrnz/+9/PqlWrkiSLFi3K6tWr23UcAAAAAFAbqr7q9O9+97vss88+eemll9LU1JS99torffv2zbe//e00NTVlxowZHbETAAAAAOjCqn5H4ymnnJJddtklr732WjbaaKPW4wcffHDmzJnTruMAAAAAgNpQ9TsaH3jggTz00EOpq6trc3yrrbbKK6+80m7DAAAAAIDaUfU7Gpubm7Nu3br1ji9cuDB9+/Ztl1EAAAAAQG2pOjTuvffemT59euvtSqWS1atXZ9q0adlvv/3adRwAAAAAUBuq/uj0xRdfnAkTJmTkyJF5/fXX8/nPfz7PPfdcNttss1x77bUdsREAAAAA6OKqDo1bbrllnnzyycyePTtPPfVUVq9eneOOOy7/9E//1ObiMAAAAADAB0fVoTFJevTokaOOOqq9twAAAAAANeo9hcZFixZl7ty5WbZsWZqbm9vc95WvfGWDn+fyyy/P5ZdfnhdffDFJst1222Xq1KnZd999kySvv/56TjvttMyePTtNTU2ZMGFCLrvssgwcOPC9zAYAAAAAOkjVoXHWrFn58pe/nLq6umy66aapVCqt91UqlapC45ZbbpkLLrggw4cPT0tLS/7jP/4jBx54YB5//PFst912OfXUU/PTn/40119/fRoaGnLSSSflkEMOyYMPPljtbAAAAACgA1VaWlpaqnnAkCFDcsIJJ2TKlCnp1q3qi1b/VQMGDMhFF12Uz33uc/nQhz6Ua665Jp/73OeSJL/+9a/zsY99LA8//HB22223DXq+xsbGNDQ0ZOXKlenXr1+77+0Ktjrjp509AQBoBy9esH9n
TwCgHa1duzbDhw9Pkjz33HPp3bt3Jy8CeG82tK9VXQrXrl2bI444ot0j47p16zJ79uysWbMmo0ePzrx58/Lmm29m/PjxreeMGDEiQ4cOzcMPP9yuvxsAAAAAKFN1LTzuuONy/fXXt9uAp59+On369El9fX1OOOGE3HTTTRk5cmSWLFmSurq69O/fv835AwcOzJIlS97x+ZqamtLY2NjmBwAAAADoWFV/R+P555+fz3zmM7n99tuzww47pGfPnm3u/853vlPV82277bZ54oknsnLlytxwww2ZOHFi7rvvvmpntdn3zW9+8z0/HgAAAACo3nsKjXfccUe23XbbJFnvYjDVqquryzbbbJMkGTVqVH7+85/n3/7t33L44YfnjTfeyIoVK9q8q3Hp0qUZNGjQOz7flClTMnny5NbbjY2NGTJkSNW7AAAAAIANV3VovPjii/PDH/4wRx99dAfMSZqbm9PU1JRRo0alZ8+emTNnTg499NAkybPPPpuXXnopo0ePfsfH19fXp76+vkO2AQAAAABvr+rQWF9fnzFjxrTLL58yZUr23XffDB06NKtWrco111yTe++9N3fccUcaGhpy3HHHZfLkyRkwYED69euXk08+OaNHj97gK04DAAAAAO+PqkPjKaecku9973v57ne/W/zLly1bli9+8YtZvHhxGhoasuOOO+aOO+7IXnvtlSS55JJL0q1btxx66KFpamrKhAkTctlllxX/XgAAAACgfVUdGn/2s5/l7rvvzq233prttttuvYvB/PjHP97g5/rBD37wrvf36tUrl156aS699NJqZwIAAAAA76OqQ2P//v1zyCGHdMQWAAAAAKBGVR0ar7rqqo7YAQAAAADUsG6dPQAAAAAAqH0b9I7Gv//7v8+cOXOyySabZOedd06lUnnHc+fPn99u4wAAAACA2rBBofHAAw9MfX1965/fLTQCAAAAAB88GxQap02b1vrnb3zjGx21BQAAAACoUVV/R+PWW2+dV199db3jK1asyNZbb90uowAAAACA2lJ1aHzxxRezbt269Y43NTVl4cKF7TIKAAAAAKgtG/TR6SS55ZZbWv98xx13pKGhofX2unXrMmfOnAwbNqx91wEAAAAANWGDQ+NBBx2UJKlUKpk4cWKb+3r27JmtttoqF198cfuuAwAAAABqwgaHxubm5iTJsGHD8vOf/zybbbZZh40CAAAAAGrLBofGt7zwwgsdsQMAAAAAqGFVXwwGAAAAAOAvCY0AAAAAQDGhEQAAAAAoJjQCAAAAAMXeU2j87W9/m7POOitHHnlkli1bliS57bbb8otf/KJdxwEAAAAAtaHq0Hjfffdlhx12yKOPPpof//jHWb16dZLkySefzLRp09p9IAAAAADQ9VUdGs8444yce+65ufPOO1NXV9d6fI899sgjjzzSruMAAAAAgNpQdWh8+umnc/DBB693fPPNN8/y5cvbZRQAAAAAUFuqDo39+/fP4sWL1zv++OOP58Mf/nC7jAIAAAAAakvVofGII47Iv/zLv2TJkiWpVCppbm7Ogw8+mNNPPz1f/OIXO2IjAAAAANDFVR0azzvvvIwYMSJDhgzJ6tWrM3LkyIwdOzaf+tSnctZZZ3XERgAAAACgi+tR7QPq6uoyc+bMTJ06NU8//XRWr16dnXfeOcOHD++IfQAAAABADag6NL5lyJAhGTJkSHtuAQAAAABqVNUfnT700EPz7W9/e73jF154Yf7xH/+xXUYBAAAAALWl6tB4//33Z7/99lvv+L777pv777+/XUYBAAAAALWl6tC4evXq1NXVrXe8Z8+eaWxsbJdRAAAAAEBtqTo07rDDDrnuuuvWOz579uyMHDmyXUYBAAAAALWl6ovBnH322TnkkEPy29/+NnvssUeSZM6cObn22mtz/fXXt/tAAAAAAKDrqzo0HnDAAbn55ptz3nnn5YYbbshGG22UHXfcMXfddVfGjRvXERsBAAAAgC6u6tCYJPvvv3/233//9t4CAAAAANSo9xQak+SNN97IsmXL0tzc3Ob40KFDi0cBAAAAALWl6tD43HPP5dhjj81DDz3U5nhLS0sqlUrWrVvXbuMAAAAAgNpQdWg8+uij06NHj9x6663ZYostUqlUOmIXAAAAAFBDqg6NTzzxRObNm5cRI0Z0xB4AAAAAoAZ1q/YBI0eOzPLlyztiCwAAAABQo6oOjd/+9rfz9a9/Pffee29effXVNDY2tvkBAAAAAD54qv7o9Pjx45Mke+65Z5vjLgYDAAAAAB9cVYfGe+65pyN2AAAAAAA1rOrQOG7cuI7YAQAAAADUsKq/ozFJHnjggRx11FH51Kc+lVdeeSVJ8qMf/Shz585t13EAAAAAQG2oOjTeeOONmTBhQjbaaKPMnz8/TU1NSZKVK1fmvPPOa/eBAAAAAEDXV3VoPPfcczNjxozMnDkzPXv2bD0+ZsyYzJ8/v13HAQAAAAC1oerQ+Oyzz2bs2LHrHW9oaMiKFSvaZRQAAAAAUFuqDo2DBg3KggUL1js+d+7cbL311u0yCgAAAACoLVWHxuOPPz6nnHJKHn300VQqlSxatChXX311Tj/99Jx44okdsREAAAAA6OJ6VPuAM844I83Nzdlzzz2zdu3ajB07NvX19Tn99NNz8sknd8RGAAAAAKCLqyo0rlu3Lg8++GAmTZqUr33ta1mwYEFWr16dkSNHpk+fPh21EQAAAADo4qoKjd27d8/ee++dX/3qV+nfv39GjhzZUbsAAAAAgBpS9Xc0br/99nn++ec7YgsAAAAAUKOqDo3nnntuTj/99Nx6661ZvHhxGhsb2/wAAAAAAB88VV8MZr/99kuSfPazn02lUmk93tLSkkqlknXr1rXfOgAAAACgJlQdGu+5556O2AEAAAAA1LCqQ+O4ceM6YgcAAAAAUMOq/o7GJHnggQdy1FFH5VOf+lReeeWVJMmPfvSjzJ07t13HAQAAAAC1oerQeOONN2bChAnZaKONMn/+/DQ1NSVJVq5cmfPOO6/dBwIAAAAAXd97uur0jBkzMnPmzPTs2bP1+JgxYzJ//vx2HQcAAAAA1IaqQ+Ozzz6bsWPHrne8oaEhK1asaJdRAAAAAEBtqTo0Dho0KAsWLFjv+Ny5c7P11lu3yygAAAAAoLZUHRqPP/74nHLKKXn00UdTqVSyaNGiXH311Tn99NNz4okndsRGAAAAAKCL61HtA84444w0Nzdnzz33zNq1azN27NjU19fn9NNPz8knn9wRGwEAAACALm6DQuNTTz2V7bffPt26dUulUsmZZ56Zr33ta1mwYEFWr16dkSNHpk+fPh29FQAAAADoojboo9M777xzli9fniTZeuut8+qrr6auri4jR47MJz/5SZERAAAAAD7gNugdjf37988LL7yQzTffPC+++GKam5s7ehcAAHwgbHXGTzt7AtBBWt5sav3zx86+PZWe9Z24BuhoL16wf2dP6HQbFBoPPfTQjBs3LltssUUqlUp22WWXdO/e/W3Pff7559t1IAAAAADQ9W1QaLziiityyCGHZMGCBfnKV76
S448/Pn379u3obQAAAABAjdjgq07vs88+SZJ58+bllFNOERoBAAAAgFYbHBrfctVVV3XEDgAAAACghlUdGtesWZMLLrggc+bMybJly9a7MIzvaAQAAACAD56qQ+M///M/57777ssXvvCF1ovDAAAAAAAfbFWHxttuuy0//elPM2bMmI7YAwAAAADUoG7VPmCTTTbJgAEDOmILAAAAAFCjqg6N3/rWtzJ16tSsXbu2I/YAAAAAADWo6o9OX3zxxfntb3+bgQMHZquttkrPnj3b3D9//vx2GwcAAAAA1IaqQ+NBBx3UETsAAAAAgBpWdWicNm1aR+wAAAAAAGpY1d/RCAAAAADwlzb4HY2bbLJJKpXKXz3vD3/4Q9EgAAAAAKD2bHBonD59ekfuAAAAAABq2AaHxokTJ3bkDgAAAACghvmORgAAAACgmNAIAAAAABQTGgEAAACAYkIjAAAAAFBMaAQAAAAAim3wVaffMnny5Lc9XqlU0qtXr2yzzTY58MADM2DAgOJxAAAAAEBtqDo0Pv7445k/f37WrVuXbbfdNknym9/8Jt27d8+IESNy2WWX5bTTTsvcuXMzcuTIdh8MAAAAAHQ9VX90+sADD8z48eOzaNGizJs3L/PmzcvChQuz11575cgjj8wrr7ySsWPH5tRTT+2IvQAAAABAF1R1aLzooovyrW99K/369Ws91tDQkG984xu58MIL07t370ydOjXz5s1r16EAAAAAQNdVdWhcuXJlli1btt7x3//+92lsbEyS9O/fP2+88Ub5OgAAAACgJrynj04fe+yxuemmm7Jw4cIsXLgwN910U4477rgc68r1EwAAIABJREFUdNBBSZKf/exn+bu/+7t2HwsAAAAAdE1VXwzm+9//fk499dQcccQR+dOf/vT/P0mPHpk4cWIuueSSJMmIESNy5ZVXtu9SAAAAAKDLqjo09unTJzNnzswll1yS559/Pkmy9dZbp0+fPq3n7LTTTu23EAAAAADo8qr+6PR//dd/Ze3atenTp0923HHH7Ljjjm0iIwAAAADwwVN1aDz11FOz+eab5/Of/3z+93//N+vWreuIXQAAAABADak6NC5evDizZ89OpVLJYYcdli222CKTJk3KQw891BH7AAAAAIAaUHVo7NGjRz7zmc/k6quvzrJly3LJJZfkxRdfzKc//el89KMf7YiNAAAAAEAXV/XFYP5c7969M2HChLz22mv53e9+l1/96lfttQsAAAAAqCFVv6MxSdauXZurr746++23Xz784Q9n+vTpOfjgg/OLX/yivfcBAAAAADWg6nc0HnHEEbn11lvTu3fvHHbYYTn77LMzevTojtgGAAAAANSIqkNj9+7d89///d+ZMGFCunfv3ua+Z555Jttvv327jQMAAAAAakPVofHqq69uc3vVqlW59tprc+WVV2bevHlZt25du40DAAAAAGrDe/qOxiS5//77M3HixGyxxRb513/91+yxxx555JFH2nMbAAAAAFAjqnpH45IlSzJr1qz84Ac/SGNjYw477LA0NTXl5ptvzsiRIztqIwAAAADQxW3wOxoPOOCAbLvttnnqqacyffr0LFq0KN/73vc6chsAAAAAUCM2+B2Nt912W77yla/kxBNPzPDhwztyEwAAAABQYzb4HY1z587NqlWrMmrUqOy6667593//9yxfvrwjtwEAAAAANWKDQ+Nuu+2WmTNnZvHixfnyl7+c2bNnZ/DgwWlubs6dd96ZVatWdeROAAAAAKALq/qq0xtvvHGOPfbYzJ07N08//XROO+20XHDBBdl8883z2c9+tiM2AgAAAABdXNWh8c9tu+22ufDCC7Nw4cJce+217bUJAAAAAKgxRaHxLd27d89BBx2UW265pT2eDgAAAACoMe0SGgEAAACADzahEQAAAAAoJjQCAAAAAMWERgAAAACgmNAIAAAAABQTGgEAAACAYkIjAAAAAFBMaAQAAAAAigmNAAAAAEAxoREAAAAAKCY0AgAAAADFhEYAAAAAoJjQCAAAAAAUExoBAAAAgGJCIwAAAABQTGgEAAAAAIoJjQAAAABAMaERAAAAACgmNAIAAAAAxYRGAAAAAKCY0AgAAAAAFBMaAQAAAIBiQiMAAAAAUExoBAAAAACKCY0AAAAAQDGhEQAAAAAoJjQCAAAAAMWERgAAAACgmNAIAAAAABQTGgEAAACAYkIjAAAAAFBMaAQAAAAAigmNAAAAAEAxoREAAAAAKCY0AgAAAADFhEYAAAAAoJjQCAAAAAAUExoBAAAAgGJCIwAAAABQTGgEAAAAAIoJjQAAAABAMaERAAAAACjWqaHx/PPPzyc+8Yn07ds3m2++eQ466KA8++yzbc55/fXXM2nSpGy66abp06dPDj300CxdurSTFgMAAAAAb6dTQ+N9992XSZMm5ZFHHsmdd96ZN998M3vvvXfWrFnTes6pp56a//mf/8n111+f++67L4sWLcohhxzSiasBAAAAgL/UozN/+e23397m9qxZs7L55ptn3rx5GTt2bFauXJkf/OAHueaaa7LHHnskSa666qp87GMfyyOPPJLddtutM2YDAAAAAH+hS31H48qVK5MkAwYMSJLMmzcvb775ZsaPH996zogRIzJ06NA8/PDDnbIRAAAAAFhfp76j8c81Nzfnq1/9asaMGZPtt98+SbJkyZLU1dWlf//+bc4dOHBglixZ8rbP09TUlKamptbbjY2NHTcaAAAAAEjShd7ROGnSpDzzzDOZPXt20fOcf/75aWhoaP0ZMmRIOy0EAAAAAN5JlwiNJ510Um699dbcc8892XLLLVuPDxo0KG+88UZWrFjR5vylS5dm0KBBb/tcU6ZMycqVK1t/Xn755Q7dDgAAAAB0cmhsaWnJSSedlJtuuil33313hg0b1ub+UaNGpWfPnpkzZ07rsWeffTYvvfRSRo8e/bbPWV9fn379+rX5AQAAAAA6Vqd+R+OkSZNyzTXX5Cc/+Un69u3b+r2LDQ0N2WijjdLQ0JDjjjsukydPzoABA9KvX7+cfPLJGT16tCtOAwAAAEAX0qmh8fLLL0+S7L777m2OX3XVVTn66KOTJJdcckm6deuWQw89NE1NTZkwYUIuu+yy93kpAAAAAPBuOjU0trS0/NVzevXqlUsvvTSXXnrp+7AIAAAAAHgvusTFYAAAAACA2iY0AgAAAADFhEYAAAAAoJjQCAAAAAAUExoBAAAAgGJCIwAAAABQTGgEAAAAAIoJjQAAAABAMaERAAAAACgmNAIAAAAAxYRGAAAAAKCY0AgAAAAAFBMaAQAAAIBiQiMAAAAAUExoBAAAAACKCY0AAAAAQDGhEQAAAAAoJjQCAAAAAMWERgAAAACgmNAIAAAAABQTGgEAAACAYkIjAAAAAFBMaAQAAAAAigmNAAAAAEAxoREAAAAAKCY0AgAAAADFhEYAAAAAoJjQCAAAAAAUExoBAAAAgGJCIwAAAABQTGgEAAAAAIoJjQAAAABAMaERAAAAACgmNAIAAAAAxYRGAAAAAKCY0AgAAAAAFBMaAQAAAI
BiQiMAAAAAUExoBAAAAACKCY0AAAAAQDGhEQAAAAAoJjQCAAAAAMWERgAAAACgmNAIAAAAABQTGgEAAACAYkIjAAAAAFBMaAQAAAAAigmNAAAAAEAxoREAAAAAKCY0AgAAAADFhEYAAAAAoJjQCAAAAAAUExoBAAAAgGJCIwAAAABQTGgEAAAAAIoJjQAAAABAMaERAAAAACgmNAIAAAAAxXp09gAAAAD4W1TpWZ+eX7iis2cAvG+8oxEAAAAAKCY0AgAAAADFhEYAAAAAoJjQCAAAAAAUExoBAAAAgGJCIwAAAABQTGgEAAAAAIoJjQAAAABAMaERAAAAACgmNAIAAAAAxYRGAAAAAKCY0AgAAAAAFBMaAQAAAIBiQiMAAAAAUExoBAAAAACKCY0AAAAAQDGhEQAAAAAoJjQCAAAAAMWERgAAAACgmNAIAAAAABQTGgEAAACAYkIjAAAAAFBMaAQAAAAAigmNAAAAAEAxoREAAAAAKCY0AgAAAADFhEYAAAAAoJjQCAAAAAAUExoBAAAAgGJCIwAAAABQTGgEAAAAAIoJjQAAAABAMaERAAAAACgmNAIAAAAAxYRGAAAAAKCY0AgAAAAAFBMaAQAAAIBiQiMAAAAAUExoBAAAAACKCY0AAAAAQDGhEQAAAAAoJjQCAAAAAMWERgAAAACgmNAIAAAAABQTGgEAAACAYkIjAAAAAFBMaAQAAAAAigmNAAAAAEAxoREAAAAAKCY0AgAAAADFhEYAAAAAoJjQCAAAAAAUExoBAAAAgGJCIwAAAABQTGgEAAAAAIoJjQAAAABAMaERAAAAACgmNAIAAAAAxYRGAAAAAKCY0AgAAAAAFBMaAQAAAIBiQiMAAAAAUExoBAAAAACKCY0AAAAAQDGhEQAAAAAoJjQCAAAAAMWERgAAAACgmNAIAAAAABQTGgEAAACAYkIjAAAAAFBMaAQAAAAAigmNAAAAAEAxoREAAAAAKCY0AgAAAADFhEYAAAAAoJjQCAAAAAAUExoBAAAAgGJCIwAAAABQTGgEAAAAAIoJjQAAAABAMaERAAAAACgmNAIAAAAAxYRGAAAAAKCY0AgAAAAAFBMaAQAAAIBiQiMAAAAAUExoBAAAAACKCY0AAAAAQDGhEQAAAAAoJjQCAAAAAMWERgAAAACgmNAIAAAAABQTGgEAAACAYkIjAAAAAFBMaAQAAAAAinVqaLz//vtzwAEHZPDgwalUKrn55pvb3N/S0pKpU6dmiy22yEYbbZTx48fnueee66S1AAAAAMA76dTQuGbNmnz84x/PpZde+rb3X3jhhfnud7+bGTNm5NFHH83GG2+cCRMm5PXXX3+flwIAAAAA76ZHZ/7yfffdN/vuu+/b3tfS0pLp06fnrLPOyoEHHpgk+c///M8MHDgwN998c4444oj3cyoAAAAA8C667Hc0vvDCC1myZEnGjx/feqyhoSG77rprHn744Xd8XFNTUxobG9v8AAAAAAAdq8uGxiVLliRJBg4c2Ob4wIEDW+97O+eff34aGhpaf4YMGdKhOwEAAACALhwa36spU6Zk5cqVrT8vv/xyZ08CAAAAgL95XTY0Dho0KEmydOnSNseXLl3aet/bqa+vT79+/dr8AAAAAAAdq8uGxmHDhmXQoEGZM2dO67HGxsY8+uijGT16dCcuAwAAAAD+UqdedXr16tVZsGBB6+0XXnghTzzxRAYMGJChQ4fmq1/9as4999wMHz48w4YNy9lnn53BgwfnoIMO6sTVAAAAAMBf6tTQ+Nhjj+XTn/506+3JkycnSSZOnJhZs2bl61//etasWZMvfelLWbFiRf7hH/4ht99+e3r16tVZkwEAAACAt9GpoXH33XdPS0vLO95fqVRyzjnn5JxzznkfVwEAAAAA1eqy39EIAAAAANQOoREAAAAAKCY0AgAAAADFhEYAAAAAoJjQCAAAAAAUExoBAAAAgGJCIwAAAABQTGgEAAAAAIoJjQAAAABAMaERAAAAACgmNAIAAAAAxYRGAAAAAKCY0AgAAAAAFBMaAQAAAIBiQiMAAAAAUExoBAAAAACKCY0AAAAAQDGhEQAAAAAoJjQCAAAAAMWERgAAAACgmNAIAAAAABQTGgEAAACAYkIjAAAAAFBMaAQAAAAAigmNAAAAAEAxoREAAAAAKCY0AgAAAADFhEYAAAAAoJjQCAAAAAAUExoBAAAAgGJCIwAAAABQTGgEAAAAAIoJjQAAAABAMaERAAAAACgmNAIAAAAAxYRGAAAAAKCY0AgAAAAAFBMaAQAAAIBiQiMAAAAAUExoBAAAAACKCY0AAAAAQDGhEQAAAAAoJjQCAAAAAMWERgAAAACgmNAIAAAAABQTGgEAAACAYkIjAAAAAFBMaAQAAAAAigmNAAAAAEAxoREAAAAAKCY0AgAAAADFhEYAAAAAoJjQCAAAAAAUExoBAAAAgGJCIwAAAABQTGgEAAAAAIoJjQAAAABAMaERAAAAACgmNAIAAAAAxYRGAAAAAKCY0AgAAAAAFBMaAQAAAIBiQiMAAAAAUExoBAAAAACKCY0AAAAAQDGhEQAAAAAoJjQCAAAAAMWERgAAAACgmNAIAAAAABQTGgEAAACAYkIjAAAAAFBMaAQAAAAAigmNAAAAAEAxoREAAAAAKCY0AgAAAADFhEYAAAAAoJjQCAAAAAAUExoBAAAAgGJCIwAAAABQTGgEAAAAAIoJjQAAAABAMaERAAAAACgmNAIAAAAAxYRGAAAAAKCY0AgAAAAAFBMaAQAAAIBiQiMAAAAAUExoBAAAAACKCY0AAAAAQDGhEQAAAAAoJjQCAAAAAMWERgAAAACgmNAIAAAAABQTGgEAAACAYkIjAAAAAFBMaAQAAAAAigmNAAAAAEAxoREAAAAAKCY0AgAAAADFhEYAAAAAoJjQCAAAAAAUExoBAAAAgGJCIwAAAABQTGgEAAAAAIoJjQAAAABAMaERAAAAACgmNAIAAAAAxYRGAAAAAKCY0AgAAAAAFBMaAQAAAIBiQiMAAAAAUExoBAAAAACKCY0AAAAAQDGhEQAAAAAoJjQCAAAAAMWERgAAAACgmNAIAAAAABQTGgEAAACAYkIjAAAAAFBMaAQAAAAAigmNAAAAAEAxoREAAAAAKCY0AgAAAADFhEYAAAAAoJjQCAAAAAAUExoBAAAAgGJCIwAAAABQTGgEAAAAAIoJjQAAAABAMaERAAAAACgmNAIAAAAAxYRGAAAAAKCY0AgAAAAAFBMaAQAAAIBiQiMAAAAAUExoBAAAAACKCY0AAAAAQDGhEQAAAAAoJjQCAAAAAMWERgAAAACgmNAIAAAAABQTGgEAAACAYkIjAAAAAFBMaAQAAAAAigmNAAAAAEAxoREAAAAAKCY0AgAAAADFhEYAAAAAoJjQCAAAAAAUExoBAAAAgGJCIwAAAABQTGgEAAAAAIoJjQAAAABAMaERAAAAAChWE6Hx0ksvzVZbbZVevXpl1113zc9+9rPOngQAAAAA/JkuHxqvu+66TJ48OdOmTcv8+fPz8Y9/PBMmTMiyZ
... [several hundred lines of base64-encoded PNG data for the first benchmark plot (feature-extraction pipeline) truncated] ...\n", 231 | "text/plain": [ 232 | "
" 233 | ] 234 | }, 235 | "metadata": {}, 236 | "output_type": "display_data" 237 | } 238 | ], 239 | "source": [ 240 | "plot_benchmark(results)" 241 | ] 242 | }, 243 | { 244 | "cell_type": "markdown", 245 | "metadata": {}, 246 | "source": [ 247 | "Now, let's benchmark anothe pipline" 248 | ] 249 | }, 250 | { 251 | "cell_type": "code", 252 | "execution_count": 8, 253 | "metadata": {}, 254 | "outputs": [ 255 | { 256 | "name": "stdout", 257 | "output_type": "stream", 258 | "text": [ 259 | "/notebooks/onnx_tests/onnx_transformers/.onnx/distilbert-base-cased-distilled-squad/distilbert-base-cased-distilled-squad.onnx.input_names.json\n" 260 | ] 261 | } 262 | ], 263 | "source": [ 264 | "# load onnx pipeline\n", 265 | "nlp_onnx = pipeline(\"question-answering\", onnx=True)\n", 266 | "# load torch pipeline\n", 267 | "nlp_torch = pipeline(\"question-answering\", onnx=False)" 268 | ] 269 | }, 270 | { 271 | "cell_type": "code", 272 | "execution_count": 9, 273 | "metadata": {}, 274 | "outputs": [], 275 | "source": [ 276 | "import warnings" 277 | ] 278 | }, 279 | { 280 | "cell_type": "code", 281 | "execution_count": 10, 282 | "metadata": {}, 283 | "outputs": [ 284 | { 285 | "name": "stderr", 286 | "output_type": "stream", 287 | "text": [ 288 | "Tracking inference time for Pytorch CPU: 100%|██████████| 100/100 [00:25<00:00, 3.93it/s]\n", 289 | "Tracking inference time for ONNX CPU: 100%|██████████| 100/100 [00:20<00:00, 4.82it/s]\n" 290 | ] 291 | }, 292 | { 293 | "data": { 294 | "image/png": "iVBORw0KGgoAAAANSUhEUgAABSMAAAPeCAYAAAD3ag9BAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAAPYQAAD2EBqD+naQAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+WH4yJAAAgAElEQVR4nOzdebRd8+H//9e9GW7mhMxNNInEkKBoqsSQxBRDFBVDUFNp2kq00aKoiiFE0aJq/OgnpviIITG1iJSmhqiihlLjR8iXIoYkSETknt8fXTk/Jzez2Fd8Ho+17lr3vM8+Z7/3PufcJU/77F1VKpVKAQAAAAD4glXX9wQAAAAAgP8bxEgAAAAAoBBiJAAAAABQCDESAAAAACiEGAkAAAAAFEKMBAAAAAAKIUYCAAAAAIUQIwEAAACAQoiRAAAAAEAhxEgA4Cune/fuOfTQQ1f68eecc07WXnvtNGjQIJtsssmqm9hXyF/+8pdUVVXlL3/5S31PpcKHH36YDh06ZNy4cYWu9/jjj8/mm2++Qo/5qr/PFr5Hbrrppvqeyipz5ZVXpqqqKtOmTVvmsp/37xAAfFWJkQCwmrr44otTVVW1wgGEpZs0aVKOO+64bLXVVhk7dmzOPPPM+p5Svbr44otz5ZVX1vc0ltsFF1yQli1bZujQoYWud+TIkXnyySdz2223Ldfy3mcAwP9VDet7AgDAyhk3bly6d++eRx55JC+99FJ69epV31P60nj++edTXb1y/8/13nvvTXV1df7whz+kcePGq3hmq5+LL7447dq1q3OEV//+/TN37twv1T6aP39+Lrjgghx99NFp0KBBoevu1KlT9thjj5x77rnZfffdl7m899nq6aCDDsrQoUNTU1NT31MBgNWWIyMBYDX0yiuv5KGHHspvf/vbtG/fvvCvpCZJbW1tPv7448LXuzxqamrSqFGjlXrs22+/naZNm67SQDRnzpxV9lxfFtXV1WnSpMlKR98vwh133JEZM2Zk3333rZf177vvvnnggQfyv//7v8tcdlW/z0qlUubOnbtKnuur4Iv6+9SgQYM0adIkVVVVq/y5l+bTTz/NJ598Uug6AeCL8uX5r0cAYLmNGzcua6yxRgYPHpy99967IkbOnz8/a665Zg477LA6j5s9e3aaNGmSY445pjw2b968jBo1Kr169UpNTU3WWmutHHfccZk3b17FY6uqqjJixIiMGzcuG2ywQWpqanLXXXclSc4999xsueWWadu2bZo2bZq+ffsu9jxxc+fOzU9+8pO0a9cuLVu2zO67757XX389VVVVOeWUUyqWff311/P9738/HTt2TE1NTTbYYIP893//93Ltn0XP1bbwPG8PPvhgfvazn6V9+/Zp3rx5vvvd72bGjBkV2zh27Nh89NFHqaqqSlVVVcVXlK+99tr07ds3TZs2zZprrpmhQ4dm+vTpFeseOHBgNtxwwzz22GPp379/mjVrlhNPPHGl9vUtt9ySDTfcsLz9C/f3ovvp8MMPz9e+9rXU1NSkR48e+fGPf1wRLmbOnJmRI0dmrbXWSk1NTXr16pVf//rXqa2tXeZ+fOaZZzJlypTy/hg4cGCSxZ8zcuG2P/XUUxkwYECaNWuWXr16ld8LU6ZMyeabb56mTZtmvfXWy+TJkxe7PSv7ut9yyy3p3r17evbsWTF+6KGHpkWLFnnttdey2267pUWLFunSpUsuuuiiJMnTTz+d7bbbLs2bN0+3bt1y3XXXVTx+/vz5OfXUU7POOuukSZMmadu2bbbeeuvcc889FcvtsMMOSZJbb711qfNc2vvs008/zemnn56ePXumpqYm3bt3z4knnljnPdK9e/fstttuufvuu/Otb30rTZs2zWWXXbbU9f7tb3/LzjvvnNatW6dZs2YZMGBAHnzwwYplXn311Rx55JFZb7310rRp07Rt2zb77LPPYs+ROHPmzBx99NHp3r17ampq0rVr1xx88MF55513Kparra3NGWecka5du6ZJkybZfvvt89JLLy11rklyyimnpKqqK
s8991z23XfftGrVKm3bts1Pf/rTOqFxaX+f/vGPf2SXXXZJq1at0qJFi2y//fZ5+OGHy4999NFHU1VVlauuuqrOHO6+++5UVVXljjvuSLL4c0aWSqWMHj06Xbt2TbNmzbLtttvmmWeeWew2Lc9ncdq0aamqqsq5556b888/v/xeePbZZ5e5zwBgdeBr2gCwGho3blz22muvNG7cOPvvv38uueSS/P3vf89mm22WRo0a5bvf/W4mTJiQyy67rOLIq1tuuSXz5s0rn0+vtrY2u+++ex544IEMGzYsvXv3ztNPP53zzjsvL7zwQm655ZaK9d5777254YYbMmLEiLRr1y7du3dP8p/z9O2+++458MAD88knn+T666/PPvvskzvuuCODBw8uP/7QQw/NDTfckIMOOihbbLFFpkyZUnH/Qm+99Va22GKLcmBo37597rzzzhx++OGZPXt2Ro4cuVL77aijjsoaa6yRUaNGZdq0aTn//PMzYsSIjB8/PklyzTXX5PLLL88jjzySK664Ikmy5ZZbJknOOOOM/OpXv8q+++6bI444IjNmzMiFF16Y/v375x//+EfatGlTXs+7776bXXbZJUOHDs33vve9dOzYcYX39QMPPJAJEybkyCOPTMuWLfO73/0uQ4YMyWuvvZa2bdsmSd544418+9vfzsyZMzNs2LCsv/76ef3113PTTTdlzpw5ady4cebMmZMBAwbk9ddfzw9/+MN8/etfz0MPPZQTTjgh//73v3P++ecvcX+df/75Oeqoo9KiRYv88pe/TJJ07Nhxqfv4/fffz2677ZahQ4dmn332ySWXXJKhQ4dm3LhxGTlyZH70ox/lgAMOyDnnnJO9994706dPT8uWLZN8/tf9oYceyje/+c3F3rdgwYLssssu6d+/f84+++yMGzcuI0aMSPPmzfPLX/4yBx54YPbaa69ceumlOfjgg9OvX7/06NEjyX+i2JgxY3LEEUfk29/+dmbPnp1HH300jz/+eHbcccfyOlq3bp2ePXvmwQcfzNFHH73EeS7tfXbEEUfkqquuyt57752f//zn+dvf/pYxY8bkX//6VyZOnFjxPM8//3z233///PCHP8wPfvCDrLfeektc57333ptddtklffv2zahRo1JdXZ2xY8dmu+22y/33359vf/vbSZK///3veeihhzJ06NB07do106ZNyyWXXJKBAwfm2WefTbNmzZL850JB22yzTf71r3/l+9//fr75zW/mnXfeyW233Zb/9//+X9q1a1de91lnnZXq6uocc8wxmTVrVs4+++wceOCB+dvf/rbE+X7Wvvvum+7du2fMmDF5+OGH87vf/S7vv/9+rr766jrbuOjfp2eeeSbbbLNNWrVqleOOOy6NGjXKZZddloEDB5bj+Le+9a2svfbaueGGG3LIIYdUPOf48eOzxhprZKeddlri/E4++eSMHj06u+66a3bdddc8/vjjGTRoUJ0jGVf0szh27Nh8/PHHGTZsWGpqarLmmmsu1/4CgC+9EgCwWnn00UdLSUr33HNPqVQqlWpra0tdu3Yt/fSnPy0vc/fdd5eSlG6//faKx+66666ltddeu3z7mmuuKVVXV5fuv//+iuUuvfTSUpLSgw8+WB5LUqquri4988wzdeY0Z86cituffPJJacMNNyxtt9125bHHHnuslKQ0cuTIimUPPfTQUpLSqFGjymOHH354qXPnzqV33nmnYtmhQ4eWWrduXWd9i+rWrVvpkEMOKd8eO3ZsKUlphx12KNXW1pbHjz766FKDBg1KM2fOLI8dcsghpebNm1c837Rp00oNGjQonXHGGRXjTz/9dKlhw4YV4wMGDCglKV166aUVy67ovm7cuHHppZdeKo89+eSTpSSlCy+8sDx28MEHl6qrq0t///vf6+yDhdt5+umnl5o3b1564YUXKu4//vjjSw0aNCi99tprdR77WRtssEFpwIABdcbvu+++UpLSfffdV2fbr7vuuvLYc889V37vPPzww+Xxhe/RsWPHlsc+z+s+f/78UlVVVennP/95nfsOOeSQUpLSmWeeWR57//33S02bNi1VVVWVrr/++jrz/ez7ceONNy4NHjx4iev+rEGDBpV69+69zOUW9z574oknSklKRxxxRMX4McccU0pSuvfee8tj3bp1KyUp3XXXXctcV21tbWmdddYp7bTTThXv/zlz5pR69OhR2nHHHSvGFjV16tRSktLVV19dHjv55JNLSUoTJkxY7PpKpf//PdK7d+/SvHnzyvdfcMEFpSSlp59+eqnzHjVqVClJaffdd68YP/LII0tJSk8++WR5bEl/n/bcc89S48aNSy+//HJ57I033ii1bNmy1L9///LYCSecUGrUqFHpvffeK4/Nmzev1KZNm9L3v//98tjCvyWvvPJKqVQqld5+++1S48aNS4MHD67YtyeeeGIpScXfoeX9LL7yyiulJKVWrVqV3n777aXuIwBYHfmaNgCsZsaNG5eOHTtm2223TfKfryfut99+uf7667NgwYIkyXbbbZd27dqVj/hL/nPE2j333JP99tuvPHbjjTemd+/eWX/99fPOO++Uf7bbbrskyX333Vex7gEDBqRPnz515tS0adOK9cyaNSvbbLNNHn/88fL4wq9MHnnkkRWPPeqooypul0ql3HzzzfnOd76TUqlUMa+ddtops2bNqnjeFTFs2LCKc71ts802WbBgQV599dWlPm7ChAmpra3NvvvuWzGfTp06ZZ111qmzn2pqaup8TX5F9/UOO+xQ8XXjb3zjG2nVqlX5fIS1tbW55ZZb8p3vfCff+ta36sx54XbeeOON2WabbbLGGmtUrHeHHXbIggUL8te//nVZu22FtGjRouJK1uutt17atGmT3r17V1z5feHvC7fn877u7733XkqlUtZYY40lLnPEEUeUf2/Tpk3WW2+9NG/evOIckwvn+9nzPrZp0ybPPPNMXnzxxWVu/8L9vDL+9Kc/JUl+9rOfVYz//Oc/T5L88Y9/rBjv0aPHUo/YW+iJJ57Iiy++mAMOOCDvvvtueb9+9NFH2X777fPXv/61/DXhz36W58+fn3fffTe9evVKmzZtKvb/zTffnI033jjf/e5366xv0fMpHnbYYRVHaG+zzTZJslzn1kyS4cOHV9xe+Ddj4f5aaNG/TwsWLMikSZOy5557Zu211y6Pd+7cOQcccEAeeOCBzJ49O0my3377Zf78+ZkwYUJ5uUmTJmXmzJkVfzMXNXny5HzyySc56qijKrZ7cUfxruhncciQIWnfvv0S1w0Aqytf0waA1ciCBQty/fXXZ9ttt80rr7xSHt98883zm9/8Jn/+858zaNCgNGzYMEOGDMl1112XefPmpaamJhMmTMj8+fMr/mH94osv5l//+tcS/8H79ttvV9xe+LXVRd1xxx0ZPXp0nnjiiYpz2332H+evvvpqqqur6zzHolcBnzFjRmbOnJnLL788l19++XLNa3l9/etfr7i9MFy9//77S33ciy++mFKplHXWWWex9y96sZwuXbrUuTDJiu7rRee6cL4L5zpj
xozMnj07G2644TLn/tRTTy33ej+vrl271olRrVu3zlprrVVnLEnF9qyK171UKi12vEmTJnX2QevWrZc438++J0477bTsscceWXfddbPhhhtm5513zkEHHZRvfOMbi13/yl7cZOFnZNHPRKdOndKmTZs60XxJn8dFLYyoi34F+bNmzZqVNdZYI3Pnzs2YMWMyduzYvP766xX7c9asWeXfX3755QwZMmS51r+yn7uFFv3c9ezZM9XV1XXOY7no/pgxY0bmzJmz2K+v9+7dO7W1tZk+fXo22GCDbLzxxll//fUzfvz4HH744Un+8xXtdu3alf+HweIsfE0WnWP79u3rhPEV/Swu7+sLAKsbMRIAViP33ntv/v3vf+f666/P9ddfX+f+cePGZdCgQUmSoUOH5rLLLsudd96ZPffcMzfccEPWX3/9bLzxxuXla2trs9FGG+W3v/3tYte3aED67FFTC91///3Zfffd079//1x88cXp3LlzGjVqlLFjx9a5EMjyWHiE1ve+970lxpPFRaDl0aBBg8WOLylgfXZOVVVVufPOOxf7HC1atKi4vbj9tKL7emXnurj17rjjjjnuuOMWe/+66667Qs+3LEua97K25/O+7muuuWaqqqqWGLhWdl5J0r9//7z88su59dZbM2nSpFxxxRU577zzcumll1YcbZn8J7B99nyJK2N5Y+bi3meLs3DfnnPOOdlkk00Wu8zC9/BRRx2VsWPHZuTIkenXr19at26dqqqqDB06dJkXPFqSVfVeXmhJ+2d598eS7LfffjnjjDPyzjvvpGXLlrntttuy//77p2HDVfNPphX9LH7e7QGALysxEgBWI+PGjUuHDh3KVwH+rAkTJmTixIm59NJL07Rp0/Tv3z+dO3fO+PHjs/XWW+fee+8tX4RkoZ49e+bJJ5/M9ttvv9JHc918881p0qRJ7r777tTU1JTHx44dW7Fct27dUltbm1deeaXiKKJFr6rbvn37tGzZMgsWLChfnbi+9ezZM6VSKT169FjpeLcq9vVntW/fPq1atco///nPZa73ww8/XOl9uSrmujw+7+vesGHD9OzZs+KI4VVp4RXqDzvssHz44Yfp379/TjnllDox8pVXXqkI/iti4WfkxRdfTO/evcvjb731VmbOnJlu3bqt1PMu/Lp/q1atlrlvb7rpphxyyCH5zW9+Ux77+OOPM3PmzDrPuaz33qry4osvVhwl+NJLL6W2trZ8Aa0lad++fZo1a5bnn3++zn3PPfdcqqurK/4nwH777ZdTTz01N998czp27JjZs2dXnHJgcRa+Ji+++GLFV8FnzJhRJ4x/3s8iAHxVOGckAKwm5s6dmwkTJmS33XbL3nvvXednxIgR+eCDD3LbbbclSaqrq7P33nvn9ttvzzXXXJNPP/20zrnP9t1337z++uv5r//6r8Wu76OPPlrmvBo0aJCqqqry+SqTZNq0aXWuDr3w3HYXX3xxxfiFF15Y5/mGDBmSm2++ebGxY8aMGcuc06q21157pUGDBjn11FPrHM1VKpXy7rvvLvM5VsW+/qzq6ursueeeuf322/Poo4/WuX/hPPfdd99MnTo1d999d51lZs6cmU8//XSp62nevHmdEPVFWBWve79+/Ra7Lz6vRV/fFi1apFevXhWnJEj+8zXml19+uXxl7BW16667JkmdqyovPJp2cVeeXx59+/ZNz549c+655+bDDz+sc/9n922DBg3qvMcvvPDCis938p/zGT755JN1rvCdrPwRj0uy6P98Wfg3Y5dddlnq4xo0aJBBgwbl1ltvrfhK91tvvZXrrrsuW2+9dVq1alUe7927dzbaaKOMHz8+48ePT+fOndO/f/+lrmOHHXZIo0aNcuGFF1Zs9+KuUv95P4sA8FXhyEgAWE3cdttt+eCDD7L77rsv9v4tttgi7du3z7hx48rRcb/99suFF16YUaNGZaONNqo42ipJDjrooNxwww350Y9+lPvuuy9bbbVVFixYkOeeey433HBD7r777sVeHOWzBg8enN/+9rfZeeedc8ABB+Ttt9/ORRddlF69euWpp54qL9e3b98MGTIk559/ft59991sscUWmTJlSl544YUklUfgnXXWWbnvvvuy+eab5wc/+EH69OmT9957L48//ngmT56c9957b6X24crq2bNnRo8enRNOOCHTpk3LnnvumZYtW+aVV17JxIkTM2zYsBxzzDFLfY5Vsa8XdeaZZ2bSpEkZMGBAhg0blt69e+ff//53brzxxjzwwANp06ZNjj322Nx2223Zbbfdcuihh6Zv37756KOP8vTTT+emm27KtGnTlvq14r59++aSSy7J6NGj06tXr3To0GGp59D7PD7v677HHnvkmmuuyQsvvLBKv37ep0+fDBw4MH379s2aa66ZRx99NDfddFNGjBhRsdzkyZNTKpWyxx57rNR6Nt544xxyyCG5/PLLM3PmzAwYMCCPPPJIrrrqquy5557li1atqOrq6lxxxRXZZZddssEGG+Swww5Lly5d8vrrr+e+++5Lq1atcvvttydJdtttt1xzzTVp3bp1+vTpk6lTp2by5Mlp27ZtxXMee+yxuemmm7LPPvvk+9//fvr27Zv33nsvt912Wy699NKVPjp0cV555ZXsvvvu2XnnnTN16tRce+21OeCAA5ZrHaNHj84999yTrbfeOkceeWQaNmyYyy67LPPmzcvZZ59dZ/n99tsvJ598cpo0aZLDDz881dVLP3ajffv2OeaYYzJmzJjstttu2XXXXfOPf/wjd955Z53P1ef9LALAV4UYCQCriXHjxqVJkybZcccdF3t/dXV1Bg8enHHjxuXdd99N27Zts+WWW2attdbK9OnTF3tF2Orq6txyyy0577zzcvXVV2fixIlp1qxZ1l577fz0pz9drqCz3Xbb5Q9/+EPOOuusjBw5Mj169Mivf/3rTJs2rSJGJsnVV1+dTp065X/+538yceLE7LDDDhk/fnzWW2+9NGnSpLxcx44d88gjj+S0007LhAkTcvHFF6dt27bZYIMN8utf/3oF99yqcfzxx2fdddfNeeedl1NPPTXJf87zOGjQoCUG4s9aFft6UV26dMnf/va3/OpXv8q4ceMye/bsdOnSJbvsskuaNWuWJGnWrFmmTJmSM888MzfeeGOuvvrqtGrVKuuuu25OPfXU8oVkluTkk0/Oq6++mrPPPjsffPBBBgwY8IXFyM/7un/nO99Ju3btcsMNN+Skk05aZfP6yU9+kttuuy2TJk3KvHnz0q1bt4wePTrHHntsxXI33nhjtt5664qroK+oK664ImuvvXauvPLKTJw4MZ06dcoJJ5yQUaNGfa5tGDhwYKZOnZrTTz89v//97/Phhx+mU6dO2XzzzfPDH/6wvNwFF1yQBg0aZNy4cfn444+z1VZbZfLkyXWu2t2iRYvcf//9GTVqVCZOnJirrroqHTp0yPbbb5+uXbt+rrkuavz48Tn55JNz/PHHp2HDhhkxYkTOOeec5XrsBhtskPvvvz8nnHBCxowZk9ra2my++ea59tprK67uvtB
+++2Xk046KXPmzFnqVbQ/a/To0WnSpEkuvfTSckyfNGlSnSNZP+9nEQC+KqpKq/p7FAAAK+CJJ57IpptummuvvTYHHnhgfU+H1dzpp5+esWPH5sUXX1zihVO+CG+++WZ69OiR66+/fqWPjKTSKaecklNPPTUzZsxwxCAAfIU4ZyQAUJi5c+fWGTv//PNTXV29zHOzwfI4+uij8+GHHy72avNfpPPPPz8bbbSREAkAsAy+pg0AFObss8/OY489lm233TYNGzbMnXfemTvvvDPDhg2ruKotrKwWLVrk7bffLny9Z511VuHrBABYHYmRAEBhttxyy9xzzz05/fTT8+GHH+brX/96TjnllPzyl7+s76kBAAAFcM5IAAAAAKAQzhkJAAAAABRCjAQAAAAACuGckUlqa2vzxhtvpGXLlqmqqqrv6QAAAADAaqVUKuWDDz7I1772tVRXL/n4RzEyyRtvvOEKngAAAADwOU2fPj1du3Zd4v1iZJKWLVsm+c/OatWqVT3PBgAAAABWL7Nnz85aa61V7mxLIkYm5a9mt2rVSowEAAAAgJW0rFMguoANAAAAAFAIMRIAAAAAKIQYCQAAAAAUQowEAAAAAAohRgIAAAAAhRAjAQAAAIBCiJEAAAAAQCHESAAAAACgEGIkAAAAAFAIMRIAAAAAKIQYCQAAAAAUQowEAAAAAAohRgIAAAAAhRAjAQAAAIBCiJEAAAAAQCHESAAAAACgEGIkAAAAAFAIMRIAAAAAKIQYCQAAAAAUQowEAAAAAAohRgIAAAAAhRAjAQAAAIBCiJEAAAAAQCHESAAAAACgEGIkAAAAAFAIMRIAAAAAKIQYCQAAAAAUQowEAAAAAAohRgIAAAAAhRAjAQAAAIBCiJEAAAAAQCHESAAAAACgEGIkAAAAAFAIMRIAAAAAKIQYCQAAAAAUQowEAAAAAAohRgIAAAAAhRAjAQAAAIBCiJEAAAAAQCHESAAAAACgEGIkAAAAAFAIMRLgK2DOnDnp0qVLunTpkjlz5tT3dAAAAGCxxEgAAAAAoBBiJAAAAABQCDESAAAAACiEGAkAAAAAFEKMBAAAAAAKIUYCAAAAAIUQIwEAAACAQoiRAAAAAEAhxEgAAAAAoBBiJAAAAABQCDESAAAAACiEGAkAAAAAFEKMBAAAAAAKIUYCAAAAAIUQIwEAAACAQoiRAAAAAEAhxEgAAAAAoBBiJAAAAABQCDESAAAAACiEGAkAAAAAFEKMBAAAAAAKIUYCAAAAAIUQIwEAAACAQoiRAAAAAEAhxEgAAAAAoBBiJAAAAABQCDESAAAAACiEGAkAAAAAFEKMBAAAAAAKIUYCAAAAAIUQIwEAAACAQoiRAAAAAEAhxEgAAAAAoBBiJAAAAABQCDESAAAAACiEGAkAAAAAFEKMBAAAAAAKIUYCAAAAAIUQIwEAAACAQoiRAAAAAEAhxEgAAAAAoBBiJAAAAABQCDESAAAAACiEGAkAAAAAFEKMBAAAAAAKIUYCAAAAAIUQIwEAAACAQoiRAAAAAEAhxEgAAAAAoBBiJAAAAABQCDESAAAAACiEGAkAAAAAFEKMBAAAAAAKIUYCAAAAAIUQIwEAAACAQoiRAAAAAEAhxEgAAAAAoBBiJAAAAABQCDESAAAAACiEGAkAAAAAFEKMBAAAAAAKIUYCAAAAAIUQIwEAAACAQoiRAAAAAEAhxEgAAAAAoBBiJAAAAABQCDESAAAAACiEGAkAAAAAFEKMBAAAAAAKIUYCAAAAAIUQIwEAAACAQoiRAAAAAEAhxEgAAAAAoBBiJAAAAABQCDESAAAAACiEGAkAAAAAFEKMBAAAAAAKIUYCAAAAAIUQIwEAAACAQoiRAAAAAEAhxEgAAAAAoBBiJAAAAABQCDESAAAAAChEw/qeAMXofvwf63sKwBeoNH9e+ffev7orVY1q6nE2wBdp2teZhg4AACAASURBVFmD63sKAACw0hwZCQAAAAAUQowEAAAAAAohRgIAAAAAhRAjAQAAAIBC1GuMHDNmTDbbbLO0bNkyHTp0yJ577pnnn3++YpmBAwemqqqq4udHP/pRxTKvvfZaBg8enGbNmqVDhw459thj8+mnnxa5KQAAAADAMtTr1bSnTJmS4cOHZ7PNNsunn36aE088MYMGDcqzzz6b5s2bl5f7wQ9+kNNOO618u1mzZuXfFyxYkMGDB6dTp0556KGH8u9//zsHH3xwGjVqlDPPPLPQ7QEAAAAAlqxeY+Rdd91VcfvKK69Mhw4d8thjj6V///7l8WbNmqVTp06LfY5Jkybl2WefzeTJk9OxY8dssskmOf300/OLX/wip5xySho3bvyFbgMAAAAAsHy+VOeMnDVrVpJkzTXXrBgfN25c2rVrlw033DAnnHBC5syZU75v6tSp2WijjdKxY8fy2E477ZTZs2fnmWeeKWbiAAAAAMAy1euRkZ9VW1ubkSNHZquttsqGG25YHj/ggAPSrVu3fO1rX8tTTz2VX/ziF3n++eczYcKEJMmbb75ZESKTlG+/+eabi13XvHnzMm/evPLt2bNnr+rNAQAAAAAW8aWJkcOHD88///nPPPDAAxXjw4YNK/++0UYbpXPnztl+++3z8ssvp2fPniu1rjFjxuTUU0/9XPMFAAAAAFbMl+Jr2iNGjMgdd9yR++67L127dl3qsptvvnmS5KWXXkqSdOrUKW+99VbFMgtvL+k8kyeccEJmzZpV/pk+ffrn3QQAAAAAYBnqNUaWSqWMGDEiEydOzL333psePXos8zFPPPFEkqRz585Jkn79+uXpp5/O22+/XV7mnnvuSatWrdKnT5/FPkdNTU1atWpV8QMAAAAAfLHq9Wvaw4cPz3XXXZdbb701LVu2LJ/jsXXr1mnatGlefvnlXHfdddl1113Ttm3bPPXUUzn66KPTv3//fOMb30iSDBo0KH369MlBBx2Us88+O2+++WZOOumkDB8+PDU1NfW5eQAAAADAZ9TrkZGXXHJJZs2alYEDB6Zz587ln/HjxydJGjdunMmTJ2fQoEFZf/318/Of/zxDhgzJ7bffXn6OBg0a5I477kiDBg3Sr1+/fO9738vBBx+c0047rb42CwAAAABYjHo9MrJUKi31/rXWWitTpkxZ5vN069Ytf/rTn1bVtAAAAACAL8CX4gI2AAAAAMBXnxgJAAAAABRCjAQAAAAACiFGAgAAAACFECMBAAAAgEKIkQAAAABAIcRIAAAAAKAQYiQAAAAAUAgxEgAAAAAohBgJAAAAABRCjAQAAAAACiFGAgAAAACFECMBAAAAgEKIkQAAAABAIcRIAAAAAKAQYiQAAAAAUAgxEgAAAAAohBgJAAAAABRCjAQAAAAACiFGAgAAAACFECMBAAAAgEKIkQAAAABAIcRIAAAAAKAQYiQAAAAAUAgxEgAAAAAohBgJAAAAABRCjAQAAAAACiFGAgAAAACFECMBAAAAgEKIkQAAAABAIcRIAAAAAKAQYiQAAAAAUAgxEgAAAAAohBgJAAAAABRCjAQAAAAACiFGAgAAAACFECMBAA
AAgEKIkQAAAABAIcRIAAAAAKAQYiQAAAAAUAgxEgAAAAAohBgJAAAAABRCjAQAAAAACiFGAgAAAACFECMBAAAAgEKIkQAAAABAIcRIAAAAAKAQYiQAAAAAUAgxEgAAAAAohBgJAAAAABRCjAQAAAAACiFGAgAAAACFECMBAAAAgEKIkQAAAABAIcRIAAAAAKAQYiQAAADUkzlz5qRLly7p0qVL5syZU9/TAfjCiZEAAAAAQCHESAAAAACgEGIkAAAAAFAIMRIAAAAAKIQYCQAAAAAUQowEAAAAAAohRgIAAAAAhRAjAQAAAIBCiJEAAAAAQCHESAAAAACgEGIkAAAAAFCIhvU9AQA+v6pGNWl00OX1PQ0AAABYKkdGAgAAAACFECMBAAAAgEKIkQAAAABAIcRIAAAAAKAQYiQAAAAAUAgxEgAAAAAohBgJAAAAABRCjAQAAAAACiFGAgAAAACFECMBAAAAgEKIkQAAAABAIcRIAAAAAKAQYiQAAAAAUAgxEgAAAAAohBgJAAAAABRCjAQAAAAACiFGAgAAAACFECMBAAAAgEKIkQAAAABAIcRIAAAAAKAQYiQAAAAAUAgxEgAAAAAohBgJAAAAABRCjAQAAAAACiFGAgAAAACFECMBAAAAgEKIkQAAAABAIcRIAAAAAKAQYiQAAAAAUAgxEgAAAAAohBgJAAAAABRCjAQAAAAACiFGAgAAAACFECMBAAAAgEKIkQAAAABAIcRIAAAAAKAQYiQAAAAAUAgxEgAAAAAohBgJAAAAABRCjAQAAAAACiFGAgAAAACFECMBAAAAgEKIkQAAAABAIcRIAAAAAKAQYiQAAAAAUAgxEgAAAAAohBgJAAAAABRCjAQAAAAACiFGAgAAAACFECMBAAAAgEKIkQAAAABAIcRIAAAAAKAQYiQAAAAAUAgxEgAAAAAohBgJAAAAABRCjAQAAAAACiFGAgAAAACFECMBAAAAgEKIkQAAAABAIcRIAAAAAKAQYiQAAAAAUAgxEgAAAAAohBgJAAAAABRCjAQAAAAACiFGAgAAAACFECMBAAAAgEKIkQAAAABAIcRIAAAAAKAQYiQAAAAAUAgxEgAAAAAoRMP6ngAAALBk3Y//Y31PAfgClebPK//e+1d3papRTT3OBvgiTTtrcH1P4UvBkZEAAAAAQCHESAAAAACgEGIkAAAAAFAIMRIAAAAAKIQYCQAAAAAUQowEAAAAAAohRgIAAAAAhRAjAQAAAIBC1GuMHDNmTDbbbLO0bNkyHTp0yJ577pnnn3++YpmPP/44w4cPT9u2bdOiRYsMGTIkb731VsUyr732WgYPHpxmzZqlQ4cOOfbYY/Ppp58WuSkAAAAAwDLUa4ycMmVKhg8fnocffjj33HNP5s+fn0GDBuWjjz4qL3P00Ufn9ttvz4033pgpU6bkjTfeyF577VW+f8GCBRk8eHA++eSTPPTQQ7nqqqty5ZVX5uSTT66PTQIAAAAAlqBhfa78rrvuqrh95ZVXpkOHDnnsscfSv3//zJo1K3/4wx9y3XXXZbvttkuSjB07Nr17987DDz+cLbbYIpMmTcqzzz6byZMnp2PHjtlkk01y+umn5xe/+EVOOeWUNG7cuD42DQAAAABYxJfqnJGzZs1Kkqy55ppJksceeyzz58/PDjvsUF5m/fXXz9e//vVMnTo1STJ16tRstNFG6dixY3mZnXbaKbNnz84zzzyz2PXMmzcvs2fPrvgBAAAAAL5YX5oYWVtbm5EjR2arrbbKhhtumCR5880307hx47Rp06Zi2Y4dO+bNN98sL/PZELnw/oX3Lc6YMWPSunXr8s9aa621qjcHAAAAAFjElyZGDh8+PP/85z9z/fXXf+HrOuGEEzJr1qzyz/Tp07/wdQIAAADA/3X1es7IhUaMGJE77rgjf/3rX9O1a9fyeKdOnfLJJ59k5syZFUdHvvXWW+nUqVN5mUceeaTi+RZebXvhMouqqalJTU3Nqt4MAAAAAGAp6vXIyFKplBEjRmTixIm5995706NHj4r7+/btm0aNGuXPf/5zeez555/Pa6+9ln79+iVJ+vXrl6effjpvv/12eZl77rknrVq1Sp8+fYrZEAAAAABgmer1yMjhw4fnuuuuy6233pqWLVuWz/HYunXrNG3aNK1bt87hhx+en/3sZ1lzzTXTqlWrHHXUUenXr1+22GKLJMmgQYPSp0+fHHTQQTn77LPz5ptv5qSTTsrw4cMd/QgAAAAAXyL1GiMvueSSJMnAgQMrxseOHZtDDz00SXLeeeeluro6Q4YMybx587LTTjvl4osvLi/boEGD3HHHHfnxj3+cfv36pXnz5jnkkENy2mmnFbUZAAAAAMByqNcYWSqVlrlMkyZNctFFF+Wiiy5a4jLdunXLn/70p1U5NQAAAABgFfvSXE0bAAAAAPhqEyMBAAAAgEKIkQAAAABAIcRIAAAAAKAQYiQAAAAAUAgxEgAAAAAohBgJAAAAABRCjAQAAAAACiFGAgAAAACFECMBAAAAgEKIkQAAAABAIcRIAAAAAKAQYiQAAAAAUAgxEgAAAAAohBgJAAAAABRCjAQAAAAACiFGAgAAAACFECMBAAAAgEKIkQAAAABAIcRIAAAAAKAQYiQAAAAAUAgxEgAAAAAohBgJAAAAABRCjAQAAAAACiFGAgAAAACFECMBAAAAgEKIkQAAAABAIcRIAAAAAKAQYiQAAAAAUAgxEgAAAAAohBgJAAAAABRCjAQAAAAACiFGAgAAAACFECMBAAAAgEKIkQAAAABAIcRIAAAAAKAQYiQAAAAAUAgxEgAAAAAohBgJAAAAABRCjAQAAAAACiFGAgAAAACFECMBAAAAgEKIkQAAAABAIcRIAAAAAKAQYiQAAAAAUAgxEgAAAAAohBgJAAAAABRCjAQAAAAACiFGAgAAAACFECMBAAAAgEKIkQAAAABAIcRIAAAAAKAQYiQAAAAAUAgxEgAAAAAohBgJAAAAABRCjAQAAAAACiFGAgAAAACFECMBAAAAgEKIkQAAAABAIcRIAAAAAKAQYiQAAAAAUAgxEgAAAAAohBgJAAAAABSiYX1PAAAAAP6vqmpUk0YHXV7f0wAojCMjAQAAAIBCiJEAAAAAQCHESAAAAACgEGIkAAAAAFAIMRIAAAAAKIQYCQAAAAAUQowEAAAAAAohRgIAAAAAhRAjAQAAAIBCiJEAAAAAQCHESAAAAACgEGIkAAAAAFAIMRIAAAAAKIQYCQAAAAAUQowEAAAAAArRcEUWnjlzZiZOnJj7778/r776aubMmZP27dtn0003zU477ZQtt9zyi5onAAAAALCaW64jI994440cccQR6dy5c0aPHp25c+dmk002yfbbb5+uXbvmvvvuy4477pg+ffpk/PjxX/ScAQAAAIDV0HIdGbnpppvmkEMOyWOPPZY+ffosdpm5c+fmlltuyfnnn5/p06fnmGOOWaUTBQAAAABWb8sVI5999tm0bdt2qcs0bdo0+++/f/bff/+8++67q2RyAAAAAMBXx3J9TXtZIfLzLg8AA
AAAfPWt8NW0r7rqqvzxj38s3z7uuOPSpk2bbLnllnn11VdX6eQAAAAAgK+OFY6RZ555Zpo2bZokmTp1ai666KKcffbZadeuXY4++uhVPkEAAAAA4Kthuc4Z+VnTp09Pr169kiS33HJLhgwZkmHDhmWrrbbKwIEDV/X8AAAAAICviBU+MrJFixblC9RMmjQpO+64Y5KkSZMmmTt37qqdHQAAAADwlbHCR0buuOOOOeKII7LpppvmhRdeyK677pokeeaZZ9K9e/dVPT8AAAAA4CtihY+MvOiii9KvX7/MmDEjN998c/nK2Y899lj233//VT5BAAAAAOCrYYWPjGzTpk1+//vf1xk/9dRTV8mEAAAA+P/Yu/cgK+vD/uOfwx2jgKjcEowSL4i3UJkQotVGSbgkKEpjUExRKaaOt0qIlVFBq6PRemFitERNdJISNTYJk9KKNXhBoqJCjMZYK4pR5BZRWJC66i6/P351xy1oOLr7BU5er5kzwz7POcfP+tfOe855HgCoTVXHyCR566238tRTT2XVqlVpbGxsOl6pVDJq1KgWGwcAAAAA1I6qY+ScOXPyjW98o+kmNu9XqVTS0NDQIsMAAAAAgNpS9TUjzzrrrBx//PFZvnx5Ghsbmz2ESAAAAADgg1QdI1euXJlJkyalZ8+erbEHAAAAAKhRVcfIv/7rv84DDzzQClMAAAAAgFpW9TUjv/e97+VrX/taHnrooRx44IFp3759s/Nnn312i40DAAAAAGpH1THy9ttvz3/+53+mU6dOeeCBB1KpVJrOVSoVMRIAAAAA2KyqY+QFF1yQSy65JOeff37atKn6W94AAAAAwJ+pqmvi22+/na9//etCJAAAAABQlaqL4vjx43PnnXe2xhYAAAAAoIZV/TXthoaGXHXVVbnnnnty0EEHbXIDm2uvvbbFxgEAAAAAtaPqGPn0009n4MCBSZLf/e53zc69/2Y2AAAAAADvV3WMvP/++1tjBwAAAABQ49yFBgAAAAAoYoti5N/93d9l6dKlW/SGd955Z2bOnPmxRgEAAAAAtWeLvqa92267Zf/998+hhx6aUaNGZdCgQenTp086deqUN954I7///e8zf/783HHHHenTp09uuumm1t4NAAAAAGxntihGXnrppTnzzDNzyy235MYbb8zvf//7Zud32mmnDB06NDfddFOGDx/eKkMBAAAAgO3bFt/ApmfPnrngggtywQUX5I033sjLL7+c//mf/8muu+6az3zmM+6kDQAAAAB8qKrvpp0kO++8c3beeeeW3gIAAAAA1DB30wYAAAAAihAjAQAAAIAixEgAAAAAoAgxEgAAAAAo4iPFyHfffTe/+tWv8v3vfz/r1q1Lkixbtizr169v0XEAAAAAQO2o+m7af/jDHzJ8+PC8/PLLqa+vz5e+9KXstNNOufLKK1NfX58ZM2a0xk4AAAAAYDtX9ScjzznnnAwaNChvvPFGOnfu3HT82GOPzdy5c1t0HAAAAABQO6r+ZORDDz2Uhx9+OB06dGh2fI899sirr77aYsMAAAAAgNpS9ScjGxsb09DQsMnxpUuXZqeddmqRUQAAAABA7ak6Rn75y1/O9OnTm36uVCpZv359pk2blpEjR7boOAAAAACgdlT9Ne1rrrkmw4YNy4ABA/LWW2/lxBNPzPPPP59dd901t99+e2tsBAAAAABqQNUx8lOf+lR++9vf5o477shTTz2V9evXZ8KECRk3blyzG9oAAAAAALxf1TEySdq1a5eTTjqppbcAAAAAADXsI8XIZcuWZf78+Vm1alUaGxubnTv77LNbZBgAAAAAUFuqjpG33XZbvvnNb6ZDhw7ZZZddUqlUms5VKhUxEgAAAADYrKpj5EUXXZSpU6dmypQpadOm6ptxAwAAAAB/pqquiRs2bMjYsWOFSAAAAACgKlUXxQkTJuSuu+5qjS0AAAAAQA2r+mvaV1xxRb761a9mzpw5OfDAA9O+fftm56+99toWGwcAAAAA1I6PFCPvueee7LvvvkmyyQ1sAAAAAAA2p+oYec011+SHP/xhTj755FaYAwAAAADUqqqvGdmxY8cceuihrbEFAAAAAKhhVcfIc845J9dff31rbAEAAAAAaljVX9N+7LHHct9992X27NnZf//9N7mBzc9//vMWGwcAAAAA1I6qY2S3bt1y3HHHtcYWAAAAAKCGVR0jb7311tbYAQAAAADUuKqvGQkAAAAA8FFs0Scj/+Iv/iJz587NzjvvnIEDB6ZSqXzgcxctWtRi4wAAAACA2rFFMfKYY45Jx44dm/79YTESAAAAAGBztihGTps2renfF198cWttAQAAAABqWNXXjOzXr19Wr169yfE1a9akX79+LTIKAAAAAKg9VcfIl156KQ0NDZscr6+vz9KlS1tkFAAAAABQe7boa9pJ8stf/rLp3/fcc0+6du3a9HNDQ0Pmzp2bPffcs2XXAQAAAAA1Y4tj5OjRo5MklUol48ePb3auffv22WOPPXLNNde07DoAAAAAoGZscYxsbGxMkuy55555/PHHs+uuu7baKAAAAACg9mxxjHzPkiVLWmMHAAAAAFDjqr6BTUuaN29eRo0alT59+qRSqWTWrFnNzp988smpVCrNHsOHD2/2nNdffz3jxo1Lly5d0q1bt0yYMCHr168v+WsAAAAAAFtgq8bIN998MwcffHBuuOGGD3zO8OHDs3z58qbH7bff3uz8uHHj8swzz+Tee+/N7NmzM2/evJx22mmtPR0AAAAAqFLVX9NuSSNGjMiIESM+9DkdO3ZMr169Nnvu2WefzZw5c/L4449n0KBBSZLrr78+I0eOzNVXX50+ffq0+GYAAAAA4KPZqp+M3BIPPPBAevTokX333Tenn356Vq9e3XTukUceSbdu3ZpCZJIMHTo0bdq0yYIFCz7wPevr61NXV9fsAQAAAAC0ro8UI1944YVceOGFOeGEE7Jq1aokyd13351nnnmmRccNHz48P/rRjzJ37txceeWVefDBBzNixIg0NDQkSVasWJEePXo0e027du3SvXv3rFix4gPf94orrkjXrl2bHn379m3R3QAAAADApqqOkQ8++GAOPPDALFiwID//+c+bbhbz29/+NtOmTWvRcWPHjs3RRx+dAw88MKNHj87s2bPz+OOP54EHHvhY7ztlypSsXbu26fHKK6+0zGAAAAAA4ANVHSPPP//8XHbZZbn33nvToUOHpuNHHnlkHn300RYd93/169cvu+66axYvXpwk6dWrV9MnM9/z7rvv5vXXX//A60wm//86lF26dGn2AAAAAABaV9Ux8umnn86xxx67yfEePXrktddea5FRH2Tp0qVZvXp1evfunSQZMmRI1qxZk4ULFzY957777ktjY2MGDx7cqlsAAAAAgOpUfTftbt26Zfny5dlzzz2bHf/Nb36TT37yk1W91/r165s+5ZgkS5YsyZNPPpnu3bune/fuueSSSzJmzJj06tUrL7zwQs4777zstddeGTZsWJJkv/32y/DhwzNx4sTMmDEj77zzTs4888yMHTvWnbQBAAAA
YBtT9Scjx44dm3/4h3/IihUrUqlU0tjYmF//+teZPHly/uZv/qaq93riiScycODADBw4MEkyadKkDBw4MFOnTk3btm3z1FNP5eijj84+++yTCRMm5JBDDslDDz2Ujh07Nr3HzJkz079//xx11FEZOXJkDjvssNx0003V/loAAAAAQCur+pORl19+ec4444z07ds3DQ0NGTBgQBoaGnLiiSfmwgsvrOq9/uqv/iobN278wPP33HPPn3yP7t275yc/+UlV/10AAAAAoLyqY2SHDh1y8803Z+rUqXn66aezfv36DBw4MHvvvXdr7AMAAAAAakTVMfI9ffv2Td++fVtyCwAAAABQw6q+ZuSYMWNy5ZVXbnL8qquuyte+9rUWGQUAAAAA1J6qY+S8efMycuTITY6PGDEi8+bNa5FRAAAAAEDtqTpGrl+/Ph06dNjkePv27VNXV9ciowAAAACA2lN1jDzwwANz5513bnL8jjvuyIABA1pkFAAAAABQe6q+gc1FF12U4447Li+88EKOPPLIJMncuXNz++2356677mrxgQAAAABAbag6Ro4aNSqzZs3K5Zdfnn/9139N586dc9BBB+VXv/pVjjjiiNbYCAAAAADUgKpjZJJ85StfyVe+8pWW3gIAAAAA1LCPFCOT5O23386qVavS2NjY7Pjuu+/+sUcBAAAAALWn6hj5/PPP59RTT83DDz/c7PjGjRtTqVTS0NDQYuMAAAAAgNpRdYw8+eST065du8yePTu9e/dOpVJpjV0AAAAAQI2pOkY++eSTWbhwYfr3798aewAAAACAGtWm2hcMGDAgr732WmtsAQAAAABqWNUx8sorr8x5552XBx54IKtXr05dXV2zBwAAAADA5lT9Ne2hQ4cmSY466qhmx93ABgAAAAD4MFXHyPvvv781dgAAAAAANa7qGHnEEUe0xg4AAAAAoMZVfc3IJHnooYdy0kkn5Qtf+EJeffXVJMmPf/zjzJ8/v0XHAQAAAAC1o+oY+bOf/SzDhg1L586ds2jRotTX1ydJ1q5dm8svv7zFBwIAAAAAtaHqGHnZZZdlxowZufnmm9O+ffum44ceemgWLVrUouMAAAAAgNpRdYx87rnncvjhh29yvGvXrlmzZk2LjAIAAAAAak/VMbJXr15ZvHjxJsfnz5+ffv36tcgoAAAAAKD2VB0jJ06cmHPOOScLFixIpVLJsmXLMnPmzEyePDmnn356a2wEAAAAAGpAu2pfcP7556exsTFHHXVUNmzYkMMPPzwdO3bM5MmTc9ZZZ7XGRgAAAACgBlQVIxsaGvLrX/86Z5xxRr797W9n8eLFWb9+fQYMGJAdd9yxtTYCAAAAADWgqhjZtm3bfPnLX86zzz6bbt26ZcCAAa21CwAAAACoMVVfM/KAAw7Iiy++2BpbAAAAAIAaVnWMvOyyyzJ58uTMnj07y5cvT11dXbMHAAAAAMDmVH0Dm5EjRyZJjj766FQqlabjGzduTKVSSUNDQ8utAwAAAABqRtUx8v7772+NHQAAAABAjas6Rh5xxBGtsQMAAAAAqHFVXzMySR566KGcdNJJ+cIXvpBXX301SfLjH/848+fPb9FxAAAAAEDtqDpG/uxnP8uwYcPSuXPnLFq0KPX19UmStWvX5vLLL2/xgQAAAABAbfhId9OeMWNGbr755rRv377p+KGHHppFixa16DgAAAAAoHZUHSOfe+65HH744Zsc79q1a9asWdMiowAAAACA2lN1jOzVq1cWL168yfH58+enX79+LTIKAAAAAKg9VcfIiRMn5pxzzsmCBQtSqVSybNmyzJw5M5MnT87pp5/eGhsBAAAAgBrQrtoXnH/++WlsbMxRRx2VDRs25PDDD0/Hjh0zefLknHXWWa2xEQAAAACoAVsUI5966qkccMABadOmTSqVSi644IJ8+9vfzuLFi7N+/foMGDAgO+64Y2tvBQAAAAC2Y1v0Ne2BAwfmtddeS5L069cvq1evTocOHTJgwIB87nOfEyIBAAAAgD9pi2Jkt27dsmTJkiTJSy+9lMbGxlYdBQAAAADUni36mvaYMWNyxBFHpHfv3qlUKhk0aFDatm272ee++OKLLToQAAAAAKgNWxQjb7rpphx33HFZvHhxzj777EycODE77bRTa28DAAAAAGrIFt9Ne/jw4UmShQsX5pxzzhEjAQAAAICqbHGMfM+tt97aGjsAAAAAgBpXdYx88803853vcddE3wAAIABJREFUfCdz587NqlWrNrmZjWtGAgAAAACbU3WM/Nu//ds8+OCD+cY3vtF0QxsAAAAAgD+l6hh5991359///d9z6KGHtsYeAAAAAKBGtan2BTvvvHO6d+/eGlsAAAAAgBpWdYy89NJLM3Xq1GzYsKE19gAAAAAANarqr2lfc801eeGFF9KzZ8/ssccead++fbPzixYtarFxAAAAAEDtqDpGjh49ujV2AAAAAAA1ruoYOW3atNbYAQAAAADUuKqvGQkAAAAA8FFs8Scjd95551QqlT/5vNdff/1jDQIAAAAAatMWx8jp06e35g4AAAAAoMZtcYwcP358a+4AAAAAAGqca0YCAAAAAEWIkQAAAABAEWIkAAAAAFCEGAkAAAAAFCFGAgAAAABFbPHdtN8zadKkzR6vVCrp1KlT9tprrxxzzDHp3r37xx4HAAAAANSOqmPkb37zmyxatCgNDQ3Zd999kyT//d//nbZt26Z///658cYb861vfSvz58/PgAEDWnwwAAAAALB9qvpr2sccc0yGDh2aZcuWZeHChVm4cGGWLl2aL33pSznhhBPy6quv5vDDD8+5557bGnsBAAAAgO1U1THyn/7pn3LppZemS5cuTce6du2aiy++OFdddVV22GGHTJ06NQsXLmzRoQAAAADA9q3qGLl27dqsWrVqk+N//OMfU1dXlyTp1q1b3n777Y+/DgAAAACoGR/pa9qnnnpqfvGLX2Tp0qVZunRpfvGLX2TChAkZPXp0kuSxxx7LPvvs0+JjAQAAAIDtV9U3sPn+97+fc889N2PHjs277777/9+kXbuMHz8+1113XZKkf//+ueWWW1p2KQAAAACwXas6Ru644465+eabc9111+XFF19MkvTr1y877rhj03M++9nPttxCAAAAAKAmVP017X/5l3/Jhg0bsuOOO+aggw7KQQcd1CxEAgAAAABsTtUx8txzz02PHj1y4okn5j/+4z/S0NDQGrsAAAAAgBpTdYxcvnx57rjjjlQqlRx//PHp3bt3zjjjjDz88MOtsQ8AAAAAqBFVx8h27drlq1/9ambOnJlVq1bluuuuy0svvZQvfvGL+cxnPtMaGwEAAACAGlD1DWzeb4cddsiwYcPyxhtv5A9/+EOeffbZltoFAAAAANSYqj8ZmSQbNmzIzJkzM3LkyHzyk5/M9OnTc+yxx+aZZ55p6X0AAAAAQI2o+pORY8eOzezZs7PDDjvk+OOPz0UXXZQhQ4a0xjYAAAAAoIZUHSPbtm2bn/70pxk2bFjatm3b7Nzvfve7HHDAAS02DgAAAACoHVXHyJkzZzb
7ed26dbn99ttzyy23ZOHChWloaGixcQAAAABA7fhI14xMknnz5mX8+PHp3bt3rr766hx55JF59NFHW3IbAAAAAFBDqvpk5IoVK3LbbbflBz/4Qerq6nL88cenvr4+s2bNyoABA1prIwAAAABQA7b4k5GjRo3Kvvvum6eeeirTp0/PsmXLcv3117fmNgAAAACghmzxJyPvvvvunH322Tn99NOz9957t+YmAAAAAKAGbfEnI+fPn59169blkEMOyeDBg/O9730vr732WmtuAwAAAABqyBbHyM9//vO5+eabs3z58nzzm9/MHXfckT59+qSxsTH33ntv1q1b15o7AQAAAIDtXNV30/7EJz6RU089NfPnz8/TTz+db33rW/nOd76THj165Oijj26NjQAAAABADag6Rr7fvvvum6uuuipLly7N7bff3lKbAAAAAIAa9LFi5Hvatm2b0aNH55e//GVLvB0AAAAAUINaJEYCAAAAAPwpYiQAAAAAUIQYCQAAAAAUIUYCAAAAAEWIkQAAAABAEWIkAAAAAFCEGAkAAAAAFCFGAgAAAABFiJEAAAAAQBFiJAAAAABQhBgJAAAAABQhRgIAAAAARYiRAAAAAEARYiQAAAAAUIQYCQAAAAAUIUYCAAAAAEWIkQAAAABAEWIkAAAAAFCEGAkAAAAAFCFGAgAAAABFiJEAAAAAQBFiJAAAAABQhBgJAAAAABQhRgIAAAAARYiRAAAAAEARYiQAAAAAUIQYCQAAAAAUIUYCAAAAAEWIkQAAAABAEWIkAAAAAFCEGAkAAAAAFCFGAgAAAABFiJEAAAAAQBFiJAAAAABQhBgJAAAAABQhRgIAAAAARYiRAAAAAEARYiQAAAAAUIQYCQAAAAAUIUYCAAAAAEWIkQAAAABAEWIkAAAAAFCEGAkAAAAAFCFGAgAAAABFiJEAAAAAQBFiJAAAAABQhBgJAAAAABQhRgIAAAAARYiRAAAAAEARYiQAAAAAUIQYCQAAAAAUIUYCAAAAAEWIkQAAAABAEWIkAAAAAFCEGAkAAAAAFCFGAgAAAABFiJEAAAAAQBFbNUbOmzcvo0aNSp8+fVKpVDJr1qxm5zdu3JipU6emd+/e6dy5c4YOHZrnn3++2XNef/31jBs3Ll26dEm3bt0yYcKErF+/vuSvAQAAAABsga0aI998880cfPDBueGGGzZ7/qqrrsp3v/vdzJgxIwsWLMgnPvGJDBs2LG+99VbTc8aNG5dnnnkm9957b2bPnp158+bltNNOK/UrAAAAAABbqN3W/I+PGDEiI0aM2Oy5jRs3Zvr06bnwwgtzzDHHJEl+9KMfpWfPnpk1a1bGjh2bZ599NnPmzMnjjz+eQYMGJUmuv/76jBw5MldffXX69OlT7HcBAAAAAD7cNnvNyCVLlmTFihUZOnRo07GuXbtm8ODBeeSRR5IkjzzySLp169YUIpNk6NChadOmTRYsWFB8MwAAAADwwbbqJyM/zIoVK5IkPXv2bHa8Z8+eTedWrFiRHj16NDvfrl27dO/evek5m1NfX5/6+vqmn+vq6lpqNgAAAADwAbbZT0a2piuuuCJdu3ZtevTt23drTwIAAACAmrfNxshevXolSVauXNns+MqVK5vO9erVK6tWrWp2/t13383rr7/e9JzNmTJlStauXdv0eOWVV1p4PQAAAADwf22zMXLPPfdMr169Mnfu3KZjdXV1WbBgQYYMGZIkGTJkSNasWZOFCxc2Pee+++5LY2NjBg8e/IHv3bFjx3Tp0qXZAwAAAABoXVv1mpHr16/P4sWLm35esmRJnnzyyXTv3j277757/v7v/z6XXXZZ9t577+y555656KKL0qdPn4wePTpJst9++2X48OGZOHFiZsyYkXfeeSdnnnlmxo4d607aAAAAALCN2aox8oknnsgXv/jFpp8nTZqUJBk/fnxuu+22nHfeeXnzzTdz2mmnZc2aNTnssMMyZ86cdOrUqek1M2fOzJlnnpmjjjoqbdq0yZgxY/Ld7363+O8CAAAAAHy4ysaNGzdu7RFbW11dXbp27Zq1a9fW7Fe29zj/37f2BACgBbz0na9s7QkU5u84AKgNtf533Jb2tW32mpEAAAAAQG0RIwEAAACAIsRIAAAAAKAIMRIAAAAAKEKMBAAAAACKECMBAAAAgCLESAAAAACgCDESAAAAAChCjAQAAAAAihAjAQAAAIAixEgAAAAAoAgxEgAAAAAoQowEAAAAAIoQIwEAAACAIsRIAAAAAKAIMRIAAAAAKEKMBAAAAACKECMBAAAAgCLESAAAAACgCDESAAAAAChCjAQAAAAAihAjAQAAAIAixEgAAAAAoAgxEgAAAAAoQowEAAAAAIoQIwEAAACAIsRIAAAAAKAIMRIAAAAAKEKMBAAAAACKECMBAAAAgCLESAAAAACgCDESAAAAAChCjAQAAAAAihAjAQAAAIAixEgAAAAAoAgxEgAAAAAoQowEAAAAAIoQIwEAAACAIsRIAAAAAKAIMRIAAAAAKEKMBAAAAACKECMBAAAAgCLESAAAAACgCDESAAAAAChCjAQAAAAAihAjAQAAAIAixEgAAAAAoAgxEgAAAAAoQowEAAAAAIoQIwEAAACAIsRIAAAAAKAIMRIAAAAAKEKMBAAAAACKECMBAAAAgCLESAAAAACgCDESAAAAAChCjAQAAAAAihAjAQAAAIAixEgAAAAAoAgxEgAAAAAoQowEAAAAAIoQIwEAAACAIsRIAAAAAKAIMRIAAAAAKEKMBAAAAACKECMBAAAAgCLESAAAAACgCDESAAAAAChCjAQAAAAAihAjAQAAAIAixEgAAAAAoAgxEgAAAAAoQowEAAAAAIoQIwEAAACAIsRIAAAAAKAIMRIAAAAAKEKMBAAAAACKECMBAAAAgCLESAAAAACgCDESAAAAAChCjAQAAAAAihAjAQAAAIAixEgAAAAAoAgxEgAAAAAoQowEAAAAAIoQIwEAAACAIsRIAAAAAKAIMRIAAAAAKEKMBAAAAACKECMBAAAAgCLESAAAAACgCDESAAAAAChCjAQAAAAAihAjAQAAAIAixEgAAAAAoAgxEgAAAAAoQowEAAAAAIoQIwEAAACAIsRIAAAAAKAIMRIAAAAAKEKMBAAAAACKECMBAAAAgCLESAAAAACgCDESAAAAAChCjAQAAAAAihAjAQAAAIAixEgAAAAAoAgxEgAAAAAoQowEAAAAAIoQIwEAAACAIsRIAAAAAKAIMRIAAAAAKEKMBAAAAACKECMBAAAAgCLESAAAAACgCDESAAAAAChCjAQAAAAAihAjAQAAAIAixEgAAAAAoAgxEgAAAAAoQowEAAAAAIoQIwEAAACAIsRIAAAAAKAIMRIAAAAAKEKMBAAAAACKECMBAAAAgCLESAAAAACgCDESAAAAAChCjAQAAAAAihAjAQAAAIAixEgAAAAAoAgxEgAAAAAoQowEAAAAAIoQIwEAAACAIsRIAAAAAKAIMRIAAAAAKEKMBAAAAACKECMBAAAAgCLESAAAAACgCDESAAAAAC
hCjAQAAAAAihAjAQAAAIAixEgAAAAAoAgxEgAAAAAoQowEAAAAAIoQIwEAAACAIsRIAAAAAKCIbTpGXnzxxalUKs0e/fv3bzr/1ltv5Ywzzsguu+ySHXfcMWPGjMnKlSu34mIAAAAA4INs0zEySfbff/8sX7686TF//vymc+eee27+7d/+LXfddVcefPDBLFu2LMcdd9xWXAsAAAAAfJB2W3vAn9KuXbv06tVrk+Nr167ND37wg/zkJz/JkUcemSS59dZbs99+++XRRx/N5z//+dJTAQAAAIAPsc1/MvL5559Pnz590q9fv4wbNy4vv/xykmThwoV55513MnTo0Kbn9u/fP7vvvnseeeSRD33P+vr61NXVNXsAAAAAAK1rm46RgwcPzm233ZY5c+bkn//5n7NkyZL85V/+ZdatW5cVK1akQ4cO6datW7PX9OzZMytWrPjQ973iiivStWvXpkffvn1b89cAAAAAALKNf017xIgRTf8+6KCDMnjw4Hz605/OT3/603Tu3Pkjv++UKVMyadKkpp/r6uoESQAAAABoZdv0JyP/r27dumWfffbJ4sWL06tXr7z99ttZs2ZNs+esXLlys9eYfL+OHTumS5cuzR4AAAAAQOvarmLk+vXr88ILL6R379455JBD0r59+8ydO7fp/HPPPZeXX345Q4YM2YorAQAAAIDN2aa/pj158uSMGjUqn/70p7Ns2bJMmzYtbdu2zQknnJCuXbtmwoQJmTRpUrp3754uXbrkrLPOypAhQ9xJGwAAAAC2Qdt0jFy6dGlOOOGErF69OrvttlsOO+ywPProo9ltt92SJNddd13atGmTMWPGpL6+PsOGDcuNN964lVcDAAAAAJuzTcfIO+6440PPd+rUKTfccENuuOGGQosAAAAAgI9qu7pmJAAAAACw/RIjAQAAAIAixEgAAAAAoAgxEgAAAAAoQowEAAAAAIoQIwEAAACAIsRIAAAAAKAIMRIAAAAAKEKMBAAAAACKECMBAAAAgCLESAAAAACgCDESAAAAAChCjAQAAAAAihAjAQAAAIAixEgAAAAAoAgxEgAAAAAoQowEAAAAAIoQIwEAAACAIsRIAAAAAKAIMRIAAAAAKEKMBAAAAACKECMBAAAAgCLESAAAAACgCDESAAAAAChCjAQAAAAAihAjAQAAAIAixEgAAAAAoAgxEgAAAAAoQowEAAAAAIoQIwEAAACAIsRIAAAAAKAIMRIAAAAAKEKMBAAAAACKECMBAAAAgCLESAAAAACgCDESAAAAAChCjAQAAAAAihAjAQAAAIAixEgAAAAAoAgxEgAAAAAoQowEAAAAAIoQIwEAAACAIsRIAAAAAKAIMRIAAAAAKEKMBAAAAACKECMBAAAAgCLESAAAAACgCDESAAAAAChCjAQAAAAAihAjAQAAAIAixEgAAAAAoAgxEgAAAAAoQowEAAAAAIoQIwEAAACAIsRIAAAAAKAIMRIAAAAAKEKMBAAAAACKECMBAAAAgCLESAAAAACgCDESAAAAAChCjAQAAAAAihAjAQAAAIAixEgAAAAAoAgxEgAAAAAoQowEAAAAAIoQIwEAAACAIsRIAAAAAKAIMRIAAAAAKEKMBAAAAACKECMBAAAAgCLESAAAAACgCDESAAAAAChCjAQAAAAAihAjAQAAAIAixEgAAAAAoAgxEgAAAAAoQowEAAAAAIoQIwEAAACAIsRIAAAAAKAIMRIAAAAAKEKMBAAAAACKECMBAAAAgCLESAAAAACgCDESAAAAAChCjAQAAAAAihAjAQAAAIAixEgAAAAAoAgxEgAAAAAoQowEAAAAAIoQIwEAAACAIsRIAAAAAKAIMRIAAAAAKEKMBAAAAACKECMBAAAAgCLESAAAAACgCDESAAAAAChCjAQAAAAAihAjAQAAAIAixEgAAAAAoAgxEgAAAAAoQowEAAAAAIoQIwEAAACAIsRIAAAAAKAIMRIAAAAAKEKMBAAAAACKECMBAAAAgCLESAAAAACgCDESAAAAAChCjAQAAAAAihAjAQAAAIAixEgAAAAAoAgxEgAAAAAoQowEAAAAAIoQIwEAAACAIsRIAAAAAKAIMRIAAAAAKEKMBAAAAACKECMBAAAAgCLESAAAAACgCDESAAAAAChCjAQAAAAAihAjAQAAAIAixEgAAAAAoAgxEgAAAAAoQowEAAAAAIoQIwEAAACAIsRIAAAAAKAIMRIAAAAAKEKMBAAAAACKECMBAAAAgCLESAAAAACgCDESAAAAAChCjAQAAAAAihAjAQAAAIAixEgAAAAAoAgxEgAAAAAoQowEAAAAAIoQIwEAAACAIsRIAAAAAKAIMRIAAAAAKEKMBAAAAACKECMBAAAAgCLESAAAAACgCDESAAAAAChCjAQAAAAAihAjAQAAAIAiaiZG3nDDDdljjz3SqVOnDB48OI899tjWngQAAAAAvE9NxMg777wzkyZNyrRp07Jo0aIcfPDBGTZsWFatWrW1pwEAAAAA/6smYuS1116biRMn5pRTTsmAAQMyY8aM7LDDDvnhD3+4tacBAAAAAP+r3dYe8HG9/fbbWbhwYaZMmdJ0rE2bNhk6dGgeeeSRzb6mvr4+9fX1TT+vXbs2SVJXV9e6Y7eixvoNW3sCANACavnvFTbP33EAUBtq/e+4936/jRs3fujztvsY+dprr6WhoSE9e/Zsdrxnz575r//6r82+5oorrsgll1yyyfG+ffu2ykYAgJbSdfrWXgAAwEfx5/J33Lp169K1a9cPPL/dx8iPYsqUKZk0aVLTz42NjXn99dezyy67pFKpbMVlAB9dXV1d+vbtm1deeSVdunTZ2nMAANhC/o4DasHGjRuzbt269OnT50Oft93HyF133TVt27bNypUrmx1fuXJlevXqtdnXdOzYMR07dmx2rFu3bq22EaCkLl26+CMWAGA75O84YHv3YZ+IfM92fwObDh065JBDDsncuXObjjU2Nmbu3LkZMmTIVlwGAAAAALzfdv/JyCSZNGlSxo8fn0GDBuVzn/tcpk+fnjfffDOnnHLK1p4GAAAAAPyvmoiRX//61/PHP/4xU6dOzYoVK/LZz342c+bM2eSmNgC1rGPHjpk2bdoml6EAAGDb5u844M9JZeOfut82AAAAAEAL2O6vGQkAAAAAbB/ESAAAAACgCDESAAAAAChCjASoQZVKJbNmzdraMwAAAKAZMRJgC5x88smpVCqpVCrp0KFD9tprr/zjP/5j3n333S16/UsvvZRKpZInn3yylZd+PIsXL84pp5yST33qU+nYsWP23HPPnHDCCXniiSeanvPe/4dK5f+1d/8xUdd/HMCfHyA4ODjwV53kAPH4bUgLZMUKLAlIZ4tKRGwoGiYUnFwgDJfAkCI5Q2ogszzIiKioZE0hD204cjFiiCjKGctZHKEZEj+KFnz/cNw8D47DAPtuz8dfx/v9ev+4zz+893rf+/0RYG9vj6CgIJw8eVJXHxISArlcbtB3WVkZHBwc5uR7EBEREZni6tWriIuLg6OjIywtLeHs7Izk5GT89
ttvenEhISEQBAGffPKJXnlhYSFcXFx0f5eVlUEQBISHh+vF9fX1QRAEfPvttwCAs2fPwtLSEjU1NXpx1dXVEIlEaG9vNzrv6upqhISEwN7eHra2tvD19UVOTg5u3LihNw9BEGBmZoYlS5Zgy5Yt6O3tBWB8bTrZWo6IaKYwGUlEZKLw8HBotVpoNBooFApkZWVh3759cz6Pv//+e1b6bW5uxiOPPILOzk6UlpbiwoUL+PLLL+Hp6QmFQqEXq1KpoNVq0djYiIULF2Lt2rXo6uqalXkRERERzYauri74+/tDo9GgsrISly9fxsGDB1FfX49HH31Ul9gbJxKJsHv37inXYhYWFlCr1Th16tSkMStWrMAbb7yB+Ph4XeKzt7cXr7zyCrKzs7F8+fJJ22ZmZiIqKgoBAQE4fvw42tvboVQqcfbsWRw5ckQXJ5FIoNVq8fPPP+PQoUM4fvw4XnrpJVMeDRHRrGIykojIRFZWVpBKpXB2dsaOHTuwevVq1NTUYHBwEBKJBJ9//rle/FdffQWxWIw//vgDS5cuBQA8/PDDEAQBISEhAIDR0VHk5OTofono5+eH2tpaXR/ju9ZVVVUIDg6GSCRCRUUFAODw4cPw8fGBlZUVFi9ejFdffVVv/OvXr+O5556DjY0N3NzcDHbebzc2NobNmzfDzc0Np0+fxpo1a7Bs2TL4+flhz549OHr0qF68g4MDpFIpli9fjpKSEgwPD+PEiRN3/WyJiIiI5lpiYiIsLS3xzTffIDg4GE5OToiIiIBarcYvv/yCzMxMvfjo6Gj09fXh0KFDRvsVi8WIi4tDenq60biMjAw4OTkhMTERALB9+3a4ubnh9ddfn7RNU1MT8vLyoFQqsW/fPjz22GNwcXFBaGgoqqurERsbq4sVBAFSqRSOjo6IiIhAUlIS1Go1hoeHp3o0RESzislIIqK7ZG1tjZGREYjFYmzYsAEqlUqvXqVS4YUXXoCdnR2ampoAAGq1GlqtFl988QUA4MCBA1AqlSgoKEBbWxvCwsKwbt06aDQavb7S09ORnJyMjo4OhIWFoaSkBImJiYiPj8e5c+dQU1MDmUym1yY7Oxvr169HW1sbnnnmGcTExBjs8I9rbW3F+fPnoVAoYGZm+K/B2PFqa2trAMDIyMgUT4yIiIjov+HGjRuoq6tDQkKCbi0zTiqVIiYmBlVVVRgbG9OVSyQSZGZmIicnB4ODg0b7z8rKwrlz5ww2q29nbm6O8vJyHD16FBs3bkRdXR3Kyspgbm4+aZuKigrY2toiISFhwvqp1myjo6MmXzNERDRbmIwkIpqmsbExqNVq1NXV4cknnwQAbNu2DXV1ddBqtQBuHbM5duwY4uLiAACLFi0CACxYsABSqRTz588HABQUFGDXrl3YsGEDPDw8kJ+fDz8/PxQWFuqNKZfLERkZiaVLl2Lx4sXIzc2FQqFAcnIy3N3dERAQYHC3z+bNmxEdHQ2ZTIa8vDwMDAzokqJ3Gk9+enp6TutZDA0NYffu3TA3N0dwcPC02hIRERHdKxqNBmNjY/Dy8pqw3svLC7///juuXbumV56QkACRSIT9+/cb7d/R0RHJycnIzMw0mvzz8vKCXC5HZWUlsrKy4O7uPuW8XV1dcd999xmNm6jdwYMH4e/vDzs7u2m1JSKaaUxGEhGZ6Ouvv4atrS1EIhEiIiIQFRWFrKwsAMDKlSvh4+OD8vJyAMBHH30EZ2dnPPHEE5P219/fj+7ubgQFBemVBwUFoaOjQ6/M399f97m3txfd3d146qmnjM7X19dX91ksFkMikeguLb/T7bv+poiOjoatrS3s7OxQXV2NDz74QG88IiIiov8H010DWVlZIScnBwUFBbh+/brR2F27duHatWs4fPjwpDEDAwOoqqqCjY0NTp8+PaPzvXnzJmxtbWFjYwMPDw888MADuut+iIjuJSYjiYhMtGrVKrS2tkKj0WB4eBjl5eUQi8W6+m3btqGsrAzArSPaW7ZsgSAIMzL27ePceZRoMnfumAuCgNHR0Qljx3fhL168aFLf77zzDlpbW9HT04Oenh69+4kkEglu3rxp0Kavrw/29vYm9U9EREQ0m2QyGQRBMNgAHtfR0YF58+bpTrfcbtOmTXB2dkZubq7RMRwcHJCRkYHs7GwMDQ1NGJOamgqRSITvvvsOarUaH374odE+3d3d0dXVZdILDe3s7NDa2or29nYMDg6ioaFBt+aTSCQAwDUbEd0TTEYSEZlILBZDJpPByckJFhYWBvWbNm3ClStXUFRUhAsXLugl6CwtLQEA//zzj65MIpHA0dERjY2Nev00NjbC29t70nnY2dnBxcUF9fX1//Yr6fj5+cHb2xtKpXLChGVfX5/e31KpFDKZbMIFuoeHB1paWgzKW1papjx6RERERDQXFixYgNDQUBQXFxu80KWnpwcVFRWIioqacGPZzMwMb775JkpKSvDTTz8ZHee1116DmZkZDhw4YFB34sQJvP/++ygvL8dfRjBdAAADg0lEQVSKFSuQm5sLuVyuu/ZnIhs3bsTAwACKi4snrL99zWZmZgaZTAZXV1eDzez58+dj4cKF+OGHH/TK+/v7cfnyZa7ZiGhWMRlJRDRD5s2bh8jISKSmpuLpp5/GkiVLdHX3338/rK2tUVtbi19//VW3C52amor8/HxUVVXh0qVLSE9PR2trK5KTk42OlZWVBaVSiaKiImg0GrS0tODdd9+967kLggCVSoXOzk48/vjjOHbsGLq6utDW1oa9e/fi2WefNbmvHTt2oLOzE0lJSWhra8OlS5ewf/9+VFZWQqFQ3PUciYiIiGbSe++9h7/++gthYWFoaGjA1atXUVtbi9DQUDz44IPYu3fvpG3XrFmDwMBAlJaWGh1DJBIhOzsbRUVFeuX9/f3YunUrUlNTERAQAADYuXMnvL29ER8fP2l/gYGBSEtLg0KhQFpaGs6cOYMrV66gvr4eL774ou7KIFOkpKQgLy8PFRUV+PHHH9HU1ISYmBgsWrQIkZGRJvdDRDRdTEYSEc2grVu3YmRkRPfimnEWFhYoKipCaWkpHB0ddcm9pKQkpKSkQKFQ4KGHHkJtbS1qamrg5uZmdJzY2FgUFhaiuLgYPj4+WLt2rcEbuKdr5cqVaG5uhkwmw8svvwwvLy+sW7cO58+fN3ihjjGurq5oaGjAxYsXsXr1agQGBuLTTz/FZ599hvDw8H81RyIiIqKZ4ubmhubmZri6umL9+vVYtmwZ4uPjsWrVKpw5c0b3wsHJ5Ofn488//5xynNjYWLi6uuqVyeVy2Nvb6+4fB279klGlUuHkyZNGj2vn5+fj448/xvfff4+wsDD4+PggJSUFvr6+eidzppKWloY9e/YgPz8fvr6+eP755yEWi3Hq1CmTrwUiIrobwth0b+wlIqJJHTlyBDt37kR3d7fuaDYRERERERER3WJ46RkREU3b0NAQtFot3nrrLWzfvp2JSCIiIiIiIqIJ8Jg2EdEMePvtt+Hp6QmpVIqMjIx7PR0iIiIiIiKi/yQe0yYiIiIiIiIiIqI5wV9GEhER
ERERERER0ZxgMpKIiIiIiIiIiIjmBJORRERERERERERENCeYjCQiIiIiIiIiIqI5wWQkERERERERERERzQkmI4mIiIiIiIiIiGhOMBlJREREREREREREc4LJSCIiIiIiIiIiIpoTTEYSERERERERERHRnPgfSrgfT0i5kZoAAAAASUVORK5CYII=\n", 295 | "text/plain": [ 296 | "
" 297 | ] 298 | }, 299 | "metadata": {}, 300 | "output_type": "display_data" 301 | } 302 | ], 303 | "source": [ 304 | "pipelines = [(\"Pytorch CPU\", nlp_torch), (\"ONNX CPU\", nlp_onnx)]\n", 305 | "results = {}\n", 306 | " \n", 307 | "with warnings.catch_warnings():\n", 308 | " warnings.simplefilter(\"ignore\")\n", 309 | " for label, pipeline_ in pipelines:\n", 310 | " # Compute \n", 311 | " time_buffer = []\n", 312 | " for _ in trange(100, desc=f\"Tracking inference time for {label}\"):\n", 313 | " with track_infer_time(time_buffer):\n", 314 | " pipeline_({\"question\": \"What is ONNX runtime ?\", \"context\": \"ONNX Runtime is a highly performant single inference engine for multiple platforms and hardware\"})\n", 315 | "\n", 316 | " # Store the result\n", 317 | " results[label] = OnnxInferenceResult(\n", 318 | " time_buffer, \n", 319 | " None\n", 320 | " )\n", 321 | "\n", 322 | "plot_benchmark(results)" 323 | ] 324 | }, 325 | { 326 | "cell_type": "code", 327 | "execution_count": null, 328 | "metadata": {}, 329 | "outputs": [], 330 | "source": [] 331 | } 332 | ], 333 | "metadata": { 334 | "kernelspec": { 335 | "display_name": "Python 3", 336 | "language": "python", 337 | "name": "python3" 338 | }, 339 | "language_info": { 340 | "codemirror_mode": { 341 | "name": "ipython", 342 | "version": 3 343 | }, 344 | "file_extension": ".py", 345 | "mimetype": "text/x-python", 346 | "name": "python", 347 | "nbconvert_exporter": "python", 348 | "pygments_lexer": "ipython3", 349 | "version": "3.7.8" 350 | } 351 | }, 352 | "nbformat": 4, 353 | "nbformat_minor": 4 354 | } 355 | -------------------------------------------------------------------------------- /onnx_transformers/__init__.py: -------------------------------------------------------------------------------- 1 | __version__ = "0.1.0" 2 | 3 | # Pipelines 4 | from .pipelines import ( 5 | CsvPipelineDataFormat, 6 | JsonPipelineDataFormat, 7 | NerPipeline, 8 | PipedPipelineDataFormat, 9 | Pipeline, 10 | PipelineDataFormat, 11 | QuestionAnsweringPipeline, 12 | TextClassificationPipeline, 13 | TokenClassificationPipeline, 14 | ZeroShotClassificationPipeline, 15 | pipeline, 16 | ) 17 | -------------------------------------------------------------------------------- /onnx_transformers/pipelines.py: -------------------------------------------------------------------------------- 1 | # this code is taken and adapted from https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines.py 2 | 3 | import csv 4 | import json 5 | import os 6 | import pickle 7 | import sys 8 | from abc import ABC, abstractmethod 9 | from contextlib import contextmanager 10 | from itertools import chain 11 | from os.path import abspath, exists 12 | from pathlib import Path 13 | from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union 14 | 15 | import numpy as np 16 | from onnxruntime import GraphOptimizationLevel, InferenceSession, SessionOptions, get_all_providers 17 | from psutil import cpu_count 18 | from transformers.configuration_auto import AutoConfig 19 | from transformers.configuration_utils import PretrainedConfig 20 | from transformers.convert_graph_to_onnx import convert_pytorch, convert_tensorflow, infer_shapes 21 | from transformers.data import SquadExample, squad_convert_examples_to_features 22 | from transformers.file_utils import add_end_docstrings, is_tf_available, is_torch_available 23 | from transformers.modelcard import ModelCard 24 | from transformers.tokenization_auto import AutoTokenizer 25 | from 
transformers.tokenization_bert import BasicTokenizer 26 | from transformers.tokenization_utils import PreTrainedTokenizer 27 | from transformers.tokenization_utils_base import BatchEncoding, PaddingStrategy 28 | from transformers.utils import logging 29 | 30 | 31 | if is_tf_available(): 32 | import tensorflow as tf 33 | from transformers.modeling_tf_auto import ( 34 | TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, 35 | TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, 36 | TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, 37 | TF_MODEL_WITH_LM_HEAD_MAPPING, 38 | TFAutoModel, 39 | TFAutoModelForCausalLM, 40 | TFAutoModelForQuestionAnswering, 41 | TFAutoModelForSequenceClassification, 42 | TFAutoModelForTokenClassification, 43 | TFAutoModelWithLMHead, 44 | ) 45 | 46 | if is_torch_available(): 47 | import torch 48 | from transformers.modeling_auto import ( 49 | MODEL_FOR_MASKED_LM_MAPPING, 50 | MODEL_FOR_QUESTION_ANSWERING_MAPPING, 51 | MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, 52 | MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, 53 | MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, 54 | AutoModel, 55 | AutoModelForCausalLM, 56 | AutoModelForMaskedLM, 57 | AutoModelForQuestionAnswering, 58 | AutoModelForSeq2SeqLM, 59 | AutoModelForSequenceClassification, 60 | AutoModelForTokenClassification, 61 | ) 62 | 63 | if TYPE_CHECKING: 64 | from transformers.modeling_tf_utils import TFPreTrainedModel 65 | from transformers.modeling_utils import PreTrainedModel 66 | 67 | 68 | ONNX_CACHE_DIR = Path(os.path.dirname(__file__)).parent.joinpath(".onnx") 69 | 70 | 71 | logger = logging.get_logger(__name__) 72 | 73 | 74 | # Constants from the performance optimization available in onnxruntime 75 | # It needs to be done before importing onnxruntime 76 | os.environ["OMP_NUM_THREADS"] = str(cpu_count(logical=True)) 77 | os.environ["OMP_WAIT_POLICY"] = "ACTIVE" 78 | 79 | 80 | def create_model_for_provider(model_path: str, provider: str) -> InferenceSession: 81 | 82 | assert provider in get_all_providers(), f"provider {provider} not found, {get_all_providers()}" 83 | 84 | # Few properties that might have an impact on performances (provided by MS) 85 | options = SessionOptions() 86 | options.intra_op_num_threads = 1 87 | options.graph_optimization_level = GraphOptimizationLevel.ORT_ENABLE_ALL 88 | 89 | # Load the model as a graph and prepare the CPU backend 90 | session = InferenceSession(model_path, options, providers=[provider]) 91 | session.disable_fallback() 92 | 93 | return session 94 | 95 | 96 | def get_framework(model=None): 97 | """ 98 | Select framework (TensorFlow or PyTorch) to use. 99 | 100 | Args: 101 | model (:obj:`str`, :class:`~transformers.PreTrainedModel` or :class:`~transformers.TFPreTrainedModel`, `optional`): 102 | If both frameworks are installed, picks the one corresponding to the model passed (either a model class or 103 | the model name). If no specific model is provided, defaults to using PyTorch. 104 | """ 105 | if is_tf_available() and is_torch_available() and model is not None and not isinstance(model, str): 106 | # Both framework are available but the user supplied a model class instance. 107 | # Try to guess which framework to use from the model classname 108 | framework = "tf" if model.__class__.__name__.startswith("TF") else "pt" 109 | elif not is_tf_available() and not is_torch_available(): 110 | raise RuntimeError( 111 | "At least one of TensorFlow 2.0 or PyTorch should be installed. 
" 112 | "To install TensorFlow 2.0, read the instructions at https://www.tensorflow.org/install/ " 113 | "To install PyTorch, read the instructions at https://pytorch.org/." 114 | ) 115 | else: 116 | # framework = 'tf' if is_tf_available() else 'pt' 117 | framework = "pt" if is_torch_available() else "tf" 118 | return framework 119 | 120 | 121 | class PipelineException(Exception): 122 | """ 123 | Raised by a :class:`~transformers.Pipeline` when handling __call__. 124 | 125 | Args: 126 | task (:obj:`str`): The task of the pipeline. 127 | model (:obj:`str`): The model used by the pipeline. 128 | reason (:obj:`str`): The error message to display. 129 | """ 130 | 131 | def __init__(self, task: str, model: str, reason: str): 132 | super().__init__(reason) 133 | 134 | self.task = task 135 | self.model = model 136 | 137 | 138 | class ArgumentHandler(ABC): 139 | """ 140 | Base interface for handling arguments for each :class:`~transformers.pipelines.Pipeline`. 141 | """ 142 | 143 | @abstractmethod 144 | def __call__(self, *args, **kwargs): 145 | raise NotImplementedError() 146 | 147 | 148 | class DefaultArgumentHandler(ArgumentHandler): 149 | """ 150 | Default argument parser handling parameters for each :class:`~transformers.pipelines.Pipeline`. 151 | """ 152 | 153 | @staticmethod 154 | def handle_kwargs(kwargs: Dict) -> List: 155 | if len(kwargs) == 1: 156 | output = list(kwargs.values()) 157 | else: 158 | output = list(chain(kwargs.values())) 159 | 160 | return DefaultArgumentHandler.handle_args(output) 161 | 162 | @staticmethod 163 | def handle_args(args: Sequence[Any]) -> List[str]: 164 | 165 | # Only one argument, let's do case by case 166 | if len(args) == 1: 167 | if isinstance(args[0], str): 168 | return [args[0]] 169 | elif not isinstance(args[0], list): 170 | return list(args) 171 | else: 172 | return args[0] 173 | 174 | # Multiple arguments (x1, x2, ...) 175 | elif len(args) > 1: 176 | if all([isinstance(arg, str) for arg in args]): 177 | return list(args) 178 | 179 | # If not instance of list, then it should instance of iterable 180 | elif isinstance(args, Iterable): 181 | return list(chain.from_iterable(chain(args))) 182 | else: 183 | raise ValueError( 184 | "Invalid input type {}. Pipeline supports Union[str, Iterable[str]]".format(type(args)) 185 | ) 186 | else: 187 | return [] 188 | 189 | def __call__(self, *args, **kwargs): 190 | if len(kwargs) > 0 and len(args) > 0: 191 | raise ValueError("Pipeline cannot handle mixed args and kwargs") 192 | 193 | if len(kwargs) > 0: 194 | return DefaultArgumentHandler.handle_kwargs(kwargs) 195 | else: 196 | return DefaultArgumentHandler.handle_args(args) 197 | 198 | 199 | class PipelineDataFormat: 200 | """ 201 | Base class for all the pipeline supported data format both for reading and writing. 202 | Supported data formats currently includes: 203 | - JSON 204 | - CSV 205 | - stdin/stdout (pipe) 206 | 207 | :obj:`PipelineDataFormat` also includes some utilities to work with multi-columns like mapping from datasets 208 | columns to pipelines keyword arguments through the :obj:`dataset_kwarg_1=dataset_column_1` format. 209 | 210 | Args: 211 | output_path (:obj:`str`, `optional`): Where to save the outgoing data. 212 | input_path (:obj:`str`, `optional`): Where to look for the input data. 213 | column (:obj:`str`, `optional`): The column to read. 214 | overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`): 215 | Whether or not to overwrite the :obj:`output_path`. 
216 | """ 217 | 218 | SUPPORTED_FORMATS = ["json", "csv", "pipe"] 219 | 220 | def __init__( 221 | self, 222 | output_path: Optional[str], 223 | input_path: Optional[str], 224 | column: Optional[str], 225 | overwrite: bool = False, 226 | ): 227 | self.output_path = output_path 228 | self.input_path = input_path 229 | self.column = column.split(",") if column is not None else [""] 230 | self.is_multi_columns = len(self.column) > 1 231 | 232 | if self.is_multi_columns: 233 | self.column = [tuple(c.split("=")) if "=" in c else (c, c) for c in self.column] 234 | 235 | if output_path is not None and not overwrite: 236 | if exists(abspath(self.output_path)): 237 | raise OSError("{} already exists on disk".format(self.output_path)) 238 | 239 | if input_path is not None: 240 | if not exists(abspath(self.input_path)): 241 | raise OSError("{} doesnt exist on disk".format(self.input_path)) 242 | 243 | @abstractmethod 244 | def __iter__(self): 245 | raise NotImplementedError() 246 | 247 | @abstractmethod 248 | def save(self, data: Union[dict, List[dict]]): 249 | """ 250 | Save the provided data object with the representation for the current 251 | :class:`~transformers.pipelines.PipelineDataFormat`. 252 | 253 | Args: 254 | data (:obj:`dict` or list of :obj:`dict`): The data to store. 255 | """ 256 | raise NotImplementedError() 257 | 258 | def save_binary(self, data: Union[dict, List[dict]]) -> str: 259 | """ 260 | Save the provided data object as a pickle-formatted binary data on the disk. 261 | 262 | Args: 263 | data (:obj:`dict` or list of :obj:`dict`): The data to store. 264 | 265 | Returns: 266 | :obj:`str`: Path where the data has been saved. 267 | """ 268 | path, _ = os.path.splitext(self.output_path) 269 | binary_path = os.path.extsep.join((path, "pickle")) 270 | 271 | with open(binary_path, "wb+") as f_output: 272 | pickle.dump(data, f_output) 273 | 274 | return binary_path 275 | 276 | @staticmethod 277 | def from_str( 278 | format: str, 279 | output_path: Optional[str], 280 | input_path: Optional[str], 281 | column: Optional[str], 282 | overwrite=False, 283 | ) -> "PipelineDataFormat": 284 | """ 285 | Creates an instance of the right subclass of :class:`~transformers.pipelines.PipelineDataFormat` depending 286 | on :obj:`format`. 287 | 288 | Args: 289 | format: (:obj:`str`): 290 | The format of the desired pipeline. Acceptable values are :obj:`"json"`, :obj:`"csv"` or :obj:`"pipe"`. 291 | output_path (:obj:`str`, `optional`): 292 | Where to save the outgoing data. 293 | input_path (:obj:`str`, `optional`): 294 | Where to look for the input data. 295 | column (:obj:`str`, `optional`): 296 | The column to read. 297 | overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`): 298 | Whether or not to overwrite the :obj:`output_path`. 299 | 300 | Returns: 301 | :class:`~transformers.pipelines.PipelineDataFormat`: The proper data format. 302 | """ 303 | if format == "json": 304 | return JsonPipelineDataFormat(output_path, input_path, column, overwrite=overwrite) 305 | elif format == "csv": 306 | return CsvPipelineDataFormat(output_path, input_path, column, overwrite=overwrite) 307 | elif format == "pipe": 308 | return PipedPipelineDataFormat(output_path, input_path, column, overwrite=overwrite) 309 | else: 310 | raise KeyError("Unknown reader {} (Available reader are json/csv/pipe)".format(format)) 311 | 312 | 313 | class CsvPipelineDataFormat(PipelineDataFormat): 314 | """ 315 | Support for pipelines using CSV data format. 
316 | 317 | Args: 318 | output_path (:obj:`str`, `optional`): Where to save the outgoing data. 319 | input_path (:obj:`str`, `optional`): Where to look for the input data. 320 | column (:obj:`str`, `optional`): The column to read. 321 | overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`): 322 | Whether or not to overwrite the :obj:`output_path`. 323 | """ 324 | 325 | def __init__( 326 | self, 327 | output_path: Optional[str], 328 | input_path: Optional[str], 329 | column: Optional[str], 330 | overwrite=False, 331 | ): 332 | super().__init__(output_path, input_path, column, overwrite=overwrite) 333 | 334 | def __iter__(self): 335 | with open(self.input_path, "r") as f: 336 | reader = csv.DictReader(f) 337 | for row in reader: 338 | if self.is_multi_columns: 339 | yield {k: row[c] for k, c in self.column} 340 | else: 341 | yield row[self.column[0]] 342 | 343 | def save(self, data: List[dict]): 344 | """ 345 | Save the provided data object with the representation for the current 346 | :class:`~transformers.pipelines.PipelineDataFormat`. 347 | 348 | Args: 349 | data (:obj:`List[dict]`): The data to store. 350 | """ 351 | with open(self.output_path, "w") as f: 352 | if len(data) > 0: 353 | writer = csv.DictWriter(f, list(data[0].keys())) 354 | writer.writeheader() 355 | writer.writerows(data) 356 | 357 | 358 | class JsonPipelineDataFormat(PipelineDataFormat): 359 | """ 360 | Support for pipelines using JSON file format. 361 | 362 | Args: 363 | output_path (:obj:`str`, `optional`): Where to save the outgoing data. 364 | input_path (:obj:`str`, `optional`): Where to look for the input data. 365 | column (:obj:`str`, `optional`): The column to read. 366 | overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`): 367 | Whether or not to overwrite the :obj:`output_path`. 368 | """ 369 | 370 | def __init__( 371 | self, 372 | output_path: Optional[str], 373 | input_path: Optional[str], 374 | column: Optional[str], 375 | overwrite=False, 376 | ): 377 | super().__init__(output_path, input_path, column, overwrite=overwrite) 378 | 379 | with open(input_path, "r") as f: 380 | self._entries = json.load(f) 381 | 382 | def __iter__(self): 383 | for entry in self._entries: 384 | if self.is_multi_columns: 385 | yield {k: entry[c] for k, c in self.column} 386 | else: 387 | yield entry[self.column[0]] 388 | 389 | def save(self, data: dict): 390 | """ 391 | Save the provided data object in a json file. 392 | 393 | Args: 394 | data (:obj:`dict`): The data to store. 395 | """ 396 | with open(self.output_path, "w") as f: 397 | json.dump(data, f) 398 | 399 | 400 | class PipedPipelineDataFormat(PipelineDataFormat): 401 | """ 402 | Read data from piped input to the python process. 403 | For multi columns data, columns should separated by \t 404 | 405 | If columns are provided, then the output will be a dictionary with {column_x: value_x} 406 | 407 | Args: 408 | output_path (:obj:`str`, `optional`): Where to save the outgoing data. 409 | input_path (:obj:`str`, `optional`): Where to look for the input data. 410 | column (:obj:`str`, `optional`): The column to read. 411 | overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`): 412 | Whether or not to overwrite the :obj:`output_path`. 
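
    Example (a minimal sketch; assumes tab-separated ``question<TAB>context`` lines are piped on stdin)::

        data_format = PipedPipelineDataFormat(None, None, "question,context")
        for item in data_format:  # reads sys.stdin line by line
            print(item)  # {"question": ..., "context": ...}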
413 | """ 414 | 415 | def __iter__(self): 416 | for line in sys.stdin: 417 | # Split for multi-columns 418 | if "\t" in line: 419 | 420 | line = line.split("\t") 421 | if self.column: 422 | # Dictionary to map arguments 423 | yield {kwargs: l for (kwargs, _), l in zip(self.column, line)} 424 | else: 425 | yield tuple(line) 426 | 427 | # No dictionary to map arguments 428 | else: 429 | yield line 430 | 431 | def save(self, data: dict): 432 | """ 433 | Print the data. 434 | 435 | Args: 436 | data (:obj:`dict`): The data to store. 437 | """ 438 | print(data) 439 | 440 | def save_binary(self, data: Union[dict, List[dict]]) -> str: 441 | if self.output_path is None: 442 | raise KeyError( 443 | "When using piped input on pipeline outputting large object requires an output file path. " 444 | "Please provide such output path through --output argument." 445 | ) 446 | 447 | return super().save_binary(data) 448 | 449 | 450 | class _ScikitCompat(ABC): 451 | """ 452 | Interface layer for the Scikit and Keras compatibility. 453 | """ 454 | 455 | @abstractmethod 456 | def transform(self, X): 457 | raise NotImplementedError() 458 | 459 | @abstractmethod 460 | def predict(self, X): 461 | raise NotImplementedError() 462 | 463 | 464 | PIPELINE_INIT_ARGS = r""" 465 | Arguments: 466 | model (:obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`): 467 | The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from 468 | :class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for 469 | TensorFlow. 470 | tokenizer (:obj:`~transformers.PreTrainedTokenizer`): 471 | The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from 472 | :class:`~transformers.PreTrainedTokenizer`. 473 | modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`): 474 | Model card attributed to the model for this pipeline. 475 | framework (:obj:`str`, `optional`): 476 | The framework to use, either :obj:`"pt"` for PyTorch or :obj:`"tf"` for TensorFlow. The specified framework 477 | must be installed. 478 | 479 | If no framework is specified, will default to the one currently installed. If no framework is specified 480 | and both frameworks are installed, will default to the framework of the :obj:`model`, or to PyTorch if no 481 | model is provided. 482 | task (:obj:`str`, defaults to :obj:`""`): 483 | A task-identifier for the pipeline. 484 | args_parser (:class:`~transformers.pipelines.ArgumentHandler`, `optional`): 485 | Reference to the object in charge of parsing supplied pipeline parameters. 486 | device (:obj:`int`, `optional`, defaults to -1): 487 | Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model 488 | on the associated CUDA device id. 489 | binary_output (:obj:`bool`, `optional`, defaults to :obj:`False`): 490 | Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text. 491 | """ 492 | 493 | 494 | @add_end_docstrings(PIPELINE_INIT_ARGS) 495 | class Pipeline(_ScikitCompat): 496 | """ 497 | The Pipeline class is the class from which all pipelines inherit. Refer to this class for methods shared across 498 | different pipelines. 499 | 500 | Base class implementing pipelined operations. 
501 |     Pipeline workflow is defined as a sequence of the following operations:
502 | 
503 |         Input -> Tokenization -> Model Inference -> Post-Processing (task dependent) -> Output
504 | 
505 |     Pipeline supports running on CPU or GPU through the :obj:`device` argument, or using ONNX Runtime (see below).
506 | 
507 |     Some pipelines, such as :class:`~transformers.FeatureExtractionPipeline` (:obj:`'feature-extraction'`),
508 |     output large tensor objects as nested lists. To avoid dumping such large structures as textual data, we
509 |     provide the :obj:`binary_output` constructor argument. If set to :obj:`True`, the output will be stored in
510 |     pickle format.
511 |     """
512 | 
513 |     default_input_names = None
514 | 
515 |     def __init__(
516 |         self,
517 |         model: Union["PreTrainedModel", "TFPreTrainedModel", str],
518 |         tokenizer: PreTrainedTokenizer,
519 |         config: PretrainedConfig,
520 |         modelcard: Optional[ModelCard] = None,
521 |         framework: Optional[str] = None,
522 |         task: str = "",
523 |         args_parser: ArgumentHandler = None,
524 |         device: int = -1,
525 |         binary_output: bool = False,
526 |         onnx: bool = True,
527 |         graph_path: Optional[Path] = None,
528 |     ):
529 | 
530 |         if framework is None:
531 |             framework = get_framework(model)
532 | 
533 |         self.onnx = onnx
534 |         self.graph_path = graph_path
535 |         self.task = task
536 |         self.model = model
537 |         self.config = config
538 |         self.tokenizer = tokenizer
539 |         self.modelcard = modelcard
540 |         self.framework = framework
541 |         self.device = device if framework == "tf" else torch.device("cpu" if device < 0 else "cuda:{}".format(device))
542 |         self.binary_output = binary_output
543 |         self._args_parser = args_parser or DefaultArgumentHandler()
544 | 
545 |         # Special handling
546 |         if self.framework == "pt" and self.device.type == "cuda" and (not onnx):
547 |             self.model = self.model.to(self.device)
548 | 
549 |         # Export the graph
550 |         if onnx:
551 |             input_names_path = graph_path.parent.joinpath(f"{os.path.basename(graph_path)}.input_names.json")
552 |             if not graph_path.exists() or not input_names_path.exists():
553 |                 self._export_onnx_graph(input_names_path)
554 | 
555 |             logger.info(f"loading onnx graph from {self.graph_path.as_posix()}")
556 |             self.onnx_model = create_model_for_provider(str(graph_path), "CPUExecutionProvider")
557 |             self.input_names = json.load(open(input_names_path))
558 |             self.framework = "np"
559 |             self._warmup_onnx_graph()
560 | 
561 |         # TODO: handle this
562 |         # Update config with task specific parameters
563 |         # task_specific_params = self.model.config.task_specific_params
564 |         # if task_specific_params is not None and task in task_specific_params:
565 |         #     self.model.config.update(task_specific_params.get(task))
566 | 
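    # Minimal usage sketch of the ONNX path above (illustrative; relies on the
    # `pipeline` factory defined at the bottom of this module):
    #
    #     nlp = pipeline("feature-extraction", onnx=True)
    #     # The first construction exports the graph under ONNX_CACHE_DIR and
    #     # warms it up; later constructions reuse the cached .onnx file.
    #     vectors = nlp("Hello world")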
567 |     def save_pretrained(self, save_directory: str):
568 |         """
569 |         Save the pipeline's model and tokenizer.
570 | 
571 |         Args:
572 |             save_directory (:obj:`str`):
573 |                 A path to the directory where to save. It will be created if it doesn't exist.
574 |         """
575 |         if os.path.isfile(save_directory):
576 |             logger.error("Provided path ({}) should be a directory, not a file".format(save_directory))
577 |             return
578 |         os.makedirs(save_directory, exist_ok=True)
579 | 
580 |         self.model.save_pretrained(save_directory)
581 |         self.tokenizer.save_pretrained(save_directory)
582 |         if self.modelcard is not None:
583 |             self.modelcard.save_pretrained(save_directory)
584 | 
585 |     def transform(self, X):
586 |         """
587 |         Scikit / Keras interface to transformers' pipelines. This method will forward to __call__().
588 |         """
589 |         return self(X=X)
590 | 
591 |     def predict(self, X):
592 |         """
593 |         Scikit / Keras interface to transformers' pipelines. This method will forward to __call__().
594 |         """
595 |         return self(X=X)
596 | 
597 |     @contextmanager
598 |     def device_placement(self):
599 |         """
600 |         Context Manager allowing tensor allocation on the user-specified device in a framework-agnostic way.
601 | 
602 |         Returns:
603 |             Context manager
604 | 
605 |         Examples::
606 | 
607 |             # Explicitly ask for tensor allocation on CUDA device :0
608 |             pipe = pipeline(..., device=0)
609 |             with pipe.device_placement():
610 |                 # Every framework specific tensor allocation will be done on the requested device
611 |                 output = pipe(...)
612 |         """
613 |         if self.framework == "tf":
614 |             with tf.device("/CPU:0" if self.device == -1 else "/device:GPU:{}".format(self.device)):
615 |                 yield
616 |         else:
617 |             if self.device.type == "cuda":
618 |                 torch.cuda.set_device(self.device)
619 | 
620 |             yield
621 | 
622 |     def ensure_tensor_on_device(self, **inputs):
623 |         """
624 |         Ensure PyTorch tensors are on the specified device.
625 | 
626 |         Args:
627 |             inputs (keyword arguments that should be :obj:`torch.Tensor`): The tensors to place on :obj:`self.device`.
628 | 
629 |         Return:
630 |             :obj:`Dict[str, torch.Tensor]`: The same as :obj:`inputs` but on the proper device.
631 |         """
632 |         return {name: tensor.to(self.device) for name, tensor in inputs.items()}
633 | 
634 |     def check_model_type(self, supported_models: Union[List[str], dict]):
635 |         """
636 |         Check if the model class is supported by the pipeline.
637 | 
638 |         Args:
639 |             supported_models (:obj:`List[str]` or :obj:`dict`):
640 |                 The list of models supported by the pipeline, or a dictionary with model class values.
641 |         """
642 |         if not isinstance(supported_models, list):  # Create from a model mapping
643 |             supported_models = [item[1].__name__ for item in supported_models.items()]
644 |         if self.model.__class__.__name__ not in supported_models:
645 |             raise PipelineException(
646 |                 self.task,
647 |                 self.model.base_model_prefix,
648 |                 f"The model '{self.model.__class__.__name__}' is not supported for {self.task}. Supported models are {supported_models}",
649 |             )
650 | 
651 |     def _parse_and_tokenize(self, *args, padding=True, add_special_tokens=True, **kwargs):
652 |         """
653 |         Parse arguments and tokenize.
654 |         """
655 |         # Parse arguments
656 |         inputs = self._args_parser(*args, **kwargs)
657 |         inputs = self.tokenizer(
658 |             inputs,
659 |             add_special_tokens=add_special_tokens,
660 |             return_tensors=self.framework,
661 |             padding=padding,
662 |         )
663 | 
664 |         return inputs
665 | 
666 |     def __call__(self, *args, **kwargs):
667 |         inputs = self._parse_and_tokenize(*args, **kwargs)
668 |         if self.onnx:
669 |             return self._forward_onnx(inputs)
670 |         else:
671 |             return self._forward(inputs)
672 | 
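    # Dispatch sketch for __call__ above (illustrative): with onnx=True the
    # tokenizer returns numpy tensors (framework "np") that feed the ONNX
    # session, otherwise the framework-specific `_forward` below is used:
    #
    #     inputs = nlp._parse_and_tokenize("My name is Bert")
    #     out = nlp._forward_onnx(inputs) if nlp.onnx else nlp._forward(inputs)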
673 |     def _forward(self, inputs, return_tensors=False):
674 |         """
675 |         Internal framework specific forward dispatching.
676 |         Args:
677 |             inputs: dict holding all the keyword arguments required by the model forward method.
678 |             return_tensors: Whether to return native framework (pt/tf) tensors rather than a numpy array.
679 |         Returns:
680 |             Numpy array
681 |         """
682 |         # Encode for forward
683 |         with self.device_placement():
684 |             if self.framework == "tf":
685 |                 # TODO trace model
686 |                 predictions = self.model(inputs.data, training=False)[0]
687 |             else:
688 |                 with torch.no_grad():
689 |                     inputs = self.ensure_tensor_on_device(**inputs)
690 |                     predictions = self.model(**inputs)[0].cpu()
691 | 
692 |         if return_tensors:
693 |             return predictions
694 |         else:
695 |             return predictions.numpy()
696 | 
697 |     def _export_onnx_graph(self, input_names_path: Path):
698 |         # If the graph exists but we got here, something went wrong in a previous
699 |         # load, so delete the old graph.
700 |         if self.graph_path.exists():
701 |             self.graph_path.unlink()
702 |         if input_names_path.exists():
703 |             input_names_path.unlink()
704 | 
705 |         # create parent dir
706 |         if not self.graph_path.parent.exists():
707 |             os.makedirs(self.graph_path.parent.as_posix())
708 | 
709 |         logger.info(f"Saving onnx graph at {self.graph_path.as_posix()}")
710 | 
711 |         if self.framework == "pt":
712 |             convert_pytorch(self, opset=11, output=self.graph_path, use_external_format=False)
713 |         else:
714 |             convert_tensorflow(self, opset=11, output=self.graph_path)
715 | 
716 |         # save input names
717 |         self.input_names = infer_shapes(self, "pt")[0]
718 |         with open(input_names_path, "w") as f:
719 |             json.dump(self.input_names, f)
720 | 
721 |     def _forward_onnx(self, inputs, return_tensors=False):
722 |         # inputs_onnx = {k: v.cpu().detach().numpy() for k, v in inputs.items() if k in self.input_names}
723 |         inputs_onnx = {k: v for k, v in inputs.items() if k in self.input_names}
724 |         predictions = self.onnx_model.run(None, inputs_onnx)
725 |         return predictions
726 | 
727 |     def _warmup_onnx_graph(self, n=10):
728 |         model_inputs = self.tokenizer("My name is Bert", return_tensors="np")
729 |         for _ in range(n):
730 |             self._forward_onnx(model_inputs)
731 | 
732 | 
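# The cached graph can also be driven directly with onnxruntime; a hedged
# sketch (the .onnx file name and input names depend on the exported model):
#
#     from onnxruntime import InferenceSession
#     session = InferenceSession("bert-base-cased.onnx")
#     outputs = session.run(None, {"input_ids": ids, "attention_mask": mask})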
733 | # Can't use @add_end_docstrings(PIPELINE_INIT_ARGS) here because this one does not accept `binary_output`
734 | class FeatureExtractionPipeline(Pipeline):
735 |     """
736 |     Feature extraction pipeline using no model head. This pipeline extracts the hidden states from the base
737 |     transformer, which can be used as features in downstream tasks.
738 | 
739 |     This feature extraction pipeline can currently be loaded from :func:`~transformers.pipeline` using the task
740 |     identifier: :obj:`"feature-extraction"`.
741 | 
742 |     All models may be used for this pipeline. See a list of all models, including community-contributed models on
743 |     `huggingface.co/models <https://huggingface.co/models>`__.
744 | 
745 |     Arguments:
746 |         model (:obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`):
747 |             The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
748 |             :class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
749 |             TensorFlow.
750 |         tokenizer (:obj:`~transformers.PreTrainedTokenizer`):
751 |             The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
752 |             :class:`~transformers.PreTrainedTokenizer`.
753 |         modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`):
754 |             Model card attributed to the model for this pipeline.
755 |         framework (:obj:`str`, `optional`):
756 |             The framework to use, either :obj:`"pt"` for PyTorch or :obj:`"tf"` for TensorFlow. The specified framework
757 |             must be installed.
758 | 
759 |             If no framework is specified, will default to the one currently installed. If no framework is specified
760 |             and both frameworks are installed, will default to the framework of the :obj:`model`, or to PyTorch if no
761 |             model is provided.
762 |         task (:obj:`str`, defaults to :obj:`""`):
763 |             A task-identifier for the pipeline.
764 |         args_parser (:class:`~transformers.pipelines.ArgumentHandler`, `optional`):
765 |             Reference to the object in charge of parsing supplied pipeline parameters.
766 |         device (:obj:`int`, `optional`, defaults to -1):
767 |             Device ordinal for CPU/GPU support. Setting this to -1 will leverage the CPU, a positive integer will run
768 |             the model on the associated CUDA device id.
769 |     """
770 | 
771 |     def __init__(
772 |         self,
773 |         model: Union["PreTrainedModel", "TFPreTrainedModel"],
774 |         tokenizer: PreTrainedTokenizer,
775 |         config: PretrainedConfig,
776 |         modelcard: Optional[ModelCard] = None,
777 |         framework: Optional[str] = None,
778 |         args_parser: ArgumentHandler = None,
779 |         device: int = -1,
780 |         task: str = "",
781 |         **kwargs
782 |     ):
783 |         super().__init__(
784 |             model=model,
785 |             tokenizer=tokenizer,
786 |             config=config,
787 |             modelcard=modelcard,
788 |             framework=framework,
789 |             args_parser=args_parser,
790 |             device=device,
791 |             binary_output=True,
792 |             task=task,
793 |             **kwargs,
794 |         )
795 | 
796 |     def __call__(self, *args, **kwargs):
797 |         """
798 |         Extract the features of the input(s).
799 | 
800 |         Args:
801 |             args (:obj:`str` or :obj:`List[str]`): One or several texts (or one list of texts) to get the features of.
802 | 
803 |         Return:
804 |             A nested list of :obj:`float`: The features computed by the model.
805 |         """
806 |         output = super().__call__(*args, **kwargs)
807 |         if self.onnx:
808 |             return output[0].tolist()
809 |         return output.tolist()
810 | 
811 | 
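# Illustrative output shape for the pipeline above: a single sentence yields a
# nested list shaped [1, sequence_length, hidden_size]:
#
#     nlp = pipeline("feature-extraction", onnx=True)
#     feats = nlp("My name is Bert")  # len(feats[0]) == number of tokens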
812 | @add_end_docstrings(
813 |     PIPELINE_INIT_ARGS,
814 |     r"""
815 |         return_all_scores (:obj:`bool`, `optional`, defaults to :obj:`False`):
816 |             Whether to return all prediction scores or just the one of the predicted class.
817 |     """,
818 | )
819 | class TextClassificationPipeline(Pipeline):
820 |     """
821 |     Text classification pipeline using any :obj:`ModelForSequenceClassification`. See the
822 |     `sequence classification examples <../task_summary.html#sequence-classification>`__ for more information.
823 | 
824 |     This text classification pipeline can currently be loaded from :func:`~transformers.pipeline` using the following
825 |     task identifier: :obj:`"sentiment-analysis"` (for classifying sequences according to positive or negative
826 |     sentiments).
827 | 
828 |     The models that this pipeline can use are models that have been fine-tuned on a sequence classification task.
829 |     See the up-to-date list of available models on
830 |     `huggingface.co/models <https://huggingface.co/models>`__.
831 |     """
832 | 
833 |     def __init__(self, return_all_scores: bool = False, **kwargs):
834 |         super().__init__(**kwargs)
835 | 
836 |         # self.check_model_type(
837 |         #     TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
838 |         #     if self.framework == "tf"
839 |         #     else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
840 |         # )
841 | 
842 |         self.return_all_scores = return_all_scores
843 | 
844 |     def __call__(self, *args, **kwargs):
845 |         """
846 |         Classify the text(s) given as inputs.
847 | 
848 |         Args:
849 |             args (:obj:`str` or :obj:`List[str]`):
850 |                 One or several texts (or one list of prompts) to classify.
851 | 
852 |         Return:
853 |             A list or a list of lists of :obj:`dict`: Each result comes as a list of dictionaries with the
854 |             following keys:
855 | 
856 |             - **label** (:obj:`str`) -- The label predicted.
857 |             - **score** (:obj:`float`) -- The corresponding probability.
858 | 
859 |             If ``self.return_all_scores=True``, one such dictionary is returned per label.
860 |         """
861 |         outputs = super().__call__(*args, **kwargs)
862 |         scores = np.exp(outputs) / np.exp(outputs).sum(-1, keepdims=True)
863 |         if self.return_all_scores:
864 |             return [
865 |                 [{"label": self.config.id2label[i], "score": score.item()} for i, score in enumerate(item)]
866 |                 for item in scores
867 |             ]
868 |         else:
869 |             return [{"label": self.config.id2label[item.argmax()], "score": item.max().item()} for item in scores]
870 | 
871 | 
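# A minimal numpy sketch of the softmax post-processing used in __call__ above
# (illustrative logits):
#
#     logits = np.array([[1.2, -0.3]])
#     probs = np.exp(logits) / np.exp(logits).sum(-1, keepdims=True)
#     probs.argmax(-1)  # id of the predicted label, mapped via config.id2label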
872 | class ZeroShotClassificationArgumentHandler(ArgumentHandler):
873 |     """
874 |     Handles arguments for zero-shot text classification by turning each possible label into an NLI
875 |     premise/hypothesis pair.
876 |     """
877 | 
878 |     def _parse_labels(self, labels):
879 |         if isinstance(labels, str):
880 |             labels = [label.strip() for label in labels.split(",")]
881 |         return labels
882 | 
883 |     def __call__(self, sequences, labels, hypothesis_template):
884 |         if len(labels) == 0 or len(sequences) == 0:
885 |             raise ValueError("You must include at least one label and at least one sequence.")
886 |         if hypothesis_template.format(labels[0]) == hypothesis_template:
887 |             raise ValueError(
888 |                 (
889 |                     'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
890 |                     "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
891 |                 ).format(hypothesis_template)
892 |             )
893 | 
894 |         if isinstance(sequences, str):
895 |             sequences = [sequences]
896 |         labels = self._parse_labels(labels)
897 | 
898 |         sequence_pairs = []
899 |         for sequence in sequences:
900 |             sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])
901 | 
902 |         return sequence_pairs
903 | 
904 | 
905 | @add_end_docstrings(PIPELINE_INIT_ARGS)
906 | class ZeroShotClassificationPipeline(Pipeline):
907 |     """
908 |     NLI-based zero-shot classification pipeline using a :obj:`ModelForSequenceClassification` trained on NLI (natural
909 |     language inference) tasks.
910 | 
911 |     Any combination of sequences and labels can be passed and each combination will be posed as a premise/hypothesis
912 |     pair and passed to the pretrained model. Then, the logit for `entailment` is taken as the logit for the
913 |     candidate label being valid. Any NLI model can be used as long as the first output logit corresponds to
914 |     `contradiction` and the last to `entailment`.
915 | 
916 |     This NLI pipeline can currently be loaded from :func:`~transformers.pipeline` using the following
917 |     task identifier: :obj:`"zero-shot-classification"`.
918 | 
919 |     The models that this pipeline can use are models that have been fine-tuned on an NLI task.
920 |     See the up-to-date list of available models on
921 |     `huggingface.co/models <https://huggingface.co/models>`__.
922 |     """
923 | 
924 |     def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
925 |         super().__init__(*args, args_parser=args_parser, **kwargs)
926 | 
927 |     def _parse_and_tokenize(self, *args, padding=True, add_special_tokens=True, **kwargs):
928 |         """
929 |         Parse arguments and tokenize with truncation="only_first" so that the hypothesis (label) is not truncated.
930 |         """
931 |         inputs = self._args_parser(*args, **kwargs)
932 |         inputs = self.tokenizer(
933 |             inputs,
934 |             add_special_tokens=add_special_tokens,
935 |             return_tensors=self.framework,
936 |             padding=padding,
937 |             truncation="only_first",
938 |         )
939 | 
940 |         return inputs
941 | 
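    # Illustrative call (candidate labels may also be one comma-separated string):
    #
    #     nlp = pipeline("zero-shot-classification", onnx=True)
    #     nlp("Who are you voting for in 2020?",
    #         candidate_labels=["economics", "politics", "public health"])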
942 |     def __call__(self, sequences, candidate_labels, hypothesis_template="This example is {}.", multi_class=False):
943 |         """
944 |         Classify the sequence(s) given as inputs.
945 | 
946 |         Args:
947 |             sequences (:obj:`str` or :obj:`List[str]`):
948 |                 The sequence(s) to classify, which will be truncated if the model input is too large.
949 |             candidate_labels (:obj:`str` or :obj:`List[str]`):
950 |                 The set of possible class labels to classify each sequence into. Can be a single label, a string of
951 |                 comma-separated labels, or a list of labels.
952 |             hypothesis_template (:obj:`str`, `optional`, defaults to :obj:`"This example is {}."`):
953 |                 The template used to turn each label into an NLI-style hypothesis. This template must include a {}
954 |                 or similar syntax for the candidate label to be inserted into the template. For example, the default
955 |                 template is :obj:`"This example is {}."` With the candidate label :obj:`"sports"`, this would be fed
956 |                 into the model like :obj:`"<cls> sequence to classify <sep> This example is sports . <sep>"`. The
957 |                 default template works well in many cases, but it may be worthwhile to experiment with different
958 |                 templates depending on the task setting.
959 |             multi_class (:obj:`bool`, `optional`, defaults to :obj:`False`):
960 |                 Whether or not multiple candidate labels can be true. If :obj:`False`, the scores are normalized
961 |                 such that the sum of the label likelihoods for each sequence is 1. If :obj:`True`, the labels are
962 |                 considered independent and probabilities are normalized for each candidate by doing a softmax of
963 |                 the entailment score vs. the contradiction score.
964 | 
965 |         Return:
966 |             A :obj:`dict` or a list of :obj:`dict`: Each result comes as a dictionary with the
967 |             following keys:
968 | 
969 |             - **sequence** (:obj:`str`) -- The sequence for which this is the output.
970 |             - **labels** (:obj:`List[str]`) -- The labels sorted by order of likelihood.
971 |             - **scores** (:obj:`List[float]`) -- The probabilities for each of the labels.
972 |         """
973 |         outputs = super().__call__(sequences, candidate_labels, hypothesis_template)
974 |         if self.onnx:
975 |             outputs = outputs[0]
976 |         num_sequences = 1 if isinstance(sequences, str) else len(sequences)
977 |         candidate_labels = self._args_parser._parse_labels(candidate_labels)
978 |         reshaped_outputs = outputs.reshape((num_sequences, len(candidate_labels), -1))
979 | 
980 |         if len(candidate_labels) == 1:
981 |             multi_class = True
982 | 
983 |         if not multi_class:
984 |             # softmax the "entailment" logits over all candidate labels
985 |             entail_logits = reshaped_outputs[..., -1]
986 |             scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)
987 |         else:
988 |             # softmax over the entailment vs. contradiction dim for each label independently
989 |             entail_contr_logits = reshaped_outputs[..., [0, -1]]
990 |             scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
991 |             scores = scores[..., 1]
992 | 
993 |         result = []
994 |         for iseq in range(num_sequences):
995 |             top_inds = list(reversed(scores[iseq].argsort()))
996 |             result.append(
997 |                 {
998 |                     "sequence": sequences if isinstance(sequences, str) else sequences[iseq],
999 |                     "labels": [candidate_labels[i] for i in top_inds],
1000 |                     "scores": scores[iseq][top_inds].tolist(),
1001 |                 }
1002 |             )
1003 | 
1004 |         if len(result) == 1:
1005 |             return result[0]
1006 |         return result
1007 | 
1008 | 
1009 | @add_end_docstrings(
1010 |     PIPELINE_INIT_ARGS,
1011 |     r"""
1012 |         ignore_labels (:obj:`List[str]`, defaults to :obj:`["O"]`):
1013 |             A list of labels to ignore.
1014 |         grouped_entities (:obj:`bool`, `optional`, defaults to :obj:`False`):
1015 |             Whether or not to group the tokens corresponding to the same entity together in the predictions or not.
1016 |     """,
1017 | )
1018 | class TokenClassificationPipeline(Pipeline):
1019 |     """
1020 |     Named Entity Recognition pipeline using any :obj:`ModelForTokenClassification`. See the
1021 |     `named entity recognition examples <../task_summary.html#named-entity-recognition>`__ for more information.
1022 | 
1023 |     This token recognition pipeline can currently be loaded from :func:`~transformers.pipeline` using the following
1024 |     task identifier: :obj:`"ner"` (for predicting the classes of tokens in a sequence: person, organisation, location
1025 |     or miscellaneous).
1026 | 
1027 |     The models that this pipeline can use are models that have been fine-tuned on a token classification task.
1028 |     See the up-to-date list of available models on
1029 |     `huggingface.co/models <https://huggingface.co/models>`__.
1030 |     """
1031 | 
1032 |     default_input_names = "sequences"
1033 | 
1034 |     def __init__(
1035 |         self,
1036 |         model: Union["PreTrainedModel", "TFPreTrainedModel"],
1037 |         tokenizer: PreTrainedTokenizer,
1038 |         config: PretrainedConfig,
1039 |         modelcard: Optional[ModelCard] = None,
1040 |         framework: Optional[str] = None,
1041 |         args_parser: ArgumentHandler = None,
1042 |         device: int = -1,
1043 |         binary_output: bool = False,
1044 |         onnx: bool = True,
1045 |         graph_path: Optional[Path] = None,
1046 |         ignore_labels=["O"],
1047 |         task: str = "",
1048 |         grouped_entities: bool = False,
1049 |     ):
1050 |         super().__init__(
1051 |             model=model,
1052 |             tokenizer=tokenizer,
1053 |             modelcard=modelcard,
1054 |             framework=framework,
1055 |             args_parser=args_parser,
1056 |             device=device,
1057 |             binary_output=binary_output,
1058 |             task=task,
1059 |             config=config,
1060 |             onnx=onnx,
1061 |             graph_path=graph_path,
1062 |         )
1063 | 
1064 |         # self.check_model_type(
1065 |         #     TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
1066 |         #     if self.framework == "tf"
1067 |         #     else MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
1068 |         # )
1069 | 
1070 |         self._basic_tokenizer = BasicTokenizer(do_lower_case=False)
1071 |         self.ignore_labels = ignore_labels
1072 |         self.grouped_entities = grouped_entities
1073 | 
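    # Illustrative NER call; output keys follow the docstring below, the values
    # shown are made up:
    #
    #     nlp = pipeline("ner", onnx=True)
    #     nlp("My name is Bert")
    #     # -> [{"word": "Bert", "score": 0.99, "entity": "I-PER", "index": 4}]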
1074 |     def __call__(self, *args, **kwargs):
1075 |         """
1076 |         Classify each token of the text(s) given as inputs.
1077 | 
1078 |         Args:
1079 |             args (:obj:`str` or :obj:`List[str]`):
1080 |                 One or several texts (or one list of texts) for token classification.
1081 | 
1082 |         Return:
1083 |             A list or a list of lists of :obj:`dict`: Each result comes as a list of dictionaries (one for each token in
1084 |             the corresponding input, or each entity if this pipeline was instantiated with
1085 |             :obj:`grouped_entities=True`) with the following keys:
1086 | 
1087 |             - **word** (:obj:`str`) -- The token/word classified.
1088 |             - **score** (:obj:`float`) -- The corresponding probability for :obj:`entity`.
1089 |             - **entity** (:obj:`str`) -- The entity predicted for that token/word.
1090 |             - **index** (:obj:`int`, only present when ``self.grouped_entities=False``) -- The index of the
1091 |               corresponding token in the sentence.
1092 |         """
1093 |         inputs = self._args_parser(*args, **kwargs)
1094 |         answers = []
1095 |         for sentence in inputs:
1096 |             if self.onnx:
1097 |                 tokens = self.tokenizer(
1098 |                     sentence,
1099 |                     return_attention_mask=True,
1100 |                     return_tensors=self.framework,
1101 |                     truncation=True,
1102 |                 )
1103 |                 entities = self._forward_onnx(tokens)[0]
1104 |                 entities = entities.squeeze(0)
1105 |                 input_ids = tokens["input_ids"][0]
1106 |             else:
1107 |                 tokens = self.tokenizer(
1108 |                     sentence,
1109 |                     return_attention_mask=False,
1110 |                     return_tensors=self.framework,
1111 |                     truncation=True,
1112 |                 )
1113 |                 # Manage correct placement of the tensors
1114 |                 with self.device_placement():
1115 |                     # Forward
1116 |                     if self.framework == "tf":
1117 |                         entities = self.model(tokens.data)[0][0].numpy()
1118 |                         input_ids = tokens["input_ids"].numpy()[0]
1119 |                     else:
1120 |                         with torch.no_grad():
1121 |                             tokens = self.ensure_tensor_on_device(**tokens)
1122 |                             entities = self.model(**tokens)[0][0].cpu().numpy()
1123 |                             input_ids = tokens["input_ids"].cpu().numpy()[0]
1124 | 
1125 |             score = np.exp(entities) / np.exp(entities).sum(-1, keepdims=True)
1126 |             labels_idx = score.argmax(axis=-1)
1127 | 
1128 |             entities = []
1129 |             # Filter to labels not in `self.ignore_labels`
1130 |             filtered_labels_idx = [
1131 |                 (idx, label_idx)
1132 |                 for idx, label_idx in enumerate(labels_idx)
1133 |                 if self.config.id2label[label_idx] not in self.ignore_labels
1134 |             ]
1135 | 
1136 |             for idx, label_idx in filtered_labels_idx:
1137 |                 entity = {
1138 |                     "word": self.tokenizer.convert_ids_to_tokens(int(input_ids[idx])),
1139 |                     "score": score[idx][label_idx].item(),
1140 |                     "entity": self.config.id2label[label_idx],
1141 |                     "index": idx,
1142 |                 }
1143 | 
1144 |                 entities += [entity]
1145 | 
1146 |             # Append grouped entities
1147 |             if self.grouped_entities:
1148 |                 answers += [self.group_entities(entities)]
1149 |             # Append ungrouped entities
1150 |             else:
1151 |                 answers += [entities]
1152 | 
1153 |         if len(answers) == 1:
1154 |             return answers[0]
1155 |         return answers
1156 | 
1158 |     def group_sub_entities(self, entities: List[dict]) -> dict:
1159 |         """
1160 |         Group together the adjacent tokens with the same entity predicted.
1161 | 
1162 |         Args:
1163 |             entities (:obj:`List[dict]`): The entities predicted by the pipeline.
1164 |         """
1165 |         # Get the first entity in the entity group
1166 |         entity = entities[0]["entity"]
1167 |         scores = np.array([entity["score"] for entity in entities])
1168 |         tokens = [entity["word"] for entity in entities]
1169 | 
1170 |         entity_group = {
1171 |             "entity_group": entity,
1172 |             "score": np.mean(scores),
1173 |             "word": self.tokenizer.convert_tokens_to_string(tokens),
1174 |         }
1175 |         return entity_group
1176 | 
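    # Grouping sketch: with grouped_entities=True, adjacent tokens sharing one
    # entity type (e.g. "B-ORG" followed by "I-ORG") are merged by
    # group_entities below into a single entry such as
    # {"entity_group": "ORG", "word": "Hugging Face", "score": ...}.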
1177 |     def group_entities(self, entities: List[dict]) -> List[dict]:
1178 |         """
1179 |         Find and group together the adjacent tokens with the same entity predicted.
1180 | 
1181 |         Args:
1182 |             entities (:obj:`List[dict]`): The entities predicted by the pipeline.
1183 |         """
1184 | 
1185 |         entity_groups = []
1186 |         entity_group_disagg = []
1187 | 
1188 |         if entities:
1189 |             last_idx = entities[-1]["index"]
1190 | 
1191 |         for entity in entities:
1192 |             is_last_idx = entity["index"] == last_idx
1193 |             if not entity_group_disagg:
1194 |                 entity_group_disagg += [entity]
1195 |                 if is_last_idx:
1196 |                     entity_groups += [self.group_sub_entities(entity_group_disagg)]
1197 |                 continue
1198 | 
1199 |             # If the current entity is similar and adjacent to the previous entity, append it to the disaggregated entity group
1200 |             # The split is meant to account for the "B" and "I" prefixes
1201 |             if (
1202 |                 entity["entity"].split("-")[-1] == entity_group_disagg[-1]["entity"].split("-")[-1]
1203 |                 and entity["index"] == entity_group_disagg[-1]["index"] + 1
1204 |             ):
1205 |                 entity_group_disagg += [entity]
1206 |                 # Group the entities at the last entity
1207 |                 if is_last_idx:
1208 |                     entity_groups += [self.group_sub_entities(entity_group_disagg)]
1209 |             # If the current entity is different from the previous entity, aggregate the disaggregated entity group
1210 |             else:
1211 |                 entity_groups += [self.group_sub_entities(entity_group_disagg)]
1212 |                 entity_group_disagg = [entity]
1213 |                 # If it's the last entity, add it to the entity groups
1214 |                 if is_last_idx:
1215 |                     entity_groups += [self.group_sub_entities(entity_group_disagg)]
1216 | 
1217 |         return entity_groups
1218 | 
1219 | 
1220 | NerPipeline = TokenClassificationPipeline
1221 | 
1222 | 
1223 | class QuestionAnsweringArgumentHandler(ArgumentHandler):
1224 |     """
1225 |     QuestionAnsweringPipeline requires the user to provide multiple arguments (i.e. question & context) to be mapped
1226 |     to internal :class:`~transformers.SquadExample`.
1227 | 
1228 |     QuestionAnsweringArgumentHandler manages all the possible ways to create a :class:`~transformers.SquadExample`
1229 |     from the command-line supplied arguments.
1230 |     """
1231 | 
1232 |     def __call__(self, *args, **kwargs):
1233 |         # Positional args are handled essentially the same way as X and data, so forward them to avoid duplicating
1234 |         if args is not None and len(args) > 0:
1235 |             if len(args) == 1:
1236 |                 kwargs["X"] = args[0]
1237 |             else:
1238 |                 kwargs["X"] = list(args)
1239 | 
1240 |         # Generic compatibility with sklearn and Keras
1241 |         # Batched data
1242 |         if "X" in kwargs or "data" in kwargs:
1243 |             inputs = kwargs["X"] if "X" in kwargs else kwargs["data"]
1244 | 
1245 |             if isinstance(inputs, dict):
1246 |                 inputs = [inputs]
1247 |             else:
1248 |                 # Copy to avoid overriding arguments
1249 |                 inputs = [i for i in inputs]
1250 | 
1251 |             for i, item in enumerate(inputs):
1252 |                 if isinstance(item, dict):
1253 |                     if any(k not in item for k in ["question", "context"]):
1254 |                         raise KeyError("You need to provide a dictionary with keys {question:..., context:...}")
1255 | 
1256 |                     inputs[i] = QuestionAnsweringPipeline.create_sample(**item)
1257 | 
1258 |                 elif not isinstance(item, SquadExample):
1259 |                     raise ValueError(
1260 |                         "{} argument needs to be of type (list[SquadExample | dict], SquadExample, dict)".format(
1261 |                             "X" if "X" in kwargs else "data"
1262 |                         )
1263 |                     )
1264 | 
1265 |         # Tabular input
1266 |         elif "question" in kwargs and "context" in kwargs:
1267 |             if isinstance(kwargs["question"], str):
1268 |                 kwargs["question"] = [kwargs["question"]]
1269 | 
1270 |             if isinstance(kwargs["context"], str):
1271 |                 kwargs["context"] = [kwargs["context"]]
1272 | 
1273 |             inputs = [
1274 |                 QuestionAnsweringPipeline.create_sample(q, c) for q, c in zip(kwargs["question"], kwargs["context"])
1275 |             ]
1276 |         else:
1277 |             raise ValueError("Unknown arguments {}".format(kwargs))
1278 | 
1279 |         if not isinstance(inputs, list):
1280 |             inputs = [inputs]
1281 | 
1282 |         return inputs
1283 | 
1284 | 
1285 | @add_end_docstrings(PIPELINE_INIT_ARGS)
1286 | class QuestionAnsweringPipeline(Pipeline):
1287 |     """
1288 |     Question Answering pipeline using any :obj:`ModelForQuestionAnswering`. See the
1289 |     `question answering examples <../task_summary.html#question-answering>`__ for more information.
1290 | 
1291 |     This question answering pipeline can currently be loaded from :func:`~transformers.pipeline` using the following
1292 |     task identifier: :obj:`"question-answering"`.
1293 | 
1294 |     The models that this pipeline can use are models that have been fine-tuned on a question answering task.
1295 |     See the up-to-date list of available models on
1296 |     `huggingface.co/models <https://huggingface.co/models>`__.
1297 |     """
1298 | 
1299 |     default_input_names = "question,context"
1300 | 
1301 |     def __init__(
1302 |         self,
1303 |         model: Union["PreTrainedModel", "TFPreTrainedModel"],
1304 |         tokenizer: PreTrainedTokenizer,
1305 |         modelcard: Optional[ModelCard] = None,
1306 |         framework: Optional[str] = None,
1307 |         device: int = -1,
1308 |         task: str = "",
1309 |         **kwargs
1310 |     ):
1311 |         super().__init__(
1312 |             model=model,
1313 |             tokenizer=tokenizer,
1314 |             modelcard=modelcard,
1315 |             framework=framework,
1316 |             args_parser=QuestionAnsweringArgumentHandler(),
1317 |             device=device,
1318 |             task=task,
1319 |             **kwargs,
1320 |         )
1321 | 
1322 |         # self.check_model_type(
1323 |         #     TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING if self.framework == "tf" else MODEL_FOR_QUESTION_ANSWERING_MAPPING
1324 |         # )
1325 | 
1326 |     @staticmethod
1327 |     def create_sample(
1328 |         question: Union[str, List[str]], context: Union[str, List[str]]
1329 |     ) -> Union[SquadExample, List[SquadExample]]:
1330 |         """
1331 |         QuestionAnsweringPipeline leverages the :class:`~transformers.SquadExample` internally.
1332 |         This helper method encapsulates all the logic for converting question(s) and context(s) to
1333 |         :class:`~transformers.SquadExample`.
1334 | 
1335 |         We currently support extractive question answering.
1336 | 
1337 |         Arguments:
1338 |             question (:obj:`str` or :obj:`List[str]`): The question(s) asked.
1339 |             context (:obj:`str` or :obj:`List[str]`): The context(s) in which we will look for the answer.
1340 | 
1341 |         Returns:
1342 |             One or a list of :class:`~transformers.SquadExample`: The corresponding
1343 |             :class:`~transformers.SquadExample` grouping question and context.
1344 |         """
1345 |         if isinstance(question, list):
1346 |             return [SquadExample(None, q, c, None, None, None) for q, c in zip(question, context)]
1347 |         else:
1348 |             return SquadExample(None, question, context, None, None, None)
1349 | 
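    # Input forms accepted through the argument handler (illustrative):
    #
    #     nlp = pipeline("question-answering", onnx=True)
    #     nlp(question="Who is Jim Henson?", context="Jim Henson was a nice puppet")
    #     nlp({"question": "Who is Jim Henson?", "context": "Jim Henson was a nice puppet"})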
1350 |     def __call__(self, *args, **kwargs):
1351 |         """
1352 |         Answer the question(s) given as inputs by using the context(s).
1353 | 
1354 |         Args:
1355 |             args (:class:`~transformers.SquadExample` or a list of :class:`~transformers.SquadExample`):
1356 |                 One or several :class:`~transformers.SquadExample` containing the question and context.
1357 |             X (:class:`~transformers.SquadExample` or a list of :class:`~transformers.SquadExample`, `optional`):
1358 |                 One or several :class:`~transformers.SquadExample` containing the question and context
1359 |                 (will be treated the same way as if passed as the first positional argument).
1360 |             data (:class:`~transformers.SquadExample` or a list of :class:`~transformers.SquadExample`, `optional`):
1361 |                 One or several :class:`~transformers.SquadExample` containing the question and context
1362 |                 (will be treated the same way as if passed as the first positional argument).
1363 |             question (:obj:`str` or :obj:`List[str]`):
1364 |                 One or several question(s) (must be used in conjunction with the :obj:`context` argument).
1365 |             context (:obj:`str` or :obj:`List[str]`):
1366 |                 One or several context(s) associated with the question(s) (must be used in conjunction with the
1367 |                 :obj:`question` argument).
1368 |             topk (:obj:`int`, `optional`, defaults to 1):
1369 |                 The number of answers to return (will be chosen by order of likelihood).
1370 |             doc_stride (:obj:`int`, `optional`, defaults to 128):
1371 |                 If the context is too long to fit with the question for the model, it will be split in several chunks
1372 |                 with some overlap. This argument controls the size of that overlap.
1373 |             max_answer_len (:obj:`int`, `optional`, defaults to 15):
1374 |                 The maximum length of predicted answers (e.g., only answers with a shorter length are considered).
1375 |             max_seq_len (:obj:`int`, `optional`, defaults to 384):
1376 |                 The maximum length of the total sentence (context + question) after tokenization. The context will be
1377 |                 split in several chunks (using :obj:`doc_stride`) if needed.
1378 |             max_question_len (:obj:`int`, `optional`, defaults to 64):
1379 |                 The maximum length of the question after tokenization. It will be truncated if needed.
1380 |             handle_impossible_answer (:obj:`bool`, `optional`, defaults to :obj:`False`):
1381 |                 Whether or not we accept impossible as an answer.
1382 | 
1383 |         Return:
1384 |             A :obj:`dict` or a list of :obj:`dict`: Each result comes as a dictionary with the
1385 |             following keys:
1386 | 
1387 |             - **score** (:obj:`float`) -- The probability associated to the answer.
1388 |             - **start** (:obj:`int`) -- The start index of the answer (in the tokenized version of the input).
1389 |             - **end** (:obj:`int`) -- The end index of the answer (in the tokenized version of the input).
1390 |             - **answer** (:obj:`str`) -- The answer to the question.
1391 |         """
1392 |         # Set default values
1393 |         kwargs.setdefault("topk", 1)
1394 |         kwargs.setdefault("doc_stride", 128)
1395 |         kwargs.setdefault("max_answer_len", 15)
1396 |         kwargs.setdefault("max_seq_len", 384)
1397 |         kwargs.setdefault("max_question_len", 64)
1398 |         kwargs.setdefault("handle_impossible_answer", False)
1399 | 
1400 |         if kwargs["topk"] < 1:
1401 |             raise ValueError("topk parameter should be >= 1 (got {})".format(kwargs["topk"]))
1402 | 
1403 |         if kwargs["max_answer_len"] < 1:
1404 |             raise ValueError("max_answer_len parameter should be >= 1 (got {})".format(kwargs["max_answer_len"]))
1405 | 
1406 |         # Convert inputs to features
1407 |         examples = self._args_parser(*args, **kwargs)
1408 |         features_list = [
1409 |             squad_convert_examples_to_features(
1410 |                 examples=[example],
1411 |                 tokenizer=self.tokenizer,
1412 |                 max_seq_length=kwargs["max_seq_len"],
1413 |                 doc_stride=kwargs["doc_stride"],
1414 |                 max_query_length=kwargs["max_question_len"],
1415 |                 padding_strategy=PaddingStrategy.DO_NOT_PAD.value,
1416 |                 is_training=False,
1417 |                 tqdm_enabled=False,
1418 |             )
1419 |             for example in examples
1420 |         ]
1421 |         all_answers = []
1422 |         for features, example in zip(features_list, examples):
1423 |             model_input_names = self.tokenizer.model_input_names + ["input_ids"]
1424 |             fw_args = {k: [feature.__dict__[k] for feature in features] for k in model_input_names}
1425 | 
1426 |             # Manage tensor allocation on correct device
1427 |             if self.onnx:
1428 |                 # fw_args = {k: torch.tensor(v) for (k, v) in fw_args.items()}
1429 |                 fw_args = {k: np.array(v) for (k, v) in fw_args.items()}
1430 |                 start, end = self._forward_onnx(fw_args)[:2]
1431 |             else:
1432 |                 with self.device_placement():
1433 |                     if self.framework == "tf":
1434 |                         fw_args = {k: tf.constant(v) for (k, v) in fw_args.items()}
1435 |                         start, end = self.model(fw_args)[:2]
1436 |                         start, end = start.numpy(), end.numpy()
1437 |                     else:
1438 |                         with torch.no_grad():
1439 |                             # Retrieve the score for the context tokens only (removing question tokens)
1440 |                             fw_args = {k: torch.tensor(v, device=self.device) for (k, v) in fw_args.items()}
1441 |                             start, end = self.model(**fw_args)[:2]
1442 |                             start, end = start.cpu().numpy(), end.cpu().numpy()
1443 | 
1444 |             min_null_score = 1000000  # large and positive
1445 |             answers = []
1446 |             for (feature, start_, end_) in zip(features, start, end):
1447 |                 # Ensure padded tokens & question tokens cannot belong to the set of candidate answers.
1448 |                 undesired_tokens = np.abs(np.array(feature.p_mask) - 1) & feature.attention_mask
1449 | 
1450 |                 # Generate mask
1451 |                 undesired_tokens_mask = undesired_tokens == 0.0
1452 | 
1453 |                 # Make sure non-context indexes in the tensor cannot contribute to the softmax
1454 |                 start_ = np.where(undesired_tokens_mask, -10000.0, start_)
1455 |                 end_ = np.where(undesired_tokens_mask, -10000.0, end_)
1456 | 
1457 |                 # Normalize logits and spans to retrieve the answer
1458 |                 start_ = np.exp(start_ - np.log(np.sum(np.exp(start_), axis=-1, keepdims=True)))
1459 |                 end_ = np.exp(end_ - np.log(np.sum(np.exp(end_), axis=-1, keepdims=True)))
1460 | 
1461 |                 if kwargs["handle_impossible_answer"]:
1462 |                     min_null_score = min(min_null_score, (start_[0] * end_[0]).item())
1463 | 
1464 |                 # Mask CLS
1465 |                 start_[0] = end_[0] = 0.0
1466 | 
1467 |                 starts, ends, scores = self.decode(start_, end_, kwargs["topk"], kwargs["max_answer_len"])
1468 |                 char_to_word = np.array(example.char_to_word_offset)
1469 | 
1470 |                 # Convert the answer (tokens) back to the original text
1471 |                 answers += [
1472 |                     {
1473 |                         "score": score.item(),
1474 |                         "start": np.where(char_to_word == feature.token_to_orig_map[s])[0][0].item(),
1475 |                         "end": np.where(char_to_word == feature.token_to_orig_map[e])[0][-1].item(),
1476 |                         "answer": " ".join(
1477 |                             example.doc_tokens[feature.token_to_orig_map[s] : feature.token_to_orig_map[e] + 1]
1478 |                         ),
1479 |                     }
1480 |                     for s, e, score in zip(starts, ends, scores)
1481 |                 ]
1482 | 
1483 |             if kwargs["handle_impossible_answer"]:
1484 |                 answers.append({"score": min_null_score, "start": 0, "end": 0, "answer": ""})
1485 | 
1486 |             answers = sorted(answers, key=lambda x: x["score"], reverse=True)[: kwargs["topk"]]
1487 |             all_answers += answers
1488 | 
1489 |         if len(all_answers) == 1:
1490 |             return all_answers[0]
1491 |         return all_answers
1492 | 
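    # Span-scoring sketch for `decode` below (illustrative values): the outer
    # product of start and end probabilities scores every (start, end) pair,
    # and triu/tril keep only spans with start <= end and a bounded length:
    #
    #     start, end = np.array([[0.1, 0.9]]), np.array([[0.2, 0.8]])
    #     outer = np.matmul(np.expand_dims(start, -1), np.expand_dims(end, 1))
    #     candidates = np.tril(np.triu(outer), max_answer_len - 1)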
1493 |     def decode(self, start: np.ndarray, end: np.ndarray, topk: int, max_answer_len: int) -> Tuple:
1494 |         """
1495 |         Take the output of any :obj:`ModelForQuestionAnswering` and generate probabilities for each span to be
1496 |         the actual answer.
1497 | 
1498 |         In addition, it filters out some unwanted/impossible cases like the answer length being greater than
1499 |         max_answer_len or the answer end position being before the starting position.
1500 |         The method supports outputting the k-best answers through the topk argument.
1501 | 
1502 |         Args:
1503 |             start (:obj:`np.ndarray`): Individual start probabilities for each token.
1504 |             end (:obj:`np.ndarray`): Individual end probabilities for each token.
1505 |             topk (:obj:`int`): Indicates how many possible answer span(s) to extract from the model output.
1506 |             max_answer_len (:obj:`int`): Maximum size of the answer to extract from the model's output.
1507 |         """
1508 |         # Ensure we have batch axis
1509 |         if start.ndim == 1:
1510 |             start = start[None]
1511 | 
1512 |         if end.ndim == 1:
1513 |             end = end[None]
1514 | 
1515 |         # Compute the score of each tuple(start, end) to be the real answer
1516 |         outer = np.matmul(np.expand_dims(start, -1), np.expand_dims(end, 1))
1517 | 
1518 |         # Remove candidates with end < start or end - start > max_answer_len
1519 |         candidates = np.tril(np.triu(outer), max_answer_len - 1)
1520 | 
1521 |         # Inspired by Chen et al. (https://github.com/facebookresearch/DrQA)
1522 |         scores_flat = candidates.flatten()
1523 |         if topk == 1:
1524 |             idx_sort = [np.argmax(scores_flat)]
1525 |         elif len(scores_flat) < topk:
1526 |             idx_sort = np.argsort(-scores_flat)
1527 |         else:
1528 |             idx = np.argpartition(-scores_flat, topk)[0:topk]
1529 |             idx_sort = idx[np.argsort(-scores_flat[idx])]
1530 | 
1531 |         start, end = np.unravel_index(idx_sort, candidates.shape)[1:]
1532 |         return start, end, candidates[0, start, end]
1533 | 
1534 |     def span_to_answer(self, text: str, start: int, end: int) -> Dict[str, Union[str, int]]:
1535 |         """
1536 |         When decoding from token probabilities, this method maps token indexes to actual words in
1537 |         the initial context.
1538 | 
1539 |         Args:
1540 |             text (:obj:`str`): The actual context to extract the answer from.
1541 |             start (:obj:`int`): The answer starting token index.
1542 |             end (:obj:`int`): The answer end token index.
1543 | 
1544 |         Returns:
1545 |             Dictionary like :obj:`{'answer': str, 'start': int, 'end': int}`
1546 |         """
1547 |         words = []
1548 |         token_idx = char_start_idx = char_end_idx = chars_idx = 0
1549 | 
1550 |         for i, word in enumerate(text.split(" ")):
1551 |             token = self.tokenizer.tokenize(word)
1552 | 
1553 |             # Append words if they are in the span
1554 |             if start <= token_idx <= end:
1555 |                 if token_idx == start:
1556 |                     char_start_idx = chars_idx
1557 | 
1558 |                 if token_idx == end:
1559 |                     char_end_idx = chars_idx + len(word)
1560 | 
1561 |                 words += [word]
1562 | 
1563 |             # Stop if we went over the end of the answer
1564 |             if token_idx > end:
1565 |                 break
1566 | 
1567 |             # Append the subtokenization length to the running index
1568 |             token_idx += len(token)
1569 |             chars_idx += len(word) + 1
1570 | 
1571 |         # Join text with spaces
1572 |         return {
1573 |             "answer": " ".join(words),
1574 |             "start": max(0, char_start_idx),
1575 |             "end": min(len(text), char_end_idx),
1576 |         }
1577 | 
1578 | 
1579 | # Register all the supported tasks here
1580 | SUPPORTED_TASKS = {
1581 |     "feature-extraction": {
1582 |         "impl": FeatureExtractionPipeline,
1583 |         "tf": TFAutoModel if is_tf_available() else None,
1584 |         "pt": AutoModel if is_torch_available() else None,
1585 |         "default": {"model": {"pt": "distilbert-base-cased", "tf": "distilbert-base-cased"}},
1586 |     },
1587 |     "sentiment-analysis": {
1588 |         "impl": TextClassificationPipeline,
1589 |         "tf": TFAutoModelForSequenceClassification if is_tf_available() else None,
1590 |         "pt": AutoModelForSequenceClassification if is_torch_available() else None,
1591 |         "default": {
1592 |             "model": {
1593 |                 "pt": "distilbert-base-uncased-finetuned-sst-2-english",
1594 |                 "tf": "distilbert-base-uncased-finetuned-sst-2-english",
1595 |             },
1596 |         },
1597 |     },
1598 |     "ner": {
1599 |         "impl": TokenClassificationPipeline,
1600 |         "tf": TFAutoModelForTokenClassification if is_tf_available() else None,
1601 |         "pt": AutoModelForTokenClassification if is_torch_available() else None,
1602 |         "default": {
1603 |             "model": {
1604 |                 "pt": "dbmdz/bert-large-cased-finetuned-conll03-english",
1605 |                 "tf": "dbmdz/bert-large-cased-finetuned-conll03-english",
1606 |             },
1607 |         },
1608 |     },
1609 |     "question-answering": {
1610 |         "impl": QuestionAnsweringPipeline,
1611 |         "tf": TFAutoModelForQuestionAnswering if is_tf_available() else None,
1612 |         "pt": AutoModelForQuestionAnswering if is_torch_available() else None,
1613 |         "default": {
1614 |             "model": {"pt": "distilbert-base-cased-distilled-squad", "tf": "distilbert-base-cased-distilled-squad"},
1615 |         },
1616 |     },
1617 |     "zero-shot-classification": {
1618 |         "impl": ZeroShotClassificationPipeline,
1619 |         "tf": TFAutoModelForSequenceClassification if is_tf_available() else None,
1620 |         "pt": AutoModelForSequenceClassification if is_torch_available() else None,
1621 |         "default": {
1622 |             "model": {"pt": "roberta-large-mnli", "tf": "roberta-large-mnli"},
1623 |             "config": {"pt": "roberta-large-mnli", "tf": "roberta-large-mnli"},
1624 |             "tokenizer": {"pt": "roberta-large-mnli", "tf": "roberta-large-mnli"},
1625 |         },
1626 |     },
1627 | }
1628 | 
1629 | 
1630 | def pipeline(
1631 |     task: str,
1632 |     model: Optional[str] = None,
1633 |     config: Optional[Union[str, PretrainedConfig]] = None,
1634 |     tokenizer: Optional[Union[str, PreTrainedTokenizer]] = None,
1635 |     framework: Optional[str] = None,
1636 |     onnx: bool = True,
1637 |     **kwargs
1638 | ) -> Pipeline:
1639 |     """
1640 |     Utility factory method to build a :class:`~transformers.Pipeline`.
1641 | 
1642 |     Pipelines are made of:
1643 | 
1644 |         - A `tokenizer` in charge of mapping raw textual input to tokens.
1645 |         - A `model` to make predictions from the inputs.
1646 |         - Some (optional) post-processing to enhance the model's output.
1647 | 
1648 |     Args:
1649 |         task (:obj:`str`):
1650 |             The task defining which pipeline will be returned. Currently accepted tasks are:
1651 | 
1652 |             - :obj:`"feature-extraction"`: will return a :class:`~transformers.FeatureExtractionPipeline`.
1653 |             - :obj:`"sentiment-analysis"`: will return a :class:`~transformers.TextClassificationPipeline`.
1654 |             - :obj:`"ner"`: will return a :class:`~transformers.TokenClassificationPipeline`.
1655 |             - :obj:`"question-answering"`: will return a :class:`~transformers.QuestionAnsweringPipeline`.
1656 |             - :obj:`"zero-shot-classification"`: will return a :class:`~transformers.ZeroShotClassificationPipeline`.
1657 |         model (:obj:`str`, `optional`):
1658 |             The model that will be used by the pipeline to make predictions. This should be a model identifier.
1659 | 
1660 |             If not provided, the default for the :obj:`task` will be loaded.
1661 |         config (:obj:`str` or :obj:`~transformers.PretrainedConfig`, `optional`):
1662 |             The configuration that will be used by the pipeline to instantiate the model. This can be a model
1663 |             identifier or an actual pretrained model configuration inheriting from
1664 |             :class:`~transformers.PretrainedConfig`.
1665 | 
1666 |             If not provided, the default for the :obj:`task` will be loaded.
1667 |         tokenizer (:obj:`str` or :obj:`~transformers.PreTrainedTokenizer`, `optional`):
1668 |             The tokenizer that will be used by the pipeline to encode data for the model. This can be a model
1669 |             identifier or an actual pretrained tokenizer inheriting from
1670 |             :class:`~transformers.PreTrainedTokenizer`.
1671 | 
1672 |             If not provided, the default for the :obj:`task` will be loaded.
1673 |         framework (:obj:`str`, `optional`):
1674 |             The framework to use, either :obj:`"pt"` for PyTorch or :obj:`"tf"` for TensorFlow. The specified framework
1675 |             must be installed.
1676 | 
1677 |             If no framework is specified, will default to the one currently installed. If no framework is specified
1678 |             and both frameworks are installed, will default to the framework of the :obj:`model`, or to PyTorch if no
1679 |             model is provided.
1680 |         kwargs:
1681 |             Additional keyword arguments passed along to the specific pipeline init (see the documentation for the
1682 |             corresponding pipeline class for possible values).
1683 | 
1684 |     Returns:
1685 |         :class:`~transformers.Pipeline`: A suitable pipeline for the task.
1686 | 
1687 |     Examples::
1688 | 
1689 |         from onnx_transformers import pipeline
1690 | 
1691 |         # Sentiment analysis pipeline
1692 |         pipeline('sentiment-analysis')
1693 | 
1694 |         # Question answering pipeline, specifying the checkpoint identifier
1695 |         pipeline('question-answering', model='distilbert-base-cased-distilled-squad', tokenizer='bert-base-cased')
1696 |     """
1697 |     # Retrieve the task
1698 |     if task not in SUPPORTED_TASKS:
1699 |         raise KeyError("Unknown task {}, available tasks are {}".format(task, list(SUPPORTED_TASKS.keys())))
1700 | 
1701 |     framework = framework or get_framework(model)
1702 | 
1703 |     targeted_task = SUPPORTED_TASKS[task]
1704 |     task_class, model_class = targeted_task["impl"], targeted_task[framework]
1705 | 
1706 |     # Use default model/config/tokenizer for the task if no model is provided
1707 |     if model is None:
1708 |         model = targeted_task["default"]["model"][framework]
1709 | 
1710 |     # Try to infer tokenizer from model or config name (if provided as str)
1711 |     if tokenizer is None:
1712 |         if isinstance(model, str):
1713 |             tokenizer = model
1714 |         elif isinstance(config, str):
1715 |             tokenizer = config
1716 |         else:
1717 |             # Impossible to guess what the right tokenizer is here
1718 |             raise Exception(
1719 |                 "Impossible to guess which tokenizer to use. "
1720 |                 "Please provide a PreTrainedTokenizer class or a path/identifier to a pretrained tokenizer."
1721 |             )
1722 | 
1723 |     modelcard = None
1724 |     # Try to infer modelcard from model or config name (if provided as str)
1725 |     if isinstance(model, str):
1726 |         modelcard = model
1727 |     elif isinstance(config, str):
1728 |         modelcard = config
1729 | 
1730 |     # Instantiate tokenizer if needed
1731 |     if isinstance(tokenizer, (str, tuple)):
1732 |         if isinstance(tokenizer, tuple):
1733 |             # For tuple we have (tokenizer name, {kwargs})
1734 |             tokenizer = AutoTokenizer.from_pretrained(tokenizer[0], **tokenizer[1])
1735 |         else:
1736 |             tokenizer = AutoTokenizer.from_pretrained(tokenizer)
1737 | 
1738 |     # Instantiate config
1739 |     if config is not None and isinstance(config, str):
1740 |         config = AutoConfig.from_pretrained(config)
1741 |     elif config is None:
1742 |         config = AutoConfig.from_pretrained(model)
1743 | 
1744 |     # Instantiate modelcard if needed
1745 |     if isinstance(modelcard, str):
1746 |         modelcard = ModelCard.from_pretrained(modelcard)
1747 | 
1748 |     # Instantiate model if needed
1749 |     graph_name = f"{os.path.basename(model)}.onnx"
1750 |     graph_path = ONNX_CACHE_DIR.joinpath(model, graph_name)
1751 | 
1752 |     # TODO: assert when model is not `str`
1753 |     # Instantiate the model if the graph is not found or if doing normal inference
1754 |     if (onnx and not os.path.exists(graph_path)) or not onnx:
1755 |         # Handle transparent TF/PT model conversion
1756 |         model_kwargs = {}
1757 |         if framework == "pt" and model.endswith(".h5"):
1758 |             model_kwargs["from_tf"] = True
1759 |             logger.warning(
1760 |                 "Model might be a TensorFlow model (ending with `.h5`) but TensorFlow is not available. "
1761 |                 "Trying to load the model with PyTorch."
1762 |             )
1763 |         elif framework == "tf" and model.endswith(".bin"):
1764 |             model_kwargs["from_pt"] = True
1765 |             logger.warning(
1766 |                 "Model might be a PyTorch model (ending with `.bin`) but PyTorch is not available. "
1767 |                 "Trying to load the model with TensorFlow."
1768 | ) 1769 | model = model_class.from_pretrained(model, config=config, **model_kwargs) 1770 | 1771 | return task_class( 1772 | model=model, 1773 | tokenizer=tokenizer, 1774 | modelcard=modelcard, 1775 | framework=framework, 1776 | task=task, 1777 | onnx=onnx, 1778 | graph_path=graph_path, 1779 | config=config, 1780 | **kwargs, 1781 | ) 1782 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [isort] 2 | default_section = FIRSTPARTY 3 | ensure_newline_before_comments = True 4 | force_grid_wrap = 0 5 | include_trailing_comma = True 6 | line_length = 119 7 | lines_after_imports = 2 8 | multi_line_output = 3 9 | use_parentheses = True 10 | 11 | [flake8] 12 | ignore = E203, E501, E741, W503, W605 13 | max-line-length = 119 -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | from os import path 3 | 4 | with open("README.md", encoding="utf-8") as f: 5 | long_description = f.read() 6 | 7 | extras = {} 8 | extras["testing"] = ["pytest", "pytest-xdist", "timeout-decorator", "psutil"] 9 | extras["quality"] = ["black >= 20.8b1", "isort >= 5", "flake8"] 10 | extras["dev"] = extras["testing"] + extras["quality"] 11 | 12 | setup( 13 | name="onnx_transformers", 14 | version="0.1.0", 15 | description="Accelerated nlp pipelines using Transformers and ONNX Runtime", 16 | long_description=long_description, 17 | long_description_content_type="text/markdown", 18 | author="Suraj Patil", 19 | author_email="surajp815@gmail.com", 20 | packages=find_packages(), 21 | keywords =["ONNX", "onnxruntime", "NLP", "transformer", "transformers", "inference", "fast inference",], 22 | license="Apache", 23 | url="https://github.com/patil-suraj/onnx_transformers", 24 | install_requires=[ 25 | "transformers>=3.1.0", 26 | "onnxruntime>=1.4.0", 27 | "onnxruntime-tools>=1.4.2", 28 | "psutil", 29 | ], 30 | extras_require=extras, 31 | python_requires=">=3.6.0", 32 | classifiers=[ 33 | "Development Status :: 3 - Alpha", 34 | "Intended Audience :: Developers", 35 | "License :: OSI Approved :: Apache Software License", 36 | "Operating System :: OS Independent", 37 | "Programming Language :: Python :: 3", 38 | "Programming Language :: Python :: 3.6", 39 | "Programming Language :: Python :: 3.7", 40 | "Topic :: Scientific/Engineering :: Artificial Intelligence", 41 | ], 42 | project_urls={ 43 | 'Documentation': "https://github.com/patil-suraj/onnx_transformers", 44 | 'Source': "https://github.com/patil-suraj/onnx_transformers", 45 | }, 46 | ) -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/patil-suraj/onnx_transformers/463dcfc9b7d037dedd85b1637fe44eeb58f4a5a3/tests/__init__.py -------------------------------------------------------------------------------- /tests/test_pipelines_onnx.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import torch 4 | from onnx_transformers import pipeline 5 | from onnxruntime import InferenceSession 6 | 7 | 8 | class OnnxExportTestCase(unittest.TestCase): 9 | MODEL_TO_TEST = ["bert-base-cased", "gpt2", "roberta-base"] 10 | 11 | def test_onnx_graph_creation(self): 12 | try: 13 | nlp = 
pipeline("feature-extraction", onnx=True) 14 | assert isinstance(nlp.onnx_model, InferenceSession) 15 | except Exception as e: 16 | self.fail(e) 17 | 18 | def test_feature_extraction_forward(self): 19 | self._test_pipeline_forward("feature-extraction", "My name is Bert") 20 | 21 | def test_sentiment_analysis_forward(self): 22 | self._test_pipeline_forward("sentiment-analysis", "This is a positive text.") 23 | 24 | def test_ner_forward(self): 25 | self._test_pipeline_forward("ner", "My name is Bert") 26 | 27 | def test_question_answering_forward(self): 28 | self._test_pipeline_forward( 29 | "question-answering", {"question": "Who is Jim Henson ?", "context": "Jim Henson was a nice puppet"} 30 | ) 31 | 32 | def test_zero_shot_classification_forward(self): 33 | sequence = "Who are you voting for in 2020?" 34 | candidate_labels = ["economics", "politics", "public health"] 35 | 36 | try: 37 | # test onnx forward 38 | nlp = pipeline("zero-shot-classification", onnx=True) 39 | nlp(sequence, candidate_labels) 40 | 41 | # test torch forward 42 | nlp = pipeline("zero-shot-classification", onnx=False) 43 | assert isinstance(nlp.model, torch.nn.Module) 44 | nlp(sequence, candidate_labels) 45 | except Exception as e: 46 | self.fail(e) 47 | 48 | def _test_pipeline_forward(self, task, example): 49 | try: 50 | # test onnx forward 51 | nlp = pipeline(task, onnx=True) 52 | nlp(example) 53 | 54 | # test torch forward 55 | nlp = pipeline(task, onnx=False) 56 | assert isinstance(nlp.model, torch.nn.Module) 57 | nlp(example) 58 | except Exception as e: 59 | self.fail(e) 60 | --------------------------------------------------------------------------------