├── .github
│   └── workflows
│       └── colab-badges.yml
├── .gitignore
├── LICENSE
├── README.md
├── ai-superstream
│   ├── README.md
│   ├── images
│   │   ├── about.png
│   │   ├── cat.png
│   │   ├── fp32-to-int8.png
│   │   ├── hardware.png
│   │   ├── marie-curie.png
│   │   ├── onnx-ort.png
│   │   ├── optimum.jpeg
│   │   └── prod.jpeg
│   └── optimum-tour.ipynb
├── bosch
│   ├── README.md
│   └── dynamic-quantization.ipynb
├── datacamp-webinar
│   ├── README.md
│   ├── diffusers-webinar.ipynb
│   └── slides.pdf
├── europython-2022
│   ├── 01-transformers-tour.ipynb
│   ├── 02-text-classification.ipynb
│   ├── 03-gradio-demo.ipynb
│   ├── How To Train Your Graphics Card To Read With Transformers.pdf
│   ├── README.md
│   └── images
│       ├── clf_arch.png
│       ├── gen_steps.png
│       ├── ner_arch.png
│       ├── object_detection.png
│       ├── pipeline.png
│       ├── qa_arch.png
│       ├── speech2text.png
│       └── tapas.png
├── fewshot-learning-in-production
│   ├── README.md
│   └── setfit-optimisation.ipynb
├── luzern-university
│   ├── 01_transformers_tour.ipynb
│   ├── 02-text-classification.ipynb
│   ├── 03-gradio-demo.ipynb
│   ├── 04-text-generation.ipynb
│   ├── 05-stable-diffusion.ipynb
│   ├── 06-stable-diffusion-gradio.ipynb
│   ├── README.md
│   └── images
│       ├── clf_arch.png
│       ├── gen_steps.png
│       ├── ner_arch.png
│       ├── object_detection.png
│       ├── pipeline.png
│       ├── qa_arch.png
│       ├── speech2text.png
│       └── tapas.png
├── machine-learning-tokyo
│   ├── 01-transformers-tour.ipynb
│   ├── 02-text-classification.ipynb
│   ├── 03-gradio-demo.ipynb
│   ├── README.md
│   ├── images
│   │   ├── clf_arch.png
│   │   ├── gen_steps.png
│   │   ├── ner_arch.png
│   │   ├── object_detection.png
│   │   ├── pipeline.png
│   │   ├── qa_arch.png
│   │   ├── speech2text.png
│   │   └── tapas.png
│   └── slides.pdf
├── mlops-world
│   ├── README.md
│   ├── dynamic-quantization.ipynb
│   ├── images
│   │   ├── dynamic-quantization.png
│   │   └── static-quantization.png
│   ├── slides.pdf
│   ├── static-quantization.ipynb
│   └── utils.py
├── nlp-zurich
│   ├── 01-transformers_tour.ipynb
│   ├── 02-text-classification.ipynb
│   ├── 03-gradio-demo.ipynb
│   ├── README.md
│   ├── images
│   │   ├── clf_arch.png
│   │   ├── gen_steps.png
│   │   ├── ner_arch.png
│   │   ├── object_detection.png
│   │   ├── pipeline.png
│   │   ├── qa_arch.png
│   │   ├── speech2text.png
│   │   └── tapas.png
│   └── slides.pdf
└── transformers-book-reading-group
    ├── session-1
    │   ├── 01_tour_of_transformers.ipynb
    │   └── transformers-reading-group-session1.pdf
    ├── session-2
    │   ├── 02_how_to_open_source.ipynb
    │   ├── images
    │   │   ├── book_cover.png
    │   │   ├── chapter02_hf-libraries.png
    │   │   └── tokenization_pipeline.svg
    │   └── slides.ipynb
    ├── session-3
    │   ├── Chapter_3.ipynb
    │   └── HF reading group session 3.pdf
    ├── session-4
    │   └── slides.pdf
    └── session-5
        └── slides-text-generation.pdf
--------------------------------------------------------------------------------
/.github/workflows/colab-badges.yml:
--------------------------------------------------------------------------------
 1 | name: Workshop workflow
 2 | on: [push]
 3 | 
 4 | jobs:
 5 |   build:
 6 |     name: Add Colab links
 7 |     runs-on: ubuntu-latest
 8 |     steps:
 9 |       - name: Checkout first
10 |         id: checkout
11 |         uses: actions/checkout@v2
12 | 
13 |       - name: Add/update badges
14 |         id: badges
15 |         uses: trsvchn/colab-badge-action@v4
16 |         with:
17 |           check: 'all'
18 |           update: true
19 |           target_branch: main
20 |           target_repository: huggingface/workshops
21 | 
22 |       - name: Commit & push changes
23 |         uses: stefanzweifel/git-auto-commit-action@v4
24 |         with:
25 |           commit_message: Add/update Colab badges
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
 1 | # Byte-compiled / optimized / DLL files
 2 | __pycache__/
 3 | *.py[cod]
 4 | *$py.class
 5 | 
 6 | # C extensions
 7 | *.so
 8 | 
 9 | # Distribution / packaging
10 | .Python
11 | build/
12 
| develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | # mac OS 132 | .DS_Store -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 
25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # 🤗 Workshops
2 | 
3 | Materials for workshops on the Hugging Face ecosystem
4 | 
--------------------------------------------------------------------------------
/ai-superstream/README.md:
--------------------------------------------------------------------------------
 1 | # AI Superstream: NLP in Production
 2 | 
 3 | > Here you can find all the materials for the workshop on _Accelerating Transformers with Hugging Face Optimum_ at O'Reilly's [AI Superstream: NLP in Production](https://learning.oreilly.com/live-events/ai-superstream-nlp-in-production/0636920064955/0636920064953/)!
 4 | 
 5 | ## Getting started
 6 | 
 7 | If you want to follow along, make sure you have:
 8 | 
 9 | * Signed up for a Hugging Face account: https://huggingface.co/join
10 | * Signed up to Google Colab so you can run code easily: https://colab.research.google.com/
11 | 
--------------------------------------------------------------------------------
/ai-superstream/images/about.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/ai-superstream/images/about.png
--------------------------------------------------------------------------------
/ai-superstream/images/cat.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/ai-superstream/images/cat.png
--------------------------------------------------------------------------------
/ai-superstream/images/fp32-to-int8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/ai-superstream/images/fp32-to-int8.png
--------------------------------------------------------------------------------
/ai-superstream/images/hardware.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/ai-superstream/images/hardware.png
--------------------------------------------------------------------------------
/ai-superstream/images/marie-curie.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/ai-superstream/images/marie-curie.png
--------------------------------------------------------------------------------
/ai-superstream/images/onnx-ort.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/ai-superstream/images/onnx-ort.png
--------------------------------------------------------------------------------
/ai-superstream/images/optimum.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/ai-superstream/images/optimum.jpeg
--------------------------------------------------------------------------------
/ai-superstream/images/prod.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/ai-superstream/images/prod.jpeg -------------------------------------------------------------------------------- /ai-superstream/optimum-tour.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "d22c5c66", 6 | "metadata": {}, 7 | "source": [ 8 | "\"Open" 9 | ] 10 | }, 11 | { 12 | "cell_type": "markdown", 13 | "id": "d5512190-6089-45f6-999f-0f3cd0c07548", 14 | "metadata": {}, 15 | "source": [ 16 | "# Accelerating Transformers with Hugging Face Optimum\n", 17 | "\n", 18 | "Lewis Tunstall (open-source @ Hugging Face)\n", 19 | "\n", 20 | "\ud83d\udc26 `@_lewtun`" 21 | ] 22 | }, 23 | { 24 | "cell_type": "code", 25 | "execution_count": null, 26 | "id": "2e29d3e0-a464-4082-bff0-0182db3f32c3", 27 | "metadata": {}, 28 | "outputs": [], 29 | "source": [] 30 | }, 31 | { 32 | "cell_type": "code", 33 | "execution_count": null, 34 | "id": "bdaf20df-151e-4260-ba12-c6bfe90eec3c", 35 | "metadata": {}, 36 | "outputs": [], 37 | "source": [] 38 | }, 39 | { 40 | "cell_type": "code", 41 | "execution_count": null, 42 | "id": "2f7dbd23-050e-4aa3-ba12-da722f6fa96b", 43 | "metadata": {}, 44 | "outputs": [], 45 | "source": [] 46 | }, 47 | { 48 | "cell_type": "code", 49 | "execution_count": null, 50 | "id": "3b5e53d3-b56a-43a7-9807-48d01ea2c95e", 51 | "metadata": {}, 52 | "outputs": [], 53 | "source": [] 54 | }, 55 | { 56 | "cell_type": "code", 57 | "execution_count": null, 58 | "id": "a20e6104-e455-4b06-85b7-96782c0d6291", 59 | "metadata": {}, 60 | "outputs": [], 61 | "source": [] 62 | }, 63 | { 64 | "cell_type": "code", 65 | "execution_count": null, 66 | "id": "a3fac18b-d473-46fa-af3e-4cb632f771a3", 67 | "metadata": {}, 68 | "outputs": [], 69 | "source": [] 70 | }, 71 | { 72 | "cell_type": "code", 73 | "execution_count": null, 74 | "id": "7b808e27-e3b0-4a36-bd44-cdaa295e2d7a", 75 | "metadata": {}, 76 | "outputs": [], 77 | "source": [] 78 | }, 79 | { 80 | "cell_type": "code", 81 | "execution_count": null, 82 | "id": "a1374980-6c13-4a6e-aa29-e23752d0da5d", 83 | "metadata": {}, 84 | "outputs": [], 85 | "source": [] 86 | }, 87 | { 88 | "cell_type": "code", 89 | "execution_count": null, 90 | "id": "5a357469-00a8-43cc-8bfe-f491e367b3d6", 91 | "metadata": {}, 92 | "outputs": [], 93 | "source": [] 94 | }, 95 | { 96 | "cell_type": "code", 97 | "execution_count": null, 98 | "id": "98beeef6-8c32-4588-b298-1298995427da", 99 | "metadata": {}, 100 | "outputs": [], 101 | "source": [] 102 | }, 103 | { 104 | "cell_type": "markdown", 105 | "id": "def9147f-41c2-4411-b8d7-5039f0df1d58", 106 | "metadata": {}, 107 | "source": [ 108 | "## Who is Lewis?" 109 | ] 110 | }, 111 | { 112 | "cell_type": "markdown", 113 | "id": "45d5ce5e-c30c-47bf-ae76-0af809c1f078", 114 | "metadata": {}, 115 | "source": [ 116 | "
\n", 117 | " \"About\n", 118 | "
" 119 | ] 120 | }, 121 | { 122 | "cell_type": "markdown", 123 | "id": "f1f44e9c-4aaf-4f34-a165-69c85c9a49ce", 124 | "metadata": {}, 125 | "source": [ 126 | "* PhD in Physics from University of Adelaide, Australia\n", 127 | "* Co-author of O'Reilly book [_Natural Language Processing with Transformers_](https://transformersbook.com/)\n", 128 | "* Co-developer of the **free** [Hugging Face course](https://huggingface.co/course/chapter1/1)\n", 129 | "* Maintainer of ONNX API in `transformers`" 130 | ] 131 | }, 132 | { 133 | "cell_type": "code", 134 | "execution_count": null, 135 | "id": "668fd108-ca54-409c-908c-2efbc541e9cb", 136 | "metadata": {}, 137 | "outputs": [], 138 | "source": [] 139 | }, 140 | { 141 | "cell_type": "code", 142 | "execution_count": null, 143 | "id": "7bae6037-b173-4ee6-8ba5-c28dad8b9c0a", 144 | "metadata": {}, 145 | "outputs": [], 146 | "source": [] 147 | }, 148 | { 149 | "cell_type": "code", 150 | "execution_count": null, 151 | "id": "6e08e039-400f-463d-8ae2-befb01afc545", 152 | "metadata": {}, 153 | "outputs": [], 154 | "source": [] 155 | }, 156 | { 157 | "cell_type": "code", 158 | "execution_count": null, 159 | "id": "f620abf4-7cf4-4fee-b2f0-1e05d6986701", 160 | "metadata": {}, 161 | "outputs": [], 162 | "source": [] 163 | }, 164 | { 165 | "cell_type": "markdown", 166 | "id": "4c7f1b5f-f5ba-4571-a794-2fbf697e595b", 167 | "metadata": {}, 168 | "source": [ 169 | "## Outline\n", 170 | "\n", 171 | "* What is Optimum?\n", 172 | "* Question answering as a case study\n", 173 | "* Making models faster with quantization\n", 174 | "* Optimizing inference with ONNX and ONNX Runtime" 175 | ] 176 | }, 177 | { 178 | "cell_type": "code", 179 | "execution_count": null, 180 | "id": "228ca919-fb1d-45f7-8871-b9ca78c13e4a", 181 | "metadata": {}, 182 | "outputs": [], 183 | "source": [] 184 | }, 185 | { 186 | "cell_type": "code", 187 | "execution_count": null, 188 | "id": "890031d0-e9c9-4c0a-aaf5-f0c00dc4642e", 189 | "metadata": {}, 190 | "outputs": [], 191 | "source": [] 192 | }, 193 | { 194 | "cell_type": "code", 195 | "execution_count": null, 196 | "id": "fc5a6bcd-d778-4891-9736-99b76f42e22c", 197 | "metadata": {}, 198 | "outputs": [], 199 | "source": [] 200 | }, 201 | { 202 | "cell_type": "code", 203 | "execution_count": null, 204 | "id": "d191f88e-27ad-4b0f-8e26-345330f2aff0", 205 | "metadata": {}, 206 | "outputs": [], 207 | "source": [] 208 | }, 209 | { 210 | "cell_type": "markdown", 211 | "id": "860983ce-0a16-45bf-aa96-fc39c6498e2a", 212 | "metadata": {}, 213 | "source": [ 214 | "## What is Optimum?" 215 | ] 216 | }, 217 | { 218 | "cell_type": "markdown", 219 | "id": "4e793d56-9a67-41a9-9236-78b5466d6acf", 220 | "metadata": {}, 221 | "source": [ 222 | "
\n", 223 | " \"About\n", 224 | "
\n", 225 | "\n", 226 | "* An open-source library and extension of Hugging Face Transformers\n", 227 | "* Provides a unified API of performance optimization tools to achieve maximum efficiency to train and run models on accelerated hardware\n", 228 | "* Can be used for accelerated training, quantization, graph optimization, and inference with support for `transformers` pipelines." 229 | ] 230 | }, 231 | { 232 | "cell_type": "code", 233 | "execution_count": null, 234 | "id": "a222d57d-14a1-4893-94e4-71eaaf829196", 235 | "metadata": {}, 236 | "outputs": [], 237 | "source": [] 238 | }, 239 | { 240 | "cell_type": "markdown", 241 | "id": "bc859a79-b61b-43a3-85cd-c90b5300e365", 242 | "metadata": {}, 243 | "source": [ 244 | "
\n", 245 | " \"About\n", 246 | "
" 247 | ] 248 | }, 249 | { 250 | "cell_type": "markdown", 251 | "id": "a10f55c1-13c1-49e1-a592-821f7d16a932", 252 | "metadata": {}, 253 | "source": [ 254 | "Today:\n", 255 | "\n", 256 | "* Running inference with **ONNX Runtime** in Optimum\n", 257 | "* Dynamic quantization as a demo" 258 | ] 259 | }, 260 | { 261 | "cell_type": "code", 262 | "execution_count": null, 263 | "id": "e5d06351-e926-48c0-9b0e-787aaa5bf523", 264 | "metadata": {}, 265 | "outputs": [], 266 | "source": [] 267 | }, 268 | { 269 | "cell_type": "markdown", 270 | "id": "44f4d0ef-a6a6-47bc-bd20-84d67b2b159f", 271 | "metadata": {}, 272 | "source": [ 273 | "## Question answering as a case study" 274 | ] 275 | }, 276 | { 277 | "cell_type": "markdown", 278 | "id": "d0374c68-4569-455e-acd9-be3681e6e916", 279 | "metadata": {}, 280 | "source": [ 281 | "
\n", 282 | " \"About\n", 283 | "
" 284 | ] 285 | }, 286 | { 287 | "cell_type": "markdown", 288 | "id": "2d388663-dbdb-4687-ba0f-db839e5a86c8", 289 | "metadata": {}, 290 | "source": [ 291 | "* Low latencies critical for user experience!" 292 | ] 293 | }, 294 | { 295 | "cell_type": "code", 296 | "execution_count": null, 297 | "id": "1e2b1b46-4ea0-457e-b5f8-fca0b7ac7e9d", 298 | "metadata": {}, 299 | "outputs": [], 300 | "source": [] 301 | }, 302 | { 303 | "cell_type": "code", 304 | "execution_count": 42, 305 | "id": "3d0db8f6-c8c0-47ea-bf2f-4ceebef4515a", 306 | "metadata": {}, 307 | "outputs": [ 308 | { 309 | "data": { 310 | "text/html": [ 311 | "\n", 312 | " \n", 320 | " " 321 | ], 322 | "text/plain": [ 323 | "" 324 | ] 325 | }, 326 | "execution_count": 42, 327 | "metadata": {}, 328 | "output_type": "execute_result" 329 | } 330 | ], 331 | "source": [ 332 | "import IPython\n", 333 | "\n", 334 | "IPython.display.IFrame(\"https://hf.space/gradioiframe/abidlabs/question-answering-simple/+\", width=1200, height=800)" 335 | ] 336 | }, 337 | { 338 | "cell_type": "code", 339 | "execution_count": 27, 340 | "id": "192d9d8a-e43f-43d1-bb31-c1265f82941a", 341 | "metadata": {}, 342 | "outputs": [ 343 | { 344 | "data": { 345 | "text/plain": [ 346 | "{'score': 0.7219798564910889, 'start': 277, 'end': 281, 'answer': '1903'}" 347 | ] 348 | }, 349 | "execution_count": 27, 350 | "metadata": {}, 351 | "output_type": "execute_result" 352 | } 353 | ], 354 | "source": [ 355 | "from transformers import AutoTokenizer, AutoModelForQuestionAnswering, pipeline\n", 356 | "\n", 357 | "model = AutoModelForQuestionAnswering.from_pretrained(\"deepset/roberta-base-squad2\")\n", 358 | "tokenizer = AutoTokenizer.from_pretrained(\"deepset/roberta-base-squad2\")\n", 359 | "\n", 360 | "question_answerer = pipeline(\"question-answering\", model=model, tokenizer=tokenizer, handle_impossible_answer=True)\n", 361 | "\n", 362 | "context = \"\"\"Marie Sklodowska was born in Warsaw, Poland, to a family of teachers who believed strongly in education. She moved to Paris to continue her studies and there met Pierre Curie, who became both her husband and colleague in the field of radioactivity. The couple later shared the 1903 Nobel Prize in Physics. Marie was widowed in 1906, but continued the couple's work and went on to become the first person ever to be awarded two Nobel Prizes. During World War I, Curie organized mobile X-ray teams. The Curies' daughter, Irene, was also jointly awarded the Nobel Prize in Chemistry alongside her husband, Frederic Joliot.\"\"\"\n", 363 | "question = \"when did marie curie win her first nobel prize?\"\n", 364 | "pred = question_answerer(question, context)\n", 365 | "pred" 366 | ] 367 | }, 368 | { 369 | "cell_type": "markdown", 370 | "id": "921fc405-34cc-4760-8158-9d6017a9664c", 371 | "metadata": {}, 372 | "source": [ 373 | "* Model looks good, deploy to prod?" 
374 | ] 375 | }, 376 | { 377 | "cell_type": "code", 378 | "execution_count": null, 379 | "id": "190d3ac2-f8c6-4696-b877-578b33dc7468", 380 | "metadata": {}, 381 | "outputs": [], 382 | "source": [] 383 | }, 384 | { 385 | "cell_type": "code", 386 | "execution_count": null, 387 | "id": "df542d6b-02b7-4339-bed5-cf3b24570c19", 388 | "metadata": {}, 389 | "outputs": [], 390 | "source": [] 391 | }, 392 | { 393 | "cell_type": "code", 394 | "execution_count": null, 395 | "id": "331b8491-1490-4785-ab43-bce0f3fcadac", 396 | "metadata": {}, 397 | "outputs": [], 398 | "source": [] 399 | }, 400 | { 401 | "cell_type": "markdown", 402 | "id": "53eafe5a-7654-423a-bf6a-e4d475434125", 403 | "metadata": {}, 404 | "source": [ 405 | "
\n", 406 | " \"About\n", 407 | "
" 408 | ] 409 | }, 410 | { 411 | "cell_type": "markdown", 412 | "id": "c53ec027-081d-4cde-a9ad-ad1e639ef11d", 413 | "metadata": {}, 414 | "source": [ 415 | "Deployment involves tradoff among several constraints:\n", 416 | "\n", 417 | "* Model performance (accuracy, F1 score etc)\n", 418 | "* Latency\n", 419 | "* Memory" 420 | ] 421 | }, 422 | { 423 | "cell_type": "code", 424 | "execution_count": null, 425 | "id": "4176c1b3-8a60-4d79-acde-8cf04bf474c5", 426 | "metadata": {}, 427 | "outputs": [], 428 | "source": [] 429 | }, 430 | { 431 | "cell_type": "code", 432 | "execution_count": 28, 433 | "id": "f81d9c14-1f15-47e1-9681-1745b5991ab6", 434 | "metadata": {}, 435 | "outputs": [], 436 | "source": [ 437 | "from time import perf_counter\n", 438 | "import numpy as np \n", 439 | "\n", 440 | "def measure_latency(pipe):\n", 441 | " latencies = []\n", 442 | " # warm up\n", 443 | " for _ in range(10):\n", 444 | " _ = pipe(question=question, context=context)\n", 445 | " # Timed run\n", 446 | " for _ in range(100):\n", 447 | " start_time = perf_counter()\n", 448 | " _ = pipe(question=question, context=context)\n", 449 | " latency = perf_counter() - start_time\n", 450 | " latencies.append(latency)\n", 451 | " # Compute run statistics\n", 452 | " time_avg_ms = 1000 * np.mean(latencies)\n", 453 | " time_std_ms = 1000 * np.std(latencies)\n", 454 | " return f\"average latency (ms) - {time_avg_ms:.2f} +\\- {time_std_ms:.2f}\"" 455 | ] 456 | }, 457 | { 458 | "cell_type": "code", 459 | "execution_count": 29, 460 | "id": "2e39e6f3-d5ea-4a14-ae34-937ab5948ef1", 461 | "metadata": {}, 462 | "outputs": [ 463 | { 464 | "name": "stdout", 465 | "output_type": "stream", 466 | "text": [ 467 | "Vanilla model average latency (ms) - 96.80 +\\- 0.22\n" 468 | ] 469 | } 470 | ], 471 | "source": [ 472 | "print(f\"Vanilla model {measure_latency(question_answerer)}\")" 473 | ] 474 | }, 475 | { 476 | "cell_type": "code", 477 | "execution_count": null, 478 | "id": "5233f44f-b3ce-4483-8976-49d537c1e549", 479 | "metadata": {}, 480 | "outputs": [], 481 | "source": [] 482 | }, 483 | { 484 | "cell_type": "code", 485 | "execution_count": null, 486 | "id": "06f907e9-8392-4295-bf77-0f4c88eacb2c", 487 | "metadata": {}, 488 | "outputs": [], 489 | "source": [] 490 | }, 491 | { 492 | "cell_type": "markdown", 493 | "id": "f89e7bc8-2e93-4765-8bb1-330691b4354a", 494 | "metadata": {}, 495 | "source": [ 496 | "## Making models faster with quantization\n", 497 | "\n", 498 | "Basic idea:\n", 499 | "\n", 500 | "* Represent weights and activations with **low-precision data types** like 8-bit integer instead of 32-bit floating point.\n", 501 | "* Less memory storage & faster matmuls!\n", 502 | "\n", 503 | "In practice:\n", 504 | "\n", 505 | "* Map range $[f_\\mathrm{min}, f_\\mathrm{max}]$ of floating-point values to smaller range $[q_\\mathrm{min}, q_\\mathrm{max}]$:\n", 506 | "\n", 507 | "$$ f = \\left(\\frac{f_\\mathrm{max} - f_\\mathrm{min}}{q_\\mathrm{max} - q_\\mathrm{min}} \\right)(q - Z) = S(q-Z) $$\n", 508 | "\n", 509 | "* $S$ is _scale factor_ and $Z$ the _zero point_ (where the quantized value of $f=0$)" 510 | ] 511 | }, 512 | { 513 | "cell_type": "markdown", 514 | "id": "4fbb9a36-1efd-4b14-8ed5-cdd59c552f22", 515 | "metadata": {}, 516 | "source": [ 517 | "
\n", 518 | " \"Quantization\"\n", 519 | "
\n", 520 | "\n", 521 | "_Figure courtesy of Manas Sahni_" 522 | ] 523 | }, 524 | { 525 | "cell_type": "markdown", 526 | "id": "f8776304-7ed8-441c-943c-4f827f8618c7", 527 | "metadata": {}, 528 | "source": [ 529 | "Three main ways to quantize:\n", 530 | "\n", 531 | "* **Dynamic quantization:** quantize weights & activations on-the-fly. Simplest to start with.\n", 532 | "* **Static quantization:** precompute quantization scheme by observing activation patterns on sample of data. Generally ives better latency, but more complex to calibrate\n", 533 | "* **Quantization aware training:** simulate quantization during training with \"fake\" quantization of FP32 values. " 534 | ] 535 | }, 536 | { 537 | "cell_type": "code", 538 | "execution_count": null, 539 | "id": "6b8f0449-984e-4af3-be2f-c71f1991d9ea", 540 | "metadata": {}, 541 | "outputs": [], 542 | "source": [] 543 | }, 544 | { 545 | "cell_type": "markdown", 546 | "id": "500a472a-f6ce-48d4-93e2-c4932a50835b", 547 | "metadata": {}, 548 | "source": [ 549 | "## Optimizing inference with ONNX and ONNX Runtime" 550 | ] 551 | }, 552 | { 553 | "cell_type": "markdown", 554 | "id": "bc5f5a2b-4a7a-4889-90b9-ca125c1109b0", 555 | "metadata": {}, 556 | "source": [ 557 | "### Step 1: Install Optimum" 558 | ] 559 | }, 560 | { 561 | "cell_type": "code", 562 | "execution_count": null, 563 | "id": "cfe36b01-8128-4438-9fb9-0c6e5365f85d", 564 | "metadata": {}, 565 | "outputs": [], 566 | "source": [] 567 | }, 568 | { 569 | "cell_type": "code", 570 | "execution_count": null, 571 | "id": "84785ee0-451a-45c9-bd51-ddcec03041c3", 572 | "metadata": {}, 573 | "outputs": [], 574 | "source": [ 575 | "%pip install \"optimum[onnxruntime]==1.2.0\"" 576 | ] 577 | }, 578 | { 579 | "cell_type": "code", 580 | "execution_count": null, 581 | "id": "2984eaca-027c-41ed-b042-355663eeb847", 582 | "metadata": {}, 583 | "outputs": [], 584 | "source": [] 585 | }, 586 | { 587 | "cell_type": "markdown", 588 | "id": "a17b5a36-8059-45b8-96ad-04f301dce78e", 589 | "metadata": {}, 590 | "source": [ 591 | "### Step 2: Export the model to ONNX" 592 | ] 593 | }, 594 | { 595 | "cell_type": "code", 596 | "execution_count": 4, 597 | "id": "dd5685f1-c917-4b4c-bcd8-598f152a5e84", 598 | "metadata": {}, 599 | "outputs": [ 600 | { 601 | "data": { 602 | "application/vnd.jupyter.widget-view+json": { 603 | "model_id": "a88c56cfe8e24fc084b06f56e2b205a2", 604 | "version_major": 2, 605 | "version_minor": 0 606 | }, 607 | "text/plain": [ 608 | "Downloading: 0%| | 0.00/571 [00:00\n", 685 | " \"Quantization\"\n", 686 | "" 687 | ] 688 | }, 689 | { 690 | "cell_type": "code", 691 | "execution_count": null, 692 | "id": "0b4a4ef0-aeb8-405c-8bda-4b8beb1caee3", 693 | "metadata": {}, 694 | "outputs": [], 695 | "source": [] 696 | }, 697 | { 698 | "cell_type": "code", 699 | "execution_count": 5, 700 | "id": "e922c46d-5692-45e6-9973-eda27882c3ab", 701 | "metadata": {}, 702 | "outputs": [], 703 | "source": [ 704 | "from optimum.onnxruntime import ORTQuantizer\n", 705 | "from optimum.onnxruntime.configuration import AutoQuantizationConfig\n", 706 | "\n", 707 | "# create ORTQuantizer and define quantization configuration\n", 708 | "quantizer = ORTQuantizer.from_pretrained(model_id, feature=task)\n", 709 | "qconfig = AutoQuantizationConfig.avx512_vnni(is_static=False, per_channel=True)\n", 710 | "\n", 711 | "# apply the quantization configuration to the model\n", 712 | "quantized_path = quantizer.export(\n", 713 | " onnx_model_path=onnx_path / \"model.onnx\",\n", 714 | " onnx_quantized_model_output_path=onnx_path / 
\"model-quantized.onnx\",\n", 715 | " quantization_config=qconfig,\n", 716 | ")" 717 | ] 718 | }, 719 | { 720 | "cell_type": "code", 721 | "execution_count": 31, 722 | "id": "348466dc-54dc-4845-af36-8eab97a23e4c", 723 | "metadata": {}, 724 | "outputs": [ 725 | { 726 | "name": "stdout", 727 | "output_type": "stream", 728 | "text": [ 729 | "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n", 730 | "To disable this warning, you can either:\n", 731 | "\t- Avoid using `tokenizers` before the fork if possible\n", 732 | "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n", 733 | "config.json model.onnx\t\t special_tokens_map.json tokenizer.json\n", 734 | "merges.txt model-quantized.onnx tokenizer_config.json vocab.json\n" 735 | ] 736 | } 737 | ], 738 | "source": [ 739 | "!ls onnx" 740 | ] 741 | }, 742 | { 743 | "cell_type": "code", 744 | "execution_count": 32, 745 | "id": "d630192f-65eb-4e24-8a12-83dcd59dfe98", 746 | "metadata": {}, 747 | "outputs": [ 748 | { 749 | "name": "stdout", 750 | "output_type": "stream", 751 | "text": [ 752 | "Vanilla Onnx Model file size: 473.34 MB\n", 753 | "Quantized Onnx Model file size: 230.83 MB\n" 754 | ] 755 | } 756 | ], 757 | "source": [ 758 | "import os\n", 759 | "# get model file size\n", 760 | "size = os.path.getsize(onnx_path / \"model.onnx\")/(1024*1024)\n", 761 | "print(f\"Vanilla Onnx Model file size: {size:.2f} MB\")\n", 762 | "size = os.path.getsize(onnx_path / \"model-quantized.onnx\")/(1024*1024)\n", 763 | "print(f\"Quantized Onnx Model file size: {size:.2f} MB\")" 764 | ] 765 | }, 766 | { 767 | "cell_type": "markdown", 768 | "id": "711966fd-9dcc-4b8d-b525-73689a420198", 769 | "metadata": {}, 770 | "source": [ 771 | "### Step 4: Run inference with Transformers pipelines" 772 | ] 773 | }, 774 | { 775 | "cell_type": "code", 776 | "execution_count": null, 777 | "id": "668b8de1-a513-45e8-b17d-1cb50fd96889", 778 | "metadata": {}, 779 | "outputs": [], 780 | "source": [] 781 | }, 782 | { 783 | "cell_type": "markdown", 784 | "id": "2ec81e44-7784-4d5f-bc77-68dbb1f33d08", 785 | "metadata": {}, 786 | "source": [ 787 | "
\n", 788 | " \"Quantization\"\n", 789 | "
" 790 | ] 791 | }, 792 | { 793 | "cell_type": "code", 794 | "execution_count": null, 795 | "id": "1ae914d2-fd44-41ba-9f5b-df0417926eb5", 796 | "metadata": {}, 797 | "outputs": [], 798 | "source": [] 799 | }, 800 | { 801 | "cell_type": "code", 802 | "execution_count": null, 803 | "id": "7298af6c-0476-4f94-9ce6-d10cd24ad1d3", 804 | "metadata": {}, 805 | "outputs": [], 806 | "source": [] 807 | }, 808 | { 809 | "cell_type": "code", 810 | "execution_count": null, 811 | "id": "cbad9cd9-9a4d-4ec1-8eb4-3fc373b7632b", 812 | "metadata": {}, 813 | "outputs": [], 814 | "source": [] 815 | }, 816 | { 817 | "cell_type": "code", 818 | "execution_count": null, 819 | "id": "37f3a95b-93e1-45f6-bc3d-ae8cff75ff06", 820 | "metadata": {}, 821 | "outputs": [], 822 | "source": [] 823 | }, 824 | { 825 | "cell_type": "code", 826 | "execution_count": 33, 827 | "id": "7a13891c-e0fc-498e-b5f5-ca49e0a108bf", 828 | "metadata": {}, 829 | "outputs": [ 830 | { 831 | "data": { 832 | "text/plain": [ 833 | "{'score': 0.4670557975769043, 'start': 277, 'end': 281, 'answer': '1903'}" 834 | ] 835 | }, 836 | "execution_count": 33, 837 | "metadata": {}, 838 | "output_type": "execute_result" 839 | } 840 | ], 841 | "source": [ 842 | "# load quantized model\n", 843 | "quantized_model = ORTModelForQuestionAnswering.from_pretrained(onnx_path, file_name=\"model-quantized.onnx\")\n", 844 | "\n", 845 | "# test the quantized model with using transformers pipeline\n", 846 | "quantized_optimum_qa = pipeline(task, model=quantized_model, tokenizer=tokenizer, handle_impossible_answer=True)\n", 847 | "prediction = quantized_optimum_qa(question=question, context=context)\n", 848 | "prediction" 849 | ] 850 | }, 851 | { 852 | "cell_type": "code", 853 | "execution_count": 34, 854 | "id": "15bb048c-570f-4c87-a1ba-3ea5a7a5734f", 855 | "metadata": {}, 856 | "outputs": [ 857 | { 858 | "name": "stdout", 859 | "output_type": "stream", 860 | "text": [ 861 | "Quantized model average latency (ms) - 37.64 +\\- 0.13\n" 862 | ] 863 | } 864 | ], 865 | "source": [ 866 | "print(f\"Quantized model {measure_latency(quantized_optimum_qa)}\")" 867 | ] 868 | }, 869 | { 870 | "cell_type": "markdown", 871 | "id": "e4467fec-4634-4d46-a2a5-be74dd0df445", 872 | "metadata": {}, 873 | "source": [ 874 | "Nice, dynamic quantization gave a ~2x speed-up \ud83e\udd2f!" 875 | ] 876 | }, 877 | { 878 | "cell_type": "code", 879 | "execution_count": null, 880 | "id": "5fba5184-e7d1-4799-baa7-50ad494188d3", 881 | "metadata": {}, 882 | "outputs": [], 883 | "source": [] 884 | }, 885 | { 886 | "cell_type": "code", 887 | "execution_count": null, 888 | "id": "ca90d903-f661-4b5a-87a6-ce293cc5eae0", 889 | "metadata": {}, 890 | "outputs": [], 891 | "source": [] 892 | }, 893 | { 894 | "cell_type": "markdown", 895 | "id": "70bb508d-0260-436f-b76f-0326bfd49d36", 896 | "metadata": {}, 897 | "source": [ 898 | "## Evaluation" 899 | ] 900 | }, 901 | { 902 | "cell_type": "code", 903 | "execution_count": 35, 904 | "id": "3dea108a-a3a7-4183-9a42-a3ac8d497359", 905 | "metadata": {}, 906 | "outputs": [ 907 | { 908 | "data": { 909 | "application/vnd.jupyter.widget-view+json": { 910 | "model_id": "28712846d57345a7a473aa37785c65f5", 911 | "version_major": 2, 912 | "version_minor": 0 913 | }, 914 | "text/plain": [ 915 | " 0%| | 0/2 [00:00 Here you can find all the materials for the Bosch workshop! 
4 | 5 | ## Slides 6 | 7 | You can find slides for the workshops at the links below: 8 | 9 | * [Accelerating Transformers with 🤗 Optimum](https://docs.google.com/presentation/d/1UOcmrbsLVUTKnAUsYEcCIXFmZLPv17-HINQycRa-kqg/edit?usp=sharing) 10 | 11 | ## Getting started 12 | 13 | If you want to follow along, make sure you have: 14 | 15 | * Signed up for a Hugging Face account: https://huggingface.co/join 16 | * Signed up to Google Colab so you can run code on free GPUs: https://colab.research.google.com/ 17 | 18 | -------------------------------------------------------------------------------- /datacamp-webinar/README.md: -------------------------------------------------------------------------------- 1 | # Generating Photorealistic Images using AI with Diffusers in Python 2 | 3 | Contains Colab Notebook and Slides for our DataCamp webinar: [Generating Photorealistic Images using AI with Diffusers in Python](https://www.datacamp.com/webinars/Generating-photorealistic-images-using-ai-with-diffusers-python). 4 | 5 | ## Getting started 6 | 7 | If you want to follow along, make sure you have: 8 | 9 | * Signed up for a Hugging Face account: https://huggingface.co/join 10 | * Signed up to Google Colab so you can run code easily: https://colab.research.google.com/ 11 | -------------------------------------------------------------------------------- /datacamp-webinar/slides.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/datacamp-webinar/slides.pdf -------------------------------------------------------------------------------- /europython-2022/03-gradio-demo.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "d55fd487-9ea6-4580-98a9-d19d3c1db83d", 6 | "metadata": {}, 7 | "source": [ 8 | "\"Open" 9 | ] 10 | }, 11 | { 12 | "cell_type": "markdown", 13 | "id": "87c47386", 14 | "metadata": {}, 15 | "source": [ 16 | "# Creating a Transformers demo with Gradio" 17 | ] 18 | }, 19 | { 20 | "cell_type": "markdown", 21 | "id": "2ee3a648-2519-4617-a206-19b8c5cfef3d", 22 | "metadata": {}, 23 | "source": [ 24 | "## References\n", 25 | "\n", 26 | "* https://huggingface.co/blog/gradio-spaces\n", 27 | "* https://huggingface.co/blog/gradio" 28 | ] 29 | }, 30 | { 31 | "cell_type": "code", 32 | "execution_count": null, 33 | "id": "f793dc42", 34 | "metadata": {}, 35 | "outputs": [], 36 | "source": [ 37 | "# !pip install transformers gradio sentencepiece" 38 | ] 39 | }, 40 | { 41 | "cell_type": "markdown", 42 | "id": "87712d83", 43 | "metadata": {}, 44 | "source": [ 45 | "## Example 1: Using the Transformers pipeline" 46 | ] 47 | }, 48 | { 49 | "cell_type": "code", 50 | "execution_count": null, 51 | "id": "efab6bce-f259-4956-98b6-ad379e4fccd6", 52 | "metadata": {}, 53 | "outputs": [], 54 | "source": [ 55 | "import gradio as gr\n", 56 | "from transformers import pipeline" 57 | ] 58 | }, 59 | { 60 | "cell_type": "code", 61 | "execution_count": null, 62 | "id": "ce4a01e6-6143-4dec-a560-19498428f01e", 63 | "metadata": {}, 64 | "outputs": [], 65 | "source": [ 66 | "pipe = pipeline(\"text-classification\", model=\"Rocketknight1/europython-imdb\")" 67 | ] 68 | }, 69 | { 70 | "cell_type": "code", 71 | "execution_count": null, 72 | "id": "e9751057", 73 | "metadata": {}, 74 | "outputs": [], 75 | "source": [ 76 | "pipe(\"This was the most boring 94 minutes of my life.\")" 77 | ] 78 | }, 79 | { 80 | 
"cell_type": "code", 81 | "execution_count": null, 82 | "id": "e064676e", 83 | "metadata": {}, 84 | "outputs": [], 85 | "source": [ 86 | "label2emoji = {\"Negative\": \"💩\", \"Positive\": \"😻\"}\n", 87 | "\n", 88 | "def predict(text):\n", 89 | " preds = pipe(text)[0]\n", 90 | " return label2emoji[preds[\"label\"]], round(preds[\"score\"], 5)\n", 91 | "\n", 92 | "predict(\"I loved this movie!\")" 93 | ] 94 | }, 95 | { 96 | "cell_type": "code", 97 | "execution_count": null, 98 | "id": "9c65091e", 99 | "metadata": {}, 100 | "outputs": [], 101 | "source": [ 102 | "gradio_ui = gr.Interface(\n", 103 | " fn=predict,\n", 104 | " title=\"Review analysis\",\n", 105 | " description=\"Enter some review text and check what the model predicts for its sentiment.\",\n", 106 | " inputs=[\n", 107 | " gr.inputs.Textbox(lines=5, label=\"Paste some text here\"),\n", 108 | " ],\n", 109 | " outputs=[\n", 110 | " gr.outputs.Textbox(label=\"Label\"),\n", 111 | " gr.outputs.Textbox(label=\"Score\"),\n", 112 | " ],\n", 113 | " examples=[\n", 114 | " [\"I loved it, best movie ever!\"], [\"Turgid and preposterous, almost unwatchable.\"]\n", 115 | " ],\n", 116 | ")\n", 117 | "\n", 118 | "gradio_ui.launch(debug=True)" 119 | ] 120 | }, 121 | { 122 | "cell_type": "markdown", 123 | "id": "4a4afb5f-6f9b-42ab-881e-34870729d028", 124 | "metadata": {}, 125 | "source": [ 126 | "## Example 2: Using the Inference API from the Hugging Face Hub" 127 | ] 128 | }, 129 | { 130 | "cell_type": "code", 131 | "execution_count": null, 132 | "id": "5b440dde-e6e7-4dc0-afc0-79f097360797", 133 | "metadata": {}, 134 | "outputs": [], 135 | "source": [ 136 | "from huggingface_hub import InferenceApi\n", 137 | "\n", 138 | "text = \"I loved it, best movie ever!\"\n", 139 | "inference = InferenceApi(\"Rocketknight1/europython-imdb\")\n", 140 | "preds = inference(inputs=text)\n", 141 | "preds[0]" 142 | ] 143 | }, 144 | { 145 | "cell_type": "code", 146 | "execution_count": null, 147 | "id": "e02675d8-95d4-4874-9b30-aa375da20054", 148 | "metadata": {}, 149 | "outputs": [], 150 | "source": [ 151 | "sorted_preds = sorted(preds[0], key=lambda d: d['score'], reverse=True) \n", 152 | "sorted_preds" 153 | ] 154 | }, 155 | { 156 | "cell_type": "code", 157 | "execution_count": null, 158 | "id": "96673a20-fc00-4540-b85d-3cd2696df1bd", 159 | "metadata": {}, 160 | "outputs": [], 161 | "source": [ 162 | "def inference_predict(text):\n", 163 | " inference = InferenceApi(\"Rocketknight1/europython-imdb\")\n", 164 | " preds = inference(inputs=text)\n", 165 | " sorted_preds = sorted(preds[0], key=lambda d: d['score'], reverse=True)[0]\n", 166 | " return label2emoji[sorted_preds[\"label\"]], round(sorted_preds[\"score\"], 5)" 167 | ] 168 | }, 169 | { 170 | "cell_type": "code", 171 | "execution_count": null, 172 | "id": "c8250d8a-6bd1-4406-8a06-3e0de40eebd8", 173 | "metadata": {}, 174 | "outputs": [], 175 | "source": [ 176 | "inference_predict(text)" 177 | ] 178 | }, 179 | { 180 | "cell_type": "code", 181 | "execution_count": null, 182 | "id": "d5f756f0", 183 | "metadata": {}, 184 | "outputs": [], 185 | "source": [ 186 | "gradio_ui = gr.Interface.load(\n", 187 | " name=\"Rocketknight1/europython-imdb\",\n", 188 | " src=\"huggingface\",\n", 189 | " fn=inference_predict,\n", 190 | " title=\"Review analysis\",\n", 191 | " description=\"Enter some review text and check what the model predicts for its sentiment.\",\n", 192 | " inputs=[\n", 193 | " gr.inputs.Textbox(lines=5, label=\"Paste some text here\"),\n", 194 | " ],\n", 195 | " outputs=[\n", 196 | " 
gr.outputs.Textbox(label=\"Label\"),\n", 197 | " gr.outputs.Textbox(label=\"Score\"),\n", 198 | " ],\n", 199 | " examples=[\n", 200 | " [\"I loved it, best movie ever!\"], [\"Turgid and preposterous, almost unwatchable.\"]\n", 201 | " ],\n", 202 | ")\n", 203 | "\n", 204 | "gradio_ui.launch(debug=True)" 205 | ] 206 | }, 207 | { 208 | "cell_type": "code", 209 | "execution_count": null, 210 | "id": "3b8188dd", 211 | "metadata": {}, 212 | "outputs": [], 213 | "source": [] 214 | } 215 | ], 216 | "metadata": { 217 | "interpreter": { 218 | "hash": "9f835d35ef2d7d572ed1f1be271ae903cca02f9a46b282db81c294a2d4ce247b" 219 | }, 220 | "kernelspec": { 221 | "display_name": "Python 3 (ipykernel)", 222 | "language": "python", 223 | "name": "python3" 224 | }, 225 | "language_info": { 226 | "codemirror_mode": { 227 | "name": "ipython", 228 | "version": 3 229 | }, 230 | "file_extension": ".py", 231 | "mimetype": "text/x-python", 232 | "name": "python", 233 | "nbconvert_exporter": "python", 234 | "pygments_lexer": "ipython3", 235 | "version": "3.10.4" 236 | } 237 | }, 238 | "nbformat": 4, 239 | "nbformat_minor": 5 240 | } 241 | -------------------------------------------------------------------------------- /europython-2022/How To Train Your Graphics Card To Read With Transformers.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/europython-2022/How To Train Your Graphics Card To Read With Transformers.pdf -------------------------------------------------------------------------------- /europython-2022/README.md: -------------------------------------------------------------------------------- 1 | # Europython 2022 2 | 3 | > Here you can find all the materials for the Europython 2022 "How To Train Your Graphics Card To Read" workshop! 
4 | 5 | ## Getting started 6 | 7 | The address for this repo is **https://github.com/huggingface/workshops** 8 | 9 | If you want to follow along, make sure you have: 10 | 11 | * Signed up for a Hugging Face account: https://huggingface.co/join 12 | * Signed up to Google Colab so you can run code on free GPUs: https://colab.research.google.com/ 13 | -------------------------------------------------------------------------------- /europython-2022/images/clf_arch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/europython-2022/images/clf_arch.png -------------------------------------------------------------------------------- /europython-2022/images/gen_steps.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/europython-2022/images/gen_steps.png -------------------------------------------------------------------------------- /europython-2022/images/ner_arch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/europython-2022/images/ner_arch.png -------------------------------------------------------------------------------- /europython-2022/images/object_detection.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/europython-2022/images/object_detection.png -------------------------------------------------------------------------------- /europython-2022/images/pipeline.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/europython-2022/images/pipeline.png -------------------------------------------------------------------------------- /europython-2022/images/qa_arch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/europython-2022/images/qa_arch.png -------------------------------------------------------------------------------- /europython-2022/images/speech2text.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/europython-2022/images/speech2text.png -------------------------------------------------------------------------------- /europython-2022/images/tapas.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/europython-2022/images/tapas.png -------------------------------------------------------------------------------- /fewshot-learning-in-production/README.md: -------------------------------------------------------------------------------- 1 | # Few-Shot Learning in Production 2 | 3 | > Here you can find all the materials for the SetFit workshop on few-shot learning! 
4 | 5 | ## Slides 6 | 7 | You can find slides for the workshops at the links below: 8 | 9 | * [Few-Shot Learning in Production](https://docs.google.com/presentation/d/1LVnwWShIVNVBxA8eG017zsDioP7BnT7DHc8eU0NGC3E/edit?usp=sharing) 10 | 11 | ## Getting started 12 | 13 | If you want to follow along, make sure you have: 14 | 15 | * Signed up for a Hugging Face account: https://huggingface.co/join 16 | * Signed up to Google Colab so you can run code on free GPUs: https://colab.research.google.com/ 17 | * A [valid payment method](https://huggingface.co/docs/inference-endpoints/guides/access) on your Hugging Face account if you'd like to deploy your own models to Inference Endpoints -------------------------------------------------------------------------------- /luzern-university/03-gradio-demo.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "d55fd487-9ea6-4580-98a9-d19d3c1db83d", 6 | "metadata": {}, 7 | "source": [ 8 | "\"Open" 9 | ] 10 | }, 11 | { 12 | "cell_type": "markdown", 13 | "id": "87c47386", 14 | "metadata": {}, 15 | "source": [ 16 | "# Creating a Transformers demo with Gradio" 17 | ] 18 | }, 19 | { 20 | "cell_type": "markdown", 21 | "id": "2ee3a648-2519-4617-a206-19b8c5cfef3d", 22 | "metadata": {}, 23 | "source": [ 24 | "## References\n", 25 | "\n", 26 | "* https://huggingface.co/blog/gradio-spaces\n", 27 | "* https://huggingface.co/blog/gradio" 28 | ] 29 | }, 30 | { 31 | "cell_type": "code", 32 | "execution_count": null, 33 | "id": "f793dc42", 34 | "metadata": {}, 35 | "outputs": [], 36 | "source": [ 37 | "# !pip install transformers gradio sentencepiece" 38 | ] 39 | }, 40 | { 41 | "cell_type": "markdown", 42 | "id": "87712d83", 43 | "metadata": {}, 44 | "source": [ 45 | "## Example 1: Using the Transformers pipeline" 46 | ] 47 | }, 48 | { 49 | "cell_type": "code", 50 | "execution_count": null, 51 | "id": "efab6bce-f259-4956-98b6-ad379e4fccd6", 52 | "metadata": {}, 53 | "outputs": [], 54 | "source": [ 55 | "import gradio as gr\n", 56 | "from transformers import pipeline" 57 | ] 58 | }, 59 | { 60 | "cell_type": "code", 61 | "execution_count": null, 62 | "id": "ce4a01e6-6143-4dec-a560-19498428f01e", 63 | "metadata": {}, 64 | "outputs": [], 65 | "source": [ 66 | "pipe = pipeline(\"text-classification\", model=\"lewtun/xlm-roberta-base-finetuned-marc-en\")" 67 | ] 68 | }, 69 | { 70 | "cell_type": "code", 71 | "execution_count": null, 72 | "id": "e9751057", 73 | "metadata": {}, 74 | "outputs": [], 75 | "source": [ 76 | "pipe(\"The Lord of the Rings is waaay too long to read!!\")" 77 | ] 78 | }, 79 | { 80 | "cell_type": "code", 81 | "execution_count": null, 82 | "id": "e064676e", 83 | "metadata": {}, 84 | "outputs": [], 85 | "source": [ 86 | "label2emoji = {\"terrible\": \"\ud83d\udca9\", \"poor\": \"\ud83d\ude3e\", \"ok\": \"\ud83d\udc31\", \"good\": \"\ud83d\ude3a\", \"great\": \"\ud83d\ude3b\"}\n", 87 | "\n", 88 | "def predict(text):\n", 89 | " preds = pipe(text)[0]\n", 90 | " return label2emoji[preds[\"label\"]], round(preds[\"score\"], 5)\n", 91 | "\n", 92 | "predict(\"I love this soccer ball\")" 93 | ] 94 | }, 95 | { 96 | "cell_type": "code", 97 | "execution_count": null, 98 | "id": "9c65091e", 99 | "metadata": {}, 100 | "outputs": [], 101 | "source": [ 102 | "gradio_ui = gr.Interface(\n", 103 | " fn=predict,\n", 104 | " title=\"Predicting review scores from customer reviews\",\n", 105 | " description=\"Enter some review text about an Amazon product and check what the model 
predicts for its star rating.\",\n", 106 |     "    inputs=[\n", 107 |     "        gr.inputs.Textbox(lines=5, label=\"Paste some text here\"),\n", 108 |     "    ],\n", 109 |     "    outputs=[\n", 110 |     "        gr.outputs.Textbox(label=\"Label\"),\n", 111 |     "        gr.outputs.Textbox(label=\"Score\"),\n", 112 |     "    ],\n", 113 |     "    examples=[\n", 114 |     "        [\"My favourite book is Cryptonomicon!\"], [\"\u79c1\u306e\u597d\u304d\u306a\u672c\u306f\u300c\u30af\u30ea\u30d7\u30c8\u30ce\u30df\u30b3\u30f3\u300d\u3067\u3059\"]\n", 115 |     "    ],\n", 116 |     ")\n", 117 |     "\n", 118 |     "gradio_ui.launch(debug=True)" 119 |    ] 120 |   }, 121 |   { 122 |    "cell_type": "markdown", 123 |    "id": "4a4afb5f-6f9b-42ab-881e-34870729d028", 124 |    "metadata": {}, 125 |    "source": [ 126 |     "## Example 2: Using the Inference API from the Hugging Face Hub" 127 |    ] 128 |   }, 129 |   { 130 |    "cell_type": "code", 131 |    "execution_count": null, 132 |    "id": "5b440dde-e6e7-4dc0-afc0-79f097360797", 133 |    "metadata": {}, 134 |    "outputs": [], 135 |    "source": [ 136 |     "from huggingface_hub import InferenceApi\n", 137 |     "\n", 138 |     "text = \"My favourite book is Cryptonomicon!\"\n", 139 |     "inference = InferenceApi(\"lewtun/xlm-roberta-base-finetuned-marc-en\")\n", 140 |     "preds = inference(inputs=text)\n", 141 |     "preds[0]" 142 |    ] 143 |   }, 144 |   { 145 |    "cell_type": "code", 146 |    "execution_count": null, 147 |    "id": "e02675d8-95d4-4874-9b30-aa375da20054", 148 |    "metadata": {}, 149 |    "outputs": [], 150 |    "source": [ 151 |     "sorted_preds = sorted(preds[0], key=lambda d: d['score'], reverse=True) \n", 152 |     "sorted_preds" 153 |    ] 154 |   }, 155 |   { 156 |    "cell_type": "code", 157 |    "execution_count": null, 158 |    "id": "96673a20-fc00-4540-b85d-3cd2696df1bd", 159 |    "metadata": {}, 160 |    "outputs": [], 161 |    "source": [ 162 |     "def inference_predict(text):\n", 163 |     "    inference = InferenceApi(\"lewtun/xlm-roberta-base-finetuned-marc-en\")\n", 164 |     "    preds = inference(inputs=text)\n", 165 |     "    sorted_preds = sorted(preds[0], key=lambda d: d['score'], reverse=True)[0]\n", 166 |     "    return label2emoji[sorted_preds[\"label\"]], round(sorted_preds[\"score\"], 5)" 167 |    ] 168 |   }, 169 |   { 170 |    "cell_type": "code", 171 |    "execution_count": null, 172 |    "id": "c8250d8a-6bd1-4406-8a06-3e0de40eebd8", 173 |    "metadata": {}, 174 |    "outputs": [], 175 |    "source": [ 176 |     "inference_predict(text)" 177 |    ] 178 |   }, 179 |   { 180 |    "cell_type": "code", 181 |    "execution_count": null, 182 |    "id": "d5f756f0", 183 |    "metadata": {}, 184 |    "outputs": [], 185 |    "source": [ 186 |     "gradio_ui = gr.Interface.load(\n", 187 |     "    name=\"lewtun/xlm-roberta-base-finetuned-marc\",\n", 188 |     "    src=\"huggingface\",\n", 189 |     "    fn=inference_predict,\n", 190 |     "    title=\"Review analysis\",\n", 191 |     "    description=\"Enter some text and check if the model detects its star rating.\",\n", 192 |     "    inputs=[\n", 193 |     "        gr.inputs.Textbox(lines=5, label=\"Paste some text here\"),\n", 194 |     "    ],\n", 195 |     "    outputs=[\n", 196 |     "        gr.outputs.Textbox(label=\"Label\"),\n", 197 |     "        gr.outputs.Textbox(label=\"Score\"),\n", 198 |     "    ],\n", 199 |     "    examples=[\n", 200 |     "        [\"My favourite book is Cryptonomicon!\"], [\"\u79c1\u306e\u597d\u304d\u306a\u672c\u306f\u300c\u30af\u30ea\u30d7\u30c8\u30ce\u30df\u30b3\u30f3\u300d\u3067\u3059\"]\n", 201 |     "    ],\n", 202 |     ")\n", 203 |     "\n", 204 |     "gradio_ui.launch(debug=True)" 205 |    ] 206 |   }, 207 |   { 208 |    "cell_type": "markdown", 209 |    "id": "a5193629", 210 |    "metadata": {}, 211 |    "source": [] 212 |   } 213 |  ], 214 |  "metadata": { 215 |   "interpreter": { 216 |    "hash": "9f835d35ef2d7d572ed1f1be271ae903cca02f9a46b282db81c294a2d4ce247b"
217 |   }, 218 |   "kernelspec": { 219 |    "display_name": "Python 3 (ipykernel)", 220 |    "language": "python", 221 |    "name": "python3" 222 |   }, 223 |   "language_info": { 224 |    "codemirror_mode": { 225 |     "name": "ipython", 226 |     "version": 3 227 |    }, 228 |    "file_extension": ".py", 229 |    "mimetype": "text/x-python", 230 |    "name": "python", 231 |    "nbconvert_exporter": "python", 232 |    "pygments_lexer": "ipython3", 233 |    "version": "3.8.10" 234 |   } 235 |  }, 236 |  "nbformat": 4, 237 |  "nbformat_minor": 5 238 | } -------------------------------------------------------------------------------- /luzern-university/06-stable-diffusion-gradio.ipynb: -------------------------------------------------------------------------------- 1 | { 2 |  "cells": [ 3 |   { 4 |    "cell_type": "markdown", 5 |    "id": "f21dd773", 6 |    "metadata": {}, 7 |    "source": [ 8 |     "\"Open" 9 |    ] 10 |   }, 11 |   { 12 |    "cell_type": "markdown", 13 |    "id": "0c5db8ce-b7a1-4c4c-a1c3-6933efd1d186", 14 |    "metadata": {}, 15 |    "source": [ 16 |     "# Creating a Diffusers Demo with Gradio" 17 |    ] 18 |   }, 19 |   { 20 |    "cell_type": "markdown", 21 |    "id": "892a3b27", 22 |    "metadata": {}, 23 |    "source": [ 24 |     "**Learning goals:** The goal of this tutorial is to learn how to:\n", 25 |     "\n", 26 |     "1. Build a quick demo for your machine learning model in Python using the `gradio` library\n", 27 |     "2. Host the demos for free with Hugging Face Spaces\n", 28 |     "\n", 29 |     "**Duration**: 20-40\n", 30 |     " minutes\n", 31 |     "\n", 32 |     "**Prerequisites:** Knowledge of Python and basic familiarity with machine learning\n", 33 |     "\n", 34 |     "All of these steps can be done for free! All you need is an Internet browser and a place where you can write Python 👩‍💻" 35 |    ] 36 |   }, 37 |   { 38 |    "cell_type": "markdown", 39 |    "id": "27b8c03d", 40 |    "metadata": {}, 41 |    "source": [ 42 |     "## Why Demos?\n", 43 |     "\n", 44 |     "**Demos** of machine learning models are an increasingly important part of machine learning _courses_ and _conferences_. Demos allow:\n", 45 |     "\n", 46 |     "* model developers to easily **present** their work to a wide audience\n", 47 |     "* researchers to increase the **reproducibility** of machine learning research\n", 48 |     "* diverse users to more easily **identify and debug** failure points of models\n", 49 |     "\n", 50 |     "\n", 51 |     "As a quick example of what we would like to build, check out the [Keras Org on Hugging Face](https://huggingface.co/keras-io), which includes a description card and a collection of Models and Spaces built by the Keras community. Any Space can be opened in your browser and you can use the model immediately, as shown here: \n", 52 |     "\n", 53 |     "![](https://i.ibb.co/7y6DGjB/ezgif-5-cc52b7e590.gif)\n", 54 |     "\n", 55 |     "\n" 56 |    ] 57 |   }, 58 |   { 59 |    "cell_type": "markdown", 60 |    "id": "ad8cf951-41e2-49a9-b2a7-fb3ea8019c64", 61 |    "metadata": { 62 |     "tags": [] 63 |    }, 64 |    "source": [ 65 |     "## Setup" 66 |    ] 67 |   }, 68 |   { 69 |    "cell_type": "markdown", 70 |    "id": "1469f27a-1e94-4e54-8cb3-41c9387cbb59", 71 |    "metadata": {}, 72 |    "source": [ 73 |     "If you're running this notebook on Google Colab or locally, you'll need a few dependencies installed. You can install them with `pip` as follows:" 74 |    ] 75 |   }, 76 |   { 77 |    "cell_type": "code", 78 |    "execution_count": null, 79 |    "id": "49a51177-60dc-4641-be71-ff05f067f738", 80 |    "metadata": {}, 81 |    "outputs": [], 82 |    "source": [ 83 |     "#!
pip install diffusers transformers ftfy gradio" 84 |    ] 85 |   }, 86 |   { 87 |    "cell_type": "markdown", 88 |    "id": "7741507d-1b34-41cc-bedb-4fd135817f68", 89 |    "metadata": {}, 90 |    "source": [ 91 |     "To be able to access the Stable Diffusion checkpoints from the Hugging Face Hub, you'll need to store your authentication token from the Hugging Face website. Sign up [here](https://huggingface.co/join) if you haven't already, then execute the following cell and input your API token:" 92 |    ] 93 |   }, 94 |   { 95 |    "cell_type": "code", 96 |    "execution_count": null, 97 |    "id": "7b9432d2-911b-4587-aa58-c4b42ab0900f", 98 |    "metadata": {}, 99 |    "outputs": [], 100 |    "source": [ 101 |     "from huggingface_hub import notebook_login\n", 102 |     "\n", 103 |     "notebook_login()" 104 |    ] 105 |   }, 106 |   { 107 |    "cell_type": "markdown", 108 |    "id": "ec400261-f810-41ac-a7fb-3c77ababd611", 109 |    "metadata": {}, 110 |    "source": [ 111 |     "Finally, let's set the device (CPU or GPU) to run our pipelines on:" 112 |    ] 113 |   }, 114 |   { 115 |    "cell_type": "code", 116 |    "execution_count": null, 117 |    "id": "ca1a1399-9d75-417e-b4ec-07724edfe2b5", 118 |    "metadata": {}, 119 |    "outputs": [], 120 |    "source": [ 121 |     "import torch\n", 122 |     "\n", 123 |     "device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n", 124 |     "torch_dtype = torch.float16 if device == \"cuda\" else None" 125 |    ] 126 |   }, 127 |   { 128 |    "cell_type": "markdown", 129 |    "id": "58335633-23dc-46a3-a8b0-73f14d0727ec", 130 |    "metadata": {}, 131 |    "source": [ 132 |     "## Building a demo with Gradio" 133 |    ] 134 |   }, 135 |   { 136 |    "cell_type": "markdown", 137 |    "id": "ab2bbd06", 138 |    "metadata": {}, 139 |    "source": [ 140 |     "`gradio` is a handy Python library that lets you build web demos simply by specifying the list of input and output **components** expected by your machine learning model. \n", 141 |     "\n", 142 |     "What do I mean by input and output components? Gradio comes with a bunch of predefined components for different kinds of machine learning models. Here are some examples:\n", 143 |     "\n", 144 |     "* For an **image classifier**, the expected input type is an `Image` and the output type is a `Label`. \n", 145 |     "* For a **speech recognition model**, the expected input component is a `Microphone` (which lets users record from the browser) or `Audio` (which lets users drag-and-drop audio files), while the output type is `Text`. \n", 146 |     "* For a **question answering model**, we expect **2 inputs**: [`Text`, `Text`], one textbox for the paragraph and one for the question, and the output type is a single `Text` corresponding to the answer. \n", 147 |     "\n", 148 |     "You get the idea... (for all of the supported components, [see the docs](https://gradio.app/docs/))\n", 149 |     "\n", 150 |     "In addition to the input and output types, Gradio expects a third parameter, which is the prediction function itself. This parameter can be ***any* regular Python function** that takes in parameter(s) corresponding to the input component(s) and returns value(s) corresponding to the output component(s).\n", 151 |     "\n", 152 |     "Enough words. 
Let's see some code!\n", 153 |     "\n", 154 |     "First, let's load the `StableDiffusionPipeline` and check it can generate an image:" 155 |    ] 156 |   }, 157 |   { 158 |    "cell_type": "code", 159 |    "execution_count": null, 160 |    "id": "ce5b462f", 161 |    "metadata": {}, 162 |    "outputs": [], 163 |    "source": [ 164 |     "from diffusers import StableDiffusionPipeline\n", 165 |     "\n", 166 |     "model_id = \"CompVis/stable-diffusion-v1-4\"\n", 167 |     "pipe = StableDiffusionPipeline.from_pretrained(\n", 168 |     "    model_id, revision=\"fp16\", torch_dtype=torch_dtype\n", 169 |     ").to(device)\n", 170 |     "prompt = \"a photograph of an astronaut riding a horse\"\n", 171 |     "\n", 172 |     "outputs = pipe(prompt)\n", 173 |     "outputs.images[0]" 174 |    ] 175 |   }, 176 |   { 177 |    "cell_type": "markdown", 178 |    "id": "bedd0ad1", 179 |    "metadata": {}, 180 |    "source": [ 181 |     "Next, we need to implement a function that takes a text prompt and returns an image:" 182 |    ] 183 |   }, 184 |   { 185 |    "cell_type": "code", 186 |    "execution_count": 7, 187 |    "id": "b9087d78-5da7-4ed0-882a-401ff32ae407", 188 |    "metadata": {}, 189 |    "outputs": [], 190 |    "source": [ 191 |     "def predict(prompt):\n", 192 |     "    return pipe(prompt).images[0]\n", 193 |     "\n", 194 |     "predict(prompt)" 195 |    ] 196 |   }, 197 |   { 198 |    "cell_type": "markdown", 199 |    "id": "df63195c-dda8-4418-a938-79b297e47a0c", 200 |    "metadata": {}, 201 |    "source": [ 202 |     "The final step is to implement a simple interface:" 203 |    ] 204 |   }, 205 |   { 206 |    "cell_type": "code", 207 |    "execution_count": null, 208 |    "id": "0bd091b0-4843-4fb3-8192-b5b705d7171a", 209 |    "metadata": {}, 210 |    "outputs": [], 211 |    "source": [ 212 |     "import gradio as gr\n", 213 |     "\n", 214 |     "gradio_ui = gr.Interface(\n", 215 |     "    fn=predict,\n", 216 |     "    title=\"Stable Diffusion Demo\",\n", 217 |     "    description=\"Enter a description of an image you'd like to generate!\",\n", 218 |     "    inputs=[\n", 219 |     "        gr.Textbox(lines=2, label=\"Paste some text here\"),\n", 220 |     "    ],\n", 221 |     "    outputs=[\"image\"],\n", 222 |     "    examples=[[\"a photograph of an astronaut riding a horse\"]],\n", 223 |     ")\n", 224 |     "\n", 225 |     "gradio_ui.launch(debug=True)" 226 |    ] 227 |   }, 228 |   { 229 |    "cell_type": "markdown", 230 |    "id": "b7e85e6f", 231 |    "metadata": {}, 232 |    "source": [ 233 |     "Running the code above should produce a simple GUI inside this notebook allowing you to type example inputs and see the output returned by your function.\n", 234 |     "\n", 235 |     "Notice that we define an `Interface` using the 3 ingredients mentioned earlier:\n", 236 |     "* A function\n", 237 |     "* Input component(s)\n", 238 |     "* Output component(s)\n", 239 |     "\n", 240 |     "This is a simple example for images, but the same principle holds true for any other kind of data type." 241 |    ] 242 |   }, 243 |   { 244 |    "cell_type": "markdown", 245 |    "id": "bbb717a2", 246 |    "metadata": {}, 247 |    "source": [ 248 |     "## Host the Demo (for free) on Hugging Face Spaces\n", 249 |     "\n", 250 |     "Once you've made a Gradio demo, you can host it permanently on Hugging Face Spaces very easily:\n", 251 |     "\n", 252 |     "Here are the steps to do that (shown in the GIF below):\n", 253 |     "\n", 254 |     "A. First, create a Hugging Face account if you do not already have one, by visiting https://huggingface.co/ and clicking \"Sign Up\"\n", 255 |     "\n", 256 |     "B. Once you are logged in, click on your profile picture and then click on \"New Space\" underneath it to get to this page: https://huggingface.co/new-space\n", 257 |     "\n", 258 |     "C. Give your Space a name and a license. 
Select \"Gradio\" as the Space SDK, and then choose \"Public\" if you are fine with everyone accessing your Space and the underlying code\n", 259 | "\n", 260 | "D. Then you will find a page that provides you instructions on how to upload your files into the Git repository for that Space. You may also need to add a `requirements.txt` file to specify any Python package dependencies.\n", 261 | "\n", 262 | "E. Once you have pushed your files, that's it! Spaces will automatically build your Gradio demo allowing you to share it with anyone, anywhere!\n", 263 | "\n", 264 | "![GIF](https://huggingface.co/blog/assets/28_gradio-spaces/spaces-demo-finalized.gif)\n" 265 | ] 266 | }, 267 | { 268 | "cell_type": "markdown", 269 | "id": "27c5cea9", 270 | "metadata": {}, 271 | "source": [ 272 | "You can even embed your Gradio demo on any website -- in a blog, a portfolio page, or even in a colab notebook, like I've done with a Pictionary sketch recognition model below:" 273 | ] 274 | }, 275 | { 276 | "cell_type": "code", 277 | "execution_count": 15, 278 | "id": "d0d5f0a8", 279 | "metadata": {}, 280 | "outputs": [ 281 | { 282 | "data": { 283 | "text/html": [ 284 | "\n", 285 | " \n", 293 | " " 294 | ], 295 | "text/plain": [ 296 | "" 297 | ] 298 | }, 299 | "execution_count": 15, 300 | "metadata": {}, 301 | "output_type": "execute_result" 302 | } 303 | ], 304 | "source": [ 305 | "from IPython.display import IFrame\n", 306 | "\n", 307 | "IFrame(\n", 308 | " src=\"https://hf.space/gradioiframe/stabilityai/stable-diffusion/+\",\n", 309 | " width=1000,\n", 310 | " height=800,\n", 311 | ")" 312 | ] 313 | }, 314 | { 315 | "cell_type": "code", 316 | "execution_count": null, 317 | "id": "35457fd6", 318 | "metadata": {}, 319 | "outputs": [], 320 | "source": [] 321 | } 322 | ], 323 | "metadata": { 324 | "kernelspec": { 325 | "display_name": "hf", 326 | "language": "python", 327 | "name": "hf" 328 | }, 329 | "language_info": { 330 | "codemirror_mode": { 331 | "name": "ipython", 332 | "version": 3 333 | }, 334 | "file_extension": ".py", 335 | "mimetype": "text/x-python", 336 | "name": "python", 337 | "nbconvert_exporter": "python", 338 | "pygments_lexer": "ipython3", 339 | "version": "3.9.7" 340 | } 341 | }, 342 | "nbformat": 4, 343 | "nbformat_minor": 5 344 | } 345 | -------------------------------------------------------------------------------- /luzern-university/README.md: -------------------------------------------------------------------------------- 1 | # Luzern University of Applied Sciences and Arts 2 | 3 | > Here you can find all the materials for the guest lectures at the [Luzern University of Applied Sciences and Arts](https://www.hslu.ch/en/)! 4 | 5 | ## Slides 6 | 7 | You can find slides for the workshops at the links below. 
8 | 9 | ### 2021 10 | 11 | * [Getting Started with Transformers](https://drive.google.com/file/d/1HozLk1F4HGz5dvACbzedtaXWp4-cSHET/view?usp=sharing) 12 | 13 | 14 | ### 2022 15 | 16 | * [Getting Started with Transformers](https://drive.google.com/file/d/1wYQktDHB9TXvzqeZ_XVfhnfy1DC6u89W/view?usp=sharing) 17 | * [Getting Started with Diffusion Models](https://drive.google.com/file/d/16tc53K0AKEcH5FSVg4oFNPA-Dju-Vhsr/view?usp=sharing) 18 | 19 | ## Getting started 20 | 21 | If you want to follow along, make sure you have: 22 | 23 | * Signed up for a Hugging Face account: https://huggingface.co/join 24 | * Signed up to Google Colab so you can run code on free GPUs: https://colab.research.google.com/ 25 | 26 | -------------------------------------------------------------------------------- /luzern-university/images/clf_arch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/luzern-university/images/clf_arch.png -------------------------------------------------------------------------------- /luzern-university/images/gen_steps.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/luzern-university/images/gen_steps.png -------------------------------------------------------------------------------- /luzern-university/images/ner_arch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/luzern-university/images/ner_arch.png -------------------------------------------------------------------------------- /luzern-university/images/object_detection.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/luzern-university/images/object_detection.png -------------------------------------------------------------------------------- /luzern-university/images/pipeline.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/luzern-university/images/pipeline.png -------------------------------------------------------------------------------- /luzern-university/images/qa_arch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/luzern-university/images/qa_arch.png -------------------------------------------------------------------------------- /luzern-university/images/speech2text.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/luzern-university/images/speech2text.png -------------------------------------------------------------------------------- /luzern-university/images/tapas.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/luzern-university/images/tapas.png -------------------------------------------------------------------------------- /machine-learning-tokyo/03-gradio-demo.ipynb: 
-------------------------------------------------------------------------------- 1 | { 2 |  "cells": [ 3 |   { 4 |    "cell_type": "markdown", 5 |    "id": "d55fd487-9ea6-4580-98a9-d19d3c1db83d", 6 |    "metadata": {}, 7 |    "source": [ 8 |     "\"Open" 9 |    ] 10 |   }, 11 |   { 12 |    "cell_type": "markdown", 13 |    "id": "87c47386", 14 |    "metadata": {}, 15 |    "source": [ 16 |     "# Creating a Transformers demo with Gradio" 17 |    ] 18 |   }, 19 |   { 20 |    "cell_type": "markdown", 21 |    "id": "2ee3a648-2519-4617-a206-19b8c5cfef3d", 22 |    "metadata": {}, 23 |    "source": [ 24 |     "## References\n", 25 |     "\n", 26 |     "* https://huggingface.co/blog/gradio-spaces\n", 27 |     "* https://huggingface.co/blog/gradio" 28 |    ] 29 |   }, 30 |   { 31 |    "cell_type": "code", 32 |    "execution_count": null, 33 |    "id": "f793dc42", 34 |    "metadata": {}, 35 |    "outputs": [], 36 |    "source": [ 37 |     "# !pip install transformers gradio sentencepiece" 38 |    ] 39 |   }, 40 |   { 41 |    "cell_type": "markdown", 42 |    "id": "87712d83", 43 |    "metadata": {}, 44 |    "source": [ 45 |     "## Example 1: Using the Transformers pipeline" 46 |    ] 47 |   }, 48 |   { 49 |    "cell_type": "code", 50 |    "execution_count": null, 51 |    "id": "efab6bce-f259-4956-98b6-ad379e4fccd6", 52 |    "metadata": {}, 53 |    "outputs": [], 54 |    "source": [ 55 |     "import gradio as gr\n", 56 |     "from transformers import pipeline" 57 |    ] 58 |   }, 59 |   { 60 |    "cell_type": "code", 61 |    "execution_count": null, 62 |    "id": "ce4a01e6-6143-4dec-a560-19498428f01e", 63 |    "metadata": {}, 64 |    "outputs": [], 65 |    "source": [ 66 |     "pipe = pipeline(\"text-classification\", model=\"lewtun/xlm-roberta-base-finetuned-marc-en\")" 67 |    ] 68 |   }, 69 |   { 70 |    "cell_type": "code", 71 |    "execution_count": null, 72 |    "id": "e9751057", 73 |    "metadata": {}, 74 |    "outputs": [], 75 |    "source": [ 76 |     "pipe(\"The Lord of the Rings is waaay too long to read!!\")" 77 |    ] 78 |   }, 79 |   { 80 |    "cell_type": "code", 81 |    "execution_count": null, 82 |    "id": "e064676e", 83 |    "metadata": {}, 84 |    "outputs": [], 85 |    "source": [ 86 |     "label2emoji = {\"terrible\": \"\ud83d\udca9\", \"poor\": \"\ud83d\ude3e\", \"ok\": \"\ud83d\udc31\", \"good\": \"\ud83d\ude3a\", \"great\": \"\ud83d\ude3b\"}\n", 87 |     "\n", 88 |     "def predict(text):\n", 89 |     "    preds = pipe(text)[0]\n", 90 |     "    return label2emoji[preds[\"label\"]], round(preds[\"score\"], 5)\n", 91 |     "\n", 92 |     "predict(\"I love this soccer ball\")" 93 |    ] 94 |   }, 95 |   { 96 |    "cell_type": "code", 97 |    "execution_count": null, 98 |    "id": "9c65091e", 99 |    "metadata": {}, 100 |    "outputs": [], 101 |    "source": [ 102 |     "gradio_ui = gr.Interface(\n", 103 |     "    fn=predict,\n", 104 |     "    title=\"Predicting review scores from customer reviews\",\n", 105 |     "    description=\"Enter some review text about an Amazon product and check what the model predicts for its star rating.\",\n", 106 |     "    inputs=[\n", 107 |     "        gr.inputs.Textbox(lines=5, label=\"Paste some text here\"),\n", 108 |     "    ],\n", 109 |     "    outputs=[\n", 110 |     "        gr.outputs.Textbox(label=\"Label\"),\n", 111 |     "        gr.outputs.Textbox(label=\"Score\"),\n", 112 |     "    ],\n", 113 |     "    examples=[\n", 114 |     "        [\"My favourite book is Cryptonomicon!\"], [\"\u79c1\u306e\u597d\u304d\u306a\u672c\u306f\u300c\u30af\u30ea\u30d7\u30c8\u30ce\u30df\u30b3\u30f3\u300d\u3067\u3059\"]\n", 115 |     "    ],\n", 116 |     ")\n", 117 |     "\n", 118 |     "gradio_ui.launch(debug=True)" 119 |    ] 120 |   }, 121 |   { 122 |    "cell_type": "markdown", 123 |    "id": "4a4afb5f-6f9b-42ab-881e-34870729d028", 124 |    "metadata": {}, 125 |    "source": [ 126 |     "## Example 2: Using the Inference API from the Hugging Face Hub" 127 |    ] 128 |   }, 129 |   { 130 | 
"cell_type": "code", 131 | "execution_count": null, 132 | "id": "5b440dde-e6e7-4dc0-afc0-79f097360797", 133 | "metadata": {}, 134 | "outputs": [], 135 | "source": [ 136 | "from huggingface_hub import InferenceApi\n", 137 | "\n", 138 | "text = \"My favourite book is Cryptonomicon!\"\n", 139 | "inference = InferenceApi(\"lewtun/xlm-roberta-base-finetuned-marc-en\")\n", 140 | "preds = inference(inputs=text)\n", 141 | "preds[0]" 142 | ] 143 | }, 144 | { 145 | "cell_type": "code", 146 | "execution_count": null, 147 | "id": "e02675d8-95d4-4874-9b30-aa375da20054", 148 | "metadata": {}, 149 | "outputs": [], 150 | "source": [ 151 | "sorted_preds = sorted(preds[0], key=lambda d: d['score'], reverse=True) \n", 152 | "sorted_preds" 153 | ] 154 | }, 155 | { 156 | "cell_type": "code", 157 | "execution_count": null, 158 | "id": "96673a20-fc00-4540-b85d-3cd2696df1bd", 159 | "metadata": {}, 160 | "outputs": [], 161 | "source": [ 162 | "def inference_predict(text):\n", 163 | " inference = InferenceApi(\"lewtun/xlm-roberta-base-finetuned-marc-en\")\n", 164 | " preds = inference(inputs=text)\n", 165 | " sorted_preds = sorted(preds[0], key=lambda d: d['score'], reverse=True)[0]\n", 166 | " return label2emoji[sorted_preds[\"label\"]], round(sorted_preds[\"score\"], 5)" 167 | ] 168 | }, 169 | { 170 | "cell_type": "code", 171 | "execution_count": null, 172 | "id": "c8250d8a-6bd1-4406-8a06-3e0de40eebd8", 173 | "metadata": {}, 174 | "outputs": [], 175 | "source": [ 176 | "inference_predict(text)" 177 | ] 178 | }, 179 | { 180 | "cell_type": "code", 181 | "execution_count": null, 182 | "id": "d5f756f0", 183 | "metadata": {}, 184 | "outputs": [], 185 | "source": [ 186 | "gradio_ui = gr.Interface.load(\n", 187 | " name=\"lewtun/xlm-roberta-base-finetuned-marc\",\n", 188 | " src=\"huggingface\",\n", 189 | " fn=inference_predict,\n", 190 | " title=\"Review analysis\",\n", 191 | " description=\"Enter some text and check if model detects it's star rating.\",\n", 192 | " inputs=[\n", 193 | " gr.inputs.Textbox(lines=5, label=\"Paste some text here\"),\n", 194 | " ],\n", 195 | " outputs=[\n", 196 | " gr.outputs.Textbox(label=\"Label\"),\n", 197 | " gr.outputs.Textbox(label=\"Score\"),\n", 198 | " ],\n", 199 | " examples=[\n", 200 | " [\"My favourite book is Cryptonomicon!\"], [\"\u79c1\u306e\u597d\u304d\u306a\u672c\u306f\u300c\u30af\u30ea\u30d7\u30c8\u30ce\u30df\u30b3\u30f3\u300d\u3067\u3059\"]\n", 201 | " ],\n", 202 | ")\n", 203 | "\n", 204 | "gradio_ui.launch(debug=True)" 205 | ] 206 | }, 207 | { 208 | "cell_type": "markdown", 209 | "id": "a5193629", 210 | "metadata": {}, 211 | "source": [] 212 | } 213 | ], 214 | "metadata": { 215 | "interpreter": { 216 | "hash": "9f835d35ef2d7d572ed1f1be271ae903cca02f9a46b282db81c294a2d4ce247b" 217 | }, 218 | "kernelspec": { 219 | "display_name": "Python 3 (ipykernel)", 220 | "language": "python", 221 | "name": "python3" 222 | }, 223 | "language_info": { 224 | "codemirror_mode": { 225 | "name": "ipython", 226 | "version": 3 227 | }, 228 | "file_extension": ".py", 229 | "mimetype": "text/x-python", 230 | "name": "python", 231 | "nbconvert_exporter": "python", 232 | "pygments_lexer": "ipython3", 233 | "version": "3.8.10" 234 | } 235 | }, 236 | "nbformat": 4, 237 | "nbformat_minor": 5 238 | } -------------------------------------------------------------------------------- /machine-learning-tokyo/README.md: -------------------------------------------------------------------------------- 1 | # Machine Learning Tokyo 2 | 3 | > Here you can find all the materials for the MLT workshop! 
4 | 5 | ## Getting started 6 | 7 | If you want to follow along, make sure you have: 8 | 9 | * Signed up for a Hugging Face account: https://huggingface.co/join 10 | * Signed up to Google Colab so you can run code on free GPUs: https://colab.research.google.com/ 11 | * Joined the Hugging Face Spaces beta for creating demos: https://hf.co/spaces/to-the-moon 12 | -------------------------------------------------------------------------------- /machine-learning-tokyo/images/clf_arch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/machine-learning-tokyo/images/clf_arch.png -------------------------------------------------------------------------------- /machine-learning-tokyo/images/gen_steps.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/machine-learning-tokyo/images/gen_steps.png -------------------------------------------------------------------------------- /machine-learning-tokyo/images/ner_arch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/machine-learning-tokyo/images/ner_arch.png -------------------------------------------------------------------------------- /machine-learning-tokyo/images/object_detection.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/machine-learning-tokyo/images/object_detection.png -------------------------------------------------------------------------------- /machine-learning-tokyo/images/pipeline.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/machine-learning-tokyo/images/pipeline.png -------------------------------------------------------------------------------- /machine-learning-tokyo/images/qa_arch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/machine-learning-tokyo/images/qa_arch.png -------------------------------------------------------------------------------- /machine-learning-tokyo/images/speech2text.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/machine-learning-tokyo/images/speech2text.png -------------------------------------------------------------------------------- /machine-learning-tokyo/images/tapas.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/machine-learning-tokyo/images/tapas.png -------------------------------------------------------------------------------- /machine-learning-tokyo/slides.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/machine-learning-tokyo/slides.pdf 
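The Spaces-hosting steps described in the Gradio notebooks above stop at "push your files" without showing what those files contain. As a rough illustration, here is a minimal sketch of an `app.py` for the review-scoring demo used in these workshops. The file layout and package list are assumptions based on the notebook code and on how Gradio Spaces were typically set up at the time, not files from this repository:

```python
# app.py -- hypothetical minimal Space for the review-scoring demo above.
# Uses the same legacy Gradio API (gr.inputs/gr.outputs) as the notebooks.
import gradio as gr
from transformers import pipeline

pipe = pipeline("text-classification", model="lewtun/xlm-roberta-base-finetuned-marc-en")
label2emoji = {"terrible": "💩", "poor": "😾", "ok": "🐱", "good": "😺", "great": "😻"}

def predict(text):
    # The pipeline returns one dict per input, e.g. {"label": "good", "score": 0.83};
    # map the predicted label to an emoji and round the confidence score.
    preds = pipe(text)[0]
    return label2emoji[preds["label"]], round(preds["score"], 5)

gradio_ui = gr.Interface(
    fn=predict,
    title="Predicting review scores from customer reviews",
    inputs=gr.inputs.Textbox(lines=5, label="Paste some text here"),
    outputs=[gr.outputs.Textbox(label="Label"), gr.outputs.Textbox(label="Score")],
)

# Spaces imports app.py on startup; launch() starts the web server.
gradio_ui.launch()
```

A matching `requirements.txt` would plausibly list `transformers`, `torch`, and `sentencepiece`; the `gradio` package itself is supplied by the Space's SDK setting.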
-------------------------------------------------------------------------------- /mlops-world/README.md: -------------------------------------------------------------------------------- 1 | # MLOps World 2 | 3 | > Here you can find all the materials for the workshop on _Accelerating Transformers with Hugging Face Optimum and Infinity_ at [MLOps World 2022](https://mlopsworld.com/)! 4 | 5 | ## Getting started 6 | 7 | If you want to follow along, make sure you have: 8 | 9 | * Signed up for a Hugging Face account: https://huggingface.co/join 10 | * Signed up to Google Colab so you can run code easily: https://colab.research.google.com/ -------------------------------------------------------------------------------- /mlops-world/dynamic-quantization.ipynb: -------------------------------------------------------------------------------- 1 | { 2 |  "cells": [ 3 |   { 4 |    "cell_type": "markdown", 5 |    "id": "628d20db-4bb6-49bd-a43b-72897b831826", 6 |    "metadata": {}, 7 |    "source": [ 8 |     "\"Open" 9 |    ] 10 |   }, 11 |   { 12 |    "cell_type": "markdown", 13 |    "id": "ee47bc31-6686-4cf4-b81d-1ac6b0a99a05", 14 |    "metadata": {}, 15 |    "source": [ 16 |     "# Dynamic Quantization with Hugging Face Optimum" 17 |    ] 18 |   }, 19 |   { 20 |    "cell_type": "markdown", 21 |    "id": "c100e377-0f26-4966-a76c-d4e1f5dfe3de", 22 |    "metadata": {}, 23 |    "source": [ 24 |     "In this session, you will learn how to apply _dynamic quantization_ to a 🤗 Transformers model. You will quantize a [DistilBERT model](https://huggingface.co/optimum/distilbert-base-uncased-finetuned-banking77) that's been fine-tuned on the [Banking77 dataset](https://huggingface.co/datasets/banking77) for intent classification. \n", 25 |     "\n", 26 |     "Along the way, you'll learn how to use two open-source libraries: \n", 27 |     "\n", 28 |     "* [🤗 Optimum](https://github.com/huggingface/optimum): an extension of 🤗 Transformers, which provides a set of performance optimization tools enabling maximum efficiency to train and run models on targeted hardware.\n", 29 |     "* [🤗 Evaluate](https://github.com/huggingface/evaluate): a library that makes evaluating and comparing models and reporting their performance easier and more standardized.\n", 30 |     "\n", 31 |     "\n", 32 |     "By the end of this session, you will see how quantization with 🤗 Optimum can significantly decrease model latency while keeping almost 100% of the full-precision model's accuracy.\n", 33 |     "\n", 34 |     "\n", 35 |     "> This tutorial was created and run on a c6i.xlarge AWS EC2 Instance." 36 |    ] 37 |   }, 38 |   { 39 |    "cell_type": "markdown", 40 |    "id": "4149dbcf-508c-4c70-b794-91376a7e3662", 41 |    "metadata": {}, 42 |    "source": [ 43 |     "## Learning objectives\n", 44 |     "\n", 45 |     "By the end of this session, you will know how to:\n", 46 |     "\n", 47 |     "* Set up a development environment\n", 48 |     "* Convert a 🤗 Transformers model to ONNX for inference\n", 49 |     "* Apply dynamic quantization using `ORTQuantizer` from 🤗 Optimum\n", 50 |     "* Test inference with the quantized model\n", 51 |     "* Evaluate the model performance with 🤗 Evaluate\n", 52 |     "* Compare the latency of the quantized model against the original one\n", 53 |     "* Push the quantized model to the Hub\n", 54 |     "* Load and run inference with a quantized model from the Hub\n", 55 |     "\n", 56 |     "\n", 57 |     "Let's get started! 🚀" 58 |    ] 59 |   }, 60 |   { 61 |    "cell_type": "markdown", 62 |    "id": "0795d087-77f9-4d82-915f-983f13abf4ea", 63 |    "metadata": { 64 |     "tags": [] 65 |    }, 66 |    "source": [ 67 |     "## 1. 
Setup development environment" 68 | ] 69 | }, 70 | { 71 | "cell_type": "markdown", 72 | "id": "72bdd46b-9161-4a09-acfa-cbcd095f55e2", 73 | "metadata": {}, 74 | "source": [ 75 | "Our first step is to install 🤗 Optimum, along with 🤗 Evaluate and some other libraries. Running the following cell will install all the required packages for us including 🤗 Transformer, PyTorch, and ONNX Runtime utilities:" 76 | ] 77 | }, 78 | { 79 | "cell_type": "code", 80 | "execution_count": null, 81 | "id": "327dfc3f-02be-4816-bd20-305607accef0", 82 | "metadata": {}, 83 | "outputs": [], 84 | "source": [ 85 | "# Remove the mkl-include and mkl dependencies if running on Colab\n", 86 | "%pip install \"optimum[onnxruntime]==1.2.2\" \"evaluate[evaluator]\" sklearn mkl-include mkl" 87 | ] 88 | }, 89 | { 90 | "cell_type": "markdown", 91 | "id": "c90e16fc-e9bb-41f0-aa50-948a3c4c7d25", 92 | "metadata": {}, 93 | "source": [ 94 | "> If you want to run inference on a GPU, you can install 🤗 Optimum with `pip install optimum[onnxruntime-gpu]`." 95 | ] 96 | }, 97 | { 98 | "cell_type": "markdown", 99 | "id": "35b84656-0248-4b39-a8cb-65bd3b615a0d", 100 | "metadata": {}, 101 | "source": [ 102 | "While we're at it, let's turn off some of the warnings from the 🤗 Datasets library and the tokenizer:" 103 | ] 104 | }, 105 | { 106 | "cell_type": "code", 107 | "execution_count": 1, 108 | "id": "1c8841c5-79c6-4ff7-936f-832faaf706cc", 109 | "metadata": {}, 110 | "outputs": [ 111 | { 112 | "name": "stdout", 113 | "output_type": "stream", 114 | "text": [ 115 | "env: TOKENIZERS_PARALLELISM=false\n" 116 | ] 117 | } 118 | ], 119 | "source": [ 120 | "import datasets\n", 121 | "\n", 122 | "datasets.logging.set_verbosity_error()\n", 123 | "\n", 124 | "%env TOKENIZERS_PARALLELISM=false" 125 | ] 126 | }, 127 | { 128 | "cell_type": "markdown", 129 | "id": "90a977d3-9006-47a7-aefb-6d5ea79c00f8", 130 | "metadata": {}, 131 | "source": [ 132 | "## 2. Convert a 🤗 Transformers model to ONNX for inference" 133 | ] 134 | }, 135 | { 136 | "cell_type": "markdown", 137 | "id": "9680fe1b-e176-49ed-8ed6-f0ec35049d2b", 138 | "metadata": {}, 139 | "source": [ 140 | "Before we can optimize and quantize our model, we first need to export it to the ONNX format. To do this we will use the `ORTModelForSequenceClassification` class and call the `from_pretrained()` method. This method will download the PyTorch weights from the Hub and export them via the `from_transformers` argument. 
The model we are using is `optimum/distilbert-base-uncased-finetuned-banking77`, a DistilBERT model fine-tuned on the Banking77 dataset that achieves an accuracy score of 92.5% and uses text-classification as its feature (task):" 141 |    ] 142 |   }, 143 |   { 144 |    "cell_type": "code", 145 |    "execution_count": 2, 146 |    "id": "0866e06f-cadc-412c-9bc4-ab7c77d84e3b", 147 |    "metadata": {}, 148 |    "outputs": [ 149 |     { 150 |      "data": { 151 |       "application/vnd.jupyter.widget-view+json": { 152 |        "model_id": "9e393274daf6406c9d01b4db27a2fa95", 153 |        "version_major": 2, 154 |        "version_minor": 0 155 |       }, 156 |       "text/plain": [ 157 |        "Downloading:   0%|          | 0.00/5.81k [00:00 -------------------------------------------------------------------------------- /nlp-zurich/03-gradio-demo.ipynb: -------------------------------------------------------------------------------- 8 |     "\"Open" 9 |    ] 10 |   }, 11 |   { 12 |    "cell_type": "markdown", 13 |    "id": "87c47386", 14 |    "metadata": {}, 15 |    "source": [ 16 |     "# Creating a Transformers demo with Gradio" 17 |    ] 18 |   }, 19 |   { 20 |    "cell_type": "markdown", 21 |    "id": "2ee3a648-2519-4617-a206-19b8c5cfef3d", 22 |    "metadata": {}, 23 |    "source": [ 24 |     "## References\n", 25 |     "\n", 26 |     "* https://huggingface.co/blog/gradio-spaces\n", 27 |     "* https://huggingface.co/blog/gradio" 28 |    ] 29 |   }, 30 |   { 31 |    "cell_type": "code", 32 |    "execution_count": null, 33 |    "id": "f793dc42", 34 |    "metadata": {}, 35 |    "outputs": [], 36 |    "source": [ 37 |     "# !pip install transformers gradio sentencepiece" 38 |    ] 39 |   }, 40 |   { 41 |    "cell_type": "markdown", 42 |    "id": "87712d83", 43 |    "metadata": {}, 44 |    "source": [ 45 |     "## Example 1: Using the Transformers pipeline" 46 |    ] 47 |   }, 48 |   { 49 |    "cell_type": "code", 50 |    "execution_count": null, 51 |    "id": "efab6bce-f259-4956-98b6-ad379e4fccd6", 52 |    "metadata": {}, 53 |    "outputs": [], 54 |    "source": [ 55 |     "import gradio as gr\n", 56 |     "from transformers import pipeline" 57 |    ] 58 |   }, 59 |   { 60 |    "cell_type": "code", 61 |    "execution_count": null, 62 |    "id": "ce4a01e6-6143-4dec-a560-19498428f01e", 63 |    "metadata": {}, 64 |    "outputs": [], 65 |    "source": [ 66 |     "pipe = pipeline(\"text-classification\", model=\"lewtun/xlm-roberta-base-finetuned-marc-de\")" 67 |    ] 68 |   }, 69 |   { 70 |    "cell_type": "code", 71 |    "execution_count": null, 72 |    "id": "e9751057", 73 |    "metadata": {}, 74 |    "outputs": [], 75 |    "source": [ 76 |     "pipe(\"This tennis racquet is amazing!\")" 77 |    ] 78 |   }, 79 |   { 80 |    "cell_type": "code", 81 |    "execution_count": null, 82 |    "id": "e064676e", 83 |    "metadata": {}, 84 |    "outputs": [], 85 |    "source": [ 86 |     "label2emoji = {\"terrible\": \"\ud83d\udca9\", \"poor\": \"\ud83d\ude3e\", \"ok\": \"\ud83d\udc31\", \"good\": \"\ud83d\ude3a\", \"great\": \"\ud83d\ude3b\"}\n", 87 |     "\n", 88 |     "def predict(text):\n", 89 |     "    preds = pipe(text)[0]\n", 90 |     "    return label2emoji[preds[\"label\"]], round(preds[\"score\"], 5)\n", 91 |     "\n", 92 |     "predict(\"I love this soccer ball\")" 93 |    ] 94 |   }, 95 |   { 96 |    "cell_type": "code", 97 |    "execution_count": null, 98 |    "id": "9c65091e", 99 |    "metadata": {}, 100 |    "outputs": [], 101 |    "source": [ 102 |     "gradio_ui = gr.Interface(\n", 103 |     "    fn=predict,\n", 104 |     "    title=\"Predicting review scores from customer reviews\",\n", 105 |     "    description=\"Enter some review text about an Amazon product and check what the model predicts for its star rating.\",\n", 106 |     "    inputs=[\n", 107 |     "        gr.inputs.Textbox(lines=5, label=\"Paste some text here\"),\n", 108 |     "    ],\n", 109 |     "    outputs=[\n", 110 |     "        gr.outputs.Textbox(label=\"Label\"),\n", 111 |     "        gr.outputs.Textbox(label=\"Score\"),\n", 112 |     "    ],\n", 113 |     "    examples=[\n", 114 |     "        [\"I love these running shoes\"], [\"J'adore ces chaussures de course\"], [\"Ich
liebe diese Laufschuhe\"]\n", 115 | " ],\n", 116 | ")\n", 117 | "\n", 118 | "gradio_ui.launch(debug=True)" 119 | ] 120 | }, 121 | { 122 | "cell_type": "markdown", 123 | "id": "4a4afb5f-6f9b-42ab-881e-34870729d028", 124 | "metadata": {}, 125 | "source": [ 126 | "## Example 2: Using the Inference API from the Hugging Face Hub" 127 | ] 128 | }, 129 | { 130 | "cell_type": "code", 131 | "execution_count": null, 132 | "id": "5b440dde-e6e7-4dc0-afc0-79f097360797", 133 | "metadata": {}, 134 | "outputs": [], 135 | "source": [ 136 | "from huggingface_hub import InferenceApi\n", 137 | "\n", 138 | "inference = InferenceApi(\"lewtun/xlm-roberta-base-finetuned-marc-de\")\n", 139 | "preds = inference(inputs=\"I am really unhappy with my jacket\")\n", 140 | "preds[0]" 141 | ] 142 | }, 143 | { 144 | "cell_type": "code", 145 | "execution_count": null, 146 | "id": "e02675d8-95d4-4874-9b30-aa375da20054", 147 | "metadata": {}, 148 | "outputs": [], 149 | "source": [ 150 | "sorted_preds = sorted(preds[0], key=lambda d: d['score'], reverse=True) \n", 151 | "sorted_preds" 152 | ] 153 | }, 154 | { 155 | "cell_type": "code", 156 | "execution_count": null, 157 | "id": "96673a20-fc00-4540-b85d-3cd2696df1bd", 158 | "metadata": {}, 159 | "outputs": [], 160 | "source": [ 161 | "def inference_predict(text):\n", 162 | " inference = InferenceApi(\"lewtun/xlm-roberta-base-finetuned-marc\")\n", 163 | " preds = inference(inputs=text)\n", 164 | " sorted_preds = sorted(preds[0], key=lambda d: d['score'], reverse=True)[0]\n", 165 | " return label2emoji[sorted_preds[\"label\"]], round(sorted_preds[\"score\"], 5)" 166 | ] 167 | }, 168 | { 169 | "cell_type": "code", 170 | "execution_count": null, 171 | "id": "c8250d8a-6bd1-4406-8a06-3e0de40eebd8", 172 | "metadata": {}, 173 | "outputs": [], 174 | "source": [ 175 | "inference_predict(\"I love these shoes\")" 176 | ] 177 | }, 178 | { 179 | "cell_type": "code", 180 | "execution_count": null, 181 | "id": "d5f756f0", 182 | "metadata": {}, 183 | "outputs": [], 184 | "source": [ 185 | "gradio_ui = gr.Interface.load(\n", 186 | " name=\"lewtun/xlm-roberta-base-finetuned-marc\",\n", 187 | " src=\"huggingface\",\n", 188 | " fn=inference_predict,\n", 189 | " title=\"Review analysis\",\n", 190 | " description=\"Enter some text and check if model detects it's star rating.\",\n", 191 | " inputs=[\n", 192 | " gr.inputs.Textbox(lines=5, label=\"Paste some text here\"),\n", 193 | " ],\n", 194 | " outputs=[\n", 195 | " gr.outputs.Textbox(label=\"Label\"),\n", 196 | " gr.outputs.Textbox(label=\"Score\"),\n", 197 | " ],\n", 198 | " examples=[\n", 199 | " [\"I love these running shoes\"], [\"J'adore ces chaussures de course\"], [\"Ich liebe diese Laufschuhe\"]\n", 200 | " ],\n", 201 | ")\n", 202 | "\n", 203 | "gradio_ui.launch(debug=True)" 204 | ] 205 | }, 206 | { 207 | "cell_type": "markdown", 208 | "id": "a5193629", 209 | "metadata": {}, 210 | "source": [] 211 | } 212 | ], 213 | "metadata": { 214 | "interpreter": { 215 | "hash": "9f835d35ef2d7d572ed1f1be271ae903cca02f9a46b282db81c294a2d4ce247b" 216 | }, 217 | "kernelspec": { 218 | "display_name": "Python 3 (ipykernel)", 219 | "language": "python", 220 | "name": "python3" 221 | }, 222 | "language_info": { 223 | "codemirror_mode": { 224 | "name": "ipython", 225 | "version": 3 226 | }, 227 | "file_extension": ".py", 228 | "mimetype": "text/x-python", 229 | "name": "python", 230 | "nbconvert_exporter": "python", 231 | "pygments_lexer": "ipython3", 232 | "version": "3.8.10" 233 | } 234 | }, 235 | "nbformat": 4, 236 | "nbformat_minor": 5 237 | } 
-------------------------------------------------------------------------------- /nlp-zurich/README.md: -------------------------------------------------------------------------------- 1 | # NLP Zurich 2 | 3 | > Here you can find all the materials for the NLP Zurich workshop! 4 | 5 | ## Getting started 6 | 7 | If you want to follow along, make sure you have: 8 | 9 | * Signed up for a Hugging Face account: https://huggingface.co/join 10 | * Signed up to Google Colab so you can run code on free GPUs: https://colab.research.google.com/ 11 | * Joined the Hugging Face Spaces beta for creating demos: https://hf.co/spaces/to-the-moon -------------------------------------------------------------------------------- /nlp-zurich/images/clf_arch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/nlp-zurich/images/clf_arch.png -------------------------------------------------------------------------------- /nlp-zurich/images/gen_steps.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/nlp-zurich/images/gen_steps.png -------------------------------------------------------------------------------- /nlp-zurich/images/ner_arch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/nlp-zurich/images/ner_arch.png -------------------------------------------------------------------------------- /nlp-zurich/images/object_detection.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/nlp-zurich/images/object_detection.png -------------------------------------------------------------------------------- /nlp-zurich/images/pipeline.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/nlp-zurich/images/pipeline.png -------------------------------------------------------------------------------- /nlp-zurich/images/qa_arch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/nlp-zurich/images/qa_arch.png -------------------------------------------------------------------------------- /nlp-zurich/images/speech2text.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/nlp-zurich/images/speech2text.png -------------------------------------------------------------------------------- /nlp-zurich/images/tapas.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/nlp-zurich/images/tapas.png -------------------------------------------------------------------------------- /nlp-zurich/slides.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/nlp-zurich/slides.pdf 
-------------------------------------------------------------------------------- /transformers-book-reading-group/session-1/transformers-reading-group-session1.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/transformers-book-reading-group/session-1/transformers-reading-group-session1.pdf -------------------------------------------------------------------------------- /transformers-book-reading-group/session-2/images/book_cover.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/transformers-book-reading-group/session-2/images/book_cover.png -------------------------------------------------------------------------------- /transformers-book-reading-group/session-2/images/chapter02_hf-libraries.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/transformers-book-reading-group/session-2/images/chapter02_hf-libraries.png -------------------------------------------------------------------------------- /transformers-book-reading-group/session-3/Chapter_3.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 41, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "import math\n", 10 | "import torch\n", 11 | "import torch.nn as nn\n", 12 | "import torch.nn.functional as F" 13 | ] 14 | }, 15 | { 16 | "cell_type": "code", 17 | "execution_count": 42, 18 | "metadata": {}, 19 | "outputs": [], 20 | "source": [ 21 | "vocab_size = 20\n", 22 | "embedding_dim = 512\n", 23 | "dropout_prob = 0.5\n", 24 | "num_heads = 8\n", 25 | "seq_len = 5\n", 26 | "embedding_layer = nn.Embedding(vocab_size, embedding_dim)\n", 27 | "input_ids = torch.randint(low=0, high=vocab_size, size=(1, seq_len))\n", 28 | "embeddings = embedding_layer(input_ids)" 29 | ] 30 | }, 31 | { 32 | "cell_type": "code", 33 | "execution_count": 43, 34 | "metadata": {}, 35 | "outputs": [], 36 | "source": [ 37 | "def check_shape(func, shape, *args, **kwargs):\n", 38 | "\tassert func(*args, **kwargs).shape == shape\n", 39 | "expected_shape = torch.Size([1, seq_len, embedding_dim])" 40 | ] 41 | }, 42 | { 43 | "cell_type": "markdown", 44 | "metadata": {}, 45 | "source": [ 46 | "# Scaled dot prod attention / Optional Mask" 47 | ] 48 | }, 49 | { 50 | "cell_type": "code", 51 | "execution_count": 44, 52 | "metadata": {}, 53 | "outputs": [], 54 | "source": [ 55 | "def scaled_dot_product_attention(query, key, value, mask=None, verbose=False):\n", 56 | "\t## query, key, value - bs, num_tokens, embedding_size\n", 57 | "\tattention_scores = torch.bmm(query, key.transpose(1, 2)) # bs, num_tokens, num_tokens\n", 58 | "\tattention_scores /= math.sqrt(query.shape[-1])\n", 59 | "\n", 60 | "\t# Apply mask before softmax\n", 61 | "\tif mask is not None:\n", 62 | "\t\tif verbose:\n", 63 | "\t\t\tprint (\"\\n-------Before-------\\n\")\n", 64 | "\t\t\tprint (attention_scores)\n", 65 | "\t\tattention_scores.masked_fill_(mask == 0, -float('inf')) # '_' means its an in-place op\n", 66 | "\n", 67 | "\t\tif verbose:\n", 68 | "\t\t\tprint (\"\\n-------After-------\\n\")\n", 69 | "\t\t\tprint (attention_scores)\n", 70 | "\n", 71 | "\tattention_weights = 
F.softmax(attention_scores, dim=-1)\n", 72 | "\n", 73 | "\tif verbose:\n", 74 | "\t\tprint (\"\\n-------Attention Weights-------\\n\")\n", 75 | "\t\tprint (attention_weights)\n", 76 | "\tassert (torch.allclose(attention_weights[0].sum(-1), torch.ones(attention_weights.shape[1])))\n", 77 | "\treturn torch.bmm(attention_weights, value)" 78 | ] 79 | }, 80 | { 81 | "cell_type": "code", 82 | "execution_count": 46, 83 | "metadata": {}, 84 | "outputs": [], 85 | "source": [ 86 | "check_shape(scaled_dot_product_attention, expected_shape, embeddings, embeddings, embeddings)" 87 | ] 88 | }, 89 | { 90 | "cell_type": "markdown", 91 | "metadata": {}, 92 | "source": [ 93 | "# Attention Layer" 94 | ] 95 | }, 96 | { 97 | "cell_type": "code", 98 | "execution_count": 47, 99 | "metadata": {}, 100 | "outputs": [], 101 | "source": [ 102 | "class AttentionLayer(nn.Module):\n", 103 | " def __init__(self, input_dim, embedding_dim, mask=None):\n", 104 | " super().__init__()\n", 105 | " self.query = nn.Linear(input_dim, embedding_dim)\n", 106 | " self.key = nn.Linear(input_dim, embedding_dim)\n", 107 | " self.value = nn.Linear(input_dim, embedding_dim)\n", 108 | " self.mask = mask\n", 109 | " \n", 110 | " def forward(self, x):\n", 111 | " query_embed = self.query(x)\n", 112 | " key_embed = self.key(x)\n", 113 | " value_embed = self.value(x)\n", 114 | " \n", 115 | " return scaled_dot_product_attention(query_embed, key_embed, value_embed, self.mask)" 116 | ] 117 | }, 118 | { 119 | "cell_type": "code", 120 | "execution_count": 48, 121 | "metadata": {}, 122 | "outputs": [], 123 | "source": [ 124 | "check_shape(AttentionLayer(embedding_dim, embedding_dim).forward, expected_shape, embeddings)" 125 | ] 126 | }, 127 | { 128 | "cell_type": "markdown", 129 | "metadata": {}, 130 | "source": [ 131 | "# Multi-Head Attention" 132 | ] 133 | }, 134 | { 135 | "cell_type": "code", 136 | "execution_count": 49, 137 | "metadata": {}, 138 | "outputs": [], 139 | "source": [ 140 | "class MultiHeadAttention(nn.Module):\n", 141 | " def __init__(self, num_heads, embedding_dim, attention_layer, mask=None):\n", 142 | " super().__init__()\n", 143 | " self.num_heads = num_heads\n", 144 | " per_head_dim = embedding_dim // num_heads\n", 145 | " self.attention_layers = nn.ModuleList([attention_layer(embedding_dim, per_head_dim, mask) \n", 146 | " for _ in range(num_heads)])\n", 147 | " self.lin = nn.Linear(embedding_dim, embedding_dim)\n", 148 | " \n", 149 | " def forward(self, x, key=None, value=None):\n", 150 | " if key is not None and value is not None:\n", 151 | " x = torch.cat([self.attention_layers[i](x, key, value) for i in range(self.num_heads)], dim=-1)\n", 152 | " else:\n", 153 | " x = torch.cat([self.attention_layers[i](x) for i in range(self.num_heads)], dim=-1)\n", 154 | " return self.lin(x)" 155 | ] 156 | }, 157 | { 158 | "cell_type": "code", 159 | "execution_count": 50, 160 | "metadata": {}, 161 | "outputs": [], 162 | "source": [ 163 | "check_shape(MultiHeadAttention(num_heads, embedding_dim, AttentionLayer).forward, expected_shape, embeddings)" 164 | ] 165 | }, 166 | { 167 | "cell_type": "markdown", 168 | "metadata": {}, 169 | "source": [ 170 | "# Position-wise FFN aka 1-D Conv" 171 | ] 172 | }, 173 | { 174 | "cell_type": "code", 175 | "execution_count": 51, 176 | "metadata": {}, 177 | "outputs": [], 178 | "source": [ 179 | "class PFFN(nn.Module): \n", 180 | "\tdef __init__(self, embedding_dim, dropout_prob):\n", 181 | "\t\tsuper().__init__()\n", 182 | "\t\tself.lin1 = nn.Linear(embedding_dim, embedding_dim * 4)\n", 183 | 
"\t\tself.lin2 = nn.Linear(embedding_dim * 4, embedding_dim)\n", 184 | "\t\tself.dropout = nn.Dropout(dropout_prob)\n", 185 | "\n", 186 | "\tdef forward(self, x):\n", 187 | "\t\tx = F.gelu(self.lin1(x))\n", 188 | "\t\tx = self.dropout(self.lin2(x))\n", 189 | "\t\treturn x" 190 | ] 191 | }, 192 | { 193 | "cell_type": "code", 194 | "execution_count": 52, 195 | "metadata": {}, 196 | "outputs": [], 197 | "source": [ 198 | "check_shape(PFFN(embedding_dim, dropout_prob).forward, expected_shape, embeddings)" 199 | ] 200 | }, 201 | { 202 | "cell_type": "markdown", 203 | "metadata": {}, 204 | "source": [ 205 | "# LayerNorm, Skip-connections and the Encoder Layer" 206 | ] 207 | }, 208 | { 209 | "cell_type": "code", 210 | "execution_count": 53, 211 | "metadata": {}, 212 | "outputs": [], 213 | "source": [ 214 | "class TransformerEncoderLayer(nn.Module):\n", 215 | "\tdef __init__(self, num_heads, embedding_dim, dropout_prob):\n", 216 | "\t\tsuper().__init__()\n", 217 | "\t\tself.attention = MultiHeadAttention(num_heads, embedding_dim, AttentionLayer)\n", 218 | "\t\tself.layer_norm1 = nn.LayerNorm(embedding_dim)\n", 219 | "\t\tself.pffn = PFFN(embedding_dim, dropout_prob)\n", 220 | "\t\tself.layer_norm2 = nn.LayerNorm(embedding_dim)\n", 221 | "\n", 222 | "\tdef forward(self, x):\n", 223 | "\t\tattn_op = self.attention(x)\n", 224 | "\t\tx = x + attn_op # Skip connection\n", 225 | "\t\tx = self.layer_norm1(x)\n", 226 | "\t\tx = self.pffn(x) + x # Skip connection\n", 227 | "\t\treturn self.layer_norm2(x)" 228 | ] 229 | }, 230 | { 231 | "cell_type": "code", 232 | "execution_count": 54, 233 | "metadata": {}, 234 | "outputs": [], 235 | "source": [ 236 | "check_shape(TransformerEncoderLayer(num_heads, embedding_dim, dropout_prob).forward, expected_shape, embeddings)" 237 | ] 238 | }, 239 | { 240 | "cell_type": "markdown", 241 | "metadata": {}, 242 | "source": [ 243 | "# Masked Multi-Head Attention" 244 | ] 245 | }, 246 | { 247 | "cell_type": "code", 248 | "execution_count": 55, 249 | "metadata": {}, 250 | "outputs": [], 251 | "source": [ 252 | "mask = torch.tril(torch.ones(seq_len, seq_len)).unsqueeze(0)\n", 253 | "check_shape(scaled_dot_product_attention, expected_shape, embeddings, embeddings, embeddings, mask=mask, verbose=False)" 254 | ] 255 | }, 256 | { 257 | "cell_type": "code", 258 | "execution_count": 56, 259 | "metadata": {}, 260 | "outputs": [], 261 | "source": [ 262 | "check_shape(MultiHeadAttention(num_heads, embedding_dim, AttentionLayer, mask).forward, expected_shape, embeddings)" 263 | ] 264 | }, 265 | { 266 | "cell_type": "markdown", 267 | "metadata": {}, 268 | "source": [ 269 | "# Encoder-Decoder Attention" 270 | ] 271 | }, 272 | { 273 | "cell_type": "code", 274 | "execution_count": 57, 275 | "metadata": {}, 276 | "outputs": [], 277 | "source": [ 278 | "class EncoderDecoderAttentionLayer(nn.Module):\n", 279 | " def __init__(self, input_dim, embedding_dim, mask=None):\n", 280 | " super().__init__()\n", 281 | " self.query = nn.Linear(input_dim, embedding_dim)\n", 282 | " self.key = nn.Linear(input_dim, embedding_dim)\n", 283 | " self.value = nn.Linear(input_dim, embedding_dim)\n", 284 | " self.mask = mask\n", 285 | " \n", 286 | " def forward(self, x, key, value):\n", 287 | " query_embed = self.query(x)\n", 288 | " key_embed = self.key(key)\n", 289 | " value_embed = self.key(value)\n", 290 | "\n", 291 | " return scaled_dot_product_attention(query_embed, key_embed, value_embed, self.mask)" 292 | ] 293 | }, 294 | { 295 | "cell_type": "code", 296 | "execution_count": 58, 297 | "metadata": {}, 298 | 
"outputs": [], 299 | "source": [ 300 | "check_shape(EncoderDecoderAttentionLayer(embedding_dim, embedding_dim).forward, expected_shape,\n", 301 | " embeddings, embeddings, embeddings)" 302 | ] 303 | }, 304 | { 305 | "cell_type": "code", 306 | "execution_count": 59, 307 | "metadata": {}, 308 | "outputs": [], 309 | "source": [ 310 | "check_shape(MultiHeadAttention(num_heads, embedding_dim, EncoderDecoderAttentionLayer, mask).forward, expected_shape, \n", 311 | " x=embeddings, key=embeddings, value=embeddings)" 312 | ] 313 | }, 314 | { 315 | "cell_type": "markdown", 316 | "metadata": {}, 317 | "source": [ 318 | "# DecoderLayer" 319 | ] 320 | }, 321 | { 322 | "cell_type": "code", 323 | "execution_count": 60, 324 | "metadata": {}, 325 | "outputs": [], 326 | "source": [ 327 | "class TransformerDecoderLayer(nn.Module):\n", 328 | "\tdef __init__(self, num_heads, embedding_dim, dropout_prob, mask):\n", 329 | "\t\tsuper().__init__()\n", 330 | "\t\tself.masked_attention = MultiHeadAttention(num_heads, embedding_dim, AttentionLayer, mask)\n", 331 | "\t\tself.encoder_decoder_attention = MultiHeadAttention(num_heads, embedding_dim, EncoderDecoderAttentionLayer)\n", 332 | "\t\tself.pffn = PFFN(embedding_dim, dropout_prob)\n", 333 | "\t\tself.layer_norm1 = nn.LayerNorm(embedding_dim)\n", 334 | "\t\tself.layer_norm2 = nn.LayerNorm(embedding_dim)\n", 335 | "\t\tself.layer_norm3 = nn.LayerNorm(embedding_dim)\n", 336 | "\n", 337 | "\tdef forward(self, x, key, value):\n", 338 | "\t\tattn_op = self.masked_attention(x)\n", 339 | "\t\tx = x + attn_op # Skip connection\n", 340 | "\t\tlayer_norm_op = self.layer_norm1(x)\n", 341 | "\t\tenc_dec_attn_op = self.encoder_decoder_attention(layer_norm_op, key, value)\n", 342 | "\t\tx = enc_dec_attn_op + layer_norm_op # Skip connection\n", 343 | "\t\tlayer_norm_op = self.layer_norm2(x)\n", 344 | "\t\tx = self.pffn(x) + x # Skip connection\n", 345 | "\t\treturn self.layer_norm3(x)" 346 | ] 347 | }, 348 | { 349 | "cell_type": "code", 350 | "execution_count": 61, 351 | "metadata": {}, 352 | "outputs": [], 353 | "source": [ 354 | "check_shape(TransformerDecoderLayer(num_heads, embedding_dim, dropout_prob, mask).forward, expected_shape,\n", 355 | " x=embeddings, key=embeddings, value=embeddings)" 356 | ] 357 | }, 358 | { 359 | "cell_type": "markdown", 360 | "metadata": {}, 361 | "source": [ 362 | "# Encoder, Decoder and Transformer Network" 363 | ] 364 | }, 365 | { 366 | "cell_type": "code", 367 | "execution_count": 62, 368 | "metadata": {}, 369 | "outputs": [], 370 | "source": [ 371 | "class Transformer(nn.Module):\n", 372 | " def __init__(self, num_heads, embedding_layer, pos_embedding_layer, dropout_prob, seq_len, num_classes):\n", 373 | " super().__init__()\n", 374 | " embedding_dim = embedding_layer.embedding_dim\n", 375 | " self.embedding_layer = embedding_layer\n", 376 | " self.pos_embedding_layer = pos_embedding_layer\n", 377 | " self.embedding_layer_norm = nn.LayerNorm(embedding_dim)\n", 378 | " self.enc = TransformerEncoderLayer(num_heads, embedding_dim, dropout_prob)\n", 379 | " mask = torch.tril(torch.ones(seq_len, seq_len)).unsqueeze(0)\n", 380 | " self.dec = TransformerDecoderLayer(num_heads, embedding_dim, dropout_prob, mask)\n", 381 | " self.lin = nn.Linear(embedding_dim, num_classes)\n", 382 | " \n", 383 | " def forward(self, token_ids, labels=None):\n", 384 | " embeddings = self.embedding_layer(token_ids)\n", 385 | " seq_len = token_ids.shape[-1]\n", 386 | " pos_embeddings = self.pos_embedding_layer(torch.arange(seq_len)).unsqueeze(0)\n", 387 | " embeddings = 
self.embedding_layer_norm(embeddings + pos_embeddings)\n", 388 | "        x = self.enc(embeddings)\n", 389 | "        \n", 390 | "        if labels is not None:\n", 391 | "            dec_embeddings = self.embedding_layer(labels) + pos_embeddings\n", 392 | "            x = self.dec(dec_embeddings, x, x) # Pass encoder output as the key and value args\n", 393 | "        return self.lin(x)\n" 394 | ] 395 | }, 396 | { 397 | "cell_type": "code", 398 | "execution_count": 78, 399 | "metadata": {}, 400 | "outputs": [], 401 | "source": [ 402 | "import random\n", 403 | "input_ids = torch.Tensor(random.sample(range(vocab_size // 2), seq_len)).long()\n", 404 | "labels = torch.Tensor(random.sample(range(vocab_size // 2, vocab_size), seq_len)).long()\n", 405 | "embedding_layer = nn.Embedding(vocab_size, embedding_dim)\n", 406 | "positional_embedding_layer = nn.Embedding(seq_len, embedding_dim)\n", 407 | "transformer = Transformer(num_heads, embedding_layer, positional_embedding_layer, dropout_prob, seq_len, vocab_size)\n", 408 | "\n", 409 | "check_shape(transformer.forward, torch.Size([1, seq_len, vocab_size]), input_ids, labels)" 410 | ] 411 | }, 412 | { 413 | "cell_type": "markdown", 414 | "metadata": {}, 415 | "source": [ 416 | "# Overfit" 417 | ] 418 | }, 419 | { 420 | "cell_type": "code", 421 | "execution_count": 79, 422 | "metadata": {}, 423 | "outputs": [ 424 | { 425 | "data": { 426 | "text/plain": [ 427 | "(tensor([3, 9, 2, 6, 8]), tensor([12, 18, 19, 13, 17]))" 428 | ] 429 | }, 430 | "execution_count": 79, 431 | "metadata": {}, 432 | "output_type": "execute_result" 433 | } 434 | ], 435 | "source": [ 436 | "input_ids, labels" 437 | ] 438 | }, 439 | { 440 | "cell_type": "code", 441 | "execution_count": 80, 442 | "metadata": {}, 443 | "outputs": [ 444 | { 445 | "name": "stderr", 446 | "output_type": "stream", 447 | "text": [ 448 | "100%|██████████| 100/100 [00:00<00:00, 100.84it/s]\n" 449 | ] 450 | } 451 | ], 452 | "source": [ 453 | "import torch.optim as optim\n", 454 | "from tqdm import tqdm\n", 455 | "optimizer = optim.SGD(transformer.parameters(), lr=1e-4)\n", 456 | "num_epochs = 100\n", 457 | "losses = []\n", 458 | "\n", 459 | "for i in tqdm(range(num_epochs)):\n", 460 | "    optimizer.zero_grad()  # Clear gradients left over from the previous step\n", 461 | "    op = transformer(input_ids, labels).squeeze()\n", 462 | "    loss = F.cross_entropy(op, labels.squeeze())\n", 463 | "    losses.append(loss.item())\n", 464 | "    loss.backward()\n    optimizer.step()\n" 465 | ] 466 | }, 467 | { 468 | "cell_type": "code", 469 | "execution_count": 81, 470 | "metadata": {}, 471 | "outputs": [ 472 | { 473 | "data": { 474 | "text/plain": [ 475 | "[<matplotlib.lines.Line2D at 0x...>]" 476 | ] 477 | }, 478 | "execution_count": 81, 479 | "metadata": {}, 480 | "output_type": "execute_result" 481 | }, 482 | { 483 | "data": { 484 | "image/png": 
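The training loop squeezes the model output before the loss because `F.cross_entropy` expects logits of shape `(N, C)` and integer targets of shape `(N,)`: each of the `seq_len` positions is scored as an independent classification over the vocabulary. A small sketch with the same toy sizes as the notebook's setup (`seq_len=5`, `vocab_size=20`):

```python
import torch
import torch.nn.functional as F

seq_len, vocab_size = 5, 20
logits = torch.randn(1, seq_len, vocab_size).squeeze()  # (5, 20) after dropping the batch dim
targets = torch.randint(vocab_size, (seq_len,))         # (5,) integer token ids
loss = F.cross_entropy(logits, targets)                 # mean over the 5 positions
print(loss.item())  # roughly log(20) ≈ 3.0 for random logits
```

Note also that calling `optimizer.zero_grad()` at the top of each iteration keeps one step's gradients from accumulating into the next.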
"iVBORw0KGgoAAAANSUhEUgAAAiMAAAGdCAYAAADAAnMpAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjYuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy89olMNAAAACXBIWXMAAA9hAAAPYQGoP6dpAAA18UlEQVR4nO3deXxU5d3///eZTDIJJDMhxGwkQBRk32QNtuKCIi6V2ttaqzfUrdVCK6X33dvYalcb++3PpbdVua1V2lqKUhUrrhg2KQFklX2HsGRjSSYJZJs5vz9CRqIEMlnmzPJ6Ph7nATlzncxnrocP5u11Xec6hmmapgAAACxis7oAAAAQ2QgjAADAUoQRAABgKcIIAACwFGEEAABYijACAAAsRRgBAACWIowAAABL2a0uoDW8Xq+OHj2qhIQEGYZhdTkAAKAVTNNUZWWlMjIyZLO1PP4REmHk6NGjysrKsroMAADQBocOHVJmZmaLr4dEGElISJDU+GGcTqfF1QAAgNZwu93KysryfY+3JCTCSNPUjNPpJIwAABBiLrTEggWsAADAUoQRAABgKcIIAACwFGEEAABYijACAAAsRRgBAACWIowAAABLEUYAAIClCCMAAMBShBEAAGApwggAALAUYQQAAFiKMHIBB49X6/mle1Rd22B1KQAAhKWQeGqvVUzT1A//sUGbDlfoaPlp/WbKEKtLAgAg7DAych7/3nNcmw5XSJLmri7UrpJKiysCACD8EEbO449LdkuSHHabvKb064XbZJqmxVUBABBeCCMtWHfwhFbtO6HoKEN/uWeMYqJs+mT3MS3dWWZ1aQAAhBXCSAueX7JXknTriEyNu7i7vnN5b0nSb97dpnqP18LKAAAILxEdRrxe85zTLtuOupW/o1Q2Q3rgykskSTOu7qPuXWO0t6xac1cXBrpUAADCVkSHkVdWHtB3XvlUe8uqmp1/YVnjqMgNQ9KVndxVkuSMjdaPrr1UkvT0x7tUcao+sMUCABCmIjaM1NR79PySPVq2q0yTnl6u3763XZU19dp/rFrvfnZUkvT9K/s0u+Zbo7N0aWq8yk/V6/H3tqmm3mNF6QAAhBXDDIHbQ9xut1wulyoqKuR0Ojvs9+4/Vq1fL9ymxTtKJUnJ8Q717t5Faw+e1DX9U/Tn74z+0jWf7C7Tf/55zZn2MbprXC/dNa6XkuMdHVYXAADhoLXf3xEdRpos2VGqXy/cpn3Hqn3n3nhwvEb26nbO9v9YU6hn83fraEWNJCnGbtPXh/fQozcPVLyDfeQAAJAII36ra/Bqzsr9emHpXn2l70V69o4R521f7/Hqgy3FemnFfm06VC5J+s743vrF1wZ1Sn0AAIQawkg7mKYpwzBa3fbN9Uf04/mblBzv0OpHrlGUrXXXAgAQzlr7/R2xC1jPp7VBpKnt14ZnKLFLtI5V1Wr1vuOdWBkAAOGHMNIBoqNsmjw4TZL0zpk7cQAAQOsQRjrITUMzJEnvbylmh1YAAPxAGOkgY7OTlBwfo/JT9fr3nmNWlwMAQMggjHQQe5RNkwenS5IWflZkcTUAAIQOwkgHunlY41TNh1uLVdvA7qwAALSGX2HkhRde0NChQ+V0OuV0OpWTk6P333//vNfMnz9f/fv3V2xsrIYMGaL33nuvXQUHs1G9uinNGavKmgYt38VUDQAAreFXGMnMzNQTTzyhdevWae3atbr66qt1yy23aOvWredsv3LlSt1xxx269957tWHDBk2ZMkVTpkzRli1bOqT4YGOzGbphSNNUDXfVAADQGu3e9CwpKUm///3vde+9937ptdtvv13V1dVauHCh79y4ceM0fPhwzZ49u9XvEehNz9pjfeFJ3fr8SnWNidLan12ruJgoq0sCAMASnb7pmcfj0bx581RdXa2cnJxztikoKNDEiRObnZs0aZIKCgra+rZBb0RWonokxqm6zqMlO0utLgcAgKDndxjZvHmz4uPj5XA49MADD+itt97SwIEDz9m2uLhYqampzc6lpqaquLj4vO9RW1srt9vd7AgVhmHopmFM1QAA0Fp+h5F+/fpp48aNWr16tR588EFNmzZN27Zt69Ci8vLy5HK5fEdWVlaH/v7OdvOZDdDyt5equrbB4moAAAhufoeRmJgY9enTRyNHjlReXp6GDRumP/zhD+dsm5aWppKSkmbnSkpKlJaWdt73yM3NVUVFhe84dOiQv2VaalCGUz2Tuqi2watPdpdZXQ4AAEGt3fuMeL1e1dbWnvO1nJwc5efnNzu3aNGiFteYNHE4HL7bh5uOUGIYhq4d2Dg99dG2kgu0BgAgsvkVRnJzc7V8+XIdOHBAmzdvVm5urpYuXao777xTkjR16lTl5ub62j/00EP64IMP9OSTT2rHjh36xS9+obVr12rGjBkd+ymC0HVnwsjiHaVq4Fk1AAC0yO5P49LSUk2dOlVFRUVyuVwaOnSoPvzwQ1177bWSpMLCQtlsn+eb8ePHa+7cufrZz36mRx55RH379tWCBQs0ePDgjv0UQWhkr27q1iVaJ0/V69MDJ5VzSXerSwIAICi1e5+RQAilfUbO9uPXN+mN9Yd1z+XZeuzmc99xBABAuOr0fUZwYdcNapyqWbS9WCGQ+QAAsARhpBN9tW+yHHabDp04rR3FlVaXAwBAUCKMdKIuMXZ9tW+yJGkRd9UAAHBOhJFOdt3Axj1VCCMAAJwbYaSTXT0gRYYhbT5SoaPlp60uBwCAoEMY6WTJ8Q6N7NlNkvTxdkZHAAD4IsJIAPjuqmGqBgCALyGMBMC1Z9aNFOw9rorT9RZXAwBAcCGMBEB2clf1SYlXg9fU0p2lVpcDAEBQIYwESNOzav6+upAN0AAAOAthJEDuHNdLsdE2rdl/Qgs/K7K6HAAAggZhJEB6JMbp+1f2kSQ9/u52Vdc2WFwRAADBgTASQN+94mJlJcWp2F2j55bssbocAACCAmEkgGKjo/TojY1P733pk/3af6za4ooAALAeYSTArh2YqisuvUh1Hq9+9c5Wq8sBAMByhJEAMwxDP795oKKjDC3ZWaZ8dmUFAEQ4wogFLrkoXvd8JVuS9Mt3tqn8VJ3FFQEAYB3CiEV+cHVfpTodKjxxSrc+v1IHj7N+BAAQmQgjFol32PXXe8YqwxWrfceq9fXnV2rdwRNWlwUAQMARRizULy1BC6ZfriE9XDpRXac7/rRa72w6anVZAAAEFGHEYinOWL32vXG6dmCq6hq8+sE/NmjWaxu1ZEep6hq8VpcHAECnM8wQeFCK2+2Wy+VSRUWFnE6n1eV0Co/X1G/f264/r9jvO+eMtevagWn6+oge+krfZAurAwDAf639/iaMBJm1B07oX5uO6v0txSqrrPWdn/9Ajkb3TrKwMgAA/NPa72+maYLMqN5J+tUtg7Uq9xq9/r0cDenhkiStP3jS4soAAOgchJEgFWUzNCY7SZMGpUqSthe5La4IAIDOQRgJcv3TGoe1thdVWlwJAACdgzAS5AZkNIaRvWVVqm3wWFwNAAAdjzAS5DJcsXLG2tXgNbWntMrqcgAA6HCEkSBnGI
b6pzeOjuxgqgYAEIYIIyFgYHrTuhEWsQIAwg9hJAT0T0uQJO0oZmQEABB+CCMhYMBZIyMhsEcdAAB+IYyEgEtTE2QzpOPVdSqrqr3wBQAAhBDCSAiIi4lS7+SukthvBAAQfggjIWJAWtMdNSxiBQCEF8JIiBiQ3riIlTtqAADhhjASIpq2heeOGgBAuCGMhIimbeH3lLItPAAgvBBGQsTZ28LvLa22uhwAADoMYSREnL0tPOtGAADhhDASQtgWHgAQjggjIYRt4QEA4civMJKXl6fRo0crISFBKSkpmjJlinbu3Hnea+bMmSPDMJodsbGx7So6UrEtPAAgHPkVRpYtW6bp06dr1apVWrRokerr63Xdddepuvr8CyqdTqeKiop8x8GDB9tVdKRiW3gAQDiy+9P4gw8+aPbznDlzlJKSonXr1umKK65o8TrDMJSWlta2CuHTtC38vrJqbS+qVEoCI0wAgNDXrjUjFRUVkqSkpKTztquqqlKvXr2UlZWlW265RVu3bj1v+9raWrnd7mYHGrEtPAAg3LQ5jHi9Xs2cOVOXX365Bg8e3GK7fv366eWXX9bbb7+tV199VV6vV+PHj9fhw4dbvCYvL08ul8t3ZGVltbXMsMO28ACAcGOYbVwJ+eCDD+r999/XihUrlJmZ2err6uvrNWDAAN1xxx369a9/fc42tbW1qq39fE2E2+1WVlaWKioq5HQ621Ju2Ph4W4nu++taZXaL09L/ulL2KG6IAgAEJ7fbLZfLdcHv7zZ9k82YMUMLFy7UkiVL/AoikhQdHa0RI0Zoz549LbZxOBxyOp3NDjQad0l3desSrcMnT2vep4esLgcAgHbzK4yYpqkZM2borbfe0uLFi5Wdne33G3o8Hm3evFnp6el+Xwsp3mHXzImXSpKeXrRLlTX1FlcEAED7+BVGpk+frldffVVz585VQkKCiouLVVxcrNOnT/vaTJ06Vbm5ub6ff/WrX+mjjz7Svn37tH79et111106ePCg7rvvvo77FBHm22N76uKLuup4dZ1eWLrX6nIAAGgXv8LICy+8oIqKCl155ZVKT0/3Ha+99pqvTWFhoYqKinw/nzx5Uvfff78GDBigG264QW63WytXrtTAgQM77lNEmOgom3InD5Ak/XnFfh0pP32BKwAACF5tXsAaSK1dABNJTNPUt15cpdX7T+jrI3ro6duHW10SAADNdOoCVljPMAz97MbG0aW3NhzRZ4fLrS0IAIA2IoyEsCGZLt06oock6Tfvbud5NQCAkEQYCXH/NamfHHab1uw/oWW7yqwuBwAAvxFGQlxGYpy+MbJxrxfCCAAgFBFGwsCY3o3PBlpfWG5tIQAAtAFhJAxc1rObJGnb0QrV1HssrgYAAP8QRsJAVlKckuNjVO8xteVIhdXlAADgF8JIGDAMQyPOjI6sLzxpcTUAAPiHMBImmqZq1h8st7YQAAD8RBgJE5f1TJTUODLCfiMAgFBCGAkTQzMTZbcZKq2s5Vk1AICQQhgJE3ExURqQ3rjvP7f4AgBCCWEkjPimag6yiBUAEDoII2Hksl6Ni1g3cEcNACCEEEbCSNMdNVuPutn8DAAQMggjYSSzW5yS4x1q8JrazOZnAIAQQRgJI4ZhsG4EABByCCNhpmndCDuxAgBCBWEkzPh2Yi0sZ/MzAEBIIIyEmaGZLtlthsoqa3X4JJufAQCCH2EkzMRGR2lgRtPmZ0zVAACCH2EkDDVN1WxgJ1YAQAggjIShEWfuqPlkd5mO8pwaAECQI4yEodG9k2QY0t6yan3ld4t1z5xPtWhbiRo8XqtLAwDgSwgjYSgjMU4vTR2lnIu7y2tKi3eU6v6/rtVX/98SbTxUbnV5AAA0Y7e6AHSOawak6poBqdpXVqXXPj2k+esOq6iiRq+uOqjhWYlWlwcAgA8jI2Hu4ovilXvDAD31zWGSpDX7T1hcEQAAzRFGIsTIXt1kM6TCE6dUVMGiVgBA8CCMRIiE2GgNynBJYnQEABBcCCMRZEx2kiTCCAAguBBGIghhBAAQjAgjEWR078Ywsru0Sieq6yyuBgCARoSRCJLUNUaXpsZLkj49wOgIACA4EEYiDFM1AIBgQxiJMGOyu0sijAAAggdhJMKMObNuZOvRClXW1FtcDQAAhJGIk+aKVa/uXeQ1pXUHT1pdDgAAhJFI1HRXDVM1AIBgQBiJQCxiBQAEE8JIBBp7JoxsOlyumnqPxdUAACIdYSQC9UzqolSnQ/UeUxsKy60uBwAQ4fwKI3l5eRo9erQSEhKUkpKiKVOmaOfOnRe8bv78+erfv79iY2M1ZMgQvffee20uGO1nGAa3+AIAgoZfYWTZsmWaPn26Vq1apUWLFqm+vl7XXXedqqurW7xm5cqVuuOOO3Tvvfdqw4YNmjJliqZMmaItW7a0u3i0nW/dyIHjFlcCAIh0hmmaZlsvLisrU0pKipYtW6YrrrjinG1uv/12VVdXa+HChb5z48aN0/DhwzV79uxWvY/b7ZbL5VJFRYWcTmdby8VZdpVU6rqnlys22qbNv5ik6Chm7AAAHau139/t+gaqqKiQJCUlJbXYpqCgQBMnTmx2btKkSSooKGjxmtraWrnd7mYHOlafi+KVEGtXTb1Xe0qrrC4HABDB2hxGvF6vZs6cqcsvv1yDBw9usV1xcbFSU1ObnUtNTVVxcXGL1+Tl5cnlcvmOrKystpaJFthshgakNabU7UWEPQCAddocRqZPn64tW7Zo3rx5HVmPJCk3N1cVFRW+49ChQx3+HpAGpCdIIowAAKxlb8tFM2bM0MKFC7V8+XJlZmaet21aWppKSkqanSspKVFaWlqL1zgcDjkcjraUBj8MSG8aGam0uBIAQCTza2TENE3NmDFDb731lhYvXqzs7OwLXpOTk6P8/Pxm5xYtWqScnBz/KkWH+zyMuNWOdcwAALSLXyMj06dP19y5c/X2228rISHBt+7D5XIpLi5OkjR16lT16NFDeXl5kqSHHnpIEyZM0JNPPqkbb7xR8+bN09q1a/Xiiy928EeBv/qlJchmSMer61RWWasUZ6zVJQEAIpBfIyMvvPCCKioqdOWVVyo9Pd13vPbaa742hYWFKioq8v08fvx4zZ07Vy+++KKGDRumf/7zn1qwYMF5F70iMGKjo5Sd3FWStI11IwAAi/g1MtKaofylS5d+6dxtt92m2267zZ+3QoAMSHdqb1m1thdV6sp+KVaXAwCIQOx0FeHOXjcCAIAVCCMRbiBhBABgMcJIhGsaGdl3rFo19R6LqwEARCLCSIRLdTrUrUu0PF5Tu0vYFh4AEHiEkQhnGAbrRgAAliKMwBdGuL0XAGAFwggYGQEAWIowgmYPzGNbeABAoBFGoD4p8bLbDLlrGnS0osbqcgAAEYYwAjnsUbrkonhJ0vajTNUAAAKLMAJJzadqAAAIJMIIJJ21iLWYMAIACCzCCCSdfUdNpcWVAAAiDWEEkj4PIweOV+tUXYPF1QAAIglhBJKkixIcSo53yDSlHcWMjgAAAocwAh8WsQIArEAYgc9AdmIFAFiAMAKfgRlnn
lHDXiMAgAAijMBnUIZLUuMdNR4v28IDAAKDMAKf7OSuiouO0ul6j/Yfq7K6HABAhCCMwCfKZvgWsW5lqgYAECCEETTTNFVDGAEABAphBM0MOrOIdevRCosrAQBECsIImhnc4/OREdNkESsAoPMRRtBM39R42W2Gyk/V60j5aavLAQBEAMIImnHYo9Q3lUWsAIDAIYzgSz5fN0IYAQB0PsIIvmSQbydWFrECADofYQRfcvYiVgAAOhthBF8yIN0pw5CKKmp0vKrW6nIAAGGOMIIviXfY1bt7V0mMjgAAOh9hBOc0kEWsAIAAIYzgnNiJFQAQKIQRnNPgM8+o2cbICACgkxFGcE5NIyP7j1erurbB4moAAOGMMIJz6h7vUJozVqYpbS9idAQA0HkII2hR0+jIliOsGwEAdB7CCFrEtvAAgEAgjKBFAzPYiRUA0PkII2jR4B6NIyO7SytV1+C1uBoAQLgijKBFPRLjlNglWvUek/1GAACdxu8wsnz5ct18883KyMiQYRhasGDBedsvXbpUhmF86SguLm5rzQgQwzCUc3F3SdLyXccsrgYAEK78DiPV1dUaNmyYnnvuOb+u27lzp4qKinxHSkqKv28NC0y49CJJ0rJdpRZXAgAIV3Z/L5g8ebImT57s9xulpKQoMTHR7+tgrQn9GsPIxkPlKj9Vp8QuMRZXBAAINwFbMzJ8+HClp6fr2muv1b///e9AvS3aKd0Vp36pCfKa0ie7maoBAHS8Tg8j6enpmj17tt544w298cYbysrK0pVXXqn169e3eE1tba3cbnezA9ZpGh1ZtqvM4koAAOHI72kaf/Xr10/9+vXz/Tx+/Hjt3btXTz/9tP72t7+d85q8vDz98pe/7OzS0EoTLr1ILy7fp2W7ymSapgzDsLokAEAYseTW3jFjxmjPnj0tvp6bm6uKigrfcejQoQBWhy8a1bubusREqayyVtt4Tg0AoINZEkY2btyo9PT0Fl93OBxyOp3NDljHYY/S+Esab/FlqgYA0NH8nqapqqpqNqqxf/9+bdy4UUlJSerZs6dyc3N15MgR/fWvf5UkPfPMM8rOztagQYNUU1Ojl156SYsXL9ZHH33UcZ8CnW7CpRfp4+2lWrazTN+/so/V5QAAwojfYWTt2rW66qqrfD/PmjVLkjRt2jTNmTNHRUVFKiws9L1eV1enH//4xzpy5Ii6dOmioUOH6uOPP272OxD8JlyaImmr1h08qcqaeiXERltdEgAgTBimaZpWF3EhbrdbLpdLFRUVTNlY6Kr/b6n2H6vW7LtG6vrBaVaXAwAIcq39/ubZNGi1z3djZd0IAKDjEEbQak37jSw/c4svAAAdgTCCVhuX3V0xdpuOlJ/WntIqq8sBAIQJwghaLS4mSmOzkyQxVQMA6DiEEfjlyn6NT1tevIOn+AIAOgZhBH65pn9jGFm9/4TKT9VZXA0AIBwQRuCX3sld1S81QR6vqfztjI4AANqPMAK/TTqzx8iHW4strgQAEA4II/DbpEGpkqTlu8t0us5jcTUAgFBHGIHfBqY7ldktTjX1Xu6qAQC0G2EEfjMMQ5MGMVUDAOgYhBG0SVMYyd9eonqP1+JqAAChjDCCNhnZq5u6d42Ru6ZBq/Ydt7ocAEAII4ygTaJshq4d2LiQlakaAEB7EEbQZk1TNR9tLZHXy4PzAABtQxhBm43v013xDrtKK2u18XC51eUAAEIUYQRt5rBH6aoz28N/uIWpGgBA2xBG0C5NG6B9uLVYpslUDQDAf4QRtMuV/VIUY7fpwPFT2lFcaXU5AIAQRBhBu8Q77Lq6X+NUzV9WHrC2GABASCKMoN3u/Wq2JOnN9UdUWlljcTUAgFBDGEG7jerVTSN6JqrO49VfVx60uhwAQIghjKDdDMPQ9664WJL0t1UHVV3bYHFFAIBQQhhBh7h2YJp6d++iitP1en3tIavLAQCEEMIIOkSUzdB9X20cHfnziv1q4OF5AIBWIoygw/zHyEwldY3R4ZOn9R6boAEAWokwgg4TGx2lqTm9JEkvLt/LJmgAgFYhjKBDTc3prdhom7Yccatg73GrywEAhADCCDpUUtcY3TYyS5L00or9FlcDAAgFhBF0uG+P7SlJKth7XB4vUzUAgPMjjKDDXZqaoC4xUTpd79G+siqrywEABDnCCDpclM3QoAynJGnzkQqLqwEABDvCCDrF4B4uSdJnhwkjAIDzI4ygUww5E0a2MDICALgAwgg6xdDMxjCy9aibRawAgPMijKBTZCfH+xax7mURKwDgPAgj6BTNFrGybgQAcB6EEXSaIT0SJXFHDQDg/Agj6DRDMrm9FwBwYYQRdJqmO2q2sYgVAHAehBF0muzkeHVlESsA4AIII+g0jYtY2fwMAHB+foeR5cuX6+abb1ZGRoYMw9CCBQsueM3SpUt12WWXyeFwqE+fPpozZ04bSkUoGszmZwCAC/A7jFRXV2vYsGF67rnnWtV+//79uvHGG3XVVVdp48aNmjlzpu677z59+OGHfheL0NO0+dlnh8utLQQAELTs/l4wefJkTZ48udXtZ8+erezsbD355JOSpAEDBmjFihV6+umnNWnSJH/fHiGmaWRkW5FbDR6v7FHMDAIAmuv0b4aCggJNnDix2blJkyapoKCgxWtqa2vldrubHQhNFyd3VdeYKNXUe7W3rNrqcgAAQajTw0hxcbFSU1ObnUtNTZXb7dbp06fPeU1eXp5cLpfvyMrK6uwy0UlsNkODejBVAwBoWVCOmefm5qqiosJ3HDp0yOqS0A48wRcAcD5+rxnxV1pamkpKSpqdKykpkdPpVFxc3DmvcTgccjgcnV0aAqQpjLATKwDgXDp9ZCQnJ0f5+fnNzi1atEg5OTmd/dYIEkMymy9iBQDgbH6HkaqqKm3cuFEbN26U1Hjr7saNG1VYWCipcYpl6tSpvvYPPPCA9u3bp5/85CfasWOHnn/+eb3++uv60Y9+1DGfAEEvu3tXxTvsqqn3ag87sQIAvsDvMLJ27VqNGDFCI0aMkCTNmjVLI0aM0GOPPSZJKioq8gUTScrOzta7776rRYsWadiwYXryySf10ksvcVtvBLHZDA3KaHxo3obCcmuLAQAEHcM0zaB/gpnb7ZbL5VJFRYWcTqfV5aAN/vDxbj398S5d0z9Ff/7OaKvLAQAEQGu/v4PybhqEn+sHp0mSPtl9TFW1DRZXAwAIJoQRBMSlqfHKTu6qOo9XS3aUWl0OACCIEEYQEIZh+EZHPthabHE1AIBgQhhBwFw/qDGMLNlRqpp6j8XVAACCBWEEATM006V0V6xO1Xm0Yvcxq8sBAAQJwggCxjAMTTozOvL+FqZqAACNCCMIqKZ1Ix9vL1E9u7ECAEQYQYCN7p2k7l1jVHG6Xqv3nbC6HABAECCMIKCibIauHZgqSfpga5HF1QAAggFhBAHXNFXz4dYSeb1BvwEwAKCTEUYQcOMvSVaCw66yylptOHTS6nIAABYjjCDgYuw2XTMgRZL0AXfVAEDEI4zAEk1TNe9vKVYIPKsRANCJCCOwxIRLUxQXHaXDJ0/r
s8MVVpcDALAQYQSWiIuJ0sQzd9W8s+moxdUAAKxEGIFlbh6aLkl6d3MRd9UAQAQjjMAyE/pdpASHXUUVNVpXyF01ABCpCCOwjMMepevOPKtmIVM1ABCxCCOw1E3DmqZqiuVhqgYAIhJhBJb6Sp9kJXaJ1rGqWq3ed9zqcgAAFiCMwFLRUTZNPrPnyDufMVUDAJGIMALL3Tw0Q1LjBmj1Hq/F1QAAAo0wAsuNvbi7kuMdKj9VrxV7jlldDgAgwAgjsFyUzdCNQ85M1XBXDQBEHMIIgsJNwxqnahZtLVFNvcfiagAAgUQYQVAY2bOb0l2xqqxt0LJdZVaXAwAIIMIIgoLNZuimM9vDv7n+sMXVAAACiTCCoPEfI7MkSfnbS1VWWWtxNQCAQCGMIGj0S0vQsKxENXhNLdhwxOpyAAABQhhBULl9VOPoyGtrD8k02R4eACIBYQRB5eZh6YqNtmlPaZXWF5ZbXQ4AIAAIIwgqCbHRumFI40LW+WsPWVwNACAQCCMIOk1TNe9sOqrq2gaLqwEAdDbCCILOmOwkZSd3VXWdR+9uLrK6HABAJyOMIOgYhqHbRmVKYqoGACIBYQRB6RuXZcpmSJ8eOKm9ZVVWlwMA6ESEEQSlVGesruqXIkl6ndERAAhrhBEErW+OblzI+sa6I6r3eC2uBgDQWQgjCFpX909RcnyMjlXVatlOHp4HAOGKMIKgFR1l05ThPSRJ89cxVQMA4YowgqB226jPH553vIqH5wFAOGpTGHnuuefUu3dvxcbGauzYsVqzZk2LbefMmSPDMJodsbGxbS4YkaVfWoKGZroaH5638ajV5QAAOoHfYeS1117TrFmz9POf/1zr16/XsGHDNGnSJJWWlrZ4jdPpVFFRke84ePBgu4pGZLlt5Od7jvDwPAAIP36Hkaeeekr333+/7r77bg0cOFCzZ89Wly5d9PLLL7d4jWEYSktL8x2pqantKhqR5WvDeijGbtOO4kptPeq2uhwAQAfzK4zU1dVp3bp1mjhx4ue/wGbTxIkTVVBQ0OJ1VVVV6tWrl7KysnTLLbdo69at532f2tpaud3uZgcil6tLtK4b2Bhg/7nusMXVAAA6ml9h5NixY/J4PF8a2UhNTVVxcfE5r+nXr59efvllvf3223r11Vfl9Xo1fvx4HT7c8pdKXl6eXC6X78jKyvKnTIShpoWsCzYeUW2Dx+JqAAAdqdPvpsnJydHUqVM1fPhwTZgwQW+++aYuuugi/d///V+L1+Tm5qqiosJ3HDrEbZ2R7it9kpXmjFX5qXrlb295fRIAIPT4FUaSk5MVFRWlkpKSZudLSkqUlpbWqt8RHR2tESNGaM+ePS22cTgccjqdzQ5EtiiboVsvO7PnCNvDA0BY8SuMxMTEaOTIkcrPz/ed83q9ys/PV05OTqt+h8fj0ebNm5Wenu5fpYh4/3Hmrpplu8pU4q6xuBoAQEfxe5pm1qxZ+tOf/qS//OUv2r59ux588EFVV1fr7rvvliRNnTpVubm5vva/+tWv9NFHH2nfvn1av3697rrrLh08eFD33Xdfx30KRISLL4rXqF7d5DUZHQGAcGL394Lbb79dZWVleuyxx1RcXKzhw4frgw8+8C1qLSwslM32ecY5efKk7r//fhUXF6tbt24aOXKkVq5cqYEDB3bcp0DE+PbYnlp78KT+b9k+fXN0llIS2EAPAEKdYYbALlJut1sul0sVFRWsH4lwHq+pW5//tzYdrtCtI3roqduHW10SAKAFrf3+5tk0CClRNkO/umWwDEN6c8MRrd533OqSAADtRBhByBmWlag7xvSUJD329lbVe7wWVwQAaA/CCELSf1/XT926RGtnSaX+WsCzjgAglBFGEJK6dY3R/1zfX5L09KJdKuVWXwAIWYQRhKxvjsrSsKxEVdU26Lfvbbe6HABAGxFGELJsNkO/ObOYdcHGo9pQeNLqkgAAbUAYQUgbkunSNy5r3Jn16Y93W1wNAKAtCCMIeT+8uq/sNkPLd5Vp3cETVpcDAPATYQQhr2f3Lr7n1jy9iNERAAg1hBGEhelX9ZHdZmjFnmNas5/REQAIJYQRhIWspC765ugsSY23+gIAQgdhBGFj+lV9FB1lqGDfca1im3gACBmEEYSNHolx+tboxm3in1q0SyHwDEgAgAgjCDPfv+oSxUTZtGb/CRXsZXQEAEIBYQRhJd0Vp2+PbRwd+cU7W3WqrsHiigAAF0IYQdiZcXUfpSQ4tKukSg+/sZnpGgAIcoQRhJ3keIeeu/My2W2G/rXpqP6y8oDVJQEAzoMwgrA0uneScm8YIEn6zbvbtfYAe48AQLAijCBs3XN5b900NF0NXlPT565XaWWN1SUBAM6BMIKwZRiGfveNoeqTEq8Sd61+MHeDGjxeq8sCAHwBYQRhravDrtl3jVTXmCit3n9Cr689bHVJAIAvIIwg7PVJidePr+snSXp28W7V1HssrggAcDbCCCLCt8f2VJozVkUVNZq3ptDqcgAAZyGMICLERkfpB9f0kST9cclena5jdAQAggVhBBHjtpFZyuwWp2NVtfprwQGrywEAnEEYQcSIsdv00DV9JUmzl+1VVS1bxQNAMCCMIKJ8fUQPXZzcVSdP1euVFfutLgcAIMIIIow9yqaZ114qSXrxk32qOFVvcUUAAMIIIs5NQ9LVLzVBlTUN+t2HO1TXwEZoAGAlwggijs1maNZ1jaMjc1cX6pqnluqtDYfl9fJ0XwCwAmEEEWnSoDT9v28MVUqCQ4dOnNaPXtukG/73Ey3eUSLTJJQAQCAZZgj8y+t2u+VyuVRRUSGn02l1OQgjp+s8emXlfr2wdK8qaxrvrhl3cZJ+esNADcl0WVwdAIS21n5/E0YASeWn6vTC0r16ZeUB3xqSKcMz9F+T+imzWxeLqwOA0EQYAdrg8MlTevKjXXprwxFJjXuTfO+Ki/WDq/sqxs6sJgD4gzACtMPmwxX67XvbVbDvuCRpUIZTz9w+XH1TEyyuDABCR2u/v/lfPeAchmS6NPf+sXrhzsvUrUu0th5166ZnV2jOv/dz1w0AdDDCCNACwzA0eUi6Ppx5hSZcepFqG7z6xTvbNO2VNdp0qJy7bgCggzBNA7SCaZr626qDevzd7ao9s8C1b0q8vjEyU18f0UOpzliLKwSA4MOaEaAT7Cmt0rOLd+uDLcW+UGIzpKv6pei7V1ysMdlJMgzD4ioBIDgQRoBO5K6p13ufFemf6w5r7cGTvvMjeibqe1dcousGpspmI5QAiGyEESBA9pZV6c8r9uuf6w779ii5OLmrci7pruzkrurdvat6J3dRVlIXOexRFlcLAIHTqWHkueee0+9//3sVFxdr2LBhevbZZzVmzJgW28+fP1+PPvqoDhw4oL59++p3v/udbrjhhla/H2EEoaC0skZ/WXlAfys4KPeZ3VzPFmUz1Kt7F12akqBLU+PVNzVBvbt3VVZSnFxx0UzvAAg7nRZGXnvtNU2dOlWzZ8/W2LFj9cwzz2j+/PnauXOnUlJSvtR+5cqVuuKKK5SXl6ebbrpJc+fO1e9+9zu
tX79egwcP7tAPAwSDqtoGLdpWrD2lVTpw/JQOHKvWgWPVqq7ztHhNvMOuzG5xykrqoouTuyq76bioq5K7OpjyARCSOi2MjB07VqNHj9Yf//hHSZLX61VWVpZ+8IMf6OGHH/5S+9tvv13V1dVauHCh79y4ceM0fPhwzZ49u0M/DBCsTNNUibtWu0oqtaukUrtLqrS7tFKHTp5WWWXtBa/vGhOlrg674h12xcfa5YyNlisuWs44u5xx0XLGRivhzPmEWLsSYqMVG22Twx4lh90mR7RNMVE2xdhtio5q/DsBB0Bna+33t92fX1pXV6d169YpNzfXd85ms2nixIkqKCg45zUFBQWaNWtWs3OTJk3SggULWnyf2tpa1dZ+/g+02+32p0wg6BiGoTRXrNJcsbri0ouavVZT79Hhk6d1+OQpHTx+SvuPVfuOwydPyWtK1XUeVdd5VNqK4NJadpshe5ShaJtN9ihD9iibom2GoqIM2W02RdkM2W2GbIYhm02yGYYMw5DNkKIMQzbbmb+faSM1tWn802bo8/Y2Q4YM6cxrhiSj8ccv9VPj+abrG/9+7j49+0+j2bnPz5zVroXf05LPr2tNW4LdhdBFwe+ey7OVlWTNs7j8CiPHjh2Tx+NRampqs/OpqanasWPHOa8pLi4+Z/vi4uIW3ycvL0+//OUv/SkNCFmx0VHqkxKvPinxX3qtrsGrypp6VdU2NB41DaqsaZC7pl7u0/WqON2gitP1qqypV2VNgyprG/+sqmlQbYNXtQ0e1dR7VVPvUcMXdo5t8Jpq8JqqkTdQHxVAELt5WEZohJFAyc3NbTaa4na7lZWVZWFFgDVi7DZ1j3eoe7yj3b/L6zVV5/Gq3uNVXYNXdR6vGjym6j1eNXjP/Okx5TFNebymGjymGrxemabkNU3fnx6vKa9pymvqrL83vd44JXX2z2e/bpqmTH3e7otMUzJlnnm98dqWNF3f1MRs9nfzC20v3D/nbNKKC1szzx3Iexa/+NmB1rJy80a/wkhycrKioqJUUlLS7HxJSYnS0tLOeU1aWppf7SXJ4XDI4Wj/P74APmezGYq1RSk2mtuLAQQXv55NExMTo5EjRyo/P993zuv1Kj8/Xzk5Oee8Jicnp1l7SVq0aFGL7QEAQGTxe5pm1qxZmjZtmkaNGqUxY8bomWeeUXV1te6++25J0tSpU9WjRw/l5eVJkh566CFNmDBBTz75pG688UbNmzdPa9eu1YsvvtixnwQAAIQkv8PI7bffrrKyMj322GMqLi7W8OHD9cEHH/gWqRYWFspm+3zAZfz48Zo7d65+9rOf6ZFHHlHfvn21YMGCVu8xAgAAwhvbwQMAgE7R2u9vv9aMAAAAdDTCCAAAsBRhBAAAWIowAgAALEUYAQAAliKMAAAASxFGAACApQgjAADAUoQRAABgKb+3g7dC0yaxbrfb4koAAEBrNX1vX2iz95AII5WVlZKkrKwsiysBAAD+qqyslMvlavH1kHg2jdfr1dGjR5WQkCDDMDrs97rdbmVlZenQoUM886aT0deBQ18HFv0dOPR14HRUX5umqcrKSmVkZDR7iO4XhcTIiM1mU2ZmZqf9fqfTyX/YAUJfBw59HVj0d+DQ14HTEX19vhGRJixgBQAAliKMAAAAS0V0GHE4HPr5z38uh8NhdSlhj74OHPo6sOjvwKGvAyfQfR0SC1gBAED4iuiREQAAYD3CCAAAsBRhBAAAWIowAgAALBXRYeS5555T7969FRsbq7Fjx2rNmjVWlxTy8vLyNHr0aCUkJCglJUVTpkzRzp07m7WpqanR9OnT1b17d8XHx+sb3/iGSkpKLKo4PDzxxBMyDEMzZ870naOfO9aRI0d01113qXv37oqLi9OQIUO0du1a3+umaeqxxx5Tenq64uLiNHHiRO3evdvCikOTx+PRo48+quzsbMXFxemSSy7Rr3/962bPNqGv22b58uW6+eablZGRIcMwtGDBgmavt6ZfT5w4oTvvvFNOp1OJiYm69957VVVV1f7izAg1b948MyYmxnz55ZfNrVu3mvfff7+ZmJholpSUWF1aSJs0aZL5yiuvmFu2bDE3btxo3nDDDWbPnj3NqqoqX5sHHnjAzMrKMvPz8821a9ea48aNM8ePH29h1aFtzZo1Zu/evc2hQ4eaDz30kO88/dxxTpw4Yfbq1cv8zne+Y65evdrct2+f+eGHH5p79uzxtXniiSdMl8tlLliwwNy0aZP5ta99zczOzjZPnz5tYeWh5/HHHze7d+9uLly40Ny/f785f/58Mz4+3vzDH/7ga0Nft817771n/vSnPzXffPNNU5L51ltvNXu9Nf16/fXXm8OGDTNXrVplfvLJJ2afPn3MO+64o921RWwYGTNmjDl9+nTfzx6Px8zIyDDz8vIsrCr8lJaWmpLMZcuWmaZpmuXl5WZ0dLQ5f/58X5vt27ebksyCggKrygxZlZWVZt++fc1FixaZEyZM8IUR+rlj/c///I/5la98pcXXvV6vmZaWZv7+97/3nSsvLzcdDof5j3/8IxAlho0bb7zRvOeee5qdu/XWW80777zTNE36uqN8MYy0pl+3bdtmSjI//fRTX5v333/fNAzDPHLkSLvqichpmrq6Oq1bt04TJ070nbPZbJo4caIKCgosrCz8VFRUSJKSkpIkSevWrVN9fX2zvu/fv7969uxJ37fB9OnTdeONNzbrT4l+7mj/+te/NGrUKN12221KSUnRiBEj9Kc//cn3+v79+1VcXNysv10ul8aOHUt/+2n8+PHKz8/Xrl27JEmbNm3SihUrNHnyZEn0dWdpTb8WFBQoMTFRo0aN8rWZOHGibDabVq9e3a73D4kH5XW0Y8eOyePxKDU1tdn51NRU7dixw6Kqwo/X69XMmTN1+eWXa/DgwZKk4uJixcTEKDExsVnb1NRUFRcXW1Bl6Jo3b57Wr1+vTz/99Euv0c8da9++fXrhhRc0a9YsPfLII/r000/1wx/+UDExMZo2bZqvT8/1bwr97Z+HH35Ybrdb/fv3V1RUlDwejx5//HHdeeedkkRfd5LW9GtxcbFSUlKavW6325WUlNTuvo/IMILAmD59urZs2aIVK1ZYXUrYOXTokB566CEtWrRIsbGxVpcT9rxer0aNGqXf/va3kqQRI0Zoy5Ytmj17tqZNm2ZxdeHl9ddf19///nfNnTtXgwYN0saNGzVz5kxlZGTQ12EsIqdpkpOTFRUV9aU7C0pKSpSWlmZRVeFlxowZWrhwoZYsWaLMzEzf+bS0NNXV1am8vLxZe/reP+vWrVNpaakuu+wy2e122e12LVu2TP/7v/8ru92u1NRU+rkDpaena+DAgc3ODRgwQIWFhZLk61P+TWm///7v/9bDDz+sb33rWxoyZIj+8z//Uz/60Y+Ul5cnib7uLK3p17S0NJWWljZ7vaGhQSdOnGh330dkGImJidHIkSOVn5/vO+f1epWfn6+cnBwLKwt9pmlqxowZeuutt7R48WJlZ2c3e33kyJGKjo5u1vc7d+5UYWEhfe+Ha665Rps3b9bGjRt9x6
hRo3TnnXf6/k4/d5zLL7/8S7eo79q1S7169ZIkZWdnKy0trVl/u91urV69mv7206lTp2SzNf9qioqKktfrlURfd5bW9GtOTo7Ky8u1bt06X5vFixfL6/Vq7Nix7SugXctfQ9i8efNMh8Nhzpkzx9y2bZv53e9+10xMTDSLi4utLi2kPfjgg6bL5TKXLl1qFhUV+Y5Tp0752jzwwANmz549zcWLF5tr1641c3JyzJycHAurDg9n301jmvRzR1qzZo1pt9vNxx9/3Ny9e7f597//3ezSpYv56quv+to88cQTZmJiovn222+bn332mXnLLbdwu2kbTJs2zezRo4fv1t4333zTTE5ONn/yk5/42tDXbVNZWWlu2LDB3LBhgynJfOqpp8wNGzaYBw8eNE2zdf16/fXXmyNGjDBXr15trlixwuzbty+39rbXs88+a/bs2dOMiYkxx4wZY65atcrqkkKepHMer7zyiq/N6dOnze9///tmt27dzC5duphf//rXzaKiIuuKDhNfDCP0c8d65513zMGDB5sOh8Ps37+/+eKLLzZ73ev1mo8++qiZmppqOhwO85prrjF37txpUbWhy+12mw899JDZs2dPMzY21rz44ovNn/70p2Ztba2vDX3dNkuWLDnnv8/Tpk0zTbN1/Xr8+HHzjjvuMOPj402n02nefffdZmVlZbtrM0zzrG3tAAAAAiwi14wAAIDgQRgBAACWIowAAABLEUYAAIClCCMAAMBShBEAAGApwggAALAUYQQAAFiKMAIAACxFGAEAAJYijAAAAEsRRgAAgKX+f+3Lgx2talRTAAAAAElFTkSuQmCC", 485 | "text/plain": [ 486 | "
" 487 | ] 488 | }, 489 | "metadata": {}, 490 | "output_type": "display_data" 491 | } 492 | ], 493 | "source": [ 494 | "import matplotlib.pyplot as plt\n", 495 | "\n", 496 | "plt.plot(list(range(len(losses))), losses)" 497 | ] 498 | }, 499 | { 500 | "cell_type": "code", 501 | "execution_count": 82, 502 | "metadata": {}, 503 | "outputs": [ 504 | { 505 | "name": "stdout", 506 | "output_type": "stream", 507 | "text": [ 508 | "Overfits!\n" 509 | ] 510 | } 511 | ], 512 | "source": [ 513 | "op = transformer(input_ids, labels)\n", 514 | "if (op.argmax(dim=2) == labels).all():\n", 515 | "\tprint (\"Overfits!\")" 516 | ] 517 | } 518 | ], 519 | "metadata": { 520 | "kernelspec": { 521 | "display_name": "Python 3 (ipykernel)", 522 | "language": "python", 523 | "name": "python3" 524 | }, 525 | "language_info": { 526 | "codemirror_mode": { 527 | "name": "ipython", 528 | "version": 3 529 | }, 530 | "file_extension": ".py", 531 | "mimetype": "text/x-python", 532 | "name": "python", 533 | "nbconvert_exporter": "python", 534 | "pygments_lexer": "ipython3", 535 | "version": "3.9.13" 536 | }, 537 | "vscode": { 538 | "interpreter": { 539 | "hash": "e8b7038d63dd4e2c311f66bb2cdc461873330b7f75ec6cf56c29cc17e4421480" 540 | } 541 | } 542 | }, 543 | "nbformat": 4, 544 | "nbformat_minor": 2 545 | } -------------------------------------------------------------------------------- /transformers-book-reading-group/session-3/HF reading group session 3.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/transformers-book-reading-group/session-3/HF reading group session 3.pdf -------------------------------------------------------------------------------- /transformers-book-reading-group/session-4/slides.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/transformers-book-reading-group/session-4/slides.pdf -------------------------------------------------------------------------------- /transformers-book-reading-group/session-5/slides-text-generation.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/workshops/ab22955ca3401d9a6866e62c3d198eca291c75ff/transformers-book-reading-group/session-5/slides-text-generation.pdf --------------------------------------------------------------------------------