├── .gitignore
├── CONTRIBUTING.md
├── LICENSE
├── README.md
├── data
│   └── data-readme.md
├── meta
│   ├── banner.png
│   ├── wandb-sentiment.jpg
│   ├── wandb.png
│   └── wandb.svg
├── models
│   └── models-readme.md
├── transformers_multi_label_classification.ipynb
├── transformers_multiclass_classification.ipynb
├── transformers_ner.ipynb
├── transformers_sentiment_wandb.ipynb
└── transformers_summarization_wandb.ipynb
/.gitignore:
--------------------------------------------------------------------------------
1 | .
2 | ..
3 | .vscode
4 | .ipynb_checkpoints
5 | *.csv
6 | *.bin
7 | 
8 | # Byte-compiled / optimized / DLL files
9 | __pycache__/
10 | *.py[cod]
11 | *$py.class
12 | 
13 | # C extensions
14 | *.so
15 | 
16 | # Distribution / packaging
17 | .Python
18 | build/
19 | develop-eggs/
20 | dist/
21 | downloads/
22 | eggs/
23 | .eggs/
24 | lib/
25 | lib64/
26 | parts/
27 | sdist/
28 | var/
29 | wheels/
30 | pip-wheel-metadata/
31 | share/python-wheels/
32 | *.egg-info/
33 | .installed.cfg
34 | *.egg
35 | MANIFEST
36 | 
37 | # PyInstaller
38 | # Usually these files are written by a python script from a template
39 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
40 | *.manifest
41 | *.spec
42 | 
43 | # Installer logs
44 | pip-log.txt
45 | pip-delete-this-directory.txt
46 | 
47 | # Unit test / coverage reports
48 | htmlcov/
49 | .tox/
50 | .nox/
51 | .coverage
52 | .coverage.*
53 | .cache
54 | nosetests.xml
55 | coverage.xml
56 | *.cover
57 | *.py,cover
58 | .hypothesis/
59 | .pytest_cache/
60 | 
61 | # Translations
62 | *.mo
63 | *.pot
64 | 
65 | # Django stuff:
66 | *.log
67 | local_settings.py
68 | db.sqlite3
69 | db.sqlite3-journal
70 | 
71 | # Flask stuff:
72 | instance/
73 | .webassets-cache
74 | 
75 | # Scrapy stuff:
76 | .scrapy
77 | 
78 | # Sphinx documentation
79 | docs/_build/
80 | 
81 | # PyBuilder
82 | target/
83 | 
84 | # Jupyter Notebook
85 | .ipynb_checkpoints
86 | 
87 | # IPython
88 | profile_default/
89 | ipython_config.py
90 | 
91 | # pyenv
92 | .python-version
93 | 
94 | # pipenv
95 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
96 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
97 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
98 | # install all needed dependencies.
99 | #Pipfile.lock
100 | 
101 | # celery beat schedule file
102 | celerybeat-schedule
103 | 
104 | # SageMath parsed files
105 | *.sage.py
106 | 
107 | # Environments
108 | .env
109 | .venv
110 | env/
111 | venv/
112 | ENV/
113 | env.bak/
114 | venv.bak/
115 | 
116 | # Spyder project settings
117 | .spyderproject
118 | .spyproject
119 | 
120 | # Rope project settings
121 | .ropeproject
122 | 
123 | # mkdocs documentation
124 | /site
125 | 
126 | # mypy
127 | .mypy_cache/
128 | .dmypy.json
129 | dmypy.json
130 | 
131 | # Pyre type checker
132 | .pyre/
133 | 
134 | # Other files
135 | *.csv
136 | *.pkl
137 | .vscode
138 | .idea
139 | 
140 | 
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing Guidelines
2 | We welcome and appreciate all contributions to the project! Below are some guidelines to follow when contributing.
3 | 
4 | ## Issues
5 | - Before opening a new issue, please check if the issue has already been reported.
6 | - When opening a new issue, provide a clear and concise description of the problem. Include as much relevant information as possible (e.g. version, operating system, etc.).
7 | ## Pull Requests
8 | - Fork the repository and create a new branch for your changes.
9 | - Keep your changes in a single commit and limit the commit message to 72 characters in the subject line and a brief description in the body.
10 | - Make sure that your code follows the existing coding style and best practices.
11 | - Ensure that your code is well-documented and tested.
12 | - Open a pull request with a clear title and description of your changes. Reference the related issue (if any) and explain the changes in detail.
13 | - Wait for a maintainer to review and merge your pull request.
14 | ## Code of Conduct
15 | - By participating in this project, you agree to abide by our Code of Conduct.
16 | 
17 | Thank you for your contribution!
18 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 | 
3 | Copyright (c) 2020 Abhishek Kumar Mishra
4 | 
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | #
4 |
5 |
8 |
9 |
10 |
11 |
12 |
13 |
14 | ### Introduction
15 |
16 | The field of **NLP** was revolutionized in 2018 by the introduction of **BERT** and its **Transformer** friends (RoBERTa, XLM, etc.).
17 |
18 | These novel transformer-based neural network architectures and new ways of training a neural network on natural language data brought transfer learning to NLP problems. Transfer learning had already been delivering state-of-the-art results in the Computer Vision domain for a few years, and the introduction of transformer models brought about the same paradigm shift in NLP.
19 |
20 | Companies like [Google](https://github.com/google-research/bert) and [Facebook](https://github.com/pytorch/fairseq/tree/master/examples/roberta) trained their neural networks on large swathes of natural language data to grasp the intricacies of language, thereby generating a language model. These models were then fine-tuned on domain-specific datasets to achieve state-of-the-art results for specific problem statements. They also published these trained models to the open source community, and community members were then able to fine-tune them for their own use cases.
21 |
22 | [Hugging Face](https://github.com/huggingface) made it easier for the community to access and fine-tune these models through their Python package: [Transformers](https://github.com/huggingface/transformers).
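
For a quick sense of what that looks like in practice, here is a minimal sketch (not taken from the notebooks themselves; method names can vary slightly between Transformers releases) that loads a pretrained DistilBERT checkpoint and runs a single sentence through it:

```python
import torch
from transformers import DistilBertModel, DistilBertTokenizer

# Load a pretrained tokenizer and language model
tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
model = DistilBertModel.from_pretrained("distilbert-base-uncased")

# Tokenize one sentence and run a forward pass (no fine-tuning yet)
inputs = tokenizer.encode_plus("Transformers brought transfer learning to NLP.", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

print(outputs[0].shape)  # last hidden state: (batch_size, sequence_length, hidden_size)
```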
23 |
24 | ### Motivation
25 | Despite these amazing technological advancements, applying these solutions to business problems is still a challenge, given the niche knowledge required to understand and apply these methods to specific problem statements. Hence, in the following tutorials I will be demonstrating how a user can leverage these technologies, along with some other Python tools, to fine-tune these language models for specific types of tasks.
26 |
27 | Before I proceed, I would like to mention the following groups for the fantastic work they are doing and sharing, which has made these notebooks and tutorials possible:
28 |
29 | Please review these amazing sources of information and subscribe to their channels/sources.
30 | - [Hugging Face Team](https://huggingface.co/)
31 | - Abhishek Thakur for his amazing [Youtube videos](https://www.youtube.com/user/abhisheksvnit)
32 |
33 | The problem statements that I will be working with are:
34 |
35 | | Notebook |Github Link |Colab Link|Kaggle Kernel|
36 | |--|--|--|--|
37 | |Text Classification: Multi-Class| [Github](https://github.com/abhimishra91/transformers-tutorials/blob/master/transformers_multiclass_classification.ipynb) |[](https://colab.research.google.com/github/abhimishra91/transformers-tutorials/blob/master/transformers_multiclass_classification.ipynb)|[Kaggle](https://www.kaggle.com/eggwhites2705/transformers-multiclass-classification-ipynb)|
38 | |Text Classification: Multi-Label| [Github](https://github.com/abhimishra91/transformers-tutorials/blob/master/transformers_multi_label_classification.ipynb)|[](https://colab.research.google.com/github/abhimishra91/transformers-tutorials/blob/master/transformers_multi_label_classification.ipynb)|[Kaggle](https://www.kaggle.com/eggwhites2705/transformers-multi-label-classification)|
39 | |Sentiment Classification **with Experiment Tracking in [WandB](https://app.wandb.ai/abhimishra-91/transformers_tutorials_sentiment/runs/1zwn4gbg?workspace=user-abhimishra-91)!**|[Github](https://github.com/abhimishra91/transformers-tutorials/blob/master/transformers_sentiment_wandb.ipynb)|[](https://colab.research.google.com/github/abhimishra91/transformers-tutorials/blob/master/transformers_sentiment_wandb.ipynb)||
40 | |Named Entity Recognition: **with TPU processing!**|[Github](https://github.com/abhimishra91/transformers-tutorials/blob/master/transformers_ner.ipynb)|[](https://colab.research.google.com/github/abhimishra91/transformers-tutorials/blob/master/transformers_ner.ipynb)|[Kaggle](https://www.kaggle.com/eggwhites2705/transformers-ner)|
41 | |Question Answering||||
42 | |Summary Writing: **with Experiment Tracking in [WandB](https://app.wandb.ai/abhimishra-91/transformers_tutorials_summarization?workspace=user-abhimishra-91)!**|[Github](https://github.com/abhimishra91/transformers-tutorials/blob/master/transformers_summarization_wandb.ipynb)|[](https://colab.research.google.com/github/abhimishra91/transformers-tutorials/blob/master/transformers_summarization_wandb.ipynb)|[Kaggle](https://www.kaggle.com/eggwhites2705/transformers-summarization-t5/output)|
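
All of these notebooks follow the same basic PyTorch recipe: wrap the data in a `Dataset`/`DataLoader`, put a small task-specific head on top of a pretrained transformer, and optimize it with a standard loss function. The following is a condensed, hypothetical sketch of that loop (the batch keys `ids`, `mask` and `targets` mirror the classification notebooks; it is not the exact code used in any of them):

```python
import torch
from torch.utils.data import DataLoader

def fine_tune(model, train_dataset, epochs=1, lr=1e-5, batch_size=4, device="cpu"):
    """Generic fine-tuning loop; assumes the dataset yields dicts with 'ids', 'mask' and 'targets'."""
    loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    loss_fn = torch.nn.CrossEntropyLoss()            # classification loss, as in the multiclass notebook
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    model.to(device)
    model.train()
    for _ in range(epochs):
        for batch in loader:
            optimizer.zero_grad()
            logits = model(batch["ids"].to(device), batch["mask"].to(device))
            loss = loss_fn(logits, batch["targets"].to(device))
            loss.backward()      # back-propagate the loss
            optimizer.step()     # update the network weights
    return model
```

The individual notebooks differ mainly in how the dataset is built, which head sits on top of the transformer, and which loss function is used.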
43 |
44 |
45 |
46 | ### Directory Structure
47 |
48 | 1. `data`: This folder contains all the toy data used for fine-tuning.
49 | 2. `utils`: This folder will contain any miscellaneous scripts used to prepare for fine-tuning.
50 | 3. `models`: Folder to save all the artifacts after fine-tuning.
51 |
52 | ### Further Watching/Reading
53 |
54 | I will try to cover the practical and implementation aspects of fine-tuning these language models on various NLP tasks. You can improve your knowledge of this topic by reading/watching the following resources.
55 |
56 |
57 | - Watching
58 | - [Introduction in Simple terms](https://www.youtube.com/watch?v=gcHkxP9adiM)
59 | - [Transfer Learning in NLP](https://www.youtube.com/watch?v=0T_Qr4qBrqc)
60 | - [BERT Research Series from ChrisMcCormickAI](https://www.youtube.com/playlist?list=PLam9sigHPGwOBuH4_4fr-XvDbe5uneaf6)
61 |
62 | - Reading
63 | - [Transformers Documentation](https://huggingface.co/transformers/)
64 | - [Pytorch Documentation](https://pytorch.org/docs/stable/index.html)
65 | - [Google AI Blog](https://ai.googleblog.com/)
66 |
--------------------------------------------------------------------------------
/data/data-readme.md:
--------------------------------------------------------------------------------
1 | # ReadMe for the Data folder
2 |
3 | Data Used for Text Classification: Multi Class
4 |
--------------------------------------------------------------------------------
/meta/banner.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/abhimishra91/transformers-tutorials/3d5a9b1d735eb68648588526d77d6dda7735d631/meta/banner.png
--------------------------------------------------------------------------------
/meta/wandb-sentiment.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/abhimishra91/transformers-tutorials/3d5a9b1d735eb68648588526d77d6dda7735d631/meta/wandb-sentiment.jpg
--------------------------------------------------------------------------------
/meta/wandb.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/abhimishra91/transformers-tutorials/3d5a9b1d735eb68648588526d77d6dda7735d631/meta/wandb.png
--------------------------------------------------------------------------------
/meta/wandb.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/models/models-readme.md:
--------------------------------------------------------------------------------
1 | # ReadMe for the Model folder
2 |
3 | All the files for the models are saved here
--------------------------------------------------------------------------------
/transformers_multiclass_classification.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Fine Tuning Transformer for MultiClass Text Classification"
8 | ]
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "metadata": {},
13 | "source": [
14 | "### Introduction\n",
15 | "\n",
16 | "In this tutorial we will be fine tuning a transformer model for the **Multiclass text classification** problem. \n",
17 | "This is one of the most common business problems where a given piece of text/sentence/document needs to be classified into one of the categories out of the given list.\n",
18 | "\n",
19 | "#### Flow of the notebook\n",
20 | "\n",
21 | "The notebook will be divided into separate sections to provide an organized walk-through of the process used. This process can be modified for individual use cases. The sections are:\n",
22 | "\n",
23 | "1. [Importing Python Libraries and preparing the environment](#section01)\n",
24 | "2. [Importing and Pre-Processing the domain data](#section02)\n",
25 | "3. [Preparing the Dataset and Dataloader](#section03)\n",
26 | "4. [Creating the Neural Network for Fine Tuning](#section04)\n",
27 | "5. [Fine Tuning the Model](#section05)\n",
28 | "6. [Validating the Model Performance](#section06)\n",
29 | "7. [Saving the model and artifacts for Inference in Future](#section07)\n",
30 | "\n",
31 | "#### Technical Details\n",
32 | "\n",
33 | "This script leverages multiple tools designed by other teams. Details of the tools used are given below. Please ensure that these elements are present in your setup to successfully implement this script.\n",
34 | "\n",
35 | " - Data: \n",
36 | "\t - We are using the News Aggregator dataset available at the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/News+Aggregator)\n",
37 | "\t - We are referring only to the first csv file from the data dump: `newsCorpora.csv`\n",
38 | "\t - There are `422937` rows of data, where each row has the following data points: \n",
39 | "\t\t - ID Numeric ID \n",
40 | "\t\t - TITLE News title \n",
41 | "\t\t - URL Url \n",
42 | "\t\t - PUBLISHER Publisher name \n",
43 | "\t\t - CATEGORY News category (b = business, t = science and technology, e = entertainment, m = health) \n",
44 | "\t\t - STORY Alphanumeric ID of the cluster that includes news about the same story \n",
45 | "\t\t - HOSTNAME Url hostname \n",
46 | "\t\t - TIMESTAMP Approximate time the news was published, as the number of milliseconds since the epoch 00:00:00 GMT, January 1, 1970\n",
47 | "\n",
48 | "\n",
49 | " - Language Model Used:\n",
50 | "\t - DistilBERT is a smaller transformer model compared to BERT or RoBERTa. It is created by a process of distillation applied to BERT. \n",
51 | "\t - [Blog-Post](https://medium.com/huggingface/distilbert-8cf3380435b5)\n",
52 | "\t - [Research Paper](https://arxiv.org/abs/1910.01108)\n",
53 | " - [Documentation for python](https://huggingface.co/transformers/model_doc/distilbert.html)\n",
54 | "\n",
55 | "\n",
56 | " - Hardware and Software Requirements:\n",
57 | "\t - Python 3.6 and above\n",
58 | "\t - Pytorch, Transformers and All the stock Python ML Libraries\n",
59 | "\t - GPU enabled setup \n",
60 | "\n",
61 | "\n",
62 | " - Script Objective:\n",
63 | "\t - The objective of this script is to fine tune DistilBERT to be able to classify a news headline into the following categories:\n",
64 | "\t\t - Business\n",
65 | "\t\t - Technology\n",
66 | "\t\t - Health\n",
67 | "\t\t - Entertainment \n"
68 | ]
69 | },
70 | {
71 | "cell_type": "markdown",
72 | "metadata": {},
73 | "source": [
74 | "\n",
75 | "### Importing Python Libraries and preparing the environment\n",
76 | "\n",
77 | "At this step we will be importing the libraries and modules needed to run our script. Libraries are:\n",
78 | "* Pandas\n",
79 | "* Pytorch\n",
80 | "* Pytorch Utils for Dataset and Dataloader\n",
81 | "* Transformers\n",
82 | "* DistilBERT Model and Tokenizer\n",
83 | "\n",
84 | "Following that, we will prepare the device for CUDA execution. This configuration is needed if you want to leverage an onboard GPU. "
85 | ]
86 | },
87 | {
88 | "cell_type": "code",
89 | "execution_count": 1,
90 | "metadata": {
91 | "colab": {
92 | "base_uri": "https://localhost:8080/",
93 | "height": 114,
94 | "referenced_widgets": [
95 | "7532a60d077248ca963f514988d41acc",
96 | "e39121739a4c4bffb16155e9a61a58cc",
97 | "ca0092edb8f5442a988c87c89930a10e",
98 | "5a975bc922a14c1b82245195b7c7f659",
99 | "f9dd503454b2450e957a96bff807c8d9",
100 | "222af74a298140ff9ec6e6f1f286a52b",
101 | "3516912b49bf4e3e98c9ce6c7b1b1469",
102 | "8335299626724853865234b86745e2a5",
103 | "fd6f40d5854c49f79056cebd16f86626",
104 | "cde06f09879848a3bfc5892390a51d36",
105 | "b13c341a69c14c059edc40e6b438ee80",
106 | "dd7699e7813d4b9f9e80990498a39539",
107 | "dd589fc95cd94cd190bc2640f9ef618c",
108 | "4641ff326eea446c88903e70b85c90d7",
109 | "c1db6876e9b04293b4077f13006b4a19",
110 | "b8728a6fcdc54b72a10c8bfdcf794fad"
111 | ]
112 | },
113 | "colab_type": "code",
114 | "id": "wuMlXT80GAMK",
115 | "outputId": "074dad6a-a18e-45bd-8c9c-29e318962dcd"
116 | },
117 | "outputs": [],
118 | "source": [
119 | "# Importing the libraries needed\n",
120 | "import pandas as pd\n",
121 | "import torch\n",
122 | "import transformers\n",
123 | "from torch.utils.data import Dataset, DataLoader\n",
124 | "from transformers import DistilBertModel, DistilBertTokenizer"
125 | ]
126 | },
127 | {
128 | "cell_type": "code",
129 | "execution_count": 2,
130 | "metadata": {
131 | "colab": {},
132 | "colab_type": "code",
133 | "id": "xQMKTZ4ARk12"
134 | },
135 | "outputs": [],
136 | "source": [
137 | "# Setting up the device for GPU usage\n",
138 | "\n",
139 | "from torch import cuda\n",
140 | "device = 'cuda' if cuda.is_available() else 'cpu'"
141 | ]
142 | },
143 | {
144 | "cell_type": "markdown",
145 | "metadata": {},
146 | "source": [
147 | "\n",
148 | "### Importing and Pre-Processing the domain data\n",
149 | "\n",
150 | "We will now work with the data and prepare it for fine-tuning purposes. \n",
151 | "*Assuming that the `newsCorpora.csv` is already downloaded in your `data` folder*\n",
152 | "\n",
153 | "Import the file into a dataframe and give it the headers as per the documentation.\n",
154 | "Clean the file to remove the unwanted columns and create an additional column for training.\n",
155 | "The final Dataframe will be something like this:\n",
156 | "\n",
157 | "|TITLE|CATEGORY|ENCODED_CAT|\n",
158 | "|--|--|--|\n",
159 | "| title_1|Entertainment | 1 |\n",
160 | "| title_2|Entertainment | 1 |\n",
161 | "| title_3|Business| 2 |\n",
162 | "| title_4|Science| 3 |\n",
163 | "| title_5|Science| 3 |\n",
164 | "| title_6|Health| 4 |"
165 | ]
166 | },
167 | {
168 | "cell_type": "code",
169 | "execution_count": 5,
170 | "metadata": {
171 | "colab": {},
172 | "colab_type": "code",
173 | "id": "iNCaZ2epNcSO"
174 | },
175 | "outputs": [],
176 | "source": [
177 | "# Import the csv into pandas dataframe and add the headers\n",
178 | "df = pd.read_csv('./data/newsCorpora.csv', sep='\\t', names=['ID','TITLE', 'URL', 'PUBLISHER', 'CATEGORY', 'STORY', 'HOSTNAME', 'TIMESTAMP'])\n",
179 | "# df.head()\n",
180 | "# # Removing unwanted columns and only leaving title of news and the category which will be the target\n",
181 | "df = df[['TITLE','CATEGORY']]\n",
182 | "# df.head()\n",
183 | "\n",
184 | "# # Converting the codes to appropriate categories using a dictionary\n",
185 | "my_dict = {\n",
186 | " 'e':'Entertainment',\n",
187 | " 'b':'Business',\n",
188 | " 't':'Science',\n",
189 | " 'm':'Health'\n",
190 | "}\n",
191 | "\n",
192 | "def update_cat(x):\n",
193 | " return my_dict[x]\n",
194 | "\n",
195 | "df['CATEGORY'] = df['CATEGORY'].apply(lambda x: update_cat(x))\n",
196 | "\n",
197 | "encode_dict = {}\n",
198 | "\n",
199 | "def encode_cat(x):\n",
200 | " if x not in encode_dict.keys():\n",
201 | " encode_dict[x]=len(encode_dict)\n",
202 | " return encode_dict[x]\n",
203 | "\n",
204 | "df['ENCODE_CAT'] = df['CATEGORY'].apply(lambda x: encode_cat(x))"
205 | ]
206 | },
207 | {
208 | "cell_type": "markdown",
209 | "metadata": {},
210 | "source": [
211 | "\n",
212 | "### Preparing the Dataset and Dataloader\n",
213 | "\n",
214 | "We will start by defining a few key variables that will be used later during the training/fine-tuning stage.\n",
215 | "This is followed by the creation of the Dataset class - this defines how the text is pre-processed before being sent to the neural network. We will also define the Dataloader that feeds the data in batches to the neural network for suitable training and processing. \n",
216 | "Dataset and Dataloader are constructs of the PyTorch library for defining and controlling the data pre-processing and its passage to the neural network. For further reading on Dataset and Dataloader, read the [docs at PyTorch](https://pytorch.org/docs/stable/data.html)\n",
217 | "\n",
218 | "#### *Triage* Dataset Class\n",
219 | "- This class is defined to accept the Dataframe as input and generate tokenized output that is used by the DistilBERT model for training. \n",
220 | "- We are using the DistilBERT tokenizer to tokenize the data in the `TITLE` column of the dataframe. \n",
221 | "- The tokenizer uses the `encode_plus` method to perform tokenization and generate the necessary outputs, namely: `ids`, `attention_mask`\n",
222 | "- To read further into the tokenizer, [refer to this document](https://huggingface.co/transformers/model_doc/distilbert.html#distilberttokenizer)\n",
223 | "- `target` is the encoded category of the news headline. \n",
224 | "- The *Triage* class is used to create 2 datasets, for training and for validation.\n",
225 | "- *Training Dataset* is used to fine tune the model: **80% of the original data**\n",
226 | "- *Validation Dataset* is used to evaluate the performance of the model. The model has not seen this data during training. \n",
227 | "\n",
228 | "#### Dataloader\n",
229 | "- The Dataloader is used to create the training and validation dataloaders that load data to the neural network in a defined manner. This is needed because all the data from the dataset cannot be loaded into memory at once; hence the amount of data loaded into memory and then passed to the neural network needs to be controlled.\n",
230 | "- This control is achieved using parameters such as `batch_size` and `max_len`.\n",
231 | "- Training and Validation dataloaders are used in the training and validation part of the flow respectively"
232 | ]
233 | },
234 | {
235 | "cell_type": "code",
236 | "execution_count": 4,
237 | "metadata": {
238 | "colab": {},
239 | "colab_type": "code",
240 | "id": "JrBr2YesGdO_"
241 | },
242 | "outputs": [],
243 | "source": [
244 | "# Defining some key variables that will be used later on in the training\n",
245 | "MAX_LEN = 512\n",
246 | "TRAIN_BATCH_SIZE = 4\n",
247 | "VALID_BATCH_SIZE = 2\n",
248 | "EPOCHS = 1\n",
249 | "LEARNING_RATE = 1e-05\n",
250 | "tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')"
251 | ]
252 | },
253 | {
254 | "cell_type": "code",
255 | "execution_count": 5,
256 | "metadata": {
257 | "colab": {},
258 | "colab_type": "code",
259 | "id": "2vX7kzaAHu39"
260 | },
261 | "outputs": [],
262 | "source": [
263 | "class Triage(Dataset):\n",
264 | " def __init__(self, dataframe, tokenizer, max_len):\n",
265 | " self.len = len(dataframe)\n",
266 | " self.data = dataframe\n",
267 | " self.tokenizer = tokenizer\n",
268 | " self.max_len = max_len\n",
269 | " \n",
270 | " def __getitem__(self, index):\n",
271 | " title = str(self.data.TITLE[index])\n",
272 | " title = \" \".join(title.split())\n",
273 | " inputs = self.tokenizer.encode_plus(\n",
274 | " title,\n",
275 | " None,\n",
276 | " add_special_tokens=True,\n",
277 | " max_length=self.max_len,\n",
278 | " pad_to_max_length=True,\n",
279 | " return_token_type_ids=True,\n",
280 | " truncation=True\n",
281 | " )\n",
282 | " ids = inputs['input_ids']\n",
283 | " mask = inputs['attention_mask']\n",
284 | "\n",
285 | " return {\n",
286 | " 'ids': torch.tensor(ids, dtype=torch.long),\n",
287 | " 'mask': torch.tensor(mask, dtype=torch.long),\n",
288 | " 'targets': torch.tensor(self.data.ENCODE_CAT[index], dtype=torch.long)\n",
289 | " } \n",
290 | " \n",
291 | " def __len__(self):\n",
292 | " return self.len"
293 | ]
294 | },
295 | {
296 | "cell_type": "code",
297 | "execution_count": 6,
298 | "metadata": {
299 | "colab": {},
300 | "colab_type": "code",
301 | "id": "Zcwq13c0NE9c"
302 | },
303 | "outputs": [
304 | {
305 | "name": "stdout",
306 | "output_type": "stream",
307 | "text": [
308 | "FULL Dataset: (422419, 3)\n",
309 | "TRAIN Dataset: (337935, 3)\n",
310 | "TEST Dataset: (84484, 3)\n"
311 | ]
312 | }
313 | ],
314 | "source": [
315 | "# Creating the dataset and dataloader for the neural network\n",
316 | "\n",
317 | "train_size = 0.8\n",
318 | "train_dataset=df.sample(frac=train_size,random_state=200)\n",
319 | "test_dataset=df.drop(train_dataset.index).reset_index(drop=True)\n",
320 | "train_dataset = train_dataset.reset_index(drop=True)\n",
321 | "\n",
322 | "\n",
323 | "print(\"FULL Dataset: {}\".format(df.shape))\n",
324 | "print(\"TRAIN Dataset: {}\".format(train_dataset.shape))\n",
325 | "print(\"TEST Dataset: {}\".format(test_dataset.shape))\n",
326 | "\n",
327 | "training_set = Triage(train_dataset, tokenizer, MAX_LEN)\n",
328 | "testing_set = Triage(test_dataset, tokenizer, MAX_LEN)"
329 | ]
330 | },
331 | {
332 | "cell_type": "code",
333 | "execution_count": 7,
334 | "metadata": {
335 | "colab": {},
336 | "colab_type": "code",
337 | "id": "l1BgA1CkQSYa"
338 | },
339 | "outputs": [],
340 | "source": [
341 | "train_params = {'batch_size': TRAIN_BATCH_SIZE,\n",
342 | " 'shuffle': True,\n",
343 | " 'num_workers': 0\n",
344 | " }\n",
345 | "\n",
346 | "test_params = {'batch_size': VALID_BATCH_SIZE,\n",
347 | " 'shuffle': True,\n",
348 | " 'num_workers': 0\n",
349 | " }\n",
350 | "\n",
351 | "training_loader = DataLoader(training_set, **train_params)\n",
352 | "testing_loader = DataLoader(testing_set, **test_params)"
353 | ]
354 | },
355 | {
356 | "cell_type": "markdown",
357 | "metadata": {},
358 | "source": [
359 | "\n",
360 | "### Creating the Neural Network for Fine Tuning\n",
361 | "\n",
362 | "#### Neural Network\n",
363 | " - We will be creating a neural network with the `DistillBERTClass`. \n",
364 | " - This network will have the DistilBERT Language model followed by a `dropout` and finally a `Linear` layer to obtain the final outputs. \n",
365 | " - The data will be fed to the DistilBERT Language model as defined in the dataset. \n",
366 | " - The final layer outputs are what will be compared to the `encoded category` to determine the accuracy of the model's prediction. \n",
367 | " - We will initiate an instance of the network called `model`. This instance will be used for training and then to save the final trained model for future inference. \n",
368 | " \n",
369 | "#### Loss Function and Optimizer\n",
370 | " - The `Loss Function` and `Optimizer` are defined in the next cell.\n",
371 | " - The `Loss Function` is used to calculate the difference between the output created by the model and the actual output. \n",
372 | " - `Optimizer` is used to update the weights of the neural network to improve its performance.\n",
373 | " \n",
374 | "#### Further Reading\n",
375 | "- You can refer to my [Pytorch Tutorials](https://github.com/abhimishra91/pytorch-tutorials) to get an intuition of Loss Function and Optimizer.\n",
376 | "- [Pytorch Documentation for Loss Function](https://pytorch.org/docs/stable/nn.html#loss-functions)\n",
377 | "- [Pytorch Documentation for Optimizer](https://pytorch.org/docs/stable/optim.html)\n",
378 | "- Refer to the links provided at the top of the notebook to read more about DistilBERT. "
379 | ]
380 | },
381 | {
382 | "cell_type": "code",
383 | "execution_count": 8,
384 | "metadata": {},
385 | "outputs": [],
386 | "source": [
387 | "# Creating the customized model, by adding a drop out and a dense layer on top of distil bert to get the final output for the model. \n",
388 | "\n",
389 | "class DistillBERTClass(torch.nn.Module):\n",
390 | " def __init__(self):\n",
391 | " super(DistillBERTClass, self).__init__()\n",
392 | " self.l1 = DistilBertModel.from_pretrained(\"distilbert-base-uncased\")\n",
393 | " self.pre_classifier = torch.nn.Linear(768, 768)\n",
394 | " self.dropout = torch.nn.Dropout(0.3)\n",
395 | " self.classifier = torch.nn.Linear(768, 4)\n",
396 | "\n",
397 | " def forward(self, input_ids, attention_mask):\n",
398 | " output_1 = self.l1(input_ids=input_ids, attention_mask=attention_mask)\n",
399 | " hidden_state = output_1[0]\n",
400 | " pooler = hidden_state[:, 0]\n",
401 | " pooler = self.pre_classifier(pooler)\n",
402 | " pooler = torch.nn.ReLU()(pooler)\n",
403 | " pooler = self.dropout(pooler)\n",
404 | " output = self.classifier(pooler)\n",
405 | " return output"
406 | ]
407 | },
408 | {
409 | "cell_type": "code",
410 | "execution_count": 9,
411 | "metadata": {
412 | "collapsed": true
413 | },
414 | "outputs": [
415 | {
416 | "data": {
417 | "text/plain": [
418 | "DistillBERTClass(\n",
419 | " (l1): DistilBertModel(\n",
420 | " (embeddings): Embeddings(\n",
421 | " (word_embeddings): Embedding(30522, 768, padding_idx=0)\n",
422 | " (position_embeddings): Embedding(512, 768)\n",
423 | " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
424 | " (dropout): Dropout(p=0.1, inplace=False)\n",
425 | " )\n",
426 | " (transformer): Transformer(\n",
427 | " (layer): ModuleList(\n",
428 | " (0): TransformerBlock(\n",
429 | " (attention): MultiHeadSelfAttention(\n",
430 | " (dropout): Dropout(p=0.1, inplace=False)\n",
431 | " (q_lin): Linear(in_features=768, out_features=768, bias=True)\n",
432 | " (k_lin): Linear(in_features=768, out_features=768, bias=True)\n",
433 | " (v_lin): Linear(in_features=768, out_features=768, bias=True)\n",
434 | " (out_lin): Linear(in_features=768, out_features=768, bias=True)\n",
435 | " )\n",
436 | " (sa_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
437 | " (ffn): FFN(\n",
438 | " (dropout): Dropout(p=0.1, inplace=False)\n",
439 | " (lin1): Linear(in_features=768, out_features=3072, bias=True)\n",
440 | " (lin2): Linear(in_features=3072, out_features=768, bias=True)\n",
441 | " )\n",
442 | " (output_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
443 | " )\n",
444 | " (1): TransformerBlock(\n",
445 | " (attention): MultiHeadSelfAttention(\n",
446 | " (dropout): Dropout(p=0.1, inplace=False)\n",
447 | " (q_lin): Linear(in_features=768, out_features=768, bias=True)\n",
448 | " (k_lin): Linear(in_features=768, out_features=768, bias=True)\n",
449 | " (v_lin): Linear(in_features=768, out_features=768, bias=True)\n",
450 | " (out_lin): Linear(in_features=768, out_features=768, bias=True)\n",
451 | " )\n",
452 | " (sa_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
453 | " (ffn): FFN(\n",
454 | " (dropout): Dropout(p=0.1, inplace=False)\n",
455 | " (lin1): Linear(in_features=768, out_features=3072, bias=True)\n",
456 | " (lin2): Linear(in_features=3072, out_features=768, bias=True)\n",
457 | " )\n",
458 | " (output_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
459 | " )\n",
460 | " (2): TransformerBlock(\n",
461 | " (attention): MultiHeadSelfAttention(\n",
462 | " (dropout): Dropout(p=0.1, inplace=False)\n",
463 | " (q_lin): Linear(in_features=768, out_features=768, bias=True)\n",
464 | " (k_lin): Linear(in_features=768, out_features=768, bias=True)\n",
465 | " (v_lin): Linear(in_features=768, out_features=768, bias=True)\n",
466 | " (out_lin): Linear(in_features=768, out_features=768, bias=True)\n",
467 | " )\n",
468 | " (sa_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
469 | " (ffn): FFN(\n",
470 | " (dropout): Dropout(p=0.1, inplace=False)\n",
471 | " (lin1): Linear(in_features=768, out_features=3072, bias=True)\n",
472 | " (lin2): Linear(in_features=3072, out_features=768, bias=True)\n",
473 | " )\n",
474 | " (output_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
475 | " )\n",
476 | " (3): TransformerBlock(\n",
477 | " (attention): MultiHeadSelfAttention(\n",
478 | " (dropout): Dropout(p=0.1, inplace=False)\n",
479 | " (q_lin): Linear(in_features=768, out_features=768, bias=True)\n",
480 | " (k_lin): Linear(in_features=768, out_features=768, bias=True)\n",
481 | " (v_lin): Linear(in_features=768, out_features=768, bias=True)\n",
482 | " (out_lin): Linear(in_features=768, out_features=768, bias=True)\n",
483 | " )\n",
484 | " (sa_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
485 | " (ffn): FFN(\n",
486 | " (dropout): Dropout(p=0.1, inplace=False)\n",
487 | " (lin1): Linear(in_features=768, out_features=3072, bias=True)\n",
488 | " (lin2): Linear(in_features=3072, out_features=768, bias=True)\n",
489 | " )\n",
490 | " (output_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
491 | " )\n",
492 | " (4): TransformerBlock(\n",
493 | " (attention): MultiHeadSelfAttention(\n",
494 | " (dropout): Dropout(p=0.1, inplace=False)\n",
495 | " (q_lin): Linear(in_features=768, out_features=768, bias=True)\n",
496 | " (k_lin): Linear(in_features=768, out_features=768, bias=True)\n",
497 | " (v_lin): Linear(in_features=768, out_features=768, bias=True)\n",
498 | " (out_lin): Linear(in_features=768, out_features=768, bias=True)\n",
499 | " )\n",
500 | " (sa_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
501 | " (ffn): FFN(\n",
502 | " (dropout): Dropout(p=0.1, inplace=False)\n",
503 | " (lin1): Linear(in_features=768, out_features=3072, bias=True)\n",
504 | " (lin2): Linear(in_features=3072, out_features=768, bias=True)\n",
505 | " )\n",
506 | " (output_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
507 | " )\n",
508 | " (5): TransformerBlock(\n",
509 | " (attention): MultiHeadSelfAttention(\n",
510 | " (dropout): Dropout(p=0.1, inplace=False)\n",
511 | " (q_lin): Linear(in_features=768, out_features=768, bias=True)\n",
512 | " (k_lin): Linear(in_features=768, out_features=768, bias=True)\n",
513 | " (v_lin): Linear(in_features=768, out_features=768, bias=True)\n",
514 | " (out_lin): Linear(in_features=768, out_features=768, bias=True)\n",
515 | " )\n",
516 | " (sa_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
517 | " (ffn): FFN(\n",
518 | " (dropout): Dropout(p=0.1, inplace=False)\n",
519 | " (lin1): Linear(in_features=768, out_features=3072, bias=True)\n",
520 | " (lin2): Linear(in_features=3072, out_features=768, bias=True)\n",
521 | " )\n",
522 | " (output_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
523 | " )\n",
524 | " )\n",
525 | " )\n",
526 | " )\n",
527 | " (l2): Dropout(p=0.3, inplace=False)\n",
528 | " (l3): Linear(in_features=768, out_features=1, bias=True)\n",
529 | ")"
530 | ]
531 | },
532 | "execution_count": 9,
533 | "metadata": {},
534 | "output_type": "execute_result"
535 | }
536 | ],
537 | "source": [
538 | "model = DistillBERTClass()\n",
539 | "model.to(device)"
540 | ]
541 | },
542 | {
543 | "cell_type": "code",
544 | "execution_count": 10,
545 | "metadata": {},
546 | "outputs": [],
547 | "source": [
548 | "# Creating the loss function and optimizer\n",
549 | "loss_function = torch.nn.CrossEntropyLoss()\n",
550 | "optimizer = torch.optim.Adam(params = model.parameters(), lr=LEARNING_RATE)"
551 | ]
552 | },
553 | {
554 | "cell_type": "markdown",
555 | "metadata": {},
556 | "source": [
557 | "\n",
558 | "### Fine Tuning the Model\n",
559 | "\n",
560 | "After all the effort of loading and preparing the data and datasets, creating the model, and defining its loss and optimizer, this is probably the easiest step in the process. \n",
561 | "\n",
562 | "Here we define a training function that trains the model on the training dataset created above for a specified number of epochs (EPOCHS). An epoch defines how many times the complete data will be passed through the network. \n",
563 | "\n",
564 | "Following events happen in this function to fine tune the neural network:\n",
565 | "- The dataloader passes data to the model based on the batch size. \n",
566 | "- Subsequent output from the model and the actual category are compared to calculate the loss. \n",
567 | "- Loss value is used to optimize the weights of the neurons in the network.\n",
568 | "- After every 5000 steps the loss value is printed in the console.\n",
569 | "\n",
570 | "As you can see, in just 1 epoch, by the final step the model was working with a minuscule loss of 0.0002485, i.e. the output is extremely close to the actual output."
571 | ]
572 | },
573 | {
574 | "cell_type": "code",
575 | "execution_count": null,
576 | "metadata": {},
577 | "outputs": [],
578 | "source": [
579 | "# Function to count the correct predictions (used to calculate the accuracy of the model)\n",
580 | "\n",
581 | "def calcuate_accu(big_idx, targets):\n",
582 | " n_correct = (big_idx==targets).sum().item()\n",
583 | " return n_correct"
584 | ]
585 | },
586 | {
587 | "cell_type": "code",
588 | "execution_count": 11,
589 | "metadata": {},
590 | "outputs": [],
591 | "source": [
592 | "# Defining the training function on the 80% of the dataset for tuning the distilbert model\n",
593 | "\n",
594 | "def train(epoch):\n",
595 | " tr_loss = 0\n",
596 | " n_correct = 0\n",
597 | " nb_tr_steps = 0\n",
598 | " nb_tr_examples = 0\n",
599 | " model.train()\n",
600 | " for _,data in enumerate(training_loader, 0):\n",
601 | " ids = data['ids'].to(device, dtype = torch.long)\n",
602 | " mask = data['mask'].to(device, dtype = torch.long)\n",
603 | " targets = data['targets'].to(device, dtype = torch.long)\n",
604 | "\n",
605 | " outputs = model(ids, mask)\n",
606 | " loss = loss_function(outputs, targets)\n",
607 | " tr_loss += loss.item()\n",
608 | " big_val, big_idx = torch.max(outputs.data, dim=1)\n",
609 | " n_correct += calcuate_accu(big_idx, targets)\n",
610 | "\n",
611 | " nb_tr_steps += 1\n",
612 | " nb_tr_examples+=targets.size(0)\n",
613 | " \n",
614 | " if _%5000==0:\n",
615 | " loss_step = tr_loss/nb_tr_steps\n",
616 | " accu_step = (n_correct*100)/nb_tr_examples \n",
617 | " print(f\"Training Loss per 5000 steps: {loss_step}\")\n",
618 | " print(f\"Training Accuracy per 5000 steps: {accu_step}\")\n",
619 | "\n",
620 | " optimizer.zero_grad()\n",
621 | " loss.backward()\n",
622 | " # # When using GPU\n",
623 | " optimizer.step()\n",
624 | "\n",
625 | " print(f'The Total Accuracy for Epoch {epoch}: {(n_correct*100)/nb_tr_examples}')\n",
626 | " epoch_loss = tr_loss/nb_tr_steps\n",
627 | " epoch_accu = (n_correct*100)/nb_tr_examples\n",
628 | " print(f\"Training Loss Epoch: {epoch_loss}\")\n",
629 | " print(f\"Training Accuracy Epoch: {epoch_accu}\")\n",
630 | "\n",
631 | " return "
632 | ]
633 | },
634 | {
635 | "cell_type": "code",
636 | "execution_count": 12,
637 | "metadata": {},
638 | "outputs": [
639 | {
640 | "name": "stdout",
641 | "output_type": "stream",
642 | "text": [
643 | "Epoch: 0, Loss: 6.332988739013672\n",
644 | "Epoch: 0, Loss: 0.0013066530227661133\n",
645 | "Epoch: 0, Loss: 0.0029534101486206055\n",
646 | "Epoch: 0, Loss: 0.005258679389953613\n",
647 | "Epoch: 0, Loss: 0.0020235776901245117\n",
648 | "Epoch: 0, Loss: 0.0023298263549804688\n",
649 | "Epoch: 0, Loss: 0.0034378767013549805\n",
650 | "Epoch: 0, Loss: 0.004993081092834473\n",
651 | "Epoch: 0, Loss: 0.008559942245483398\n",
652 | "Epoch: 0, Loss: 0.0014510154724121094\n",
653 | "Epoch: 0, Loss: 0.0028634071350097656\n",
654 | "Epoch: 0, Loss: 0.0006411075592041016\n",
655 | "Epoch: 0, Loss: 0.0012137889862060547\n",
656 | "Epoch: 0, Loss: 0.002307891845703125\n",
657 | "Epoch: 0, Loss: 0.00028586387634277344\n",
658 | "Epoch: 0, Loss: 0.0029143095016479492\n",
659 | "Epoch: 0, Loss: 0.0002485513687133789\n"
660 | ]
661 | }
662 | ],
663 | "source": [
664 | "for epoch in range(EPOCHS):\n",
665 | " train(epoch)"
666 | ]
667 | },
668 | {
669 | "cell_type": "markdown",
670 | "metadata": {},
671 | "source": [
672 | "\n",
673 | "### Validating the Model\n",
674 | "\n",
675 | "During the validation stage we pass the unseen data (Testing Dataset) to the model. This step determines how well the model performs on the unseen data. \n",
676 | "\n",
677 | "This unseen data is the 20% of `newsCorpora.csv` which was separated during the Dataset creation stage. \n",
678 | "During the validation stage the weights of the model are not updated. Only the final output is compared to the actual value. This comparison is then used to calculate the accuracy of the model. \n",
679 | "\n",
680 | "As you can see, the model predicts the correct category of a given headline with 99.9% accuracy."
681 | ]
682 | },
683 | {
684 | "cell_type": "code",
685 | "execution_count": 15,
686 | "metadata": {},
687 | "outputs": [],
688 | "source": [
689 | "def valid(model, testing_loader):\n",
690 | " model.eval()\n",
691 | " n_correct = 0; n_wrong = 0; total = 0; tr_loss = 0; nb_tr_steps = 0; nb_tr_examples = 0  # initialise all running counters used below\n",
692 | " with torch.no_grad():\n",
693 | " for _, data in enumerate(testing_loader, 0):\n",
694 | " ids = data['ids'].to(device, dtype = torch.long)\n",
695 | " mask = data['mask'].to(device, dtype = torch.long)\n",
696 | " targets = data['targets'].to(device, dtype = torch.long)\n",
697 | " outputs = model(ids, mask).squeeze()\n",
698 | " loss = loss_function(outputs, targets)\n",
699 | " tr_loss += loss.item()\n",
700 | " big_val, big_idx = torch.max(outputs.data, dim=1)\n",
701 | " n_correct += calcuate_accu(big_idx, targets)\n",
702 | "\n",
703 | " nb_tr_steps += 1\n",
704 | " nb_tr_examples+=targets.size(0)\n",
705 | " \n",
706 | " if _%5000==0:\n",
707 | " loss_step = tr_loss/nb_tr_steps\n",
708 | " accu_step = (n_correct*100)/nb_tr_examples\n",
709 | " print(f\"Validation Loss per 5000 steps: {loss_step}\")\n",
710 | " print(f\"Validation Accuracy per 5000 steps: {accu_step}\")\n",
711 | " epoch_loss = tr_loss/nb_tr_steps\n",
712 | " epoch_accu = (n_correct*100)/nb_tr_examples\n",
713 | " print(f\"Validation Loss Epoch: {epoch_loss}\")\n",
714 | " print(f\"Validation Accuracy Epoch: {epoch_accu}\")\n",
715 | " \n",
716 | " return epoch_accu\n"
717 | ]
718 | },
719 | {
720 | "cell_type": "code",
721 | "execution_count": 16,
722 | "metadata": {},
723 | "outputs": [
724 | {
725 | "name": "stdout",
726 | "output_type": "stream",
727 | "text": [
728 | "This is the validation section to print the accuracy and see how it performs\n",
729 | "Here we are leveraging the dataloader created for the validation dataset; the approach uses more of PyTorch\n",
730 | "Accuracy on test data = 99.99%\n"
731 | ]
732 | }
733 | ],
734 | "source": [
735 | "print('This is the validation section to print the accuracy and see how it performs')\n",
736 | "print('Here we are leveraging the dataloader created for the validation dataset; the approach uses more of PyTorch')\n",
737 | "\n",
738 | "acc = valid(model, testing_loader)\n",
739 | "print(\"Accuracy on test data = %0.2f%%\" % acc)"
740 | ]
741 | },
742 | {
743 | "cell_type": "markdown",
744 | "metadata": {},
745 | "source": [
746 | "\n",
747 | "### Saving the Trained Model Artifacts for inference\n",
748 | "\n",
749 | "This is the final step in the process of fine-tuning the model. \n",
750 | "\n",
751 | "The model and its vocabulary are saved locally. These files can then be used in the future to run inference on new news headlines.\n",
752 | "\n",
753 | "Please remember that a trained neural network is only useful when it is actually used for inference after training. \n",
754 | "\n",
755 | "In the lifecycle of an ML project this is only half the job done. We will leave the inference with these models for some other day. "
756 | ]
757 | },
758 | {
759 | "cell_type": "code",
760 | "execution_count": 20,
761 | "metadata": {},
762 | "outputs": [
763 | {
764 | "name": "stdout",
765 | "output_type": "stream",
766 | "text": [
767 | "All files saved\n",
768 | "This tutorial is completed\n"
769 | ]
770 | }
771 | ],
772 | "source": [
773 | "# Saving the files for re-use\n",
774 | "\n",
775 | "output_model_file = './models/pytorch_distilbert_news.bin'\n",
776 | "output_vocab_file = './models/vocab_distilbert_news.bin'\n",
777 | "\n",
778 | "model_to_save = model\n",
779 | "torch.save(model_to_save, output_model_file)\n",
780 | "tokenizer.save_vocabulary(output_vocab_file)\n",
781 | "\n",
782 | "print('All files saved')\n",
783 | "print('This tutorial is completed')"
784 | ]
785 | }
786 | ],
787 | "metadata": {
788 | "colab": {
789 | "collapsed_sections": [],
790 | "name": "01_transformers_multiclass_classification.ipynb",
791 | "provenance": []
792 | },
793 | "kernelspec": {
794 | "display_name": "Python 3",
795 | "language": "python",
796 | "name": "python3"
797 | },
798 | "varInspector": {
799 | "cols": {
800 | "lenName": 16,
801 | "lenType": 16,
802 | "lenVar": 40
803 | },
804 | "kernels_config": {
805 | "python": {
806 | "delete_cmd_postfix": "",
807 | "delete_cmd_prefix": "del ",
808 | "library": "var_list.py",
809 | "varRefreshCmd": "print(var_dic_list())"
810 | },
811 | "r": {
812 | "delete_cmd_postfix": ") ",
813 | "delete_cmd_prefix": "rm(",
814 | "library": "var_list.r",
815 | "varRefreshCmd": "cat(var_dic_list()) "
816 | }
817 | },
818 | "types_to_exclude": [
819 | "module",
820 | "function",
821 | "builtin_function_or_method",
822 | "instance",
823 | "_Feature"
824 | ],
825 | "window_display": false
826 | },
827 | "widgets": {
828 | "application/vnd.jupyter.widget-state+json": {
829 | "222af74a298140ff9ec6e6f1f286a52b": {
830 | "model_module": "@jupyter-widgets/base",
831 | "model_name": "LayoutModel",
832 | "state": {
833 | "_model_module": "@jupyter-widgets/base",
834 | "_model_module_version": "1.2.0",
835 | "_model_name": "LayoutModel",
836 | "_view_count": null,
837 | "_view_module": "@jupyter-widgets/base",
838 | "_view_module_version": "1.2.0",
839 | "_view_name": "LayoutView",
840 | "align_content": null,
841 | "align_items": null,
842 | "align_self": null,
843 | "border": null,
844 | "bottom": null,
845 | "display": null,
846 | "flex": null,
847 | "flex_flow": null,
848 | "grid_area": null,
849 | "grid_auto_columns": null,
850 | "grid_auto_flow": null,
851 | "grid_auto_rows": null,
852 | "grid_column": null,
853 | "grid_gap": null,
854 | "grid_row": null,
855 | "grid_template_areas": null,
856 | "grid_template_columns": null,
857 | "grid_template_rows": null,
858 | "height": null,
859 | "justify_content": null,
860 | "justify_items": null,
861 | "left": null,
862 | "margin": null,
863 | "max_height": null,
864 | "max_width": null,
865 | "min_height": null,
866 | "min_width": null,
867 | "object_fit": null,
868 | "object_position": null,
869 | "order": null,
870 | "overflow": null,
871 | "overflow_x": null,
872 | "overflow_y": null,
873 | "padding": null,
874 | "right": null,
875 | "top": null,
876 | "visibility": null,
877 | "width": null
878 | }
879 | },
880 | "3516912b49bf4e3e98c9ce6c7b1b1469": {
881 | "model_module": "@jupyter-widgets/controls",
882 | "model_name": "DescriptionStyleModel",
883 | "state": {
884 | "_model_module": "@jupyter-widgets/controls",
885 | "_model_module_version": "1.5.0",
886 | "_model_name": "DescriptionStyleModel",
887 | "_view_count": null,
888 | "_view_module": "@jupyter-widgets/base",
889 | "_view_module_version": "1.2.0",
890 | "_view_name": "StyleView",
891 | "description_width": ""
892 | }
893 | },
894 | "4641ff326eea446c88903e70b85c90d7": {
895 | "model_module": "@jupyter-widgets/base",
896 | "model_name": "LayoutModel",
897 | "state": {
898 | "_model_module": "@jupyter-widgets/base",
899 | "_model_module_version": "1.2.0",
900 | "_model_name": "LayoutModel",
901 | "_view_count": null,
902 | "_view_module": "@jupyter-widgets/base",
903 | "_view_module_version": "1.2.0",
904 | "_view_name": "LayoutView",
905 | "align_content": null,
906 | "align_items": null,
907 | "align_self": null,
908 | "border": null,
909 | "bottom": null,
910 | "display": null,
911 | "flex": null,
912 | "flex_flow": null,
913 | "grid_area": null,
914 | "grid_auto_columns": null,
915 | "grid_auto_flow": null,
916 | "grid_auto_rows": null,
917 | "grid_column": null,
918 | "grid_gap": null,
919 | "grid_row": null,
920 | "grid_template_areas": null,
921 | "grid_template_columns": null,
922 | "grid_template_rows": null,
923 | "height": null,
924 | "justify_content": null,
925 | "justify_items": null,
926 | "left": null,
927 | "margin": null,
928 | "max_height": null,
929 | "max_width": null,
930 | "min_height": null,
931 | "min_width": null,
932 | "object_fit": null,
933 | "object_position": null,
934 | "order": null,
935 | "overflow": null,
936 | "overflow_x": null,
937 | "overflow_y": null,
938 | "padding": null,
939 | "right": null,
940 | "top": null,
941 | "visibility": null,
942 | "width": null
943 | }
944 | },
945 | "5a975bc922a14c1b82245195b7c7f659": {
946 | "model_module": "@jupyter-widgets/controls",
947 | "model_name": "HTMLModel",
948 | "state": {
949 | "_dom_classes": [],
950 | "_model_module": "@jupyter-widgets/controls",
951 | "_model_module_version": "1.5.0",
952 | "_model_name": "HTMLModel",
953 | "_view_count": null,
954 | "_view_module": "@jupyter-widgets/controls",
955 | "_view_module_version": "1.5.0",
956 | "_view_name": "HTMLView",
957 | "description": "",
958 | "description_tooltip": null,
959 | "layout": "IPY_MODEL_8335299626724853865234b86745e2a5",
960 | "placeholder": "",
961 | "style": "IPY_MODEL_3516912b49bf4e3e98c9ce6c7b1b1469",
962 | "value": " 442/442 [00:01<00:00, 276B/s]"
963 | }
964 | },
965 | "7532a60d077248ca963f514988d41acc": {
966 | "model_module": "@jupyter-widgets/controls",
967 | "model_name": "HBoxModel",
968 | "state": {
969 | "_dom_classes": [],
970 | "_model_module": "@jupyter-widgets/controls",
971 | "_model_module_version": "1.5.0",
972 | "_model_name": "HBoxModel",
973 | "_view_count": null,
974 | "_view_module": "@jupyter-widgets/controls",
975 | "_view_module_version": "1.5.0",
976 | "_view_name": "HBoxView",
977 | "box_style": "",
978 | "children": [
979 | "IPY_MODEL_ca0092edb8f5442a988c87c89930a10e",
980 | "IPY_MODEL_5a975bc922a14c1b82245195b7c7f659"
981 | ],
982 | "layout": "IPY_MODEL_e39121739a4c4bffb16155e9a61a58cc"
983 | }
984 | },
985 | "8335299626724853865234b86745e2a5": {
986 | "model_module": "@jupyter-widgets/base",
987 | "model_name": "LayoutModel",
988 | "state": {
989 | "_model_module": "@jupyter-widgets/base",
990 | "_model_module_version": "1.2.0",
991 | "_model_name": "LayoutModel",
992 | "_view_count": null,
993 | "_view_module": "@jupyter-widgets/base",
994 | "_view_module_version": "1.2.0",
995 | "_view_name": "LayoutView",
996 | "align_content": null,
997 | "align_items": null,
998 | "align_self": null,
999 | "border": null,
1000 | "bottom": null,
1001 | "display": null,
1002 | "flex": null,
1003 | "flex_flow": null,
1004 | "grid_area": null,
1005 | "grid_auto_columns": null,
1006 | "grid_auto_flow": null,
1007 | "grid_auto_rows": null,
1008 | "grid_column": null,
1009 | "grid_gap": null,
1010 | "grid_row": null,
1011 | "grid_template_areas": null,
1012 | "grid_template_columns": null,
1013 | "grid_template_rows": null,
1014 | "height": null,
1015 | "justify_content": null,
1016 | "justify_items": null,
1017 | "left": null,
1018 | "margin": null,
1019 | "max_height": null,
1020 | "max_width": null,
1021 | "min_height": null,
1022 | "min_width": null,
1023 | "object_fit": null,
1024 | "object_position": null,
1025 | "order": null,
1026 | "overflow": null,
1027 | "overflow_x": null,
1028 | "overflow_y": null,
1029 | "padding": null,
1030 | "right": null,
1031 | "top": null,
1032 | "visibility": null,
1033 | "width": null
1034 | }
1035 | },
1036 | "b13c341a69c14c059edc40e6b438ee80": {
1037 | "model_module": "@jupyter-widgets/controls",
1038 | "model_name": "IntProgressModel",
1039 | "state": {
1040 | "_dom_classes": [],
1041 | "_model_module": "@jupyter-widgets/controls",
1042 | "_model_module_version": "1.5.0",
1043 | "_model_name": "IntProgressModel",
1044 | "_view_count": null,
1045 | "_view_module": "@jupyter-widgets/controls",
1046 | "_view_module_version": "1.5.0",
1047 | "_view_name": "ProgressView",
1048 | "bar_style": "success",
1049 | "description": "Downloading: 100%",
1050 | "description_tooltip": null,
1051 | "layout": "IPY_MODEL_4641ff326eea446c88903e70b85c90d7",
1052 | "max": 231508,
1053 | "min": 0,
1054 | "orientation": "horizontal",
1055 | "style": "IPY_MODEL_dd589fc95cd94cd190bc2640f9ef618c",
1056 | "value": 231508
1057 | }
1058 | },
1059 | "b8728a6fcdc54b72a10c8bfdcf794fad": {
1060 | "model_module": "@jupyter-widgets/base",
1061 | "model_name": "LayoutModel",
1062 | "state": {
1063 | "_model_module": "@jupyter-widgets/base",
1064 | "_model_module_version": "1.2.0",
1065 | "_model_name": "LayoutModel",
1066 | "_view_count": null,
1067 | "_view_module": "@jupyter-widgets/base",
1068 | "_view_module_version": "1.2.0",
1069 | "_view_name": "LayoutView",
1070 | "align_content": null,
1071 | "align_items": null,
1072 | "align_self": null,
1073 | "border": null,
1074 | "bottom": null,
1075 | "display": null,
1076 | "flex": null,
1077 | "flex_flow": null,
1078 | "grid_area": null,
1079 | "grid_auto_columns": null,
1080 | "grid_auto_flow": null,
1081 | "grid_auto_rows": null,
1082 | "grid_column": null,
1083 | "grid_gap": null,
1084 | "grid_row": null,
1085 | "grid_template_areas": null,
1086 | "grid_template_columns": null,
1087 | "grid_template_rows": null,
1088 | "height": null,
1089 | "justify_content": null,
1090 | "justify_items": null,
1091 | "left": null,
1092 | "margin": null,
1093 | "max_height": null,
1094 | "max_width": null,
1095 | "min_height": null,
1096 | "min_width": null,
1097 | "object_fit": null,
1098 | "object_position": null,
1099 | "order": null,
1100 | "overflow": null,
1101 | "overflow_x": null,
1102 | "overflow_y": null,
1103 | "padding": null,
1104 | "right": null,
1105 | "top": null,
1106 | "visibility": null,
1107 | "width": null
1108 | }
1109 | },
1110 | "c1db6876e9b04293b4077f13006b4a19": {
1111 | "model_module": "@jupyter-widgets/controls",
1112 | "model_name": "DescriptionStyleModel",
1113 | "state": {
1114 | "_model_module": "@jupyter-widgets/controls",
1115 | "_model_module_version": "1.5.0",
1116 | "_model_name": "DescriptionStyleModel",
1117 | "_view_count": null,
1118 | "_view_module": "@jupyter-widgets/base",
1119 | "_view_module_version": "1.2.0",
1120 | "_view_name": "StyleView",
1121 | "description_width": ""
1122 | }
1123 | },
1124 | "ca0092edb8f5442a988c87c89930a10e": {
1125 | "model_module": "@jupyter-widgets/controls",
1126 | "model_name": "IntProgressModel",
1127 | "state": {
1128 | "_dom_classes": [],
1129 | "_model_module": "@jupyter-widgets/controls",
1130 | "_model_module_version": "1.5.0",
1131 | "_model_name": "IntProgressModel",
1132 | "_view_count": null,
1133 | "_view_module": "@jupyter-widgets/controls",
1134 | "_view_module_version": "1.5.0",
1135 | "_view_name": "ProgressView",
1136 | "bar_style": "success",
1137 | "description": "Downloading: 100%",
1138 | "description_tooltip": null,
1139 | "layout": "IPY_MODEL_222af74a298140ff9ec6e6f1f286a52b",
1140 | "max": 442,
1141 | "min": 0,
1142 | "orientation": "horizontal",
1143 | "style": "IPY_MODEL_f9dd503454b2450e957a96bff807c8d9",
1144 | "value": 442
1145 | }
1146 | },
1147 | "cde06f09879848a3bfc5892390a51d36": {
1148 | "model_module": "@jupyter-widgets/base",
1149 | "model_name": "LayoutModel",
1150 | "state": {
1151 | "_model_module": "@jupyter-widgets/base",
1152 | "_model_module_version": "1.2.0",
1153 | "_model_name": "LayoutModel",
1154 | "_view_count": null,
1155 | "_view_module": "@jupyter-widgets/base",
1156 | "_view_module_version": "1.2.0",
1157 | "_view_name": "LayoutView",
1158 | "align_content": null,
1159 | "align_items": null,
1160 | "align_self": null,
1161 | "border": null,
1162 | "bottom": null,
1163 | "display": null,
1164 | "flex": null,
1165 | "flex_flow": null,
1166 | "grid_area": null,
1167 | "grid_auto_columns": null,
1168 | "grid_auto_flow": null,
1169 | "grid_auto_rows": null,
1170 | "grid_column": null,
1171 | "grid_gap": null,
1172 | "grid_row": null,
1173 | "grid_template_areas": null,
1174 | "grid_template_columns": null,
1175 | "grid_template_rows": null,
1176 | "height": null,
1177 | "justify_content": null,
1178 | "justify_items": null,
1179 | "left": null,
1180 | "margin": null,
1181 | "max_height": null,
1182 | "max_width": null,
1183 | "min_height": null,
1184 | "min_width": null,
1185 | "object_fit": null,
1186 | "object_position": null,
1187 | "order": null,
1188 | "overflow": null,
1189 | "overflow_x": null,
1190 | "overflow_y": null,
1191 | "padding": null,
1192 | "right": null,
1193 | "top": null,
1194 | "visibility": null,
1195 | "width": null
1196 | }
1197 | },
1198 | "dd589fc95cd94cd190bc2640f9ef618c": {
1199 | "model_module": "@jupyter-widgets/controls",
1200 | "model_name": "ProgressStyleModel",
1201 | "state": {
1202 | "_model_module": "@jupyter-widgets/controls",
1203 | "_model_module_version": "1.5.0",
1204 | "_model_name": "ProgressStyleModel",
1205 | "_view_count": null,
1206 | "_view_module": "@jupyter-widgets/base",
1207 | "_view_module_version": "1.2.0",
1208 | "_view_name": "StyleView",
1209 | "bar_color": null,
1210 | "description_width": "initial"
1211 | }
1212 | },
1213 | "dd7699e7813d4b9f9e80990498a39539": {
1214 | "model_module": "@jupyter-widgets/controls",
1215 | "model_name": "HTMLModel",
1216 | "state": {
1217 | "_dom_classes": [],
1218 | "_model_module": "@jupyter-widgets/controls",
1219 | "_model_module_version": "1.5.0",
1220 | "_model_name": "HTMLModel",
1221 | "_view_count": null,
1222 | "_view_module": "@jupyter-widgets/controls",
1223 | "_view_module_version": "1.5.0",
1224 | "_view_name": "HTMLView",
1225 | "description": "",
1226 | "description_tooltip": null,
1227 | "layout": "IPY_MODEL_b8728a6fcdc54b72a10c8bfdcf794fad",
1228 | "placeholder": "",
1229 | "style": "IPY_MODEL_c1db6876e9b04293b4077f13006b4a19",
1230 | "value": " 232k/232k [00:00<00:00, 584kB/s]"
1231 | }
1232 | },
1233 | "e39121739a4c4bffb16155e9a61a58cc": {
1234 | "model_module": "@jupyter-widgets/base",
1235 | "model_name": "LayoutModel",
1236 | "state": {
1237 | "_model_module": "@jupyter-widgets/base",
1238 | "_model_module_version": "1.2.0",
1239 | "_model_name": "LayoutModel",
1240 | "_view_count": null,
1241 | "_view_module": "@jupyter-widgets/base",
1242 | "_view_module_version": "1.2.0",
1243 | "_view_name": "LayoutView",
1244 | "align_content": null,
1245 | "align_items": null,
1246 | "align_self": null,
1247 | "border": null,
1248 | "bottom": null,
1249 | "display": null,
1250 | "flex": null,
1251 | "flex_flow": null,
1252 | "grid_area": null,
1253 | "grid_auto_columns": null,
1254 | "grid_auto_flow": null,
1255 | "grid_auto_rows": null,
1256 | "grid_column": null,
1257 | "grid_gap": null,
1258 | "grid_row": null,
1259 | "grid_template_areas": null,
1260 | "grid_template_columns": null,
1261 | "grid_template_rows": null,
1262 | "height": null,
1263 | "justify_content": null,
1264 | "justify_items": null,
1265 | "left": null,
1266 | "margin": null,
1267 | "max_height": null,
1268 | "max_width": null,
1269 | "min_height": null,
1270 | "min_width": null,
1271 | "object_fit": null,
1272 | "object_position": null,
1273 | "order": null,
1274 | "overflow": null,
1275 | "overflow_x": null,
1276 | "overflow_y": null,
1277 | "padding": null,
1278 | "right": null,
1279 | "top": null,
1280 | "visibility": null,
1281 | "width": null
1282 | }
1283 | },
1284 | "f9dd503454b2450e957a96bff807c8d9": {
1285 | "model_module": "@jupyter-widgets/controls",
1286 | "model_name": "ProgressStyleModel",
1287 | "state": {
1288 | "_model_module": "@jupyter-widgets/controls",
1289 | "_model_module_version": "1.5.0",
1290 | "_model_name": "ProgressStyleModel",
1291 | "_view_count": null,
1292 | "_view_module": "@jupyter-widgets/base",
1293 | "_view_module_version": "1.2.0",
1294 | "_view_name": "StyleView",
1295 | "bar_color": null,
1296 | "description_width": "initial"
1297 | }
1298 | },
1299 | "fd6f40d5854c49f79056cebd16f86626": {
1300 | "model_module": "@jupyter-widgets/controls",
1301 | "model_name": "HBoxModel",
1302 | "state": {
1303 | "_dom_classes": [],
1304 | "_model_module": "@jupyter-widgets/controls",
1305 | "_model_module_version": "1.5.0",
1306 | "_model_name": "HBoxModel",
1307 | "_view_count": null,
1308 | "_view_module": "@jupyter-widgets/controls",
1309 | "_view_module_version": "1.5.0",
1310 | "_view_name": "HBoxView",
1311 | "box_style": "",
1312 | "children": [
1313 | "IPY_MODEL_b13c341a69c14c059edc40e6b438ee80",
1314 | "IPY_MODEL_dd7699e7813d4b9f9e80990498a39539"
1315 | ],
1316 | "layout": "IPY_MODEL_cde06f09879848a3bfc5892390a51d36"
1317 | }
1318 | }
1319 | }
1320 | }
1321 | },
1322 | "nbformat": 4,
1323 | "nbformat_minor": 1
1324 | }
1325 |
--------------------------------------------------------------------------------
/transformers_ner.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {
6 | "colab_type": "text",
7 | "id": "rFvcIe4qz_2t"
8 | },
9 | "source": [
10 | "# Fine Tuning Transformer for Named Entity Recognition"
11 | ]
12 | },
13 | {
14 | "cell_type": "markdown",
15 | "metadata": {
16 | "colab_type": "text",
17 | "id": "Zt90X_Dw0B_T"
18 | },
19 | "source": [
20 | "### Introduction\n",
21 | "\n",
22 | "In this tutorial we will be fine tuning a transformer model for the **Named Entity Recognition** problem. \n",
23 | "This is one of the most common business problems where a given piece of text/sentence/document different entites need to be identified such as: Name, Location, Number, Entity etc.\n",
24 | "\n",
25 | "#### Flow of the notebook\n",
26 | "\n",
27 | "The notebook will be divided into seperate sections to provide a organized walk through for the process used. This process can be modified for individual use cases. The sections are:\n",
28 | "\n",
29 | "1. [Installing packages for preparing the system](#section00)\n",
30 | "2. [Importing Python Libraries and preparing the environment](#section01)\n",
31 | "3. [Importing and Pre-Processing the domain data](#section02)\n",
32 | "4. [Preparing the Dataset and Dataloader](#section03)\n",
33 | "5. [Creating the Neural Network for Fine Tuning](#section04)\n",
34 | "6. [Fine Tuning the Model](#section05)\n",
35 | "7. [Validating the Model Performance](#section06)\n",
36 | "\n",
37 | "#### Technical Details\n",
38 | "\n",
39 | "This script leverages on multiple tools designed by other teams. Details of the tools used below. Please ensure that these elements are present in your setup to successfully implement this script.\n",
40 | "\n",
41 | " - Data:\n",
42 | "\t- We are working from a dataset available on [Kaggle](https://www.kaggle.com/)\n",
43 | " - This NER annotated dataset is available at the following [link](https://www.kaggle.com/abhinavwalia95/entity-annotated-corpus)\n",
44 | " - We will be working with the file `ner.csv` from the dataset. \n",
45 | " - In the given file we will be looking at the following columns for the purpose of this fine tuning:\n",
46 | " - `sentence_idx` : This is the identifier that the word in the row is part of the same sentence\n",
47 | " - `word` : Word in the sentence\n",
48 | " - `tag` : This is the identifier that is used to identify the entity in the dataset. \n",
49 | " - The various entites tagged in this dataset are as per below:\n",
50 | " - geo = Geographical Entity\n",
51 | " - org = Organization\n",
52 | " - per = Person\n",
53 | " - gpe = Geopolitical Entity\n",
54 | " - tim = Time indicator\n",
55 | " - art = Artifact\n",
56 | " - eve = Event\n",
57 | " - nat = Natural Phenomenon\n",
58 | "\n",
59 | "\n",
60 | " - Language Model Used:\n",
61 | "\t - We are using BERT for this project. Hugging face team has created a customized model for token classification, called **BertForTokenClassification**. We will be using it in our custommodel class for training. \n",
62 | "\t - [Blog-Post](https://ai.googleblog.com/2018/11/open-sourcing-bert-state-of-art-pre.html)\n",
63 | " - [Documentation for python](https://huggingface.co/transformers/model_doc/bert.html#bertfortokenclassification)\n",
64 | "\n",
65 | "\n",
66 | " - Hardware Requirements:\n",
67 | "\t - Python 3.6 and above\n",
68 | "\t - Pytorch, Transformers and All the stock Python ML Libraries\n",
69 | "\t - TPU enabled setup. This can also be executed over GPU but the code base will need some changes. \n",
70 | "\n",
71 | "\n",
72 | " - Script Objective:\n",
73 | "\t - The objective of this script is to fine tune **BertForTokenClassification**` to be able to identify the entites as per the given test dataset. The entities labled in the given dataset are as follows:"
74 | ]
75 | },
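To make the data and model descriptions above concrete, here is a minimal, hedged sketch. It is not the notebook's actual code: the file path, the `bert-base-uncased` checkpoint, and the forward-fill step are assumptions for illustration; only the column names (`sentence_idx`, `word`, `tag`) and the `BertForTokenClassification` head come from the description above.

```python
# Minimal sketch (assumptions, not the notebook's exact code): group the ner.csv
# rows into sentences using the columns described above, and instantiate the
# stock BertForTokenClassification head sized to the tag set.
import pandas as pd
from transformers import BertTokenizer, BertForTokenClassification

# Path and encoding are assumptions for illustration.
df = pd.read_csv("data/ner.csv", encoding="latin-1")
df = df[["sentence_idx", "word", "tag"]].ffill()  # forward-fill in case sentence ids are sparse

# Each row holds one word; sentence_idx groups rows that form the same sentence.
sentences = df.groupby("sentence_idx")["word"].apply(list).tolist()
tags = df.groupby("sentence_idx")["tag"].apply(list).tolist()

# Label <-> id mapping derived from the tags actually present in the file.
unique_tags = sorted(df["tag"].unique())
tag2id = {t: i for i, t in enumerate(unique_tags)}

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = BertForTokenClassification.from_pretrained(
    "bert-base-uncased", num_labels=len(unique_tags)
)

print(sentences[0][:5], tags[0][:5])  # e.g. ['Thousands', 'of', ...] with their 'O' tags
```

In the notebook itself, the tokenized words and their label ids are then wrapped in a Dataset/DataLoader before fine tuning; this sketch only illustrates how the raw columns map to (sentence, tag-sequence) pairs.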
76 | {
77 | "cell_type": "markdown",
78 | "metadata": {},
79 | "source": [
80 | "\n",
81 | "### Installing packages for preparing the system\n",
82 | "\n",
83 | "We are installing 2 packages for the purposes of TPU execution and f1 metric score calculation respectively\n",
84 | "*You can skip this step if you already have these libraries installed in your environment*"
85 | ]
86 | },
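The F1 calculation referred to above is typically done at the entity level rather than per token. The snippet below is an illustrative sketch assuming the `seqeval` package is the metric library meant here (a common choice for NER F1, though the source does not name it); the tag values mirror the BIO-style labels described in the introduction.

```python
# Illustrative sketch: entity-level F1 with seqeval (assumed, not confirmed by the source).
# seqeval scores whole entity spans, so a partially matched span counts as an error.
from seqeval.metrics import classification_report, f1_score

y_true = [["O", "B-geo", "I-geo", "O"], ["B-per", "O"]]
y_pred = [["O", "B-geo", "O", "O"], ["B-per", "O"]]

print(f1_score(y_true, y_pred))               # aggregate F1 over entity spans
print(classification_report(y_true, y_pred))  # per-entity-type breakdown
```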
87 | {
88 | "cell_type": "code",
89 | "execution_count": 2,
90 | "metadata": {
91 | "colab": {
92 | "base_uri": "https://localhost:8080/",
93 | "height": 773
94 | },
95 | "colab_type": "code",
96 | "id": "pWbkd8Ld8MwL",
97 | "outputId": "b44f7ea3-2c0a-4e7c-f7ed-19f43d62de28"
98 | },
99 | "outputs": [
100 | {
101 | "name": "stdout",
102 | "output_type": "stream",
103 | "text": [
104 | " % Total % Received % Xferd Average Speed Time Time Time Current\n",
105 | " Dload Upload Total Spent Left Speed\n",
106 | "\r",
107 | " 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\r",
108 | "100 3727 100 3727 0 0 41876 0 --:--:-- --:--:-- --:--:-- 41876\n",
109 | "Updating TPU and VM. This may take around 2 minutes.\n",
110 | "Updating TPU runtime to pytorch-dev20200325 ...\n",
111 | "Done updating TPU runtime:
\n",
    273 | "[HTML table output: preview of the dataframe with columns pos, sentence_idx, word, tag — rows 0-4: (NNS, 1.0, Thousands, O), (IN, 1.0, of, O), (NNS, 1.0, demonstrators, O), (VBP, 1.0, have, O), (VBN, 1.0, marched, O)]\n",
739 | " Project page: https://app.wandb.ai/abhimishra-91/transformers_tutorials_sentiment
\n",
740 | " Run page: https://app.wandb.ai/abhimishra-91/transformers_tutorials_sentiment/runs/1zwn4gbg
\n",
741 | " "
742 | ],
743 | "text/plain": [
744 | "