├── Logo.jpeg ├── CITATION.cff ├── Preprocessing.py ├── Tokenizer.py ├── README.md └── DarijaBERT_Dialect_Identification.ipynb /Logo.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AIOXLABS/DBert/HEAD/Logo.jpeg -------------------------------------------------------------------------------- /CITATION.cff: -------------------------------------------------------------------------------- 1 | # This CITATION.cff file was generated with cffinit. 2 | # Visit https://bit.ly/cffinit to generate yours today! 3 | 4 | cff-version: 1.2.0 5 | title: DarijaBert 6 | message: >- 7 | If you use this model, please cite it using the 8 | metadata from this file. 9 | type: software 10 | authors: 11 | - given-names: Kamel Gaanoun 12 | email: kamel.gaanoun@gmail.com 13 | affiliation: INSEA 14 | - given-names: Imade Benelallam 15 | email: i.benelallam@insea.ac.ma 16 | affiliation: INSEA/AIOX 17 | - given-names: Abdoumohamed Naira 18 | email: nabdoumohamed@insea.ac.ma 19 | affiliation: INSEA/AIOX 20 | - given-names: Anas Allak 21 | email: aallak@insea.ac.ma 22 | affiliation: INSEA/AIOX 23 | -------------------------------------------------------------------------------- /Preprocessing.py: -------------------------------------------------------------------------------- 1 | #pip install pyarabic 2 | 3 | import string 4 | import re 5 | from pyarabic.araby import strip_tatweel,strip_tashkeel 6 | 7 | def repted(text): 8 | text=re.sub(r'(.)\1+', r'\1', text)# Replace with only one (remove repetitions) 9 | return text 10 | 11 | ###For Pandas dataframe 12 | def pre_processing(df,source,field): 13 | df[field] = df[source] 14 | df[field] = df[field].replace(r'http\S+', 'URL', regex=True).replace(r'www\S+', 'URL', regex=True) # Replace URLs with URL string 15 | df[field] = df[field].replace(r'@[^\s]+', 'USER', regex=True) # Replace user mentions with USER string 16 | df[field] = df[field].replace(r'#[^\s]+', 'HASHTAG', regex=True) # Replace Hashtags with HASHTAG string 17 | df=df[df[field].apply(lambda x:len(re.findall(r'[\u0600-\u06FF]+', x)))>1] #Keep sequences with at least 2 arabic words 18 | df[field] = df[field].apply(strip_tatweel) #Remove Tatweel string 19 | df[field] = df[field].apply(strip_tashkeel) # Remove Diacritics 20 | df[field] = df[field].apply(repted) 21 | return df 22 | -------------------------------------------------------------------------------- /Tokenizer.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | from tokenizers import BertWordPieceTokenizer 4 | 5 | 6 | 7 | # Initialize a tokenizer 8 | tokenizer = BertWordPieceTokenizer() 9 | 10 | TOKENS_TO_ADD = ['[PAD]', '[UNK]', '[CLS]', '[SEP]', '[MASK]', '', ''] 11 | 12 | Vocab_size=80000 13 | 14 | # Customize training 15 | tokenizer.train(files=["path/to/text/data/file.txt"], 16 | vocab_size=Vocab_size, 17 | min_frequency=2, 18 | special_tokens=TOKENS_TO_ADD) 19 | 20 | tokenizer.enable_truncation(max_length=128) 21 | 22 | #Save the tokenizer vocabulary :Vocab.txt 23 | tokenizer.save_model("path/to/config/files") 24 | 25 | # Save tokenizer config: config.json 26 | fw = open(os.path.join("path/to/config/files", 'config.json'), 'w') 27 | json.dump({"do_lower_case": True, 28 | "unk_token": "[UNK]", 29 | "sep_token": "[SEP]", 30 | "pad_token": "[PAD]", 31 | "cls_token": "[CLS]", 32 | "mask_token": "[MASK]", 33 | "model_max_length": 128, 34 | "max_len": 128, 35 | "model_type": "bert", 36 | "vocab_size":Vocab_size}, fw) 
37 | fw.close() -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |

2 | 3 |

4 | 
5 | # What is DarijaBERT?
6 | DarijaBERT is the first open-source BERT model for the Moroccan Arabic dialect called “Darija”. It is based on the same architecture as BERT-base, but without the Next Sentence Prediction (NSP) objective. The model was trained on a total of ~3 million sequences of Darija text, representing 691MB of text or ~100M tokens.
7 | 
8 | # What has been released in this repository?
9 | 
10 | We are releasing the following:
11 | 
12 | * Pre-processing code
13 | * WordPiece tokenization code
14 | * Pre-trained model weights in PyTorch (a TensorFlow version is planned)
15 | * An example notebook for fine-tuning the model
16 | * MTCD dataset
17 | 
18 | # Pretraining data
19 | 
20 | The model was trained on a dataset drawn from three different sources:
21 | * Stories written in Darija scraped from a dedicated website
22 | * YouTube comments from 40 different Moroccan channels
23 | * Tweets crawled based on a list of Darija keywords.
24 | 
25 | Concatenating these datasets yields 691MB of text.
26 | 
27 | # Data preprocessing
28 | 
29 | * Replacing repeated characters with a single occurrence
30 | * Replacing hashtags, user mentions, and URLs with the words HASHTAG, USER, and URL, respectively
31 | * Keeping only sequences containing at least two Arabic words
32 | * Removing the Tatweel character '\\u0640'
33 | * Removing diacritics
34 | # Pretraining Process
35 | 
36 | * The same architecture as [BERT-base](https://github.com/google-research/bert) was used, but without the Next Sentence Prediction objective.
37 | 
38 | * Whole Word Masking (WWM) with a masking probability of 15% was adopted.
39 | 
40 | * The sequences were tokenized using the WordPiece tokenizer from the [Hugging Face Transformers library](https://huggingface.co/transformers/). We chose 128 as the maximum input length for the model.
41 | 
42 | * The vocabulary size is 80,000 WordPiece tokens.
43 | 
44 | The whole training was done on GCP Compute Engine using a free Cloud TPU v3-8 offered by Google's TensorFlow Research Cloud (TRC) program. It took 49 hours to run the 40 epochs of pretraining. Condensed sketches of the preprocessing and tokenizer-training code are shown below.
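The preprocessing rules listed above are implemented in the released `Preprocessing.py`. The snippet below is a condensed, self-contained sketch of the same steps applied to a small pandas DataFrame; the example text and the column names are illustrative only, not part of the released code.

```python
# pip install pyarabic pandas
import re
import pandas as pd
from pyarabic.araby import strip_tatweel, strip_tashkeel

def clean(text):
    """Apply the preprocessing rules described above to a single string."""
    text = re.sub(r'http\S+|www\S+', 'URL', text)   # replace URLs
    text = re.sub(r'@[^\s]+', 'USER', text)         # replace user mentions
    text = re.sub(r'#[^\s]+', 'HASHTAG', text)      # replace hashtags
    text = strip_tatweel(text)                      # remove the Tatweel character
    text = strip_tashkeel(text)                     # remove diacritics
    text = re.sub(r'(.)\1+', r'\1', text)           # collapse repeated characters
    return text

df = pd.DataFrame({"text": ["واش فهمتيني @someone http://example.com 😂😂😂"]})
df["clean"] = df["text"].apply(clean)
df = df[df["clean"].str.count(r'[\u0600-\u06FF]+') > 1]  # keep rows with at least two Arabic words
print(df["clean"].iloc[0])
```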
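Likewise, the WordPiece vocabulary was built with the Hugging Face `tokenizers` library, as in the released `Tokenizer.py`. A minimal sketch follows; the corpus and output paths are placeholders.

```python
from tokenizers import BertWordPieceTokenizer

# Train an 80,000-token WordPiece vocabulary on the pretraining corpus.
tokenizer = BertWordPieceTokenizer()
tokenizer.train(
    files=["path/to/darija_corpus.txt"],   # placeholder: plain-text corpus, one sequence per line
    vocab_size=80000,
    min_frequency=2,
    special_tokens=["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"],
)
tokenizer.enable_truncation(max_length=128)  # maximum input length used for pretraining
tokenizer.save_model("path/to/output_dir")   # writes vocab.txt
```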
45 | # Masking task
46 | Since DarijaBERT was trained with Whole Word Masking, it can predict a missing word in a sentence:
47 | ```python
48 | from transformers import pipeline
49 | unmasker = pipeline('fill-mask', model='Kamel/DarijaBERT')
50 | unmasker(" اشنو [MASK] ليك ")
51 | # one of the predictions returned by the pipeline:
52 | {'score': 0.02539043314754963,
53 |  'sequence': 'اشنو سيفطو ليك',
54 |  'token': 25722,
55 |  'token_str': 'سيفطو'},
56 | ```
57 | 
58 | # Downstream tasks
59 | 
60 | 
61 | **UPCOMING**
62 | 
63 | 
64 | 
65 | **DarijaBERT models were transferred to the SI2M Lab Hugging Face repo on June 20th, 2022.**
66 | 
67 | ## Loading the model
68 | 
69 | The model can be loaded directly using the Hugging Face Transformers library:
70 | ```python
71 | from transformers import AutoTokenizer, AutoModel
72 | DarijaBert_tokenizer = AutoTokenizer.from_pretrained("SI2M-Lab/DarijaBERT")
73 | DarijaBert_model = AutoModel.from_pretrained("SI2M-Lab/DarijaBERT")
74 | ```
75 | 
76 | The checkpoint for the PyTorch framework is available for download at the link below:
77 | 
78 | [https://huggingface.co/SI2M-Lab/DarijaBERT](https://huggingface.co/SI2M-Lab/DarijaBERT)
79 | 
80 | This checkpoint is intended exclusively for research; any commercial use requires the authors' permission. Please contact them by email at dbert@aiox-labs.com.
81 | 
82 | ## Citation
83 | If you use our models in your scientific publications, or if you find the resources in this repository useful, please cite our paper as follows (to be updated):
84 | ```
85 | @article{gaanoun2023darijabert,
86 |   title={DarijaBERT: A Step Forward in NLP for the Written Moroccan Dialect},
87 |   author={Gaanoun, Kamel and Naira, Abdou Mohamed and Allak, Anass and Benelallam, Imade},
88 |   year={2023}
89 | }
90 | 
91 | ```
92 | # Acknowledgments
93 | We gratefully acknowledge Google’s TensorFlow Research Cloud (TRC) program for providing us with free Cloud TPUs.
94 | 
95 | 
96 | 
97 | 
--------------------------------------------------------------------------------
/DarijaBERT_Dialect_Identification.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "nbformat": 4,
3 | "nbformat_minor": 0,
4 | "metadata": {
5 | "accelerator": "GPU",
6 | "colab": {
7 | "name": "DarijaBERT Dialect Identification.ipynb",
8 | "provenance": [],
9 | "collapsed_sections": [],
10 | "toc_visible": true
11 | },
12 | "kernelspec": {
13 | "display_name": "Python 3",
14 | "name": "python3"
15 | },
16 | "language_info": {
17 | "name": "python"
18 | }
19 | },
20 | "cells": [
21 | {
22 | "cell_type": "markdown",
23 | "metadata": {
24 | "id": "zYY_H8Kkp8BK"
25 | },
26 | "source": [
27 | "This is an example notebook describing the different steps to fine-tune DarijaBERT on the Dialect Identification task.\n",
28 | " \n",
29 | "Dataset: [MSDA](https://msda.um6p.ma/msda_datasets) "
30 | ]
31 | },
32 | {
33 | "cell_type": "markdown",
34 | "metadata": {
35 | "id": "ploSy9CKqfiG"
36 | },
37 | "source": [
38 | "# Libraries"
39 | ]
40 | },
41 | {
42 | "cell_type": "code",
43 | "metadata": {
44 | "colab": {
45 | "base_uri": "https://localhost:8080/"
46 | },
47 | "id": "1CWzIGZ5RwMB",
48 | "outputId": "0c2c277c-d745-4ba1-ce66-0544ff175913"
49 | },
50 | "source": [
51 | "!pip install transformers"
52 | ],
53 | "execution_count": null,
54 | "outputs": [
55 | {
56 | "output_type": "stream",
57 | "name": "stdout",
58 | "text": [
59 | "Collecting transformers\n",
60 | " Downloading transformers-4.12.2-py3-none-any.whl (3.1 MB)\n",
61 | "\u001b[K |████████████████████████████████| 3.1 MB 8.8 MB/s \n",
62 | "\u001b[?25hRequirement already satisfied: importlib-metadata in /usr/local/lib/python3.7/dist-packages (from transformers) 
(4.8.1)\n", 63 | "Requirement already satisfied: requests in /usr/local/lib/python3.7/dist-packages (from transformers) (2.23.0)\n", 64 | "Collecting pyyaml>=5.1\n", 65 | " Downloading PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl (596 kB)\n", 66 | "\u001b[K |████████████████████████████████| 596 kB 49.0 MB/s \n", 67 | "\u001b[?25hCollecting tokenizers<0.11,>=0.10.1\n", 68 | " Downloading tokenizers-0.10.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl (3.3 MB)\n", 69 | "\u001b[K |████████████████████████████████| 3.3 MB 57.4 MB/s \n", 70 | "\u001b[?25hRequirement already satisfied: filelock in /usr/local/lib/python3.7/dist-packages (from transformers) (3.3.0)\n", 71 | "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.7/dist-packages (from transformers) (21.0)\n", 72 | "Requirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.7/dist-packages (from transformers) (1.19.5)\n", 73 | "Requirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.7/dist-packages (from transformers) (2019.12.20)\n", 74 | "Requirement already satisfied: tqdm>=4.27 in /usr/local/lib/python3.7/dist-packages (from transformers) (4.62.3)\n", 75 | "Collecting sacremoses\n", 76 | " Downloading sacremoses-0.0.46-py3-none-any.whl (895 kB)\n", 77 | "\u001b[K |████████████████████████████████| 895 kB 63.0 MB/s \n", 78 | "\u001b[?25hCollecting huggingface-hub>=0.0.17\n", 79 | " Downloading huggingface_hub-0.0.19-py3-none-any.whl (56 kB)\n", 80 | "\u001b[K |████████████████████████████████| 56 kB 6.8 MB/s \n", 81 | "\u001b[?25hRequirement already satisfied: typing-extensions in /usr/local/lib/python3.7/dist-packages (from huggingface-hub>=0.0.17->transformers) (3.7.4.3)\n", 82 | "Requirement already satisfied: pyparsing>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from packaging>=20.0->transformers) (2.4.7)\n", 83 | "Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata->transformers) (3.6.0)\n", 84 | "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests->transformers) (2021.5.30)\n", 85 | "Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests->transformers) (3.0.4)\n", 86 | "Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests->transformers) (1.24.3)\n", 87 | "Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests->transformers) (2.10)\n", 88 | "Requirement already satisfied: joblib in /usr/local/lib/python3.7/dist-packages (from sacremoses->transformers) (1.0.1)\n", 89 | "Requirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from sacremoses->transformers) (1.15.0)\n", 90 | "Requirement already satisfied: click in /usr/local/lib/python3.7/dist-packages (from sacremoses->transformers) (7.1.2)\n", 91 | "Installing collected packages: pyyaml, tokenizers, sacremoses, huggingface-hub, transformers\n", 92 | " Attempting uninstall: pyyaml\n", 93 | " Found existing installation: PyYAML 3.13\n", 94 | " Uninstalling PyYAML-3.13:\n", 95 | " Successfully uninstalled PyYAML-3.13\n", 96 | "Successfully installed huggingface-hub-0.0.19 pyyaml-6.0 sacremoses-0.0.46 tokenizers-0.10.3 transformers-4.12.2\n" 97 | ] 98 | } 99 | ] 100 | }, 101 | { 102 | "cell_type": "code", 103 | 
"metadata": { 104 | "colab": { 105 | "base_uri": "https://localhost:8080/" 106 | }, 107 | "id": "s0mQl3uQb8VH", 108 | "outputId": "1ad3c2a7-0292-4e82-ebb3-884266fbaf44" 109 | }, 110 | "source": [ 111 | "#options\n", 112 | "pd.set_option(\"mode.chained_assignment\", None)\n", 113 | "\n", 114 | "#Libraries\n", 115 | "import pandas as pd\n", 116 | "import numpy as np\n", 117 | "import scipy\n", 118 | "import os\n", 119 | "import random\n", 120 | "import time\n", 121 | "import datetime\n", 122 | "import nltk\n", 123 | "\n", 124 | "\n", 125 | "\n", 126 | "nltk.download('punkt')\n", 127 | "nltk.download('stopwords')\n", 128 | "from sklearn.metrics import f1_score, accuracy_score\n", 129 | "from sklearn.model_selection import train_test_split\n", 130 | "from keras.preprocessing.sequence import pad_sequences\n", 131 | "from nltk.corpus import stopwords\n", 132 | "from nltk.tokenize import word_tokenize \n", 133 | "from sklearn import preprocessing\n", 134 | "import tensorflow as tf\n", 135 | "from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n", 136 | "from transformers import BertForSequenceClassification, AdamW, BertConfig\n", 137 | "from transformers import get_linear_schedule_with_warmup\n", 138 | "from transformers import BertTokenizer,AutoTokenizer, AutoModel\n", 139 | "\n", 140 | "\n", 141 | "import torch\n", 142 | "import torch.nn as nn\n", 143 | "import torch.nn.functional as F\n", 144 | "from torch.autograd import Variable\n", 145 | "from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n", 146 | "\n", 147 | "import itertools\n", 148 | "import re\n", 149 | "\n", 150 | "\n", 151 | "\n" 152 | ], 153 | "execution_count": null, 154 | "outputs": [ 155 | { 156 | "output_type": "stream", 157 | "name": "stdout", 158 | "text": [ 159 | "[nltk_data] Downloading package punkt to /root/nltk_data...\n", 160 | "[nltk_data] Package punkt is already up-to-date!\n", 161 | "[nltk_data] Downloading package stopwords to /root/nltk_data...\n", 162 | "[nltk_data] Package stopwords is already up-to-date!\n" 163 | ] 164 | } 165 | ] 166 | }, 167 | { 168 | "cell_type": "code", 169 | "metadata": { 170 | "colab": { 171 | "base_uri": "https://localhost:8080/" 172 | }, 173 | "id": "kYbc3xljxeq2", 174 | "outputId": "c10a8d34-8341-4476-88c3-7792628ce377" 175 | }, 176 | "source": [ 177 | "!pip install pyarabic" 178 | ], 179 | "execution_count": null, 180 | "outputs": [ 181 | { 182 | "output_type": "stream", 183 | "name": "stdout", 184 | "text": [ 185 | "Collecting pyarabic\n", 186 | " Downloading PyArabic-0.6.14-py3-none-any.whl (126 kB)\n", 187 | "\u001b[?25l\r\u001b[K |██▋ | 10 kB 34.8 MB/s eta 0:00:01\r\u001b[K |█████▏ | 20 kB 29.0 MB/s eta 0:00:01\r\u001b[K |███████▉ | 30 kB 18.9 MB/s eta 0:00:01\r\u001b[K |██████████▍ | 40 kB 15.8 MB/s eta 0:00:01\r\u001b[K |█████████████ | 51 kB 8.9 MB/s eta 0:00:01\r\u001b[K |███████████████▋ | 61 kB 8.6 MB/s eta 0:00:01\r\u001b[K |██████████████████▏ | 71 kB 8.3 MB/s eta 0:00:01\r\u001b[K |████████████████████▊ | 81 kB 9.2 MB/s eta 0:00:01\r\u001b[K |███████████████████████▍ | 92 kB 9.6 MB/s eta 0:00:01\r\u001b[K |██████████████████████████ | 102 kB 8.4 MB/s eta 0:00:01\r\u001b[K |████████████████████████████▌ | 112 kB 8.4 MB/s eta 0:00:01\r\u001b[K |███████████████████████████████▏| 122 kB 8.4 MB/s eta 0:00:01\r\u001b[K |████████████████████████████████| 126 kB 8.4 MB/s \n", 188 | "\u001b[?25hRequirement already satisfied: six>=1.14.0 in /usr/local/lib/python3.7/dist-packages (from pyarabic) (1.15.0)\n", 
189 | "Installing collected packages: pyarabic\n", 190 | "Successfully installed pyarabic-0.6.14\n" 191 | ] 192 | } 193 | ] 194 | }, 195 | { 196 | "cell_type": "code", 197 | "metadata": { 198 | "id": "K0Xe8FsuxVxv" 199 | }, 200 | "source": [ 201 | "from pyarabic.araby import strip_tatweel,strip_tashkeel" 202 | ], 203 | "execution_count": null, 204 | "outputs": [] 205 | }, 206 | { 207 | "cell_type": "markdown", 208 | "metadata": { 209 | "id": "6OXugcevKtKY" 210 | }, 211 | "source": [ 212 | "# Data" 213 | ] 214 | }, 215 | { 216 | "cell_type": "markdown", 217 | "metadata": { 218 | "id": "CUMPsJmRrTsr" 219 | }, 220 | "source": [ 221 | "**Please download the MSDA dataset and then import it like in the next cell**" 222 | ] 223 | }, 224 | { 225 | "cell_type": "code", 226 | "metadata": { 227 | "id": "1GolU0F6thRE" 228 | }, 229 | "source": [ 230 | "MSDA=pd.read_csv(\"Path/to/MSDA.csv\")" 231 | ], 232 | "execution_count": null, 233 | "outputs": [] 234 | }, 235 | { 236 | "cell_type": "code", 237 | "metadata": { 238 | "colab": { 239 | "base_uri": "https://localhost:8080/", 240 | "height": 204 241 | }, 242 | "id": "RGZ8ng0bts7V", 243 | "outputId": "5da4f5cd-01e8-48f8-bb0c-331533c90818" 244 | }, 245 | "source": [ 246 | "MSDA.head()" 247 | ], 248 | "execution_count": null, 249 | "outputs": [ 250 | { 251 | "output_type": "execute_result", 252 | "data": { 253 | "text/html": [ 254 | "
\n", 255 | "\n", 268 | "\n", 269 | " \n", 270 | " \n", 271 | " \n", 272 | " \n", 273 | " \n", 274 | " \n", 275 | " \n", 276 | " \n", 277 | " \n", 278 | " \n", 279 | " \n", 280 | " \n", 281 | " \n", 282 | " \n", 283 | " \n", 284 | " \n", 285 | " \n", 286 | " \n", 287 | " \n", 288 | " \n", 289 | " \n", 290 | " \n", 291 | " \n", 292 | " \n", 293 | " \n", 294 | " \n", 295 | " \n", 296 | " \n", 297 | " \n", 298 | " \n", 299 | " \n", 300 | " \n", 301 | " \n", 302 | " \n", 303 | "
Twitsdialect
0الدكتورة نجوى قامة في القصف الصاروخي لك ان ترى منشوراتها وتدخلاتهاAlgerian
1سكيكدة_المجاهدة❤ صورة تجمع أحد أكبر و أعظم زعماء ثورتنا المباركة: زيغود يوسف، رويبح حسين، العربي بن مهيدي، عمر أوعمران الذين عقدوا العزم أن تتحرر الجزائر و نعيش اليوم نحن معززين مكرمين تحت راية الشهداء التي إرتضوها أن تكون رمزا لمقاومتهم و تاج فوق رؤوسنا اليومAlgerian
2😂😂😂😂😂 شكون يعرفها🤔Algerian
3يهود الأندلس للاسف جاو زرعوا الفتنة في وسطناAlgerian
4اليهودAlgerian
\n", 304 | "
" 305 | ], 306 | "text/plain": [ 307 | " Twits dialect\n", 308 | "0 الدكتورة نجوى قامة في القصف الصاروخي لك ان ترى منشوراتها وتدخلاتها Algerian\n", 309 | "1 سكيكدة_المجاهدة❤ صورة تجمع أحد أكبر و أعظم زعماء ثورتنا المباركة: زيغود يوسف، رويبح حسين، العربي بن مهيدي، عمر أوعمران الذين عقدوا العزم أن تتحرر الجزائر و نعيش اليوم نحن معززين مكرمين تحت راية الشهداء التي إرتضوها أن تكون رمزا لمقاومتهم و تاج فوق رؤوسنا اليوم Algerian\n", 310 | "2 😂😂😂😂😂 شكون يعرفها🤔 Algerian\n", 311 | "3 يهود الأندلس للاسف جاو زرعوا الفتنة في وسطنا Algerian\n", 312 | "4 اليهود Algerian" 313 | ] 314 | }, 315 | "metadata": {}, 316 | "execution_count": 188 317 | } 318 | ] 319 | }, 320 | { 321 | "cell_type": "code", 322 | "metadata": { 323 | "colab": { 324 | "base_uri": "https://localhost:8080/" 325 | }, 326 | "id": "wOLOMl9XtyIH", 327 | "outputId": "43d29f7f-0729-498d-d4c9-155d6e9e1568" 328 | }, 329 | "source": [ 330 | "MSDA.dialect.value_counts()" 331 | ], 332 | "execution_count": null, 333 | "outputs": [ 334 | { 335 | "data": { 336 | "text/plain": [ 337 | "Lebanon 14482\n", 338 | "Algerian 13393\n", 339 | "Morocco 9965\n", 340 | "Tunisian 8044\n", 341 | "egypt 7519\n", 342 | "Name: dialect, dtype: int64" 343 | ] 344 | }, 345 | "execution_count": 50, 346 | "metadata": {}, 347 | "output_type": "execute_result" 348 | } 349 | ] 350 | }, 351 | { 352 | "cell_type": "code", 353 | "metadata": { 354 | "id": "5PojpkUdt2xK" 355 | }, 356 | "source": [ 357 | "#Transform to binary dataset (Moroccan Vs others) and recode to 1/0\n", 358 | "MSDA['labels']=np.where(MSDA.dialect==\"Morocco\",1,0)" 359 | ], 360 | "execution_count": null, 361 | "outputs": [] 362 | }, 363 | { 364 | "cell_type": "code", 365 | "metadata": { 366 | "id": "azVuCzWUx611" 367 | }, 368 | "source": [ 369 | "MSDA.rename(columns={'Twits':'tweet'},inplace=True)" 370 | ], 371 | "execution_count": null, 372 | "outputs": [] 373 | }, 374 | { 375 | "cell_type": "code", 376 | "metadata": { 377 | "id": "vaRrIi6wuJ1C" 378 | }, 379 | "source": [ 380 | "MSDA.drop(columns='dialect',inplace=True)" 381 | ], 382 | "execution_count": null, 383 | "outputs": [] 384 | }, 385 | { 386 | "cell_type": "markdown", 387 | "metadata": { 388 | "id": "gDCnqKlixuu4" 389 | }, 390 | "source": [ 391 | "## Preprocessing" 392 | ] 393 | }, 394 | { 395 | "cell_type": "code", 396 | "metadata": { 397 | "id": "PLdNffkl3Izn" 398 | }, 399 | "source": [ 400 | "\n", 401 | "def repted(text):\n", 402 | " \n", 403 | " text=re.sub(r'(.)\\1+', r'\\1', text) # Replace with only one (remove repetitions) \n", 404 | " return text\n", 405 | "def pre_processing(df,field):\n", 406 | " df[field] = df[field].replace(r'http\\S+', 'URL', regex=True).replace(r'www\\S+', 'URL', regex=True) # Replace URLs with URL string\n", 407 | " df[field] = df[field].replace(r'@[^\\s]+', 'USER', regex=True) # Replace user mentions with USER string\n", 408 | " df[field] = df[field].replace(r'#[^\\s]+', 'HASHTAG', regex=True) # Replace Hashtags with HASHTAG string\n", 409 | "\n", 410 | " df=df[df[field].apply(lambda x:len(re.findall(r'[\\u0600-\\u06FF]+', x)))>1] #Keep sequences with at least 2 arabic words\n", 411 | " df[field] = df[field].apply(strip_tatweel) #Remove Tatweel string \n", 412 | " df[field] = df[field].apply(strip_tashkeel) # Remove Diacritics\n", 413 | " df[field] = df[field].apply(repted)\n", 414 | "\n", 415 | " return df" 416 | ], 417 | "execution_count": null, 418 | "outputs": [] 419 | }, 420 | { 421 | "cell_type": "code", 422 | "metadata": { 423 | "id": "dBmZbSTpzFex" 424 | }, 425 | "source": [ 426 | 
"MSDA_prc=pre_processing(MSDA,'tweet')" 427 | ], 428 | "execution_count": null, 429 | "outputs": [] 430 | }, 431 | { 432 | "cell_type": "markdown", 433 | "metadata": { 434 | "id": "C_Bz7dAn0ZcV" 435 | }, 436 | "source": [ 437 | "## Train Test split" 438 | ] 439 | }, 440 | { 441 | "cell_type": "code", 442 | "metadata": { 443 | "id": "iii8TeBI0g2U" 444 | }, 445 | "source": [ 446 | "#Specify which Data to use\n", 447 | "Data=MSDA_prc.copy()" 448 | ], 449 | "execution_count": null, 450 | "outputs": [] 451 | }, 452 | { 453 | "cell_type": "code", 454 | "metadata": { 455 | "id": "pE5iGsuDcuPz" 456 | }, 457 | "source": [ 458 | "train,valid,train_y,valid_y=train_test_split(Data.tweet.values,Data.labels,test_size=0.2,random_state=42,stratify=Data.labels,)" 459 | ], 460 | "execution_count": null, 461 | "outputs": [] 462 | }, 463 | { 464 | "cell_type": "code", 465 | "metadata": { 466 | "id": "omOPIAf3eWhp" 467 | }, 468 | "source": [ 469 | "dev,test,dev_y,test_y=train_test_split(valid,valid_y,test_size=0.5,random_state=42,stratify=valid_y)" 470 | ], 471 | "execution_count": null, 472 | "outputs": [] 473 | }, 474 | { 475 | "cell_type": "markdown", 476 | "metadata": { 477 | "id": "hf5yKySDuAEt" 478 | }, 479 | "source": [ 480 | "##Variables to be used in the training functions" 481 | ] 482 | }, 483 | { 484 | "cell_type": "code", 485 | "metadata": { 486 | "id": "aSYGb-LfgfZt" 487 | }, 488 | "source": [ 489 | "train_labels=train_y.values\n", 490 | "train_tweets=train" 491 | ], 492 | "execution_count": null, 493 | "outputs": [] 494 | }, 495 | { 496 | "cell_type": "code", 497 | "metadata": { 498 | "id": "ClylLGvaHWOT" 499 | }, 500 | "source": [ 501 | "dev_labels=dev_y.values\n", 502 | "dev_tweets=dev" 503 | ], 504 | "execution_count": null, 505 | "outputs": [] 506 | }, 507 | { 508 | "cell_type": "code", 509 | "metadata": { 510 | "id": "61vXYWXHxjxE" 511 | }, 512 | "source": [ 513 | "test_labels=test_y.values\n", 514 | "test_tweets=test" 515 | ], 516 | "execution_count": null, 517 | "outputs": [] 518 | }, 519 | { 520 | "cell_type": "markdown", 521 | "metadata": { 522 | "id": "mWsOnYQFuHNy" 523 | }, 524 | "source": [ 525 | "# GPU device" 526 | ] 527 | }, 528 | { 529 | "cell_type": "code", 530 | "metadata": { 531 | "colab": { 532 | "base_uri": "https://localhost:8080/" 533 | }, 534 | "id": "GcoVESeVqirY", 535 | "outputId": "4c492636-23cf-4722-fc7d-7f20d844b0c8" 536 | }, 537 | "source": [ 538 | "import torch\n", 539 | "\n", 540 | "if torch.cuda.is_available(): \n", 541 | " device = torch.device(\"cuda\")\n", 542 | " print(f'There are {torch.cuda.device_count()} GPU(s) available.')\n", 543 | " print('Device name:', torch.cuda.get_device_name(0))\n", 544 | "\n", 545 | "else:\n", 546 | " print('No GPU available, using the CPU instead.')\n", 547 | " device = torch.device(\"cpu\")" 548 | ], 549 | "execution_count": null, 550 | "outputs": [ 551 | { 552 | "output_type": "stream", 553 | "name": "stdout", 554 | "text": [ 555 | "There are 1 GPU(s) available.\n", 556 | "Device name: Tesla P100-PCIE-16GB\n" 557 | ] 558 | } 559 | ] 560 | }, 561 | { 562 | "cell_type": "markdown", 563 | "metadata": { 564 | "id": "lXI62U_i9E2V" 565 | }, 566 | "source": [ 567 | "# Helper Functions" 568 | ] 569 | }, 570 | { 571 | "cell_type": "code", 572 | "metadata": { 573 | "id": "9cQNvaZ9bnyy" 574 | }, 575 | "source": [ 576 | "# Function to calculate the accuracy of our predictions vs labels\n", 577 | "def flat_accuracy(preds, labels):\n", 578 | " pred_flat = np.argmax(preds, axis=1).flatten()\n", 579 | " labels_flat = labels.flatten()\n", 580 
| " return np.sum(pred_flat == labels_flat) / len(labels_flat)" 581 | ], 582 | "execution_count": null, 583 | "outputs": [] 584 | }, 585 | { 586 | "cell_type": "code", 587 | "metadata": { 588 | "id": "0GEW9HgRTUCA" 589 | }, 590 | "source": [ 591 | "# Function to calculate the F1-score of our predictions vs labels\n", 592 | "def flat_f1(preds, labels):\n", 593 | " pred_flat = np.argmax(preds, axis=1).flatten()\n", 594 | " labels_flat = labels.flatten()\n", 595 | " return f1_score(labels_flat,pred_flat) " 596 | ], 597 | "execution_count": null, 598 | "outputs": [] 599 | }, 600 | { 601 | "cell_type": "code", 602 | "metadata": { 603 | "id": "gpt6tR83keZD" 604 | }, 605 | "source": [ 606 | "\n", 607 | "def format_time(elapsed):\n", 608 | " '''\n", 609 | " Takes a time in seconds and returns a string hh:mm:ss\n", 610 | " '''\n", 611 | " # Round to the nearest second.\n", 612 | " elapsed_rounded = int(round((elapsed)))\n", 613 | " \n", 614 | " # Format as hh:mm:ss\n", 615 | " return str(datetime.timedelta(seconds=elapsed_rounded))\n" 616 | ], 617 | "execution_count": null, 618 | "outputs": [] 619 | }, 620 | { 621 | "cell_type": "markdown", 622 | "metadata": { 623 | "id": "795iPKtQAvYi" 624 | }, 625 | "source": [ 626 | "# Training Functions" 627 | ] 628 | }, 629 | { 630 | "cell_type": "markdown", 631 | "metadata": { 632 | "id": "60FwRKAoCXLU" 633 | }, 634 | "source": [ 635 | "### Tokenization" 636 | ] 637 | }, 638 | { 639 | "cell_type": "code", 640 | "metadata": { 641 | "id": "2bBdb3pt8LuQ" 642 | }, 643 | "source": [ 644 | "#Function doing the Bert tokenizer with pading\n", 645 | "def Tweets_Tokenizer(tweets,max_len=128):\n", 646 | " # Tokenize all of the sentences and map the tokens to their word IDs.\n", 647 | " input_ids = []\n", 648 | " # Create attention masks\n", 649 | " attention_masks = []\n", 650 | " MAX_LEN = max_len\n", 651 | "\n", 652 | " \n", 653 | " # For every sentence...\n", 654 | " for tweet in tweets:\n", 655 | " \n", 656 | " \n", 657 | " # `encode` will:\n", 658 | " # (1) Tokenize the sentence.\n", 659 | " # (2) Prepend the `[CLS]` token to the start.\n", 660 | " # (3) Append the `[SEP]` token to the end.\n", 661 | " # (4) Map tokens to their IDs.\n", 662 | " encoded_sent = tokenizer.encode(\n", 663 | " tweet, # Sentence to encode.\n", 664 | " add_special_tokens = True, # Add '[CLS]' and '[SEP]'\n", 665 | "\n", 666 | " # This function also supports truncation and conversion\n", 667 | " # to pytorch tensors, but we need to do padding, so we\n", 668 | " # can't use these features :( .\n", 669 | " max_length = MAX_LEN, # Truncate all sentences.\n", 670 | " truncation=True\n", 671 | " #return_tensors = 'pt', # Return pytorch tensors.\n", 672 | " )\n", 673 | " \n", 674 | " # Add the encoded sentence to the list.\n", 675 | " input_ids.append(encoded_sent)\n", 676 | "\n", 677 | " input_ids = pad_sequences(input_ids, maxlen=MAX_LEN, dtype=\"long\",value=0, truncating=\"post\", padding=\"post\",)\n", 678 | "\n", 679 | " # For each sentence...\n", 680 | " for sent in input_ids: \n", 681 | " \n", 682 | " \n", 683 | " # Create the attention mask.\n", 684 | " # - If a token ID is 0, then it's padding, set the mask to 0.\n", 685 | " # - If a token ID is > 0, then it's a real token, set the mask to 1.\n", 686 | " att_mask = [int(token_id > 0) for token_id in sent]\n", 687 | " \n", 688 | " # Store the attention mask for this sentence.\n", 689 | " attention_masks.append(att_mask)\n", 690 | " \n", 691 | " print('\\n Tokenizer is Done.')\n", 692 | "\n", 693 | " return 
(input_ids,attention_masks)\n", 694 | "\n" 695 | ], 696 | "execution_count": null, 697 | "outputs": [] 698 | }, 699 | { 700 | "cell_type": "markdown", 701 | "metadata": { 702 | "id": "wMdsYUIbClSE" 703 | }, 704 | "source": [ 705 | "### Training" 706 | ] 707 | }, 708 | { 709 | "cell_type": "code", 710 | "metadata": { 711 | "id": "cb8gLAaa03Yg" 712 | }, 713 | "source": [ 714 | "\n", 715 | "# This training code is based on the `run_glue.py` script here:\n", 716 | "# https://github.com/huggingface/transformers/blob/5bfcd0485ece086ebcbed2d008813037968a9e58/examples/run_glue.py#L128\n", 717 | "def train_final(data,LR=2e-5,nepochs=3,Batches=16):\n", 718 | " # Number of training epochs (authors recommend between 2 and 4)\n", 719 | " epochs = nepochs\n", 720 | "\n", 721 | " # Set the seed value all over the place to make this reproducible.\n", 722 | " seed_val = 42\n", 723 | " batch_size = Batches\n", 724 | "\n", 725 | " random.seed(seed_val)\n", 726 | " np.random.seed(seed_val)\n", 727 | " torch.manual_seed(seed_val)\n", 728 | " torch.cuda.manual_seed_all(seed_val)\n", 729 | "\n", 730 | "\n", 731 | " # Use 90% for training and 10% for validation.\n", 732 | " input_ids,attention_masks=Tweets_Tokenizer(train_tweets)\n", 733 | " # tr_inputs, validation_inputs, tr_labels, validation_labels = train_test_split(input_ids, train_labels, \n", 734 | " # random_state=2018, test_size=0.1)\n", 735 | " # # Do the same for the masks.\n", 736 | " # train_masks, validation_masks, _, _ = train_test_split(attention_masks, train_labels,\n", 737 | " # random_state=2018, test_size=0.1)\n", 738 | " \n", 739 | " # Convert all inputs and labels into torch tensors, the required datatype \n", 740 | " # for our model.\n", 741 | " train_inputs = torch.tensor(input_ids)\n", 742 | " #validation_inputs = torch.tensor(validation_inputs)\n", 743 | "\n", 744 | " tr_labels = torch.tensor(train_labels)\n", 745 | " #validation_labels = torch.tensor(validation_labels)\n", 746 | "\n", 747 | " train_masks = torch.tensor(attention_masks)\n", 748 | " #validation_masks = torch.tensor(validation_masks)\n", 749 | "\n", 750 | " # Create the DataLoader for our training set.\n", 751 | " train_data = TensorDataset(train_inputs, train_masks, tr_labels)\n", 752 | " train_sampler = RandomSampler(train_data)\n", 753 | " train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=batch_size)\n", 754 | "\n", 755 | " # Create the DataLoader for our validation set.\n", 756 | " # validation_data = TensorDataset(validation_inputs, validation_masks, validation_labels)\n", 757 | " # validation_sampler = SequentialSampler(validation_data)\n", 758 | " # validation_dataloader = DataLoader(validation_data, sampler=validation_sampler, batch_size=batch_size)\n", 759 | "\n", 760 | " \n", 761 | " # Load BertForSequenceClassification, the pretrained BERT model with a single \n", 762 | " # linear classification layer on top. \n", 763 | " model = BertForSequenceClassification.from_pretrained(\n", 764 | " bert_repo, # Use the 12-layer BERT model, with an uncased vocab.\n", 765 | " num_labels = 2, # The number of output labels--2 for binary classification.\n", 766 | " # You can increase this for multi-class tasks. 
\n", 767 | " output_attentions = False, # Whether the model returns attentions weights.\n", 768 | " output_hidden_states = True, # Whether the model returns all hidden-states.\n", 769 | " )\n", 770 | " \n", 771 | " \n", 772 | "\n", 773 | " \n", 774 | " # Tell pytorch to run this model on the GPU.\n", 775 | " model.cuda()\n", 776 | " \n", 777 | " # Note: AdamW is a class from the huggingface library (as opposed to pytorch) \n", 778 | " # I believe the 'W' stands for 'Weight Decay fix\"\n", 779 | " optimizer = AdamW(model.parameters(),\n", 780 | " lr = LR, # args.learning_rate - default is 5e-5, our notebook had 2e-5\n", 781 | " eps = 1e-8 # args.adam_epsilon - default is 1e-8.\n", 782 | " )\n", 783 | "\n", 784 | "\n", 785 | " # Total number of training steps is number of batches * number of epochs.\n", 786 | " total_steps = len(train_dataloader) * epochs\n", 787 | "\n", 788 | " # Create the learning rate scheduler.\n", 789 | " scheduler = get_linear_schedule_with_warmup(optimizer, \n", 790 | " num_warmup_steps = 0, # Default value in run_glue.py\n", 791 | " num_training_steps = total_steps)\n", 792 | "\n", 793 | " # Store the average loss after each epoch so we can plot them.\n", 794 | " loss_values = []\n", 795 | "\n", 796 | " # For each epoch...\n", 797 | " for epoch_i in range(0, epochs):\n", 798 | " \n", 799 | " # ========================================\n", 800 | " # Training\n", 801 | " # ========================================\n", 802 | " \n", 803 | " # Perform one full pass over the training set.\n", 804 | "\n", 805 | " print(\"\")\n", 806 | " print('======== Epoch {:} / {:} ========'.format(epoch_i + 1, epochs))\n", 807 | " print('Training...')\n", 808 | "\n", 809 | " # Measure how long the training epoch takes.\n", 810 | " t0 = time.time()\n", 811 | "\n", 812 | " # Reset the total loss for this epoch.\n", 813 | " total_loss = 0\n", 814 | "\n", 815 | " # Put the model into training mode. Don't be mislead--the call to \n", 816 | " # `train` just changes the *mode*, it doesn't *perform* the training.\n", 817 | " # `dropout` and `batchnorm` layers behave differently during training\n", 818 | " # vs. test (source: https://stackoverflow.com/questions/51433378/what-does-model-train-do-in-pytorch)\n", 819 | " model.train()\n", 820 | "\n", 821 | " # For each batch of training data...\n", 822 | " for step, batch in enumerate(train_dataloader):\n", 823 | "\n", 824 | " # Progress update every 40 batches.\n", 825 | " if step % 40 == 0 and not step == 0:\n", 826 | " # Calculate elapsed time in minutes.\n", 827 | " elapsed = format_time(time.time() - t0)\n", 828 | " \n", 829 | " # Report progress.\n", 830 | " print(' Batch {:>5,} of {:>5,}. Elapsed: {:}.'.format(step, len(train_dataloader), elapsed))\n", 831 | "\n", 832 | " # Unpack this training batch from our dataloader. \n", 833 | " #\n", 834 | " # As we unpack the batch, we'll also copy each tensor to the GPU using the \n", 835 | " # `to` method.\n", 836 | " #\n", 837 | " # `batch` contains three pytorch tensors:\n", 838 | " # [0]: input ids \n", 839 | " # [1]: attention masks\n", 840 | " # [2]: labels \n", 841 | " b_input_ids = batch[0].to(device)\n", 842 | " b_input_mask = batch[1].to(device)\n", 843 | " b_labels = batch[2].to(device)\n", 844 | "\n", 845 | " # Always clear any previously calculated gradients before performing a\n", 846 | " # backward pass. PyTorch doesn't do this automatically because \n", 847 | " # accumulating the gradients is \"convenient while training RNNs\". 
\n", 848 | " # (source: https://stackoverflow.com/questions/48001598/why-do-we-need-to-call-zero-grad-in-pytorch)\n", 849 | " model.zero_grad() \n", 850 | "\n", 851 | " # Perform a forward pass (evaluate the model on this training batch).\n", 852 | " # This will return the loss (rather than the model output) because we\n", 853 | " # have provided the `labels`.\n", 854 | " # The documentation for this `model` function is here: \n", 855 | " # https://huggingface.co/transformers/v2.2.0/model_doc/bert.html#transformers.BertForSequenceClassification\n", 856 | " outputs = model(b_input_ids, \n", 857 | " token_type_ids=None, \n", 858 | " attention_mask=b_input_mask, \n", 859 | " labels=b_labels.to(device=device, dtype=torch.int64))\n", 860 | " \n", 861 | " # The call to `model` always returns a tuple, so we need to pull the \n", 862 | " # loss value out of the tuple.\n", 863 | " loss = outputs[0]\n", 864 | "\n", 865 | " # Accumulate the training loss over all of the batches so that we can\n", 866 | " # calculate the average loss at the end. `loss` is a Tensor containing a\n", 867 | " # single value; the `.item()` function just returns the Python value \n", 868 | " # from the tensor.\n", 869 | " total_loss += loss.item()\n", 870 | "\n", 871 | " # Perform a backward pass to calculate the gradients.\n", 872 | " loss.backward()\n", 873 | "\n", 874 | " # Clip the norm of the gradients to 1.0.\n", 875 | " # This is to help prevent the \"exploding gradients\" problem.\n", 876 | " torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)\n", 877 | "\n", 878 | " # Update parameters and take a step using the computed gradient.\n", 879 | " # The optimizer dictates the \"update rule\"--how the parameters are\n", 880 | " # modified based on their gradients, the learning rate, etc.\n", 881 | " optimizer.step()\n", 882 | "\n", 883 | " # Update the learning rate.\n", 884 | " scheduler.step()\n", 885 | "\n", 886 | " # Calculate the average loss over the training data.\n", 887 | " avg_train_loss = total_loss / len(train_dataloader) \n", 888 | " \n", 889 | " # Store the loss value for plotting the learning curve.\n", 890 | " loss_values.append(avg_train_loss)\n", 891 | "\n", 892 | " print(\"\")\n", 893 | " print(\" Average training loss: {0:.2f}\".format(avg_train_loss))\n", 894 | " print(\" Training epcoh took: {:}\".format(format_time(time.time() - t0)))\n", 895 | " \n", 896 | " \n", 897 | " print(\"\")\n", 898 | " print(\"Training complete!\")\n", 899 | "\n", 900 | " return model" 901 | ], 902 | "execution_count": null, 903 | "outputs": [] 904 | }, 905 | { 906 | "cell_type": "markdown", 907 | "metadata": { 908 | "id": "AQQpmYdPIbCm" 909 | }, 910 | "source": [ 911 | "### Prediction function" 912 | ] 913 | }, 914 | { 915 | "cell_type": "code", 916 | "metadata": { 917 | "id": "TszHU7vZI3R8" 918 | }, 919 | "source": [ 920 | "def dev_pred(model,tweets,labels):\n", 921 | " dev_inputs, dev_masks = Tweets_Tokenizer(tweets) \n", 922 | "\n", 923 | " # Convert to tensors.\n", 924 | " prediction_inputs = torch.tensor(dev_inputs)\n", 925 | " prediction_masks = torch.tensor(dev_masks)\n", 926 | " prediction_labels = torch.tensor(labels)\n", 927 | "\n", 928 | " # Set the batch size. 
\n", 929 | " batch_size = 64 \n", 930 | "\n", 931 | " # Create the DataLoader.\n", 932 | " prediction_data = TensorDataset(prediction_inputs, prediction_masks, prediction_labels)\n", 933 | " prediction_sampler = SequentialSampler(prediction_data)\n", 934 | " prediction_dataloader = DataLoader(prediction_data, sampler=prediction_sampler, batch_size=batch_size)\n", 935 | "\n", 936 | " # Prediction on test set\n", 937 | "\n", 938 | " print('Predicting labels for {:,} sentences...'.format(len(prediction_inputs)))\n", 939 | "\n", 940 | " # Put model in evaluation mode\n", 941 | " model.eval()\n", 942 | "\n", 943 | " # Tracking variables \n", 944 | " predictions , true_labels = [], []\n", 945 | "\n", 946 | " # Predict \n", 947 | " for batch in prediction_dataloader:\n", 948 | " # Add batch to GPU\n", 949 | " batch = tuple(t.to(device) for t in batch)\n", 950 | " \n", 951 | " # Unpack the inputs from our dataloader\n", 952 | " b_input_ids, b_input_mask, b_labels = batch\n", 953 | " \n", 954 | " # Telling the model not to compute or store gradients, saving memory and \n", 955 | " # speeding up prediction\n", 956 | " with torch.no_grad():\n", 957 | " # Forward pass, calculate logit predictions\n", 958 | " outputs = model(b_input_ids, token_type_ids=None, \n", 959 | " attention_mask=b_input_mask)\n", 960 | "\n", 961 | " logits = outputs[0]\n", 962 | "\n", 963 | " # Move logits and labels to CPU\n", 964 | " logits = logits.detach().cpu().numpy()\n", 965 | " label_ids = b_labels.to('cpu').numpy()\n", 966 | " \n", 967 | " # Store predictions and true labels\n", 968 | " predictions.append(logits)\n", 969 | " true_labels.append(label_ids)\n", 970 | "\n", 971 | " \n", 972 | "\n", 973 | " # Combine the predictions for each batch into a single list of 0s and 1s.\n", 974 | " flat_predictions = [item for sublist in predictions for item in sublist]\n", 975 | " Last_predictions = np.argmax(flat_predictions, axis=1).flatten()\n", 976 | "\n", 977 | " # Combine the correct labels for each batch into a single list.\n", 978 | " flat_true_labels = [item for sublist in true_labels for item in sublist]\n", 979 | "\n", 980 | " \n", 981 | " indiv_F1=f1_score(flat_true_labels, Last_predictions) * 100\n", 982 | " macro_F1=f1_score(flat_true_labels, Last_predictions,average='macro')*100\n", 983 | "\n", 984 | " \n", 985 | " print(\"\\nF1 Score on positive: %.2f\" %indiv_F1, \"%\") \n", 986 | " #print(indiv_F1) \n", 987 | "\n", 988 | " return (flat_predictions,Last_predictions,flat_true_labels)" 989 | ], 990 | "execution_count": null, 991 | "outputs": [] 992 | }, 993 | { 994 | "cell_type": "markdown", 995 | "metadata": { 996 | "id": "zHDzj2B6_5ZR" 997 | }, 998 | "source": [ 999 | "# Running Models" 1000 | ] 1001 | }, 1002 | { 1003 | "cell_type": "code", 1004 | "metadata": { 1005 | "id": "Ohei9AHSo7C0", 1006 | "colab": { 1007 | "base_uri": "https://localhost:8080/" 1008 | }, 1009 | "outputId": "509592e1-aae8-4c9e-8f3d-fbcd37519ff2" 1010 | }, 1011 | "source": [ 1012 | "\n", 1013 | "#https://github.com/alisafaya/Arabic-BERT\n", 1014 | "tokenizer_DarijaBert = AutoTokenizer.from_pretrained(\"Kamel/DarijaBERT\")" 1015 | ], 1016 | "execution_count": null, 1017 | "outputs": [ 1018 | { 1019 | "output_type": "stream", 1020 | "name": "stderr", 1021 | "text": [ 1022 | "The `xla_device` argument has been deprecated in v4.4.0 of Transformers. It is ignored and you can safely remove it from your `config.json` file.\n", 1023 | "The `xla_device` argument has been deprecated in v4.4.0 of Transformers. 
It is ignored and you can safely remove it from your `config.json` file.\n", 1024 | "The `xla_device` argument has been deprecated in v4.4.0 of Transformers. It is ignored and you can safely remove it from your `config.json` file.\n" 1025 | ] 1026 | } 1027 | ] 1028 | }, 1029 | { 1030 | "cell_type": "code", 1031 | "metadata": { 1032 | "id": "QGTcqW4xozOF" 1033 | }, 1034 | "source": [ 1035 | "tokenizer=tokenizer_DarijaBert\n", 1036 | "bert_repo=\"Kamel/DarijaBERT\"" 1037 | ], 1038 | "execution_count": null, 1039 | "outputs": [] 1040 | }, 1041 | { 1042 | "cell_type": "code", 1043 | "metadata": { 1044 | "colab": { 1045 | "base_uri": "https://localhost:8080/" 1046 | }, 1047 | "id": "pwSa_NB7k0SV", 1048 | "outputId": "afb7f26b-8a71-427f-9edc-e94db63c0602" 1049 | }, 1050 | "source": [ 1051 | "model_final=train_final(train,Batches=64,nepochs=1)" 1052 | ], 1053 | "execution_count": null, 1054 | "outputs": [ 1055 | { 1056 | "output_type": "stream", 1057 | "name": "stdout", 1058 | "text": [ 1059 | "\n", 1060 | " Tokenizer is Done.\n" 1061 | ] 1062 | }, 1063 | { 1064 | "output_type": "stream", 1065 | "name": "stderr", 1066 | "text": [ 1067 | "The `xla_device` argument has been deprecated in v4.4.0 of Transformers. It is ignored and you can safely remove it from your `config.json` file.\n", 1068 | "Some weights of the model checkpoint at /content/drive/MyDrive/Doctorat/DarijaBERT/Models/DarijaBert_v1.0/epoch40 were not used when initializing BertForSequenceClassification: ['cls.predictions.bias', 'cls.predictions.transform.dense.bias', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.decoder.bias', 'cls.predictions.transform.dense.weight', 'cls.predictions.decoder.weight', 'cls.predictions.transform.LayerNorm.weight']\n", 1069 | "- This IS expected if you are initializing BertForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", 1070 | "- This IS NOT expected if you are initializing BertForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", 1071 | "Some weights of BertForSequenceClassification were not initialized from the model checkpoint at /content/drive/MyDrive/Doctorat/DarijaBERT/Models/DarijaBert_v1.0/epoch40 and are newly initialized: ['classifier.weight', 'bert.pooler.dense.weight', 'bert.pooler.dense.bias', 'classifier.bias']\n", 1072 | "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n" 1073 | ] 1074 | }, 1075 | { 1076 | "output_type": "stream", 1077 | "name": "stdout", 1078 | "text": [ 1079 | "\n", 1080 | "======== Epoch 1 / 1 ========\n", 1081 | "Training...\n", 1082 | " Batch 40 of 569. Elapsed: 0:00:29.\n", 1083 | " Batch 80 of 569. Elapsed: 0:00:58.\n", 1084 | " Batch 120 of 569. Elapsed: 0:01:27.\n", 1085 | " Batch 160 of 569. Elapsed: 0:01:56.\n", 1086 | " Batch 200 of 569. Elapsed: 0:02:25.\n", 1087 | " Batch 240 of 569. Elapsed: 0:02:54.\n", 1088 | " Batch 280 of 569. Elapsed: 0:03:23.\n", 1089 | " Batch 320 of 569. Elapsed: 0:03:51.\n", 1090 | " Batch 360 of 569. Elapsed: 0:04:20.\n", 1091 | " Batch 400 of 569. Elapsed: 0:04:49.\n", 1092 | " Batch 440 of 569. Elapsed: 0:05:18.\n", 1093 | " Batch 480 of 569. Elapsed: 0:05:47.\n", 1094 | " Batch 520 of 569. Elapsed: 0:06:16.\n", 1095 | " Batch 560 of 569. 
Elapsed: 0:06:45.\n", 1096 | "\n", 1097 | " Average training loss: 0.17\n", 1098 | " Training epcoh took: 0:06:51\n", 1099 | "\n", 1100 | "Training complete!\n" 1101 | ] 1102 | } 1103 | ] 1104 | }, 1105 | { 1106 | "cell_type": "markdown", 1107 | "metadata": { 1108 | "id": "784KOiml2nVc" 1109 | }, 1110 | "source": [ 1111 | "# Prediction" 1112 | ] 1113 | }, 1114 | { 1115 | "cell_type": "code", 1116 | "metadata": { 1117 | "id": "yMG8_D1j_c6G" 1118 | }, 1119 | "source": [ 1120 | "flat_predictions,Last_predictions,flat_true_labels=dev_pred(model_final,dev_tweets,dev_labels)" 1121 | ], 1122 | "execution_count": null, 1123 | "outputs": [] 1124 | }, 1125 | { 1126 | "cell_type": "code", 1127 | "metadata": { 1128 | "id": "o6juic7pJAwR" 1129 | }, 1130 | "source": [ 1131 | "flat_predictions,Last_predictions,flat_true_labels=dev_pred(model_final,test_tweets,test_labels)" 1132 | ], 1133 | "execution_count": null, 1134 | "outputs": [] 1135 | } 1136 | ] 1137 | } --------------------------------------------------------------------------------