├── .gitignore ├── LICENSE ├── README.md ├── export └── .gitkeep ├── images └── goemotions.png ├── labels.py ├── logs └── .gitkeep └── training_with_tez.ipynb /.gitignore: -------------------------------------------------------------------------------- 1 | .ipynb_checkpoints 2 | 3 | __pycache__/ -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Ahmed BESBES 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ### Training a Multi-Label Emotion Classifier with Tez and PyTorch 2 | 3 | If you're tired of rewriting the same boilerplate code for your PyTorch training pipelines, I've found a pretty neat solution that could make your life easier. Don't worry, it's not a heavy library that'll change your way of doing things. 4 | 5 | Rather, it's a lightweight wrapper that encapsulates your training logic in a single class. It's built on top of PyTorch and it's quite recent, but I've tested it and so far it does what it promises. 6 | 7 | It's called Tez, and today we'll see it in action on a fun multi-label text classification problem. Let's jump right in.
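Before diving in, here is the pattern in a nutshell — a minimal sketch condensed from `training_with_tez.ipynb` (the class name and the bare linear head below are illustrative, not a fixed API):

```python
# Minimal sketch of the tez pattern: the training logic lives in one
# class, and .fit() replaces the usual epoch/batch boilerplate.
import tez
import torch.nn as nn

class MyClassifier(tez.Model):
    def __init__(self, num_classes):
        super().__init__()
        self.out = nn.Linear(768, num_classes)  # head on top of your encoder

    def fetch_optimizer(self):
        ...  # return a torch optimizer over self.parameters()

    def fetch_scheduler(self):
        ...  # optional: return a learning-rate scheduler

    def forward(self, ids, mask, targets=None):
        ...  # tez expects (output, loss, metrics_dict)

# model = MyClassifier(num_classes=28)
# model.fit(train_dataset, valid_dataset, train_bs=64, device="cuda", epochs=8)
```

The full, runnable version of this class is the `EmotionClassifier` defined in the notebook.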
8 | 9 | ![](./images/goemotions.png) 10 | 11 | ### Things that will be covered 12 | 13 | - Using the Datasets library to load and manipulate the go_emotions data 14 | - Defining the training pipeline with Tez 15 | - Training a lightweight SqueezeBERT model on a multi-label classification problem and reaching an AUC above 0.9 on the validation and test data 16 | 17 | ### Things that will be done next (PRs are welcome) 18 | 19 | - Deploying the model 20 | - Crafting a small UI with React or Streamlit 21 | 22 | ### Link to the trained model 23 | 24 | [Download it here](https://goemotions-with-tez.s3.eu-west-3.amazonaws.com/model.bin) 25 | -------------------------------------------------------------------------------- /export/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ahmedbesbes/multi-label-sentiment-classifier/03ca95b5e2a0e25685663197eef7f5cddb3aaa3a/export/.gitkeep -------------------------------------------------------------------------------- /images/goemotions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ahmedbesbes/multi-label-sentiment-classifier/03ca95b5e2a0e25685663197eef7f5cddb3aaa3a/images/goemotions.png -------------------------------------------------------------------------------- /labels.py: -------------------------------------------------------------------------------- 1 | _CLASS_NAMES = [ 2 | "admiration", 3 | "amusement", 4 | "anger", 5 | "annoyance", 6 | "approval", 7 | "caring", 8 | "confusion", 9 | "curiosity", 10 | "desire", 11 | "disappointment", 12 | "disapproval", 13 | "disgust", 14 | "embarrassment", 15 | "excitement", 16 | "fear", 17 | "gratitude", 18 | "grief", 19 | "joy", 20 | "love", 21 | "nervousness", 22 | "optimism", 23 | "pride", 24 | "realization", 25 | "relief", 26 | "remorse", 27 | "sadness", 28 | "surprise", 29 | "neutral", 30 | ] 31 | 32 | mapping = dict(zip(range(len(_CLASS_NAMES)), _CLASS_NAMES))  # label index -> emotion name, e.g. mapping[0] == "admiration" -------------------------------------------------------------------------------- /logs/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ahmedbesbes/multi-label-sentiment-classifier/03ca95b5e2a0e25685663197eef7f5cddb3aaa3a/logs/.gitkeep -------------------------------------------------------------------------------- /training_with_tez.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "%config Completer.use_jedi=False\n", 10 | "%matplotlib inline" 11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": null, 16 | "metadata": {}, 17 | "outputs": [], 18 | "source": [ 19 | "from datasets import load_dataset\n", 20 | "from labels import mapping\n", 21 | "\n", 22 | "import pandas as pd\n", 23 | "pd.set_option('display.max_colwidth', None)  # -1 is deprecated; None shows the full text\n", 24 | "import numpy as np\n", 25 | "\n", 26 | "from tqdm.notebook import tqdm\n", 27 | "from matplotlib import pyplot as plt\n", 28 | "\n", 29 | "from torch.utils.data import DataLoader\n", 30 | "\n", 31 | "import tez\n", 32 | "import torch\n", 33 | "import torch.nn as nn\n", 34 | "import transformers\n", 35 | "from sklearn import metrics, model_selection, preprocessing\n", 36 | "from transformers import AdamW, get_linear_schedule_with_warmup" 37 | ] 38 | }, 39 | { 40 | "cell_type": "markdown", 41 | "metadata": {}, 42 | 
"source": [ 43 | "### Load the dataset and split it into train, valid and test" 44 | ] 45 | }, 46 | { 47 | "cell_type": "code", 48 | "execution_count": null, 49 | "metadata": {}, 50 | "outputs": [], 51 | "source": [ 52 | "go_emotions = load_dataset(\"go_emotions\")\n", 53 | "\n", 54 | "data = go_emotions.data\n", 55 | "\n", 56 | "train = go_emotions.data[\"train\"].to_pandas()\n", 57 | "valid = go_emotions.data[\"validation\"].to_pandas()\n", 58 | "test = go_emotions.data[\"test\"].to_pandas()" 59 | ] 60 | }, 61 | { 62 | "cell_type": "code", 63 | "execution_count": null, 64 | "metadata": {}, 65 | "outputs": [], 66 | "source": [ 67 | "train.head(10)" 68 | ] 69 | }, 70 | { 71 | "cell_type": "markdown", 72 | "metadata": {}, 73 | "source": [ 74 | "### Convert the dataset to a one-hot representation" 75 | ] 76 | }, 77 | { 78 | "cell_type": "code", 79 | "execution_count": null, 80 | "metadata": {}, 81 | "outputs": [], 82 | "source": [ 83 | "n_labels = len(mapping)\n", 84 | "\n", 85 | "def one_hot_labels(df):\n", 86 | " dict_labels = []\n", 87 | " for i in tqdm(range(len(df)), leave=False):\n", 88 | " d = dict(zip(range(n_labels), [0]*n_labels))\n", 89 | " labels = df.loc[i][\"labels\"]\n", 90 | " for label in labels:\n", 91 | " d[label] = 1\n", 92 | " dict_labels.append(d)\n", 93 | " df_labels = pd.DataFrame(dict_labels)\n", 94 | " return df_labels\n" 95 | ] 96 | }, 97 | { 98 | "cell_type": "code", 99 | "execution_count": null, 100 | "metadata": {}, 101 | "outputs": [], 102 | "source": [ 103 | "train_oh_labels = one_hot_labels(train)\n", 104 | "valid_oh_labels = one_hot_labels(valid)\n", 105 | "test_oh_labels = one_hot_labels(test)" 106 | ] 107 | }, 108 | { 109 | "cell_type": "code", 110 | "execution_count": null, 111 | "metadata": {}, 112 | "outputs": [], 113 | "source": [ 114 | "train_oh_labels.shape" 115 | ] 116 | }, 117 | { 118 | "cell_type": "code", 119 | "execution_count": null, 120 | "metadata": {}, 121 | "outputs": [], 122 | "source": [ 123 | "train = pd.concat([train, train_oh_labels], axis=1)\n", 124 | "valid = pd.concat([valid, valid_oh_labels], axis=1)\n", 125 | "test = pd.concat([test, test_oh_labels], axis=1)" 126 | ] 127 | }, 128 | { 129 | "cell_type": "markdown", 130 | "metadata": {}, 131 | "source": [ 132 | "### Quick check of the data" 133 | ] 134 | }, 135 | { 136 | "cell_type": "code", 137 | "execution_count": null, 138 | "metadata": {}, 139 | "outputs": [], 140 | "source": [ 141 | "print(train.shape, valid.shape, test.shape)" 142 | ] 143 | }, 144 | { 145 | "cell_type": "code", 146 | "execution_count": null, 147 | "metadata": { 148 | "scrolled": true 149 | }, 150 | "outputs": [], 151 | "source": [ 152 | "train.head()" 153 | ] 154 | }, 155 | { 156 | "cell_type": "code", 157 | "execution_count": null, 158 | "metadata": {}, 159 | "outputs": [], 160 | "source": [ 161 | "fig = plt.figure(figsize=(15, 13))\n", 162 | "\n", 163 | "ax1 = plt.subplot(3, 1, 1)\n", 164 | "train[range(n_labels)].mean(axis=0).plot(kind=\"bar\", ax=ax1, title=\"distribution of labels in train\")\n", 165 | "\n", 166 | "ax2 = plt.subplot(3, 1, 2)\n", 167 | "valid[range(n_labels)].mean(axis=0).plot(kind=\"bar\", ax=ax2, title=\"distribution of labels in validation\")\n", 168 | "\n", 169 | "ax3 = plt.subplot(3, 1, 3)\n", 170 | "test[range(n_labels)].mean(axis=0).plot(kind=\"bar\", ax=ax3, title=\"distribution of labels in test\")" 171 | ] 172 | }, 173 | { 174 | "cell_type": "code", 175 | "execution_count": null, 176 | "metadata": {}, 177 | "outputs": [], 178 | "source": [ 179 | "def inspect_data(label, n=5):\n", 
180 | "    samples = train[train[label] == 1].sample(n)\n", 181 | "    sentiment = mapping[label]\n", 182 | "    \n", 183 | "    print(f\"examples from {sentiment}\")\n", 184 | "    print()\n", 185 | "    for text in samples[\"text\"]:\n", 186 | "        print(text)\n", 187 | "        print(\"---\")" 188 | ] 189 | }, 190 | { 191 | "cell_type": "code", 192 | "execution_count": null, 193 | "metadata": {}, 194 | "outputs": [], 195 | "source": [ 196 | "inspect_data(0)" 197 | ] 198 | }, 199 | { 200 | "cell_type": "code", 201 | "execution_count": null, 202 | "metadata": {}, 203 | "outputs": [], 204 | "source": [ 205 | "inspect_data(1)" 206 | ] 207 | }, 208 | { 209 | "cell_type": "code", 210 | "execution_count": null, 211 | "metadata": {}, 212 | "outputs": [], 213 | "source": [ 214 | "inspect_data(2)" 215 | ] 216 | }, 217 | { 218 | "cell_type": "code", 219 | "execution_count": null, 220 | "metadata": {}, 221 | "outputs": [], 222 | "source": [ 223 | "inspect_data(5)" 224 | ] 225 | }, 226 | { 227 | "cell_type": "markdown", 228 | "metadata": {}, 229 | "source": [ 230 | "### Define a PyTorch dataset" 231 | ] 232 | }, 233 | { 234 | "cell_type": "code", 235 | "execution_count": null, 236 | "metadata": {}, 237 | "outputs": [], 238 | "source": [ 239 | "class GoEmotionDataset:\n", 240 | "    def __init__(self, texts, targets):\n", 241 | "        self.texts = texts\n", 242 | "        self.targets = targets\n", 243 | "        self.tokenizer = transformers.SqueezeBertTokenizer.from_pretrained(\n", 244 | "            \"squeezebert/squeezebert-uncased\", do_lower_case=True\n", 245 | "        )\n", 246 | "        self.max_len = 35\n", 247 | "    \n", 248 | "    def __len__(self):\n", 249 | "        return len(self.texts)\n", 250 | "\n", 251 | "    \n", 252 | "    def __getitem__(self, index):\n", 253 | "        target = self.targets[index]\n", 254 | "        text = self.texts[index]\n", 255 | "        \n", 256 | "        inputs = self.tokenizer.encode_plus(text,\n", 257 | "                                            None,\n", 258 | "                                            add_special_tokens=True,\n", 259 | "                                            max_length=self.max_len,\n", 260 | "                                            padding=\"max_length\",\n", 261 | "                                            truncation=True)\n", 262 | "        \n", 263 | "        ids = inputs[\"input_ids\"]\n", 264 | "        mask = inputs[\"attention_mask\"]\n", 265 | "\n", 266 | "        return {\n", 267 | "            \"ids\": torch.tensor(ids, dtype=torch.long),\n", 268 | "            \"mask\": torch.tensor(mask, dtype=torch.long),\n", 269 | "            \"targets\": torch.tensor(target, dtype=torch.long),\n", 270 | "        }" 271 | ] 272 | }, 273 | { 274 | "cell_type": "markdown", 275 | "metadata": {}, 276 | "source": [ 277 | "### Define a tez Model" 278 | ] 279 | }, 280 | { 281 | "cell_type": "code", 282 | "execution_count": null, 283 | "metadata": {}, 284 | "outputs": [], 285 | "source": [ 286 | "class EmotionClassifier(tez.Model):\n", 287 | "    def __init__(self, num_train_steps, num_classes):\n", 288 | "        super().__init__()\n", 289 | "        self.bert = transformers.SqueezeBertModel.from_pretrained(\"squeezebert/squeezebert-uncased\")\n", 290 | "        self.bert_drop = nn.Dropout(0.3)\n", 291 | "        self.out = nn.Linear(768, num_classes)\n", 292 | "        self.num_train_steps = num_train_steps\n", 293 | "        self.step_scheduler_after = \"batch\"  # step the LR scheduler after every batch, not every epoch\n", 294 | "    \n", 295 | "    def fetch_optimizer(self):\n", 296 | "        param_optimizer = list(self.named_parameters())\n", 297 | "        no_decay = [\"bias\", \"LayerNorm.bias\"]\n", 298 | "        optimizer_parameters = [\n", 299 | "            {\n", 300 | "                \"params\": [\n", 301 | "                    p for n, p in param_optimizer if not any(nd in n for nd in no_decay)\n", 302 | "                ],\n", 303 | "                \"weight_decay\": 0.001,\n", 304 | "            },\n", 305 | "            {\n", 306 | "                \"params\": [\n", 307 | "                    p for n, p in param_optimizer if any(nd in n for nd in no_decay)\n", 308 | "                ],\n", 309 | "                \"weight_decay\": 0.0,\n", 310 | "            },\n", 311 | "        ]\n", 312 | "        opt = AdamW(optimizer_parameters, lr=3e-5)\n", 313 | "        return opt\n", 314 | "\n", 315 | "    \n", 316 | "    def fetch_scheduler(self):\n", 317 | "        sch = get_linear_schedule_with_warmup(\n", 318 | "            self.optimizer, num_warmup_steps=0, num_training_steps=self.num_train_steps\n", 319 | "        )\n", 320 | "        return sch\n", 321 | "    \n", 322 | "    def loss(self, outputs, targets):\n", 323 | "        if targets is None:\n", 324 | "            return None\n", 325 | "        return nn.BCEWithLogitsLoss()(outputs, targets.float())\n", 326 | "    \n", 327 | "    \n", 328 | "    def monitor_metrics(self, outputs, targets):\n", 329 | "        if targets is None:\n", 330 | "            return {}\n", 331 | "        \n", 332 | "        outputs = torch.sigmoid(outputs)\n", 333 | "        outputs = outputs.cpu().detach().numpy()\n", 334 | "        targets = targets.cpu().detach().numpy()\n", 335 | "        \n", 336 | "        fpr_micro, tpr_micro, _ = metrics.roc_curve(targets.ravel(), outputs.ravel())\n", 337 | "        auc_micro = metrics.auc(fpr_micro, tpr_micro)\n", 338 | "        return {\"auc\": auc_micro}\n", 339 | "    \n", 340 | "    \n", 341 | "    def forward(self, ids, mask, targets=None):\n", 342 | "        o_2 = self.bert(ids, attention_mask=mask)[\"pooler_output\"]\n", 343 | "        b_o = self.bert_drop(o_2)\n", 344 | "        output = self.out(b_o)\n", 345 | "        loss = self.loss(output, targets)\n", 346 | "        acc = self.monitor_metrics(output, targets)\n", 347 | "        return output, loss, acc" 348 | ] 349 | }, 350 | { 351 | "cell_type": "markdown", 352 | "metadata": {}, 353 | "source": [ 354 | "### Start the training" 355 | ] 356 | }, 357 | { 358 | "cell_type": "code", 359 | "execution_count": null, 360 | "metadata": {}, 361 | "outputs": [], 362 | "source": [ 363 | "train_dataset = GoEmotionDataset(train.text.tolist(), train[range(n_labels)].values.tolist())\n", 364 | "valid_dataset = GoEmotionDataset(valid.text.tolist(), valid[range(n_labels)].values.tolist())" 365 | ] 366 | }, 367 | { 368 | "cell_type": "code", 369 | "execution_count": null, 370 | "metadata": {}, 371 | "outputs": [], 372 | "source": [ 373 | "n_train_steps = int(len(train) / 64 * 8)  # batch size 64 and 8 epochs, matching the fit() call below\n", 374 | "model = EmotionClassifier(n_train_steps, n_labels)\n", 375 | "\n", 376 | "tb_logger = tez.callbacks.TensorBoardLogger(log_dir=\"logs/\")\n", 377 | "es = tez.callbacks.EarlyStopping(monitor=\"valid_loss\", model_path=\"export/model.bin\")\n", 378 | "\n", 379 | "\n", 380 | "model.fit(train_dataset,\n", 381 | "          valid_dataset, \n", 382 | "          train_bs=64,\n", 383 | "          device=\"cuda\", \n", 384 | "          epochs=8, \n", 385 | "          callbacks=[tb_logger, es], \n", 386 | "          fp16=True, \n", 387 | "          n_jobs=10)" 388 | ] 389 | }, 390 | { 391 | "cell_type": "code", 392 | "execution_count": null, 393 | "metadata": {}, 394 | "outputs": [], 395 | "source": [ 396 | "test_dataset = GoEmotionDataset(test.text.tolist(), test[range(n_labels)].values.tolist())\n", 397 | "dataloader = DataLoader(test_dataset, batch_size=64, shuffle=False, drop_last=False)" 398 | ] 399 | }, 400 | { 401 | "cell_type": "markdown", 402 | "metadata": {}, 403 | "source": [ 404 | "### Evaluate on the test set" 405 | ] 406 | }, 407 | { 408 | "cell_type": "code", 409 | "execution_count": null, 410 | "metadata": {}, 411 | "outputs": [], 412 | "source": [ 413 | "model.eval()  # disable dropout before scoring the test set\noutputs = []\n", 414 | "\n", 415 | "with torch.no_grad():\n", 416 | "    for i, batch in tqdm(enumerate(dataloader), total=len(dataloader)):\n", 417 | "        output, loss, acc = model.forward(batch[\"ids\"].to(\"cuda\"), \n", 418 | "                                          batch[\"mask\"].to(\"cuda\"), \n", 419 | "                                          
#batch[\"token_type_ids\"].to(\"cuda\"),\n", 420 | " batch[\"targets\"].to(\"cuda\")\n", 421 | " )\n", 422 | " outputs.append(output)" 423 | ] 424 | }, 425 | { 426 | "cell_type": "code", 427 | "execution_count": null, 428 | "metadata": {}, 429 | "outputs": [], 430 | "source": [ 431 | "outputs = torch.cat(outputs)\n", 432 | "outputs = torch.sigmoid(outputs)\n", 433 | "outputs = outputs.cpu().detach().numpy()" 434 | ] 435 | }, 436 | { 437 | "cell_type": "code", 438 | "execution_count": null, 439 | "metadata": {}, 440 | "outputs": [], 441 | "source": [ 442 | "roc_metrics = []\n", 443 | "\n", 444 | "for i in range(n_labels):\n", 445 | " roc = metrics.roc_auc_score(test[i].values, outputs[:, i])\n", 446 | " roc_metrics.append(roc)\n", 447 | "\n", 448 | "s = pd.Series(roc_metrics, index=range(n_labels))\n", 449 | "\n", 450 | "s.plot(kind=\"bar\", figsize=(20, 5), title=\"roc auc score per class on test data\", grid=True)" 451 | ] 452 | }, 453 | { 454 | "cell_type": "markdown", 455 | "metadata": {}, 456 | "source": [ 457 | "### Test some sentences" 458 | ] 459 | }, 460 | { 461 | "cell_type": "code", 462 | "execution_count": null, 463 | "metadata": {}, 464 | "outputs": [], 465 | "source": [ 466 | "tokenizer = transformers.SqueezeBertTokenizer.from_pretrained(\n", 467 | " \"squeezebert/squeezebert-uncased\", do_lower_case=True\n", 468 | " )" 469 | ] 470 | }, 471 | { 472 | "cell_type": "code", 473 | "execution_count": null, 474 | "metadata": {}, 475 | "outputs": [], 476 | "source": [ 477 | "def score_sentence(text, topn=5):\n", 478 | " max_len = 35\n", 479 | " with torch.no_grad():\n", 480 | "\n", 481 | " inputs = tokenizer.encode_plus(text,\n", 482 | " None,\n", 483 | " add_special_tokens=True,\n", 484 | " max_length=max_len,\n", 485 | " padding=\"max_length\",\n", 486 | " truncation=True)\n", 487 | " ids = inputs[\"input_ids\"]\n", 488 | " ids = torch.LongTensor(ids).cuda().unsqueeze(0)\n", 489 | "\n", 490 | " attention_mask = inputs[\"attention_mask\"]\n", 491 | " attention_mask = torch.LongTensor(attention_mask).cuda().unsqueeze(0)\n", 492 | "\n", 493 | " output = model.forward(ids, attention_mask)[0]\n", 494 | " output = torch.sigmoid(output)\n", 495 | "\n", 496 | " probas, indices = torch.sort(output)\n", 497 | "\n", 498 | " probas = probas.cpu().numpy()[0][::-1]\n", 499 | " indices = indices.cpu().numpy()[0][::-1]\n", 500 | "\n", 501 | " for i, p in zip(indices[:topn], probas[:topn]):\n", 502 | " print(mapping[i], p)" 503 | ] 504 | }, 505 | { 506 | "cell_type": "code", 507 | "execution_count": null, 508 | "metadata": {}, 509 | "outputs": [], 510 | "source": [ 511 | "score_sentence(\"i miss my friends\")" 512 | ] 513 | }, 514 | { 515 | "cell_type": "code", 516 | "execution_count": null, 517 | "metadata": {}, 518 | "outputs": [], 519 | "source": [ 520 | "score_sentence(\"funny how this craps out!\")" 521 | ] 522 | }, 523 | { 524 | "cell_type": "code", 525 | "execution_count": null, 526 | "metadata": {}, 527 | "outputs": [], 528 | "source": [ 529 | "score_sentence(\"go to hell! 
\")" 530 | ] 531 | }, 532 | { 533 | "cell_type": "code", 534 | "execution_count": null, 535 | "metadata": {}, 536 | "outputs": [], 537 | "source": [ 538 | "score_sentence(\"you might have a point, but i strongly disagree with you\")" 539 | ] 540 | }, 541 | { 542 | "cell_type": "code", 543 | "execution_count": null, 544 | "metadata": {}, 545 | "outputs": [], 546 | "source": [ 547 | "score_sentence(\"i'm feeling very confident about this situation\")" 548 | ] 549 | }, 550 | { 551 | "cell_type": "code", 552 | "execution_count": null, 553 | "metadata": {}, 554 | "outputs": [], 555 | "source": [ 556 | "score_sentence(\"try to be safe my friend\")" 557 | ] 558 | } 559 | ], 560 | "metadata": { 561 | "kernelspec": { 562 | "display_name": "Python [conda env:pt1.5]", 563 | "language": "python", 564 | "name": "conda-env-pt1.5-py" 565 | }, 566 | "language_info": { 567 | "codemirror_mode": { 568 | "name": "ipython", 569 | "version": 3 570 | }, 571 | "file_extension": ".py", 572 | "mimetype": "text/x-python", 573 | "name": "python", 574 | "nbconvert_exporter": "python", 575 | "pygments_lexer": "ipython3", 576 | "version": "3.7.7" 577 | } 578 | }, 579 | "nbformat": 4, 580 | "nbformat_minor": 2 581 | } 582 | --------------------------------------------------------------------------------
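As a starting point for the "Deploying the model" step listed in the README, here is a minimal, hedged sketch of how the exported weights could be loaded for inference outside the notebook. It assumes the `EmotionClassifier` class from `training_with_tez.ipynb` has been copied into an importable `model.py`, and that the tez version used here exposes `Model.load(model_path, device=...)` to restore the checkpoint written by `EarlyStopping`. The file name `inference_sketch.py` and the `predict` helper are illustrative, not part of the repo.

```python
# inference_sketch.py -- hypothetical standalone script, not a file in this repo.
import torch
import transformers

from labels import mapping
from model import EmotionClassifier  # assumption: the notebook class, saved to model.py

tokenizer = transformers.SqueezeBertTokenizer.from_pretrained(
    "squeezebert/squeezebert-uncased", do_lower_case=True
)

# num_train_steps only matters for the scheduler during training,
# so any value works for inference
model = EmotionClassifier(num_train_steps=0, num_classes=len(mapping))
model.load("export/model.bin", device="cuda")  # restores the saved state dict
model.eval()

def predict(text, topn=5):
    inputs = tokenizer.encode_plus(
        text, None, add_special_tokens=True,
        max_length=35, padding="max_length", truncation=True,
    )
    ids = torch.LongTensor(inputs["input_ids"]).cuda().unsqueeze(0)
    mask = torch.LongTensor(inputs["attention_mask"]).cuda().unsqueeze(0)
    with torch.no_grad():
        output, _, _ = model(ids, mask)  # targets=None -> loss is None
    probas = torch.sigmoid(output)[0]
    top = torch.topk(probas, topn)  # highest-probability emotions first
    return [(mapping[i.item()], round(p.item(), 3))
            for p, i in zip(top.values, top.indices)]

# predict("i miss my friends") should rank sadness among the top emotions,
# mirroring the score_sentence checks at the end of the notebook.
```

If `Model.load` isn't available in your tez version, open the file with `torch.load` and look for the state dict inside the checkpoint before calling `load_state_dict` yourself.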