├── LICENSE ├── README.md ├── infineonTrainTransformed.csv ├── input └── infineon │ ├── infineonTestDataTransformed.csv │ ├── infineonTestDataTransformed2.csv │ └── infineonTestTransformed.csv └── main ├── mySubmission.csv ├── mySubmission10.csv ├── mySubmission11.csv ├── mySubmission12.csv ├── mySubmission2.csv ├── mySubmission3.csv ├── mySubmission5.csv ├── mySubmission6.csv ├── mySubmission8.csv ├── mySubmission9.csv ├── submission.csv ├── submission10.csv ├── submission11.csv ├── submission2.csv ├── submission3.csv ├── submission5.csv ├── submission6.csv ├── submission8.csv ├── submission9.csv ├── toxic-comment-classification-using-bert.ipynb ├── try2.ipynb ├── tryInfineon.ipynb ├── tryInfineon2.ipynb ├── tryInfineon3.ipynb ├── tryInfineon4.ipynb └── tryInfineon5.ipynb /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Anshul Wadhawan 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Solution repository for Infineon Machine Learning Hackathon 2 | 3 | Team Name : Idk 4 | 5 | Private Leaderboard National Rank : 3 (Accuracy = 0.7485) 6 | 7 | Task : Document Tagging Problem 8 | 9 | Methodology : This is a multi-class classification system aimed at identifying the subset of 46 classes to which a particular piece of text belongs. 10 | We used layer freezing during fine-tuning, which achieved an accuracy of 74.85% on the Infineon dataset. 
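Below is a minimal sketch of the gradual layer-freezing recipe mentioned above, written against the fastai v1 text API. The train/validation split, the 'label' column name, and the learning rates are illustrative assumptions, not the exact code from the notebooks under main/.

```python
# Sketch only: gradual unfreezing of a pretrained text classifier (fastai v1).
# Assumes infineonTrainTransformed.csv holds a 'comment_text' column and a
# 'label' column (the label column name is an assumption).
import pandas as pd
from fastai.text import TextClasDataBunch, text_classifier_learner, AWD_LSTM

df = pd.read_csv('infineonTrainTransformed.csv')
cut = int(0.9 * len(df))                      # assumed 90/10 train/validation split
train_df, valid_df = df[:cut], df[cut:]

data_clas = TextClasDataBunch.from_df(
    '.', train_df, valid_df,
    text_cols='comment_text', label_cols='label')

learn = text_classifier_learner(data_clas, AWD_LSTM, drop_mult=0.5)

learn.freeze()                                # train only the classifier head first
learn.fit_one_cycle(1, 2e-2)

learn.freeze_to(-2)                           # then unfreeze the last layer group
learn.fit_one_cycle(1, slice(1e-2 / (2.6 ** 4), 1e-2))

learn.unfreeze()                              # finally fine-tune the whole network
learn.fit_one_cycle(2, slice(1e-3 / (2.6 ** 4), 1e-3))
```

Keeping the pretrained layers frozen while the randomly initialised head stabilises, then unfreezing layer groups gradually with discriminative learning rates, is the usual reason layer freezing helps when fine-tuning on a small dataset.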
11 | -------------------------------------------------------------------------------- /infineonTrainTransformed.csv: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anshulwadhawan/FastAI/77af757b894d0c0cc240079fc3de507e225e66a2/infineonTrainTransformed.csv -------------------------------------------------------------------------------- /input/infineon/infineonTestDataTransformed.csv: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anshulwadhawan/FastAI/77af757b894d0c0cc240079fc3de507e225e66a2/input/infineon/infineonTestDataTransformed.csv -------------------------------------------------------------------------------- /input/infineon/infineonTestDataTransformed2.csv: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anshulwadhawan/FastAI/77af757b894d0c0cc240079fc3de507e225e66a2/input/infineon/infineonTestDataTransformed2.csv -------------------------------------------------------------------------------- /input/infineon/infineonTestTransformed.csv: -------------------------------------------------------------------------------- 1 | id,comment_text 2 | 13,"binu enables publishers and advertisers in emerging markets to engage consumers at scale by addressing expensive mobile data costs.in a world where smartphones continue to penetrate the market, more and more users have the physical ability to connect to the internet. however, high mobile data costs act as a restrictive barrier and remain completely unaffordable. 3 | 4 | at binu, we want to solve the data problem because we believe that people deserve to be entertained and engaged by the great content the internet has to offer, without being held hostage by crushingly restrictive data costs. we exist to enable access to great online content. 5 | 6 | we do this by transcending the a?~data cost barriera?t by obsessively focusing on data bandwidth efficiency at every level of delivering mobile content, rather than focusing on reducing data prices. 7 | 8 | we have been testing, refining and perfecting our technology for nearly 10 years with the unwavering ambition to make mobile content more affordable, at scale, to more end-users. 9 | 10 | publishers can finally reach and engage with a liberated mass market by easily and cost effectively repurposing their existing content. at binu we enable publishers, advertisers and everyday mobile consumers to become active participants in making data more affordable. " 11 | 14,"ontv provides metadata for all scripted tv series for which full episodes are available online.ontv provides metadata for all scripted tv series for which full episodes are available online. the xml-based metadata feeds cover us networks (abc, cbs, fox, nbc, scifi, the cw, the n, usa network), uk channels (bbc iplayer, itv, 4od and five) as well as tv shows available online through aggregators, such as itunes and itunes gb, aol video, amazon unbox and bittorrent. extended data set includes information enabling geo-filtering of content access by users. 12 | 13 | coverage of the ontv feed is being extended to include movies available online through amazon unbox and itunes - to be followed by other major online movie suppliers. it is its objective to be the main source for metadata on professionally produced quality content online, as it becomes available. coverage of other geographical markets and formats is planned for the months ahead. 
14 | 15 | besides metadata services, ontv s own tv metadata publishing framework can be used in other analytical and publishing projects. " 16 | 15,"ubio provide direct booking capabilities for travel metasearch websites and apps.ubio enable direct bookings and other transactional processes for metasearch. ubiodo this with their automation cloud technology which can interact securely with any supplier website. 17 | 18 | they provide a simple api for booking any travel product on a supplier website. in the current metasearch model customers are passed off to the supplier to complete a booking. this means customers often drop off a?"" a particular problem on mobile. and as metasearch you cana?tt integrate an expensive distribution system as that would defeat your price advantage. 19 | 20 | their automation technology allows you to control the end-to-end customer experience. your customers remain on your site or app for the full transaction. you build an experience for booking as smart as you already do for search a?"" even on mobile. this keeps customers on your site, increasing your brand equity, customer loyalty and significantly enhances the overall user experience. and of course, that adds up to much better conversion. 21 | 22 | they know direct booking is the future of metasearch, and they can provide the means for you to achieve it today. " 23 | 16,"winr is a program that is used for compressed files to create, open, and decompress.winr (wi.nr) is a url shortener that that gives users the chance to win prizes instantly. when a user shortens a url with wi.nr, each click of the shortened url enters into an online sweepstakes. 24 | 25 | there are three ways to win: by shortening a url, by clicking on a wi.nr-shortened url, and by referring users to wi.nr. 26 | 27 | there is already a very large community who use twitter to participate in contests and sweepstakes, and winr is targeted towards this market. 28 | 29 | winr is based out of brooklyn, ny. " 30 | -------------------------------------------------------------------------------- /main/toxic-comment-classification-using-bert.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": { 7 | "_cell_guid": "b1076dfc-b9ad-4769-8c92-a6c4dae69d19", 8 | "_uuid": "8f2839f25d086af736a60e9eeb907d3b93b6e0e5" 9 | }, 10 | "outputs": [ 11 | { 12 | "name": "stdout", 13 | "output_type": "stream", 14 | "text": [ 15 | "['__notebook__.ipynb', '__output__.json']\n" 16 | ] 17 | } 18 | ], 19 | "source": [ 20 | "# This Python 3 environment comes with many helpful analytics libraries installed\n", 21 | "# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n", 22 | "# For example, here's several helpful packages to load in \n", 23 | "\n", 24 | "import numpy as np # linear algebra\n", 25 | "import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n", 26 | "\n", 27 | "# Input data files are available in the \"../input/\" directory.\n", 28 | "# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n", 29 | "\n", 30 | "import os\n", 31 | "import collections\n", 32 | "print(os.listdir(\"../working/\"))\n", 33 | "\n", 34 | "# Any results you write to the current directory are saved as output." 
35 | ] 36 | }, 37 | { 38 | "cell_type": "code", 39 | "execution_count": 2, 40 | "metadata": { 41 | "_cell_guid": "79c7e3d0-c299-4dcb-8224-4455121ee9b0", 42 | "_uuid": "d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" 43 | }, 44 | "outputs": [ 45 | { 46 | "name": "stderr", 47 | "output_type": "stream", 48 | "text": [ 49 | "WARNING: Logging before flag parsing goes to stderr.\n", 50 | "W0511 15:08:25.103196 139818736010624 __init__.py:56] Some hub symbols are not available because TensorFlow version is less than 1.14\n" 51 | ] 52 | } 53 | ], 54 | "source": [ 55 | "from sklearn.model_selection import train_test_split\n", 56 | "import pandas as pd\n", 57 | "import tensorflow as tf\n", 58 | "import tensorflow_hub as hub\n", 59 | "from datetime import datetime" 60 | ] 61 | }, 62 | { 63 | "cell_type": "code", 64 | "execution_count": 3, 65 | "metadata": {}, 66 | "outputs": [ 67 | { 68 | "name": "stdout", 69 | "output_type": "stream", 70 | "text": [ 71 | "Collecting bert-tensorflow\r\n", 72 | "\u001b[?25l Downloading https://files.pythonhosted.org/packages/a6/66/7eb4e8b6ea35b7cc54c322c816f976167a43019750279a8473d355800a93/bert_tensorflow-1.0.1-py2.py3-none-any.whl (67kB)\r\n", 73 | "\u001b[K 100% |████████████████████████████████| 71kB 4.7MB/s \r\n", 74 | "\u001b[?25hRequirement already satisfied: six in /opt/conda/lib/python3.6/site-packages (from bert-tensorflow) (1.12.0)\r\n", 75 | "Installing collected packages: bert-tensorflow\r\n", 76 | "Successfully installed bert-tensorflow-1.0.1\r\n", 77 | "\u001b[33mYou are using pip version 19.0.3, however version 19.1.1 is available.\r\n", 78 | "You should consider upgrading via the 'pip install --upgrade pip' command.\u001b[0m\r\n" 79 | ] 80 | } 81 | ], 82 | "source": [ 83 | "!pip install bert-tensorflow" 84 | ] 85 | }, 86 | { 87 | "cell_type": "code", 88 | "execution_count": 4, 89 | "metadata": {}, 90 | "outputs": [], 91 | "source": [ 92 | "import bert\n", 93 | "from bert import run_classifier\n", 94 | "from bert import optimization\n", 95 | "from bert import tokenization\n", 96 | "from bert import modeling" 97 | ] 98 | }, 99 | { 100 | "cell_type": "code", 101 | "execution_count": 5, 102 | "metadata": {}, 103 | "outputs": [], 104 | "source": [ 105 | "#import tokenization\n", 106 | "#import modeling\n", 107 | "BERT_VOCAB= '../input/uncased-l12-h768-a12/vocab.txt'\n", 108 | "BERT_INIT_CHKPNT = '../input/uncased-l12-h768-a12/bert_model.ckpt'\n", 109 | "BERT_CONFIG = '../input/uncased-l12-h768-a12/bert_config.json'" 110 | ] 111 | }, 112 | { 113 | "cell_type": "code", 114 | "execution_count": 6, 115 | "metadata": {}, 116 | "outputs": [], 117 | "source": [ 118 | "tokenization.validate_case_matches_checkpoint(True,BERT_INIT_CHKPNT)\n", 119 | "tokenizer = tokenization.FullTokenizer(\n", 120 | " vocab_file=BERT_VOCAB, do_lower_case=True)" 121 | ] 122 | }, 123 | { 124 | "cell_type": "code", 125 | "execution_count": 7, 126 | "metadata": {}, 127 | "outputs": [], 128 | "source": [ 129 | "train_data_path='../input/jigsaw-toxic-comment-classification-challenge/train.csv'\n", 130 | "train = pd.read_csv(train_data_path)\n", 131 | "test = pd.read_csv('../input/jigsaw-toxic-comment-classification-challenge/test.csv')" 132 | ] 133 | }, 134 | { 135 | "cell_type": "code", 136 | "execution_count": 8, 137 | "metadata": {}, 138 | "outputs": [ 139 | { 140 | "data": { 141 | "text/html": [ 142 | "
\n", 143 | "\n", 156 | "\n", 157 | " \n", 158 | " \n", 159 | " \n", 160 | " \n", 161 | " \n", 162 | " \n", 163 | " \n", 164 | " \n", 165 | " \n", 166 | " \n", 167 | " \n", 168 | " \n", 169 | " \n", 170 | " \n", 171 | " \n", 172 | " \n", 173 | " \n", 174 | " \n", 175 | " \n", 176 | " \n", 177 | " \n", 178 | " \n", 179 | " \n", 180 | " \n", 181 | " \n", 182 | " \n", 183 | " \n", 184 | " \n", 185 | " \n", 186 | " \n", 187 | " \n", 188 | " \n", 189 | " \n", 190 | " \n", 191 | " \n", 192 | " \n", 193 | " \n", 194 | " \n", 195 | " \n", 196 | " \n", 197 | " \n", 198 | " \n", 199 | " \n", 200 | " \n", 201 | " \n", 202 | " \n", 203 | " \n", 204 | " \n", 205 | " \n", 206 | " \n", 207 | " \n", 208 | " \n", 209 | " \n", 210 | " \n", 211 | " \n", 212 | " \n", 213 | " \n", 214 | " \n", 215 | " \n", 216 | " \n", 217 | " \n", 218 | " \n", 219 | " \n", 220 | " \n", 221 | " \n", 222 | " \n", 223 | " \n", 224 | " \n", 225 | " \n", 226 | " \n", 227 | "
idcomment_texttoxicsevere_toxicobscenethreatinsultidentity_hate
00000997932d777bfExplanation\\nWhy the edits made under my usern...000000
1000103f0d9cfb60fD'aww! He matches this background colour I'm s...000000
2000113f07ec002fdHey man, I'm really not trying to edit war. It...000000
30001b41b1c6bb37e\"\\nMore\\nI can't make any real suggestions on ...000000
40001d958c54c6e35You, sir, are my hero. Any chance you remember...000000
\n", 228 | "
" 229 | ], 230 | "text/plain": [ 231 | " id ... identity_hate\n", 232 | "0 0000997932d777bf ... 0\n", 233 | "1 000103f0d9cfb60f ... 0\n", 234 | "2 000113f07ec002fd ... 0\n", 235 | "3 0001b41b1c6bb37e ... 0\n", 236 | "4 0001d958c54c6e35 ... 0\n", 237 | "\n", 238 | "[5 rows x 8 columns]" 239 | ] 240 | }, 241 | "execution_count": 8, 242 | "metadata": {}, 243 | "output_type": "execute_result" 244 | } 245 | ], 246 | "source": [ 247 | "train.head()" 248 | ] 249 | }, 250 | { 251 | "cell_type": "code", 252 | "execution_count": 9, 253 | "metadata": {}, 254 | "outputs": [], 255 | "source": [ 256 | "ID = 'id'\n", 257 | "DATA_COLUMN = 'comment_text'\n", 258 | "LABEL_COLUMNS = ['toxic','severe_toxic','obscene','threat','insult','identity_hate']" 259 | ] 260 | }, 261 | { 262 | "cell_type": "code", 263 | "execution_count": 10, 264 | "metadata": {}, 265 | "outputs": [], 266 | "source": [ 267 | "class InputExample(object):\n", 268 | " \"\"\"A single training/test example for simple sequence classification.\"\"\"\n", 269 | "\n", 270 | " def __init__(self, guid, text_a, text_b=None, labels=None):\n", 271 | " \"\"\"Constructs a InputExample.\n", 272 | "\n", 273 | " Args:\n", 274 | " guid: Unique id for the example.\n", 275 | " text_a: string. The untokenized text of the first sequence. For single\n", 276 | " sequence tasks, only this sequence must be specified.\n", 277 | " text_b: (Optional) string. The untokenized text of the second sequence.\n", 278 | " Only must be specified for sequence pair tasks.\n", 279 | " labels: (Optional) [string]. The label of the example. This should be\n", 280 | " specified for train and dev examples, but not for test examples.\n", 281 | " \"\"\"\n", 282 | " self.guid = guid\n", 283 | " self.text_a = text_a\n", 284 | " self.text_b = text_b\n", 285 | " self.labels = labels\n", 286 | "\n", 287 | "\n", 288 | "class InputFeatures(object):\n", 289 | " \"\"\"A single set of features of data.\"\"\"\n", 290 | "\n", 291 | " def __init__(self, input_ids, input_mask, segment_ids, label_ids, is_real_example=True):\n", 292 | " self.input_ids = input_ids\n", 293 | " self.input_mask = input_mask\n", 294 | " self.segment_ids = segment_ids\n", 295 | " self.label_ids = label_ids,\n", 296 | " self.is_real_example=is_real_example" 297 | ] 298 | }, 299 | { 300 | "cell_type": "code", 301 | "execution_count": 11, 302 | "metadata": {}, 303 | "outputs": [], 304 | "source": [ 305 | "def create_examples(df, labels_available=True):\n", 306 | " \"\"\"Creates examples for the training and dev sets.\"\"\"\n", 307 | " examples = []\n", 308 | " for (i, row) in enumerate(df.values):\n", 309 | " guid = row[0]\n", 310 | " text_a = row[1]\n", 311 | " if labels_available:\n", 312 | " labels = row[2:]\n", 313 | " else:\n", 314 | " labels = [0,0,0,0,0,0]\n", 315 | " examples.append(\n", 316 | " InputExample(guid=guid, text_a=text_a, labels=labels))\n", 317 | " return examples" 318 | ] 319 | }, 320 | { 321 | "cell_type": "code", 322 | "execution_count": 12, 323 | "metadata": {}, 324 | "outputs": [], 325 | "source": [ 326 | "TRAIN_VAL_RATIO = 0.9\n", 327 | "LEN = train.shape[0]\n", 328 | "SIZE_TRAIN = int(TRAIN_VAL_RATIO*LEN)\n", 329 | "\n", 330 | "x_train = train[:SIZE_TRAIN]\n", 331 | "x_val = train[SIZE_TRAIN:]\n", 332 | "\n", 333 | "# Use the InputExample class from BERT's run_classifier code to create examples from the data\n", 334 | "train_examples = create_examples(x_train)" 335 | ] 336 | }, 337 | { 338 | "cell_type": "code", 339 | "execution_count": 13, 340 | "metadata": {}, 341 | "outputs": [ 342 | { 343 | 
"data": { 344 | "text/plain": [ 345 | "((159571, 8), (143613, 8), (15958, 8))" 346 | ] 347 | }, 348 | "execution_count": 13, 349 | "metadata": {}, 350 | "output_type": "execute_result" 351 | } 352 | ], 353 | "source": [ 354 | "train.shape, x_train.shape, x_val.shape" 355 | ] 356 | }, 357 | { 358 | "cell_type": "code", 359 | "execution_count": 14, 360 | "metadata": {}, 361 | "outputs": [], 362 | "source": [ 363 | "import pandas\n", 364 | "\n", 365 | "def convert_examples_to_features(examples, max_seq_length, tokenizer):\n", 366 | " \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n", 367 | "\n", 368 | " features = []\n", 369 | " for (ex_index, example) in enumerate(examples):\n", 370 | " print(example.text_a)\n", 371 | " tokens_a = tokenizer.tokenize(example.text_a)\n", 372 | "\n", 373 | " tokens_b = None\n", 374 | " if example.text_b:\n", 375 | " tokens_b = tokenizer.tokenize(example.text_b)\n", 376 | " # Modifies `tokens_a` and `tokens_b` in place so that the total\n", 377 | " # length is less than the specified length.\n", 378 | " # Account for [CLS], [SEP], [SEP] with \"- 3\"\n", 379 | " _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n", 380 | " else:\n", 381 | " # Account for [CLS] and [SEP] with \"- 2\"\n", 382 | " if len(tokens_a) > max_seq_length - 2:\n", 383 | " tokens_a = tokens_a[:(max_seq_length - 2)]\n", 384 | "\n", 385 | " # The convention in BERT is:\n", 386 | " # (a) For sequence pairs:\n", 387 | " # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n", 388 | " # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n", 389 | " # (b) For single sequences:\n", 390 | " # tokens: [CLS] the dog is hairy . [SEP]\n", 391 | " # type_ids: 0 0 0 0 0 0 0\n", 392 | " #\n", 393 | " # Where \"type_ids\" are used to indicate whether this is the first\n", 394 | " # sequence or the second sequence. The embedding vectors for `type=0` and\n", 395 | " # `type=1` were learned during pre-training and are added to the wordpiece\n", 396 | " # embedding vector (and position vector). This is not *strictly* necessary\n", 397 | " # since the [SEP] token unambigiously separates the sequences, but it makes\n", 398 | " # it easier for the model to learn the concept of sequences.\n", 399 | " #\n", 400 | " # For classification tasks, the first vector (corresponding to [CLS]) is\n", 401 | " # used as as the \"sentence vector\". Note that this only makes sense because\n", 402 | " # the entire model is fine-tuned.\n", 403 | " tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n", 404 | " segment_ids = [0] * len(tokens)\n", 405 | "\n", 406 | " if tokens_b:\n", 407 | " tokens += tokens_b + [\"[SEP]\"]\n", 408 | " segment_ids += [1] * (len(tokens_b) + 1)\n", 409 | "\n", 410 | " input_ids = tokenizer.convert_tokens_to_ids(tokens)\n", 411 | "\n", 412 | " # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n", 413 | " # tokens are attended to.\n", 414 | " input_mask = [1] * len(input_ids)\n", 415 | "\n", 416 | " # Zero-pad up to the sequence length.\n", 417 | " padding = [0] * (max_seq_length - len(input_ids))\n", 418 | " input_ids += padding\n", 419 | " input_mask += padding\n", 420 | " segment_ids += padding\n", 421 | "\n", 422 | " assert len(input_ids) == max_seq_length\n", 423 | " assert len(input_mask) == max_seq_length\n", 424 | " assert len(segment_ids) == max_seq_length\n", 425 | " \n", 426 | " labels_ids = []\n", 427 | " for label in example.labels:\n", 428 | " labels_ids.append(int(label))\n", 429 | "\n", 430 | " if ex_index < 0:\n", 431 | " logger.info(\"*** Example ***\")\n", 432 | " logger.info(\"guid: %s\" % (example.guid))\n", 433 | " logger.info(\"tokens: %s\" % \" \".join(\n", 434 | " [str(x) for x in tokens]))\n", 435 | " logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n", 436 | " logger.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n", 437 | " logger.info(\n", 438 | " \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n", 439 | " logger.info(\"label: %s (id = %s)\" % (example.labels, labels_ids))\n", 440 | "\n", 441 | " features.append(\n", 442 | " InputFeatures(input_ids=input_ids,\n", 443 | " input_mask=input_mask,\n", 444 | " segment_ids=segment_ids,\n", 445 | " label_ids=labels_ids))\n", 446 | " return features" 447 | ] 448 | }, 449 | { 450 | "cell_type": "code", 451 | "execution_count": 15, 452 | "metadata": {}, 453 | "outputs": [], 454 | "source": [ 455 | "# We'll set sequences to be at most 128 tokens long.\n", 456 | "MAX_SEQ_LENGTH = 128" 457 | ] 458 | }, 459 | { 460 | "cell_type": "code", 461 | "execution_count": 16, 462 | "metadata": {}, 463 | "outputs": [], 464 | "source": [ 465 | "def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,\n", 466 | " labels, num_labels, use_one_hot_embeddings):\n", 467 | " \"\"\"Creates a classification model.\"\"\"\n", 468 | " model = modeling.BertModel(\n", 469 | " config=bert_config,\n", 470 | " is_training=is_training,\n", 471 | " input_ids=input_ids,\n", 472 | " input_mask=input_mask,\n", 473 | " token_type_ids=segment_ids,\n", 474 | " use_one_hot_embeddings=use_one_hot_embeddings)\n", 475 | "\n", 476 | " # In the demo, we are doing a simple classification task on the entire\n", 477 | " # segment.\n", 478 | " #\n", 479 | " # If you want to use the token-level output, use model.get_sequence_output()\n", 480 | " # instead.\n", 481 | " output_layer = model.get_pooled_output()\n", 482 | "\n", 483 | " hidden_size = output_layer.shape[-1].value\n", 484 | "\n", 485 | " output_weights = tf.get_variable(\n", 486 | " \"output_weights\", [num_labels, hidden_size],\n", 487 | " initializer=tf.truncated_normal_initializer(stddev=0.02))\n", 488 | "\n", 489 | " output_bias = tf.get_variable(\n", 490 | " \"output_bias\", [num_labels], initializer=tf.zeros_initializer())\n", 491 | "\n", 492 | " with tf.variable_scope(\"loss\"):\n", 493 | " if is_training:\n", 494 | " # I.e., 0.1 dropout\n", 495 | " output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\n", 496 | "\n", 497 | " logits = tf.matmul(output_layer, output_weights, transpose_b=True)\n", 498 | " logits = tf.nn.bias_add(logits, output_bias)\n", 499 | " \n", 500 | " # probabilities = tf.nn.softmax(logits, axis=-1) ### multiclass case\n", 501 | " probabilities = tf.nn.sigmoid(logits)#### multi-label case\n", 502 | " \n", 503 | " labels = tf.cast(labels, tf.float32)\n", 504 | " 
tf.logging.info(\"num_labels:{};logits:{};labels:{}\".format(num_labels, logits, labels))\n", 505 | " per_example_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits)\n", 506 | " loss = tf.reduce_mean(per_example_loss)\n", 507 | "\n", 508 | " # probabilities = tf.nn.softmax(logits, axis=-1)\n", 509 | " # log_probs = tf.nn.log_softmax(logits, axis=-1)\n", 510 | " #\n", 511 | " # one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)\n", 512 | " #\n", 513 | " # per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\n", 514 | " # loss = tf.reduce_mean(per_example_loss)\n", 515 | "\n", 516 | " return (loss, per_example_loss, logits, probabilities)\n", 517 | "\n", 518 | "\n", 519 | "def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,\n", 520 | " num_train_steps, num_warmup_steps, use_tpu,\n", 521 | " use_one_hot_embeddings):\n", 522 | " \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n", 523 | "\n", 524 | " def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n", 525 | " \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n", 526 | "\n", 527 | " #tf.logging.info(\"*** Features ***\")\n", 528 | " #for name in sorted(features.keys()):\n", 529 | " # tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n", 530 | "\n", 531 | " input_ids = features[\"input_ids\"]\n", 532 | " input_mask = features[\"input_mask\"]\n", 533 | " segment_ids = features[\"segment_ids\"]\n", 534 | " label_ids = features[\"label_ids\"]\n", 535 | " is_real_example = None\n", 536 | " if \"is_real_example\" in features:\n", 537 | " is_real_example = tf.cast(features[\"is_real_example\"], dtype=tf.float32)\n", 538 | " else:\n", 539 | " is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)\n", 540 | "\n", 541 | " is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n", 542 | "\n", 543 | " (total_loss, per_example_loss, logits, probabilities) = create_model(\n", 544 | " bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\n", 545 | " num_labels, use_one_hot_embeddings)\n", 546 | "\n", 547 | " tvars = tf.trainable_variables()\n", 548 | " initialized_variable_names = {}\n", 549 | " scaffold_fn = None\n", 550 | " if init_checkpoint:\n", 551 | " (assignment_map, initialized_variable_names\n", 552 | " ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n", 553 | " if use_tpu:\n", 554 | "\n", 555 | " def tpu_scaffold():\n", 556 | " tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n", 557 | " return tf.train.Scaffold()\n", 558 | "\n", 559 | " scaffold_fn = tpu_scaffold\n", 560 | " else:\n", 561 | " tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n", 562 | "\n", 563 | " tf.logging.info(\"**** Trainable Variables ****\")\n", 564 | " for var in tvars:\n", 565 | " init_string = \"\"\n", 566 | " if var.name in initialized_variable_names:\n", 567 | " init_string = \", *INIT_FROM_CKPT*\"\n", 568 | " #tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,init_string)\n", 569 | "\n", 570 | " output_spec = None\n", 571 | " if mode == tf.estimator.ModeKeys.TRAIN:\n", 572 | "\n", 573 | " train_op = optimization.create_optimizer(\n", 574 | " total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n", 575 | "\n", 576 | " output_spec = tf.estimator.EstimatorSpec(\n", 577 | " mode=mode,\n", 578 | " loss=total_loss,\n", 579 | " train_op=train_op,\n", 580 | " scaffold=scaffold_fn)\n", 581 | " elif mode == 
tf.estimator.ModeKeys.EVAL:\n", 582 | "\n", 583 | " def metric_fn(per_example_loss, label_ids, probabilities, is_real_example):\n", 584 | "\n", 585 | " logits_split = tf.split(probabilities, num_labels, axis=-1)\n", 586 | " label_ids_split = tf.split(label_ids, num_labels, axis=-1)\n", 587 | " # metrics change to auc of every class\n", 588 | " eval_dict = {}\n", 589 | " for j, logits in enumerate(logits_split):\n", 590 | " label_id_ = tf.cast(label_ids_split[j], dtype=tf.int32)\n", 591 | " current_auc, update_op_auc = tf.metrics.auc(label_id_, logits)\n", 592 | " eval_dict[str(j)] = (current_auc, update_op_auc)\n", 593 | " eval_dict['eval_loss'] = tf.metrics.mean(values=per_example_loss)\n", 594 | " return eval_dict\n", 595 | "\n", 596 | " ## original eval metrics\n", 597 | " # predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)\n", 598 | " # accuracy = tf.metrics.accuracy(\n", 599 | " # labels=label_ids, predictions=predictions, weights=is_real_example)\n", 600 | " # loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)\n", 601 | " # return {\n", 602 | " # \"eval_accuracy\": accuracy,\n", 603 | " # \"eval_loss\": loss,\n", 604 | " # }\n", 605 | "\n", 606 | " eval_metrics = metric_fn(per_example_loss, label_ids, probabilities, is_real_example)\n", 607 | " output_spec = tf.estimator.EstimatorSpec(\n", 608 | " mode=mode,\n", 609 | " loss=total_loss,\n", 610 | " eval_metric_ops=eval_metrics,\n", 611 | " scaffold=scaffold_fn)\n", 612 | " else:\n", 613 | " print(\"mode:\", mode,\"probabilities:\", probabilities)\n", 614 | " output_spec = tf.estimator.EstimatorSpec(\n", 615 | " mode=mode,\n", 616 | " predictions={\"probabilities\": probabilities},\n", 617 | " scaffold=scaffold_fn)\n", 618 | " return output_spec\n", 619 | "\n", 620 | " return model_fn" 621 | ] 622 | }, 623 | { 624 | "cell_type": "code", 625 | "execution_count": 17, 626 | "metadata": {}, 627 | "outputs": [], 628 | "source": [ 629 | "# Compute train and warmup steps from batch size\n", 630 | "# These hyperparameters are copied from this colab notebook (https://colab.sandbox.google.com/github/tensorflow/tpu/blob/master/tools/colab/bert_finetuning_with_cloud_tpus.ipynb)\n", 631 | "BATCH_SIZE = 32\n", 632 | "LEARNING_RATE = 2e-5\n", 633 | "NUM_TRAIN_EPOCHS = 2.0\n", 634 | "\n", 635 | "# Warmup is a period of time where hte learning rate \n", 636 | "# is small and gradually increases--usually helps training.\n", 637 | "WARMUP_PROPORTION = 0.1\n", 638 | "# Model configs\n", 639 | "SAVE_CHECKPOINTS_STEPS = 1000\n", 640 | "SAVE_SUMMARY_STEPS = 500" 641 | ] 642 | }, 643 | { 644 | "cell_type": "code", 645 | "execution_count": 18, 646 | "metadata": {}, 647 | "outputs": [], 648 | "source": [ 649 | "OUTPUT_DIR = \"../working/output\"\n", 650 | "# Specify outpit directory and number of checkpoint steps to save\n", 651 | "run_config = tf.estimator.RunConfig(\n", 652 | " model_dir=OUTPUT_DIR,\n", 653 | " save_summary_steps=SAVE_SUMMARY_STEPS,\n", 654 | " keep_checkpoint_max=1,\n", 655 | " save_checkpoints_steps=SAVE_CHECKPOINTS_STEPS)" 656 | ] 657 | }, 658 | { 659 | "cell_type": "code", 660 | "execution_count": 19, 661 | "metadata": {}, 662 | "outputs": [], 663 | "source": [ 664 | "def input_fn_builder(features, seq_length, is_training, drop_remainder):\n", 665 | " \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n", 666 | "\n", 667 | " all_input_ids = []\n", 668 | " all_input_mask = []\n", 669 | " all_segment_ids = []\n", 670 | " all_label_ids = []\n", 671 | "\n", 672 | " for feature in 
features:\n", 673 | " all_input_ids.append(feature.input_ids)\n", 674 | " all_input_mask.append(feature.input_mask)\n", 675 | " all_segment_ids.append(feature.segment_ids)\n", 676 | " all_label_ids.append(feature.label_ids)\n", 677 | "\n", 678 | " def input_fn(params):\n", 679 | " \"\"\"The actual input function.\"\"\"\n", 680 | " batch_size = params[\"batch_size\"]\n", 681 | "\n", 682 | " num_examples = len(features)\n", 683 | "\n", 684 | " # This is for demo purposes and does NOT scale to large data sets. We do\n", 685 | " # not use Dataset.from_generator() because that uses tf.py_func which is\n", 686 | " # not TPU compatible. The right way to load data is with TFRecordReader.\n", 687 | " d = tf.data.Dataset.from_tensor_slices({\n", 688 | " \"input_ids\":\n", 689 | " tf.constant(\n", 690 | " all_input_ids, shape=[num_examples, seq_length],\n", 691 | " dtype=tf.int32),\n", 692 | " \"input_mask\":\n", 693 | " tf.constant(\n", 694 | " all_input_mask,\n", 695 | " shape=[num_examples, seq_length],\n", 696 | " dtype=tf.int32),\n", 697 | " \"segment_ids\":\n", 698 | " tf.constant(\n", 699 | " all_segment_ids,\n", 700 | " shape=[num_examples, seq_length],\n", 701 | " dtype=tf.int32),\n", 702 | " \"label_ids\":\n", 703 | " tf.constant(all_label_ids, shape=[num_examples, len(LABEL_COLUMNS)], dtype=tf.int32),\n", 704 | " })\n", 705 | "\n", 706 | " if is_training:\n", 707 | " d = d.repeat()\n", 708 | " d = d.shuffle(buffer_size=100)\n", 709 | "\n", 710 | " d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)\n", 711 | " return d\n", 712 | "\n", 713 | " return input_fn\n" 714 | ] 715 | }, 716 | { 717 | "cell_type": "code", 718 | "execution_count": 20, 719 | "metadata": {}, 720 | "outputs": [], 721 | "source": [ 722 | "class PaddingInputExample(object):\n", 723 | " \"\"\"Fake example so the num input examples is a multiple of the batch size.\n", 724 | " When running eval/predict on the TPU, we need to pad the number of examples\n", 725 | " to be a multiple of the batch size, because the TPU requires a fixed batch\n", 726 | " size. 
The alternative is to drop the last batch, which is bad because it means\n", 727 | " the entire output data won't be generated.\n", 728 | " We use this class instead of `None` because treating `None` as padding\n", 729 | " battches could cause silent errors.\n", 730 | " \"\"\"" 731 | ] 732 | }, 733 | { 734 | "cell_type": "code", 735 | "execution_count": 21, 736 | "metadata": {}, 737 | "outputs": [], 738 | "source": [ 739 | "def convert_single_example(ex_index, example, max_seq_length,\n", 740 | " tokenizer):\n", 741 | " \"\"\"Converts a single `InputExample` into a single `InputFeatures`.\"\"\"\n", 742 | "\n", 743 | " if isinstance(example, PaddingInputExample):\n", 744 | " return InputFeatures(\n", 745 | " input_ids=[0] * max_seq_length,\n", 746 | " input_mask=[0] * max_seq_length,\n", 747 | " segment_ids=[0] * max_seq_length,\n", 748 | " label_ids=0,\n", 749 | " is_real_example=False)\n", 750 | "\n", 751 | " tokens_a = tokenizer.tokenize(example.text_a)\n", 752 | " tokens_b = None\n", 753 | " if example.text_b:\n", 754 | " tokens_b = tokenizer.tokenize(example.text_b)\n", 755 | "\n", 756 | " if tokens_b:\n", 757 | " # Modifies `tokens_a` and `tokens_b` in place so that the total\n", 758 | " # length is less than the specified length.\n", 759 | " # Account for [CLS], [SEP], [SEP] with \"- 3\"\n", 760 | " _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n", 761 | " else:\n", 762 | " # Account for [CLS] and [SEP] with \"- 2\"\n", 763 | " if len(tokens_a) > max_seq_length - 2:\n", 764 | " tokens_a = tokens_a[0:(max_seq_length - 2)]\n", 765 | "\n", 766 | " # The convention in BERT is:\n", 767 | " # (a) For sequence pairs:\n", 768 | " # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n", 769 | " # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n", 770 | " # (b) For single sequences:\n", 771 | " # tokens: [CLS] the dog is hairy . [SEP]\n", 772 | " # type_ids: 0 0 0 0 0 0 0\n", 773 | " #\n", 774 | " # Where \"type_ids\" are used to indicate whether this is the first\n", 775 | " # sequence or the second sequence. The embedding vectors for `type=0` and\n", 776 | " # `type=1` were learned during pre-training and are added to the wordpiece\n", 777 | " # embedding vector (and position vector). This is not *strictly* necessary\n", 778 | " # since the [SEP] token unambiguously separates the sequences, but it makes\n", 779 | " # it easier for the model to learn the concept of sequences.\n", 780 | " #\n", 781 | " # For classification tasks, the first vector (corresponding to [CLS]) is\n", 782 | " # used as the \"sentence vector\". Note that this only makes sense because\n", 783 | " # the entire model is fine-tuned.\n", 784 | " tokens = []\n", 785 | " segment_ids = []\n", 786 | " tokens.append(\"[CLS]\")\n", 787 | " segment_ids.append(0)\n", 788 | " for token in tokens_a:\n", 789 | " tokens.append(token)\n", 790 | " segment_ids.append(0)\n", 791 | " tokens.append(\"[SEP]\")\n", 792 | " segment_ids.append(0)\n", 793 | "\n", 794 | " if tokens_b:\n", 795 | " for token in tokens_b:\n", 796 | " tokens.append(token)\n", 797 | " segment_ids.append(1)\n", 798 | " tokens.append(\"[SEP]\")\n", 799 | " segment_ids.append(1)\n", 800 | "\n", 801 | " input_ids = tokenizer.convert_tokens_to_ids(tokens)\n", 802 | "\n", 803 | " # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n", 804 | " # tokens are attended to.\n", 805 | " input_mask = [1] * len(input_ids)\n", 806 | "\n", 807 | " # Zero-pad up to the sequence length.\n", 808 | " while len(input_ids) < max_seq_length:\n", 809 | " input_ids.append(0)\n", 810 | " input_mask.append(0)\n", 811 | " segment_ids.append(0)\n", 812 | "\n", 813 | " assert len(input_ids) == max_seq_length\n", 814 | " assert len(input_mask) == max_seq_length\n", 815 | " assert len(segment_ids) == max_seq_length\n", 816 | "\n", 817 | " labels_ids = []\n", 818 | " for label in example.labels:\n", 819 | " labels_ids.append(int(label))\n", 820 | "\n", 821 | "\n", 822 | " feature = InputFeatures(\n", 823 | " input_ids=input_ids,\n", 824 | " input_mask=input_mask,\n", 825 | " segment_ids=segment_ids,\n", 826 | " label_ids=labels_ids,\n", 827 | " is_real_example=True)\n", 828 | " return feature\n", 829 | "\n", 830 | "\n", 831 | "def file_based_convert_examples_to_features(\n", 832 | " examples, max_seq_length, tokenizer, output_file):\n", 833 | " \"\"\"Convert a set of `InputExample`s to a TFRecord file.\"\"\"\n", 834 | "\n", 835 | " writer = tf.python_io.TFRecordWriter(output_file)\n", 836 | "\n", 837 | " for (ex_index, example) in enumerate(examples):\n", 838 | " #if ex_index % 10000 == 0:\n", 839 | " #tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n", 840 | "\n", 841 | " feature = convert_single_example(ex_index, example,\n", 842 | " max_seq_length, tokenizer)\n", 843 | "\n", 844 | " def create_int_feature(values):\n", 845 | " f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n", 846 | " return f\n", 847 | "\n", 848 | " features = collections.OrderedDict()\n", 849 | " features[\"input_ids\"] = create_int_feature(feature.input_ids)\n", 850 | " features[\"input_mask\"] = create_int_feature(feature.input_mask)\n", 851 | " features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n", 852 | " features[\"is_real_example\"] = create_int_feature(\n", 853 | " [int(feature.is_real_example)])\n", 854 | " if isinstance(feature.label_ids, list):\n", 855 | " label_ids = feature.label_ids\n", 856 | " else:\n", 857 | " label_ids = feature.label_ids[0]\n", 858 | " features[\"label_ids\"] = create_int_feature(label_ids)\n", 859 | "\n", 860 | " tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n", 861 | " writer.write(tf_example.SerializeToString())\n", 862 | " writer.close()\n", 863 | "\n", 864 | "\n", 865 | "def file_based_input_fn_builder(input_file, seq_length, is_training,\n", 866 | " drop_remainder):\n", 867 | " \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n", 868 | "\n", 869 | " name_to_features = {\n", 870 | " \"input_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n", 871 | " \"input_mask\": tf.FixedLenFeature([seq_length], tf.int64),\n", 872 | " \"segment_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n", 873 | " \"label_ids\": tf.FixedLenFeature([6], tf.int64),\n", 874 | " \"is_real_example\": tf.FixedLenFeature([], tf.int64),\n", 875 | " }\n", 876 | "\n", 877 | " def _decode_record(record, name_to_features):\n", 878 | " \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n", 879 | " example = tf.parse_single_example(record, name_to_features)\n", 880 | "\n", 881 | " # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n", 882 | " # So cast all int64 to int32.\n", 883 | " for name in list(example.keys()):\n", 884 | " t = example[name]\n", 885 | " if t.dtype == tf.int64:\n", 886 | " t = 
tf.to_int32(t)\n", 887 | " example[name] = t\n", 888 | "\n", 889 | " return example\n", 890 | "\n", 891 | " def input_fn(params):\n", 892 | " \"\"\"The actual input function.\"\"\"\n", 893 | " batch_size = params[\"batch_size\"]\n", 894 | "\n", 895 | " # For training, we want a lot of parallel reading and shuffling.\n", 896 | " # For eval, we want no shuffling and parallel reading doesn't matter.\n", 897 | " d = tf.data.TFRecordDataset(input_file)\n", 898 | " if is_training:\n", 899 | " d = d.repeat()\n", 900 | " d = d.shuffle(buffer_size=100)\n", 901 | "\n", 902 | " d = d.apply(\n", 903 | " tf.contrib.data.map_and_batch(\n", 904 | " lambda record: _decode_record(record, name_to_features),\n", 905 | " batch_size=batch_size,\n", 906 | " drop_remainder=drop_remainder))\n", 907 | "\n", 908 | " return d\n", 909 | "\n", 910 | " return input_fn\n", 911 | "\n", 912 | "\n", 913 | "def _truncate_seq_pair(tokens_a, tokens_b, max_length):\n", 914 | " \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n", 915 | "\n", 916 | " # This is a simple heuristic which will always truncate the longer sequence\n", 917 | " # one token at a time. This makes more sense than truncating an equal percent\n", 918 | " # of tokens from each, since if one sequence is very short then each token\n", 919 | " # that's truncated likely contains more information than a longer sequence.\n", 920 | " while True:\n", 921 | " total_length = len(tokens_a) + len(tokens_b)\n", 922 | " if total_length <= max_length:\n", 923 | " break\n", 924 | " if len(tokens_a) > len(tokens_b):\n", 925 | " tokens_a.pop()\n", 926 | " else:\n", 927 | " tokens_b.pop()" 928 | ] 929 | }, 930 | { 931 | "cell_type": "code", 932 | "execution_count": 22, 933 | "metadata": {}, 934 | "outputs": [], 935 | "source": [ 936 | "#from pathlib import Path\n", 937 | "train_file = os.path.join('../working', \"train.tf_record\")\n", 938 | "#filename = Path(train_file)\n", 939 | "if not os.path.exists(train_file):\n", 940 | " open(train_file, 'w').close()" 941 | ] 942 | }, 943 | { 944 | "cell_type": "markdown", 945 | "metadata": {}, 946 | "source": [ 947 | "train_features = convert_examples_to_features(\n", 948 | " train_examples, MAX_SEQ_LENGTH, tokenizer)" 949 | ] 950 | }, 951 | { 952 | "cell_type": "markdown", 953 | "metadata": {}, 954 | "source": [ 955 | "# Create an input function for training. 
drop_remainder = True for using TPUs.\n", 956 | "train_input_fn = input_fn_builder(\n", 957 | " features=train_features,\n", 958 | " seq_length=MAX_SEQ_LENGTH,\n", 959 | " is_training=True,\n", 960 | " drop_remainder=False)" 961 | ] 962 | }, 963 | { 964 | "cell_type": "code", 965 | "execution_count": 23, 966 | "metadata": {}, 967 | "outputs": [], 968 | "source": [ 969 | "# Compute # train and warmup steps from batch size\n", 970 | "num_train_steps = int(len(train_examples) / BATCH_SIZE * NUM_TRAIN_EPOCHS)\n", 971 | "num_warmup_steps = int(num_train_steps * WARMUP_PROPORTION)" 972 | ] 973 | }, 974 | { 975 | "cell_type": "code", 976 | "execution_count": 24, 977 | "metadata": {}, 978 | "outputs": [ 979 | { 980 | "name": "stdout", 981 | "output_type": "stream", 982 | "text": [ 983 | "INFO:tensorflow:***** Running training *****\n" 984 | ] 985 | }, 986 | { 987 | "name": "stderr", 988 | "output_type": "stream", 989 | "text": [ 990 | "I0511 15:12:56.106339 139818736010624 :3] ***** Running training *****\n" 991 | ] 992 | }, 993 | { 994 | "name": "stdout", 995 | "output_type": "stream", 996 | "text": [ 997 | "INFO:tensorflow: Num examples = 143613\n" 998 | ] 999 | }, 1000 | { 1001 | "name": "stderr", 1002 | "output_type": "stream", 1003 | "text": [ 1004 | "I0511 15:12:56.108179 139818736010624 :4] Num examples = 143613\n" 1005 | ] 1006 | }, 1007 | { 1008 | "name": "stdout", 1009 | "output_type": "stream", 1010 | "text": [ 1011 | "INFO:tensorflow: Batch size = 32\n" 1012 | ] 1013 | }, 1014 | { 1015 | "name": "stderr", 1016 | "output_type": "stream", 1017 | "text": [ 1018 | "I0511 15:12:56.109584 139818736010624 :5] Batch size = 32\n" 1019 | ] 1020 | }, 1021 | { 1022 | "name": "stdout", 1023 | "output_type": "stream", 1024 | "text": [ 1025 | "INFO:tensorflow: Num steps = 8975\n" 1026 | ] 1027 | }, 1028 | { 1029 | "name": "stderr", 1030 | "output_type": "stream", 1031 | "text": [ 1032 | "I0511 15:12:56.110869 139818736010624 :6] Num steps = 8975\n" 1033 | ] 1034 | } 1035 | ], 1036 | "source": [ 1037 | "file_based_convert_examples_to_features(\n", 1038 | " train_examples, MAX_SEQ_LENGTH, tokenizer, train_file)\n", 1039 | "tf.logging.info(\"***** Running training *****\")\n", 1040 | "tf.logging.info(\" Num examples = %d\", len(train_examples))\n", 1041 | "tf.logging.info(\" Batch size = %d\", BATCH_SIZE)\n", 1042 | "tf.logging.info(\" Num steps = %d\", num_train_steps)\n" 1043 | ] 1044 | }, 1045 | { 1046 | "cell_type": "code", 1047 | "execution_count": 25, 1048 | "metadata": {}, 1049 | "outputs": [], 1050 | "source": [ 1051 | "train_input_fn = file_based_input_fn_builder(\n", 1052 | " input_file=train_file,\n", 1053 | " seq_length=MAX_SEQ_LENGTH,\n", 1054 | " is_training=True,\n", 1055 | " drop_remainder=True)" 1056 | ] 1057 | }, 1058 | { 1059 | "cell_type": "code", 1060 | "execution_count": 26, 1061 | "metadata": {}, 1062 | "outputs": [ 1063 | { 1064 | "name": "stdout", 1065 | "output_type": "stream", 1066 | "text": [ 1067 | "INFO:tensorflow:Using config: {'_model_dir': '../working/output', '_tf_random_seed': None, '_save_summary_steps': 500, '_save_checkpoints_steps': 1000, '_save_checkpoints_secs': None, '_session_config': allow_soft_placement: true\n", 1068 | "graph_options {\n", 1069 | " rewrite_options {\n", 1070 | " meta_optimizer_iterations: ONE\n", 1071 | " }\n", 1072 | "}\n", 1073 | ", '_keep_checkpoint_max': 1, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 100, '_train_distribute': None, '_device_fn': None, '_protocol': None, '_eval_distribute': None, 
'_experimental_distribute': None, '_service': None, '_cluster_spec': , '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1}\n" 1074 | ] 1075 | }, 1076 | { 1077 | "name": "stderr", 1078 | "output_type": "stream", 1079 | "text": [ 1080 | "I0511 15:12:56.151041 139818736010624 estimator.py:201] Using config: {'_model_dir': '../working/output', '_tf_random_seed': None, '_save_summary_steps': 500, '_save_checkpoints_steps': 1000, '_save_checkpoints_secs': None, '_session_config': allow_soft_placement: true\n", 1081 | "graph_options {\n", 1082 | " rewrite_options {\n", 1083 | " meta_optimizer_iterations: ONE\n", 1084 | " }\n", 1085 | "}\n", 1086 | ", '_keep_checkpoint_max': 1, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 100, '_train_distribute': None, '_device_fn': None, '_protocol': None, '_eval_distribute': None, '_experimental_distribute': None, '_service': None, '_cluster_spec': , '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1}\n" 1087 | ] 1088 | } 1089 | ], 1090 | "source": [ 1091 | "bert_config = modeling.BertConfig.from_json_file(BERT_CONFIG)\n", 1092 | "model_fn = model_fn_builder(\n", 1093 | " bert_config=bert_config,\n", 1094 | " num_labels= len(LABEL_COLUMNS),\n", 1095 | " init_checkpoint=BERT_INIT_CHKPNT,\n", 1096 | " learning_rate=LEARNING_RATE,\n", 1097 | " num_train_steps=num_train_steps,\n", 1098 | " num_warmup_steps=num_warmup_steps,\n", 1099 | " use_tpu=False,\n", 1100 | " use_one_hot_embeddings=False)\n", 1101 | "\n", 1102 | "estimator = tf.estimator.Estimator(\n", 1103 | " model_fn=model_fn,\n", 1104 | " config=run_config,\n", 1105 | " params={\"batch_size\": BATCH_SIZE})" 1106 | ] 1107 | }, 1108 | { 1109 | "cell_type": "code", 1110 | "execution_count": 27, 1111 | "metadata": {}, 1112 | "outputs": [ 1113 | { 1114 | "name": "stdout", 1115 | "output_type": "stream", 1116 | "text": [ 1117 | "Beginning Training!\n", 1118 | "WARNING:tensorflow:From /opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n", 1119 | "Instructions for updating:\n", 1120 | "Colocations handled automatically by placer.\n" 1121 | ] 1122 | }, 1123 | { 1124 | "name": "stderr", 1125 | "output_type": "stream", 1126 | "text": [ 1127 | "W0511 15:12:56.179253 139818736010624 deprecation.py:323] From /opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n", 1128 | "Instructions for updating:\n", 1129 | "Colocations handled automatically by placer.\n" 1130 | ] 1131 | }, 1132 | { 1133 | "name": "stdout", 1134 | "output_type": "stream", 1135 | "text": [ 1136 | "\n", 1137 | "WARNING: The TensorFlow contrib module will not be included in TensorFlow 2.0.\n", 1138 | "For more information, please see:\n", 1139 | " * https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md\n", 1140 | " * https://github.com/tensorflow/addons\n", 1141 | "If you depend on functionality not listed there, please file an issue.\n", 1142 | "\n", 1143 | "WARNING:tensorflow:From :168: map_and_batch (from tensorflow.contrib.data.python.ops.batching) is deprecated and 
will be removed in a future version.\n", 1144 | "Instructions for updating:\n", 1145 | "Use `tf.data.experimental.map_and_batch(...)`.\n" 1146 | ] 1147 | }, 1148 | { 1149 | "name": "stderr", 1150 | "output_type": "stream", 1151 | "text": [ 1152 | "W0511 15:12:57.607403 139818736010624 deprecation.py:323] From :168: map_and_batch (from tensorflow.contrib.data.python.ops.batching) is deprecated and will be removed in a future version.\n", 1153 | "Instructions for updating:\n", 1154 | "Use `tf.data.experimental.map_and_batch(...)`.\n" 1155 | ] 1156 | }, 1157 | { 1158 | "name": "stdout", 1159 | "output_type": "stream", 1160 | "text": [ 1161 | "WARNING:tensorflow:From :148: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n", 1162 | "Instructions for updating:\n", 1163 | "Use tf.cast instead.\n" 1164 | ] 1165 | }, 1166 | { 1167 | "name": "stderr", 1168 | "output_type": "stream", 1169 | "text": [ 1170 | "W0511 15:12:57.623719 139818736010624 deprecation.py:323] From :148: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n", 1171 | "Instructions for updating:\n", 1172 | "Use tf.cast instead.\n" 1173 | ] 1174 | }, 1175 | { 1176 | "name": "stdout", 1177 | "output_type": "stream", 1178 | "text": [ 1179 | "INFO:tensorflow:Calling model_fn.\n" 1180 | ] 1181 | }, 1182 | { 1183 | "name": "stderr", 1184 | "output_type": "stream", 1185 | "text": [ 1186 | "I0511 15:12:57.648558 139818736010624 estimator.py:1111] Calling model_fn.\n" 1187 | ] 1188 | }, 1189 | { 1190 | "name": "stdout", 1191 | "output_type": "stream", 1192 | "text": [ 1193 | "WARNING:tensorflow:From /opt/conda/lib/python3.6/site-packages/bert/modeling.py:358: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\n", 1194 | "Instructions for updating:\n", 1195 | "Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.\n" 1196 | ] 1197 | }, 1198 | { 1199 | "name": "stderr", 1200 | "output_type": "stream", 1201 | "text": [ 1202 | "W0511 15:12:57.713801 139818736010624 deprecation.py:506] From /opt/conda/lib/python3.6/site-packages/bert/modeling.py:358: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\n", 1203 | "Instructions for updating:\n", 1204 | "Please use `rate` instead of `keep_prob`. 
Rate should be set to `rate = 1 - keep_prob`.\n" 1205 | ] 1206 | }, 1207 | { 1208 | "name": "stdout", 1209 | "output_type": "stream", 1210 | "text": [ 1211 | "WARNING:tensorflow:From /opt/conda/lib/python3.6/site-packages/bert/modeling.py:671: dense (from tensorflow.python.layers.core) is deprecated and will be removed in a future version.\n", 1212 | "Instructions for updating:\n", 1213 | "Use keras.layers.dense instead.\n" 1214 | ] 1215 | }, 1216 | { 1217 | "name": "stderr", 1218 | "output_type": "stream", 1219 | "text": [ 1220 | "W0511 15:12:57.731926 139818736010624 deprecation.py:323] From /opt/conda/lib/python3.6/site-packages/bert/modeling.py:671: dense (from tensorflow.python.layers.core) is deprecated and will be removed in a future version.\n", 1221 | "Instructions for updating:\n", 1222 | "Use keras.layers.dense instead.\n" 1223 | ] 1224 | }, 1225 | { 1226 | "name": "stdout", 1227 | "output_type": "stream", 1228 | "text": [ 1229 | "INFO:tensorflow:num_labels:6;logits:Tensor(\"loss/BiasAdd:0\", shape=(32, 6), dtype=float32);labels:Tensor(\"loss/Cast:0\", shape=(32, 6), dtype=float32)\n" 1230 | ] 1231 | }, 1232 | { 1233 | "name": "stderr", 1234 | "output_type": "stream", 1235 | "text": [ 1236 | "I0511 15:13:00.063098 139818736010624 :40] num_labels:6;logits:Tensor(\"loss/BiasAdd:0\", shape=(32, 6), dtype=float32);labels:Tensor(\"loss/Cast:0\", shape=(32, 6), dtype=float32)\n" 1237 | ] 1238 | }, 1239 | { 1240 | "name": "stdout", 1241 | "output_type": "stream", 1242 | "text": [ 1243 | "INFO:tensorflow:**** Trainable Variables ****\n" 1244 | ] 1245 | }, 1246 | { 1247 | "name": "stderr", 1248 | "output_type": "stream", 1249 | "text": [ 1250 | "I0511 15:13:00.822713 139818736010624 :99] **** Trainable Variables ****\n" 1251 | ] 1252 | }, 1253 | { 1254 | "name": "stdout", 1255 | "output_type": "stream", 1256 | "text": [ 1257 | "WARNING:tensorflow:From /opt/conda/lib/python3.6/site-packages/tensorflow/python/training/learning_rate_decay_v2.py:321: div (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n", 1258 | "Instructions for updating:\n", 1259 | "Deprecated in favor of operator or tf.math.divide.\n" 1260 | ] 1261 | }, 1262 | { 1263 | "name": "stderr", 1264 | "output_type": "stream", 1265 | "text": [ 1266 | "W0511 15:13:00.832870 139818736010624 deprecation.py:323] From /opt/conda/lib/python3.6/site-packages/tensorflow/python/training/learning_rate_decay_v2.py:321: div (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n", 1267 | "Instructions for updating:\n", 1268 | "Deprecated in favor of operator or tf.math.divide.\n" 1269 | ] 1270 | }, 1271 | { 1272 | "name": "stdout", 1273 | "output_type": "stream", 1274 | "text": [ 1275 | "INFO:tensorflow:Done calling model_fn.\n" 1276 | ] 1277 | }, 1278 | { 1279 | "name": "stderr", 1280 | "output_type": "stream", 1281 | "text": [ 1282 | "I0511 15:13:09.499606 139818736010624 estimator.py:1113] Done calling model_fn.\n" 1283 | ] 1284 | }, 1285 | { 1286 | "name": "stdout", 1287 | "output_type": "stream", 1288 | "text": [ 1289 | "INFO:tensorflow:Create CheckpointSaverHook.\n" 1290 | ] 1291 | }, 1292 | { 1293 | "name": "stderr", 1294 | "output_type": "stream", 1295 | "text": [ 1296 | "I0511 15:13:09.503598 139818736010624 basic_session_run_hooks.py:527] Create CheckpointSaverHook.\n" 1297 | ] 1298 | }, 1299 | { 1300 | "name": "stdout", 1301 | "output_type": "stream", 1302 | "text": [ 1303 | "INFO:tensorflow:Graph was finalized.\n" 1304 | ] 1305 | }, 1306 | { 1307 | 
"name": "stderr", 1308 | "output_type": "stream", 1309 | "text": [ 1310 | "I0511 15:13:12.968873 139818736010624 monitored_session.py:222] Graph was finalized.\n" 1311 | ] 1312 | }, 1313 | { 1314 | "name": "stdout", 1315 | "output_type": "stream", 1316 | "text": [ 1317 | "INFO:tensorflow:Running local_init_op.\n" 1318 | ] 1319 | }, 1320 | { 1321 | "name": "stderr", 1322 | "output_type": "stream", 1323 | "text": [ 1324 | "I0511 15:13:22.003614 139818736010624 session_manager.py:491] Running local_init_op.\n" 1325 | ] 1326 | }, 1327 | { 1328 | "name": "stdout", 1329 | "output_type": "stream", 1330 | "text": [ 1331 | "INFO:tensorflow:Done running local_init_op.\n" 1332 | ] 1333 | }, 1334 | { 1335 | "name": "stderr", 1336 | "output_type": "stream", 1337 | "text": [ 1338 | "I0511 15:13:22.227093 139818736010624 session_manager.py:493] Done running local_init_op.\n" 1339 | ] 1340 | }, 1341 | { 1342 | "name": "stdout", 1343 | "output_type": "stream", 1344 | "text": [ 1345 | "INFO:tensorflow:Saving checkpoints for 0 into ../working/output/model.ckpt.\n" 1346 | ] 1347 | }, 1348 | { 1349 | "name": "stderr", 1350 | "output_type": "stream", 1351 | "text": [ 1352 | "I0511 15:13:29.902737 139818736010624 basic_session_run_hooks.py:594] Saving checkpoints for 0 into ../working/output/model.ckpt.\n" 1353 | ] 1354 | }, 1355 | { 1356 | "name": "stdout", 1357 | "output_type": "stream", 1358 | "text": [ 1359 | "INFO:tensorflow:loss = 0.7021637, step = 0\n" 1360 | ] 1361 | }, 1362 | { 1363 | "name": "stderr", 1364 | "output_type": "stream", 1365 | "text": [ 1366 | "I0511 15:13:50.184201 139818736010624 basic_session_run_hooks.py:249] loss = 0.7021637, step = 0\n" 1367 | ] 1368 | }, 1369 | { 1370 | "name": "stdout", 1371 | "output_type": "stream", 1372 | "text": [ 1373 | "INFO:tensorflow:global_step/sec: 1.73234\n" 1374 | ] 1375 | }, 1376 | { 1377 | "name": "stderr", 1378 | "output_type": "stream", 1379 | "text": [ 1380 | "I0511 15:14:47.908645 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 1.73234\n" 1381 | ] 1382 | }, 1383 | { 1384 | "name": "stdout", 1385 | "output_type": "stream", 1386 | "text": [ 1387 | "INFO:tensorflow:loss = 0.28336847, step = 100 (57.727 sec)\n" 1388 | ] 1389 | }, 1390 | { 1391 | "name": "stderr", 1392 | "output_type": "stream", 1393 | "text": [ 1394 | "I0511 15:14:47.911085 139818736010624 basic_session_run_hooks.py:247] loss = 0.28336847, step = 100 (57.727 sec)\n" 1395 | ] 1396 | }, 1397 | { 1398 | "name": "stdout", 1399 | "output_type": "stream", 1400 | "text": [ 1401 | "INFO:tensorflow:global_step/sec: 2.08897\n" 1402 | ] 1403 | }, 1404 | { 1405 | "name": "stderr", 1406 | "output_type": "stream", 1407 | "text": [ 1408 | "I0511 15:15:35.779167 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.08897\n" 1409 | ] 1410 | }, 1411 | { 1412 | "name": "stdout", 1413 | "output_type": "stream", 1414 | "text": [ 1415 | "INFO:tensorflow:loss = 0.14324428, step = 200 (47.870 sec)\n" 1416 | ] 1417 | }, 1418 | { 1419 | "name": "stderr", 1420 | "output_type": "stream", 1421 | "text": [ 1422 | "I0511 15:15:35.781364 139818736010624 basic_session_run_hooks.py:247] loss = 0.14324428, step = 200 (47.870 sec)\n" 1423 | ] 1424 | }, 1425 | { 1426 | "name": "stdout", 1427 | "output_type": "stream", 1428 | "text": [ 1429 | "INFO:tensorflow:global_step/sec: 2.08972\n" 1430 | ] 1431 | }, 1432 | { 1433 | "name": "stderr", 1434 | "output_type": "stream", 1435 | "text": [ 1436 | "I0511 15:16:23.632626 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.08972\n" 
1437 | ] 1438 | }, 1439 | { 1440 | "name": "stdout", 1441 | "output_type": "stream", 1442 | "text": [ 1443 | "INFO:tensorflow:loss = 0.09183476, step = 300 (47.854 sec)\n" 1444 | ] 1445 | }, 1446 | { 1447 | "name": "stderr", 1448 | "output_type": "stream", 1449 | "text": [ 1450 | "I0511 15:16:23.634889 139818736010624 basic_session_run_hooks.py:247] loss = 0.09183476, step = 300 (47.854 sec)\n" 1451 | ] 1452 | }, 1453 | { 1454 | "name": "stdout", 1455 | "output_type": "stream", 1456 | "text": [ 1457 | "INFO:tensorflow:global_step/sec: 2.08995\n" 1458 | ] 1459 | }, 1460 | { 1461 | "name": "stderr", 1462 | "output_type": "stream", 1463 | "text": [ 1464 | "I0511 15:17:11.480717 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.08995\n" 1465 | ] 1466 | }, 1467 | { 1468 | "name": "stdout", 1469 | "output_type": "stream", 1470 | "text": [ 1471 | "INFO:tensorflow:loss = 0.06256502, step = 400 (47.848 sec)\n" 1472 | ] 1473 | }, 1474 | { 1475 | "name": "stderr", 1476 | "output_type": "stream", 1477 | "text": [ 1478 | "I0511 15:17:11.482617 139818736010624 basic_session_run_hooks.py:247] loss = 0.06256502, step = 400 (47.848 sec)\n" 1479 | ] 1480 | }, 1481 | { 1482 | "name": "stdout", 1483 | "output_type": "stream", 1484 | "text": [ 1485 | "INFO:tensorflow:global_step/sec: 1.89989\n" 1486 | ] 1487 | }, 1488 | { 1489 | "name": "stderr", 1490 | "output_type": "stream", 1491 | "text": [ 1492 | "I0511 15:18:04.115317 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 1.89989\n" 1493 | ] 1494 | }, 1495 | { 1496 | "name": "stdout", 1497 | "output_type": "stream", 1498 | "text": [ 1499 | "INFO:tensorflow:loss = 0.0674735, step = 500 (52.635 sec)\n" 1500 | ] 1501 | }, 1502 | { 1503 | "name": "stderr", 1504 | "output_type": "stream", 1505 | "text": [ 1506 | "I0511 15:18:04.117643 139818736010624 basic_session_run_hooks.py:247] loss = 0.0674735, step = 500 (52.635 sec)\n" 1507 | ] 1508 | }, 1509 | { 1510 | "name": "stdout", 1511 | "output_type": "stream", 1512 | "text": [ 1513 | "INFO:tensorflow:global_step/sec: 2.08939\n" 1514 | ] 1515 | }, 1516 | { 1517 | "name": "stderr", 1518 | "output_type": "stream", 1519 | "text": [ 1520 | "I0511 15:18:51.976191 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.08939\n" 1521 | ] 1522 | }, 1523 | { 1524 | "name": "stdout", 1525 | "output_type": "stream", 1526 | "text": [ 1527 | "INFO:tensorflow:loss = 0.076334186, step = 600 (47.860 sec)\n" 1528 | ] 1529 | }, 1530 | { 1531 | "name": "stderr", 1532 | "output_type": "stream", 1533 | "text": [ 1534 | "I0511 15:18:51.978075 139818736010624 basic_session_run_hooks.py:247] loss = 0.076334186, step = 600 (47.860 sec)\n" 1535 | ] 1536 | }, 1537 | { 1538 | "name": "stdout", 1539 | "output_type": "stream", 1540 | "text": [ 1541 | "INFO:tensorflow:global_step/sec: 2.09001\n" 1542 | ] 1543 | }, 1544 | { 1545 | "name": "stderr", 1546 | "output_type": "stream", 1547 | "text": [ 1548 | "I0511 15:19:39.822923 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09001\n" 1549 | ] 1550 | }, 1551 | { 1552 | "name": "stdout", 1553 | "output_type": "stream", 1554 | "text": [ 1555 | "INFO:tensorflow:loss = 0.08210529, step = 700 (47.847 sec)\n" 1556 | ] 1557 | }, 1558 | { 1559 | "name": "stderr", 1560 | "output_type": "stream", 1561 | "text": [ 1562 | "I0511 15:19:39.824764 139818736010624 basic_session_run_hooks.py:247] loss = 0.08210529, step = 700 (47.847 sec)\n" 1563 | ] 1564 | }, 1565 | { 1566 | "name": "stdout", 1567 | "output_type": "stream", 1568 | "text": [ 1569 | 
"INFO:tensorflow:global_step/sec: 2.09067\n" 1570 | ] 1571 | }, 1572 | { 1573 | "name": "stderr", 1574 | "output_type": "stream", 1575 | "text": [ 1576 | "I0511 15:20:27.654547 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09067\n" 1577 | ] 1578 | }, 1579 | { 1580 | "name": "stdout", 1581 | "output_type": "stream", 1582 | "text": [ 1583 | "INFO:tensorflow:loss = 0.1338317, step = 800 (47.832 sec)\n" 1584 | ] 1585 | }, 1586 | { 1587 | "name": "stderr", 1588 | "output_type": "stream", 1589 | "text": [ 1590 | "I0511 15:20:27.657091 139818736010624 basic_session_run_hooks.py:247] loss = 0.1338317, step = 800 (47.832 sec)\n" 1591 | ] 1592 | }, 1593 | { 1594 | "name": "stdout", 1595 | "output_type": "stream", 1596 | "text": [ 1597 | "INFO:tensorflow:global_step/sec: 2.09074\n" 1598 | ] 1599 | }, 1600 | { 1601 | "name": "stderr", 1602 | "output_type": "stream", 1603 | "text": [ 1604 | "I0511 15:21:15.484564 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09074\n" 1605 | ] 1606 | }, 1607 | { 1608 | "name": "stdout", 1609 | "output_type": "stream", 1610 | "text": [ 1611 | "INFO:tensorflow:loss = 0.051775124, step = 900 (47.829 sec)\n" 1612 | ] 1613 | }, 1614 | { 1615 | "name": "stderr", 1616 | "output_type": "stream", 1617 | "text": [ 1618 | "I0511 15:21:15.486547 139818736010624 basic_session_run_hooks.py:247] loss = 0.051775124, step = 900 (47.829 sec)\n" 1619 | ] 1620 | }, 1621 | { 1622 | "name": "stdout", 1623 | "output_type": "stream", 1624 | "text": [ 1625 | "INFO:tensorflow:Saving checkpoints for 1000 into ../working/output/model.ckpt.\n" 1626 | ] 1627 | }, 1628 | { 1629 | "name": "stderr", 1630 | "output_type": "stream", 1631 | "text": [ 1632 | "I0511 15:22:02.836619 139818736010624 basic_session_run_hooks.py:594] Saving checkpoints for 1000 into ../working/output/model.ckpt.\n" 1633 | ] 1634 | }, 1635 | { 1636 | "name": "stdout", 1637 | "output_type": "stream", 1638 | "text": [ 1639 | "WARNING:tensorflow:From /opt/conda/lib/python3.6/site-packages/tensorflow/python/training/saver.py:966: remove_checkpoint (from tensorflow.python.training.checkpoint_management) is deprecated and will be removed in a future version.\n", 1640 | "Instructions for updating:\n", 1641 | "Use standard file APIs to delete files with this prefix.\n" 1642 | ] 1643 | }, 1644 | { 1645 | "name": "stderr", 1646 | "output_type": "stream", 1647 | "text": [ 1648 | "W0511 15:22:04.903683 139818736010624 deprecation.py:323] From /opt/conda/lib/python3.6/site-packages/tensorflow/python/training/saver.py:966: remove_checkpoint (from tensorflow.python.training.checkpoint_management) is deprecated and will be removed in a future version.\n", 1649 | "Instructions for updating:\n", 1650 | "Use standard file APIs to delete files with this prefix.\n" 1651 | ] 1652 | }, 1653 | { 1654 | "name": "stdout", 1655 | "output_type": "stream", 1656 | "text": [ 1657 | "INFO:tensorflow:global_step/sec: 1.94542\n" 1658 | ] 1659 | }, 1660 | { 1661 | "name": "stderr", 1662 | "output_type": "stream", 1663 | "text": [ 1664 | "I0511 15:22:06.887330 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 1.94542\n" 1665 | ] 1666 | }, 1667 | { 1668 | "name": "stdout", 1669 | "output_type": "stream", 1670 | "text": [ 1671 | "INFO:tensorflow:loss = 0.037178464, step = 1000 (51.403 sec)\n" 1672 | ] 1673 | }, 1674 | { 1675 | "name": "stderr", 1676 | "output_type": "stream", 1677 | "text": [ 1678 | "I0511 15:22:06.889229 139818736010624 basic_session_run_hooks.py:247] loss = 0.037178464, step = 1000 (51.403 
sec)\n" 1679 | ] 1680 | }, 1681 | { 1682 | "name": "stdout", 1683 | "output_type": "stream", 1684 | "text": [ 1685 | "INFO:tensorflow:global_step/sec: 1.84858\n" 1686 | ] 1687 | }, 1688 | { 1689 | "name": "stderr", 1690 | "output_type": "stream", 1691 | "text": [ 1692 | "I0511 15:23:00.982815 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 1.84858\n" 1693 | ] 1694 | }, 1695 | { 1696 | "name": "stdout", 1697 | "output_type": "stream", 1698 | "text": [ 1699 | "INFO:tensorflow:loss = 0.032071147, step = 1100 (54.096 sec)\n" 1700 | ] 1701 | }, 1702 | { 1703 | "name": "stderr", 1704 | "output_type": "stream", 1705 | "text": [ 1706 | "I0511 15:23:00.985308 139818736010624 basic_session_run_hooks.py:247] loss = 0.032071147, step = 1100 (54.096 sec)\n" 1707 | ] 1708 | }, 1709 | { 1710 | "name": "stdout", 1711 | "output_type": "stream", 1712 | "text": [ 1713 | "INFO:tensorflow:global_step/sec: 2.09068\n" 1714 | ] 1715 | }, 1716 | { 1717 | "name": "stderr", 1718 | "output_type": "stream", 1719 | "text": [ 1720 | "I0511 15:23:48.814047 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09068\n" 1721 | ] 1722 | }, 1723 | { 1724 | "name": "stdout", 1725 | "output_type": "stream", 1726 | "text": [ 1727 | "INFO:tensorflow:loss = 0.10294303, step = 1200 (47.831 sec)\n" 1728 | ] 1729 | }, 1730 | { 1731 | "name": "stderr", 1732 | "output_type": "stream", 1733 | "text": [ 1734 | "I0511 15:23:48.816228 139818736010624 basic_session_run_hooks.py:247] loss = 0.10294303, step = 1200 (47.831 sec)\n" 1735 | ] 1736 | }, 1737 | { 1738 | "name": "stdout", 1739 | "output_type": "stream", 1740 | "text": [ 1741 | "INFO:tensorflow:global_step/sec: 2.09035\n" 1742 | ] 1743 | }, 1744 | { 1745 | "name": "stderr", 1746 | "output_type": "stream", 1747 | "text": [ 1748 | "I0511 15:24:36.653093 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09035\n" 1749 | ] 1750 | }, 1751 | { 1752 | "name": "stdout", 1753 | "output_type": "stream", 1754 | "text": [ 1755 | "INFO:tensorflow:loss = 0.053479243, step = 1300 (47.840 sec)\n" 1756 | ] 1757 | }, 1758 | { 1759 | "name": "stderr", 1760 | "output_type": "stream", 1761 | "text": [ 1762 | "I0511 15:24:36.655827 139818736010624 basic_session_run_hooks.py:247] loss = 0.053479243, step = 1300 (47.840 sec)\n" 1763 | ] 1764 | }, 1765 | { 1766 | "name": "stdout", 1767 | "output_type": "stream", 1768 | "text": [ 1769 | "INFO:tensorflow:global_step/sec: 2.09095\n" 1770 | ] 1771 | }, 1772 | { 1773 | "name": "stderr", 1774 | "output_type": "stream", 1775 | "text": [ 1776 | "I0511 15:25:24.478206 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09095\n" 1777 | ] 1778 | }, 1779 | { 1780 | "name": "stdout", 1781 | "output_type": "stream", 1782 | "text": [ 1783 | "INFO:tensorflow:loss = 0.06477397, step = 1400 (47.825 sec)\n" 1784 | ] 1785 | }, 1786 | { 1787 | "name": "stderr", 1788 | "output_type": "stream", 1789 | "text": [ 1790 | "I0511 15:25:24.480335 139818736010624 basic_session_run_hooks.py:247] loss = 0.06477397, step = 1400 (47.825 sec)\n" 1791 | ] 1792 | }, 1793 | { 1794 | "name": "stdout", 1795 | "output_type": "stream", 1796 | "text": [ 1797 | "INFO:tensorflow:global_step/sec: 2.09084\n" 1798 | ] 1799 | }, 1800 | { 1801 | "name": "stderr", 1802 | "output_type": "stream", 1803 | "text": [ 1804 | "I0511 15:26:12.306014 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09084\n" 1805 | ] 1806 | }, 1807 | { 1808 | "name": "stdout", 1809 | "output_type": "stream", 1810 | "text": [ 1811 | "INFO:tensorflow:loss = 
0.09032548, step = 1500 (47.828 sec)\n" 1812 | ] 1813 | }, 1814 | { 1815 | "name": "stderr", 1816 | "output_type": "stream", 1817 | "text": [ 1818 | "I0511 15:26:12.308094 139818736010624 basic_session_run_hooks.py:247] loss = 0.09032548, step = 1500 (47.828 sec)\n" 1819 | ] 1820 | }, 1821 | { 1822 | "name": "stdout", 1823 | "output_type": "stream", 1824 | "text": [ 1825 | "INFO:tensorflow:global_step/sec: 2.09073\n" 1826 | ] 1827 | }, 1828 | { 1829 | "name": "stderr", 1830 | "output_type": "stream", 1831 | "text": [ 1832 | "I0511 15:27:00.136224 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09073\n" 1833 | ] 1834 | }, 1835 | { 1836 | "name": "stdout", 1837 | "output_type": "stream", 1838 | "text": [ 1839 | "INFO:tensorflow:loss = 0.007274413, step = 1600 (47.830 sec)\n" 1840 | ] 1841 | }, 1842 | { 1843 | "name": "stderr", 1844 | "output_type": "stream", 1845 | "text": [ 1846 | "I0511 15:27:00.138098 139818736010624 basic_session_run_hooks.py:247] loss = 0.007274413, step = 1600 (47.830 sec)\n" 1847 | ] 1848 | }, 1849 | { 1850 | "name": "stdout", 1851 | "output_type": "stream", 1852 | "text": [ 1853 | "INFO:tensorflow:global_step/sec: 2.0913\n" 1854 | ] 1855 | }, 1856 | { 1857 | "name": "stderr", 1858 | "output_type": "stream", 1859 | "text": [ 1860 | "I0511 15:27:47.953286 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.0913\n" 1861 | ] 1862 | }, 1863 | { 1864 | "name": "stdout", 1865 | "output_type": "stream", 1866 | "text": [ 1867 | "INFO:tensorflow:loss = 0.0498402, step = 1700 (47.817 sec)\n" 1868 | ] 1869 | }, 1870 | { 1871 | "name": "stderr", 1872 | "output_type": "stream", 1873 | "text": [ 1874 | "I0511 15:27:47.955284 139818736010624 basic_session_run_hooks.py:247] loss = 0.0498402, step = 1700 (47.817 sec)\n" 1875 | ] 1876 | }, 1877 | { 1878 | "name": "stdout", 1879 | "output_type": "stream", 1880 | "text": [ 1881 | "INFO:tensorflow:global_step/sec: 2.09146\n" 1882 | ] 1883 | }, 1884 | { 1885 | "name": "stderr", 1886 | "output_type": "stream", 1887 | "text": [ 1888 | "I0511 15:28:35.766830 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09146\n" 1889 | ] 1890 | }, 1891 | { 1892 | "name": "stdout", 1893 | "output_type": "stream", 1894 | "text": [ 1895 | "INFO:tensorflow:loss = 0.049082045, step = 1800 (47.814 sec)\n" 1896 | ] 1897 | }, 1898 | { 1899 | "name": "stderr", 1900 | "output_type": "stream", 1901 | "text": [ 1902 | "I0511 15:28:35.769114 139818736010624 basic_session_run_hooks.py:247] loss = 0.049082045, step = 1800 (47.814 sec)\n" 1903 | ] 1904 | }, 1905 | { 1906 | "name": "stdout", 1907 | "output_type": "stream", 1908 | "text": [ 1909 | "INFO:tensorflow:global_step/sec: 2.09122\n" 1910 | ] 1911 | }, 1912 | { 1913 | "name": "stderr", 1914 | "output_type": "stream", 1915 | "text": [ 1916 | "I0511 15:29:23.585731 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09122\n" 1917 | ] 1918 | }, 1919 | { 1920 | "name": "stdout", 1921 | "output_type": "stream", 1922 | "text": [ 1923 | "INFO:tensorflow:loss = 0.07345208, step = 1900 (47.819 sec)\n" 1924 | ] 1925 | }, 1926 | { 1927 | "name": "stderr", 1928 | "output_type": "stream", 1929 | "text": [ 1930 | "I0511 15:29:23.587659 139818736010624 basic_session_run_hooks.py:247] loss = 0.07345208, step = 1900 (47.819 sec)\n" 1931 | ] 1932 | }, 1933 | { 1934 | "name": "stdout", 1935 | "output_type": "stream", 1936 | "text": [ 1937 | "INFO:tensorflow:Saving checkpoints for 2000 into ../working/output/model.ckpt.\n" 1938 | ] 1939 | }, 1940 | { 1941 | "name": 
"stderr", 1942 | "output_type": "stream", 1943 | "text": [ 1944 | "I0511 15:30:10.942787 139818736010624 basic_session_run_hooks.py:594] Saving checkpoints for 2000 into ../working/output/model.ckpt.\n" 1945 | ] 1946 | }, 1947 | { 1948 | "name": "stdout", 1949 | "output_type": "stream", 1950 | "text": [ 1951 | "INFO:tensorflow:global_step/sec: 1.94964\n" 1952 | ] 1953 | }, 1954 | { 1955 | "name": "stderr", 1956 | "output_type": "stream", 1957 | "text": [ 1958 | "I0511 15:30:14.877322 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 1.94964\n" 1959 | ] 1960 | }, 1961 | { 1962 | "name": "stdout", 1963 | "output_type": "stream", 1964 | "text": [ 1965 | "INFO:tensorflow:loss = 0.07142392, step = 2000 (51.292 sec)\n" 1966 | ] 1967 | }, 1968 | { 1969 | "name": "stderr", 1970 | "output_type": "stream", 1971 | "text": [ 1972 | "I0511 15:30:14.879259 139818736010624 basic_session_run_hooks.py:247] loss = 0.07142392, step = 2000 (51.292 sec)\n" 1973 | ] 1974 | }, 1975 | { 1976 | "name": "stdout", 1977 | "output_type": "stream", 1978 | "text": [ 1979 | "INFO:tensorflow:global_step/sec: 2.09102\n" 1980 | ] 1981 | }, 1982 | { 1983 | "name": "stderr", 1984 | "output_type": "stream", 1985 | "text": [ 1986 | "I0511 15:31:02.700871 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09102\n" 1987 | ] 1988 | }, 1989 | { 1990 | "name": "stdout", 1991 | "output_type": "stream", 1992 | "text": [ 1993 | "INFO:tensorflow:loss = 0.042824592, step = 2100 (47.824 sec)\n" 1994 | ] 1995 | }, 1996 | { 1997 | "name": "stderr", 1998 | "output_type": "stream", 1999 | "text": [ 2000 | "I0511 15:31:02.702767 139818736010624 basic_session_run_hooks.py:247] loss = 0.042824592, step = 2100 (47.824 sec)\n" 2001 | ] 2002 | }, 2003 | { 2004 | "name": "stdout", 2005 | "output_type": "stream", 2006 | "text": [ 2007 | "INFO:tensorflow:global_step/sec: 2.09171\n" 2008 | ] 2009 | }, 2010 | { 2011 | "name": "stderr", 2012 | "output_type": "stream", 2013 | "text": [ 2014 | "I0511 15:31:50.508647 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09171\n" 2015 | ] 2016 | }, 2017 | { 2018 | "name": "stdout", 2019 | "output_type": "stream", 2020 | "text": [ 2021 | "INFO:tensorflow:loss = 0.052609295, step = 2200 (47.808 sec)\n" 2022 | ] 2023 | }, 2024 | { 2025 | "name": "stderr", 2026 | "output_type": "stream", 2027 | "text": [ 2028 | "I0511 15:31:50.510569 139818736010624 basic_session_run_hooks.py:247] loss = 0.052609295, step = 2200 (47.808 sec)\n" 2029 | ] 2030 | }, 2031 | { 2032 | "name": "stdout", 2033 | "output_type": "stream", 2034 | "text": [ 2035 | "INFO:tensorflow:global_step/sec: 2.09154\n" 2036 | ] 2037 | }, 2038 | { 2039 | "name": "stderr", 2040 | "output_type": "stream", 2041 | "text": [ 2042 | "I0511 15:32:38.320268 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09154\n" 2043 | ] 2044 | }, 2045 | { 2046 | "name": "stdout", 2047 | "output_type": "stream", 2048 | "text": [ 2049 | "INFO:tensorflow:loss = 0.056395765, step = 2300 (47.812 sec)\n" 2050 | ] 2051 | }, 2052 | { 2053 | "name": "stderr", 2054 | "output_type": "stream", 2055 | "text": [ 2056 | "I0511 15:32:38.322877 139818736010624 basic_session_run_hooks.py:247] loss = 0.056395765, step = 2300 (47.812 sec)\n" 2057 | ] 2058 | }, 2059 | { 2060 | "name": "stdout", 2061 | "output_type": "stream", 2062 | "text": [ 2063 | "INFO:tensorflow:global_step/sec: 2.09157\n" 2064 | ] 2065 | }, 2066 | { 2067 | "name": "stderr", 2068 | "output_type": "stream", 2069 | "text": [ 2070 | "I0511 15:33:26.131188 
139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09157\n" 2071 | ] 2072 | }, 2073 | { 2074 | "name": "stdout", 2075 | "output_type": "stream", 2076 | "text": [ 2077 | "INFO:tensorflow:loss = 0.088531554, step = 2400 (47.810 sec)\n" 2078 | ] 2079 | }, 2080 | { 2081 | "name": "stderr", 2082 | "output_type": "stream", 2083 | "text": [ 2084 | "I0511 15:33:26.133240 139818736010624 basic_session_run_hooks.py:247] loss = 0.088531554, step = 2400 (47.810 sec)\n" 2085 | ] 2086 | }, 2087 | { 2088 | "name": "stdout", 2089 | "output_type": "stream", 2090 | "text": [ 2091 | "INFO:tensorflow:global_step/sec: 2.09172\n" 2092 | ] 2093 | }, 2094 | { 2095 | "name": "stderr", 2096 | "output_type": "stream", 2097 | "text": [ 2098 | "I0511 15:34:13.938695 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09172\n" 2099 | ] 2100 | }, 2101 | { 2102 | "name": "stdout", 2103 | "output_type": "stream", 2104 | "text": [ 2105 | "INFO:tensorflow:loss = 0.06953585, step = 2500 (47.808 sec)\n" 2106 | ] 2107 | }, 2108 | { 2109 | "name": "stderr", 2110 | "output_type": "stream", 2111 | "text": [ 2112 | "I0511 15:34:13.940865 139818736010624 basic_session_run_hooks.py:247] loss = 0.06953585, step = 2500 (47.808 sec)\n" 2113 | ] 2114 | }, 2115 | { 2116 | "name": "stdout", 2117 | "output_type": "stream", 2118 | "text": [ 2119 | "INFO:tensorflow:global_step/sec: 2.09134\n" 2120 | ] 2121 | }, 2122 | { 2123 | "name": "stderr", 2124 | "output_type": "stream", 2125 | "text": [ 2126 | "I0511 15:35:01.754793 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09134\n" 2127 | ] 2128 | }, 2129 | { 2130 | "name": "stdout", 2131 | "output_type": "stream", 2132 | "text": [ 2133 | "INFO:tensorflow:loss = 0.039607387, step = 2600 (47.816 sec)\n" 2134 | ] 2135 | }, 2136 | { 2137 | "name": "stderr", 2138 | "output_type": "stream", 2139 | "text": [ 2140 | "I0511 15:35:01.756904 139818736010624 basic_session_run_hooks.py:247] loss = 0.039607387, step = 2600 (47.816 sec)\n" 2141 | ] 2142 | }, 2143 | { 2144 | "name": "stdout", 2145 | "output_type": "stream", 2146 | "text": [ 2147 | "INFO:tensorflow:global_step/sec: 2.09167\n" 2148 | ] 2149 | }, 2150 | { 2151 | "name": "stderr", 2152 | "output_type": "stream", 2153 | "text": [ 2154 | "I0511 15:35:49.563446 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09167\n" 2155 | ] 2156 | }, 2157 | { 2158 | "name": "stdout", 2159 | "output_type": "stream", 2160 | "text": [ 2161 | "INFO:tensorflow:loss = 0.058110148, step = 2700 (47.809 sec)\n" 2162 | ] 2163 | }, 2164 | { 2165 | "name": "stderr", 2166 | "output_type": "stream", 2167 | "text": [ 2168 | "I0511 15:35:49.565496 139818736010624 basic_session_run_hooks.py:247] loss = 0.058110148, step = 2700 (47.809 sec)\n" 2169 | ] 2170 | }, 2171 | { 2172 | "name": "stdout", 2173 | "output_type": "stream", 2174 | "text": [ 2175 | "INFO:tensorflow:global_step/sec: 2.09185\n" 2176 | ] 2177 | }, 2178 | { 2179 | "name": "stderr", 2180 | "output_type": "stream", 2181 | "text": [ 2182 | "I0511 15:36:37.368036 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09185\n" 2183 | ] 2184 | }, 2185 | { 2186 | "name": "stdout", 2187 | "output_type": "stream", 2188 | "text": [ 2189 | "INFO:tensorflow:loss = 0.062804274, step = 2800 (47.805 sec)\n" 2190 | ] 2191 | }, 2192 | { 2193 | "name": "stderr", 2194 | "output_type": "stream", 2195 | "text": [ 2196 | "I0511 15:36:37.370290 139818736010624 basic_session_run_hooks.py:247] loss = 0.062804274, step = 2800 (47.805 sec)\n" 2197 | ] 2198 | }, 
2199 | { 2200 | "name": "stdout", 2201 | "output_type": "stream", 2202 | "text": [ 2203 | "INFO:tensorflow:global_step/sec: 2.09212\n" 2204 | ] 2205 | }, 2206 | { 2207 | "name": "stderr", 2208 | "output_type": "stream", 2209 | "text": [ 2210 | "I0511 15:37:25.166364 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09212\n" 2211 | ] 2212 | }, 2213 | { 2214 | "name": "stdout", 2215 | "output_type": "stream", 2216 | "text": [ 2217 | "INFO:tensorflow:loss = 0.102470964, step = 2900 (47.798 sec)\n" 2218 | ] 2219 | }, 2220 | { 2221 | "name": "stderr", 2222 | "output_type": "stream", 2223 | "text": [ 2224 | "I0511 15:37:25.168506 139818736010624 basic_session_run_hooks.py:247] loss = 0.102470964, step = 2900 (47.798 sec)\n" 2225 | ] 2226 | }, 2227 | { 2228 | "name": "stdout", 2229 | "output_type": "stream", 2230 | "text": [ 2231 | "INFO:tensorflow:Saving checkpoints for 3000 into ../working/output/model.ckpt.\n" 2232 | ] 2233 | }, 2234 | { 2235 | "name": "stderr", 2236 | "output_type": "stream", 2237 | "text": [ 2238 | "I0511 15:38:12.499011 139818736010624 basic_session_run_hooks.py:594] Saving checkpoints for 3000 into ../working/output/model.ckpt.\n" 2239 | ] 2240 | }, 2241 | { 2242 | "name": "stdout", 2243 | "output_type": "stream", 2244 | "text": [ 2245 | "INFO:tensorflow:global_step/sec: 1.93843\n" 2246 | ] 2247 | }, 2248 | { 2249 | "name": "stderr", 2250 | "output_type": "stream", 2251 | "text": [ 2252 | "I0511 15:38:16.754458 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 1.93843\n" 2253 | ] 2254 | }, 2255 | { 2256 | "name": "stdout", 2257 | "output_type": "stream", 2258 | "text": [ 2259 | "INFO:tensorflow:loss = 0.04797803, step = 3000 (51.588 sec)\n" 2260 | ] 2261 | }, 2262 | { 2263 | "name": "stderr", 2264 | "output_type": "stream", 2265 | "text": [ 2266 | "I0511 15:38:16.756463 139818736010624 basic_session_run_hooks.py:247] loss = 0.04797803, step = 3000 (51.588 sec)\n" 2267 | ] 2268 | }, 2269 | { 2270 | "name": "stdout", 2271 | "output_type": "stream", 2272 | "text": [ 2273 | "INFO:tensorflow:global_step/sec: 2.089\n" 2274 | ] 2275 | }, 2276 | { 2277 | "name": "stderr", 2278 | "output_type": "stream", 2279 | "text": [ 2280 | "I0511 15:39:04.624296 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.089\n" 2281 | ] 2282 | }, 2283 | { 2284 | "name": "stdout", 2285 | "output_type": "stream", 2286 | "text": [ 2287 | "INFO:tensorflow:loss = 0.038590156, step = 3100 (47.870 sec)\n" 2288 | ] 2289 | }, 2290 | { 2291 | "name": "stderr", 2292 | "output_type": "stream", 2293 | "text": [ 2294 | "I0511 15:39:04.626242 139818736010624 basic_session_run_hooks.py:247] loss = 0.038590156, step = 3100 (47.870 sec)\n" 2295 | ] 2296 | }, 2297 | { 2298 | "name": "stdout", 2299 | "output_type": "stream", 2300 | "text": [ 2301 | "INFO:tensorflow:global_step/sec: 2.09174\n" 2302 | ] 2303 | }, 2304 | { 2305 | "name": "stderr", 2306 | "output_type": "stream", 2307 | "text": [ 2308 | "I0511 15:39:52.431413 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09174\n" 2309 | ] 2310 | }, 2311 | { 2312 | "name": "stdout", 2313 | "output_type": "stream", 2314 | "text": [ 2315 | "INFO:tensorflow:loss = 0.039528202, step = 3200 (47.808 sec)\n" 2316 | ] 2317 | }, 2318 | { 2319 | "name": "stderr", 2320 | "output_type": "stream", 2321 | "text": [ 2322 | "I0511 15:39:52.433957 139818736010624 basic_session_run_hooks.py:247] loss = 0.039528202, step = 3200 (47.808 sec)\n" 2323 | ] 2324 | }, 2325 | { 2326 | "name": "stdout", 2327 | "output_type": "stream", 
2328 | "text": [ 2329 | "INFO:tensorflow:global_step/sec: 2.09143\n" 2330 | ] 2331 | }, 2332 | { 2333 | "name": "stderr", 2334 | "output_type": "stream", 2335 | "text": [ 2336 | "I0511 15:40:40.245645 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09143\n" 2337 | ] 2338 | }, 2339 | { 2340 | "name": "stdout", 2341 | "output_type": "stream", 2342 | "text": [ 2343 | "INFO:tensorflow:loss = 0.019955928, step = 3300 (47.814 sec)\n" 2344 | ] 2345 | }, 2346 | { 2347 | "name": "stderr", 2348 | "output_type": "stream", 2349 | "text": [ 2350 | "I0511 15:40:40.248160 139818736010624 basic_session_run_hooks.py:247] loss = 0.019955928, step = 3300 (47.814 sec)\n" 2351 | ] 2352 | }, 2353 | { 2354 | "name": "stdout", 2355 | "output_type": "stream", 2356 | "text": [ 2357 | "INFO:tensorflow:global_step/sec: 2.09245\n" 2358 | ] 2359 | }, 2360 | { 2361 | "name": "stderr", 2362 | "output_type": "stream", 2363 | "text": [ 2364 | "I0511 15:41:28.036395 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09245\n" 2365 | ] 2366 | }, 2367 | { 2368 | "name": "stdout", 2369 | "output_type": "stream", 2370 | "text": [ 2371 | "INFO:tensorflow:loss = 0.06499622, step = 3400 (47.790 sec)\n" 2372 | ] 2373 | }, 2374 | { 2375 | "name": "stderr", 2376 | "output_type": "stream", 2377 | "text": [ 2378 | "I0511 15:41:28.038285 139818736010624 basic_session_run_hooks.py:247] loss = 0.06499622, step = 3400 (47.790 sec)\n" 2379 | ] 2380 | }, 2381 | { 2382 | "name": "stdout", 2383 | "output_type": "stream", 2384 | "text": [ 2385 | "INFO:tensorflow:global_step/sec: 2.09115\n" 2386 | ] 2387 | }, 2388 | { 2389 | "name": "stderr", 2390 | "output_type": "stream", 2391 | "text": [ 2392 | "I0511 15:42:15.856956 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09115\n" 2393 | ] 2394 | }, 2395 | { 2396 | "name": "stdout", 2397 | "output_type": "stream", 2398 | "text": [ 2399 | "INFO:tensorflow:loss = 0.0353857, step = 3500 (47.821 sec)\n" 2400 | ] 2401 | }, 2402 | { 2403 | "name": "stderr", 2404 | "output_type": "stream", 2405 | "text": [ 2406 | "I0511 15:42:15.859225 139818736010624 basic_session_run_hooks.py:247] loss = 0.0353857, step = 3500 (47.821 sec)\n" 2407 | ] 2408 | }, 2409 | { 2410 | "name": "stdout", 2411 | "output_type": "stream", 2412 | "text": [ 2413 | "INFO:tensorflow:global_step/sec: 2.09203\n" 2414 | ] 2415 | }, 2416 | { 2417 | "name": "stderr", 2418 | "output_type": "stream", 2419 | "text": [ 2420 | "I0511 15:43:03.657398 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09203\n" 2421 | ] 2422 | }, 2423 | { 2424 | "name": "stdout", 2425 | "output_type": "stream", 2426 | "text": [ 2427 | "INFO:tensorflow:loss = 0.020551901, step = 3600 (47.800 sec)\n" 2428 | ] 2429 | }, 2430 | { 2431 | "name": "stderr", 2432 | "output_type": "stream", 2433 | "text": [ 2434 | "I0511 15:43:03.659630 139818736010624 basic_session_run_hooks.py:247] loss = 0.020551901, step = 3600 (47.800 sec)\n" 2435 | ] 2436 | }, 2437 | { 2438 | "name": "stdout", 2439 | "output_type": "stream", 2440 | "text": [ 2441 | "INFO:tensorflow:global_step/sec: 2.09176\n" 2442 | ] 2443 | }, 2444 | { 2445 | "name": "stderr", 2446 | "output_type": "stream", 2447 | "text": [ 2448 | "I0511 15:43:51.463972 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09176\n" 2449 | ] 2450 | }, 2451 | { 2452 | "name": "stdout", 2453 | "output_type": "stream", 2454 | "text": [ 2455 | "INFO:tensorflow:loss = 0.03629427, step = 3700 (47.806 sec)\n" 2456 | ] 2457 | }, 2458 | { 2459 | "name": "stderr", 
2460 | "output_type": "stream", 2461 | "text": [ 2462 | "I0511 15:43:51.465949 139818736010624 basic_session_run_hooks.py:247] loss = 0.03629427, step = 3700 (47.806 sec)\n" 2463 | ] 2464 | }, 2465 | { 2466 | "name": "stdout", 2467 | "output_type": "stream", 2468 | "text": [ 2469 | "INFO:tensorflow:global_step/sec: 2.09169\n" 2470 | ] 2471 | }, 2472 | { 2473 | "name": "stderr", 2474 | "output_type": "stream", 2475 | "text": [ 2476 | "I0511 15:44:39.272251 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09169\n" 2477 | ] 2478 | }, 2479 | { 2480 | "name": "stdout", 2481 | "output_type": "stream", 2482 | "text": [ 2483 | "INFO:tensorflow:loss = 0.022059195, step = 3800 (47.809 sec)\n" 2484 | ] 2485 | }, 2486 | { 2487 | "name": "stderr", 2488 | "output_type": "stream", 2489 | "text": [ 2490 | "I0511 15:44:39.274925 139818736010624 basic_session_run_hooks.py:247] loss = 0.022059195, step = 3800 (47.809 sec)\n" 2491 | ] 2492 | }, 2493 | { 2494 | "name": "stdout", 2495 | "output_type": "stream", 2496 | "text": [ 2497 | "INFO:tensorflow:global_step/sec: 2.09129\n" 2498 | ] 2499 | }, 2500 | { 2501 | "name": "stderr", 2502 | "output_type": "stream", 2503 | "text": [ 2504 | "I0511 15:45:27.089683 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09129\n" 2505 | ] 2506 | }, 2507 | { 2508 | "name": "stdout", 2509 | "output_type": "stream", 2510 | "text": [ 2511 | "INFO:tensorflow:loss = 0.030918302, step = 3900 (47.817 sec)\n" 2512 | ] 2513 | }, 2514 | { 2515 | "name": "stderr", 2516 | "output_type": "stream", 2517 | "text": [ 2518 | "I0511 15:45:27.091566 139818736010624 basic_session_run_hooks.py:247] loss = 0.030918302, step = 3900 (47.817 sec)\n" 2519 | ] 2520 | }, 2521 | { 2522 | "name": "stdout", 2523 | "output_type": "stream", 2524 | "text": [ 2525 | "INFO:tensorflow:Saving checkpoints for 4000 into ../working/output/model.ckpt.\n" 2526 | ] 2527 | }, 2528 | { 2529 | "name": "stderr", 2530 | "output_type": "stream", 2531 | "text": [ 2532 | "I0511 15:46:14.414194 139818736010624 basic_session_run_hooks.py:594] Saving checkpoints for 4000 into ../working/output/model.ckpt.\n" 2533 | ] 2534 | }, 2535 | { 2536 | "name": "stdout", 2537 | "output_type": "stream", 2538 | "text": [ 2539 | "INFO:tensorflow:global_step/sec: 1.95363\n" 2540 | ] 2541 | }, 2542 | { 2543 | "name": "stderr", 2544 | "output_type": "stream", 2545 | "text": [ 2546 | "I0511 15:46:18.276365 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 1.95363\n" 2547 | ] 2548 | }, 2549 | { 2550 | "name": "stdout", 2551 | "output_type": "stream", 2552 | "text": [ 2553 | "INFO:tensorflow:loss = 0.041287128, step = 4000 (51.187 sec)\n" 2554 | ] 2555 | }, 2556 | { 2557 | "name": "stderr", 2558 | "output_type": "stream", 2559 | "text": [ 2560 | "I0511 15:46:18.278751 139818736010624 basic_session_run_hooks.py:247] loss = 0.041287128, step = 4000 (51.187 sec)\n" 2561 | ] 2562 | }, 2563 | { 2564 | "name": "stdout", 2565 | "output_type": "stream", 2566 | "text": [ 2567 | "INFO:tensorflow:global_step/sec: 2.04033\n" 2568 | ] 2569 | }, 2570 | { 2571 | "name": "stderr", 2572 | "output_type": "stream", 2573 | "text": [ 2574 | "I0511 15:47:07.288040 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.04033\n" 2575 | ] 2576 | }, 2577 | { 2578 | "name": "stdout", 2579 | "output_type": "stream", 2580 | "text": [ 2581 | "INFO:tensorflow:loss = 0.03813028, step = 4100 (49.027 sec)\n" 2582 | ] 2583 | }, 2584 | { 2585 | "name": "stderr", 2586 | "output_type": "stream", 2587 | "text": [ 2588 | "I0511 
15:47:07.305834 139818736010624 basic_session_run_hooks.py:247] loss = 0.03813028, step = 4100 (49.027 sec)\n" 2589 | ] 2590 | }, 2591 | { 2592 | "name": "stdout", 2593 | "output_type": "stream", 2594 | "text": [ 2595 | "INFO:tensorflow:global_step/sec: 2.09158\n" 2596 | ] 2597 | }, 2598 | { 2599 | "name": "stderr", 2600 | "output_type": "stream", 2601 | "text": [ 2602 | "I0511 15:47:55.098767 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09158\n" 2603 | ] 2604 | }, 2605 | { 2606 | "name": "stdout", 2607 | "output_type": "stream", 2608 | "text": [ 2609 | "INFO:tensorflow:loss = 0.02978991, step = 4200 (47.795 sec)\n" 2610 | ] 2611 | }, 2612 | { 2613 | "name": "stderr", 2614 | "output_type": "stream", 2615 | "text": [ 2616 | "I0511 15:47:55.100718 139818736010624 basic_session_run_hooks.py:247] loss = 0.02978991, step = 4200 (47.795 sec)\n" 2617 | ] 2618 | }, 2619 | { 2620 | "name": "stdout", 2621 | "output_type": "stream", 2622 | "text": [ 2623 | "INFO:tensorflow:global_step/sec: 2.09192\n" 2624 | ] 2625 | }, 2626 | { 2627 | "name": "stderr", 2628 | "output_type": "stream", 2629 | "text": [ 2630 | "I0511 15:48:42.901846 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09192\n" 2631 | ] 2632 | }, 2633 | { 2634 | "name": "stdout", 2635 | "output_type": "stream", 2636 | "text": [ 2637 | "INFO:tensorflow:loss = 0.024834404, step = 4300 (47.804 sec)\n" 2638 | ] 2639 | }, 2640 | { 2641 | "name": "stderr", 2642 | "output_type": "stream", 2643 | "text": [ 2644 | "I0511 15:48:42.904389 139818736010624 basic_session_run_hooks.py:247] loss = 0.024834404, step = 4300 (47.804 sec)\n" 2645 | ] 2646 | }, 2647 | { 2648 | "name": "stdout", 2649 | "output_type": "stream", 2650 | "text": [ 2651 | "INFO:tensorflow:global_step/sec: 2.09101\n" 2652 | ] 2653 | }, 2654 | { 2655 | "name": "stderr", 2656 | "output_type": "stream", 2657 | "text": [ 2658 | "I0511 15:49:30.725618 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09101\n" 2659 | ] 2660 | }, 2661 | { 2662 | "name": "stdout", 2663 | "output_type": "stream", 2664 | "text": [ 2665 | "INFO:tensorflow:loss = 0.0018598187, step = 4400 (47.823 sec)\n" 2666 | ] 2667 | }, 2668 | { 2669 | "name": "stderr", 2670 | "output_type": "stream", 2671 | "text": [ 2672 | "I0511 15:49:30.727626 139818736010624 basic_session_run_hooks.py:247] loss = 0.0018598187, step = 4400 (47.823 sec)\n" 2673 | ] 2674 | }, 2675 | { 2676 | "name": "stdout", 2677 | "output_type": "stream", 2678 | "text": [ 2679 | "INFO:tensorflow:global_step/sec: 2.09105\n" 2680 | ] 2681 | }, 2682 | { 2683 | "name": "stderr", 2684 | "output_type": "stream", 2685 | "text": [ 2686 | "I0511 15:50:18.548420 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09105\n" 2687 | ] 2688 | }, 2689 | { 2690 | "name": "stdout", 2691 | "output_type": "stream", 2692 | "text": [ 2693 | "INFO:tensorflow:loss = 0.053158622, step = 4500 (47.824 sec)\n" 2694 | ] 2695 | }, 2696 | { 2697 | "name": "stderr", 2698 | "output_type": "stream", 2699 | "text": [ 2700 | "I0511 15:50:18.551164 139818736010624 basic_session_run_hooks.py:247] loss = 0.053158622, step = 4500 (47.824 sec)\n" 2701 | ] 2702 | }, 2703 | { 2704 | "name": "stdout", 2705 | "output_type": "stream", 2706 | "text": [ 2707 | "INFO:tensorflow:global_step/sec: 2.09055\n" 2708 | ] 2709 | }, 2710 | { 2711 | "name": "stderr", 2712 | "output_type": "stream", 2713 | "text": [ 2714 | "I0511 15:51:06.382694 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09055\n" 2715 | ] 2716 | }, 2717 | 
{ 2718 | "name": "stdout", 2719 | "output_type": "stream", 2720 | "text": [ 2721 | "INFO:tensorflow:loss = 0.01103305, step = 4600 (47.834 sec)\n" 2722 | ] 2723 | }, 2724 | { 2725 | "name": "stderr", 2726 | "output_type": "stream", 2727 | "text": [ 2728 | "I0511 15:51:06.385283 139818736010624 basic_session_run_hooks.py:247] loss = 0.01103305, step = 4600 (47.834 sec)\n" 2729 | ] 2730 | }, 2731 | { 2732 | "name": "stdout", 2733 | "output_type": "stream", 2734 | "text": [ 2735 | "INFO:tensorflow:global_step/sec: 2.09034\n" 2736 | ] 2737 | }, 2738 | { 2739 | "name": "stderr", 2740 | "output_type": "stream", 2741 | "text": [ 2742 | "I0511 15:51:54.221799 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09034\n" 2743 | ] 2744 | }, 2745 | { 2746 | "name": "stdout", 2747 | "output_type": "stream", 2748 | "text": [ 2749 | "INFO:tensorflow:loss = 0.03350905, step = 4700 (47.839 sec)\n" 2750 | ] 2751 | }, 2752 | { 2753 | "name": "stderr", 2754 | "output_type": "stream", 2755 | "text": [ 2756 | "I0511 15:51:54.223891 139818736010624 basic_session_run_hooks.py:247] loss = 0.03350905, step = 4700 (47.839 sec)\n" 2757 | ] 2758 | }, 2759 | { 2760 | "name": "stdout", 2761 | "output_type": "stream", 2762 | "text": [ 2763 | "INFO:tensorflow:global_step/sec: 2.09028\n" 2764 | ] 2765 | }, 2766 | { 2767 | "name": "stderr", 2768 | "output_type": "stream", 2769 | "text": [ 2770 | "I0511 15:52:42.062370 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09028\n" 2771 | ] 2772 | }, 2773 | { 2774 | "name": "stdout", 2775 | "output_type": "stream", 2776 | "text": [ 2777 | "INFO:tensorflow:loss = 0.021977767, step = 4800 (47.841 sec)\n" 2778 | ] 2779 | }, 2780 | { 2781 | "name": "stderr", 2782 | "output_type": "stream", 2783 | "text": [ 2784 | "I0511 15:52:42.064637 139818736010624 basic_session_run_hooks.py:247] loss = 0.021977767, step = 4800 (47.841 sec)\n" 2785 | ] 2786 | }, 2787 | { 2788 | "name": "stdout", 2789 | "output_type": "stream", 2790 | "text": [ 2791 | "INFO:tensorflow:global_step/sec: 2.09046\n" 2792 | ] 2793 | }, 2794 | { 2795 | "name": "stderr", 2796 | "output_type": "stream", 2797 | "text": [ 2798 | "I0511 15:53:29.898656 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09046\n" 2799 | ] 2800 | }, 2801 | { 2802 | "name": "stdout", 2803 | "output_type": "stream", 2804 | "text": [ 2805 | "INFO:tensorflow:loss = 0.0696831, step = 4900 (47.836 sec)\n" 2806 | ] 2807 | }, 2808 | { 2809 | "name": "stderr", 2810 | "output_type": "stream", 2811 | "text": [ 2812 | "I0511 15:53:29.900612 139818736010624 basic_session_run_hooks.py:247] loss = 0.0696831, step = 4900 (47.836 sec)\n" 2813 | ] 2814 | }, 2815 | { 2816 | "name": "stdout", 2817 | "output_type": "stream", 2818 | "text": [ 2819 | "INFO:tensorflow:Saving checkpoints for 5000 into ../working/output/model.ckpt.\n" 2820 | ] 2821 | }, 2822 | { 2823 | "name": "stderr", 2824 | "output_type": "stream", 2825 | "text": [ 2826 | "I0511 15:54:17.267159 139818736010624 basic_session_run_hooks.py:594] Saving checkpoints for 5000 into ../working/output/model.ckpt.\n" 2827 | ] 2828 | }, 2829 | { 2830 | "name": "stdout", 2831 | "output_type": "stream", 2832 | "text": [ 2833 | "INFO:tensorflow:global_step/sec: 1.94279\n" 2834 | ] 2835 | }, 2836 | { 2837 | "name": "stderr", 2838 | "output_type": "stream", 2839 | "text": [ 2840 | "I0511 15:54:21.371152 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 1.94279\n" 2841 | ] 2842 | }, 2843 | { 2844 | "name": "stdout", 2845 | "output_type": "stream", 2846 | 
"text": [ 2847 | "INFO:tensorflow:loss = 0.020921743, step = 5000 (51.473 sec)\n" 2848 | ] 2849 | }, 2850 | { 2851 | "name": "stderr", 2852 | "output_type": "stream", 2853 | "text": [ 2854 | "I0511 15:54:21.373201 139818736010624 basic_session_run_hooks.py:247] loss = 0.020921743, step = 5000 (51.473 sec)\n" 2855 | ] 2856 | }, 2857 | { 2858 | "name": "stdout", 2859 | "output_type": "stream", 2860 | "text": [ 2861 | "INFO:tensorflow:global_step/sec: 1.94295\n" 2862 | ] 2863 | }, 2864 | { 2865 | "name": "stderr", 2866 | "output_type": "stream", 2867 | "text": [ 2868 | "I0511 15:55:12.839218 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 1.94295\n" 2869 | ] 2870 | }, 2871 | { 2872 | "name": "stdout", 2873 | "output_type": "stream", 2874 | "text": [ 2875 | "INFO:tensorflow:loss = 0.010868247, step = 5100 (51.468 sec)\n" 2876 | ] 2877 | }, 2878 | { 2879 | "name": "stderr", 2880 | "output_type": "stream", 2881 | "text": [ 2882 | "I0511 15:55:12.841276 139818736010624 basic_session_run_hooks.py:247] loss = 0.010868247, step = 5100 (51.468 sec)\n" 2883 | ] 2884 | }, 2885 | { 2886 | "name": "stdout", 2887 | "output_type": "stream", 2888 | "text": [ 2889 | "INFO:tensorflow:global_step/sec: 2.08952\n" 2890 | ] 2891 | }, 2892 | { 2893 | "name": "stderr", 2894 | "output_type": "stream", 2895 | "text": [ 2896 | "I0511 15:56:00.697159 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.08952\n" 2897 | ] 2898 | }, 2899 | { 2900 | "name": "stdout", 2901 | "output_type": "stream", 2902 | "text": [ 2903 | "INFO:tensorflow:loss = 0.025036039, step = 5200 (47.858 sec)\n" 2904 | ] 2905 | }, 2906 | { 2907 | "name": "stderr", 2908 | "output_type": "stream", 2909 | "text": [ 2910 | "I0511 15:56:00.699109 139818736010624 basic_session_run_hooks.py:247] loss = 0.025036039, step = 5200 (47.858 sec)\n" 2911 | ] 2912 | }, 2913 | { 2914 | "name": "stdout", 2915 | "output_type": "stream", 2916 | "text": [ 2917 | "INFO:tensorflow:global_step/sec: 2.0898\n" 2918 | ] 2919 | }, 2920 | { 2921 | "name": "stderr", 2922 | "output_type": "stream", 2923 | "text": [ 2924 | "I0511 15:56:48.548637 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.0898\n" 2925 | ] 2926 | }, 2927 | { 2928 | "name": "stdout", 2929 | "output_type": "stream", 2930 | "text": [ 2931 | "INFO:tensorflow:loss = 0.030195123, step = 5300 (47.852 sec)\n" 2932 | ] 2933 | }, 2934 | { 2935 | "name": "stderr", 2936 | "output_type": "stream", 2937 | "text": [ 2938 | "I0511 15:56:48.550750 139818736010624 basic_session_run_hooks.py:247] loss = 0.030195123, step = 5300 (47.852 sec)\n" 2939 | ] 2940 | }, 2941 | { 2942 | "name": "stdout", 2943 | "output_type": "stream", 2944 | "text": [ 2945 | "INFO:tensorflow:global_step/sec: 2.08978\n" 2946 | ] 2947 | }, 2948 | { 2949 | "name": "stderr", 2950 | "output_type": "stream", 2951 | "text": [ 2952 | "I0511 15:57:36.400642 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.08978\n" 2953 | ] 2954 | }, 2955 | { 2956 | "name": "stdout", 2957 | "output_type": "stream", 2958 | "text": [ 2959 | "INFO:tensorflow:loss = 0.039523054, step = 5400 (47.852 sec)\n" 2960 | ] 2961 | }, 2962 | { 2963 | "name": "stderr", 2964 | "output_type": "stream", 2965 | "text": [ 2966 | "I0511 15:57:36.402472 139818736010624 basic_session_run_hooks.py:247] loss = 0.039523054, step = 5400 (47.852 sec)\n" 2967 | ] 2968 | }, 2969 | { 2970 | "name": "stdout", 2971 | "output_type": "stream", 2972 | "text": [ 2973 | "INFO:tensorflow:global_step/sec: 2.0896\n" 2974 | ] 2975 | }, 2976 | { 2977 | 
"name": "stderr", 2978 | "output_type": "stream", 2979 | "text": [ 2980 | "I0511 15:58:24.256788 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.0896\n" 2981 | ] 2982 | }, 2983 | { 2984 | "name": "stdout", 2985 | "output_type": "stream", 2986 | "text": [ 2987 | "INFO:tensorflow:loss = 0.035369553, step = 5500 (47.857 sec)\n" 2988 | ] 2989 | }, 2990 | { 2991 | "name": "stderr", 2992 | "output_type": "stream", 2993 | "text": [ 2994 | "I0511 15:58:24.259038 139818736010624 basic_session_run_hooks.py:247] loss = 0.035369553, step = 5500 (47.857 sec)\n" 2995 | ] 2996 | }, 2997 | { 2998 | "name": "stdout", 2999 | "output_type": "stream", 3000 | "text": [ 3001 | "INFO:tensorflow:global_step/sec: 2.09002\n" 3002 | ] 3003 | }, 3004 | { 3005 | "name": "stderr", 3006 | "output_type": "stream", 3007 | "text": [ 3008 | "I0511 15:59:12.103274 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09002\n" 3009 | ] 3010 | }, 3011 | { 3012 | "name": "stdout", 3013 | "output_type": "stream", 3014 | "text": [ 3015 | "INFO:tensorflow:loss = 0.022261783, step = 5600 (47.847 sec)\n" 3016 | ] 3017 | }, 3018 | { 3019 | "name": "stderr", 3020 | "output_type": "stream", 3021 | "text": [ 3022 | "I0511 15:59:12.105604 139818736010624 basic_session_run_hooks.py:247] loss = 0.022261783, step = 5600 (47.847 sec)\n" 3023 | ] 3024 | }, 3025 | { 3026 | "name": "stdout", 3027 | "output_type": "stream", 3028 | "text": [ 3029 | "INFO:tensorflow:global_step/sec: 2.08957\n" 3030 | ] 3031 | }, 3032 | { 3033 | "name": "stderr", 3034 | "output_type": "stream", 3035 | "text": [ 3036 | "I0511 15:59:59.959986 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.08957\n" 3037 | ] 3038 | }, 3039 | { 3040 | "name": "stdout", 3041 | "output_type": "stream", 3042 | "text": [ 3043 | "INFO:tensorflow:loss = 0.030732388, step = 5700 (47.856 sec)\n" 3044 | ] 3045 | }, 3046 | { 3047 | "name": "stderr", 3048 | "output_type": "stream", 3049 | "text": [ 3050 | "I0511 15:59:59.961924 139818736010624 basic_session_run_hooks.py:247] loss = 0.030732388, step = 5700 (47.856 sec)\n" 3051 | ] 3052 | }, 3053 | { 3054 | "name": "stdout", 3055 | "output_type": "stream", 3056 | "text": [ 3057 | "INFO:tensorflow:global_step/sec: 2.08961\n" 3058 | ] 3059 | }, 3060 | { 3061 | "name": "stderr", 3062 | "output_type": "stream", 3063 | "text": [ 3064 | "I0511 16:00:47.815930 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.08961\n" 3065 | ] 3066 | }, 3067 | { 3068 | "name": "stdout", 3069 | "output_type": "stream", 3070 | "text": [ 3071 | "INFO:tensorflow:loss = 0.005895201, step = 5800 (47.856 sec)\n" 3072 | ] 3073 | }, 3074 | { 3075 | "name": "stderr", 3076 | "output_type": "stream", 3077 | "text": [ 3078 | "I0511 16:00:47.818212 139818736010624 basic_session_run_hooks.py:247] loss = 0.005895201, step = 5800 (47.856 sec)\n" 3079 | ] 3080 | }, 3081 | { 3082 | "name": "stdout", 3083 | "output_type": "stream", 3084 | "text": [ 3085 | "INFO:tensorflow:global_step/sec: 2.09021\n" 3086 | ] 3087 | }, 3088 | { 3089 | "name": "stderr", 3090 | "output_type": "stream", 3091 | "text": [ 3092 | "I0511 16:01:35.657968 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09021\n" 3093 | ] 3094 | }, 3095 | { 3096 | "name": "stdout", 3097 | "output_type": "stream", 3098 | "text": [ 3099 | "INFO:tensorflow:loss = 0.04426557, step = 5900 (47.842 sec)\n" 3100 | ] 3101 | }, 3102 | { 3103 | "name": "stderr", 3104 | "output_type": "stream", 3105 | "text": [ 3106 | "I0511 16:01:35.660161 139818736010624 
basic_session_run_hooks.py:247] loss = 0.04426557, step = 5900 (47.842 sec)\n" 3107 | ] 3108 | }, 3109 | { 3110 | "name": "stdout", 3111 | "output_type": "stream", 3112 | "text": [ 3113 | "INFO:tensorflow:Saving checkpoints for 6000 into ../working/output/model.ckpt.\n" 3114 | ] 3115 | }, 3116 | { 3117 | "name": "stderr", 3118 | "output_type": "stream", 3119 | "text": [ 3120 | "I0511 16:02:23.016652 139818736010624 basic_session_run_hooks.py:594] Saving checkpoints for 6000 into ../working/output/model.ckpt.\n" 3121 | ] 3122 | }, 3123 | { 3124 | "name": "stdout", 3125 | "output_type": "stream", 3126 | "text": [ 3127 | "INFO:tensorflow:global_step/sec: 1.94909\n" 3128 | ] 3129 | }, 3130 | { 3131 | "name": "stderr", 3132 | "output_type": "stream", 3133 | "text": [ 3134 | "I0511 16:02:26.964029 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 1.94909\n" 3135 | ] 3136 | }, 3137 | { 3138 | "name": "stdout", 3139 | "output_type": "stream", 3140 | "text": [ 3141 | "INFO:tensorflow:loss = 0.019231007, step = 6000 (51.306 sec)\n" 3142 | ] 3143 | }, 3144 | { 3145 | "name": "stderr", 3146 | "output_type": "stream", 3147 | "text": [ 3148 | "I0511 16:02:26.965895 139818736010624 basic_session_run_hooks.py:247] loss = 0.019231007, step = 6000 (51.306 sec)\n" 3149 | ] 3150 | }, 3151 | { 3152 | "name": "stdout", 3153 | "output_type": "stream", 3154 | "text": [ 3155 | "INFO:tensorflow:global_step/sec: 2.08666\n" 3156 | ] 3157 | }, 3158 | { 3159 | "name": "stderr", 3160 | "output_type": "stream", 3161 | "text": [ 3162 | "I0511 16:03:14.887445 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.08666\n" 3163 | ] 3164 | }, 3165 | { 3166 | "name": "stdout", 3167 | "output_type": "stream", 3168 | "text": [ 3169 | "INFO:tensorflow:loss = 0.014947635, step = 6100 (47.923 sec)\n" 3170 | ] 3171 | }, 3172 | { 3173 | "name": "stderr", 3174 | "output_type": "stream", 3175 | "text": [ 3176 | "I0511 16:03:14.889358 139818736010624 basic_session_run_hooks.py:247] loss = 0.014947635, step = 6100 (47.923 sec)\n" 3177 | ] 3178 | }, 3179 | { 3180 | "name": "stdout", 3181 | "output_type": "stream", 3182 | "text": [ 3183 | "INFO:tensorflow:global_step/sec: 2.09003\n" 3184 | ] 3185 | }, 3186 | { 3187 | "name": "stderr", 3188 | "output_type": "stream", 3189 | "text": [ 3190 | "I0511 16:04:02.733660 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09003\n" 3191 | ] 3192 | }, 3193 | { 3194 | "name": "stdout", 3195 | "output_type": "stream", 3196 | "text": [ 3197 | "INFO:tensorflow:loss = 0.02263821, step = 6200 (47.847 sec)\n" 3198 | ] 3199 | }, 3200 | { 3201 | "name": "stderr", 3202 | "output_type": "stream", 3203 | "text": [ 3204 | "I0511 16:04:02.736120 139818736010624 basic_session_run_hooks.py:247] loss = 0.02263821, step = 6200 (47.847 sec)\n" 3205 | ] 3206 | }, 3207 | { 3208 | "name": "stdout", 3209 | "output_type": "stream", 3210 | "text": [ 3211 | "INFO:tensorflow:global_step/sec: 2.08982\n" 3212 | ] 3213 | }, 3214 | { 3215 | "name": "stderr", 3216 | "output_type": "stream", 3217 | "text": [ 3218 | "I0511 16:04:50.584459 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.08982\n" 3219 | ] 3220 | }, 3221 | { 3222 | "name": "stdout", 3223 | "output_type": "stream", 3224 | "text": [ 3225 | "INFO:tensorflow:loss = 0.080169834, step = 6300 (47.850 sec)\n" 3226 | ] 3227 | }, 3228 | { 3229 | "name": "stderr", 3230 | "output_type": "stream", 3231 | "text": [ 3232 | "I0511 16:04:50.586559 139818736010624 basic_session_run_hooks.py:247] loss = 0.080169834, step = 6300 
(47.850 sec)\n" 3233 | ] 3234 | }, 3235 | { 3236 | "name": "stdout", 3237 | "output_type": "stream", 3238 | "text": [ 3239 | "INFO:tensorflow:global_step/sec: 2.09003\n" 3240 | ] 3241 | }, 3242 | { 3243 | "name": "stderr", 3244 | "output_type": "stream", 3245 | "text": [ 3246 | "I0511 16:05:38.430660 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09003\n" 3247 | ] 3248 | }, 3249 | { 3250 | "name": "stdout", 3251 | "output_type": "stream", 3252 | "text": [ 3253 | "INFO:tensorflow:loss = 0.021685267, step = 6400 (47.846 sec)\n" 3254 | ] 3255 | }, 3256 | { 3257 | "name": "stderr", 3258 | "output_type": "stream", 3259 | "text": [ 3260 | "I0511 16:05:38.432614 139818736010624 basic_session_run_hooks.py:247] loss = 0.021685267, step = 6400 (47.846 sec)\n" 3261 | ] 3262 | }, 3263 | { 3264 | "name": "stdout", 3265 | "output_type": "stream", 3266 | "text": [ 3267 | "INFO:tensorflow:global_step/sec: 2.09024\n" 3268 | ] 3269 | }, 3270 | { 3271 | "name": "stderr", 3272 | "output_type": "stream", 3273 | "text": [ 3274 | "I0511 16:06:26.272022 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09024\n" 3275 | ] 3276 | }, 3277 | { 3278 | "name": "stdout", 3279 | "output_type": "stream", 3280 | "text": [ 3281 | "INFO:tensorflow:loss = 0.015173164, step = 6500 (47.842 sec)\n" 3282 | ] 3283 | }, 3284 | { 3285 | "name": "stderr", 3286 | "output_type": "stream", 3287 | "text": [ 3288 | "I0511 16:06:26.274212 139818736010624 basic_session_run_hooks.py:247] loss = 0.015173164, step = 6500 (47.842 sec)\n" 3289 | ] 3290 | }, 3291 | { 3292 | "name": "stdout", 3293 | "output_type": "stream", 3294 | "text": [ 3295 | "INFO:tensorflow:global_step/sec: 2.09068\n" 3296 | ] 3297 | }, 3298 | { 3299 | "name": "stderr", 3300 | "output_type": "stream", 3301 | "text": [ 3302 | "I0511 16:07:14.103396 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09068\n" 3303 | ] 3304 | }, 3305 | { 3306 | "name": "stdout", 3307 | "output_type": "stream", 3308 | "text": [ 3309 | "INFO:tensorflow:loss = 0.021070197, step = 6600 (47.831 sec)\n" 3310 | ] 3311 | }, 3312 | { 3313 | "name": "stderr", 3314 | "output_type": "stream", 3315 | "text": [ 3316 | "I0511 16:07:14.105277 139818736010624 basic_session_run_hooks.py:247] loss = 0.021070197, step = 6600 (47.831 sec)\n" 3317 | ] 3318 | }, 3319 | { 3320 | "name": "stdout", 3321 | "output_type": "stream", 3322 | "text": [ 3323 | "INFO:tensorflow:global_step/sec: 2.09091\n" 3324 | ] 3325 | }, 3326 | { 3327 | "name": "stderr", 3328 | "output_type": "stream", 3329 | "text": [ 3330 | "I0511 16:08:01.929519 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09091\n" 3331 | ] 3332 | }, 3333 | { 3334 | "name": "stdout", 3335 | "output_type": "stream", 3336 | "text": [ 3337 | "INFO:tensorflow:loss = 0.015495446, step = 6700 (47.826 sec)\n" 3338 | ] 3339 | }, 3340 | { 3341 | "name": "stderr", 3342 | "output_type": "stream", 3343 | "text": [ 3344 | "I0511 16:08:01.931626 139818736010624 basic_session_run_hooks.py:247] loss = 0.015495446, step = 6700 (47.826 sec)\n" 3345 | ] 3346 | }, 3347 | { 3348 | "name": "stdout", 3349 | "output_type": "stream", 3350 | "text": [ 3351 | "INFO:tensorflow:global_step/sec: 2.09044\n" 3352 | ] 3353 | }, 3354 | { 3355 | "name": "stderr", 3356 | "output_type": "stream", 3357 | "text": [ 3358 | "I0511 16:08:49.766277 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09044\n" 3359 | ] 3360 | }, 3361 | { 3362 | "name": "stdout", 3363 | "output_type": "stream", 3364 | "text": [ 3365 | 
"INFO:tensorflow:loss = 0.01370486, step = 6800 (47.837 sec)\n" 3366 | ] 3367 | }, 3368 | { 3369 | "name": "stderr", 3370 | "output_type": "stream", 3371 | "text": [ 3372 | "I0511 16:08:49.768640 139818736010624 basic_session_run_hooks.py:247] loss = 0.01370486, step = 6800 (47.837 sec)\n" 3373 | ] 3374 | }, 3375 | { 3376 | "name": "stdout", 3377 | "output_type": "stream", 3378 | "text": [ 3379 | "INFO:tensorflow:global_step/sec: 2.09061\n" 3380 | ] 3381 | }, 3382 | { 3383 | "name": "stderr", 3384 | "output_type": "stream", 3385 | "text": [ 3386 | "I0511 16:09:37.599245 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09061\n" 3387 | ] 3388 | }, 3389 | { 3390 | "name": "stdout", 3391 | "output_type": "stream", 3392 | "text": [ 3393 | "INFO:tensorflow:loss = 0.012676541, step = 6900 (47.832 sec)\n" 3394 | ] 3395 | }, 3396 | { 3397 | "name": "stderr", 3398 | "output_type": "stream", 3399 | "text": [ 3400 | "I0511 16:09:37.601120 139818736010624 basic_session_run_hooks.py:247] loss = 0.012676541, step = 6900 (47.832 sec)\n" 3401 | ] 3402 | }, 3403 | { 3404 | "name": "stdout", 3405 | "output_type": "stream", 3406 | "text": [ 3407 | "INFO:tensorflow:Saving checkpoints for 7000 into ../working/output/model.ckpt.\n" 3408 | ] 3409 | }, 3410 | { 3411 | "name": "stderr", 3412 | "output_type": "stream", 3413 | "text": [ 3414 | "I0511 16:10:24.958072 139818736010624 basic_session_run_hooks.py:594] Saving checkpoints for 7000 into ../working/output/model.ckpt.\n" 3415 | ] 3416 | }, 3417 | { 3418 | "name": "stdout", 3419 | "output_type": "stream", 3420 | "text": [ 3421 | "INFO:tensorflow:global_step/sec: 1.95194\n" 3422 | ] 3423 | }, 3424 | { 3425 | "name": "stderr", 3426 | "output_type": "stream", 3427 | "text": [ 3428 | "I0511 16:10:28.830286 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 1.95194\n" 3429 | ] 3430 | }, 3431 | { 3432 | "name": "stdout", 3433 | "output_type": "stream", 3434 | "text": [ 3435 | "INFO:tensorflow:loss = 0.0042999103, step = 7000 (51.231 sec)\n" 3436 | ] 3437 | }, 3438 | { 3439 | "name": "stderr", 3440 | "output_type": "stream", 3441 | "text": [ 3442 | "I0511 16:10:28.832117 139818736010624 basic_session_run_hooks.py:247] loss = 0.0042999103, step = 7000 (51.231 sec)\n" 3443 | ] 3444 | }, 3445 | { 3446 | "name": "stdout", 3447 | "output_type": "stream", 3448 | "text": [ 3449 | "INFO:tensorflow:global_step/sec: 2.08799\n" 3450 | ] 3451 | }, 3452 | { 3453 | "name": "stderr", 3454 | "output_type": "stream", 3455 | "text": [ 3456 | "I0511 16:11:16.723205 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.08799\n" 3457 | ] 3458 | }, 3459 | { 3460 | "name": "stdout", 3461 | "output_type": "stream", 3462 | "text": [ 3463 | "INFO:tensorflow:loss = 0.017462576, step = 7100 (47.893 sec)\n" 3464 | ] 3465 | }, 3466 | { 3467 | "name": "stderr", 3468 | "output_type": "stream", 3469 | "text": [ 3470 | "I0511 16:11:16.725227 139818736010624 basic_session_run_hooks.py:247] loss = 0.017462576, step = 7100 (47.893 sec)\n" 3471 | ] 3472 | }, 3473 | { 3474 | "name": "stdout", 3475 | "output_type": "stream", 3476 | "text": [ 3477 | "INFO:tensorflow:global_step/sec: 2.08987\n" 3478 | ] 3479 | }, 3480 | { 3481 | "name": "stderr", 3482 | "output_type": "stream", 3483 | "text": [ 3484 | "I0511 16:12:04.573233 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.08987\n" 3485 | ] 3486 | }, 3487 | { 3488 | "name": "stdout", 3489 | "output_type": "stream", 3490 | "text": [ 3491 | "INFO:tensorflow:loss = 0.04735687, step = 7200 (47.851 
sec)\n" 3492 | ] 3493 | }, 3494 | { 3495 | "name": "stderr", 3496 | "output_type": "stream", 3497 | "text": [ 3498 | "I0511 16:12:04.575781 139818736010624 basic_session_run_hooks.py:247] loss = 0.04735687, step = 7200 (47.851 sec)\n" 3499 | ] 3500 | }, 3501 | { 3502 | "name": "stdout", 3503 | "output_type": "stream", 3504 | "text": [ 3505 | "INFO:tensorflow:global_step/sec: 2.09047\n" 3506 | ] 3507 | }, 3508 | { 3509 | "name": "stderr", 3510 | "output_type": "stream", 3511 | "text": [ 3512 | "I0511 16:12:52.409321 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09047\n" 3513 | ] 3514 | }, 3515 | { 3516 | "name": "stdout", 3517 | "output_type": "stream", 3518 | "text": [ 3519 | "INFO:tensorflow:loss = 0.02835084, step = 7300 (47.836 sec)\n" 3520 | ] 3521 | }, 3522 | { 3523 | "name": "stderr", 3524 | "output_type": "stream", 3525 | "text": [ 3526 | "I0511 16:12:52.411336 139818736010624 basic_session_run_hooks.py:247] loss = 0.02835084, step = 7300 (47.836 sec)\n" 3527 | ] 3528 | }, 3529 | { 3530 | "name": "stdout", 3531 | "output_type": "stream", 3532 | "text": [ 3533 | "INFO:tensorflow:global_step/sec: 2.09093\n" 3534 | ] 3535 | }, 3536 | { 3537 | "name": "stderr", 3538 | "output_type": "stream", 3539 | "text": [ 3540 | "I0511 16:13:40.234976 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09093\n" 3541 | ] 3542 | }, 3543 | { 3544 | "name": "stdout", 3545 | "output_type": "stream", 3546 | "text": [ 3547 | "INFO:tensorflow:loss = 0.005693153, step = 7400 (47.826 sec)\n" 3548 | ] 3549 | }, 3550 | { 3551 | "name": "stderr", 3552 | "output_type": "stream", 3553 | "text": [ 3554 | "I0511 16:13:40.237004 139818736010624 basic_session_run_hooks.py:247] loss = 0.005693153, step = 7400 (47.826 sec)\n" 3555 | ] 3556 | }, 3557 | { 3558 | "name": "stdout", 3559 | "output_type": "stream", 3560 | "text": [ 3561 | "INFO:tensorflow:global_step/sec: 2.09046\n" 3562 | ] 3563 | }, 3564 | { 3565 | "name": "stderr", 3566 | "output_type": "stream", 3567 | "text": [ 3568 | "I0511 16:14:28.071292 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09046\n" 3569 | ] 3570 | }, 3571 | { 3572 | "name": "stdout", 3573 | "output_type": "stream", 3574 | "text": [ 3575 | "INFO:tensorflow:loss = 0.029932572, step = 7500 (47.836 sec)\n" 3576 | ] 3577 | }, 3578 | { 3579 | "name": "stderr", 3580 | "output_type": "stream", 3581 | "text": [ 3582 | "I0511 16:14:28.073252 139818736010624 basic_session_run_hooks.py:247] loss = 0.029932572, step = 7500 (47.836 sec)\n" 3583 | ] 3584 | }, 3585 | { 3586 | "name": "stdout", 3587 | "output_type": "stream", 3588 | "text": [ 3589 | "INFO:tensorflow:global_step/sec: 2.09075\n" 3590 | ] 3591 | }, 3592 | { 3593 | "name": "stderr", 3594 | "output_type": "stream", 3595 | "text": [ 3596 | "I0511 16:15:15.900980 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09075\n" 3597 | ] 3598 | }, 3599 | { 3600 | "name": "stdout", 3601 | "output_type": "stream", 3602 | "text": [ 3603 | "INFO:tensorflow:loss = 0.04810138, step = 7600 (47.830 sec)\n" 3604 | ] 3605 | }, 3606 | { 3607 | "name": "stderr", 3608 | "output_type": "stream", 3609 | "text": [ 3610 | "I0511 16:15:15.902899 139818736010624 basic_session_run_hooks.py:247] loss = 0.04810138, step = 7600 (47.830 sec)\n" 3611 | ] 3612 | }, 3613 | { 3614 | "name": "stdout", 3615 | "output_type": "stream", 3616 | "text": [ 3617 | "INFO:tensorflow:global_step/sec: 2.09012\n" 3618 | ] 3619 | }, 3620 | { 3621 | "name": "stderr", 3622 | "output_type": "stream", 3623 | "text": [ 3624 | 
"I0511 16:16:03.745193 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09012\n" 3625 | ] 3626 | }, 3627 | { 3628 | "name": "stdout", 3629 | "output_type": "stream", 3630 | "text": [ 3631 | "INFO:tensorflow:loss = 0.092232704, step = 7700 (47.844 sec)\n" 3632 | ] 3633 | }, 3634 | { 3635 | "name": "stderr", 3636 | "output_type": "stream", 3637 | "text": [ 3638 | "I0511 16:16:03.747375 139818736010624 basic_session_run_hooks.py:247] loss = 0.092232704, step = 7700 (47.844 sec)\n" 3639 | ] 3640 | }, 3641 | { 3642 | "name": "stdout", 3643 | "output_type": "stream", 3644 | "text": [ 3645 | "INFO:tensorflow:global_step/sec: 2.09036\n" 3646 | ] 3647 | }, 3648 | { 3649 | "name": "stderr", 3650 | "output_type": "stream", 3651 | "text": [ 3652 | "I0511 16:16:51.583868 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09036\n" 3653 | ] 3654 | }, 3655 | { 3656 | "name": "stdout", 3657 | "output_type": "stream", 3658 | "text": [ 3659 | "INFO:tensorflow:loss = 0.043084234, step = 7800 (47.839 sec)\n" 3660 | ] 3661 | }, 3662 | { 3663 | "name": "stderr", 3664 | "output_type": "stream", 3665 | "text": [ 3666 | "I0511 16:16:51.585914 139818736010624 basic_session_run_hooks.py:247] loss = 0.043084234, step = 7800 (47.839 sec)\n" 3667 | ] 3668 | }, 3669 | { 3670 | "name": "stdout", 3671 | "output_type": "stream", 3672 | "text": [ 3673 | "INFO:tensorflow:global_step/sec: 2.09047\n" 3674 | ] 3675 | }, 3676 | { 3677 | "name": "stderr", 3678 | "output_type": "stream", 3679 | "text": [ 3680 | "I0511 16:17:39.420003 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09047\n" 3681 | ] 3682 | }, 3683 | { 3684 | "name": "stdout", 3685 | "output_type": "stream", 3686 | "text": [ 3687 | "INFO:tensorflow:loss = 0.07278801, step = 7900 (47.836 sec)\n" 3688 | ] 3689 | }, 3690 | { 3691 | "name": "stderr", 3692 | "output_type": "stream", 3693 | "text": [ 3694 | "I0511 16:17:39.421839 139818736010624 basic_session_run_hooks.py:247] loss = 0.07278801, step = 7900 (47.836 sec)\n" 3695 | ] 3696 | }, 3697 | { 3698 | "name": "stdout", 3699 | "output_type": "stream", 3700 | "text": [ 3701 | "INFO:tensorflow:Saving checkpoints for 8000 into ../working/output/model.ckpt.\n" 3702 | ] 3703 | }, 3704 | { 3705 | "name": "stderr", 3706 | "output_type": "stream", 3707 | "text": [ 3708 | "I0511 16:18:26.771366 139818736010624 basic_session_run_hooks.py:594] Saving checkpoints for 8000 into ../working/output/model.ckpt.\n" 3709 | ] 3710 | }, 3711 | { 3712 | "name": "stdout", 3713 | "output_type": "stream", 3714 | "text": [ 3715 | "INFO:tensorflow:global_step/sec: 1.95412\n" 3716 | ] 3717 | }, 3718 | { 3719 | "name": "stderr", 3720 | "output_type": "stream", 3721 | "text": [ 3722 | "I0511 16:18:30.593876 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 1.95412\n" 3723 | ] 3724 | }, 3725 | { 3726 | "name": "stdout", 3727 | "output_type": "stream", 3728 | "text": [ 3729 | "INFO:tensorflow:loss = 0.03513243, step = 8000 (51.174 sec)\n" 3730 | ] 3731 | }, 3732 | { 3733 | "name": "stderr", 3734 | "output_type": "stream", 3735 | "text": [ 3736 | "I0511 16:18:30.595677 139818736010624 basic_session_run_hooks.py:247] loss = 0.03513243, step = 8000 (51.174 sec)\n" 3737 | ] 3738 | }, 3739 | { 3740 | "name": "stdout", 3741 | "output_type": "stream", 3742 | "text": [ 3743 | "INFO:tensorflow:global_step/sec: 2.08824\n" 3744 | ] 3745 | }, 3746 | { 3747 | "name": "stderr", 3748 | "output_type": "stream", 3749 | "text": [ 3750 | "I0511 16:19:18.481096 139818736010624 
basic_session_run_hooks.py:680] global_step/sec: 2.08824\n" 3751 | ] 3752 | }, 3753 | { 3754 | "name": "stdout", 3755 | "output_type": "stream", 3756 | "text": [ 3757 | "INFO:tensorflow:loss = 0.02359452, step = 8100 (47.887 sec)\n" 3758 | ] 3759 | }, 3760 | { 3761 | "name": "stderr", 3762 | "output_type": "stream", 3763 | "text": [ 3764 | "I0511 16:19:18.483072 139818736010624 basic_session_run_hooks.py:247] loss = 0.02359452, step = 8100 (47.887 sec)\n" 3765 | ] 3766 | }, 3767 | { 3768 | "name": "stdout", 3769 | "output_type": "stream", 3770 | "text": [ 3771 | "INFO:tensorflow:global_step/sec: 2.08996\n" 3772 | ] 3773 | }, 3774 | { 3775 | "name": "stderr", 3776 | "output_type": "stream", 3777 | "text": [ 3778 | "I0511 16:20:06.328876 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.08996\n" 3779 | ] 3780 | }, 3781 | { 3782 | "name": "stdout", 3783 | "output_type": "stream", 3784 | "text": [ 3785 | "INFO:tensorflow:loss = 0.02455688, step = 8200 (47.848 sec)\n" 3786 | ] 3787 | }, 3788 | { 3789 | "name": "stderr", 3790 | "output_type": "stream", 3791 | "text": [ 3792 | "I0511 16:20:06.331145 139818736010624 basic_session_run_hooks.py:247] loss = 0.02455688, step = 8200 (47.848 sec)\n" 3793 | ] 3794 | }, 3795 | { 3796 | "name": "stdout", 3797 | "output_type": "stream", 3798 | "text": [ 3799 | "INFO:tensorflow:global_step/sec: 2.09042\n" 3800 | ] 3801 | }, 3802 | { 3803 | "name": "stderr", 3804 | "output_type": "stream", 3805 | "text": [ 3806 | "I0511 16:20:54.166063 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09042\n" 3807 | ] 3808 | }, 3809 | { 3810 | "name": "stdout", 3811 | "output_type": "stream", 3812 | "text": [ 3813 | "INFO:tensorflow:loss = 0.015822781, step = 8300 (47.837 sec)\n" 3814 | ] 3815 | }, 3816 | { 3817 | "name": "stderr", 3818 | "output_type": "stream", 3819 | "text": [ 3820 | "I0511 16:20:54.168399 139818736010624 basic_session_run_hooks.py:247] loss = 0.015822781, step = 8300 (47.837 sec)\n" 3821 | ] 3822 | }, 3823 | { 3824 | "name": "stdout", 3825 | "output_type": "stream", 3826 | "text": [ 3827 | "INFO:tensorflow:global_step/sec: 2.09026\n" 3828 | ] 3829 | }, 3830 | { 3831 | "name": "stderr", 3832 | "output_type": "stream", 3833 | "text": [ 3834 | "I0511 16:21:42.006960 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09026\n" 3835 | ] 3836 | }, 3837 | { 3838 | "name": "stdout", 3839 | "output_type": "stream", 3840 | "text": [ 3841 | "INFO:tensorflow:loss = 0.024767285, step = 8400 (47.841 sec)\n" 3842 | ] 3843 | }, 3844 | { 3845 | "name": "stderr", 3846 | "output_type": "stream", 3847 | "text": [ 3848 | "I0511 16:21:42.009106 139818736010624 basic_session_run_hooks.py:247] loss = 0.024767285, step = 8400 (47.841 sec)\n" 3849 | ] 3850 | }, 3851 | { 3852 | "name": "stdout", 3853 | "output_type": "stream", 3854 | "text": [ 3855 | "INFO:tensorflow:global_step/sec: 2.09032\n" 3856 | ] 3857 | }, 3858 | { 3859 | "name": "stderr", 3860 | "output_type": "stream", 3861 | "text": [ 3862 | "I0511 16:22:29.846466 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09032\n" 3863 | ] 3864 | }, 3865 | { 3866 | "name": "stdout", 3867 | "output_type": "stream", 3868 | "text": [ 3869 | "INFO:tensorflow:loss = 0.022529813, step = 8500 (47.839 sec)\n" 3870 | ] 3871 | }, 3872 | { 3873 | "name": "stderr", 3874 | "output_type": "stream", 3875 | "text": [ 3876 | "I0511 16:22:29.848353 139818736010624 basic_session_run_hooks.py:247] loss = 0.022529813, step = 8500 (47.839 sec)\n" 3877 | ] 3878 | }, 3879 | { 3880 | 
"name": "stdout", 3881 | "output_type": "stream", 3882 | "text": [ 3883 | "INFO:tensorflow:global_step/sec: 2.09093\n" 3884 | ] 3885 | }, 3886 | { 3887 | "name": "stderr", 3888 | "output_type": "stream", 3889 | "text": [ 3890 | "I0511 16:23:17.672010 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09093\n" 3891 | ] 3892 | }, 3893 | { 3894 | "name": "stdout", 3895 | "output_type": "stream", 3896 | "text": [ 3897 | "INFO:tensorflow:loss = 0.045813356, step = 8600 (47.826 sec)\n" 3898 | ] 3899 | }, 3900 | { 3901 | "name": "stderr", 3902 | "output_type": "stream", 3903 | "text": [ 3904 | "I0511 16:23:17.674019 139818736010624 basic_session_run_hooks.py:247] loss = 0.045813356, step = 8600 (47.826 sec)\n" 3905 | ] 3906 | }, 3907 | { 3908 | "name": "stdout", 3909 | "output_type": "stream", 3910 | "text": [ 3911 | "INFO:tensorflow:global_step/sec: 2.09002\n" 3912 | ] 3913 | }, 3914 | { 3915 | "name": "stderr", 3916 | "output_type": "stream", 3917 | "text": [ 3918 | "I0511 16:24:05.518428 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09002\n" 3919 | ] 3920 | }, 3921 | { 3922 | "name": "stdout", 3923 | "output_type": "stream", 3924 | "text": [ 3925 | "INFO:tensorflow:loss = 0.025917647, step = 8700 (47.846 sec)\n" 3926 | ] 3927 | }, 3928 | { 3929 | "name": "stderr", 3930 | "output_type": "stream", 3931 | "text": [ 3932 | "I0511 16:24:05.520494 139818736010624 basic_session_run_hooks.py:247] loss = 0.025917647, step = 8700 (47.846 sec)\n" 3933 | ] 3934 | }, 3935 | { 3936 | "name": "stdout", 3937 | "output_type": "stream", 3938 | "text": [ 3939 | "INFO:tensorflow:global_step/sec: 2.09089\n" 3940 | ] 3941 | }, 3942 | { 3943 | "name": "stderr", 3944 | "output_type": "stream", 3945 | "text": [ 3946 | "I0511 16:24:53.344982 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09089\n" 3947 | ] 3948 | }, 3949 | { 3950 | "name": "stdout", 3951 | "output_type": "stream", 3952 | "text": [ 3953 | "INFO:tensorflow:loss = 0.010641291, step = 8800 (47.827 sec)\n" 3954 | ] 3955 | }, 3956 | { 3957 | "name": "stderr", 3958 | "output_type": "stream", 3959 | "text": [ 3960 | "I0511 16:24:53.347029 139818736010624 basic_session_run_hooks.py:247] loss = 0.010641291, step = 8800 (47.827 sec)\n" 3961 | ] 3962 | }, 3963 | { 3964 | "name": "stdout", 3965 | "output_type": "stream", 3966 | "text": [ 3967 | "INFO:tensorflow:global_step/sec: 2.09021\n" 3968 | ] 3969 | }, 3970 | { 3971 | "name": "stderr", 3972 | "output_type": "stream", 3973 | "text": [ 3974 | "I0511 16:25:41.187094 139818736010624 basic_session_run_hooks.py:680] global_step/sec: 2.09021\n" 3975 | ] 3976 | }, 3977 | { 3978 | "name": "stdout", 3979 | "output_type": "stream", 3980 | "text": [ 3981 | "INFO:tensorflow:loss = 0.023719827, step = 8900 (47.842 sec)\n" 3982 | ] 3983 | }, 3984 | { 3985 | "name": "stderr", 3986 | "output_type": "stream", 3987 | "text": [ 3988 | "I0511 16:25:41.189158 139818736010624 basic_session_run_hooks.py:247] loss = 0.023719827, step = 8900 (47.842 sec)\n" 3989 | ] 3990 | }, 3991 | { 3992 | "name": "stdout", 3993 | "output_type": "stream", 3994 | "text": [ 3995 | "INFO:tensorflow:Saving checkpoints for 8975 into ../working/output/model.ckpt.\n" 3996 | ] 3997 | }, 3998 | { 3999 | "name": "stderr", 4000 | "output_type": "stream", 4001 | "text": [ 4002 | "I0511 16:26:16.580582 139818736010624 basic_session_run_hooks.py:594] Saving checkpoints for 8975 into ../working/output/model.ckpt.\n" 4003 | ] 4004 | }, 4005 | { 4006 | "name": "stdout", 4007 | "output_type": "stream", 4008 | 
"text": [ 4009 | "INFO:tensorflow:Loss for final step: 0.076903835.\n" 4010 | ] 4011 | }, 4012 | { 4013 | "name": "stderr", 4014 | "output_type": "stream", 4015 | "text": [ 4016 | "I0511 16:26:20.658977 139818736010624 estimator.py:359] Loss for final step: 0.076903835.\n" 4017 | ] 4018 | }, 4019 | { 4020 | "name": "stdout", 4021 | "output_type": "stream", 4022 | "text": [ 4023 | "Training took time 1:13:24.503939\n" 4024 | ] 4025 | } 4026 | ], 4027 | "source": [ 4028 | "print(f'Beginning Training!')\n", 4029 | "current_time = datetime.now()\n", 4030 | "estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)\n", 4031 | "print(\"Training took time \", datetime.now() - current_time)" 4032 | ] 4033 | }, 4034 | { 4035 | "cell_type": "code", 4036 | "execution_count": 28, 4037 | "metadata": {}, 4038 | "outputs": [], 4039 | "source": [ 4040 | "eval_file = os.path.join('../working', \"eval.tf_record\")\n", 4041 | "#filename = Path(train_file)\n", 4042 | "if not os.path.exists(eval_file):\n", 4043 | " open(eval_file, 'w').close()\n", 4044 | "\n", 4045 | "eval_examples = create_examples(x_val)\n", 4046 | "file_based_convert_examples_to_features(\n", 4047 | " eval_examples, MAX_SEQ_LENGTH, tokenizer, eval_file)" 4048 | ] 4049 | }, 4050 | { 4051 | "cell_type": "code", 4052 | "execution_count": 29, 4053 | "metadata": {}, 4054 | "outputs": [ 4055 | { 4056 | "name": "stdout", 4057 | "output_type": "stream", 4058 | "text": [ 4059 | "INFO:tensorflow:Calling model_fn.\n" 4060 | ] 4061 | }, 4062 | { 4063 | "name": "stderr", 4064 | "output_type": "stream", 4065 | "text": [ 4066 | "I0511 16:26:51.432837 139818736010624 estimator.py:1111] Calling model_fn.\n" 4067 | ] 4068 | }, 4069 | { 4070 | "name": "stdout", 4071 | "output_type": "stream", 4072 | "text": [ 4073 | "INFO:tensorflow:num_labels:6;logits:Tensor(\"loss/BiasAdd:0\", shape=(?, 6), dtype=float32);labels:Tensor(\"loss/Cast:0\", shape=(?, 6), dtype=float32)\n" 4074 | ] 4075 | }, 4076 | { 4077 | "name": "stderr", 4078 | "output_type": "stream", 4079 | "text": [ 4080 | "I0511 16:26:53.388449 139818736010624 :40] num_labels:6;logits:Tensor(\"loss/BiasAdd:0\", shape=(?, 6), dtype=float32);labels:Tensor(\"loss/Cast:0\", shape=(?, 6), dtype=float32)\n" 4081 | ] 4082 | }, 4083 | { 4084 | "name": "stdout", 4085 | "output_type": "stream", 4086 | "text": [ 4087 | "INFO:tensorflow:**** Trainable Variables ****\n" 4088 | ] 4089 | }, 4090 | { 4091 | "name": "stderr", 4092 | "output_type": "stream", 4093 | "text": [ 4094 | "I0511 16:26:54.905051 139818736010624 :99] **** Trainable Variables ****\n" 4095 | ] 4096 | }, 4097 | { 4098 | "name": "stdout", 4099 | "output_type": "stream", 4100 | "text": [ 4101 | "WARNING:tensorflow:From /opt/conda/lib/python3.6/site-packages/tensorflow/python/ops/metrics_impl.py:526: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n", 4102 | "Instructions for updating:\n", 4103 | "Use tf.cast instead.\n" 4104 | ] 4105 | }, 4106 | { 4107 | "name": "stderr", 4108 | "output_type": "stream", 4109 | "text": [ 4110 | "W0511 16:26:54.940772 139818736010624 deprecation.py:323] From /opt/conda/lib/python3.6/site-packages/tensorflow/python/ops/metrics_impl.py:526: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n", 4111 | "Instructions for updating:\n", 4112 | "Use tf.cast instead.\n" 4113 | ] 4114 | }, 4115 | { 4116 | "name": "stdout", 4117 | "output_type": "stream", 4118 | "text": [ 4119 | "INFO:tensorflow:Done calling 
model_fn.\n" 4120 | ] 4121 | }, 4122 | { 4123 | "name": "stderr", 4124 | "output_type": "stream", 4125 | "text": [ 4126 | "I0511 16:26:55.559957 139818736010624 estimator.py:1113] Done calling model_fn.\n" 4127 | ] 4128 | }, 4129 | { 4130 | "name": "stdout", 4131 | "output_type": "stream", 4132 | "text": [ 4133 | "INFO:tensorflow:Starting evaluation at 2019-05-11T16:26:55Z\n" 4134 | ] 4135 | }, 4136 | { 4137 | "name": "stderr", 4138 | "output_type": "stream", 4139 | "text": [ 4140 | "I0511 16:26:55.585231 139818736010624 evaluation.py:257] Starting evaluation at 2019-05-11T16:26:55Z\n" 4141 | ] 4142 | }, 4143 | { 4144 | "name": "stdout", 4145 | "output_type": "stream", 4146 | "text": [ 4147 | "INFO:tensorflow:Graph was finalized.\n" 4148 | ] 4149 | }, 4150 | { 4151 | "name": "stderr", 4152 | "output_type": "stream", 4153 | "text": [ 4154 | "I0511 16:26:56.142339 139818736010624 monitored_session.py:222] Graph was finalized.\n" 4155 | ] 4156 | }, 4157 | { 4158 | "name": "stdout", 4159 | "output_type": "stream", 4160 | "text": [ 4161 | "WARNING:tensorflow:From /opt/conda/lib/python3.6/site-packages/tensorflow/python/training/saver.py:1266: checkpoint_exists (from tensorflow.python.training.checkpoint_management) is deprecated and will be removed in a future version.\n", 4162 | "Instructions for updating:\n", 4163 | "Use standard file APIs to check for files with this prefix.\n" 4164 | ] 4165 | }, 4166 | { 4167 | "name": "stderr", 4168 | "output_type": "stream", 4169 | "text": [ 4170 | "W0511 16:26:56.147559 139818736010624 deprecation.py:323] From /opt/conda/lib/python3.6/site-packages/tensorflow/python/training/saver.py:1266: checkpoint_exists (from tensorflow.python.training.checkpoint_management) is deprecated and will be removed in a future version.\n", 4171 | "Instructions for updating:\n", 4172 | "Use standard file APIs to check for files with this prefix.\n" 4173 | ] 4174 | }, 4175 | { 4176 | "name": "stdout", 4177 | "output_type": "stream", 4178 | "text": [ 4179 | "INFO:tensorflow:Restoring parameters from ../working/output/model.ckpt-8975\n" 4180 | ] 4181 | }, 4182 | { 4183 | "name": "stderr", 4184 | "output_type": "stream", 4185 | "text": [ 4186 | "I0511 16:26:56.153778 139818736010624 saver.py:1270] Restoring parameters from ../working/output/model.ckpt-8975\n" 4187 | ] 4188 | }, 4189 | { 4190 | "name": "stdout", 4191 | "output_type": "stream", 4192 | "text": [ 4193 | "INFO:tensorflow:Running local_init_op.\n" 4194 | ] 4195 | }, 4196 | { 4197 | "name": "stderr", 4198 | "output_type": "stream", 4199 | "text": [ 4200 | "I0511 16:26:56.949195 139818736010624 session_manager.py:491] Running local_init_op.\n" 4201 | ] 4202 | }, 4203 | { 4204 | "name": "stdout", 4205 | "output_type": "stream", 4206 | "text": [ 4207 | "INFO:tensorflow:Done running local_init_op.\n" 4208 | ] 4209 | }, 4210 | { 4211 | "name": "stderr", 4212 | "output_type": "stream", 4213 | "text": [ 4214 | "I0511 16:26:57.027371 139818736010624 session_manager.py:493] Done running local_init_op.\n" 4215 | ] 4216 | }, 4217 | { 4218 | "name": "stdout", 4219 | "output_type": "stream", 4220 | "text": [ 4221 | "INFO:tensorflow:Finished evaluation at 2019-05-11-16:28:14\n" 4222 | ] 4223 | }, 4224 | { 4225 | "name": "stderr", 4226 | "output_type": "stream", 4227 | "text": [ 4228 | "I0511 16:28:14.318654 139818736010624 evaluation.py:277] Finished evaluation at 2019-05-11-16:28:14\n" 4229 | ] 4230 | }, 4231 | { 4232 | "name": "stdout", 4233 | "output_type": "stream", 4234 | "text": [ 4235 | "INFO:tensorflow:Saving dict for global 
step 8975: 0 = 0.9837439, 1 = 0.99188155, 2 = 0.98707324, 3 = 0.9862899, 4 = 0.987142, 5 = 0.9940405, eval_loss = 0.03836409, global_step = 8975, loss = 0.03834793\n" 4236 | ] 4237 | }, 4238 | { 4239 | "name": "stderr", 4240 | "output_type": "stream", 4241 | "text": [ 4242 | "I0511 16:28:14.320686 139818736010624 estimator.py:1979] Saving dict for global step 8975: 0 = 0.9837439, 1 = 0.99188155, 2 = 0.98707324, 3 = 0.9862899, 4 = 0.987142, 5 = 0.9940405, eval_loss = 0.03836409, global_step = 8975, loss = 0.03834793\n" 4243 | ] 4244 | }, 4245 | { 4246 | "name": "stdout", 4247 | "output_type": "stream", 4248 | "text": [ 4249 | "INFO:tensorflow:Saving 'checkpoint_path' summary for global step 8975: ../working/output/model.ckpt-8975\n" 4250 | ] 4251 | }, 4252 | { 4253 | "name": "stderr", 4254 | "output_type": "stream", 4255 | "text": [ 4256 | "I0511 16:28:14.971035 139818736010624 estimator.py:2039] Saving 'checkpoint_path' summary for global step 8975: ../working/output/model.ckpt-8975\n" 4257 | ] 4258 | } 4259 | ], 4260 | "source": [ 4261 | "# This tells the estimator to run through the entire set.\n", 4262 | "eval_steps = None\n", 4263 | "\n", 4264 | "eval_drop_remainder = False\n", 4265 | "eval_input_fn = file_based_input_fn_builder(\n", 4266 | " input_file=eval_file,\n", 4267 | " seq_length=MAX_SEQ_LENGTH,\n", 4268 | " is_training=False,\n", 4269 | " drop_remainder=False)\n", 4270 | "\n", 4271 | "result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)" 4272 | ] 4273 | }, 4274 | { 4275 | "cell_type": "markdown", 4276 | "metadata": { 4277 | "_kg_hide-input": false 4278 | }, 4279 | "source": [ 4280 | "#x_eval = train[100000:]\n", 4281 | "# Use the InputExample class from BERT's run_classifier code to create examples from the data\n", 4282 | "eval_examples = create_examples(x_val)\n", 4283 | "\n", 4284 | "eval_features = convert_examples_to_features(\n", 4285 | " eval_examples, MAX_SEQ_LENGTH, tokenizer)" 4286 | ] 4287 | }, 4288 | { 4289 | "cell_type": "markdown", 4290 | "metadata": {}, 4291 | "source": [ 4292 | "# This tells the estimator to run through the entire set.\n", 4293 | "eval_steps = None\n", 4294 | "\n", 4295 | "eval_drop_remainder = False\n", 4296 | "eval_input_fn = input_fn_builder(\n", 4297 | " features=eval_features,\n", 4298 | " seq_length=MAX_SEQ_LENGTH,\n", 4299 | " is_training=False,\n", 4300 | " drop_remainder=eval_drop_remainder)\n", 4301 | "\n", 4302 | "result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)" 4303 | ] 4304 | }, 4305 | { 4306 | "cell_type": "code", 4307 | "execution_count": 30, 4308 | "metadata": {}, 4309 | "outputs": [ 4310 | { 4311 | "name": "stdout", 4312 | "output_type": "stream", 4313 | "text": [ 4314 | "INFO:tensorflow:***** Eval results *****\n" 4315 | ] 4316 | }, 4317 | { 4318 | "name": "stderr", 4319 | "output_type": "stream", 4320 | "text": [ 4321 | "I0511 16:28:14.994762 139818736010624 :3] ***** Eval results *****\n" 4322 | ] 4323 | }, 4324 | { 4325 | "name": "stdout", 4326 | "output_type": "stream", 4327 | "text": [ 4328 | "INFO:tensorflow: 0 = 0.9837439\n" 4329 | ] 4330 | }, 4331 | { 4332 | "name": "stderr", 4333 | "output_type": "stream", 4334 | "text": [ 4335 | "I0511 16:28:14.997767 139818736010624 :5] 0 = 0.9837439\n" 4336 | ] 4337 | }, 4338 | { 4339 | "name": "stdout", 4340 | "output_type": "stream", 4341 | "text": [ 4342 | "INFO:tensorflow: 1 = 0.99188155\n" 4343 | ] 4344 | }, 4345 | { 4346 | "name": "stderr", 4347 | "output_type": "stream", 4348 | "text": [ 4349 | "I0511 16:28:14.999450 139818736010624 :5] 
1 = 0.99188155\n" 4350 | ] 4351 | }, 4352 | { 4353 | "name": "stdout", 4354 | "output_type": "stream", 4355 | "text": [ 4356 | "INFO:tensorflow: 2 = 0.98707324\n" 4357 | ] 4358 | }, 4359 | { 4360 | "name": "stderr", 4361 | "output_type": "stream", 4362 | "text": [ 4363 | "I0511 16:28:15.001837 139818736010624 :5] 2 = 0.98707324\n" 4364 | ] 4365 | }, 4366 | { 4367 | "name": "stdout", 4368 | "output_type": "stream", 4369 | "text": [ 4370 | "INFO:tensorflow: 3 = 0.9862899\n" 4371 | ] 4372 | }, 4373 | { 4374 | "name": "stderr", 4375 | "output_type": "stream", 4376 | "text": [ 4377 | "I0511 16:28:15.003152 139818736010624 :5] 3 = 0.9862899\n" 4378 | ] 4379 | }, 4380 | { 4381 | "name": "stdout", 4382 | "output_type": "stream", 4383 | "text": [ 4384 | "INFO:tensorflow: 4 = 0.987142\n" 4385 | ] 4386 | }, 4387 | { 4388 | "name": "stderr", 4389 | "output_type": "stream", 4390 | "text": [ 4391 | "I0511 16:28:15.004544 139818736010624 :5] 4 = 0.987142\n" 4392 | ] 4393 | }, 4394 | { 4395 | "name": "stdout", 4396 | "output_type": "stream", 4397 | "text": [ 4398 | "INFO:tensorflow: 5 = 0.9940405\n" 4399 | ] 4400 | }, 4401 | { 4402 | "name": "stderr", 4403 | "output_type": "stream", 4404 | "text": [ 4405 | "I0511 16:28:15.005962 139818736010624 :5] 5 = 0.9940405\n" 4406 | ] 4407 | }, 4408 | { 4409 | "name": "stdout", 4410 | "output_type": "stream", 4411 | "text": [ 4412 | "INFO:tensorflow: eval_loss = 0.03836409\n" 4413 | ] 4414 | }, 4415 | { 4416 | "name": "stderr", 4417 | "output_type": "stream", 4418 | "text": [ 4419 | "I0511 16:28:15.007424 139818736010624 :5] eval_loss = 0.03836409\n" 4420 | ] 4421 | }, 4422 | { 4423 | "name": "stdout", 4424 | "output_type": "stream", 4425 | "text": [ 4426 | "INFO:tensorflow: global_step = 8975\n" 4427 | ] 4428 | }, 4429 | { 4430 | "name": "stderr", 4431 | "output_type": "stream", 4432 | "text": [ 4433 | "I0511 16:28:15.009004 139818736010624 :5] global_step = 8975\n" 4434 | ] 4435 | }, 4436 | { 4437 | "name": "stdout", 4438 | "output_type": "stream", 4439 | "text": [ 4440 | "INFO:tensorflow: loss = 0.03834793\n" 4441 | ] 4442 | }, 4443 | { 4444 | "name": "stderr", 4445 | "output_type": "stream", 4446 | "text": [ 4447 | "I0511 16:28:15.010478 139818736010624 :5] loss = 0.03834793\n" 4448 | ] 4449 | } 4450 | ], 4451 | "source": [ 4452 | "output_eval_file = os.path.join(\"../working\", \"eval_results.txt\")\n", 4453 | "with tf.gfile.GFile(output_eval_file, \"w\") as writer:\n", 4454 | " tf.logging.info(\"***** Eval results *****\")\n", 4455 | " for key in sorted(result.keys()):\n", 4456 | " tf.logging.info(\" %s = %s\", key, str(result[key]))\n", 4457 | " writer.write(\"%s = %s\\n\" % (key, str(result[key])))" 4458 | ] 4459 | }, 4460 | { 4461 | "cell_type": "code", 4462 | "execution_count": 31, 4463 | "metadata": {}, 4464 | "outputs": [], 4465 | "source": [ 4466 | "x_test = test#[125000:140000]\n", 4467 | "x_test = x_test.reset_index(drop=True)\n", 4468 | "\n", 4469 | "test_file = os.path.join('../working', \"test.tf_record\")\n", 4470 | "#filename = Path(train_file)\n", 4471 | "if not os.path.exists(test_file):\n", 4472 | " open(test_file, 'w').close()\n", 4473 | "\n", 4474 | "test_examples = create_examples(x_test, False)\n", 4475 | "file_based_convert_examples_to_features(\n", 4476 | " test_examples, MAX_SEQ_LENGTH, tokenizer, test_file)" 4477 | ] 4478 | }, 4479 | { 4480 | "cell_type": "code", 4481 | "execution_count": 32, 4482 | "metadata": {}, 4483 | "outputs": [], 4484 | "source": [ 4485 | "predict_input_fn = file_based_input_fn_builder(\n", 4486 | " 
input_file=test_file,\n", 4487 | " seq_length=MAX_SEQ_LENGTH,\n", 4488 | " is_training=False,\n", 4489 | " drop_remainder=False)" 4490 | ] 4491 | }, 4492 | { 4493 | "cell_type": "code", 4494 | "execution_count": 33, 4495 | "metadata": {}, 4496 | "outputs": [ 4497 | { 4498 | "name": "stdout", 4499 | "output_type": "stream", 4500 | "text": [ 4501 | "Begin predictions!\n", 4502 | "Predicting took time 0:00:00.000069\n" 4503 | ] 4504 | } 4505 | ], 4506 | "source": [ 4507 | "print('Begin predictions!')\n", 4508 | "current_time = datetime.now()\n", 4509 | "predictions = estimator.predict(predict_input_fn)\n", 4510 | "print(\"Predicting took time \", datetime.now() - current_time)" 4511 | ] 4512 | }, 4513 | { 4514 | "cell_type": "markdown", 4515 | "metadata": {}, 4516 | "source": [ 4517 | "x_test = test[125000:140000]\n", 4518 | "x_test = x_test.reset_index(drop=True)\n", 4519 | "predict_examples = create_examples(x_test,False)" 4520 | ] 4521 | }, 4522 | { 4523 | "cell_type": "markdown", 4524 | "metadata": {}, 4525 | "source": [ 4526 | "test_features = convert_examples_to_features(predict_examples, MAX_SEQ_LENGTH, tokenizer)" 4527 | ] 4528 | }, 4529 | { 4530 | "cell_type": "markdown", 4531 | "metadata": {}, 4532 | "source": [ 4533 | "print(f'Beginning Training!')\n", 4534 | "current_time = datetime.now()\n", 4535 | "\n", 4536 | "predict_input_fn = input_fn_builder(features=test_features, seq_length=MAX_SEQ_LENGTH, is_training=False, drop_remainder=False)\n", 4537 | "predictions = estimator.predict(predict_input_fn)\n", 4538 | "print(\"Training took time \", datetime.now() - current_time)" 4539 | ] 4540 | }, 4541 | { 4542 | "cell_type": "code", 4543 | "execution_count": 34, 4544 | "metadata": {}, 4545 | "outputs": [], 4546 | "source": [ 4547 | "def create_output(predictions):\n", 4548 | " probabilities = []\n", 4549 | " for (i, prediction) in enumerate(predictions):\n", 4550 | " preds = prediction[\"probabilities\"]\n", 4551 | " probabilities.append(preds)\n", 4552 | " dff = pd.DataFrame(probabilities)\n", 4553 | " dff.columns = LABEL_COLUMNS\n", 4554 | " \n", 4555 | " return dff\n", 4556 | " " 4557 | ] 4558 | }, 4559 | { 4560 | "cell_type": "code", 4561 | "execution_count": 35, 4562 | "metadata": {}, 4563 | "outputs": [ 4564 | { 4565 | "name": "stdout", 4566 | "output_type": "stream", 4567 | "text": [ 4568 | "INFO:tensorflow:Calling model_fn.\n" 4569 | ] 4570 | }, 4571 | { 4572 | "name": "stderr", 4573 | "output_type": "stream", 4574 | "text": [ 4575 | "I0511 16:32:32.682826 139818736010624 estimator.py:1111] Calling model_fn.\n" 4576 | ] 4577 | }, 4578 | { 4579 | "name": "stdout", 4580 | "output_type": "stream", 4581 | "text": [ 4582 | "INFO:tensorflow:num_labels:6;logits:Tensor(\"loss/BiasAdd:0\", shape=(?, 6), dtype=float32);labels:Tensor(\"loss/Cast:0\", shape=(?, 6), dtype=float32)\n" 4583 | ] 4584 | }, 4585 | { 4586 | "name": "stderr", 4587 | "output_type": "stream", 4588 | "text": [ 4589 | "I0511 16:32:34.628746 139818736010624 :40] num_labels:6;logits:Tensor(\"loss/BiasAdd:0\", shape=(?, 6), dtype=float32);labels:Tensor(\"loss/Cast:0\", shape=(?, 6), dtype=float32)\n" 4590 | ] 4591 | }, 4592 | { 4593 | "name": "stdout", 4594 | "output_type": "stream", 4595 | "text": [ 4596 | "INFO:tensorflow:**** Trainable Variables ****\n" 4597 | ] 4598 | }, 4599 | { 4600 | "name": "stderr", 4601 | "output_type": "stream", 4602 | "text": [ 4603 | "I0511 16:32:35.597517 139818736010624 :99] **** Trainable Variables ****\n" 4604 | ] 4605 | }, 4606 | { 4607 | "name": "stdout", 4608 | "output_type": 
"stream", 4609 | "text": [ 4610 | "mode: infer probabilities: Tensor(\"loss/Sigmoid:0\", shape=(?, 6), dtype=float32)\n", 4611 | "INFO:tensorflow:Done calling model_fn.\n" 4612 | ] 4613 | }, 4614 | { 4615 | "name": "stderr", 4616 | "output_type": "stream", 4617 | "text": [ 4618 | "I0511 16:32:35.602658 139818736010624 estimator.py:1113] Done calling model_fn.\n" 4619 | ] 4620 | }, 4621 | { 4622 | "name": "stdout", 4623 | "output_type": "stream", 4624 | "text": [ 4625 | "INFO:tensorflow:Graph was finalized.\n" 4626 | ] 4627 | }, 4628 | { 4629 | "name": "stderr", 4630 | "output_type": "stream", 4631 | "text": [ 4632 | "I0511 16:32:36.138572 139818736010624 monitored_session.py:222] Graph was finalized.\n" 4633 | ] 4634 | }, 4635 | { 4636 | "name": "stdout", 4637 | "output_type": "stream", 4638 | "text": [ 4639 | "INFO:tensorflow:Restoring parameters from ../working/output/model.ckpt-8975\n" 4640 | ] 4641 | }, 4642 | { 4643 | "name": "stderr", 4644 | "output_type": "stream", 4645 | "text": [ 4646 | "I0511 16:32:36.150778 139818736010624 saver.py:1270] Restoring parameters from ../working/output/model.ckpt-8975\n" 4647 | ] 4648 | }, 4649 | { 4650 | "name": "stdout", 4651 | "output_type": "stream", 4652 | "text": [ 4653 | "INFO:tensorflow:Running local_init_op.\n" 4654 | ] 4655 | }, 4656 | { 4657 | "name": "stderr", 4658 | "output_type": "stream", 4659 | "text": [ 4660 | "I0511 16:32:36.915272 139818736010624 session_manager.py:491] Running local_init_op.\n" 4661 | ] 4662 | }, 4663 | { 4664 | "name": "stdout", 4665 | "output_type": "stream", 4666 | "text": [ 4667 | "INFO:tensorflow:Done running local_init_op.\n" 4668 | ] 4669 | }, 4670 | { 4671 | "name": "stderr", 4672 | "output_type": "stream", 4673 | "text": [ 4674 | "I0511 16:32:36.961946 139818736010624 session_manager.py:493] Done running local_init_op.\n" 4675 | ] 4676 | } 4677 | ], 4678 | "source": [ 4679 | "output_df = create_output(predictions)\n", 4680 | "merged_df = pd.concat([x_test, output_df], axis=1)\n", 4681 | "submission = merged_df.drop(['comment_text'], axis=1)\n", 4682 | "submission.to_csv(\"sample_submission.csv\", index=False)" 4683 | ] 4684 | }, 4685 | { 4686 | "cell_type": "code", 4687 | "execution_count": 36, 4688 | "metadata": {}, 4689 | "outputs": [ 4690 | { 4691 | "data": { 4692 | "text/html": [ 4693 | "
\n", 4694 | "\n", 4707 | "\n", 4708 | " \n", 4709 | " \n", 4710 | " \n", 4711 | " \n", 4712 | " \n", 4713 | " \n", 4714 | " \n", 4715 | " \n", 4716 | " \n", 4717 | " \n", 4718 | " \n", 4719 | " \n", 4720 | " \n", 4721 | " \n", 4722 | " \n", 4723 | " \n", 4724 | " \n", 4725 | " \n", 4726 | " \n", 4727 | " \n", 4728 | " \n", 4729 | " \n", 4730 | " \n", 4731 | " \n", 4732 | " \n", 4733 | " \n", 4734 | " \n", 4735 | " \n", 4736 | " \n", 4737 | " \n", 4738 | " \n", 4739 | " \n", 4740 | " \n", 4741 | " \n", 4742 | " \n", 4743 | " \n", 4744 | " \n", 4745 | " \n", 4746 | " \n", 4747 | " \n", 4748 | " \n", 4749 | " \n", 4750 | " \n", 4751 | " \n", 4752 | " \n", 4753 | " \n", 4754 | " \n", 4755 | " \n", 4756 | " \n", 4757 | " \n", 4758 | " \n", 4759 | " \n", 4760 | " \n", 4761 | " \n", 4762 | " \n", 4763 | " \n", 4764 | " \n", 4765 | " \n", 4766 | " \n", 4767 | " \n", 4768 | " \n", 4769 | " \n", 4770 | " \n", 4771 | " \n", 4772 | "
idtoxicsevere_toxicobscenethreatinsultidentity_hate
153159fffcd0960ee309b50.7511690.0031160.4233330.0011160.0350530.001065
153160fffd7a9a6eb32c160.0012700.0002110.0005580.0002000.0004370.000246
153161fffda9e8d6fafa9e0.0007760.0002660.0004920.0002350.0004060.000292
153162fffe8f1340a79fc20.0008970.0002670.0004410.0002820.0004080.000378
153163ffffce3fb183ee800.9672680.0213330.8616420.0042550.6380430.007017
\n", 4773 | "
" 4774 | ], 4775 | "text/plain": [ 4776 | " id toxic ... insult identity_hate\n", 4777 | "153159 fffcd0960ee309b5 0.751169 ... 0.035053 0.001065\n", 4778 | "153160 fffd7a9a6eb32c16 0.001270 ... 0.000437 0.000246\n", 4779 | "153161 fffda9e8d6fafa9e 0.000776 ... 0.000406 0.000292\n", 4780 | "153162 fffe8f1340a79fc2 0.000897 ... 0.000408 0.000378\n", 4781 | "153163 ffffce3fb183ee80 0.967268 ... 0.638043 0.007017\n", 4782 | "\n", 4783 | "[5 rows x 7 columns]" 4784 | ] 4785 | }, 4786 | "execution_count": 36, 4787 | "metadata": {}, 4788 | "output_type": "execute_result" 4789 | } 4790 | ], 4791 | "source": [ 4792 | "submission.tail()" 4793 | ] 4794 | }, 4795 | { 4796 | "cell_type": "markdown", 4797 | "metadata": {}, 4798 | "source": [ 4799 | "submission1 = pd.read_csv('sample_submission1.csv')\n", 4800 | "submission2 = pd.read_csv('sample_submission2.csv')\n", 4801 | "submission3 = pd.read_csv('sample_submission3.csv')\n", 4802 | "\n", 4803 | "submission = pd.concat([submission1,submission2,submission3])\n", 4804 | "\n", 4805 | "submission.to_csv(\"sample_submission.csv\", index=False)" 4806 | ] 4807 | }, 4808 | { 4809 | "cell_type": "markdown", 4810 | "metadata": {}, 4811 | "source": [ 4812 | "submission1.shape, submission2.shape, submission3.shape, submission.shape," 4813 | ] 4814 | } 4815 | ], 4816 | "metadata": { 4817 | "kernelspec": { 4818 | "display_name": "Python 3", 4819 | "language": "python", 4820 | "name": "python3" 4821 | }, 4822 | "language_info": { 4823 | "codemirror_mode": { 4824 | "name": "ipython", 4825 | "version": 3 4826 | }, 4827 | "file_extension": ".py", 4828 | "mimetype": "text/x-python", 4829 | "name": "python", 4830 | "nbconvert_exporter": "python", 4831 | "pygments_lexer": "ipython3", 4832 | "version": "3.6.4" 4833 | } 4834 | }, 4835 | "nbformat": 4, 4836 | "nbformat_minor": 1 4837 | } 4838 | -------------------------------------------------------------------------------- /main/try2.ipynb: -------------------------------------------------------------------------------- 1 | {"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"name":"try2.ipynb","provenance":[],"collapsed_sections":[]},"kernelspec":{"name":"python3","display_name":"Python 3"},"accelerator":"GPU"},"cells":[{"cell_type":"code","metadata":{"id":"isUS9yO1rTcb","colab_type":"code","outputId":"c8fb8a0e-35aa-455a-dbbf-c7c8056fbbc2","executionInfo":{"status":"ok","timestamp":1570527125711,"user_tz":-330,"elapsed":17771,"user":{"displayName":"ANSHUL WADHAWAN","photoUrl":"","userId":"00642966863791990714"}},"colab":{"base_uri":"https://localhost:8080/","height":121}},"source":["from google.colab import drive\n","drive.mount('/content/drive')"],"execution_count":0,"outputs":[{"output_type":"stream","text":["Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3Aietf%3Awg%3Aoauth%3A2.0%3Aoob&scope=email%20https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdocs.test%20https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdrive%20https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdrive.photos.readonly%20https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fpeopleapi.readonly&response_type=code\n","\n","Enter your authorization code:\n","··········\n","Mounted at /content/drive\n"],"name":"stdout"}]},{"cell_type":"code","metadata":{"id":"UXjozudAriEe","colab_type":"code","outputId":"e549bcd0-4e24-4b29-c206-5f1d378bde46","executionInfo":{"status":"ok","timestamp":1570527125713,"user_tz":-330,"elapsed":17762,"user":{"displayName":"ANSHUL 
WADHAWAN","photoUrl":"","userId":"00642966863791990714"}},"colab":{"base_uri":"https://localhost:8080/","height":34}},"source":["%cd drive/My Drive/ToxicCommentDetection/main"],"execution_count":0,"outputs":[{"output_type":"stream","text":["/content/drive/My Drive/ToxicCommentDetection/main\n"],"name":"stdout"}]},{"cell_type":"code","metadata":{"id":"gmsSMwU1q8VG","colab_type":"code","outputId":"11e49d09-1b53-4cd0-a927-3d5a1055e4cd","executionInfo":{"status":"ok","timestamp":1570527125714,"user_tz":-330,"elapsed":17752,"user":{"displayName":"ANSHUL WADHAWAN","photoUrl":"","userId":"00642966863791990714"}},"colab":{"base_uri":"https://localhost:8080/","height":34}},"source":["# This Python 3 environment comes with many helpful analytics libraries installed\n","# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n","# For example, here's several helpful packages to load in \n","\n","import numpy as np # linear algebra\n","import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n","\n","# Input data files are available in the \"../input/\" directory.\n","# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n","\n","import os\n","print(os.listdir(\"../input\"))\n","\n","# Any results you write to the current directory are saved as output."],"execution_count":0,"outputs":[{"output_type":"stream","text":["['uncased-l12-h768-a12', 'jigsaw-toxic-comment-classification-challenge']\n"],"name":"stdout"}]},{"cell_type":"code","metadata":{"id":"d2VWfg93q-pE","colab_type":"code","colab":{}},"source":["from fastai.text import *"],"execution_count":0,"outputs":[]},{"cell_type":"code","metadata":{"id":"F1sxgmrlrucf","colab_type":"code","outputId":"08cbc5ff-86a5-4e1a-ba3d-67739dc512e4","executionInfo":{"status":"ok","timestamp":1570527132100,"user_tz":-330,"elapsed":24114,"user":{"displayName":"ANSHUL WADHAWAN","photoUrl":"","userId":"00642966863791990714"}},"colab":{"base_uri":"https://localhost:8080/","height":195}},"source":["train = pd.read_csv(\"../input/jigsaw-toxic-comment-classification-challenge/train.csv\")\n","train.head()"],"execution_count":0,"outputs":[{"output_type":"execute_result","data":{"text/html":["
\n","\n","\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
idcomment_texttoxicsevere_toxicobscenethreatinsultidentity_hate
00000997932d777bfExplanation\\nWhy the edits made under my usern...000000
1000103f0d9cfb60fD'aww! He matches this background colour I'm s...000000
2000113f07ec002fdHey man, I'm really not trying to edit war. It...000000
30001b41b1c6bb37e\"\\nMore\\nI can't make any real suggestions on ...000000
40001d958c54c6e35You, sir, are my hero. Any chance you remember...000000
\n","
"],"text/plain":[" id ... identity_hate\n","0 0000997932d777bf ... 0\n","1 000103f0d9cfb60f ... 0\n","2 000113f07ec002fd ... 0\n","3 0001b41b1c6bb37e ... 0\n","4 0001d958c54c6e35 ... 0\n","\n","[5 rows x 8 columns]"]},"metadata":{"tags":[]},"execution_count":5}]},{"cell_type":"code","metadata":{"id":"_0qfgRGZr3Bx","colab_type":"code","colab":{}},"source":["train['comment_text'] = train['comment_text'].str.replace('([“”¨«»®´·º½¾¿¡§£₤‘’])', '')"],"execution_count":0,"outputs":[]},{"cell_type":"code","metadata":{"id":"Mu1jVI-kr6Ma","colab_type":"code","colab":{}},"source":["test = pd.read_csv(\"../input/jigsaw-toxic-comment-classification-challenge/test.csv\")\n","\n","test_id = test['id']\n","test['comment_text'] = test['comment_text'].str.replace('([“”¨«»®´·º½¾¿¡§£₤‘’])', '')"],"execution_count":0,"outputs":[]},{"cell_type":"code","metadata":{"id":"03CFsJ65sDzK","colab_type":"code","outputId":"611a4910-d026-4f5a-8b72-5ebd17d972d7","executionInfo":{"status":"ok","timestamp":1570527319079,"user_tz":-330,"elapsed":352,"user":{"displayName":"ANSHUL WADHAWAN","photoUrl":"","userId":"00642966863791990714"}},"colab":{"base_uri":"https://localhost:8080/","height":279}},"source":["data = (TextList.from_df(train, cols='comment_text')\n"," .split_by_rand_pct(0.2)\n"," .label_for_lm() \n"," .databunch(bs=48))\n","data.show_batch()"],"execution_count":0,"outputs":[{"output_type":"display_data","data":{"text/html":["\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
idx    text
0. xxmaj he seems to care more about the formatting than the actual info . xxbos \" \\n xxmaj more \\n i ca n't make any real suggestions on improvement - i wondered if the section statistics should be later on , or a subsection of \" \" types of accidents \" \" xxup -i think the references may need tidying so that they are all in the
1which future issues will be delivered to you , or unsubscribe from this notification by following the link . xxmaj thank you . \\n \\n xxmaj this is an automated delivery by \\n \\n xxmaj article moves \\n xxmaj could you explain your move on xxmaj xxunk mm xxup xxunk field xxunk ? xxmaj in general it is good practice to propose a move on talk first
2are not contradictions . xxmaj they are an essential feature of the study , specified and discussed in the paper . xxmaj there is no contradiction here . xxmaj out it goes . ( ) \\n \\n xxmaj contrary evidence and xxup xxunk tests \\n \\n xxmaj oohashi et al . pictured human brains when the test persons were either exposed or not exposed to ultrasonic stimulus .
3( it is located at the very top of any xxmaj wikipedia page when you are logged in ) , and then selecting \" \" xxmaj image \" \" from the dropdown box . xxmaj note that any non - free media lacking such an explanation will be deleted one week after they have been uploaded , as described on criteria for speedy deletion . xxmaj if you have any
4claims to have originated the idea of recently observed xxunk in 1969 in the paper \" \" xxmaj destruction of xxmaj xxunk in xxmaj xxunk 4he xxmaj films and the xxmaj prediction of a xxmaj new xxmaj crystalline xxmaj phase of 4he with xxmaj bose - xxmaj einstein xxmaj condensation \" \" in xxmaj physics xxmaj letters , xxmaj vol xxup xxunk , number 5 , 3 , , xxmaj
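The xxbos, xxmaj and xxunk markers in the batch above are fastai tokenizer conventions (beginning of a sample, "next token was capitalised", out-of-vocabulary token), not words from the comments. For orientation, the language-model stage that the surrounding cells implement can be read as one short pipeline; the sketch below only restates the notebook's own fastai v1 calls, with the databunch renamed data_lm for clarity.

```python
# Condensed recap of the ULMFiT language-model stage (fastai v1), as in this notebook.
from fastai.text import *   # provides TextList, language_model_learner, AWD_LSTM
import pandas as pd

train = pd.read_csv("../input/jigsaw-toxic-comment-classification-challenge/train.csv")

# Self-supervised data: every comment is both input and next-word target,
# so no label columns are needed at this stage.
data_lm = (TextList.from_df(train, cols='comment_text')
           .split_by_rand_pct(0.2)      # hold out 20% for validation
           .label_for_lm()              # next-token prediction targets
           .databunch(bs=48))

learn = language_model_learner(data_lm, AWD_LSTM, drop_mult=0.3)
learn.fit_one_cycle(1, 1e-2, moms=(0.8, 0.7))   # learning rate chosen from lr_find
learn.save_encoder('fine_tuned_enc')            # encoder reused by the classifier later
```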
"],"text/plain":[""]},"metadata":{"tags":[]}}]},{"cell_type":"code","metadata":{"id":"ul4zannPsHtP","colab_type":"code","colab":{}},"source":["learn = language_model_learner(data,AWD_LSTM, drop_mult=0.3)"],"execution_count":0,"outputs":[]},{"cell_type":"code","metadata":{"id":"DYbXDm-ssJmL","colab_type":"code","outputId":"a7309bba-7a27-466b-db06-7806bbc48ba9","executionInfo":{"status":"ok","timestamp":1570527386033,"user_tz":-330,"elapsed":278005,"user":{"displayName":"ANSHUL WADHAWAN","photoUrl":"","userId":"00642966863791990714"}},"colab":{"base_uri":"https://localhost:8080/","height":34}},"source":["learn.lr_find()"],"execution_count":0,"outputs":[{"output_type":"display_data","data":{"text/html":[""],"text/plain":[""]},"metadata":{"tags":[]}},{"output_type":"stream","text":["LR Finder is complete, type {learner_name}.recorder.plot() to see the graph.\n"],"name":"stdout"}]},{"cell_type":"code","metadata":{"id":"NgL1P48xsLOR","colab_type":"code","outputId":"85003f7c-7695-40a4-bcbe-d8e7d41a5944","executionInfo":{"status":"ok","timestamp":1570527386721,"user_tz":-330,"elapsed":278688,"user":{"displayName":"ANSHUL WADHAWAN","photoUrl":"","userId":"00642966863791990714"}},"colab":{"base_uri":"https://localhost:8080/","height":283}},"source":["learn.recorder.plot()\n"],"execution_count":0,"outputs":[{"output_type":"display_data","data":{"image/png":"iVBORw0KGgoAAAANSUhEUgAAAYUAAAEKCAYAAAD9xUlFAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAIABJREFUeJzt3XmYHPV95/H3t485NYc0h24k0IUB\nIyzGGIzBEGIS/JAQJyQP7GZj8O6yOI4dNrb38W72sbNO4sTxZrPYbEywdx07sfGusXEwizlC0JoY\nMIhLgEBIQhLSoBmN5r76/u4fXdNqxiNpJE31MfN5PU89U11VXfXtUqu+/TvqV+buiIiIAETKHYCI\niFQOJQURESlQUhARkQIlBRERKVBSEBGRAiUFEREpUFIQEZECJQURESlQUhARkYJYuQM4We3t7b52\n7dpyhyEiUlWeffbZI+7ecaLtqi4prF27lm3btpU7DBGRqmJm+2ezXWjVR2a2ycxeKJpGzOy2adtc\nYWbDRdt8Nqx4RETkxEIrKbj7TuACADOLAt3AvTNs+ri7XxtWHCIiMnulami+Ctjj7rMqvoiISHmU\nKincANx9jHWXmNmLZvZjMzu3RPGIiMgMQk8KZlYD/CrwvRlWPwescffNwFeAHx5jH7eY2TYz29bX\n1xdesCIiC1wpSgrXAM+5e+/0Fe4+4u5jwfwDQNzM2mfY7i5373L3ro6OE/aoEhGRU1SKpHAjx6g6\nMrNlZmbB/EVBPP0liElERGYQalIws0bgA8APipbdama3Bi+vB142sxeBLwM3uJ4PKiLyc27/x108\nviv86vNQb15z93GgbdqyO4vm7wDuCDMGEZFql8s5tz/6Or97xXou2xBuFbrGPhIRqXAjiTQ5h8WN\nNaEfS0lBRKTCDYynAFjSGA/9WEoKIiIVbnAinxQWN6ikICKy4A2MpwFYouojEREZHFdJQUREAgMT\nU20KSgoiIgve4HiKmliEhppo6MdSUhARqXAD4ymWNNQQDAARKiUFEZEKNziRKsk9CqCkICJS8QbG\nUyW5RwGUFEREKt7gRLokPY9ASUFEpOLlSwpKCiIiC14mm2N4UiUFEREBhiZLdzczKCmIiFS0wt3M\nSgoiIlIYIVXVRyIiUhghVV1SRUSklCOkgpKCiEhFK+WzFEBJQUSkog2Mp2ioiVIXD38wPFBSEBGp\naIPjqZKVEkBJQUSkog1MlO5uZlBSEBGpaIPjpRshFZQUREQq2sBEiiUNpemOCkoKIiIVbXA8rZKC\niIhAMpNlLJkp2d3MoKQgIlKxhibyN66ppCAiIkfHPVJSEBGRwgipqj4SEZGBCZUUREQkcPRZCvOg\nS6qZbTKzF4qmETO7bdo2ZmZfNrPdZrbdzLaEFY+ISLWZGiG1lNVHsbB27O47gQsAzCwKdAP3Ttvs\nGmBDML0H+GrwV0RkwRucSNFUFyMeLV2lTqmOdBWwx933T1t+HfAtz3sKaDWz5SWKSUSkog2Ml3bc\nIyhdUrgBuHuG5SuBA0WvDwbLREQWvMGJ0o6QCiVICmZWA/wq8L3T2MctZrbNzLb19fXNXXAiIhVs\nvpYUrgGec/feGdZ1A6uLXq8Klr2Nu9/l7l3u3tXR0RFSmCIilaXUz1KA0iSFG5m56gjgPuB3gl5I\nFwPD7n6oBDGJiFS8/LMUStcdFULsfQRgZo3AB4B/V7TsVgB3vxN4APggsBuYAG4OMx4RkWoxmcqS\nSOdKOu4RhJwU3H0caJu27M6ieQc+FmYMIiLVqHA38zysPhIRkZN09G5mJQURkQWvHCOkgpKCiEhF\nGpwo/QipoKQgIlKRVFIQEZGCwfEUZtBSX9ouqUoKIiIVqH88RWt9nGjESnpcJQURkQrUO5Kks6mu\n5MdVUhARqUC9IwmWtigpiIgI0DOSYFlzbcmPq6QgIlJh0tkcR8aSLGtWSUFEZMHrG03ijqqPREQk\nX3UEqKQgIiJwOEgKS5UURESkZzgoKaj6SEREekaSxKNW8mGzQUlBRKTi9I4k6GyqI1Liu5lBSUFE\npOL0DCdYWoZ7FEBJQUSk4vSOJMrSngBKCiIiFcXd6RlJlKXnESgpiIhUlNFkholUtiz3KICSgohI\nRe
[matplotlib figure: learn.recorder.plot() output, the LR Finder's loss vs. learning rate curve]","text/plain":["
"]},"metadata":{"tags":[]}}]},{"cell_type":"code","metadata":{"id":"0Y-bAlBgsMz0","colab_type":"code","outputId":"b8ef56a5-9014-458d-b10a-948d326f0f76","executionInfo":{"status":"ok","timestamp":1570529747790,"user_tz":-330,"elapsed":2639751,"user":{"displayName":"ANSHUL WADHAWAN","photoUrl":"","userId":"00642966863791990714"}},"colab":{"base_uri":"https://localhost:8080/","height":77}},"source":["learn.fit_one_cycle(1, 1e-2, moms=(0.8,0.7))"],"execution_count":0,"outputs":[{"output_type":"display_data","data":{"text/html":["\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
epoch  train_loss  valid_loss  accuracy  time
0      4.091016    3.872453    0.322813  39:21
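In this table, accuracy is the language model's next-token prediction accuracy on the held-out 20%, and valid_loss is the mean per-token cross-entropy in nats, so it converts directly to perplexity:

```python
# Perplexity of the fine-tuned language model from its validation loss.
import math

valid_loss = 3.872453                                # valid_loss reported above
print(f"perplexity = {math.exp(valid_loss):.1f}")    # ~48.1
```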
"],"text/plain":[""]},"metadata":{"tags":[]}}]},{"cell_type":"code","metadata":{"id":"qEenHFUYsOXJ","colab_type":"code","colab":{}},"source":["learn.save_encoder('fine_tuned_enc')"],"execution_count":0,"outputs":[]},{"cell_type":"code","metadata":{"id":"6WqIMgJvsQXM","colab_type":"code","colab":{}},"source":["label_cols = ['toxic', 'severe_toxic' , 'obscene' , 'threat' , 'insult' , 'identity_hate']"],"execution_count":0,"outputs":[]},{"cell_type":"code","metadata":{"id":"vTRSrBlf5yr8","colab_type":"code","colab":{}},"source":["label_cols = ['Science and Engineering',\n"," 'Music and Audio',\n"," 'Hardware',\n"," 'Platforms',\n"," 'Consumer Electronics',\n"," 'Content and Publishing',\n"," 'Data and Analytics',\n"," 'Financial Services',\n"," 'Hardware',\n"," 'Internet Services',\n"," 'Lending and Investments',\n"," 'Media and Entertainment',\n"," 'Mobile',\n"," 'Music and Audio',\n"," 'Platforms',\n"," 'Sales and Marketing',\n"," 'Science and Engineering',\n"," 'Software',\n"," 'Video']"],"execution_count":0,"outputs":[]},{"cell_type":"code","metadata":{"id":"0ZJ9Y7tzsSQl","colab_type":"code","outputId":"621204af-b102-489f-bcb1-d1813fd26bb1","executionInfo":{"status":"ok","timestamp":1570530110093,"user_tz":-330,"elapsed":299,"user":{"displayName":"ANSHUL WADHAWAN","photoUrl":"","userId":"00642966863791990714"}},"colab":{"base_uri":"https://localhost:8080/","height":279}},"source":["test_datalist = TextList.from_df(test, cols='comment_text', vocab=data.vocab)\n","\n","data_clas = (TextList.from_df(train, cols='comment_text', vocab=data.vocab)\n"," .split_by_rand_pct(0.2)\n"," .label_from_df(cols= label_cols , classes=label_cols)\n"," .add_test(test_datalist)\n"," .databunch(bs=32))\n","\n","data_clas.show_batch()"],"execution_count":0,"outputs":[{"output_type":"display_data","data":{"text/html":["\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
text | target
xxbos i xxup am xxup an xxup loser xxrep 4 = i xxup am xxup an xxup loser = = = = i xxup am xxup an xxup loser xxrep 4 = i xxup am xxup an xxup loser = = = = i xxup am xxup an xxup loser xxrep 4 = i xxup am xxup an xxup loser = = = = i xxup am xxup an xxup | toxic
xxbos xxmaj so i was all like who farted , than i realized i need to be banned . xxmaj so i was all like who farted , than i realized i need to be banned . xxmaj so i was all like who farted , than i realized i need to be banned . xxmaj so i was all like who farted , than i realized i need to | toxic
xxbos xxup suck xxup my xxup lick xxup you xxup can xxup suck xxup my xxup lick xxup you xxup can xxup suck xxup my xxup lick xxup you xxup can xxup suck xxup my xxup lick xxup you xxup can xxup suck xxup my xxup lick xxup you xxup can xxup suck xxup my xxup lick xxup you xxup can xxup suck xxup my xxup lick xxup you xxup | toxic
xxbos a xxmaj short xxmaj history of xxmaj republican xxmaj sinn xxmaj féin \\n \\n xxmaj in 1902 , xxmaj arthur xxmaj griffith , xxmaj editor of the xxmaj united xxmaj irishman , presented to the third annual convention of xxmaj xxunk na xxunk the most revolutionary political idea since the fall of xxmaj parnell ; it was that the elected xxmaj irish xxmaj members of xxmaj parliament should
xxbos \" xxunk = keep ( and possibly merge ) } } \\n \\n xxmaj page xxmaj creation \\n xxmaj so , i started the page . xxmaj this is probably going to get heated , some people accusing me of saying the xxmaj course is a \" \" hoax \" \" and all that . xxmaj but i think this is an issue worthy of a page
"],"text/plain":[""]},"metadata":{"tags":[]}}]},{"cell_type":"code","metadata":{"id":"pKj4_hnZsUTx","colab_type":"code","colab":{}},"source":["learn_classifier = text_classifier_learner(data_clas, AWD_LSTM, drop_mult=0.5)\n","learn_classifier.load_encoder('fine_tuned_enc')\n","learn_classifier.freeze()"],"execution_count":0,"outputs":[]},{"cell_type":"code","metadata":{"id":"wPb446P-sV2g","colab_type":"code","outputId":"f76f92f2-504b-4ed3-b180-2d9bbacde8b3","executionInfo":{"status":"ok","timestamp":1570530153964,"user_tz":-330,"elapsed":3045893,"user":{"displayName":"ANSHUL WADHAWAN","photoUrl":"","userId":"00642966863791990714"}},"colab":{"base_uri":"https://localhost:8080/","height":34}},"source":["learn_classifier.lr_find()"],"execution_count":0,"outputs":[{"output_type":"display_data","data":{"text/html":[""],"text/plain":[""]},"metadata":{"tags":[]}},{"output_type":"stream","text":["LR Finder is complete, type {learner_name}.recorder.plot() to see the graph.\n"],"name":"stdout"}]},{"cell_type":"code","metadata":{"id":"d6YFN5X5sXWq","colab_type":"code","outputId":"5f0e8521-da86-4c14-b29f-aa2e0e09d04c","executionInfo":{"status":"ok","timestamp":1570530154514,"user_tz":-330,"elapsed":3046437,"user":{"displayName":"ANSHUL WADHAWAN","photoUrl":"","userId":"00642966863791990714"}},"colab":{"base_uri":"https://localhost:8080/","height":283}},"source":["learn_classifier.recorder.plot()\n"],"execution_count":0,"outputs":[{"output_type":"display_data","data":{"image/png":"iVBORw0KGgoAAAANSUhEUgAAAYsAAAEKCAYAAADjDHn2AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAIABJREFUeJzt3Xl8VfWd//HX596bnSQECYvsIKC4\nYYloS126WWpnpHaZQn/OaJ3qdKY6tdrO2Gl/bUe7ju1P245d1J9dnFrH6q8tdrCoVWxrRQFFERQE\nXEgUiCwJkOVun98f9wSvMckNkHNvbvJ+Ph7nwbnf8z33fBKS+8l3Od9j7o6IiEhfIoUOQEREBj8l\nCxERyUnJQkREclKyEBGRnJQsREQkJyULERHJSclCRERyUrIQEZGclCxERCSnWKEDGCijR4/2qVOn\nFjoMEZGismbNmtfcvT5XvSGTLKZOncrq1asLHYaISFExs5f6U0/dUCIikpOShYiI5KRkISIiOYWa\nLMxsoZltNLPNZnZ1D8cnm9lDZvakmT1tZucG5VPNrN3M1gbbj8KMU0RE+hbaALeZRYEbgfcAjcAq\nM1vq7huyqn0RuNPdf2hmc4BlwNTg2BZ3nxtWfCIi0n9htizmA5vdfau7x4E7gEXd6jhQE+zXAq+E\nGI+IiBymMJPFBGBb1uvGoCzbV4ALzKyRTKvi8qxj04LuqYfN7IyeLmBml5rZajNb3dzcPIChi4hI\ntkIPcC8BfuruE4FzgdvMLAK8Ckx291OAK4Hbzaym+8nufpO7N7h7Q319zntKRESGnLvXNHL7Yy+H\nfp0wk0UTMCnr9cSgLNvfA3cCuPujQDkw2t073X1XUL4G2ALMCjFWEZGi9Ju1Tdy1ZlvuikcozGSx\nCphpZtPMrBRYDCztVudl4F0AZnYcmWTRbGb1wQA5ZjYdmAlsDTFWEZGiFE+mKYmG30kU2mwod0+a\n2WXAciAK3Oru683sGmC1uy8FrgJuNrPPkBnsvsjd3czOBK4xswSQBj7p7rvDilVEpFjFU2lGlIW/\nclOoV3D3ZWQGrrPLvpS1vwFY0MN5dwN3hxmbiMhQkEjlp2VR6AFuERE5AomkU6pkISIifUmk0pTE\nlCxERKQPnck0JVEL/TpKFiIiRSyRSqsbSkRE+pZIpSlVN5SIiPQlkXLNhhIRkb7l66Y8JQsRkSLl\n7sTVDSUiIn1Jph2AUs2GEhGR3iRSaQB1Q4mISO/iSSULERHJIR60LDRmISIivUqkusYslCxERKQX\nB7uhYhrgFhGRXmiAW0REcupqWagbSkREenWwZaEBbhER6Y1aFiIiklPXbCiNWYiISK8Sus9CRERy\niR+cDVXkU2fNbKGZbTSzzWZ2dQ/HJ5vZQ2b2pJk9bWbnZh37fHDeRjN7b5hxiogUo3yOWcTCemMz\niwI3Au8BGoFVZrbU3TdkVfsicKe7/9DM5gDLgKnB/mLgeOBo4AEzm+XuqbDiFREpNkOlG2o+sNnd\nt7p7HLgDWNStjgM1wX4t8Eqwvwi4w9073f0FYHPwfiIiEhgqN+VNALZlvW4MyrJ9BbjAzBrJtCou\nP4RzRUSGtfgwmg21BPipu08EzgVuM7N+x2Rml5rZajNb3dzcHFqQIiKD0VC5z6IJmJT1emJQlu3v\ngTsB3P1RoBwY3c9zcfeb3L3B3Rvq6+sHMHQRkcFvqIxZrAJmmtk0MyslM2C9tFudl4F3AZjZcWSS\nRXNQb7GZlZnZNGAm8HiIsYqIFJ1EMn9TZ0ObDeXuSTO7DFgORIFb3X29mV0DrHb3pcBVwM1m9hky\ng90XubsD683sTmADkAQ+pZlQIiJvFE+lMYNopIiTBYC7LyMzcJ1d9qWs/Q3Agl7O/RrwtTDjExEp\nZvFUmpJoBLMivylPRETCk0g6ZXkY3AYlCxGRopVIpfOyPDkoWYiIFK14Mp2XwW1QshARKVqJVDov\n02ZByUJEpGh1DXDng5KFiEiRSqTSebl7G5
[base64 PNG data omitted: learning-rate finder curve from learn_classifier.recorder.plot(), loss vs. learning rate]\n","text/plain":["
"]},"metadata":{"tags":[]}}]},{"cell_type":"code","metadata":{"id":"RUQHqlimsZUJ","colab_type":"code","outputId":"75c2a5fa-c0ad-49a3-9a0f-2e35b6fb5626","executionInfo":{"status":"ok","timestamp":1570531220496,"user_tz":-330,"elapsed":4112415,"user":{"displayName":"ANSHUL WADHAWAN","photoUrl":"","userId":"00642966863791990714"}},"colab":{"base_uri":"https://localhost:8080/","height":77}},"source":["learn_classifier.fit_one_cycle(1, 2e-2, moms=(0.8,0.7))"],"execution_count":0,"outputs":[{"output_type":"display_data","data":{"text/html":["\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
epoch  train_loss  valid_loss  time
0      0.092378    0.071467    17:46
"],"text/plain":[""]},"metadata":{"tags":[]}}]},{"cell_type":"code","metadata":{"id":"6rP-PmbdsbQ4","colab_type":"code","outputId":"7202948d-0589-4c2f-d386-247e113526a1","executionInfo":{"status":"ok","timestamp":1570532499477,"user_tz":-330,"elapsed":5391392,"user":{"displayName":"ANSHUL WADHAWAN","photoUrl":"","userId":"00642966863791990714"}},"colab":{"base_uri":"https://localhost:8080/","height":77}},"source":["learn_classifier.freeze_to(-2)\n","learn_classifier.fit_one_cycle(1, slice(1e-2/(2.6**4),1e-2), moms=(0.8,0.7))"],"execution_count":0,"outputs":[{"output_type":"display_data","data":{"text/html":["\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
epoch  train_loss  valid_loss  time
0      0.058282    0.110831    21:18
"],"text/plain":[""]},"metadata":{"tags":[]}}]},{"cell_type":"code","metadata":{"id":"M79yRnJJsdGp","colab_type":"code","outputId":"32fe0dfa-56cf-4aa8-d10d-4feb32d1a510","executionInfo":{"status":"ok","timestamp":1570534189102,"user_tz":-330,"elapsed":7081012,"user":{"displayName":"ANSHUL WADHAWAN","photoUrl":"","userId":"00642966863791990714"}},"colab":{"base_uri":"https://localhost:8080/","height":77}},"source":["learn_classifier.freeze_to(-3)\n","learn_classifier.x(1, slice(5e-3/(2.6**4),5e-3), moms=(0.8,0.7))"],"execution_count":0,"outputs":[{"output_type":"display_data","data":{"text/html":["\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
epoch  train_loss  valid_loss  time
0      0.050159    0.171168    28:09
"],"text/plain":[""]},"metadata":{"tags":[]}}]},{"cell_type":"code","metadata":{"id":"YMeLgGvese58","colab_type":"code","outputId":"a986ae0b-975c-4343-dcfe-555dd03360bd","executionInfo":{"status":"ok","timestamp":1570534193323,"user_tz":-330,"elapsed":7085228,"user":{"displayName":"ANSHUL WADHAWAN","photoUrl":"","userId":"00642966863791990714"}},"colab":{"base_uri":"https://localhost:8080/","height":346}},"source":["learn_classifier.show_results()\n"],"execution_count":0,"outputs":[{"output_type":"display_data","data":{"text/html":["\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
text | target | prediction
xxbos xxmaj take that ! \n \n xxup in xxup the xxup ass xxup in xxup the xxup ass xxup in xxup the xxup ass xxup in xxup the xxup ass xxup in xxup the xxup ass xxup in xxup the xxup ass xxup in xxup the xxup ass xxup in xxup the xxup ass xxup in xxup the xxup ass xxup in xxup the xxup ass xxup in | toxic;severe_toxic;obscene | toxic;severe_toxic;obscene;insult
xxbos xxup fuck xxup you xxup all ! ! xxup fuck xxup you xxup all ! ! xxup fuck xxup you xxup all ! ! xxup fuck xxup you xxup all ! ! xxup fuck xxup you xxup all ! ! xxup fuck xxup you xxup all ! ! xxup fuck xxup you xxup all ! ! xxup fuck xxup you xxup all ! ! xxup fuck xxup you xxup | toxic;severe_toxic;obscene;insult | toxic;severe_toxic;obscene;insult
xxbos xxup suck xxup my xxup cock d xxup suck xxup my xxup cock d xxup suck xxup my xxup cock d xxup suck xxup my xxup cock d xxup suck xxup my xxup cock d xxup suck xxup my xxup cock d xxup suck xxup my xxup cock d xxup suck xxup my xxup cock d xxup suck xxup my xxup cock d xxup suck xxup my xxup cock | toxic;severe_toxic;obscene;insult | toxic;severe_toxic;obscene;insult
xxbos xxup dust xxup filter , xxup if xxup you xxup rvv xxup it xxup again ! i xxup will xxup fuckin xxup kill xxup you ! i xxup will xxup block xxup you , i xxup will xxup blank xxup your xxup talk xxup page , i xxup will xxup steal xxup your xxup password , i xxup will xxup take xxup over xxup you ! i xxup will | toxic;severe_toxic;obscene;threat;insult | toxic
xxbos xxup in xxup the xxup name xxup of xxup xxunk xxup the xxup name xxup of xxup xxunk xxup the xxup name xxup of xxup xxunk xxup the xxup name xxup of xxup xxunk xxup the xxup name xxup of xxup xxunk xxup the xxup name xxup of xxup xxunk xxup the xxup name xxup of xxup xxunk xxup the xxup name xxup of xxup xxunk xxup the xxup
"],"text/plain":[""]},"metadata":{"tags":[]}}]},{"cell_type":"code","metadata":{"id":"1R9EWMO6sg8L","colab_type":"code","colab":{}},"source":["preds, target = learn_classifier.get_preds(DatasetType.Test, ordered=True)\n","labels = preds.numpy()\n"],"execution_count":0,"outputs":[]},{"cell_type":"code","metadata":{"id":"j0CDbTN0sjTJ","colab_type":"code","outputId":"01321aed-cbd9-47b2-a07e-aac23bf59055","executionInfo":{"status":"ok","timestamp":1570534817091,"user_tz":-330,"elapsed":7708971,"user":{"displayName":"ANSHUL WADHAWAN","photoUrl":"","userId":"00642966863791990714"}},"colab":{"base_uri":"https://localhost:8080/","height":168}},"source":["labels"],"execution_count":0,"outputs":[{"output_type":"execute_result","data":{"text/plain":["array([[9.883897e-01, 2.623884e-01, 9.571094e-01, 3.149130e-01, 8.940867e-01, 1.850217e-01],\n"," [3.467371e-03, 1.417886e-04, 1.162976e-03, 9.286470e-05, 6.408545e-04, 2.221961e-04],\n"," [3.883871e-02, 2.430319e-03, 2.090406e-02, 7.950535e-04, 8.026772e-03, 2.779136e-03],\n"," [3.973726e-04, 5.405182e-06, 1.415407e-04, 3.100197e-05, 6.837760e-05, 1.746514e-05],\n"," ...,\n"," [7.170928e-02, 2.071401e-03, 1.676683e-02, 1.095338e-03, 1.220249e-02, 3.308463e-03],\n"," [9.681711e-04, 2.607051e-05, 1.463362e-04, 2.938509e-05, 1.122276e-04, 1.300497e-04],\n"," [8.367985e-03, 1.441929e-04, 9.057145e-04, 1.736582e-04, 9.263147e-04, 1.776602e-03],\n"," [7.008537e-01, 9.681507e-03, 3.033271e-01, 1.412554e-03, 1.950892e-01, 3.190502e-03]], dtype=float32)"]},"metadata":{"tags":[]},"execution_count":24}]},{"cell_type":"code","metadata":{"id":"qoDJ8Fihsk-h","colab_type":"code","outputId":"96e4a76a-5aad-4a53-9886-2e1913b67385","executionInfo":{"status":"ok","timestamp":1570534818799,"user_tz":-330,"elapsed":7710672,"user":{"displayName":"ANSHUL WADHAWAN","photoUrl":"","userId":"00642966863791990714"}},"colab":{"base_uri":"https://localhost:8080/","height":195}},"source":["submission = pd.DataFrame({'id': test_id})\n","submission = pd.concat([submission, pd.DataFrame(preds.numpy(), columns = label_cols)], axis=1)\n","\n","submission.to_csv('submission.csv', index=False)\n","submission.head()"],"execution_count":0,"outputs":[{"output_type":"execute_result","data":{"text/html":["
\n","\n","\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
   id                toxic     severe_toxic  obscene   threat    insult    identity_hate
0  00001cee341fdb12  0.988390  0.262388      0.957109  0.314913  0.894087  0.185022
1  0000247867823ef7  0.003467  0.000142      0.001163  0.000093  0.000641  0.000222
2  00013b17ad220c46  0.038839  0.002430      0.020904  0.000795  0.008027  0.002779
3  00017563c3f7919a  0.000397  0.000005      0.000142  0.000031  0.000068  0.000017
4  00017695ad8997eb  0.016469  0.000717      0.005396  0.000791  0.003223  0.000434
\n","
"],"text/plain":[" id toxic severe_toxic ... threat insult identity_hate\n","0 00001cee341fdb12 0.988390 0.262388 ... 0.314913 0.894087 0.185022\n","1 0000247867823ef7 0.003467 0.000142 ... 0.000093 0.000641 0.000222\n","2 00013b17ad220c46 0.038839 0.002430 ... 0.000795 0.008027 0.002779\n","3 00017563c3f7919a 0.000397 0.000005 ... 0.000031 0.000068 0.000017\n","4 00017695ad8997eb 0.016469 0.000717 ... 0.000791 0.003223 0.000434\n","\n","[5 rows x 7 columns]"]},"metadata":{"tags":[]},"execution_count":25}]}]} --------------------------------------------------------------------------------