├── .gitignore
├── LICENSE
├── README.md
├── assignments
│   ├── Chat
│   │   ├── .gitignore
│   │   ├── chat.py
│   │   ├── dialog-ger.md
│   │   ├── german-aixml-2.md
│   │   ├── german-aixml.md
│   │   ├── readme.md
│   │   ├── requirements.txt
│   │   └── tools.py
│   ├── Embeddings
│   │   ├── 1-baseline.py
│   │   ├── 2-preprocessing.py
│   │   ├── assginment_text_classifier.ipynb
│   │   └── data
│   │       ├── custom-emo.txt
│   │       ├── germeval2018.test.txt
│   │       └── germeval2018.training.txt
│   ├── RNNs
│   │   ├── classifying names with rnns.ipynb
│   │   ├── shakespear-lstm.py
│   │   └── vanishing-gradients.ipynb
│   ├── germeval2018.test.txt
│   ├── germeval2018.training.txt
│   ├── goethe.txt
│   ├── sensor-data.csv
│   ├── tiny_goethe.ipynb
│   └── transformer
│       ├── nlp_2_transformer_offensive_language_classification.ipynb
│       └── nlp_3_neural_search.ipynb
├── hello-python
│   ├── Hello PyTorch.ipynb
│   ├── Short Python Intro.html
│   └── Short Python Intro.ipynb
└── slides
    ├── Deep Learning - Hello Python.pdf
    ├── Deep NLP 1 Recurrent Neural Networks.pdf
    ├── Deep NLP 2 Word Vectors and Transfer Learning.pdf
    └── Deep NLP 3 Transforners and Attention.pdf
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # logs
7 | */logs/*
8 |
9 | # C extensions
10 | *.so
11 |
12 | # Distribution / packaging
13 | .Python
14 | build/
15 | develop-eggs/
16 | dist/
17 | downloads/
18 | eggs/
19 | .eggs/
20 | lib/
21 | lib64/
22 | parts/
23 | sdist/
24 | var/
25 | wheels/
26 | *.egg-info/
27 | .installed.cfg
28 | *.egg
29 | MANIFEST
30 |
31 | # PyInstaller
32 | # Usually these files are written by a python script from a template
33 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
34 | *.manifest
35 | *.spec
36 |
37 | # Installer logs
38 | pip-log.txt
39 | pip-delete-this-directory.txt
40 |
41 | # Unit test / coverage reports
42 | htmlcov/
43 | .tox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | .hypothesis/
51 | .pytest_cache/
52 |
53 | # Translations
54 | *.mo
55 | *.pot
56 |
57 | # Django stuff:
58 | *.log
59 | local_settings.py
60 | db.sqlite3
61 |
62 | # Flask stuff:
63 | instance/
64 | .webassets-cache
65 |
66 | # Scrapy stuff:
67 | .scrapy
68 |
69 | # Sphinx documentation
70 | docs/_build/
71 |
72 | # PyBuilder
73 | target/
74 |
75 | # Jupyter Notebook
76 | .ipynb_checkpoints
77 |
78 | # pyenv
79 | .python-version
80 |
81 | # celery beat schedule file
82 | celerybeat-schedule
83 |
84 | # SageMath parsed files
85 | *.sage.py
86 |
87 | # Environments
88 | .env
89 | .venv
90 | env/
91 | venv/
92 | ENV/
93 | env.bak/
94 | venv.bak/
95 |
96 | # Spyder project settings
97 | .spyderproject
98 | .spyproject
99 |
100 | # Rope project settings
101 | .ropeproject
102 |
103 | # mkdocs documentation
104 | /site
105 |
106 | # mypy
107 | .mypy_cache/
108 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2019 Oliver Guhr
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # HTW Dresden NLP Lecture
2 |
3 | This repository contains NLP-related material for the I833 Deep Learning course at the University of Applied Sciences Dresden.
4 |
5 | You can find all the CNN examples in this [repository](https://github.com/tneumann/htw_cnn_lecture)
6 |
7 |
8 | ## Hello Python - a brief introduction
9 |
10 | [Slides](./slides/Deep%20Learning%20-%20Hello%20Python.pdf)
11 |
12 | [Hello Python Notebook](./hello-python/Short%20Python%20Intro.ipynb)
13 |
14 | [Hello PyTorch Notebook](./hello-python/Hello%20PyTorch.ipynb)
15 |
16 | ## Introduction to RNNs
17 |
18 | [Slides](./slides/Deep%20NLP%201%20Recurrent%20Neural%20Networks.pdf)
19 |
20 | [Vanishing Gradients Notebook](./assignments/RNNs/vanishing-gradients.ipynb)
21 |
22 |
23 | [Classifying Names with a Character-Level RNN](./assignments/RNNs/classifying%20names%20with%20rnns.ipynb)
24 |
25 | ## Word Vectors and Transfer Learning
26 |
27 | [Slides](./slides/Deep%20NLP%202%20Word%20Vectors%20and%20Transfer%20Learning.pdf)
28 |
29 | Code for a simple offensive language classifier for German texts.
30 |
31 | [Offensive Language Classification](./assignments/transformer/nlp_2_transformer_offensive_language_classification.ipynb)
32 |
33 |
34 |
35 | ## Transformers and Attention
36 |
37 | [Slides](./slides/Deep%20NLP%203%20Transforners%20and%20Attention.pdf)
38 |
39 | Implement a neural search using Transformers. A rough sketch of the idea follows below.
40 |
41 | [Neural Search](./assignments/transformer/nlp_3_neural_search.ipynb)
42 |
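43 | The core idea, as a minimal sketch (the names `doc_vecs` and `query_vec` are placeholders for a matrix of document embeddings and a query embedding, both produced by the same model, e.g. an `encode` function like the one built in the chat assignment):
44 | 
45 | ```python
46 | import numpy as np
47 | 
48 | # rank all documents by cosine similarity to the query
49 | scores = doc_vecs @ query_vec / (
50 |     np.linalg.norm(doc_vecs, axis=1) * np.linalg.norm(query_vec))
51 | top5 = np.argsort(scores)[::-1][:5]  # indices of the five best matches
52 | ```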
--------------------------------------------------------------------------------
/assignments/Chat/.gitignore:
--------------------------------------------------------------------------------
1 | chat-final.py
2 | *.pickle
3 | __pycache__
--------------------------------------------------------------------------------
/assignments/Chat/chat.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from termcolor import colored
3 | from typing import List
4 | import random
5 | import pickle
6 | import os
7 | import re, locale
8 | import torch
9 | from transformers import AutoModel, AutoTokenizer
10 | from tqdm import tqdm
11 | import tools
12 | from tools import Pattern
13 |
14 |
15 | tokenizer = AutoTokenizer.from_pretrained("bert-base-german-cased")
16 | model = AutoModel.from_pretrained("bert-base-german-cased")
17 |
18 | def encode(text):
19 | tokens = [tokenizer.encode(text, add_special_tokens=False)]
20 |
21 | input_ids = torch.tensor(tokens)
22 | with torch.no_grad():
23 | last_hidden_state, _ = model(input_ids) # shape: (1, number of tokens, 768)
24 |
25 | # todo: implement a pooling strategy to generate a document vector
26 | # tip: take a look at the slides from our last lesson
27 | document_vector = None # <- your code :)
28 |
29 | assert(np.shape(document_vector) == (768,)) # <- the output should have this shape
30 | return document_vector
31 |
32 | def load_chat_texts():
33 | patterns = tools.load_file('dialog-ger.md')
34 | patterns.extend(tools.load_file('german-aixml.md'))
35 | patterns.extend(tools.load_file('german-aixml-2.md'))
36 | return patterns
37 |
38 | def encode_chat_text_to_vectors(patterns):
39 | data = tools.load_if_exists("tmp.pickle")
40 | if data is not None:
41 | doc, doc_vecs, response_patterns = data
42 | else:
43 | doc = list()
44 | doc_vecs = list()
45 | response_patterns = list()
46 | print("encoding sentences")
47 | for i,pattern in tqdm(enumerate(patterns), total=len(patterns)):
48 | doc.extend(pattern.input)
49 | vectors = [encode(line) for line in pattern.input]
50 | doc_vecs.extend(vectors)
51 | response_patterns.extend([pattern]*len(pattern.input))
52 | tools.save("tmp.pickle",[doc, doc_vecs, response_patterns])
53 | return doc, np.array(doc_vecs), response_patterns
54 |
55 |
56 | if __name__ == '__main__':
57 |
58 | # 1. load chat texts
59 | texts = load_chat_texts()
60 | # 2. convert texts into vectors
61 | doc, vectors, response_patterns = encode_chat_text_to_vectors(texts)
62 |
63 | topk = 5 # number of top scoring answers to print
64 | while True:
65 | query = input(colored('you: ', 'green'))
66 | query = query.strip().lower()
67 | query = re.sub(r'[^\w\s]', '', query) # remove non-text chars
68 | query_vec = encode(query)
69 |
70 | # 3. compare the user input to the stored vectors using the dot product or cosine similarity
71 | score = None # <- todo: write code to score the output here
72 | topk_idx = None # <- todo: create a list with the [topk] document ids here
73 |
74 | # 4. Output the answers with the highest score
75 | print('top %d texts similar to "%s"' % (topk, colored(query, 'green')))
76 | for idx in topk_idx:
77 | matched_pattern = doc[idx]
78 | print('> %s\t%s' % (colored('%.1f' % score[idx], 'cyan'), colored(matched_pattern, 'yellow')))
79 |
80 | response_text = random.choice(response_patterns[topk_idx[0]].response)
81 | print(colored("robo: "+response_text+"\n","blue"))
82 |
83 | # 5. Create a chatbot startup :)
84 |
--------------------------------------------------------------------------------
/assignments/Chat/dialog-ger.md:
--------------------------------------------------------------------------------
1 | ## Greeting
2 | * hallo
3 | * hi
4 | * hallo tesaro
5 | * hallo roboter
6 | * hallo robo
7 | * hallo du da
8 | * hallöchen
9 | * Moin
10 | - Hallo
11 | - Guten Tag
12 |
13 | ## trivia 1
14 | * wie gehts dir
15 | - Mir gehts super!
16 | - Heute ist mein Tag.
17 |
18 | ## trivia 2
19 | * erzähle mir einen witz
20 | - Das willst du nicht wirklich, glaub mir.
21 |
22 | ## trivia
23 | * wirklich
24 | * echt jetzt
25 | - Absolut.
26 |
27 | ## gefühl
28 | * mir gehts schlecht
29 | * mir geht es schlecht
30 | * mir geht es nicht gut
31 | * mir ist nicht gut
32 | - Das ist nicht schön. Kann ich etwas für dich tun?
33 |
34 | ## studium
35 | * mein tutor nervt
36 | - Echt? Warum?
37 |
38 | ## Super cool
39 | * das fetzt
40 | * das ist toll
41 | * das ist großartig
42 | * das ist grandios
43 | * awesome
44 | - Ja oder?
45 |
46 | ## elsterglanz
47 | * juhu juri
48 | * huhu juri
49 | - Juri ist ein echter Russe!
--------------------------------------------------------------------------------
/assignments/Chat/readme.md:
--------------------------------------------------------------------------------
1 | # Bert Chatter
2 |
3 |
4 | ## Instructions
5 |
6 | 1. Start by installing the requirements:
7 |
8 | ```
9 | pip3 install -r requirements.txt
10 | ```
11 |
12 | 2. Start your local Python IDE
13 |
14 |
15 | 3. Open the [code](chat.py) and fill in all the parts marked with "todo"
16 |
17 |
18 | ## Tasks
19 |
20 | * Implement a pooling strategy to generate a document vector.
21 |
22 | * Implement a scoring function (dot product, cosine similarity, Euclidean distance); a sketch of both tasks is given below.
23 |
24 | * Next Steps:
25 | * Use the output from different layers
26 | * Try other models like [this](https://huggingface.co/bert-base-german-dbmdz-uncased)
27 |
28 |
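29 | ## Sketch
30 | 
31 | A minimal sketch for both tasks, assuming mean pooling and cosine similarity (one option among several; the variable names follow [chat.py](chat.py)):
32 | 
33 | ```
34 | # inside encode(): average the token vectors of the last hidden state
35 | document_vector = last_hidden_state[0].mean(dim=0).numpy()
36 | 
37 | # scoring: cosine similarity between the query and all stored vectors
38 | score = np.dot(vectors, query_vec) / (
39 |     np.linalg.norm(vectors, axis=1) * np.linalg.norm(query_vec))
40 | topk_idx = np.argsort(score)[::-1][:topk]
41 | ```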
--------------------------------------------------------------------------------
/assignments/Chat/requirements.txt:
--------------------------------------------------------------------------------
1 | torch
2 | transformers
3 | tqdm
4 | termcolor
5 | numpy
--------------------------------------------------------------------------------
/assignments/Chat/tools.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from termcolor import colored
3 | from typing import List
4 | import random
5 | import pickle
6 | import os
7 | import re, locale
8 |
9 |
10 | class Pattern: # one dialog intent: example inputs and possible responses
11 | def __init__(self,name:str):
12 | self.input:List[str] = list()
13 | self.response:List[str] = list()
14 | self.name = name
15 | self.input_vectors: List[float] = None
16 |
17 | def load_if_exists(filename:str):
18 | if os.path.isfile(filename):
19 | with open(filename, 'rb') as f:
20 | data = pickle.load(f)
21 | return data
22 | else:
23 | return None
24 | def save(filename:str,data):
25 | with open(filename, 'wb') as f:
26 | pickle.dump(data, f)
27 |
28 | def load_file(path): # parse a markdown dialog file into a list of Patterns
29 | with open(path) as fp:
30 | patterns: List[Pattern] = list()
31 | pattern = None
32 | for line in fp:
33 | if line.startswith('##'):
34 | if pattern is not None: patterns.append(pattern)
35 | pattern = Pattern(line.replace('##','',1).strip())
36 | elif line.startswith('*'):
37 | pattern.input.append(line.replace('*','',1).strip())
38 | elif line.startswith(' -'):
39 | if '*' not in line: # skip wildcard lines, as we can't handle them yet
40 | pattern.response.append(line.replace(' -','',1).strip())
41 | patterns.append(pattern)
42 | print(f'{len(patterns)} patterns loaded from file {path}')
43 | return patterns
--------------------------------------------------------------------------------
/assignments/Embeddings/1-baseline.py:
--------------------------------------------------------------------------------
1 | import fastText
2 | import re
3 |
4 | def load_data(path):
5 | file = open(path, "r",encoding="utf-8")
6 | data = file.readlines()
7 | return [line.split("\t") for line in data]
8 |
9 | def save_data(path,data):
10 | with open(path, 'w',encoding="utf-8") as f:
11 | f.write("\n".join(data))
12 |
13 | def train():
14 | training_parameters = {'input': 'fasttext.train', 'epoch': 60, 'lr': 0.01, 'wordNgrams': 1, 'verbose': 2, 'minCount': 1, 'loss': "ns",
15 | 'lrUpdateRate': 100, 'thread': 1, 'ws':5, 'dim': 100}
16 | model = fastText.train_supervised(**training_parameters)
17 | model.save_model("model.bin")
18 | return model
19 |
20 | def test(model):
21 | f1_score = lambda precision, recall: 2 * ((precision * recall) / (precision + recall))
22 | nexamples, recall, precision = model.test('fasttext.test')
23 | print (f'recall: {recall}' )
24 | print (f'precision: {precision}')
25 | print (f'f1 score: {f1_score(precision,recall)}')
26 | print (f'Number of examples: {nexamples}')
27 |
28 | def transform(input_file, output_file):
29 | # load data
30 | data = load_data(input_file)
31 | # transform it into the fasttext format, e.g. "__label__OTHER have a nice day"
32 | data = [f"__label__{line[1]}\t{line[0]}" for line in data]
33 | # and save the data
34 | save_data(output_file,data)
35 |
36 | if __name__ == "__main__":
37 | transform("data/germeval2018.training.txt","fasttext.train")
38 | transform("data/germeval2018.test.txt","fasttext.test")
39 |
40 | # train the model
41 | model = train()
42 | test(model)
43 |
44 |
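45 | # Possible follow-up (sketch, commented out): inspect individual predictions
46 | # with the trained model. fastText's predict returns parallel lists of labels
47 | # and probabilities; the input sentence is just a made-up example.
48 | # labels, probabilities = model.predict("die sonne scheint heute", k=2)
49 | # print(labels, probabilities)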
--------------------------------------------------------------------------------
/assignments/Embeddings/2-preprocessing.py:
--------------------------------------------------------------------------------
1 | import fastText
2 | import re
3 |
4 | def load_data(path):
5 | file = open(path, "r",encoding="utf-8")
6 | data = file.readlines()
7 | return [line.split("\t") for line in data]
8 |
9 | def save_data(path,data):
10 | with open(path, 'w',encoding="utf-8") as f:
11 | f.write("\n".join(data))
12 |
13 | def train():
14 | training_parameters = {'input': 'fasttext.train', 'epoch': 60, 'lr': 0.01, 'wordNgrams': 1, 'verbose': 2, 'minCount': 1, 'loss': "ns",
15 | 'lrUpdateRate': 100, 'thread': 1, 'ws':5, 'dim': 300}
16 | model = fastText.train_supervised(**training_parameters)
17 | model.save_model("model.bin")
18 | return model
19 |
20 | def replaceNumbers(text):
21 | text = text.replace("0"," null ")
22 | text = text.replace("1"," eins ")
23 | text = text.replace("2"," zwei ")
24 | text = text.replace("3"," drei ")
25 | text = text.replace("4"," vier ")
26 | text = text.replace("5"," fünf ")
27 | text = text.replace("6"," sechs ")
28 | text = text.replace("7"," sieben ")
29 | text = text.replace("8"," acht ")
30 | text = text.replace("9"," neun ")
31 | return text
32 |
33 | def loadSmileyData(path):
34 | file = open(path, "r",encoding="utf-8")
35 | data = file.readlines()
36 | data = [line.replace("\n","") for line in data]
37 | return [line.split("\t") for line in data]
38 |
39 | smileys = loadSmileyData("data/custom-emo.txt")
40 | 
41 | def replaceSmiley(text):
42 | for smiley in smileys:
43 | text = text.replace(smiley[0],' '+smiley[1].lower()+' ')
44 | return text
45 |
46 | cleanChars = re.compile(r'[^a-züöäÖÜÄß ]', re.MULTILINE)
47 | def preprocess(line):
48 | line = replaceSmiley(line)
49 | line = line.strip().lower()
50 | line = " ".join([word for word in line.split() if word[0] != "@"])
51 | line = line.replace("."," ").replace("#","").replace("@"," ").replace(":","").replace(",","").replace("|","").replace("("," ").replace(")"," ").replace("-"," ").replace("/"," ").replace("!","").replace(";","").replace("\"","").replace("="," ")
52 | line = replaceNumbers(line)
53 | #line = line.replace(" u "," und ")
54 | #line = cleanChars.sub('', line)
55 | return line
56 |
57 | def test(model):
58 | f1_score = lambda precision, recall: 2 * ((precision * recall) / (precision + recall))
59 | nexamples, recall, precision = model.test('fasttext.test')
60 | print (f'recall: {recall}' )
61 | print (f'precision: {precision}')
62 | print (f'f1 score: {f1_score(precision,recall)}')
63 | print (f'Number of examples: {nexamples}')
64 |
65 | def transform(input_file, output_file):
66 | # load data
67 | data = load_data(input_file)
68 | # transform it into the fasttext format, e.g. "__label__OTHER have a nice day"
69 | data = [f"__label__{line[1]}\t{preprocess(line[0])}" for line in data]
70 | # and save the data
71 | save_data(output_file,data)
72 |
73 | if __name__ == "__main__":
74 | transform("data/germeval2018.training.txt","fasttext.train")
75 | transform("data/germeval2018.test.txt","fasttext.test")
76 |
77 | # train the model
78 | model = train()
79 | test(model)
80 |
81 |
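82 | # Quick sanity check (sketch, commented out): run the preprocessing on a
83 | # made-up tweet to see the smiley, mention and number handling in action.
84 | # print(preprocess("@nutzer Das ist Tweet 1! 😡"))
85 | # prints roughly: "das ist tweet  eins  negative"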
--------------------------------------------------------------------------------
/assignments/Embeddings/assginment_text_classifier.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Classifying Text \n",
8 | "\n",
9 | "In this little tutorial we are using PyTorch, TorchText and Byte Pair Encoding to quickly build a text classifier."
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": null,
15 | "metadata": {},
16 | "outputs": [],
17 | "source": [
18 | "!pip3 install bpemb pandas torchtext torch"
19 | ]
20 | },
21 | {
22 | "cell_type": "code",
23 | "execution_count": null,
24 | "metadata": {},
25 | "outputs": [],
26 | "source": [
27 | "import time\n",
28 | "from bpemb import BPEmb\n",
29 | "import pandas as pd\n",
30 | "import numpy as np\n",
31 | "\n",
32 | "import torch\n",
33 | "from torchtext import data\n",
34 | "import torch.nn as nn"
35 | ]
36 | },
37 | {
38 | "cell_type": "markdown",
39 | "metadata": {},
40 | "source": [
41 | "## 1. Load the data\n",
42 | "\n",
43 | "\n",
44 | "First, we need to download the data:"
45 | ]
46 | },
47 | {
48 | "cell_type": "code",
49 | "execution_count": null,
50 | "metadata": {},
51 | "outputs": [],
52 | "source": [
53 | "!wget https://www.htw-dresden.de/~guhr/dist/sample/germeval2018.training.txt\n",
54 | "!wget https://www.htw-dresden.de/~guhr/dist/sample/germeval2018.test.txt"
55 | ]
56 | },
57 | {
58 | "cell_type": "markdown",
59 | "metadata": {},
60 | "source": [
61 | "Now we can load the data, using pandas:"
62 | ]
63 | },
64 | {
65 | "cell_type": "code",
66 | "execution_count": null,
67 | "metadata": {},
68 | "outputs": [],
69 | "source": [
70 | "test_df = pd.read_csv(\"germeval2018.test.txt\", sep='\\t', header=0,encoding=\"utf-8\")\n",
71 | "train_df = pd.read_csv(\"germeval2018.training.txt\", sep='\\t', header=0,encoding=\"utf-8\")"
72 | ]
73 | },
74 | {
75 | "cell_type": "code",
76 | "execution_count": null,
77 | "metadata": {
78 | "scrolled": true
79 | },
80 | "outputs": [],
81 | "source": [
82 | "train_df.head()"
83 | ]
84 | },
85 | {
86 | "cell_type": "code",
87 | "execution_count": null,
88 | "metadata": {},
89 | "outputs": [],
90 | "source": [
91 | "# drop unused columns\n",
92 | "test_df.drop(columns=['label2'], inplace=True)\n",
93 | "train_df.drop(columns=['label2'], inplace=True)"
94 | ]
95 | },
96 | {
97 | "cell_type": "markdown",
98 | "metadata": {},
99 | "source": [
100 | "## 2. Data Preprocessing\n",
101 | "\n",
102 | "Now we can preprocess our dataset. In this step we remove all special chars and binarize our labels:"
103 | ]
104 | },
105 | {
106 | "cell_type": "code",
107 | "execution_count": null,
108 | "metadata": {},
109 | "outputs": [],
110 | "source": [
111 | "def clean_text (text):\n",
112 | " text = text.str.lower() # lowercase\n",
113 | " text = text.str.replace(r\"\\#\",\"\") # replaces hashtags\n",
114 | " text = text.str.replace(r\"http\\S+\",\"URL\") # remove URL addresses\n",
115 | " text = text.str.replace(r\"@\",\"\")\n",
116 | " text = text.str.replace(r\"[^A-Za-z0-9öäüÖÄÜß()!?]\", \" \")\n",
117 | " text = text.str.replace(\"\\s{2,}\", \" \")\n",
118 | " return text\n",
119 | "\n",
120 | "def convert_label(label):\n",
121 | " return 1 if label == \"OFFENSE\" else 0"
122 | ]
123 | },
124 | {
125 | "cell_type": "code",
126 | "execution_count": null,
127 | "metadata": {},
128 | "outputs": [],
129 | "source": [
130 | "train_df[\"text\"]=clean_text(train_df[\"text\"])\n",
131 | "test_df[\"text\"]=clean_text(test_df[\"text\"])\n",
132 | "train_df[\"label\"]=train_df[\"label\"].map(convert_label)\n",
133 | "test_df[\"label\"]=test_df[\"label\"].map(convert_label)"
134 | ]
135 | },
136 | {
137 | "cell_type": "code",
138 | "execution_count": null,
139 | "metadata": {},
140 | "outputs": [],
141 | "source": [
142 | "# this is how our data set looks now. No urls no @ :)\n",
143 | "train_df.head()"
144 | ]
145 | },
146 | {
147 | "cell_type": "code",
148 | "execution_count": null,
149 | "metadata": {},
150 | "outputs": [],
151 | "source": [
152 | "# The following will help make the results reproducible later.\n",
153 | "# This will make sure that you get the same result every time you train your model.\n",
154 | "# Turn this off for your final training run, to improve performance.\n",
155 | "SEED = 42\n",
156 | "\n",
157 | "torch.manual_seed(SEED)\n",
158 | "torch.backends.cudnn.deterministic = True\n",
159 | "torch.backends.cudnn.benchmark = False"
160 | ]
161 | },
162 | {
163 | "cell_type": "markdown",
164 | "metadata": {},
165 | "source": [
166 | "### data magic\n",
167 | "\n",
168 | "The following class helps us convert the pandas DataFrame into a PyTorch dataset. You can skip this part."
169 | ]
170 | },
171 | {
172 | "cell_type": "code",
173 | "execution_count": null,
174 | "metadata": {},
175 | "outputs": [],
176 | "source": [
177 | "# source : https://gist.github.com/lextoumbourou/8f90313cbc3598ffbabeeaa1741a11c8\n",
178 | "# to use DataFrame as a Data source\n",
179 | "\n",
180 | "class DataFrameDataset(data.Dataset):\n",
181 | "\n",
182 | " def __init__(self, df, fields, is_test=False, **kwargs):\n",
183 | " print(df)\n",
184 | " examples = []\n",
185 | " for i, row in df.iterrows(): \n",
186 | " label = row.label#row.target if not is_test else None \n",
187 | " text = row.text \n",
188 | " examples.append(data.Example.fromlist([text, label], fields))\n",
189 | "\n",
190 | " super().__init__(examples, fields, **kwargs)\n",
191 | "\n",
192 | " @staticmethod\n",
193 | " def sort_key(ex):\n",
194 | " return len(ex.text)\n",
195 | "\n",
196 | " @classmethod\n",
197 | " def splits(cls, fields, train_df, val_df=None, test_df=None, **kwargs):\n",
198 | " train_data, val_data, test_data = (None, None, None)\n",
199 | " data_field = fields\n",
200 | "\n",
201 | " if train_df is not None:\n",
202 | " train_data = cls(train_df, data_field, **kwargs)\n",
203 | " if val_df is not None:\n",
204 | " val_data = cls(val_df, data_field, **kwargs)\n",
205 | " if test_df is not None:\n",
206 | " test_data = cls(test_df, data_field, True, **kwargs)\n",
207 | "\n",
208 | " return tuple(d for d in (train_data, val_data, test_data) if d is not None)"
209 | ]
210 | },
211 | {
212 | "cell_type": "markdown",
213 | "metadata": {},
214 | "source": [
215 | "## 3. Loading the pretrained word vectors\n",
216 | "\n",
217 | "For this tutorial we are using byte pair encoding. The great [BPEmb](https://pypi.org/project/bpemb/) library helps us encode the text and provides pretrained models for a lot of languages."
218 | ]
219 | },
220 | {
221 | "cell_type": "code",
222 | "execution_count": null,
223 | "metadata": {},
224 | "outputs": [],
225 | "source": [
226 | "from collections import Counter\n",
227 | "from torchtext import vocab\n",
228 | "\n",
229 | "bpemb_de = BPEmb(lang=\"de\", vs=10000)\n",
230 | "bpemb_de_counter = Counter(bpemb_de.words)\n",
231 | "bpemb_de_stoi = {word:i for i, word in enumerate(bpemb_de.words)}\n",
232 | "\n",
233 | "bpemb_vocab = vocab.Vocab(counter = bpemb_de_counter)\n",
234 | "bpemb_vocab.set_vectors(stoi = bpemb_de_stoi, vectors = torch.tensor(bpemb_de.vectors), dim = bpemb_de.dim)\n",
235 | "\n",
236 | "bpemb_vocab.stoi = bpemb_de_stoi # torchtext overwrites our token ids, so we need to reset them\n"
237 | ]
238 | },
239 | {
240 | "cell_type": "markdown",
241 | "metadata": {},
242 | "source": [
243 | "Byte pair encoding turns words into tokens. Every token has an id and a corresponding vector that we can feed to our neural network."
244 | ]
245 | },
246 | {
247 | "cell_type": "code",
248 | "execution_count": null,
249 | "metadata": {},
250 | "outputs": [],
251 | "source": [
252 | "tokens = bpemb_de.encode_with_bos_eos(\"das ist ein test\")\n",
253 | "print(tokens)\n",
254 | "\n",
255 | "token_ids = bpemb_de.encode_ids_with_bos_eos(\"das ist ein test\")\n",
256 | "print(token_ids)\n"
257 | ]
258 | },
259 | {
260 | "cell_type": "code",
261 | "execution_count": null,
262 | "metadata": {},
263 | "outputs": [],
264 | "source": [
265 | "# and this is what the vector for the \"_das\" token looks like:\n",
266 | "bpemb_de.vectors[99]\n",
267 | "#[bpemb_de.vectors[id] for id in token_ids] # vectors for all tokens"
268 | ]
269 | },
270 | {
271 | "cell_type": "markdown",
272 | "metadata": {},
273 | "source": [
274 | "## 4. Load Train and Valid Data Sets\n",
275 | "\n",
276 | "First, we define how the TEXT and LABEL fields will be encoded. That is what the Field objects do. With these fields and the class we defined above, we can create a dataset."
277 | ]
278 | },
279 | {
280 | "cell_type": "code",
281 | "execution_count": null,
282 | "metadata": {},
283 | "outputs": [],
284 | "source": [
285 | "TEXT = data.Field(tokenize= bpemb_de.encode, init_token='<s>', eos_token='</s>', pad_token='<pad>', use_vocab = True, batch_first = True, sequential=True )\n",
286 | "\n",
287 | "TEXT.vocab = bpemb_vocab # -> assign our byte pair encoding module\n",
288 | "LABEL = data.LabelField(dtype = torch.float, use_vocab = False)\n",
289 | "\n",
290 | "fields = [('text',TEXT), ('label',LABEL)]\n",
291 | "train_ds, val_ds = DataFrameDataset.splits(fields, train_df=train_df, val_df=test_df)"
292 | ]
293 | },
294 | {
295 | "cell_type": "code",
296 | "execution_count": null,
297 | "metadata": {},
298 | "outputs": [],
299 | "source": [
300 | "# Let's look at the first example\n",
301 | "print(vars(train_ds[0]))"
302 | ]
303 | },
304 | {
305 | "cell_type": "markdown",
306 | "metadata": {},
307 | "source": [
308 | "### Batch Iterator\n",
309 | "\n",
310 | "With this dataset we can now create an iterator that prepares the batches for us."
311 | ]
312 | },
313 | {
314 | "cell_type": "code",
315 | "execution_count": null,
316 | "metadata": {},
317 | "outputs": [],
318 | "source": [
319 | "BATCH_SIZE = 64\n",
320 | "\n",
321 | "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
322 | "\n",
323 | "train_iterator, valid_iterator = data.Iterator.splits(\n",
324 | " (train_ds, val_ds), \n",
325 | " batch_size = BATCH_SIZE,\n",
326 | " shuffle = True, \n",
327 | " device = device)"
328 | ]
329 | },
330 | {
331 | "cell_type": "code",
332 | "execution_count": null,
333 | "metadata": {},
334 | "outputs": [],
335 | "source": [
336 | "# This is what a batch looks like. Do you know why our texts are still ids?\n",
337 | "\n",
338 | "batch = next(iter(train_iterator))\n",
339 | "\n",
340 | "print(batch.label)\n",
341 | "print(batch.text)"
342 | ]
343 | },
344 | {
345 | "cell_type": "markdown",
346 | "metadata": {},
347 | "source": [
348 | "## 5. Define the Model\n",
349 | "\n",
350 | "Now it's finally time to define our model:"
351 | ]
352 | },
353 | {
354 | "cell_type": "code",
355 | "execution_count": null,
356 | "metadata": {},
357 | "outputs": [],
358 | "source": [
359 | "class SimpleModel(nn.Module):\n",
360 | " def __init__(self, weights,embedding_length = 100):\n",
361 | " super(SimpleModel, self).__init__()\n",
362 | " \n",
363 | " # these three lines load the pretrained vectors into our embedding layer\n",
364 | " vocab_size= len(weights) \n",
365 | " self.word_embeddings = nn.Embedding(vocab_size, embedding_length) \n",
366 | " self.word_embeddings.weight = nn.Parameter(weights, requires_grad=False) \n",
367 | " \n",
368 | " def forward(self, input_sentences):\n",
369 | " input = self.word_embeddings(input_sentences) # <-- here we turn our ids into actual vectors\n",
370 | " \n",
371 | " # since our sentences do not have equal length, we can't simply feed them \n",
372 | " # into a feed-forward network. How can we solve that?\n",
373 | " \n",
374 | " return input # "
375 | ]
376 | },
377 | {
378 | "cell_type": "markdown",
379 | "metadata": {},
380 | "source": [
381 | "## 6. Train the model\n",
382 | "\n",
383 | "First we define a set of helper functions, to make our lives a bit easier."
384 | ]
385 | },
386 | {
387 | "cell_type": "code",
388 | "execution_count": null,
389 | "metadata": {},
390 | "outputs": [],
391 | "source": [
392 | "def binary_accuracy(preds, y):\n",
393 | " \"\"\"\n",
394 | " Returns accuracy per batch, i.e. if you get 8/10 right, this returns 0.8, NOT 8\n",
395 | " \"\"\"\n",
396 | "\n",
397 | " #round predictions to the closest integer\n",
398 | " rounded_preds = torch.round(torch.sigmoid(preds))\n",
399 | " correct = (rounded_preds == y).float() #convert into float for division \n",
400 | " acc = correct.sum() / len(correct)\n",
401 | " return acc"
402 | ]
403 | },
404 | {
405 | "cell_type": "code",
406 | "execution_count": null,
407 | "metadata": {},
408 | "outputs": [],
409 | "source": [
410 | "# we moved the training loop for a single epoch into a function for convenience\n",
411 | "def train(model, iterator):\n",
412 | " \n",
413 | " epoch_loss = 0\n",
414 | " epoch_acc = 0\n",
415 | " \n",
416 | " model.train()\n",
417 | " \n",
418 | " for batch in iterator:\n",
419 | " text = batch.text\n",
420 | " optimizer.zero_grad()\n",
421 | " predictions = model(text).squeeze(1) \n",
422 | " loss = criterion(predictions, batch.label)\n",
423 | " acc = binary_accuracy(predictions, batch.label)\n",
424 | " \n",
425 | " loss.backward()\n",
426 | " optimizer.step()\n",
427 | " \n",
428 | " epoch_loss += loss.item()\n",
429 | " epoch_acc += acc.item()\n",
430 | " \n",
431 | "\n",
432 | " return epoch_loss / len(iterator), epoch_acc / len(iterator)"
433 | ]
434 | },
435 | {
436 | "cell_type": "code",
437 | "execution_count": null,
438 | "metadata": {},
439 | "outputs": [],
440 | "source": [
441 | "# ...same with the eval code\n",
442 | "def evaluate(model, iterator):\n",
443 | " \n",
444 | " epoch_acc = 0\n",
445 | " model.eval()\n",
446 | " \n",
447 | " with torch.no_grad():\n",
448 | " for batch in iterator:\n",
449 | " text = batch.text\n",
450 | " predictions = model(text).squeeze(1)\n",
451 | " acc = binary_accuracy(predictions, batch.label)\n",
452 | " \n",
453 | " epoch_acc += acc.item()\n",
454 | " \n",
455 | " return epoch_acc / len(iterator)"
456 | ]
457 | },
458 | {
459 | "cell_type": "markdown",
460 | "metadata": {},
461 | "source": [
462 | "### Now we can create an instance of our model, with the pretrained byte pair vectors."
463 | ]
464 | },
465 | {
466 | "cell_type": "code",
467 | "execution_count": null,
468 | "metadata": {},
469 | "outputs": [],
470 | "source": [
471 | "model = SimpleModel(torch.tensor(bpemb_de.vectors))\n",
472 | "model.to(device)\n",
473 | "\n",
474 | "learning_rate = 0.001\n",
475 | "\n",
476 | "criterion = nn.BCEWithLogitsLoss()\n",
477 | "\n",
478 | "optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)"
479 | ]
480 | },
481 | {
482 | "cell_type": "code",
483 | "execution_count": null,
484 | "metadata": {},
485 | "outputs": [],
486 | "source": [
487 | "num_epochs = 10\n",
488 | "loss=[]\n",
489 | "acc=[]\n",
490 | "val_acc=[]\n",
491 | "\n",
492 | "for epoch in range(num_epochs):\n",
493 | " \n",
494 | " train_loss, train_acc = train(model, train_iterator)\n",
495 | " valid_acc = evaluate(model, valid_iterator)\n",
496 | " \n",
497 | " print(f'{epoch} Train Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}% | Val. Acc: {valid_acc*100:.2f}%') \n",
498 | " \n",
499 | " loss.append(train_loss)\n",
500 | " acc.append(train_acc)\n",
501 | " val_acc.append(valid_acc)\n",
502 | " \n"
503 | ]
504 | },
505 | {
506 | "cell_type": "code",
507 | "execution_count": null,
508 | "metadata": {},
509 | "outputs": [],
510 | "source": [
511 | "import matplotlib.pyplot as plt\n",
512 | "\n",
513 | "epochs = range(1,num_epochs+1)\n",
514 | "#plt.plot(epochs, loss, 'g', label='Training loss')\n",
515 | "plt.plot(epochs, acc, 'b', label='Training acc')\n",
516 | "plt.plot(epochs, val_acc, 'r', label='validation acc')\n",
517 | "plt.title('Training and validation accuracy')\n",
518 | "plt.xlabel('Epochs')\n",
519 | "plt.ylabel('Accuracy')\n",
520 | "plt.legend()\n",
521 | "plt.show()"
522 | ]
523 | },
524 | {
525 | "cell_type": "code",
526 | "execution_count": null,
527 | "metadata": {},
528 | "outputs": [],
529 | "source": []
530 | },
531 | {
532 | "cell_type": "markdown",
533 | "metadata": {},
534 | "source": [
535 | "# Tasks\n",
536 | "\n",
537 | "1. Implement a feed-forward neural network classifier\n",
538 | "\n",
539 | "2. Try to improve the results. What happens when you use:\n",
540 | " * more layers\n",
541 | " * more neurons\n",
542 | " * a bigger vocabulary size\n",
543 | " \n",
544 | "3. Try different models:\n",
545 | " * Use LSTMs \n",
546 | " * Did you know that you can use a CNN to classify text?"
547 | ]
548 | },
549 | {
550 | "cell_type": "code",
551 | "execution_count": null,
552 | "metadata": {},
553 | "outputs": [],
554 | "source": []
555 | }
556 | ],
557 | "metadata": {
558 | "kernelspec": {
559 | "display_name": "Python 3",
560 | "language": "python",
561 | "name": "python3"
562 | },
563 | "language_info": {
564 | "codemirror_mode": {
565 | "name": "ipython",
566 | "version": 3
567 | },
568 | "file_extension": ".py",
569 | "mimetype": "text/x-python",
570 | "name": "python",
571 | "nbconvert_exporter": "python",
572 | "pygments_lexer": "ipython3",
573 | "version": "3.8.2"
574 | }
575 | },
576 | "nbformat": 4,
577 | "nbformat_minor": 4
578 | }
579 |
--------------------------------------------------------------------------------
/assignments/Embeddings/data/custom-emo.txt:
--------------------------------------------------------------------------------
1 | 🤢 Negative
2 | 😡 Negative
3 | 🤮 Negative
4 | 💩 Negative
--------------------------------------------------------------------------------
/assignments/RNNs/shakespear-lstm.py:
--------------------------------------------------------------------------------
1 | '''
2 | Example script to generate text from Shakespeare's writings.
3 |
4 | At least 20 epochs are required before the generated text
5 | starts sounding coherent.
6 |
7 | It is recommended to run this script on GPU, as recurrent
8 | networks are quite computationally intensive.
9 |
10 | If you try this script on new data, make sure your corpus
11 | has at least ~100k characters. ~1M is better.
12 |
13 |
14 | You can try some other texts too:
15 |
16 |
17 | What about Tolstoy's Anna Karenina:
18 | https://raw.githubusercontent.com/udacity/deep-learning/master/tensorboard/anna.txt
19 |
20 | Or some Nietzsche:
21 | https://s3.amazonaws.com/text-datasets/nietzsche.txt
22 |
23 | German Wikipedia articles:
24 | https://www2.htw-dresden.de/~guhr/dist/wiki.txt
25 |
26 | Shakespeare's sonnets:
27 | https://raw.githubusercontent.com/vivshaw/shakespeare-LSTM/master/sonnets.txt
28 | '''
29 |
30 | from __future__ import print_function
31 | from keras.callbacks import LambdaCallback, TensorBoard
32 | from keras.models import Sequential
33 | from keras.layers import Dense
34 | from keras.layers import LSTM, CuDNNLSTM, CuDNNGRU, Dropout
35 | from keras.optimizers import RMSprop, SGD, Nadam
36 | from keras.utils.data_utils import get_file
37 | import numpy as np
38 | import random
39 | import sys
40 | import io
41 | from datetime import datetime
42 | import re
43 |
44 | path = get_file(
45 | 'shakespear.txt',
46 | origin='https://cs.stanford.edu/people/karpathy/char-rnn/shakespear.txt')
47 |
48 |
49 | with io.open(path, encoding='utf-8') as f:
50 | text = f.read().lower()
51 | print('corpus length:', len(text))
52 |
53 | # build lookup table
54 | chars = sorted(list(set(text)))
55 | print('total chars:', len(chars))
56 | char_indices = dict((c, i) for i, c in enumerate(chars))
57 | indices_char = dict((i, c) for i, c in enumerate(chars))
58 |
59 | # cut the text in semi-redundant sequences of maxlen characters
60 | # How does the network react when you change the sequence length or step size?
61 | maxlen = 40
62 | step = 3
63 | sentences = []
64 | next_chars = []
65 | for i in range(0, len(text) - maxlen, step):
66 | sentences.append(text[i: i + maxlen])
67 | next_chars.append(text[i + maxlen])
68 | print('nb sequences:', len(sentences))
69 |
70 | print('Vectorization...')
71 | x = np.zeros((len(sentences), maxlen, len(chars)), dtype=bool)
72 | y = np.zeros((len(sentences), len(chars)), dtype=bool)
73 | for i, sentence in enumerate(sentences):
74 | for t, char in enumerate(sentence):
75 | x[i, t, char_indices[char]] = 1
76 | y[i, char_indices[next_chars[i]]] = 1
77 |
78 |
79 | # build the model: a single LSTM layer
80 | # experiment:
81 | # - add some more neurons
82 | # - add some more layers
83 | # - add dropout
84 | # - try out GRUs
85 |
86 | print('Build model...')
87 | model = Sequential()
88 | model.add(CuDNNLSTM(128,input_shape=(maxlen, len(chars))))
89 | model.add(Dense(len(chars), activation='softmax'))
90 |
91 | rms = RMSprop(lr=0.01)
92 | # try some other optimizers
93 | #sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
94 | #nadam = Nadam(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=None, schedule_decay=0.004)
95 | model.compile(loss='categorical_crossentropy', optimizer=rms)
96 |
97 |
98 | def sample(preds, temperature=1.0):
99 | # helper function to sample an index from a probability array
100 | # Read more about this softmax with temperature here:
101 | # Distilling the Knowledge in a Neural Network (Geoffrey Hinton, Oriol Vinyals, Jeff Dean)
102 | # https://arxiv.org/abs/1503.02531
103 | preds = np.asarray(preds).astype('float64')
104 | preds = np.log(preds) / temperature
105 | exp_preds = np.exp(preds)
106 | preds = exp_preds / np.sum(exp_preds)
107 | probas = np.random.multinomial(1, preds, 1)
108 | return np.argmax(probas)
109 |
110 |
111 | def on_epoch_end(epoch, _):
112 | # Function invoked at end of each epoch. Prints generated text.
113 | print()
114 | print('----- Generating text after Epoch: %d' % epoch)
115 |
116 | start_index = random.randint(0, len(text) - maxlen - 1)
117 | for diversity in [0.8, 1.0, 1.2]:
118 | print('----- diversity:', diversity)
119 |
120 | generated = ''
121 | sentence = text[start_index: start_index + maxlen]
122 | generated += sentence
123 | print('----- Generating with seed: "' + sentence + '"')
124 | sys.stdout.write(generated)
125 | sys.stdout.write("\n----- result ------\n")
126 | for i in range(300):
127 | x_pred = np.zeros((1, maxlen, len(chars)))
128 | for t, char in enumerate(sentence):
129 | x_pred[0, t, char_indices[char]] = 1.
130 |
131 | preds = model.predict(x_pred, verbose=0)[0]
132 | next_index = sample(preds, diversity)
133 | next_char = indices_char[next_index]
134 |
135 | sentence = sentence[1:] + next_char
136 |
137 | sys.stdout.write(next_char)
138 | sys.stdout.flush()
139 | print()
140 |
141 | # print some text with the current model
142 | print_callback = LambdaCallback(on_epoch_end=on_epoch_end)
143 |
144 | # train the model
145 | model.fit(x, y,
146 | batch_size=128,
147 | epochs=90,
148 | callbacks=[print_callback])
149 |
150 | # save the model
151 | model.save("shakespear-rnn")
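152 | 
153 | # Reusing the trained network (sketch, commented out): the saved model can be
154 | # loaded back with Keras for further sampling sessions.
155 | # from keras.models import load_model
156 | # model = load_model("shakespear-rnn")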
--------------------------------------------------------------------------------
/assignments/RNNs/vanishing-gradients.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 11,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "%matplotlib inline\n",
10 | "import matplotlib.pyplot as plt\n",
11 | "import torch\n",
12 | "import torch.nn as nn\n",
13 | "import torch.nn.functional as F\n",
14 | "plt.rcParams[\"figure.figsize\"] = (12, 9)"
15 | ]
16 | },
17 | {
18 | "cell_type": "markdown",
19 | "metadata": {},
20 | "source": [
21 | "## Vanishing & Exploding Gradient Problem\n",
22 | "\n",
23 | "By gradient, we mean the gradient of the loss function with respect to the weights of the neural network. As you already learned, this gradient is calculated using backpropagation. \n",
24 | "\n",
25 | "* What you should know:\n",
26 | " * [Backpropagation](https://www.youtube.com/watch?v=tIeHLnjs5U8)\n",
27 | " \n",
28 | "* Video for this topic:\n",
29 | " * [Vanishing & Exploding Gradient explained](https://www.youtube.com/watch?v=qO_NLVjD6zE)"
30 | ]
31 | },
32 | {
33 | "cell_type": "markdown",
34 | "metadata": {},
35 | "source": [
36 | "### What happens during backpropagation?\n",
37 | "\n",
38 | "Let's start with the loss function. If we use an MSE loss (or cost) function, it is calculated this way:\n",
39 | "\n",
40 | "$ Loss = (a_{L} -y)^2 $\n",
41 | "\n",
42 | "Where $ y $ denotes the desired output of the network and $a_{L}$ is the activation of the last neuron.\n",
43 | "\n",
44 | "\n",
45 | "$ a_{L} = \\sigma( w_{L}a_{L-1}+b_{L})$\n",
46 | "\n",
47 | "\n",
48 | "Let's define $ in $ as the input of our network and \n",
49 | "write down a 5-layer network with a single neuron per layer:\n",
50 | "\n",
51 | "\n",
52 | "$ a_{1} = \\sigma( w_{1} in +b_{1}) $ \n",
53 | "\n",
54 | "$ a_{2} = \\sigma( w_{2}a_{1}+b_{2}) $ \n",
55 | "\n",
56 | "$ a_{3} = \\sigma( w_{3}a_{2}+b_{3}) $ \n",
57 | "\n",
58 | "$ a_{4} = \\sigma( w_{4}a_{3}+b_{4}) $ \n",
59 | "\n",
60 | "$ a_{5} = \\sigma( w_{5}a_{4}+b_{5}) $ \n",
61 | "\n",
62 | "$ Loss = (a_{5} -y)^2 $\n",
63 | "\n",
64 | "With the help of the backpropagation algorithm, we can adjust the weights. As you know, backpropagation uses derivatives to calculate the weight changes. What do these derivatives look like? Writing $ z_{l} = w_{l}a_{l-1}+b_{l} $ for the input of layer $ l $, the chain rule gives\n\n$ \\frac{\\partial Loss}{\\partial w_{1}} = 2(a_{5}-y)\\,\\sigma'(z_{5})\\,w_{5}\\,\\sigma'(z_{4})\\,w_{4}\\,\\sigma'(z_{3})\\,w_{3}\\,\\sigma'(z_{2})\\,w_{2}\\,\\sigma'(z_{1})\\,in $\n\na product with one $ \\sigma' $ factor (and one weight) per layer.\n"
65 | ]
66 | },
67 | {
68 | "cell_type": "markdown",
69 | "metadata": {},
70 | "source": [
71 | "### A close look at the derivative of our activation function"
72 | ]
73 | },
74 | {
75 | "cell_type": "code",
76 | "execution_count": 12,
77 | "metadata": {},
78 | "outputs": [
79 | {
80 | "data": {
81 | "image/png": "(base64 plot omitted: derivative of the sigmoid activation, peaking at 0.25 at x = 0 and falling toward zero on both sides)\n",
82 | "text/plain": [
83 | ""
84 | ]
85 | },
86 | "metadata": {
87 | "needs_background": "light"
88 | },
89 | "output_type": "display_data"
90 | }
91 | ],
92 | "source": [
93 | "activationFunction = nn.Sigmoid() # try this for Sigmoid, Tanh, ReLU\n",
94 | "\n",
95 | "for i in range(-50,50):\n",
96 | " data = torch.tensor([i/10],requires_grad=True)\n",
97 | " calc = activationFunction(data)\n",
98 | " calc.backward() \n",
99 | " plt.plot(i/10,data.grad[0], 'ro') \n",
100 | "\n",
101 | "plt.show()"
102 | ]
103 | },
104 | {
105 | "cell_type": "markdown",
106 | "metadata": {},
107 | "source": [
108 | "The maximum of the derivative of the sigmoid function is 0.25 at $x = 0$ and much smaller for all other values. \n",
109 | "\n",
110 | "\n",
111 | "### Let's implement a small network...\n",
112 | "so we can see what's going on here. We use a 5 layer single neuron network that we defined earlier. \n",
113 | "\n",
114 | "Let's say our network should negate a number. If we put in a $1$ and it should return a $-1$. For simplicity, we do not use biases in this tiny example."
115 | ]
116 | },
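A quick way to verify the 0.25 claim above: the derivative of the sigmoid is $\sigma'(x) = \sigma(x)(1 - \sigma(x))$, which peaks at $x = 0$ where $\sigma(0) = 0.5$. A minimal standalone sketch that checks this with autograd:

```python
import torch

# sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x)); at x = 0 this is 0.5 * 0.5 = 0.25
x = torch.tensor(0.0, requires_grad=True)
torch.sigmoid(x).backward()
print(x.grad)  # tensor(0.2500)
```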
117 | {
118 | "cell_type": "code",
119 | "execution_count": 3,
120 | "metadata": {},
121 | "outputs": [
122 | {
123 | "data": {
124 | "text/plain": [
125 | "tensor([1.1797, 1.3934, 0.2839, 0.1575, 1.3092], grad_fn=)"
126 | ]
127 | },
128 | "execution_count": 3,
129 | "metadata": {},
130 | "output_type": "execute_result"
131 | }
132 | ],
133 | "source": [
134 | "number_of_layers = 5\n",
135 | "weight_tensor = torch.randn((1, number_of_layers), requires_grad=True)\n",
136 | "weight = weight_tensor[0].abs() \n",
137 | "net_input = 1 \n",
138 | "y = net_output = -1\n",
139 | "weight"
140 | ]
141 | },
142 | {
143 | "cell_type": "code",
144 | "execution_count": 4,
145 | "metadata": {},
146 | "outputs": [],
147 | "source": [
148 | "layer = activationFunction(weight[0] * net_input) \n",
149 | "\n",
150 | "for i in range(1,number_of_layers):\n",
151 | " layer = activationFunction(weight[i] * layer) \n",
152 | " \n",
153 | "loss= (layer - y)**2\n",
154 | "\n",
155 | "loss.backward()"
156 | ]
157 | },
158 | {
159 | "cell_type": "code",
160 | "execution_count": 5,
161 | "metadata": {},
162 | "outputs": [
163 | {
164 | "data": {
165 | "text/plain": [
166 | "tensor([[1.2801e-04, 3.9076e-04, 7.0230e-03, 1.3398e-01, 3.8725e-01]])"
167 | ]
168 | },
169 | "execution_count": 5,
170 | "metadata": {},
171 | "output_type": "execute_result"
172 | }
173 | ],
174 | "source": [
175 | "# we can obtian the gradient of the weights with:\n",
176 | "weight_tensor.grad"
177 | ]
178 | },
179 | {
180 | "cell_type": "markdown",
181 | "metadata": {},
182 | "source": [
183 | "From right to left, we can see how the weight delta gets smaller. Since they are a product of numbers smaller than one, they can only get even smaller."
184 | ]
185 | },
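To make the compounding explicit: with sigmoid activations, every layer multiplies the gradient by a factor of at most 0.25, so an upper bound on the gradient after $n$ layers is $0.25^n$. A quick check of how fast that bound shrinks:

```python
# Each sigmoid layer contributes a factor of at most 0.25 (the max of sigmoid'),
# so the gradient through n layers is bounded by 0.25**n.
for n in [1, 2, 5, 10, 20]:
    print(f"{n:2d} layers: {0.25 ** n:.2e}")
```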
186 | {
187 | "cell_type": "code",
188 | "execution_count": 6,
189 | "metadata": {},
190 | "outputs": [
191 | {
192 | "name": "stdout",
193 | "output_type": "stream",
194 | "text": [
195 | "wieght update:\n",
196 | "\n",
197 | "weight delta \t\t* learning rate \t= update step\n",
198 | "0.0001280104479520 \t* 0.01 \t\t\t= 0.0000012801044795\n",
199 | "\n",
200 | "old weight:1.1797258853912354\n",
201 | "new weight:1.1797246052867558\n"
202 | ]
203 | }
204 | ],
205 | "source": [
206 | "learning_rate = 0.01\n",
207 | "weight_neuron_one = weight_tensor[0][0].item()\n",
208 | "weight_grad_neuron_one = weight_tensor.grad[0][0].item()\n",
209 | "\n",
210 | "weight_update = weight_grad_neuron_one * learning_rate\n",
211 | "\n",
212 | "print(\"wieght update:\\n\")\n",
213 | "print(\"weight delta \\t\\t* learning rate \\t= update step\")\n",
214 | "print(f\"{weight_grad_neuron_one:.16f} \\t* {learning_rate} \\t\\t\\t= {weight_update:.16f}\")\n",
215 | "print(\"\")\n",
216 | "print(f\"old weight:{weight_neuron_one}\")\n",
217 | "print(f\"new weight:{weight_neuron_one - weight_update}\")"
218 | ]
219 | },
220 | {
221 | "cell_type": "code",
222 | "execution_count": 13,
223 | "metadata": {},
224 | "outputs": [
225 | {
226 | "data": {
227 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAtoAAAImCAYAAABtvi1RAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4wLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvqOYd8AAAIABJREFUeJzs3Xl8lPW5/vHrTkLYwo4EZV8CKq4YWVwQlK31qKdHq1bFpSJa1yraY9tz+mvtcnrca9Uq7nUptdb20JaWRQ24sYpgQRPCvsi+BrLn/v2RwY6RJQOZfGcmn/frxYuZZ55n5po7qb3y8M0z5u4CAAAAULfSQgcAAAAAUhFFGwAAAIgDijYAAAAQBxRtAAAAIA4o2gAAAEAcULQBAACAOKBoA0AtmNlKMxseuf0DM3s2dKbQzKyJmbmZdQ6dZR8zW2Zmg2u57wYzOyvemQA0XBRtAEnPzC43s9lmtsfMNkVu32xmFo/Xc/dfuPvYI30eM+seKaoZdZErxtc+1swqYjxmlpldFa9MdcHde7n7h0f6PGY22swK6yITgIaLog0gqZnZeEm/kvSApI6SsiXdJOlMSZkHOCa93gImgBBFHgBA0QaQxMyslaT7JN3s7m+4+26vtsDdr3T30sh+L5rZb8xsspntkTTMzM43swVmtsvM1pjZj2s89xgzW2VmW83shzUe+7GZvRJ1f5CZfWBmO8xsoZkNjXosz8x+ambvm9luM5tqZu0jD8+M/L3DzIpqLnmILM0o3re/mf3QzCrMrGXk/k/N7NEDzGatmd1jZp9I2rOfXWZKSo+8bpGZnXqIWT8k6XRJz0b2fyjq4a9HlmxsN7NHahx3o5nlm9k2M/ubmXU6wPP/3sxuidzuFTnTf33kfj8z2xC17zfMbFFk3u+a2fFRj32xHMTMsszstch+/zSz7+/nLPXpkcd2mtmrZpZpZu0k/UlSz6j5tDOzM6O+ZzaY2f8cbGYAQNEGkMwGS2os6f9qse8Vkn4uqYWk91RdPq+W1FrS+ZK+Y2b/LkmR4vYbSWMkHSOpnaT9rkOOFMe/SfqZpLaS7pb0RzM7qsZrXyepg6rPst8d2T4k8ndrd8+queTB3UskzZV0TmTTOZJWqfps/b77Mw7yni+X9LXIe6xpiKTKyOtmufuCgzyP3H18JMvYyP7jox4eLelUSf0lXbfvBw0zu0zSdyVdoOp/aVgg6RXt3wxJQ6Pe13L9az5fvE8zGyTpSVXPs52klyX9+QBn7X8m6ShJ3VT9NR6zn30ukXSepN6SBkq6wt23SvqGpOVR89kq6XFJv3D3lpJyJP35AO8FACRRtAEkt/aStrj7F2uNo84sF5vZkKh9/8/d33f3Kncvcfc8d/8kcn+RpN/pX4X2Ekl/dfeZkbPi/y2p6gAZrpI02d0nR55rmqR5kr4etc8L7l7g7sWSXpd0SgzvcYakcyJF8iRJj0XuN1H1GeaZBzn2V+6+NvK68fQLd9/l7isiefa9v5sk/Szy3ssl/UTSWWaWvZ/nmKF/Feshkn4p6ezI/egfKG6U9Li7z3f3SnefoOoftk7bz3NeGnn9ne6+StUFvaZH3H2ju2+WNFkH/9qUS+pjZu0i/3oy+yD7AgBFG0BS2yqpffTZTHc/w91bRx6L/m/cmugDzWygmb1jZpvNbKeqS+G+JR3HRO/v7nsiz7c/3SR9M1Lud5jZDklnSTo6ap8NUbf3SsqK4T3uO9PbX9InkqapungOklQYOdN6IGsO8lhdOtD76ybpqai5bJZUof3/68ASSWmRf004S9VLN4rMrLuqi/e+ot1N0g9qzPsoSV9akmJmaapesx89g/3NI5avzTWq/mGnwKp/4XbUQfYFAIo2gKT2oaRSSRfVYl+vcf81SZMkdXH3VpKekrTvKiWfS+qyb0cza6bqZQr7s0bSy+7eOupPc3f/5WFk2p8PJPVV9VKGGe6+RFJXVZ8xP9iykUM9f21e+0iPWSPp2hqzaeru87/yxO6u6rPhV0kqcfctqn5/4yRlqLqI73vOH9V4zmbu/maN56uStFFfLvVdVHtfea/u/qm7X6bqJUCPSXrTzPb7C7cAIFG0ASQxd9+h6uUIT5rZJWbWwszSzOwUSc0PcXgLSdvcvcTMBqh6HfU+b0j6NzM7K1Kk7tOB/3v5iqQLzGyUmaVHfoFxqNXu2tKbVb0kpedB3uNeSfMl3aJ/FesPVH0G/lBF+2A2qfqXIbvGcMxGHSTrfjwl6b/MrK8kmVkbM7v4IPvPkHSr/vW+8iL3Z0aKuCRNkHSbmeVatSwzuzDyw1BNr0v6oZm1irzP78SQfaOkDmb2xRluM7s6smykUtJOVZfxw/mBBUADQdEGkNTc/X5Jd0n6nqrL0UZJT0v6T1UX0gO5WdJ9ZrZb0o9UXcr2PediVRfb11R9dnu7pLUHeP01qj6j/gNVF+c1ku5RLf77GinRP5f0fmQZxKAD7DpDUiNJc6Lut1DU+mwz+28z+8vBXs/M3jazuyKvvV3S/ZLmR177FDMbbmZbDvIUj0i6OnJ1kftr8f5+p+pfIHzTzHZJ+ljSiIMcUvN9zVT1Uo4v3qe7vy/pdlV/jXdIKlD1D0n7K7z/peqv3SpJf1f117j0ULkjFqr6XzxWRebTVtK/ScqPfM/8j6RLI2vPAWC/7F8nCQAASF1mdqek0e7O2moA9YIz2gCAlGRmXaz6GudpZtZP0h2q/iVLAKgXfFoYACBVNZb0vKqvVLJN0quSng2aCECDwtIRAAAAIA5YOgIAAADEAUUbAAAAiIOUWaPdvn177969e5DX3rNnj5o3P9Qle7EP84oN84oN84oN84oN84oN84odM4tNqHnNnz9/i7sfdaj9UqZod+/eXfPmzQvy2nl5eRo6dGiQ105GzCs2zCs2zCs2zCs2zCs2zCt2zCw2oeZlZqtqsx9LRwAAAIA4oGgDAAAAcRDXom1mo80s38wKzezeg+x3sZm5meVGbft+5Lh8M+NTvAAAAJBU4rZG28zSJT0haYSktZLmmtkkd19SY78Wqv60rtlR246XdLmkfpKOkTTdzPq4e2W88gIAAAB1KZ5ntAdIKnT35e5eJmmipIv2s99PJf2vpJKobRdJmujupe6+QlJh5PkAAACApBDPq450krQm6v5aSQOjdzCz/pK6uPvfzOyeGsfOqnFsp5ovYGbjJI2TpOzsbOXl5dVN8hgVFRUFe+1kxLxiw7xiw7xiw7xiw7xiw7xix8xik+jzCnZ5PzNLk/SwpGsP9zncfYKkCZKUm5vroS6Hw6V4YsO8YsO8YsO8YsO8YsO8YsO8YsfMYpPo84pn0V4nqUvU/c6Rbfu0kHSCpDwzk6SOkiaZ2YW1OBYAAABIaPFcoz1XUo6Z9TCzTFX/cuOkfQ+6+053b+/u3d29u6qXilzo7vMi+11uZo3NrIekHElz4pgVAAAAqFNxO6Pt7hVmdqukKZLSJT3v7ovN7D5J89x90kGOXWxmr0taIqlC0i1ccQQAAADJJK5rtN19sqT
JNbb96AD7Dq1x/+eSfh63cAAAAEAc8cmQAAAAQBxQtAEAAIA4oGgDAAAAcUDRBgAAAOKAog0AAADEAUUbAAAAiAOKNgAAABAHFG0AAAAknV0l5aEjHBJFGwAAAEll/Y5iDf7FW/pgfUXoKAdF0QYAAEBS+fXbS1Ve6erTJrGrbGKnAwAAAKKs2LJHr89bqysGdlX7poldZRM7HQAAABDlkWkFykxP0y3DeoeOckgUbQAAACSFJet3adLC9fr2Wd11VIvGoeMcEkUbAAAASeHhaflq0SRD487uFTpKrVC0AQAAkPA+Wr1d0z/dpJvO6aVWzRqFjlMrFG0AAAAkvAen5Kt9VqauPaN76Ci1RtEGAABAQnu/cIs+WLZVtwzrreaNM0LHqTWKNgAAABKWu+v+Kfk6plUTXTGwa+g4MaFoAwAAIGFNW7JRC9fs0HeH91HjjPTQcWJC0QYAAEBCqqxyPTS1QD3bN9d/9O8UOk7MKNoAAABISH9ZuF75G3frrpF9lJGefLU1+RIDAAAg5ZVXVunhaQU6/uiW+voJR4eOc1go2gAAAEg4r89bo9Xb9uruUX2Ulmah4xwWijYAAAASSkl5pR57a6lO69ZGw/p2CB3nsFG0AQAAkFBembVKG3eV6p5RfWWWnGezJYo2AAAAEsjuknI98U6hzs5pr0E924WOc0Qo2gAAAEgYz7+3Utv3luueUX1DRzliFG0AAAAkhO17yvTMu8s1ul9HndS5deg4R4yiDQAAgITw1Ixl2lNWofEj+4SOUico2gAAAAhu464SvfjBSn3j1E7KyW4ROk6doGgDAAAguF+/vVRV7rpzeGqczZYo2gAAAAhs9da9mjhnjS47vYu6tG0WOk6doWgDAAAgqEenFyg9zXTbuTmho9QpijYAAACCKdi4W3/6eJ2uPaO7sls2CR2nTlG0AQAAEMzDUwvUPDNDN53TK3SUOkfRBgAAQBAL1+zQPxZv0A1n91Sb5pmh49Q5ijYAAACCeHBqvto2z9T1Z/cIHSUuKNoAAACodx8u26p3l27RzUN7KatxRug4cUHRBgAAQL1ydz04NV8dWzbRVYO6hY4TNxRtAAAA1Kt38jdp/qrtuv28HDVplB46TtxQtAEAAFBvqqpcD0wpULd2zfTN3M6h48QVRRsAAAD15m+ffK5PP9+lO4f3UaP01K6iqf3uAAAAkDAqKqv0yLQC9c1uoQtOPiZ0nLijaAMAAKBevPnROi3fskfjR/ZRepqFjhN3FG0AAADEXWlFpR6dXqCTu7TWiOOzQ8epFxRtAAAAxN1rs1dr/c4SfW9UX5ml/tlsiaINAACAONtTWqEn3inUGb3a6cze7UPHqTcUbQAAAMTVix+s1JaiMt09qm/oKPUqrkXbzEabWb6ZFZrZvft5/CYz+8TMPjaz98zs+Mj27mZWHNn+sZk9Fc+cAAAAiI+de8v11IxlGn5ctvp3bRM6Tr2K2wfLm1m6pCckjZC0VtJcM5vk7kuidnvN3Z+K7H+hpIcljY48tszdT4lXPgAAAMTf0zOXqai0QuNH9gkdpd7F84z2AEmF7r7c3cskTZR0UfQO7r4r6m5zSR7HPAAAAKhHm3aX6IX3V+qCk47RcUe3DB2n3sWzaHeStCbq/trIti8xs1vMbJmk+yXdHvVQDzNbYGYzzOzsOOYEAABAHDz5zjKVVVbpzhEN72y2JJl7fE4im9klkka7+9jI/TGSBrr7rQfY/wpJo9z9GjNrLCnL3bea2WmS/iypX40z4DKzcZLGSVJ2dvZpEydOjMt7OZSioiJlZWUFee1kxLxiw7xiw7xiw7xiw7xiw7xil0oz21JcpXtnFuvMThm67oTGcXmNUPMaNmzYfHfPPdR+cVujLWmdpC5R9ztHth3IREm/kSR3L5VUGrk9P3LGu4+kedEHuPsESRMkKTc314cOHVpX2WOSl5enUK+djJhXbJhXbJhXbJhXbJhXbJhX7FJpZt97Y6HS0tfrf64aomNaN43LayT6vOK5dGSupBwz62FmmZIulzQpegczy4m6e76kpZHtR0V+mVJm1lNSjqTlccwKAACAOrJsc5HemL9WYwZ1i1vJTgZxO6Pt7hVmdqukKZLSJT3v7ovN7D5J89x9kqRbzWy4pHJJ2yVdEzl8iKT7zKxcUpWkm9x9W7yyAgAAoO48PK1ATRul6+ahvUJHCSqeS0fk7pMlTa6x7UdRt+84wHF/lPTHeGYDAABA3fvnup3626LPdfu5vdUuKz5rs5MFnwwJAACAOvPQ1Hy1atpIY4f0DB0lOIo2AAAA6sTcldv0Tv5m3XROL7Vs0ih0nOAo2gAAADhi7q4HpuTrqBaNdc0Z3ULHSQgUbQAAAByxd5du0ZwV23Tbub3VLDOuvwaYNCjaAAAAOCL7zmZ3btNUl5/eNXSchEHRBgAAwBGZsniDPlm3U98d3keZGdTLfZgEAAAADltllevBqQXq3SFL3zi1U+g4CYWiDQAAgMP25wXrVLipSONH9FF6moWOk1Ao2gAAADgsZRVVemR6gU7s1EqjT+gYOk7CoWgDAADgsPx+7mqt3V6su0f1lRlns2uiaAMAACBmxWWVeuztQg3o3lZDctqHjpOQKNoAAACI2UsfrtTm3aWczT4IijYAAABisqukXE/NWKahfY/SgB5tQ8dJWBRtAAAAxOTZd1dox95y3T2yb+goCY2iDQAAgFrbWlSq595drvNPPFondGoVOk5Co2gDAACg1n6Tt0zF5ZW6c0Sf0FESHkUbAAAAtfL5zmL9dtYqXdy/s3p3yAodJ+FRtAEAAFArj71VKHfXHcNzQkdJChRtAAAAHNLKLXv0+rw1unJgN3Vu0yx0nKRA0QYAAMAhPTK9QI3STTcP6xU6StKgaAMAAOCgPtuwS5MWrtd1Z/ZQhxZNQsdJGhRtAAAAHNRDUwuU1ThDNw7pGTpKUqFoAwAA4IAWrN6uaUs26sYhPdW6WWboOEmFog0AAIADenBqvto1z9R1Z/YIHSXpULQBAACwX+8XbtH7hVt1y7Deat44I3ScpEPRBgAAwFe4ux6Ykq9jWjXRFQO7ho6TlCjaAAAA+Irpn27Sx2t26I7hOWrSKD10nKRE0QYAAMCXVFW5HpySrx7tm+vi/p1Dx0laFG0AAAB8yV8WrVf+xt26c0QfZaRTFw8XkwMAAMAXyiur9PC0Ah3bsYX+7cSjQ8dJahRtAAAAfOGN+Wu1aute3TOqr9LSLHScpEbRBgAAgCSppLxSv5q+VP27tta5x3YIHSfpUbQBAAAgSXpl1ipt2FWie0YdKzPOZh8pijYAAABUVFqhJ/OW6eyc9hrcq13oOCmBog0AAAA9/94KbdtTprtH9g0dJWVQtAEAABq47XvK9MzM5RrVL1snd2kdOk7KoGgDAAA0cE/NXKaisgqN52x2naJoAwAANGAbd5XopQ9W6t9P6aQ+2S1Cx0kpFG0AAIAG7PG3C1VR6fru8JzQUVIORRsAAKCBWrNtrybOXa3LTu+ibu2ah46TcijaAAAADdSj05cqzUy3ncvZ7HigaAMAADRASz
fu1p8WrNU1Z3RXx1ZNQsdJSRRtAACABujhaQVqlpmhm87pFTpKyqJoAwAANDCL1u7Q3/+5QWPP7qG2zTNDx0lZFG0AAIAG5sGpBWrTrJGuP6tH6CgpjaINAADQgMxavlUzCzbrO0N7qUWTRqHjpDSKNgAAQAPh7npwSr6yWzbW1YO7h46T8ijaAAAADURewWbNW7Vdt52boyaN0kPHSXkUbQAAgAagqqr6bHbXts10aW6X0HEahLgWbTMbbWb5ZlZoZvfu5/GbzOwTM/vYzN4zs+OjHvt+5Lh8MxsVz5wAAACp7u//3KDF63fpzhE5yszgXGt9iNuUzSxd0hOSvibpeEnfii7SEa+5+4nufoqk+yU9HDn2eEmXS+onabSkJyPPBwAAgBhVVFbpoWn56pOdpQtP7hQ6ToMRzx9nBkgqdPfl7l4maaKki6J3cPddUXebS/LI7YskTXT3UndfIakw8nwAAACI0ZsL1mn55j0aP7Kv0tMsdJwGw9z90HsdzhObXSJptLuPjdwfI2mgu99aY79bJN0lKVPSue6+1MwelzTL3V+J7POcpL+7+xs1jh0naZwkZWdnnzZx4sS4vJdDKSoqUlZWVpDXTkbMKzbMKzbMKzbMKzbMKzbMK3bxmFl5levemcVq2dj0o0FNZJY6RTvU99iwYcPmu3vuofbLqI8wB+PuT0h6wsyukPRfkq6J4dgJkiZIUm5urg8dOjQuGQ8lLy9PoV47GTGv2DCv2DCv2DCv2DCv2DCv2MVjZi++v0JbS5boV1cO0Fk57ev0uUNL9O+xeC4dWScp+ldaO0e2HchESf9+mMcCAACghr1lFXr8nUIN6tlWZ/ZuFzpOgxPPoj1XUo6Z9TCzTFX/cuOk6B3MLCfq7vmSlkZuT5J0uZk1NrMeknIkzYljVgAAgJTzwvsrtaWoTPeM6ptSS0aSRdyWjrh7hZndKmmKpHRJz7v7YjO7T9I8d58k6VYzGy6pXNJ2RZaNRPZ7XdISSRWSbnH3ynhlBQAASDU7i8v19IxlOu/YDjqtW9vQcRqkuK7RdvfJkibX2PajqNt3HOTYn0v6efzSAQAApK5nZi7XrpIKjR/ZN3SUBourlQMAAKSYzbtL9fz7K3TBycfo+GNaho7TYFG0AQAAUsyTeYUqrajSncNzDr0z4oaiDQAAkELW7SjWq7NW65undVbPo7iOeUgUbQAAgBTy2PTqi7jdfh5ns0OjaAMAAKSIZZuL9MZHa3XVoG46pnXT0HEaPIo2AABAinhkWoEaZ6Tp5mG9QkeBKNoAAAApYfH6nfrros/17TN7qH1W49BxIIo2AABASnh4aoFaNsnQDUN6ho6CCIo2AABAkpu/apve+myTbhraS62aNgodBxEUbQAAgCTm7rr/H/lqn9VY157RPXQcRKFoAwAAJLH3Crdo9optuu3c3mqWmRE6DqJQtAEAAJKUu+uBKfnq1LqpLh/QJXQc1EDRBgAASFJTFm/UorU79d3hOWqckR46DmqgaAMAACShyirXQ1Pz1euo5vrGqZ1Cx8F+ULQBAACS0P99vE5LNxXprhF9lZFOpUtEfFUAAACSTFlFlR6ZXqB+x7TU107oGDoODoCiDQAAkGRen7dGa7YV6+5RfZWWZqHj4AAo2gAAAEmkpLxSj721VKd3b6OhfY4KHQcHQdEGAABIIr/9cKU27S7VPaOOlRlnsxMZRRsAACBJ7C4p15N5y3ROn6M0oEfb0HFwCBRtAACAJPHsuyu0Y2+57h7ZN3QU1AJFGwAAIAls21OmZ99drq+f2FEndm4VOg5qgaINAACQBH6TV6ji8krdNaJP6CioJYo2AABAgvt8Z7Fe+nCVvnFqZ/Xu0CJ0HNQSRRsAACDB/frtQrm7vjs8J3QUxICiDQAAkMBWbd2j1+eu0bcGdFWXts1Cx0EMKNoAAAAJ7NHpS5WRbrp1WO/QURAjijYAAECCyt+wW3/+eJ2uPaOHOrRsEjoOYkTRBgAASFAPTc1XVmaGbjqnZ+goOAwUbQAAgAT08Zodmrpko8YN6anWzTJDx8FhoGgDAAAkoAen5Ktd80xdd1aP0FFwmCjaAAAACeaDwi16r3CLbh7WW1mNM0LHwWGiaAMAACQQd9cDU/N1dKsmunJg19BxcAQo2gAAAAnkrU83acHqHbr9vBw1aZQeOg6OAEUbAAAgQVS568Gp+ererpkuOa1z6Dg4QhRtAACABDFnQ6U+27Bbd47oo0bp1LRkx1cQAAAgAZRXVulPS8t0bMcWuuCkY0LHQR2gaAMAACSAP85fq417XXeP7Ku0NAsdB3WAog0AABBYSXmlfvXWUvVqlabzjusQOg7qCEUbAAAgsFdnr9bnO0t0SZ9MmXE2O1VQtAEAAAIqKq3Qk+8U6qze7XVcOy7nl0oo2gAAAAG98N4Kbd1TprtH9Q0dBXWMog0AABDIjr1lmjBzuUYcn61TurQOHQd1jKINAAAQyNMzl6uorELjR/YJHQVxQNEGAAAIYNPuEr3w/gpddPIxOrZjy9BxEAcUbQAAgACeeLtQFZWu7w7nbHaqomgDAADUszXb9uq1Oat16eld1L1989BxECcUbQAAgHr2q7eWysx0+7k5oaMgjuJatM1stJnlm1mhmd27n8fvMrMlZrbIzN4ys25Rj1Wa2ceRP5PimRMAAKC+FG7arTc/WqtrBndTx1ZNQsdBHGXE64nNLF3SE5JGSForaa6ZTXL3JVG7LZCU6+57zew7ku6XdFnksWJ3PyVe+QAAAEJ4eFqBmjZK13eG9g4dBXEWzzPaAyQVuvtydy+TNFHSRdE7uPs77r43cneWpM5xzAMAABDUJ2t3avInG3T92T3Vtnlm6DiIM3P3+Dyx2SWSRrv72Mj9MZIGuvutB9j/cUkb3P1nkfsVkj6WVCHpl+7+5/0cM07SOEnKzs4+beLEiXF5L4dSVFSkrKysIK+djJhXbJhXbJhXbJhXbJhXbJjXVz00r0TLd1bqgSHN1KyRfeVxZhabUPMaNmzYfHfPPdR+cVs6Egszu0pSrqRzojZ3c/d1ZtZT0ttm9om7L4s+zt0nSJogSbm5uT506ND6ivwleXl5CvXayYh5xYZ5xYZ5xYZ5xYZ5xYZ5fdmcFdv0yT8+1Pe/dqy+fk6v/e7DzGKT6POK59KRdZK6RN3vHNn2JWY2XNIPJV3o7qX7trv7usjfyyXlSTo1jlkBAADixt31wJTP1KFFY109uHvoOKgn8SzacyXlmFkPM8uUdLmkL109xMxOlfS0qkv2pqjtbcysceR2e0lnSor+JUoAAICkMaNgs+au3K7bzstR08z00HFQT+K2dMTdK8zsVklTJKVLet7dF5vZfZLmufskSQ9IypL0BzOTpNXufqGk4yQ9bWZVqv5h4Jc1rlYCAACQFKqqXA9MyVeXtk11WW6XQx+AlBHXNdruPlnS5BrbfhR1e/gBjvtA0onxzAYAAFAf/rF4gxav36WHLz1ZmRl8VmBDwlcbAAAgTioqq/TQ1HzldMjSRad0Ch0H9YyiDQAAECd/WrBOyzbv0fiRfZSe9tXL+
SG1UbQBAADioLSiUo9OX6oTO7XSqH4dQ8dBABRtAACAOJg4Z43W7SjWPaP6KnLRBzQwFG0AAIA6tresQr9+u1ADe7TV2TntQ8dBIBRtAACAOvbSB6u0paiUs9kNHEUbAACgDu0sLtdTM5bp3GM7KLd729BxEBBFGwAAoA49++5y7Swu1/iRfUJHQWAUbQAAgDqypahUz723Qv920tHqd0yr0HEQGEUbAACgjjz5zjKVVlTprhGczQZFGwAAoE6s21GsV2at0sX9O6nnUVmh4yABULQBAADqwK/fWipJuv28nMBJkCgo2gAAAEdoxZY9+sP8tbpiYFd1btMsdBwkCIo2AADAEXpkWoEy09N0y7DeoaMggVC0AQAAjsCS9bs0aeF6ffus7jqqRePQcZBAKNoAAABH4OFp+WrZJEPjzu4VOgoSDEUbAADgMM1ftV3TP92kG8/ppVbNGoWOgwRD0QYAADiQkFUmAAAgAElEQVQM7q4Hpnym9lmZuu7M7qHjIAFRtAEAAA7D+4VbNWv5Nt06rLeaZWaEjoMERNEGAACI0b6z2Z1aN9W3BnYNHQcJiqINAAAQo6lLNmrh2p2647wcNc5IDx0HCYqiDQAAEIPKKtdDU/PVs31z/Uf/TqHjIIFRtAEAAGLwl4XrVbCxSHeN7KOMdKoUDozvDgAAgFoqr6zSw9MKdPzRLfX1E44OHQcJjqINAABQS6/PW6PV2/bqnlF9lZZmoeMgwVG0AQAAaqGkvFKPvbVUud3aaGjfo0LHQRKgaAMAANTCyx+u0sZdpbpnVF+ZcTYbh0bRBgAAOITdJeV6Mq9QQ/ocpYE924WOgyRB0QYAADiE595boe17y3X3yD6hoyCJULQBAAAOYvueMj377gqN7tdRJ3VuHToOkghFGwAA4CCemrFMe8oqNJ6z2YgRRRsAAOAANu4q0YsfrNQ3Tu2knOwWoeMgyVC0AQAADuDXby9VlbvuHM7ZbMSOog0AALAfq7fu1cQ5a3T56V3VpW2z0HGQhCjaAAAA+/Ho9AJlpJtuO7d36ChIUhRtAACAGgo27tafPl6na87org4tm4SOgyRF0QYAAKjhoan5ysrM0E1DeoWOgiRG0QYAAIiycM0OTVm8UWPP7qk2zTNDx0ESo2gDAABEeXBqvto2z9T1Z/cIHQVJjqINAAAQ8eGyrXp36RbdPLSXshpnhI6DJEfRBgAAkOTuenBqvjq2bKKrBnULHQcpgKINAAAg6Z38TZq/artuPy9HTRqlh46DFEDRBgAADV5VleuBKQXq1q6ZvpnbOXQcpAiKNgAAaPD+9snn+vTzXbprRB81SqceoW7wnQQAABq0isoqPTytQMd2bKELTjomdBykEIo2AABo0P740Vqt2LJH40f2VVqahY6DFELRBgAADVZJeaV+NX2pTu7SWsOP6xA6DlIMRRsAADRYr81erfU7S/S9UX1lxtls1K1DFm0zSzezV+sjDAAAQH3ZU1qhJ/MKdUavdjqzd/vQcZCCDlm03b1SUjczy4z1yc1stJnlm1mhmd27n8fvMrMlZrbIzN4ys25Rj11jZksjf66J9bUBAAAO5sUPVmpLUZnuHtU3dBSkqNp+tuhySe+b2SRJe/ZtdPeHD3SAmaVLekLSCElrJc01s0nuviRqtwWSct19r5l9R9L9ki4zs7aS/p+kXEkuaX7k2O0xvDcAAID92rm3XE/NWKbhx2Wrf9c2oeMgRdV2jfYySX+N7N8i6s/BDJBU6O7L3b1M0kRJF0Xv4O7vuPveyN1ZkvZdIX6UpGnuvi1SrqdJGl3LrAAAAAf19MxlKiqt0PiRfUJHQQozd6/9zmbNoorxofa9RNJodx8buT9G0kB3v/UA+z8uaYO7/8zM7pbUxN1/FnnsvyUVu/uDNY4ZJ2mcJGVnZ582ceLEWr+XulRUVKSsrKwgr52MmFdsmFdsmFdsmFdsmFdsEnVeO0qr9L2ZxerfIV03ndwkdJwvSdSZJapQ8xo2bNh8d8891H61WjpiZoMlPScpS1JXMztZ0o3ufvORxfzi+a9S9TKRc2I5zt0nSJogSbm5uT506NC6iBOzvLw8hXrtZMS8YsO8YsO8YsO8YsO8YpOo8/rxpMWq9FX65ZVnq3v75qHjfEmizixRJfq8art05FFVL+fYKknuvlDSkEMcs05Sl6j7nSPbvsTMhkv6oaQL3b00lmMBAABisXb7Xr06e5Uuze2ScCUbqafW19F29zU1NlUe4pC5knLMrEfkiiWXS5oUvYOZnSrpaVWX7E1RD02RNNLM2phZG0kjI9sAAAAO26+mL5WZ6fbzeoeOggagtlcdWWNmZ0hyM2sk6Q5Jnx7sAHevMLNbVV2Q0yU97+6Lzew+SfPcfZKkB1S9HOUPkYvEr3b3C919m5n9VNVlXZLuc/dtMb87AACAiMJNRfrjR2t13Zk9dHSrpqHjoAGobdG+SdKvJHVS9RKOqZJuOdRB7j5Z0uQa234UdXv4QY59XtLztcwHAABwUI9ML1DTRum6eWiv0FHQQNS2aLu7XxnXJAAAAHHyz3U79bdFn+v2c3urXVbj0HHQQNR2jfYsM/uDmX3NIms8AAAAksVDU/PVqmkjjR3SM3QUNCC1Ldp9VH0ZvaslLTWzX5gZV3gHAAAJb+7KbXonf7O+M7SXWjZpFDoOGpBaFW2vNs3dvyXpBknXSJpjZjMi19gGAABIOO6uB/6Rr6NaNNY1g7uHjoMGprYfWNNO0lWSxkjaKOk2VV+q7xRJf5DUI14BAQAADtfMpVs0Z+U2/fSifmqamR46DhqY2v4y5IeSXpb07+6+Nmr7PDN7qu5jAQAAHBl31wNTPlPnNk112eldQ8dBA1Tbot3X3X1/D7j7/9ZhHgAAgDrxj39u0D/X7dKD3zxZmRm1/ow+oM7Utmi3N7PvSeonqcm+je5+blxSAQAAHIHKKtdD0wrUu0OWvnFqp9Bx0EDV9se7VyV9puq12D+RtFL/+tRGAACAhPLnBetUuKlI40f0UXoaVyZGGLUt2u3c/TlJ5e4+w92/LYmz2QAAIOGUVVTpkekFOrFTK40+oWPoOGjAart0pDzy9+dmdr6k9ZLaxicSAADA4fv93NVau71YP//GieJz9hBSbYv2z8yslaTxkn4tqaWk78YtFQAAwGEoLqvUY28XakCPthqS0z50HDRwtSra7v7XyM2dkoZJkplRtAEAQEJ56cOV2ry7VE9e2Z+z2QjuSK51c1edpQAAADhCu0rK9Zu8ZRrW9yid3p0VrgjvSIo2PyYCAICE8ezM5dpZXK7xI/uGjgJIOrKivd8PsAEAAKhvW4pK9ex7K3T+iUfrhE6tQscBJB1ijbaZ7db+C7VJahqXRAAAADH6Td4ylZRX6s4RfUJHAb5w0KLt7i3qKwgAAMDh+HxnsV6etUoX9++s3h2yQscBvnAkS0cAAACCe+ytQrm77hieEzoK8CUUbQAAkLRWbtmj1+et0ZUDu6lzm2ah4wBfQtEGAABJ65HpBcpMT9PNw3qFjgJ8BUUbAAAkpU8/36VJC9frujO7q0OLJqHj
AF9B0QYAAEnpoakFymqcoRuHcDYbiYmiDQAAks5Hq7dr+qcbdeOQnmrVrFHoOMB+UbQBAEDSeXBKvto1z9R1Z/YIHQU4IIo2AABIKu8XbtEHy7bqlmG91bzxQT8SBAiKog0AAJKGu+uBKfk6plUTXTGwa+g4wEFRtAEAQNKY/ukmfbxmh+4YnqMmjdJDxwEOiqINAACSQlWV68Ep+erRvrku7t85dBzgkCjaAAAgKfxl0Xrlb9ytu0b0UUY6FQaJj+9SAACQ8Morq/TwtAIdd3RLnX/i0aHjALVC0QYAAAnvD/PWatXWvbpnVB+lpVnoOECtULQBAEBCKymv1GNvLVX/rq01rG+H0HGAWqNoAwCAhPbKrFXasKtE94w6VmaczUbyoGgDAICEVVRaoSfzlunsnPYa3Ktd6DhATCjaAAAgYT3/3gpt21Omu0f2DR0FiBlFGwAAJKTte8r0zMzlGtUvWyd3aR06DhAzijYAAEhIT81cpqKyCo3nbDaSFEUbAAAknI27SvTSByv1jVM6qU92i9BxgMNC0QYAAAnn8bcLVVHp+u7wPqGjAIeNog0AABLK6q179bs5q3X5gC7q2q5Z6DjAYaNoAwCAhPLoWwVKTzPddm5O6CjAEaFoAwCAhFGwcbf+tGCdrjmju7JbNgkdBzgiFG0AAJAwHp5aoOaZGbrpnF6howBHjKINAAASwqK1O/SPxRs09uweats8M3Qc4IhRtAEAQEJ4cGqB2jRrpOvP6hE6ClAnKNoAACC4Wcu3ambBZt08tLdaNGkUOg5QJyjaAAAgKHfXg1Pyld2yscYM7hY6DlBn4lq0zWy0meWbWaGZ3bufx4eY2UdmVmFml9R4rNLMPo78mRTPnAAAIJy8/M2at2q7bj8vR00apYeOA9SZjHg9sZmlS3pC0ghJayXNNbNJ7r4karfVkq6VdPd+nqLY3U+JVz4AABBeVZXrgSn56tq2mS7N7RI6DlCn4nlGe4CkQndf7u5lkiZKuih6B3df6e6LJFXFMQcAAEhQk//5uZZ8vkt3jshRo3RWtCK1mLvH54mrl4KMdvexkftjJA1091v3s++Lkv7q7m9EbauQ9LGkCkm/dPc/7+e4cZLGSVJ2dvZpEydOjMdbOaSioiJlZWUFee1kxLxiw7xiw7xiw7xiw7xic6h5VVa5fvhesdLTpJ+e2VRpZvWYLjHxPRabUPMaNmzYfHfPPdR+cVs6Uge6ufs6M+sp6W0z+8Tdl0Xv4O4TJE2QpNzcXB86dGiAmFJeXp5CvXYyYl6xYV6xYV6xYV6xYV6xOdS8Xp+7Rhv2LtLTY07Tuf061l+wBMb3WGwSfV7x/DeadZKiF1t1jmyrFXdfF/l7uaQ8SafWZTgAABBOaUWlfvXWUp3cuZVGHp8dOg4QF/Es2nMl5ZhZDzPLlHS5pFpdPcTM2phZ48jt9pLOlLTk4EcBAIBk8bvZq7VuR7HuGXWsjCUjSFFxK9ruXiHpVklTJH0q6XV3X2xm95nZhZJkZqeb2VpJ35T0tJktjhx+nKR5ZrZQ0juqXqNN0QYAIAXsLavQ4+8UanDPdjqzd7vQcYC4iesabXefLGlyjW0/iro9V9VLSmoe94GkE+OZDQAAhPHC+yu1pahMT4/py9lspDSuowMAAOrNzr3lenrGMg0/roNO69YmdBwgrijaAACg3kx4d5l2lVRo/Mi+oaMAcUfRBgAA9WLz7lI9/95KXXDyMTru6Jah4wBxR9EGAAD14ol3ClVWWaU7h+eEjgLUC4o2AACIu3U7ivXa7NX65mmd1fMoPvkQDQNFGwAAxN1j05dKkm4/j7PZaDgo2gAAIK6WbS7SGx+t1VWDuumY1k1DxwHqDUUbAADE1SPTCtQ4I003D+sVOgpQryjaAAAgbhav36m/Lvpc15/VQ+2zGoeOA9QrijYAAIibh6YWqFXTRhp7ds/QUYB6R9EGAABxsXR7pd7+bJNuOqeXWjVtFDoOUO8o2gAAoM65u94oKFP7rMa65oxuoeMAQVC0AQBAnZs4d43yt1fptnN7q1lmRug4QBAUbQAAUKd+++FKff/NT3RCu3R9a0DX0HGAYPgREwAA1JmnZyzT//z9M404PluXdt6tzAzO6aHh4rsfAAAcMXfXo9ML9D9//0wXnHyMnryyvxqlWehYQFCc0QYAAEfE3fXLv3+mp2cu1zdP66xfXnyS0inZAEUbAAAcvqoq14//sli//XCVrh7cTT++oJ/SKNmAJIo2AAA4TJVVrnv/uEh/mL9WNw7pqXu/dqzMKNnAPhRtAAAQs/LKKt35+4/110Wf687hfXT7eb0p2UANFG0AABCT0opK3fraAk1bslE/+PqxGjekV+hIQEKiaAMAgForLqvUuJfn6d2lW/TTi/ppzODuoSMBCYuiDQAAaqWotELffnGu5q3cpvsvOUmX5nYJHQlIaBRtAABwSDv3luuaF+bok3U79ejlp+rCk48JHQlIeBRtAABwUFuLSjXmuTkq3FSk31zZXyP7dQwdCUgKFG0AAHBAm3aV6MpnZ2v1tr165ppcndPnqNCRgKRB0QYAAPu1bkexrnxmljbvLtVL3x6gQT3bhY4EJBWKNgAA+IqVW/boymdna3dJuV4eO1D9u7YJHQlIOhRtAADwJUs37taVz85WRZXrtRsG6YROrUJHApISRRsAAHxh8fqdGvPcHKWnmX4/bpBysluEjgQkrbTQAQAAQGJYsHq7vjVhlpo2StcfbhxMyQaOEGe0AQCAZi3fqutfnKv2LRrr1bED1blNs9CRgKRH0QYAoIGbUbBZN748T53bNNOrYwcqu2WT0JGAlEDRBgCgAZu6eINufW2BenfI0svXD1C7rMahIwEpg6INAEAD9ZeF6/Xd33+sEzu10kvXDVCrZo1CRwJSCkUbAIAG6A/z1ug//7hIud3b6vlrT1dWYyoBUNf4XxUAAA3Myx+u1H//32KdndNeE8bkqmlmeuhIQEqiaAMA0IBMmLlMv5j8mYYfl60nrjxVjTMo2UC8ULQBAGgA3F2PvVWoR6YX6N9OOlqPXHaKGqXzcRpAPFG0AQBIce6uX/7jMz09Y7kuOa2z/vfik5SeZqFjASmPog0AQAqrqnL95C+L9dKHqzRmUDf95MJ+SqNkA/WCog0AQIqqrHJ9/81Fen3eWo0b0lPf/9qxMqNkA/WFog0AQAoqr6zSXa8v1F8Wrtcd5+Xou8NzKNlAPaNoAwCQYkorKnXraws0bclGff9rx+rGc3qFjgQ0SBRtAABSSHFZpW58Zb5mFmzWfRf109WDu4eOBDRYFG0AAFJEUWmFrn9xruas3Kb7Lz5Jl57eJXQkoEGjaAMAkAJ2Fpfr2hfmaNHanXr0slN00SmdQkcCGjyKNgAASW7bnjKNeW62lm4s0pNX9teofh1DRwIgKa4fCWVmo80s38wKzeze/Tw+xMw+MrMKM7ukxmPXmNnSyJ9r4pkTAIBktWlXiS57+kMVbirSM9fkUrKBBBK3M9pmli7pCUkjJK2VNNfMJrn7kqjdVku6VtLdNY5tK+n/ScqV5JLmR47
dHq+8AAAkm3U7inXlM7O0aXepXrxugAb3ahc6EoAo8TyjPUBSobsvd/cySRMlXRS9g7uvdPdFkqpqHDtK0jR33xYp19MkjY5jVgAAksqqrXt06VMfauueMr0ydiAlG0hA8SzanSStibq/NrIt3scCAJDSCjft1jef+lB7yyr0uxsGqX/XNqEjAdiPpP5lSDMbJ2mcJGVnZysvLy9IjqKiomCvnYyYV2yYV2yYV2yYV2wSYV6rd1XqgXklSjPT93KbaMvSBcpbGjTSASXCvJINM4tNos8rnkV7naToC3h2jmyr7bFDaxybV3Mnd58gaYIk5ebm+tChQ2vuUi/y8vIU6rWTEfOKDfOKDfOKDfOKTeh5LVi9XQ8+P0ctmjbRqzcMUo/2zYNlqY3Q80pGzCw2iT6veC4dmSspx8x6mFmmpMslTarlsVMkjTSzNmbWRtLIyDYAABqk2cu36qpnZ6tN80y9ftPghC/ZAOJYtN29QtKtqi7In0p63d0Xm9l9ZnahJJnZ6Wa2VtI3JT1tZosjx26T9FNVl/W5ku6LbAMAoMGZWbBZ17wwR0e3bqrXbxyszm2ahY4EoBbiukbb3SdLmlxj24+ibs9V9bKQ/R37vKTn45kPAIBEN23JRt3y6kfq1SFLr1w/QO2yGoeOBKCWkvqXIQEASGV/Wbhed/7+Y/Xr1Eq/vW6AWjVrFDoSgBjE9ZMhAQDA4Xlj/lrdMXGB+ndto1eup2QDyYgz2gAAJJiXZ63Sf//5nzo7p70mjMlV08z00JEAHAaKNgAACeSZmcv188mfavhxHfT4Ff3VpBElG0hWFG0AABKAu+vXbxfq4WkFOv+ko/XoZaeoUTorPIFkRtEGACAwd9f//iNfT81Ypov7d9b9l5yk9DQLHQvAEaJoAwAQUFWV676/LtGLH6zUVYO66r4LT1AaJRtICRRtAAACqaxy/eDNT/T7eWt0w9k99IOvHyczSjaQKijaAAAEUF5ZpfGvL9Skhet1+3k5unN4DiUbSDEUbQAA6llpRaVue22Bpi7ZqHu/dqxuOqdX6EgA4oCiDQBAPSouq9RNr8zXjILN+smF/XTNGd1DRwIQJxRtAADqSVFphca+NFezV2zT/RefpEtP7xI6EoA4omgDAFAPdhaX69oX5mjR2p169LJTdNEpnUJHAhBnFG0AAOJs254yjXlutgo27tYTV/TX6BM6ho4EoB5QtAEAiKNNu0p01XOztWrrXj1zda6G9u0QOhKAekLRBgAgTtbvKNaVz87Wxl0levG6ARrcq13oSADqEUUbAIA4WLV1j654ZrZ2lZTr5esH6rRubUJHAlDPKNoAANSxwk1FuvLZWSqrqNLvbhikEzq1Ch0JQAAUbQAA6tCS9bs05rnZMjNNHDdYfTu2CB0JQCBpoQMAAJAqPl6zQ996ZpYyM9L0+o2DKNlAA8cZbQAA6sCcFdv07Rfnqm3zTL06dqC6tG0WOhKAwCjaAAAcoXeXbtYNv52nTq2b6tWxg9SxVZPQkQAkAIo2AABHYPqSjbr51Y/Uq0OWXr5+gNpnNQ4dCUCCYI02AACH6a+L1uumV+bruKNb6Hc3DKRkA/gSzmgDAHAY3pi/Vt97Y6Fyu7XVc9fmqkWTRqEjAUgwFG0AAGL0yqxV+q8//1Nn57TX02NOU7NM/u8UwFfxXwYAAGLw7LvL9bO/farhx3XQ41f0V5NG6aEjAUhQFG0AAGrB3TVpWZneXPqpzj/xaD16+SlqlM6vOgE4MIo2AACH4O66f0q+3lxarv/o30n3X3ySMijZAA6Bog0AwEFUVbnu++sSvfjBSg3rkqEHLzlZaWkWOhaAJEDRBgDgACqrXD/80yeaOHeNxp7VQ2c230jJBlBr/LsXAAD7UV5Zpbte/1gT567R7ef21g/PP05mlGwAtccZbQAAaiitqNTtv1ugKYs36j9HH6vvDO0VOhKAJETRBgAgSkl5pW58eb5mFGzWjy84Xtee2SN0JABJiqINAEDEntIKjX1pnmat2Kr/vfhEXXZ619CRACQxijYAAJJ2FpfruhfmaOHanXr0slN00SmdQkcCkOQo2gCABm/bnjJd/fxs5W/YrSeu6K/RJ3QMHQlACqBoAwAatE27S3TVs7O1auteTbg6V8P6dggdCUCKoGgDABqs9TuKdeWzs7VxV4leuO50ndGrfehIAFIIRRsA0CCt3rpX33pmlnYVl+vl6wfotG5tQ0cCkGIo2gCABqdwU5GufHaWSiuq9NoNg3Ri51ahIwFIQRRtAECD8unnu3TVs7NlZvr9uMHq27FF6EgAUhRFGwDQYCxcs0NXPz9HzTLT9erYgep5VFboSABSGEUbANAgzFmxTd9+ca7aNs/Uq2MHqkvbZqEjAUhxFG0AQMp7b+kWjf3tXHVq3VSvjh2kjq2ahI4EoAGgaAMAUtpbn27Ud179SD3bN9crYweqfVbj0JEANBBpoQMAAPD/27v3KKvKM8/j3weKywAq3jVA1IjRmJhoJIitSYOZGNNxYpK2I8a7qG2Pjs5M2lnpMW1325Pu3LqTzOikRxHjDTHRlsU4ptUeZaJGuYkK3lFRMRgVFATCpYpn/jgbOByrqHOQXecUfD9r1ap99n732U89a686v3rrPVVl+T9PLuZPb5rDx/bZiSkXjDFkS+pRzmhLkrZLd8xZxGW3P8GR++3KpLM/w04D+zW7JEk7GIO2JGm7c8uMV7j8zvkcO3IPrjnzSAb19+VOUs8rdelIRJwQEc9FxIKI+HYnxwdExG3F8RkRsX+xf/+I+H1EPF58/FOZdUqSth8TH3yJy++cz+cP2YuJZ40yZEtqmtK++0REX+Bq4AvAImBWREzLzKerhk0A3snMkRExHvg+cEpx7MXMPLys+iRJ25+r7n+BH937PF8+bF9+fMrh9G/zrUiSmqfM70CjgQWZ+VJmrgWmACfVjDkJuKHYvh34fEREiTVJkrZDmckP/uVZfnTv83z9iGH8dLwhW1LzRWaW88QRJwMnZOZ5xeMzgKMy8+KqMfOLMYuKxy8CRwFDgKeA54HlwHcy88FOrnEBcAHA3nvvfeSUKVNK+Vq6s2LFCoYM8b+L1ct+NcZ+NcZ+NWZ76FdmMvnZtdz3SjtjR7Rx5qH96VPSnM320K+eZL8aZ88a06x+jRs3bk5mjupuXKsuXFsMfDgzl0TEkcDUiPh4Zi6vHpSZ1wDXAIwaNSrHjh3b85UC06dPp1nX7o3sV2PsV2PsV2N6e7861iffmTqP+155jQnHHsB3vvwxyvzFaG/vV0+zX42zZ41p9X6V+Xu114ERVY+HF/s6HRMRbcAuwJLMXJOZSwAycw7wIvDREmuVJPUy7R3r+dYvHufWma/xH44bWXrIlqRGlRm0ZwEHRcQBEdEfGA9MqxkzDTir2D4ZuD8zMyL2LN5MSUR8BDgIeKnEWiVJvcja9vVcPHkuUx//LZd98WC+dfzBhmxJLae0pSOZ2R4RFwP3AH2BSZn5VERcCczOzGnAdcBNEbEAWEoljAN8DrgyItYB64ELM3NpWbVKknqP1es6uPDmOUx/7i
3+6t8dyjnHHNDskiSpU6Wu0c7Mu4G7a/ZdUbW9GviTTs67A7ijzNokSb3PyjXtnHfDbB59eQnf+/phjB/94WaXJEldatU3Q0qStJnlq9dxzvWzePy1d/nxNw7nq0cMa3ZJkrRFBm1JUst7Z+Vazpw0k2ffWM7V3zyCEz6xb7NLkqRuGbQlSS3tzfdWc8bEmSxcspJrzhjFuEP2anZJklQXg7YkqWUtXvZ7Trt2Bm8sX831Z3+GPxi5R7NLkqS6GbQlSS3p1SWr+ObER1m2ah03TRjNkfvt1uySJKkhBm1JUstZ8OYKTp84g9XtHUw+fwyHDd+l2SVJUsMM2pKklvLM4uWccd0MIJhywRgO2WfnZpckSVulzP8MKUlSQ5547V3GX/MobX36cNufGrIl9W7OaEuSWsKshUs55/pZ7Dq4H5PPG8OI3QY1uyRJ+kAM2pKkpnvohbc5/8bZ7Dt0IJPPG8M+uwxsdkmS9IG5dESS1FT3P/s7zr1hFvvtPojbLjjakC1pu+GMtiSpae6et5hLbp3LoR/amRvPHc3QQf2bXZIkbTPOaEuSmuKfH1vExZMf4/ARQ7n5vKMM2ZK2O85oS5J63OQZr3L51Hn8wYG7c+2ZoxjU35cjSdsfv7NJknrUdQ+9zN/e9TTHHbIX//O0TzOwX99mlyRJpTBoS5J6zFX3v8CP7n2ePzpsH35yyhH0b3MFo6Ttl0FbklS6zORH9z7H1Q+8yNePGMYPTv4kbRcMJ5kAABBMSURBVH0N2ZK2bwZtSVKpMpMr73qa6x9eyKmjP8x3v/oJ+vSJZpclSaUzaEuSSrN+fXL51PncOvNVzj3mAP7yxI8RYciWtGMwaEuSStHesZ7Lbn+SO+e+zsXjRvKt4z9qyJa0QzFoS5K2ubXt67l0ylx+Nf8NLvviwVw0bmSzS5KkHmfQliRtU6vXdfBnN8/hgefe4ooTD+XcYw9odkmS1BQGbUnSNrNyTTvn3zibR15awt9//TBOHf3hZpckSU1j0JYkbRPLV6/jnOtn8fhr7/KP3/gUXztieLNLkqSmMmhLkj6wd1au5cxJM3n2jeVcdeoRfOmwfZtdkiQ1nUFbkvSBvPXeGk6fOIOXl6zkmjNGMe6QvZpdkiS1BIO2JGmrLV72e067dgaLl63m+rM/wzEj92h2SZLUMgzakqSt8trSVXxz4qO8u3IdN00Yzaj9d2t2SZLUUgzakqSGvfjWCk67dgar2zu45fyj+OTwoc0uSZJajkFbktSQZ99YzukTZwAw5YIxHLLPzk2uSJJak0FbklS3Jxe9y5mTZjKwrS+3nH8UB+45pNklSVLLMmhLkuoye+FSzrl+FrsM6set549hxG6Dml2SJLW0Ps0uQJLU+h5e8DZnXDeTPXcawC8vPNqQLUl1cEZbkrRF9z/7Oy68+TE+ssdgbppwFHvuNKDZJUlSr2DQliR16VfzFnPJlLl8bN+dueGc0ew6uH+zS5KkXsOlI5KkTt05dxEXTX6MTw0fys3nHWXIlqQGOaMtSXqfyTNe5fKp8zj6I7sz8axRDOrvy4UkNcrvnJKkzUx66GWuvOtpxh28Jz87/UgG9uvb7JIkqVcyaEuSNrr6gQX88J7n+NIn9uGn44+gf5srDCVpaxm0JUlkJv9w7/Nc9cACvnbEMH548idp62vIlqQPwqAtSTu4zORv73qGSQ+/zKmjR/Ddrx5Gnz7R7LIkqdczaEvSDmx9JpdPnc/kGa9yzjH7c8WJhxJhyJakbcGgLUk7oDXtHaxc08HEeWv5zW9f5aJxB/Lnxx9syJakbcigLUm9wNr29axc086KNe2sXNvOyjUdrFzTvmnfmnZWrq3eV2yvLY6t6ag6t511HbnxuS/74sFcNG5kE786Sdo+GbQlqQQbgvGGULwxDBfBeNXa2n0drFpbFZqrgvGqNR2s7Vhf13Xb+gSDB7QxZEAbg/r33bi9504Dqva3MWRA5djKxS8ZsiWpJAZtSQLWdVTNGFcF40r47dhs5rg6JFcH4+rZ43qDcd8+weD+fRkyoI3BxcfGYNy/el8lGG/at+mc6lA9oK1PQ8s/pk9/ZWtbJknqhkFbUq9UG4w3hNzZb7Tz9pxFnS+bqJpNrg3JHyQYDx7Qlz2GDNh8X9VscnUwrswmVx5vTTCWJPUeBm1JPWJdx3pWrelgxdrameBNQbl22UT1bHLtkou17VsIxo8/sXGzOhgPqpod3n3woGJfEYj7t20WjAdtmDE2GEuStlKpQTsiTgB+CvQFJmbm92qODwBuBI4ElgCnZObC4thfABOADuCSzLynzFolba69Y30l8FYF41XVa43XdrVvUzCuXp+8xWBcpU9QMxNcCca7FcF4cE0w3vB4wznPPDmXPzxmzMbHBmNJUrOUFrQjoi9wNfAFYBEwKyKmZebTVcMmAO9k5siIGA98HzglIg4FxgMfBz4E/GtEfDQzO8qqV+rtqoPxqjrXGlf/9Yrav2axZmuDcbFkojYYD+5fs9Z4477N1xp/0GC8cmFf9tt98FafL0nStlLmjPZoYEFmvgQQEVOAk4DqoH0S8NfF9u3AVVF5hT0JmJKZa4CXI2JB8XyPlFjvVnnhd+/xyvIO5r++DIAs/mJWkjWPi8/Fjk2PNzxTdjJ2y+dsuAZdnVdnDXQ1voHaOx/f+fM+tbid5U/8duPz0c34ra6/u+etuXD9fd/8eLf1d3N80/mdn/fCi2uZvvwpVq3d8lrjRoNxbcgdsWEpxfvemNe3al3x5ucM7t/GwH7OGEuS1Jkyg/Yw4LWqx4uAo7oak5ntEbEM2L3Y/2jNucPKK3XrTbhhNq8uXQ2/eajZpfQuT8xtdgW9RgBDXl/0vpA7fNCgjbPDG9cV9998dnjj0oqq9ccGY0mSekavfjNkRFwAXACw9957M3369B6v4RsfWc/yvZOBAwdSm11qo8yG4+/bX3McopN9DT5nN9ei5ni3+7uppza4bel5V61axaBBg2rO7+p5u6i/5vjWf/3RZZ1bet566+zqeeutMwJWrljJTjsNqDraXnx0ogNYVXxUbb7V+ejt0ooVK5ryvaC3sl+NsV+NsV+Ns2eNafV+lRm0XwdGVD0eXuzrbMyiiGgDdqHypsh6ziUzrwGuARg1alSOHTt2W9Vet7HA9OnTaca1eyv71Rj71Rj71Rj71Rj71Rj71Th71phW71efEp97FnBQRBwQEf2pvLlxWs2YacBZxfbJwP1ZWag6DRgfEQMi4gDgIGBmibVKkiRJ21RpM9rFmuuLgXuo/Hm/SZn5VERcCczOzGnAdcBNxZsdl1IJ4xTjfkHljZPtwEX+xRFJkiT1JqWu0c7Mu4G7a/ZdUbW9GviTLs79LvDdMuuTJEmSylLm0hFJkiRph2XQliRJkkpg0JYkSZJKYNCWJEmSSmDQliRJkkpg0JYkSZJKYNCWJEmSSmDQliRJkkpg0JYkSZJKYNCWJEmSSmDQliRJkkpg0JYkSZJKYNCWJEmSSmDQliRJkkpg0JYkSZJKYNCWJEmSSmDQliRJkkoQmdnsGraJiHgLeKVJl98De
LtJ1+6N7Fdj7Fdj7Fdj7Fdj7Fdj7Ffj7FljmtWv/TJzz+4GbTdBu5kiYnZmjmp2Hb2F/WqM/WqM/WqM/WqM/WqM/WqcPWtMq/fLpSOSJElSCQzakiRJUgkM2tvGNc0uoJexX42xX42xX42xX42xX42xX42zZ41p6X65RluSJEkqgTPakiRJUgkM2nWKiEkR8WZEzO/ieETEf4+IBRHxZER8uqdrbCV19GtsRCyLiMeLjyt6usZWEhEjIuKBiHg6Ip6KiEs7GeM9VqizX95jhYgYGBEzI+KJol9/08mYARFxW3F/zYiI/Xu+0tZQZ7/Ojoi3qu6v85pRayuJiL4RMTci7urkmPdXjW765f1VJSIWRsS8ohezOznesq+Pbc0uoBf5OXAVcGMXx78EHFR8HAX8rPi8o/o5W+4XwIOZeWLPlNPy2oFvZeZjEbETMCci7svMp6vGeI9tUk+/wHtsgzXAcZm5IiL6AQ9FxK8y89GqMROAdzJzZESMB74PnNKMYltAPf0CuC0zL25Cfa3qUuAZYOdOjnl/vd+W+gXeX7XGZWZXfy+7ZV8fndGuU2b+Gli6hSEnATdmxaPA0IjYt2eqaz119EtVMnNxZj5WbL9H5ZvvsJph3mOFOvulQnHPrCge9is+at+gcxJwQ7F9O/D5iIgeKrGl1NkvVYmI4cCXgYldDPH+qlJHv9SYln19NGhvO8OA16oeL8IX/u4cXfxq9lcR8fFmF9Mqil+pHgHMqDnkPdaJLfQLvMc2Kn5N/TjwJnBfZnZ5f2VmO7AM2L1nq2wddfQL4I+LX1PfHhEjerjEVvMT4L8A67s47v21ue76Bd5f1RK4NyLmRMQFnRxv2ddHg7aa5TEq/770U8D/AKY2uZ6WEBFDgDuA/5iZy5tdT6vrpl/eY1UysyMzDweGA6Mj4hPNrqmV1dGv/w3sn5mfBO5j02ztDiciTgTezMw5za6lN6izX95fmzs2Mz9NZYnIRRHxuWYXVC+D9rbzOlD9E+fwYp86kZnLN/xqNjPvBvpFxB5NLqupirWgdwC3ZOY/dzLEe6xKd/3yHutcZr4LPACcUHNo4/0VEW3ALsCSnq2u9XTVr8xckplriocTgSN7urYWcgzwlYhYCEwBjouIm2vGeH9t0m2/vL82l5mvF5/fBO4ERtcMadnXR4P2tjMNOLN45+sYYFlmLm52Ua0qIvbZsD4vIkZTuRd31G+6FL24DngmM/+xi2HeY4V6+uU9tklE7BkRQ4vtfwN8AXi2Ztg04Kxi+2Tg/txB/9FCPf2qWf/5FSrvE9ghZeZfZObwzNwfGE/l3jm9Zpj3V6Gefnl/bRIRg4s3vRMRg4Hjgdq/aNayr4/+1ZE6RcStwFhgj4hYBPwVlTfIkJn/BNwN/BGwAFgFnNOcSltDHf06GfiziGgHfg+M31G/6RaOAc4A5hXrQgH+K/Bh8B7rRD398h7bZF/ghojoS+UHjl9k5l0RcSUwOzOnUfnB5aaIWEDljczjm1du09XTr0si4itU/gLOUuDsplXbory/GuP91aW9gTuLeZM2YHJm/ktEXAit//rof4aUJEmSSuDSEUmSJKkEBm1JkiSpBAZtSZIkqQQGbUmSJKkEBm1JkiSpBAZtSeolImJiRBzazZifR8TJnezfPyK+WWJtK7o5PjQi/n1Z15ekVmTQlqReIjPPy8ynt/L0/YHSgnYdhgIGbUk7FIO2JPWgiLgsIi4ptn8cEfcX28dFxC3F9vER8UhEPBYRv4yIIcX+6RExqtieEBHPR8TMiLg2Iq6qusznIuI3EfFS1ez294DPRsTjEfGfamoaGxF3VT2+KiLOLrYXRsQPImJeca2Rxf4DihrnRcR/qzp3SET836L2eRFxUtX1Dyyu/8OqXsyKiCcj4m+2TYclqXUYtCWpZz0IfLbYHgUMiYh+xb5fR8QewHeAf5uZnwZmA/+5+gki4kPAXwJjqPyXzENqrrEvcCxwIpWAC/Bt4MHMPDwzf9xgzcsy8zDgKuAnxb6fAj8r9lf/q+PVwNeK2scB/xCVf+n2beDF4vqXRcTxwEHAaOBw4MiI+FyDdUlSSzNoS1LPmkMlVO4MrAEeoRK4P0slhI8BDgUeLv69/FnAfjXPMRr4f5m5NDPXAb+sOT41M9cXy0z23gY131r1+ehi+5iq/TdVjQ3g7yLiSeBfgWFd1HB88TEXeIzKDwsHbYNaJalltDW7AEnakWTmuoh4GTgb+A3wJJWZ35HAM8CBwH2ZeeoHuMyaqu2oY3w7m0+8DKw5nnVsb3AasCdwZPG1Luzk+TbU9feZ+b/qqE+SeiVntCWp5z0I/Dnw62L7QmBuZibwKHBM1VrowRHx0ZrzZwF/GBG7RkQb8Md1XPM9YKcujr0CHBoRAyJiKPD5muOnVH1+pNh+GBhfbJ9WNXYX4M0iZI9j02x87fXvAc6tWn8+LCL2quPrkKRew6AtST3vQSrrqB/JzN9RWdf8IEBmvkVltvvWYvnFI9Sswc7M14G/A2ZSCbwLgWXdXPNJoCMinqh9M2Rmvgb8AphffJ5bc+6uRS2XAhvOvRS4KCLmUVkessEtwKhi/5nAs8U1llBZDjM/In6YmfcCk4FHirG30/UPApLUK0VlAkWS1JtExJDMXFHMaN8JTMrMO0u4zkJgVGa+va2fW5K2d85oS1Lv9NfFmyXnAy8DU5tcjySphjPakiRJUgmc0ZYkSZJKYNCWJEmSSmDQliRJkkpg0JYkSZJKYNCWJEmSSmDQliRJkkrw/wH72TEVDO7bNgAAAABJRU5ErkJggg==\n",
228 | "text/plain": [
229 | ""
230 | ]
231 | },
232 | "metadata": {
233 | "needs_background": "light"
234 | },
235 | "output_type": "display_data"
236 | }
237 | ],
238 | "source": [
239 | "fig, ax = plt.subplots()\n",
240 | "ax.plot(range(1,number_of_layers+1),weight_tensor.grad[0].numpy())\n",
241 | "\n",
242 | "ax.set(xlabel='weight update', ylabel='Layer',\n",
243 | " title='Gradient w.r.t. the weights')\n",
244 | "ax.grid()\n",
245 | "\n",
246 | "plt.show()"
247 | ]
248 | },
249 | {
250 | "cell_type": "markdown",
251 | "metadata": {},
252 | "source": [
253 | "As you can the gradient of our toy network quickly gets close to zero. That's why this problem is called vanishing gradient problem. Since the updates to the weights are so small, they also don't help to reduce the loss.\n",
254 | "\n",
255 | "The opposite of this is called the exploding gradient, this happens if weights or activations.\n",
256 | "\n",
257 | "\n",
258 | "## Tasks\n",
259 | "\n",
260 | "1. Try different activation functions.\n",
261 | "2. Increase the number of layers.\n",
262 | "3. Modify the code so that the gradient explodes."
263 | ]
264 | },
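For task 3, a minimal sketch of one way to make the gradient explode: replace the squashing activation with an identity and use weights larger than one, so the chain-rule factors multiply up instead of shrinking. This is just one illustration, not the only solution:

```python
import torch

# With weights > 1 and a linear (identity) activation, every chain-rule
# factor is > 1, so the gradient grows exponentially with depth.
depth = 50
weights = torch.full((depth,), 1.5, requires_grad=True)
out = torch.tensor(1.0)
for i in range(depth):
    out = weights[i] * out  # identity activation: no squashing
loss = (out - (-1.0)) ** 2
loss.backward()
print(weights.grad[0])  # a very large number
```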
265 | {
266 | "cell_type": "code",
267 | "execution_count": null,
268 | "metadata": {},
269 | "outputs": [],
270 | "source": []
271 | }
272 | ],
273 | "metadata": {
274 | "kernelspec": {
275 | "display_name": "Python 3",
276 | "language": "python",
277 | "name": "python3"
278 | },
279 | "language_info": {
280 | "codemirror_mode": {
281 | "name": "ipython",
282 | "version": 3
283 | },
284 | "file_extension": ".py",
285 | "mimetype": "text/x-python",
286 | "name": "python",
287 | "nbconvert_exporter": "python",
288 | "pygments_lexer": "ipython3",
289 | "version": "3.6.7"
290 | }
291 | },
292 | "nbformat": 4,
293 | "nbformat_minor": 2
294 | }
295 |
--------------------------------------------------------------------------------
/assignments/transformer/nlp_2_transformer_offensive_language_classification.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "nbformat": 4,
3 | "nbformat_minor": 0,
4 | "metadata": {
5 | "kernelspec": {
6 | "display_name": "Python 3",
7 | "language": "python",
8 | "name": "python3"
9 | },
10 | "language_info": {
11 | "codemirror_mode": {
12 | "name": "ipython",
13 | "version": 3
14 | },
15 | "file_extension": ".py",
16 | "mimetype": "text/x-python",
17 | "name": "python",
18 | "nbconvert_exporter": "python",
19 | "pygments_lexer": "ipython3",
20 | "version": "3.6.9"
21 | },
22 | "colab": {
23 | "name": "nlp-2-transformer-offensive-language-classification.ipynb",
24 | "private_outputs": true,
25 | "provenance": [],
26 | "include_colab_link": true
27 | },
28 | "accelerator": "GPU"
29 | },
30 | "cells": [
31 | {
32 | "cell_type": "markdown",
33 | "metadata": {
34 | "id": "view-in-github",
35 | "colab_type": "text"
36 | },
37 | "source": [
38 | "
"
39 | ]
40 | },
41 | {
42 | "cell_type": "markdown",
43 | "metadata": {
44 | "id": "1x7ywAcjTMyk"
45 | },
46 | "source": [
47 | "# Offensive Language Classification\n",
48 | "\n",
49 | "\n",
50 | "## First Steps\n",
51 | "\n",
52 | "We need to download the required packages and our the training data."
53 | ]
54 | },
55 | {
56 | "cell_type": "code",
57 | "metadata": {
58 | "id": "T6cFhLiDTMyk"
59 | },
60 | "source": [
61 | "!pip install datasets transformers accelerate"
62 | ],
63 | "execution_count": null,
64 | "outputs": []
65 | },
66 | {
67 | "cell_type": "code",
68 | "metadata": {
69 | "id": "lqhVeVEnTMyl"
70 | },
71 | "source": [
72 | "!wget -c https://www.htw-dresden.de/~guhr/dist/sample/germeval2018.training.txt\n",
73 | "!wget -c https://www.htw-dresden.de/~guhr/dist/sample/germeval2018.test.txt"
74 | ],
75 | "execution_count": null,
76 | "outputs": []
77 | },
78 | {
79 | "cell_type": "code",
80 | "metadata": {
81 | "id": "ONd9nMwMTMyl"
82 | },
83 | "source": [
84 | "import time\n",
85 | "import pandas as pd\n",
86 | "import numpy as np"
87 | ],
88 | "execution_count": null,
89 | "outputs": []
90 | },
91 | {
92 | "cell_type": "code",
93 | "metadata": {
94 | "id": "4Ca0b7_IpH1M"
95 | },
96 | "source": [
97 | "# check if we have a GPU\n",
98 | "!nvidia-smi"
99 | ],
100 | "execution_count": null,
101 | "outputs": []
102 | },
103 | {
104 | "cell_type": "markdown",
105 | "metadata": {
106 | "id": "5wCOi_UiTMyl"
107 | },
108 | "source": [
109 | "## Prepairing the data\n",
110 | "\n",
111 | "In the next step we have to load the data and adjust it a bit. The data is available in tab delimited csv. Pandas is a good choice for simple processing, but it could also be done with Python board tools."
112 | ]
113 | },
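For reference, a minimal sketch of the loading step using only the standard library's csv module, as an alternative to the pandas call in the next cell (assuming the same tab-separated layout with a header row; real tweets with embedded quotes may need extra care):

```python
import csv

# Read the tab-separated GermEval file with the csv module only.
with open("germeval2018.training.txt", encoding="utf-8") as f:
    rows = list(csv.reader(f, delimiter="\t"))
header, data = rows[0], rows[1:]
print(header)
print(len(data), "training rows")
```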
114 | {
115 | "cell_type": "code",
116 | "metadata": {
117 | "id": "fpvStxVhTMyl"
118 | },
119 | "source": [
120 | "test_df = pd.read_csv(\"germeval2018.test.txt\", sep='\\t', header=0,encoding=\"utf-8\")\n",
121 | "train_df = pd.read_csv(\"germeval2018.training.txt\", sep='\\t', header=0,encoding=\"utf-8\")"
122 | ],
123 | "execution_count": null,
124 | "outputs": []
125 | },
126 | {
127 | "cell_type": "code",
128 | "metadata": {
129 | "id": "H9CkOlvaTMyl"
130 | },
131 | "source": [
132 | "train_df.head()"
133 | ],
134 | "execution_count": null,
135 | "outputs": []
136 | },
137 | {
138 | "cell_type": "code",
139 | "metadata": {
140 | "id": "W4BsXBCsTMyn"
141 | },
142 | "source": [
143 | "# Since we do not need the label 2 columns, we can delete them.\n",
144 | "test_df.drop(columns=['label2'], inplace=True)\n",
145 | "train_df.drop(columns=['label2'], inplace=True)"
146 | ],
147 | "execution_count": null,
148 | "outputs": []
149 | },
150 | {
151 | "cell_type": "code",
152 | "metadata": {
153 | "id": "q4P20Gf_TMyn"
154 | },
155 | "source": [
156 | "def clean_text (text):\n",
157 | " #text = text.str.lower() # lowercase\n",
158 | " #text = text.str.replace(r\"\\#\",\"\") # replaces hashtags\n",
159 | " #text = text.str.replace(r\"http\\S+\",\"URL\") # remove URL addresses\n",
160 | " #text = text.str.replace(r\"@\",\"\")\n",
161 | " #text = text.str.replace(r\"[^A-Za-z0-9öäüÖÄÜß()!?]\", \" \")\n",
162 | " #text = text.str.replace(\"\\s{2,}\", \" \")\n",
163 | " return text\n",
164 | "\n",
165 | "def convert_label(label):\n",
166 | " return 1 if label == \"OFFENSE\" else 0"
167 | ],
168 | "execution_count": null,
169 | "outputs": []
170 | },
171 | {
172 | "cell_type": "code",
173 | "metadata": {
174 | "id": "p690qluXTMyn"
175 | },
176 | "source": [
177 | "train_df[\"text\"]=clean_text(train_df[\"text\"])\n",
178 | "test_df[\"text\"]=clean_text(test_df[\"text\"])\n",
179 | "train_df[\"label\"]=train_df[\"label\"].map(convert_label)\n",
180 | "test_df[\"label\"]=test_df[\"label\"].map(convert_label)"
181 | ],
182 | "execution_count": null,
183 | "outputs": []
184 | },
185 | {
186 | "cell_type": "code",
187 | "metadata": {
188 | "id": "9BIixoz-TMyn"
189 | },
190 | "source": [
191 | "# this is how our data set looks now\n",
192 | "train_df.head() "
193 | ],
194 | "execution_count": null,
195 | "outputs": []
196 | },
197 | {
198 | "cell_type": "code",
199 | "metadata": {
200 | "id": "HeE1qHXhTMyo"
201 | },
202 | "source": [
203 | "len(train_df.loc[train_df[\"label\"]==1])"
204 | ],
205 | "execution_count": null,
206 | "outputs": []
207 | },
208 | {
209 | "cell_type": "code",
210 | "source": [
211 | "from sklearn.utils import shuffle\n",
212 | "train_df = shuffle(train_df)"
213 | ],
214 | "metadata": {
215 | "id": "XYIke-q7Oqfz"
216 | },
217 | "execution_count": null,
218 | "outputs": []
219 | },
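Shuffling before the positional split below matters: otherwise the first 500 rows (the validation set) would simply mirror the original file order. An alternative sketch with scikit-learn's train_test_split, stratified on the label so the class balance matches between the two parts (random_state is an arbitrary seed choice):

```python
from sklearn.model_selection import train_test_split

# Stratified split with a fixed seed: 500 validation examples whose
# label distribution matches the remaining training examples.
train_part, valid_part = train_test_split(
    train_df, test_size=500, stratify=train_df["label"], random_state=42
)
print(len(train_part), len(valid_part))
```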
220 | {
221 | "cell_type": "markdown",
222 | "metadata": {
223 | "id": "T1_tIVzLTMyo"
224 | },
225 | "source": [
226 | "How many datasets do we have in our Train/Valid/Test sets?"
227 | ]
228 | },
229 | {
230 | "cell_type": "code",
231 | "metadata": {
232 | "id": "6rCCzWaJTMyo"
233 | },
234 | "source": [
235 | "print(f\"Test exampels \\t {len(test_df) }\")\n",
236 | "print(f\"Train exampels \\t {len(train_df[500:])}\")\n",
237 | "print(f\"Valid exampels \\t {len(train_df[:500])}\")"
238 | ],
239 | "execution_count": null,
240 | "outputs": []
241 | },
242 | {
243 | "cell_type": "markdown",
244 | "metadata": {
245 | "id": "ObJ7KjhYDxCX"
246 | },
247 | "source": [
248 | "In the next step we convert the data in a format that our ml lib can use."
249 | ]
250 | },
251 | {
252 | "cell_type": "code",
253 | "metadata": {
254 | "id": "5zRn0t3oTMyp"
255 | },
256 | "source": [
257 | "from datasets import Dataset\n",
258 | "\n",
259 | "train_dataset = Dataset.from_pandas(train_df[500:])\n",
260 | "valid_dataset = Dataset.from_pandas(train_df[:500])\n",
261 | "test_dataset = Dataset.from_pandas(test_df)"
262 | ],
263 | "execution_count": null,
264 | "outputs": []
265 | },
266 | {
267 | "cell_type": "code",
268 | "metadata": {
269 | "id": "2qt9p2yeTMyp"
270 | },
271 | "source": [
272 | "# What is the shape of our dataset?\n",
273 | "train_dataset"
274 | ],
275 | "execution_count": null,
276 | "outputs": []
277 | },
278 | {
279 | "cell_type": "markdown",
280 | "metadata": {
281 | "id": "fUhmAob8TMyp"
282 | },
283 | "source": [
284 | "## Encoding of the data \n",
285 | "\n",
286 | "We convert our texts into token that our model can process."
287 | ]
288 | },
289 | {
290 | "cell_type": "code",
291 | "metadata": {
292 | "id": "jxDv4WeXTMyp"
293 | },
294 | "source": [
295 | "from transformers import AutoTokenizer\n",
296 | "from datasets import load_dataset, load_metric, list_metrics\n",
297 | "\n",
298 | "\n",
299 | "# try out different models :) \n",
300 | "\n",
301 | "model_checkpoint =\"distilbert-base-multilingual-cased\"\n",
302 | "\n",
303 | "tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)"
304 | ],
305 | "execution_count": null,
306 | "outputs": []
307 | },
308 | {
309 | "cell_type": "code",
310 | "source": [
311 | "!rm -rf ./test-offsive-language/checkpoint*"
312 | ],
313 | "metadata": {
314 | "id": "ZFuavLu5UlwZ"
315 | },
316 | "execution_count": null,
317 | "outputs": []
318 | },
319 | {
320 | "cell_type": "code",
321 | "metadata": {
322 | "id": "ZkNiW-kITMyp"
323 | },
324 | "source": [
325 | "demo_tokens = tokenizer([\"Mehr Daten führen oftmals zu besseren Ergebnissen.\", \"And this is a second sentence\"],add_special_tokens=True, truncation=True)\n",
326 | "demo_tokens"
327 | ],
328 | "execution_count": null,
329 | "outputs": []
330 | },
331 | {
332 | "cell_type": "code",
333 | "metadata": {
334 | "id": "tjSb3j1RTMyp"
335 | },
336 | "source": [
337 | "tokenizer.convert_ids_to_tokens(demo_tokens['input_ids'][0])"
338 | ],
339 | "execution_count": null,
340 | "outputs": []
341 | },
342 | {
343 | "cell_type": "code",
344 | "metadata": {
345 | "id": "2CBg2qhVTMyp"
346 | },
347 | "source": [
348 | "def example_tokenizer(examples):\n",
349 | " return tokenizer(examples[\"text\"], truncation=True,padding=False)"
350 | ],
351 | "execution_count": null,
352 | "outputs": []
353 | },
354 | {
355 | "cell_type": "code",
356 | "metadata": {
357 | "id": "tWghamclTMyp"
358 | },
359 | "source": [
360 | "encoded_train_dataset = train_dataset.map(example_tokenizer, batched=True)\n",
361 | "encoded_valid_dataset = valid_dataset.map(example_tokenizer, batched=True)\n",
362 | "encoded_test_dataset = test_dataset.map(example_tokenizer, batched=True)"
363 | ],
364 | "execution_count": null,
365 | "outputs": []
366 | },
367 | {
368 | "cell_type": "markdown",
369 | "metadata": {
370 | "id": "xhkxSIVLTMyq"
371 | },
372 | "source": [
373 | "## The training \\o/\n",
374 | "\n",
375 | "Now we can train our model. To do this, we need to define a number of settings (hyperparameters):"
376 | ]
377 | },
378 | {
379 | "cell_type": "code",
380 | "metadata": {
381 | "id": "X4DtlapiTMyq"
382 | },
383 | "source": [
384 | "from transformers import AutoModelForSequenceClassification, TrainingArguments, Trainer\n",
385 | "\n",
386 | "model = AutoModelForSequenceClassification.from_pretrained(model_checkpoint, num_labels=2)\n",
387 | "\n",
388 | "batch_size = 8\n",
389 | "\n",
390 | "args = TrainingArguments(\n",
391 | " \"test-offsive-language\",\n",
392 | " evaluation_strategy = \"steps\",\n",
393 | " save_strategy= \"steps\",\n",
394 | " learning_rate=3e-5,\n",
395 | " per_device_train_batch_size=batch_size,\n",
396 | " per_device_eval_batch_size=batch_size,\n",
397 | " gradient_accumulation_steps=4,\n",
398 | " num_train_epochs=2,\n",
399 | " eval_steps=50,\n",
400 | " save_steps=50,\n",
401 | " warmup_steps=50,\n",
402 | " logging_steps=10,\n",
403 | " weight_decay=0.001,\n",
404 | " load_best_model_at_end=True,\n",
405 | " overwrite_output_dir=True,\n",
406 | " metric_for_best_model=\"f1\",\n",
407 | " save_total_limit=2, \n",
408 | " fp16=True \n",
409 | ")"
410 | ],
411 | "execution_count": null,
412 | "outputs": []
413 | },
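One detail worth spelling out: with gradient accumulation, the optimizer only steps after several forward/backward passes, so the effective batch size is the per-device batch size times the accumulation steps (assuming a single GPU here):

```python
# Effective batch size per optimizer step, from the arguments above.
per_device_train_batch_size = 8
gradient_accumulation_steps = 4
print(per_device_train_batch_size * gradient_accumulation_steps)  # 32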
414 | {
415 | "cell_type": "code",
416 | "source": [
417 | "from sklearn.metrics import accuracy_score, f1_score\n",
418 | "\n",
419 | "def compute_metrics(pred):\n",
420 | " labels = pred.label_ids\n",
421 | " preds = pred.predictions.argmax(-1)\n",
422 | " f1 = f1_score(labels, preds, average=\"macro\")\n",
423 | " acc = accuracy_score(labels, preds)\n",
424 | " return {\"accuracy\": acc, \"f1\": f1}"
425 | ],
426 | "metadata": {
427 | "id": "Rh_wjh5TKhY8"
428 | },
429 | "execution_count": null,
430 | "outputs": []
431 | },
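Macro F1 averages the per-class F1 scores, so the minority class (OFFENSE) counts as much as the majority class; plain accuracy would reward a classifier that just predicts the majority label. A tiny worked example with made-up labels:

```python
from sklearn.metrics import f1_score

# Per-class F1 is computed separately and then averaged without weighting.
y_true = [0, 0, 0, 1, 1]
y_pred = [0, 0, 1, 1, 0]
print(f1_score(y_true, y_pred, average="macro"))  # mean of both class F1 scores
```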
432 | {
433 | "cell_type": "code",
434 | "metadata": {
435 | "id": "g0v5GJXmTMyq"
436 | },
437 | "source": [
438 | "trainer = Trainer(\n",
439 | " model,\n",
440 | " args,\n",
441 | " train_dataset=encoded_train_dataset,\n",
442 | " eval_dataset=encoded_valid_dataset, \n",
443 | " tokenizer=tokenizer,\n",
444 | " compute_metrics=compute_metrics\n",
445 | ")"
446 | ],
447 | "execution_count": null,
448 | "outputs": []
449 | },
450 | {
451 | "cell_type": "code",
452 | "metadata": {
453 | "id": "y_lUSvpDTMyq"
454 | },
455 | "source": [
456 | "trainer.train()"
457 | ],
458 | "execution_count": null,
459 | "outputs": []
460 | },
461 | {
462 | "cell_type": "code",
463 | "metadata": {
464 | "id": "i98JNibNTMyr"
465 | },
466 | "source": [
467 | "#trainer.model.to(\"cuda\")\n",
468 | "trainer.evaluate()"
469 | ],
470 | "execution_count": null,
471 | "outputs": []
472 | },
473 | {
474 | "cell_type": "code",
475 | "metadata": {
476 | "id": "rfEjiV_p7BRh"
477 | },
478 | "source": [
479 | "# How much GPU memory did we use?\n",
480 | "!nvidia-smi\n",
481 | "#import torch\n",
482 | "#torch.cuda.empty_cache()\n",
483 | "#!nvidia-smi"
484 | ],
485 | "execution_count": null,
486 | "outputs": []
487 | },
488 | {
489 | "cell_type": "code",
490 | "metadata": {
491 | "id": "-w-nuYI8TMyr"
492 | },
493 | "source": [
494 | "#tensorboard --logdir runs\n",
495 | "%load_ext tensorboard\n",
496 | "#%reload_ext tensorboard\n",
497 | "%tensorboard --logdir /content/test-offsive-language/runs"
498 | ],
499 | "execution_count": null,
500 | "outputs": []
501 | },
502 | {
503 | "cell_type": "markdown",
504 | "metadata": {
505 | "id": "tv-PghAYTMyr"
506 | },
507 | "source": [
508 | "## Testing the model\n",
509 | "\n",
510 | "The next step is to test the model with the provided test data."
511 | ]
512 | },
513 | {
514 | "cell_type": "code",
515 | "metadata": {
516 | "id": "jYI9LEbvTMyr"
517 | },
518 | "source": [
519 | "result = trainer.predict(encoded_test_dataset)\n",
520 | "result.metrics[\"test_f1\"]"
521 | ],
522 | "execution_count": null,
523 | "outputs": []
524 | },
525 | {
526 | "cell_type": "code",
527 | "metadata": {
528 | "id": "YiYw4kS3TMyr"
529 | },
530 | "source": [
531 | "import torch\n",
532 | "\n",
533 | "#trainer.prediction_step(trainer.model,tokenizer(\"das ist ein test\"),False)\n",
534 | "trainer.model.cpu()\n",
535 | "#trainer.model.num_parameters()\n",
536 | "encoded_texts = tokenizer([\"du bist so dumm\", \"du bist toll\"],padding=True, return_tensors=\"pt\")\n",
537 | "print(encoded_texts)\n",
538 | "logits = trainer.model(**encoded_texts)\n",
539 | "probabilities = torch.softmax(logits[0],dim=1)\n",
540 | "print(probabilities)\n",
541 | "class_label = torch.argmax(probabilities,dim=1)\n",
542 | "print(class_label)"
543 | ],
544 | "execution_count": null,
545 | "outputs": []
546 | },
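As used above: softmax turns the model's two raw logits $z$ into probabilities $p_i = \exp(z_i) / \sum_j \exp(z_j)$, and argmax picks the more probable class. A standalone sketch with made-up logits:

```python
import torch

# Made-up logits for one example: class 0 scores higher than class 1.
z = torch.tensor([[2.0, -1.0]])
p = torch.softmax(z, dim=1)
print(p, p.sum())              # probabilities summing to 1.0
print(torch.argmax(p, dim=1))  # tensor([0])
```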
547 | {
548 | "cell_type": "markdown",
549 | "metadata": {
550 | "id": "ylTzH9P8uu-8"
551 | },
552 | "source": [
553 | "How can we predict a sigle test example and how long does it take on a cpu?"
554 | ]
555 | },
556 | {
557 | "cell_type": "code",
558 | "metadata": {
559 | "id": "_eyM790HTMyr"
560 | },
561 | "source": [
562 | "def predict(text):\n",
563 | " trainer.model.cpu()\n",
564 | " #trainer.model.num_parameters()\n",
565 | " encoded_texts = tokenizer(text, return_tensors=\"pt\")\n",
566 | " #print(encoded_texts)\n",
567 | " logits = trainer.model(**encoded_texts)\n",
568 | " probabilities = torch.softmax(logits[0],dim=1)\n",
569 | " #print(probabilities)\n",
570 | " class_label = torch.argmax(probabilities)\n",
571 | " return class_label\n",
572 | " #print(class_label)\n",
573 | "\n",
574 | "%timeit predict(\"du bist so toll\")\n",
575 | "\n"
576 | ],
577 | "execution_count": null,
578 | "outputs": []
579 | },
580 | {
581 | "cell_type": "markdown",
582 | "metadata": {
583 | "id": "lsL5FUdTTMys"
584 | },
585 | "source": [
586 | "# Tutorial:\n",
587 | "\n",
588 | "Our results are already quite good - but we can still improve the results. First get familiar with the notebook - change a few parameters like learning rate and number of epochs and see how they change the results. \n",
589 | "\n",
590 | "**Your task is to improve the classification score.**\n",
591 | "\n",
592 | "Here are some ideas how you can improve the score.\n",
593 | "\n",
594 | "* Test different models. The [Model Hub](https://huggingface.co/models) lists a number of German models with which you can improve the results. \n",
595 | "\n",
596 | "* About 5000 sampels in the data set are comparatively few for this problem. You may find more data sets that you can add to the current training data set.\n",
597 | "\n",
598 | "* A number of multilingual models are available in the [Model Hub](https://huggingface.co/models). These models have been trained with different languages. You could also try adding English to the German dataset to train a multilingual model. This may also be better on the German data. \n",
599 | "\n",
600 | "Data augmentation is a procedure to create new data sets by modifying existing data sets. It is important that the statement does not change (the class remains the same).\n",
601 | "\n",
602 | "* You can replace synonyms words and thus generate new data sets. An example:\n",
603 | "\n",
604 | "> \"Can you still believe all this crap?\" -> \"Can you still believe all this crap?\"\n",
605 | "\n",
606 | "* Everything is allowed here. Try translating texts from German to English and back to German. If the meaning is preserved, the result can also be used for training. A small example with Google Translate:\n",
607 | "\n",
608 | "> Deutsch: \"Kann man diesen ganzen Scheiß noch glauben?\" \n",
609 | "\n",
610 | "> Englisch: \"Can you still believe all this shit?\"\n",
611 | "\n",
612 | "> Deutsch: \"Kannst du all diese Scheiße noch glauben?\"\n",
613 | "\n",
614 | "\n"
615 | ]
616 | }
617 | ]
618 | }
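For the augmentation ideas above, a toy sketch of naive synonym replacement. The synonym table here is a made-up stand-in, purely for illustration; a real setup would use a thesaurus, word vectors, or a masked language model:

```python
import random

# Hypothetical synonym table - illustrative only, not a real resource.
SYNONYMS = {"Scheiß": ["Mist", "Quatsch"], "ganzen": ["kompletten"]}

def augment(text: str) -> str:
    """Randomly swap known words for a synonym, keeping the label unchanged."""
    words = text.split()
    for i, w in enumerate(words):
        if w in SYNONYMS and random.random() < 0.5:
            words[i] = random.choice(SYNONYMS[w])
    return " ".join(words)

print(augment("Kann man diesen ganzen Scheiß noch glauben?"))
```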
--------------------------------------------------------------------------------
/hello-python/Hello PyTorch.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Hello PyTorch - a tiny intro."
8 | ]
9 | },
10 | {
11 | "cell_type": "code",
12 | "execution_count": 4,
13 | "metadata": {},
14 | "outputs": [
15 | {
16 | "name": "stdout",
17 | "output_type": "stream",
18 | "text": [
19 | "Collecting package metadata (current_repodata.json): ...working... done\n",
20 | "Solving environment: ...working... done\n",
21 | "\n",
22 | "# All requested packages already installed.\n",
23 | "\n",
24 | "\n",
25 | "Note: you may need to restart the kernel to use updated packages.\n"
26 | ]
27 | }
28 | ],
29 | "source": [
30 | "conda install pytorch torchvision cpuonly -c pytorch"
31 | ]
32 | },
33 | {
34 | "cell_type": "code",
35 | "execution_count": 5,
36 | "metadata": {},
37 | "outputs": [],
38 | "source": [
39 | "import torch"
40 | ]
41 | },
42 | {
43 | "cell_type": "markdown",
44 | "metadata": {},
45 | "source": [
46 | "## Create a tensor (aka matrix) with PyTorch"
47 | ]
48 | },
49 | {
50 | "cell_type": "code",
51 | "execution_count": 6,
52 | "metadata": {},
53 | "outputs": [
54 | {
55 | "data": {
56 | "text/plain": [
57 | "tensor([[ 1., -1.],\n",
58 | " [ 1., -1.]])"
59 | ]
60 | },
61 | "execution_count": 6,
62 | "metadata": {},
63 | "output_type": "execute_result"
64 | }
65 | ],
66 | "source": [
67 | "torch.tensor([[1., -1.], [1., -1.]])"
68 | ]
69 | },
70 | {
71 | "cell_type": "code",
72 | "execution_count": 14,
73 | "metadata": {},
74 | "outputs": [
75 | {
76 | "name": "stdout",
77 | "output_type": "stream",
78 | "text": [
79 | "tensor([[ 0.0381, -1.6110, -0.9273],\n",
80 | " [-0.0901, -1.8310, -1.4307],\n",
81 | " [-0.3276, -0.5256, 0.2382],\n",
82 | " [-0.6835, -0.1138, -0.6881]])\n"
83 | ]
84 | }
85 | ],
86 | "source": [
87 | "matrix = torch.randn(4, 3)\n",
88 | "print(matrix)"
89 | ]
90 | },
91 | {
92 | "cell_type": "code",
93 | "execution_count": 15,
94 | "metadata": {},
95 | "outputs": [
96 | {
97 | "data": {
98 | "text/plain": [
99 | "torch.Size([4, 3])"
100 | ]
101 | },
102 | "execution_count": 15,
103 | "metadata": {},
104 | "output_type": "execute_result"
105 | }
106 | ],
107 | "source": [
108 | "matrix.shape"
109 | ]
110 | },
111 | {
112 | "cell_type": "code",
113 | "execution_count": 16,
114 | "metadata": {},
115 | "outputs": [
116 | {
117 | "data": {
118 | "text/plain": [
119 | "tensor([[ 0.0381, -0.0901, -0.3276, -0.6835],\n",
120 | " [-1.6110, -1.8310, -0.5256, -0.1138],\n",
121 | " [-0.9273, -1.4307, 0.2382, -0.6881]])"
122 | ]
123 | },
124 | "execution_count": 16,
125 | "metadata": {},
126 | "output_type": "execute_result"
127 | }
128 | ],
129 | "source": [
130 | "matrix.t()"
131 | ]
132 | },
133 | {
134 | "cell_type": "code",
135 | "execution_count": 11,
136 | "metadata": {},
137 | "outputs": [
138 | {
139 | "data": {
140 | "text/plain": [
141 | "tensor([ 0.3480, 0.8093, -2.0684])"
142 | ]
143 | },
144 | "execution_count": 11,
145 | "metadata": {},
146 | "output_type": "execute_result"
147 | }
148 | ],
149 | "source": [
150 | "matrix[2]"
151 | ]
152 | },
153 | {
154 | "cell_type": "code",
155 | "execution_count": 12,
156 | "metadata": {},
157 | "outputs": [
158 | {
159 | "data": {
160 | "text/plain": [
161 | "tensor(0.3480)"
162 | ]
163 | },
164 | "execution_count": 12,
165 | "metadata": {},
166 | "output_type": "execute_result"
167 | }
168 | ],
169 | "source": [
170 | "matrix[2,0]"
171 | ]
172 | },
173 | {
174 | "cell_type": "markdown",
175 | "metadata": {},
176 | "source": [
177 | "Take a look at the PyTorch [documentation](https://pytorch.org/docs/stable/tensors.html#torch.Tensor.view) for details about the ``view()`` function."
178 | ]
179 | },
180 | {
181 | "cell_type": "code",
182 | "execution_count": 17,
183 | "metadata": {},
184 | "outputs": [
185 | {
186 | "data": {
187 | "text/plain": [
188 | "tensor([ 0.0381, -1.6110, -0.9273, -0.0901, -1.8310, -1.4307, -0.3276, -0.5256,\n",
189 | " 0.2382, -0.6835, -0.1138, -0.6881])"
190 | ]
191 | },
192 | "execution_count": 17,
193 | "metadata": {},
194 | "output_type": "execute_result"
195 | }
196 | ],
197 | "source": [
198 | "matrix.view(-1)"
199 | ]
200 | },
201 | {
202 | "cell_type": "code",
203 | "execution_count": 26,
204 | "metadata": {},
205 | "outputs": [
206 | {
207 | "data": {
208 | "text/plain": [
209 | "tensor([[ 0.0381, -1.6110, -0.9273, -0.0901, -1.8310, -1.4307],\n",
210 | " [-0.3276, -0.5256, 0.2382, -0.6835, -0.1138, -0.6881]])"
211 | ]
212 | },
213 | "execution_count": 26,
214 | "metadata": {},
215 | "output_type": "execute_result"
216 | }
217 | ],
218 | "source": [
219 | "matrix.view(2,-1) # view the matrix with 6 rows and 2 columns"
220 | ]
221 | },
222 | {
223 | "cell_type": "markdown",
224 | "metadata": {},
225 | "source": [
226 | "## Matrix multiplication with PyTorch"
227 | ]
228 | },
229 | {
230 | "cell_type": "code",
231 | "execution_count": 31,
232 | "metadata": {},
233 | "outputs": [
234 | {
235 | "name": "stdout",
236 | "output_type": "stream",
237 | "text": [
238 | "tensor([[ 0.5291, -0.1487, 0.8204],\n",
239 | " [ 1.4145, -1.9181, -2.5060],\n",
240 | " [-0.2387, 0.0584, 0.7217]])\n",
241 | "tensor([[ 0.8169, 0.2696, -1.3153],\n",
242 | " [ 0.8360, -0.8357, -1.5030],\n",
243 | " [ 0.9936, 0.1287, 1.9048]])\n"
244 | ]
245 | }
246 | ],
247 | "source": [
248 | "a = torch.randn(3,3)\n",
249 | "b = torch.randn(3,3)\n",
250 | "print(a)\n",
251 | "print(b)"
252 | ]
253 | },
254 | {
255 | "cell_type": "code",
256 | "execution_count": 32,
257 | "metadata": {},
258 | "outputs": [
259 | {
260 | "data": {
261 | "text/plain": [
262 | "tensor([[ 0.4322, -0.0401, -1.0791],\n",
263 | " [ 1.1825, 1.6031, 3.7667],\n",
264 | " [-0.2372, 0.0075, 1.3747]])"
265 | ]
266 | },
267 | "execution_count": 32,
268 | "metadata": {},
269 | "output_type": "execute_result"
270 | }
271 | ],
272 | "source": [
273 | "a * b # this does not do what you might think!"
274 | ]
275 | },
276 | {
277 | "cell_type": "code",
278 | "execution_count": 33,
279 | "metadata": {},
280 | "outputs": [
281 | {
282 | "data": {
283 | "text/plain": [
284 | "tensor([[ 1.1232, 0.3725, 1.0902],\n",
285 | " [-2.9381, 1.6619, -3.7509],\n",
286 | " [ 0.5709, -0.0203, 1.6010]])"
287 | ]
288 | },
289 | "execution_count": 33,
290 | "metadata": {},
291 | "output_type": "execute_result"
292 | }
293 | ],
294 | "source": [
295 | "a @ b"
296 | ]
297 | },
298 | {
299 | "cell_type": "code",
300 | "execution_count": 34,
301 | "metadata": {},
302 | "outputs": [
303 | {
304 | "data": {
305 | "text/plain": [
306 | "tensor([[ 1.1232, 0.3725, 1.0902],\n",
307 | " [-2.9381, 1.6619, -3.7509],\n",
308 | " [ 0.5709, -0.0203, 1.6010]])"
309 | ]
310 | },
311 | "execution_count": 34,
312 | "metadata": {},
313 | "output_type": "execute_result"
314 | }
315 | ],
316 | "source": [
317 | "a.mm(b)"
318 | ]
319 | },
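320 | {
321 | "cell_type": "markdown",
322 | "metadata": {},
323 | "source": [
324 | "``a @ b`` and ``a.mm(b)`` compute the same matrix product (compare the two results above). Matrix multiplication is also defined for non-square tensors as long as the inner dimensions match: an ``n x m`` matrix times an ``m x p`` matrix yields an ``n x p`` result. A minimal sketch (the names ``m1``/``m2`` and the shapes are arbitrary):"
325 | ]
326 | },
327 | {
328 | "cell_type": "code",
329 | "execution_count": null,
330 | "metadata": {},
331 | "outputs": [],
332 | "source": [
333 | "m1 = torch.randn(2, 3)  # 2 x 3\n",
334 | "m2 = torch.randn(3, 4)  # 3 x 4\n",
335 | "(m1 @ m2).shape  # inner dims match (3), result is torch.Size([2, 4])"
336 | ]
337 | },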
320 | {
321 | "cell_type": "code",
322 | "execution_count": 35,
323 | "metadata": {},
324 | "outputs": [
325 | {
326 | "data": {
327 | "text/plain": [
328 | "tensor([[3., 3.],\n",
329 | " [3., 4.],\n",
330 | " [5., 6.]])"
331 | ]
332 | },
333 | "execution_count": 35,
334 | "metadata": {},
335 | "output_type": "execute_result"
336 | }
337 | ],
338 | "source": [
339 | "c = torch.tensor([[1., 2.], [3., 4.], [5., 6.]]) \n",
340 | "c.clamp(min=3)"
341 | ]
342 | },
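343 | {
344 | "cell_type": "markdown",
345 | "metadata": {},
346 | "source": [
347 | "``clamp(min=3)`` replaces every entry smaller than 3 with 3 and leaves the rest untouched. With ``min=0`` this is exactly the ReLU activation function; a minimal sketch:"
348 | ]
349 | },
350 | {
351 | "cell_type": "code",
352 | "execution_count": null,
353 | "metadata": {},
354 | "outputs": [],
355 | "source": [
356 | "torch.tensor([-1., 2., -3.]).clamp(min=0)  # -> tensor([0., 2., 0.]), same as ReLU"
357 | ]
358 | },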
343 | {
344 | "cell_type": "markdown",
345 | "metadata": {},
346 | "source": [
347 | "# What can Pytorch do?\n",
348 | "You can find both examples with further [explanations here](https://pytorch.org/tutorials/beginner/pytorch_with_examples.html)."
349 | ]
350 | },
351 | {
352 | "cell_type": "markdown",
353 | "metadata": {},
354 | "source": [
355 | "## Autograd: AUTOMATIC DIFFERENTIATION \n",
356 | "\n",
357 | "\n"
358 | ]
359 | },
360 | {
361 | "cell_type": "code",
362 | "execution_count": 37,
363 | "metadata": {},
364 | "outputs": [
365 | {
366 | "data": {
367 | "text/plain": [
368 | "tensor([[ 2., -2.],\n",
369 | " [ 2., 2.]])"
370 | ]
371 | },
372 | "execution_count": 37,
373 | "metadata": {},
374 | "output_type": "execute_result"
375 | }
376 | ],
377 | "source": [
378 | "x = torch.tensor([[1., -1.], [1., 1.]], requires_grad=True)\n",
379 | "out = x.pow(2).sum()\n",
380 | "out.backward()\n",
381 | "x.grad"
382 | ]
383 | },
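384 | {
385 | "cell_type": "markdown",
386 | "metadata": {},
387 | "source": [
388 | "This matches the analytic result: for $out = \\sum_{ij} x_{ij}^2$ the gradient is $\\frac{\\partial out}{\\partial x_{ij}} = 2 x_{ij}$, so ``x.grad`` is simply ``2 * x``."
389 | ]
390 | },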
384 | {
385 | "cell_type": "markdown",
386 | "metadata": {},
387 | "source": [
388 | "# Tutotrials\n",
389 | "\n",
390 | "\n",
391 | "**1. I am new to Python:**\n",
392 | "\n",
393 | "If you are new to Python here is a list of online tutorials that you might find useful:\n",
394 | "\n",
395 | "*Learn the Basics*\n",
396 | "\n",
397 | "- [Hello, World!](https://www.learnpython.org/en/Hello%2C_World!)\n",
398 | "- [Variables and Types](https://www.learnpython.org/en/Variables_and_Types)\n",
399 | "- [Lists](https://www.learnpython.org/en/Lists)\n",
400 | "- [Basic Operators](https://www.learnpython.org/en/Basic_Operators)\n",
401 | "- [String Formatting](https://www.learnpython.org/en/String_Formatting)\n",
402 | "- [Basic String Operations](https://www.learnpython.org/en/Basic_String_Operations)\n",
403 | "- [Conditions](https://www.learnpython.org/en/Conditions)\n",
404 | "- [Loops](https://www.learnpython.org/en/Loops)\n",
405 | "- [Functions](https://www.learnpython.org/en/Functions)\n",
406 | "- [Classes and Objects](https://www.learnpython.org/en/Classes_and_Objects)\n",
407 | "- [Dictionaries](https://www.learnpython.org/en/Dictionaries)\n",
408 | "- [Modules and Packages](https://www.learnpython.org/en/Modules_and_Packages)\n",
409 | "\n",
410 | "**2. I am new to PyTorch**\n",
411 | "\n",
412 | "To get familiar with the PyTorchs concepts you can take the [A 60 Minute Blitz](https://pytorch.org/tutorials/beginner/blitz/tensor_tutorial.html#sphx-glr-beginner-blitz-tensor-tutorial-py) tutorial. \n",
413 | "You can find both example toy networks with further [explanations here](https://pytorch.org/tutorials/beginner/pytorch_with_examples.html)."
414 | ]
415 | }
416 | ],
417 | "metadata": {
418 | "kernelspec": {
419 | "display_name": "Python 3",
420 | "language": "python",
421 | "name": "python3"
422 | },
423 | "language_info": {
424 | "codemirror_mode": {
425 | "name": "ipython",
426 | "version": 3
427 | },
428 | "file_extension": ".py",
429 | "mimetype": "text/x-python",
430 | "name": "python",
431 | "nbconvert_exporter": "python",
432 | "pygments_lexer": "ipython3",
433 | "version": "3.7.6"
434 | }
435 | },
436 | "nbformat": 4,
437 | "nbformat_minor": 4
438 | }
439 |
--------------------------------------------------------------------------------
/slides/Deep Learning - Hello Python.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oliverguhr/htw-nlp-lecture/0b7cf49ff455d61fcbd2261034bbf5ecb570088d/slides/Deep Learning - Hello Python.pdf
--------------------------------------------------------------------------------
/slides/Deep NLP 1 Recurrent Neural Networks.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oliverguhr/htw-nlp-lecture/0b7cf49ff455d61fcbd2261034bbf5ecb570088d/slides/Deep NLP 1 Recurrent Neural Networks.pdf
--------------------------------------------------------------------------------
/slides/Deep NLP 2 Word Vectors and Transfer Learning.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oliverguhr/htw-nlp-lecture/0b7cf49ff455d61fcbd2261034bbf5ecb570088d/slides/Deep NLP 2 Word Vectors and Transfer Learning.pdf
--------------------------------------------------------------------------------
/slides/Deep NLP 3 Transforners and Attention.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oliverguhr/htw-nlp-lecture/0b7cf49ff455d61fcbd2261034bbf5ecb570088d/slides/Deep NLP 3 Transforners and Attention.pdf
--------------------------------------------------------------------------------