├── Chapter06
│   └── main.py
├── LICENSE
├── Chapter04
│   ├── widgets.ipynb
│   └── interactive.ipynb
├── Chapter05
│   ├── rise.ipynb
│   └── keras_neural_network_architecture.ipynb
└── README.md
/Chapter06/main.py:
--------------------------------------------------------------------------------
1 | import neptune
2 | neptune.init('shared/onboarding', api_token='eyJhcGlfYWRkcmVzcyI6Imh0dHBzOi8vdWkubmVwdHVuZS5tbCIsImFwaV9rZXkiOiJiNzA2YmM4Zi03NmY5LTRjMmUtOTM5ZC00YmEwMzZmOTMyZTQifQ==')
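# Create a named experiment, tag it, and log metrics, text, and a property over n iterations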
3 | with neptune.create_experiment(name='hello-neptune'):
4 |     neptune.append_tag('introduction-minimal-example')
5 |     n = 117
6 |     for i in range(1, n):
7 |         neptune.log_metric('iteration', i)
8 |         neptune.log_metric('loss', 1/i**0.5)
9 |         neptune.log_text('magic values', 'magic value{}'.format(0.95*i**2))
10 |     neptune.set_property('n_iterations', n)
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2018 Packt
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/Chapter04/widgets.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "from ipywidgets import widgets"
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": 2,
15 | "metadata": {},
16 | "outputs": [
17 | {
18 | "data": {
19 | "application/vnd.jupyter.widget-view+json": {
20 | "model_id": "016522e32ec744dca7d223ab6128cee0",
21 | "version_major": 2,
22 | "version_minor": 0
23 | },
24 | "text/plain": [
25 | "FloatRangeSlider(value=(2.0, 7.0), description='X-Range: ', max=10.0, readout_format='.1f')"
26 | ]
27 | },
28 | "metadata": {},
29 | "output_type": "display_data"
30 | }
31 | ],
32 | "source": [
33 | "x_range = widgets.FloatRangeSlider(value=[2,7],\n",
34 | " min=0,\n",
35 | " max=10.,\n",
36 | " step=0.1,\n",
37 | " description='X-Range: ',\n",
38 | " readout_format='.1f')\n",
39 | "display(x_range)"
40 | ]
41 | },
42 | {
43 | "cell_type": "code",
44 | "execution_count": 3,
45 | "metadata": {},
46 | "outputs": [
47 | {
48 | "data": {
49 | "application/vnd.jupyter.widget-view+json": {
50 | "model_id": "81f2c25074b245caac16b2b4fa823c36",
51 | "version_major": 2,
52 | "version_minor": 0
53 | },
54 | "text/plain": [
55 | "ToggleButton(value=False, description='Show Species')"
56 | ]
57 | },
58 | "metadata": {},
59 | "output_type": "display_data"
60 | }
61 | ],
62 | "source": [
63 | "species_button = widgets.ToggleButton(\n",
64 | " value=False,\n",
65 | " description='Show Species')\n",
66 | "display(species_button)"
67 | ]
68 | },
69 | {
70 | "cell_type": "code",
71 | "execution_count": 4,
72 | "metadata": {},
73 | "outputs": [
74 | {
75 | "data": {
76 | "application/vnd.jupyter.widget-view+json": {
77 | "model_id": "ff089473da5a41daba031d87dee545f8",
78 | "version_major": 2,
79 | "version_minor": 0
80 | },
81 | "text/plain": [
82 | "Dropdown(description='X-Axis:', index=2, options=(('Sepal Length', 0), ('Sepal Width', 1), ('Petal Length', 2)…"
83 | ]
84 | },
85 | "metadata": {},
86 | "output_type": "display_data"
87 | }
88 | ],
89 | "source": [
90 | "feature_x_select = widgets.Dropdown(\n",
91 | " value=2,\n",
92 | " options=[('Sepal Length',0), ('Sepal Width',1),\n",
93 | " ('Petal Length',2), ('Petal Width',3)],\n",
94 | " description='X-Axis:')\n",
95 | "\n",
96 | "display(feature_x_select)"
97 | ]
98 | },
99 | {
100 | "cell_type": "code",
101 | "execution_count": null,
102 | "metadata": {},
103 | "outputs": [],
104 | "source": []
105 | }
106 | ],
107 | "metadata": {
108 | "kernelspec": {
109 | "display_name": "Python 3",
110 | "language": "python",
111 | "name": "python3"
112 | },
113 | "language_info": {
114 | "codemirror_mode": {
115 | "name": "ipython",
116 | "version": 3
117 | },
118 | "file_extension": ".py",
119 | "mimetype": "text/x-python",
120 | "name": "python",
121 | "nbconvert_exporter": "python",
122 | "pygments_lexer": "ipython3",
123 | "version": "3.7.3"
124 | }
125 | },
126 | "nbformat": 4,
127 | "nbformat_minor": 4
128 | }
129 |
--------------------------------------------------------------------------------
/Chapter05/rise.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {
7 | "slideshow": {
8 | "slide_type": "skip"
9 | }
10 | },
11 | "outputs": [],
12 | "source": [
13 | "import pandas as pd\n"
14 | ]
15 | },
16 | {
17 | "cell_type": "markdown",
18 | "metadata": {
19 | "slideshow": {
20 | "slide_type": "slide"
21 | }
22 | },
23 | "source": [
24 | "### Rockstar Recipes: An Introduction"
25 | ]
26 | },
27 | {
28 | "cell_type": "code",
29 | "execution_count": 7,
30 | "metadata": {
31 | "jupyter": {
32 | "source_hidden": true
33 | },
34 | "slideshow": {
35 | "slide_type": "slide"
36 | }
37 | },
38 | "outputs": [
39 | {
40 | "data": {
41 | "text/html": [
42 | "
\n",
43 | "\n",
56 | "
\n",
57 | " \n",
58 | " \n",
59 | " | \n",
60 | " user_id | \n",
61 | " recipe_id | \n",
62 | " date | \n",
63 | " rating | \n",
64 | " review | \n",
65 | "
\n",
66 | " \n",
67 | " \n",
68 | " \n",
69 | " | 0 | \n",
70 | " 38094 | \n",
71 | " 40893 | \n",
72 | " 2/17/03 | \n",
73 | " 4 | \n",
74 | " Great with a salad. Cooked on top of stove for... | \n",
75 | "
\n",
76 | " \n",
77 | " | 1 | \n",
78 | " 1293707 | \n",
79 | " 40893 | \n",
80 | " 12/21/11 | \n",
81 | " 5 | \n",
82 | " So simple, so delicious! Great for chilly fall... | \n",
83 | "
\n",
84 | " \n",
85 | " | 2 | \n",
86 | " 8937 | \n",
87 | " 44394 | \n",
88 | " 12/1/02 | \n",
89 | " 4 | \n",
90 | " This worked very well and is EASY. I used not... | \n",
91 | "
\n",
92 | " \n",
93 | " | 3 | \n",
94 | " 126440 | \n",
95 | " 85009 | \n",
96 | " 2/27/10 | \n",
97 | " 5 | \n",
98 | " I made the Mexican topping and took it to bunk... | \n",
99 | "
\n",
100 | " \n",
101 | " | 4 | \n",
102 | " 57222 | \n",
103 | " 85009 | \n",
104 | " 10/1/11 | \n",
105 | " 5 | \n",
106 | " Made the cheddar bacon topping, adding a sprin... | \n",
107 | "
\n",
108 | " \n",
109 | "
\n",
110 | "
"
111 | ],
112 | "text/plain": [
113 | " user_id recipe_id date rating \\\n",
114 | "0 38094 40893 2/17/03 4 \n",
115 | "1 1293707 40893 12/21/11 5 \n",
116 | "2 8937 44394 12/1/02 4 \n",
117 | "3 126440 85009 2/27/10 5 \n",
118 | "4 57222 85009 10/1/11 5 \n",
119 | "\n",
120 | " review \n",
121 | "0 Great with a salad. Cooked on top of stove for... \n",
122 | "1 So simple, so delicious! Great for chilly fall... \n",
123 | "2 This worked very well and is EASY. I used not... \n",
124 | "3 I made the Mexican topping and took it to bunk... \n",
125 | "4 Made the cheddar bacon topping, adding a sprin... "
126 | ]
127 | },
128 | "execution_count": 7,
129 | "metadata": {},
130 | "output_type": "execute_result"
131 | }
132 | ],
133 | "source": [
134 | "df=pd.read_csv(\"RAW_interactions.csv\")\n",
135 | "df.head()"
136 | ]
137 | },
138 | {
139 | "cell_type": "code",
140 | "execution_count": null,
141 | "metadata": {
142 | "jupyter": {
143 | "source_hidden": true
144 | },
145 | "slideshow": {
146 | "slide_type": "slide"
147 | }
148 | },
149 | "outputs": [],
150 | "source": []
151 | }
152 | ],
153 | "metadata": {
154 | "kernelspec": {
155 | "display_name": "Python 3",
156 | "language": "python",
157 | "name": "python3"
158 | },
159 | "language_info": {
160 | "codemirror_mode": {
161 | "name": "ipython",
162 | "version": 3
163 | },
164 | "file_extension": ".py",
165 | "mimetype": "text/x-python",
166 | "name": "python",
167 | "nbconvert_exporter": "python",
168 | "pygments_lexer": "ipython3",
169 | "version": "3.7.3"
170 | }
171 | },
172 | "nbformat": 4,
173 | "nbformat_minor": 4
174 | }
175 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # JupyterLab Quick Start Guide
2 |
3 |
4 |
5 | This is the code repository for [JupyterLab Quick Start Guide](https://www.packtpub.com/data/jupyterlab-quick-start-guide?utm_source=github&utm_medium=repository&utm_campaign=9781789805543), published by Packt.
6 |
7 | **A beginner's guide to the next-gen, web-based interactive computing environment for data science**
8 |
9 | ## What is this book about?
10 | JupyterLab is a web-based data science interface and the natural evolution of Jupyter Notebooks. This guide takes you through the core commands and functionality of JupyterLab, and you will learn to customize it and enhance your productivity by installing additional extensions.
11 |
12 | This book covers the following exciting features:
13 | * Install JupyterLab and work with Jupyter Notebooks
14 | * Host JupyterLab Notebooks on GitHub and access GitHub resources in your Notebooks
15 | * Explore different methods for exchanging Notebooks
16 | * Discover ways in which multiple users can access the same Notebook
17 | * Publish your Notebooks with nbviewer and convert them into different formats
18 | * Attach and operate widgets within your Notebooks using a JupyterLab document
19 | * Use JupyterLab to work collaboratively with multiple data scientists in your teams
20 |
21 | If you feel this book is for you, get your [copy](https://www.amazon.com/dp/1789805546) today!
22 |
23 |
24 |
25 | ## Instructions and Navigations
26 | All of the code is organized into folders. For example, Chapter06.
27 |
28 | The code will look like the following:
29 | ```
30 | import neptune
31 | neptune.init('shared/onboarding', api_token='eyJhcGlfYWRkcmVzcyI6Imh0dHBzOi8vdWkubmVwdHVuZS5tbCIsImFwaV9rZXkiOiJiNzA2YmM4Zi03NmY5LTRjMmUtOTM5ZC00YmEwMzZmOTMyZTQifQ==')
32 | with neptune.create_experiment():
33 |     neptune.append_tag('minimal-example')
34 | ```
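
The interactive widget examples in Chapter 4 follow the same pattern. Below is a minimal sketch mirroring `Chapter04/widgets.ipynb`; the explicit `display` import is an addition so the snippet is self-contained, since `display` is otherwise available implicitly inside a notebook:
```
from ipywidgets import widgets
from IPython.display import display  # implicit inside Jupyter; imported here for completeness

# Range slider for choosing an X-axis range, as used in the Chapter 4 notebooks
x_range = widgets.FloatRangeSlider(value=[2, 7],
                                   min=0,
                                   max=10.,
                                   step=0.1,
                                   description='X-Range: ',
                                   readout_format='.1f')
display(x_range)
```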
35 |
36 | **Following is what you need for this book:**
37 | This book is for data scientists and data analysts who are new to JupyterLab as well as for existing Jupyter users who want to get acquainted with its impressive features. Although not necessary, basic knowledge of Python will be helpful.
38 |
39 | With the following software and hardware list, you can run all of the code files present in the book (Chapters 4-6).
40 |
41 | ### Software and Hardware List
42 |
43 | | Chapter | Software required | OS required |
44 | | -------- | -------------------------------------------------------------------------------------| -----------------------------------|
45 | | 1-6 | Anaconda Distribution (JupyterLab) | Windows, Mac OS X, and Linux (Any) |
46 | | 3-6 | Node.js | Windows, Mac OS X, and Linux (Any) |
47 | | 5 | nteract | Windows, Mac OS X, and Linux (Any) |
48 |
49 | We also provide a PDF file that has color images of the screenshots/diagrams used in this book. [Click here to download it](https://static.packt-cdn.com/downloads/9781789805543_ColorImages.pdf).
50 |
51 |
52 | ### Related products
53 | * Learning Jupyter 5 - Second Edition [[Packt]](https://www.packtpub.com/big-data-and-business-intelligence/learning-jupyter-5-second-edition?utm_source=github&utm_medium=repository&utm_campaign=9781789137408) [[Amazon]](https://www.amazon.com/Learning-Jupyter-interactive-JavaScript-JupyterLab/dp/1789137403)
54 |
55 | * Jupyter Cookbook [[Packt]](https://www.packtpub.com/big-data-and-business-intelligence/jupyter-cookbook?utm_source=github&utm_medium=repository&utm_campaign=9781788839440) [[Amazon]](https://www.amazon.com/Jupyter-Cookbook-interactive-computing-JavaScript-ebook/dp/B07CDQT8VQ)
56 |
57 | ## Get to Know the Authors
58 | **Lindsay Richman**
59 | is a Product Manager who has worked in product, analytics, and consulting across a variety of industries. She is passionate about the Jupyter project and JupyterLab's role in democratizing scientific computing. She wrote Chapter 5 of this book; proceeds from her chapter will be donated to NumFOCUS.
60 |
61 | **Melissa Ferrari**
62 | completed her Ph.D. in physics at New York University. Jupyter has been a pivotal tool in her research for exploratory data analysis (especially with interactive widgets), prototyping data analysis pipelines, interactive modeling, and adhering to scientific reproducibility and transparency standards.
63 |
64 | **Joseph Oladokun**
65 | is a Data Scientist at eHealth Africa in Nigeria, where he draws on his experience in data analytics, engineering, and machine learning to generate insights from data using best practices. Joseph is also a leader and mentor for several data science communities in Africa, and he is the founder of Data Science in Africa, an organization that uses information to empower data scientists across the continent. He is also the co-lead of the Africa R Users Group. Beyond his profession, Joseph is passionate about sharing information and ideas with others.
66 |
67 | **Wesley Banfield** is an R&D Geologist with a passion for digital innovation. He has worked in tech companies leveraging his software development skills and geological background to provide novel solutions. Throughout his career, his go-to tool for innovation has been Jupyter.
68 |
69 | **Dan Toomey**
70 | has been developing application software for over 20 years. He has worked in a variety of industries and company sizes, in roles ranging from sole contributor on a project to VP/CTO overseeing and directing many. Dan was a contract software developer for years, again working at different levels, typically in the Java space. For the last several years, Dan has been an employee of different companies in the eastern Massachusetts area. Dan has also written R for Data Sciences, Introduction to Jupyter (versions 1 and 2), Jupyter for Data Sciences, and the Jupyter Cookbook.
71 |
72 | ### Suggestions and Feedback
73 | [Click here](https://docs.google.com/forms/d/e/1FAIpQLSdy7dATC6QmEL81FIUuymZ0Wy9vH1jHkvpY57OiMeKGqib_Ow/viewform) if you have any feedback or suggestions.
74 |
75 |
--------------------------------------------------------------------------------
/Chapter04/interactive.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "nbformat": 4,
3 | "nbformat_minor": 0,
4 | "metadata": {
5 | "kernelspec": {
6 | "name": "python3",
7 | "display_name": "Python 3"
8 | },
9 | "language_info": {
10 | "codemirror_mode": {
11 | "name": "ipython",
12 | "version": 3
13 | },
14 | "file_extension": ".py",
15 | "mimetype": "text/x-python",
16 | "name": "python",
17 | "nbconvert_exporter": "python",
18 | "pygments_lexer": "ipython3",
19 | "version": "3.7.3"
20 | },
21 | "colab": {
22 | "name": "interactive.ipynb",
23 | "provenance": [],
24 | "collapsed_sections": []
25 | },
26 | "accelerator": "GPU",
27 | "widgets": {
28 | "application/vnd.jupyter.widget-state+json": {
29 | "b6962ffde86b4b3e8c30f3426520a179": {
30 | "model_module": "@jupyter-widgets/controls",
31 | "model_name": "FloatRangeSliderModel",
32 | "state": {
33 | "_view_name": "FloatRangeSliderView",
34 | "style": "IPY_MODEL_c78b7b2541b846df94facbe158282a42",
35 | "_dom_classes": [],
36 | "description": "X-Range: ",
37 | "step": 0.1,
38 | "_model_name": "FloatRangeSliderModel",
39 | "orientation": "horizontal",
40 | "max": 10,
41 | "_view_module": "@jupyter-widgets/controls",
42 | "_model_module_version": "1.5.0",
43 | "value": [
44 | 0.3,
45 | 7.7
46 | ],
47 | "_view_count": null,
48 | "disabled": false,
49 | "_view_module_version": "1.5.0",
50 | "min": 0,
51 | "continuous_update": true,
52 | "readout_format": ".1f",
53 | "description_tooltip": null,
54 | "readout": true,
55 | "_model_module": "@jupyter-widgets/controls",
56 | "layout": "IPY_MODEL_b9e256133b804ece927ecaf8ed7126cb"
57 | }
58 | },
59 | "c78b7b2541b846df94facbe158282a42": {
60 | "model_module": "@jupyter-widgets/controls",
61 | "model_name": "SliderStyleModel",
62 | "state": {
63 | "_view_name": "StyleView",
64 | "handle_color": null,
65 | "_model_name": "SliderStyleModel",
66 | "description_width": "",
67 | "_view_module": "@jupyter-widgets/base",
68 | "_model_module_version": "1.5.0",
69 | "_view_count": null,
70 | "_view_module_version": "1.2.0",
71 | "_model_module": "@jupyter-widgets/controls"
72 | }
73 | },
74 | "b9e256133b804ece927ecaf8ed7126cb": {
75 | "model_module": "@jupyter-widgets/base",
76 | "model_name": "LayoutModel",
77 | "state": {
78 | "_view_name": "LayoutView",
79 | "grid_template_rows": null,
80 | "right": null,
81 | "justify_content": null,
82 | "_view_module": "@jupyter-widgets/base",
83 | "overflow": null,
84 | "_model_module_version": "1.2.0",
85 | "_view_count": null,
86 | "flex_flow": null,
87 | "width": null,
88 | "min_width": null,
89 | "border": null,
90 | "align_items": null,
91 | "bottom": null,
92 | "_model_module": "@jupyter-widgets/base",
93 | "top": null,
94 | "grid_column": null,
95 | "overflow_y": null,
96 | "overflow_x": null,
97 | "grid_auto_flow": null,
98 | "grid_area": null,
99 | "grid_template_columns": null,
100 | "flex": null,
101 | "_model_name": "LayoutModel",
102 | "justify_items": null,
103 | "grid_row": null,
104 | "max_height": null,
105 | "align_content": null,
106 | "visibility": null,
107 | "align_self": null,
108 | "height": null,
109 | "min_height": null,
110 | "padding": null,
111 | "grid_auto_rows": null,
112 | "grid_gap": null,
113 | "max_width": null,
114 | "order": null,
115 | "_view_module_version": "1.2.0",
116 | "grid_template_areas": null,
117 | "object_position": null,
118 | "object_fit": null,
119 | "grid_auto_columns": null,
120 | "margin": null,
121 | "display": null,
122 | "left": null
123 | }
124 | },
125 | "cd883016a8b147ffa62f1e86bc91eccb": {
126 | "model_module": "@jupyter-widgets/controls",
127 | "model_name": "FloatRangeSliderModel",
128 | "state": {
129 | "_view_name": "FloatRangeSliderView",
130 | "style": "IPY_MODEL_d8cc6d5614a343e892a10b3759e3976c",
131 | "_dom_classes": [],
132 | "description": "Y-Range: ",
133 | "step": 0.1,
134 | "_model_name": "FloatRangeSliderModel",
135 | "orientation": "horizontal",
136 | "max": 10,
137 | "_view_module": "@jupyter-widgets/controls",
138 | "_model_module_version": "1.5.0",
139 | "value": [
140 | 1.2,
141 | 5.4
142 | ],
143 | "_view_count": null,
144 | "disabled": false,
145 | "_view_module_version": "1.5.0",
146 | "min": 0,
147 | "continuous_update": true,
148 | "readout_format": ".1f",
149 | "description_tooltip": null,
150 | "readout": true,
151 | "_model_module": "@jupyter-widgets/controls",
152 | "layout": "IPY_MODEL_496afabf093244ae98a74dd5357c868f"
153 | }
154 | },
155 | "d8cc6d5614a343e892a10b3759e3976c": {
156 | "model_module": "@jupyter-widgets/controls",
157 | "model_name": "SliderStyleModel",
158 | "state": {
159 | "_view_name": "StyleView",
160 | "handle_color": null,
161 | "_model_name": "SliderStyleModel",
162 | "description_width": "",
163 | "_view_module": "@jupyter-widgets/base",
164 | "_model_module_version": "1.5.0",
165 | "_view_count": null,
166 | "_view_module_version": "1.2.0",
167 | "_model_module": "@jupyter-widgets/controls"
168 | }
169 | },
170 | "496afabf093244ae98a74dd5357c868f": {
171 | "model_module": "@jupyter-widgets/base",
172 | "model_name": "LayoutModel",
173 | "state": {
174 | "_view_name": "LayoutView",
175 | "grid_template_rows": null,
176 | "right": null,
177 | "justify_content": null,
178 | "_view_module": "@jupyter-widgets/base",
179 | "overflow": null,
180 | "_model_module_version": "1.2.0",
181 | "_view_count": null,
182 | "flex_flow": null,
183 | "width": null,
184 | "min_width": null,
185 | "border": null,
186 | "align_items": null,
187 | "bottom": null,
188 | "_model_module": "@jupyter-widgets/base",
189 | "top": null,
190 | "grid_column": null,
191 | "overflow_y": null,
192 | "overflow_x": null,
193 | "grid_auto_flow": null,
194 | "grid_area": null,
195 | "grid_template_columns": null,
196 | "flex": null,
197 | "_model_name": "LayoutModel",
198 | "justify_items": null,
199 | "grid_row": null,
200 | "max_height": null,
201 | "align_content": null,
202 | "visibility": null,
203 | "align_self": null,
204 | "height": null,
205 | "min_height": null,
206 | "padding": null,
207 | "grid_auto_rows": null,
208 | "grid_gap": null,
209 | "max_width": null,
210 | "order": null,
211 | "_view_module_version": "1.2.0",
212 | "grid_template_areas": null,
213 | "object_position": null,
214 | "object_fit": null,
215 | "grid_auto_columns": null,
216 | "margin": null,
217 | "display": null,
218 | "left": null
219 | }
220 | },
221 | "8fa927c081a047dbb0315941398367a8": {
222 | "model_module": "@jupyter-widgets/controls",
223 | "model_name": "ToggleButtonModel",
224 | "state": {
225 | "_view_name": "ToggleButtonView",
226 | "style": "IPY_MODEL_a9fe084dd17444559d69eef288272469",
227 | "_dom_classes": [],
228 | "description": "Show Species",
229 | "_model_name": "ToggleButtonModel",
230 | "button_style": "",
231 | "_view_module": "@jupyter-widgets/controls",
232 | "_model_module_version": "1.5.0",
233 | "tooltip": "",
234 | "_view_count": null,
235 | "disabled": false,
236 | "_view_module_version": "1.5.0",
237 | "value": true,
238 | "description_tooltip": null,
239 | "_model_module": "@jupyter-widgets/controls",
240 | "layout": "IPY_MODEL_39342e7201cc45b68146035c7c11ad81",
241 | "icon": ""
242 | }
243 | },
244 | "a9fe084dd17444559d69eef288272469": {
245 | "model_module": "@jupyter-widgets/controls",
246 | "model_name": "DescriptionStyleModel",
247 | "state": {
248 | "_view_name": "StyleView",
249 | "_model_name": "DescriptionStyleModel",
250 | "description_width": "",
251 | "_view_module": "@jupyter-widgets/base",
252 | "_model_module_version": "1.5.0",
253 | "_view_count": null,
254 | "_view_module_version": "1.2.0",
255 | "_model_module": "@jupyter-widgets/controls"
256 | }
257 | },
258 | "39342e7201cc45b68146035c7c11ad81": {
259 | "model_module": "@jupyter-widgets/base",
260 | "model_name": "LayoutModel",
261 | "state": {
262 | "_view_name": "LayoutView",
263 | "grid_template_rows": null,
264 | "right": null,
265 | "justify_content": null,
266 | "_view_module": "@jupyter-widgets/base",
267 | "overflow": null,
268 | "_model_module_version": "1.2.0",
269 | "_view_count": null,
270 | "flex_flow": null,
271 | "width": null,
272 | "min_width": null,
273 | "border": null,
274 | "align_items": null,
275 | "bottom": null,
276 | "_model_module": "@jupyter-widgets/base",
277 | "top": null,
278 | "grid_column": null,
279 | "overflow_y": null,
280 | "overflow_x": null,
281 | "grid_auto_flow": null,
282 | "grid_area": null,
283 | "grid_template_columns": null,
284 | "flex": null,
285 | "_model_name": "LayoutModel",
286 | "justify_items": null,
287 | "grid_row": null,
288 | "max_height": null,
289 | "align_content": null,
290 | "visibility": null,
291 | "align_self": null,
292 | "height": null,
293 | "min_height": null,
294 | "padding": null,
295 | "grid_auto_rows": null,
296 | "grid_gap": null,
297 | "max_width": null,
298 | "order": null,
299 | "_view_module_version": "1.2.0",
300 | "grid_template_areas": null,
301 | "object_position": null,
302 | "object_fit": null,
303 | "grid_auto_columns": null,
304 | "margin": null,
305 | "display": null,
306 | "left": null
307 | }
308 | },
309 | "93c1de18d0244c1184d7b7ac05e11522": {
310 | "model_module": "@jupyter-widgets/controls",
311 | "model_name": "DropdownModel",
312 | "state": {
313 | "_options_labels": [
314 | "Sepal Length",
315 | "Sepal Width",
316 | "Petal Length",
317 | "Petal Width"
318 | ],
319 | "_view_name": "DropdownView",
320 | "style": "IPY_MODEL_c9da6a4df77642da918fff628e36aa83",
321 | "_dom_classes": [],
322 | "description": "X-Axis:",
323 | "_model_name": "DropdownModel",
324 | "index": 0,
325 | "_view_module": "@jupyter-widgets/controls",
326 | "_model_module_version": "1.5.0",
327 | "_view_count": null,
328 | "disabled": false,
329 | "_view_module_version": "1.5.0",
330 | "description_tooltip": null,
331 | "_model_module": "@jupyter-widgets/controls",
332 | "layout": "IPY_MODEL_bc33d9ac837b4a1c98402f6904780b44"
333 | }
334 | },
335 | "c9da6a4df77642da918fff628e36aa83": {
336 | "model_module": "@jupyter-widgets/controls",
337 | "model_name": "DescriptionStyleModel",
338 | "state": {
339 | "_view_name": "StyleView",
340 | "_model_name": "DescriptionStyleModel",
341 | "description_width": "",
342 | "_view_module": "@jupyter-widgets/base",
343 | "_model_module_version": "1.5.0",
344 | "_view_count": null,
345 | "_view_module_version": "1.2.0",
346 | "_model_module": "@jupyter-widgets/controls"
347 | }
348 | },
349 | "bc33d9ac837b4a1c98402f6904780b44": {
350 | "model_module": "@jupyter-widgets/base",
351 | "model_name": "LayoutModel",
352 | "state": {
353 | "_view_name": "LayoutView",
354 | "grid_template_rows": null,
355 | "right": null,
356 | "justify_content": null,
357 | "_view_module": "@jupyter-widgets/base",
358 | "overflow": null,
359 | "_model_module_version": "1.2.0",
360 | "_view_count": null,
361 | "flex_flow": null,
362 | "width": null,
363 | "min_width": null,
364 | "border": null,
365 | "align_items": null,
366 | "bottom": null,
367 | "_model_module": "@jupyter-widgets/base",
368 | "top": null,
369 | "grid_column": null,
370 | "overflow_y": null,
371 | "overflow_x": null,
372 | "grid_auto_flow": null,
373 | "grid_area": null,
374 | "grid_template_columns": null,
375 | "flex": null,
376 | "_model_name": "LayoutModel",
377 | "justify_items": null,
378 | "grid_row": null,
379 | "max_height": null,
380 | "align_content": null,
381 | "visibility": null,
382 | "align_self": null,
383 | "height": null,
384 | "min_height": null,
385 | "padding": null,
386 | "grid_auto_rows": null,
387 | "grid_gap": null,
388 | "max_width": null,
389 | "order": null,
390 | "_view_module_version": "1.2.0",
391 | "grid_template_areas": null,
392 | "object_position": null,
393 | "object_fit": null,
394 | "grid_auto_columns": null,
395 | "margin": null,
396 | "display": null,
397 | "left": null
398 | }
399 | },
400 | "9fde7fa1a82c4e4f94cdbc0d88385c61": {
401 | "model_module": "@jupyter-widgets/controls",
402 | "model_name": "DropdownModel",
403 | "state": {
404 | "_options_labels": [
405 | "Sepal Length",
406 | "Sepal Width",
407 | "Petal Length",
408 | "Petal Width"
409 | ],
410 | "_view_name": "DropdownView",
411 | "style": "IPY_MODEL_047f57b3565e491697c20fb4c0a4089e",
412 | "_dom_classes": [],
413 | "description": "Y-Axis:",
414 | "_model_name": "DropdownModel",
415 | "index": 1,
416 | "_view_module": "@jupyter-widgets/controls",
417 | "_model_module_version": "1.5.0",
418 | "_view_count": null,
419 | "disabled": false,
420 | "_view_module_version": "1.5.0",
421 | "description_tooltip": null,
422 | "_model_module": "@jupyter-widgets/controls",
423 | "layout": "IPY_MODEL_730a7f4234c54faab80e609cf524fe11"
424 | }
425 | },
426 | "047f57b3565e491697c20fb4c0a4089e": {
427 | "model_module": "@jupyter-widgets/controls",
428 | "model_name": "DescriptionStyleModel",
429 | "state": {
430 | "_view_name": "StyleView",
431 | "_model_name": "DescriptionStyleModel",
432 | "description_width": "",
433 | "_view_module": "@jupyter-widgets/base",
434 | "_model_module_version": "1.5.0",
435 | "_view_count": null,
436 | "_view_module_version": "1.2.0",
437 | "_model_module": "@jupyter-widgets/controls"
438 | }
439 | },
440 | "730a7f4234c54faab80e609cf524fe11": {
441 | "model_module": "@jupyter-widgets/base",
442 | "model_name": "LayoutModel",
443 | "state": {
444 | "_view_name": "LayoutView",
445 | "grid_template_rows": null,
446 | "right": null,
447 | "justify_content": null,
448 | "_view_module": "@jupyter-widgets/base",
449 | "overflow": null,
450 | "_model_module_version": "1.2.0",
451 | "_view_count": null,
452 | "flex_flow": null,
453 | "width": null,
454 | "min_width": null,
455 | "border": null,
456 | "align_items": null,
457 | "bottom": null,
458 | "_model_module": "@jupyter-widgets/base",
459 | "top": null,
460 | "grid_column": null,
461 | "overflow_y": null,
462 | "overflow_x": null,
463 | "grid_auto_flow": null,
464 | "grid_area": null,
465 | "grid_template_columns": null,
466 | "flex": null,
467 | "_model_name": "LayoutModel",
468 | "justify_items": null,
469 | "grid_row": null,
470 | "max_height": null,
471 | "align_content": null,
472 | "visibility": null,
473 | "align_self": null,
474 | "height": null,
475 | "min_height": null,
476 | "padding": null,
477 | "grid_auto_rows": null,
478 | "grid_gap": null,
479 | "max_width": null,
480 | "order": null,
481 | "_view_module_version": "1.2.0",
482 | "grid_template_areas": null,
483 | "object_position": null,
484 | "object_fit": null,
485 | "grid_auto_columns": null,
486 | "margin": null,
487 | "display": null,
488 | "left": null
489 | }
490 | },
491 | "49f3d05fc367424c8030eaff3177118e": {
492 | "model_module": "@jupyter-widgets/controls",
493 | "model_name": "VBoxModel",
494 | "state": {
495 | "_view_name": "VBoxView",
496 | "_dom_classes": [
497 | "widget-interact"
498 | ],
499 | "_model_name": "VBoxModel",
500 | "_view_module": "@jupyter-widgets/controls",
501 | "_model_module_version": "1.5.0",
502 | "_view_count": null,
503 | "_view_module_version": "1.5.0",
504 | "box_style": "",
505 | "layout": "IPY_MODEL_ae91ef85ddaf4827b7da06f49097fe73",
506 | "_model_module": "@jupyter-widgets/controls",
507 | "children": [
508 | "IPY_MODEL_93c1de18d0244c1184d7b7ac05e11522",
509 | "IPY_MODEL_9fde7fa1a82c4e4f94cdbc0d88385c61",
510 | "IPY_MODEL_b6962ffde86b4b3e8c30f3426520a179",
511 | "IPY_MODEL_cd883016a8b147ffa62f1e86bc91eccb",
512 | "IPY_MODEL_8fa927c081a047dbb0315941398367a8",
513 | "IPY_MODEL_7ba42e6dc0f5417dbf5773dc85365a1f"
514 | ]
515 | }
516 | },
517 | "ae91ef85ddaf4827b7da06f49097fe73": {
518 | "model_module": "@jupyter-widgets/base",
519 | "model_name": "LayoutModel",
520 | "state": {
521 | "_view_name": "LayoutView",
522 | "grid_template_rows": null,
523 | "right": null,
524 | "justify_content": null,
525 | "_view_module": "@jupyter-widgets/base",
526 | "overflow": null,
527 | "_model_module_version": "1.2.0",
528 | "_view_count": null,
529 | "flex_flow": null,
530 | "width": null,
531 | "min_width": null,
532 | "border": null,
533 | "align_items": null,
534 | "bottom": null,
535 | "_model_module": "@jupyter-widgets/base",
536 | "top": null,
537 | "grid_column": null,
538 | "overflow_y": null,
539 | "overflow_x": null,
540 | "grid_auto_flow": null,
541 | "grid_area": null,
542 | "grid_template_columns": null,
543 | "flex": null,
544 | "_model_name": "LayoutModel",
545 | "justify_items": null,
546 | "grid_row": null,
547 | "max_height": null,
548 | "align_content": null,
549 | "visibility": null,
550 | "align_self": null,
551 | "height": null,
552 | "min_height": null,
553 | "padding": null,
554 | "grid_auto_rows": null,
555 | "grid_gap": null,
556 | "max_width": null,
557 | "order": null,
558 | "_view_module_version": "1.2.0",
559 | "grid_template_areas": null,
560 | "object_position": null,
561 | "object_fit": null,
562 | "grid_auto_columns": null,
563 | "margin": null,
564 | "display": null,
565 | "left": null
566 | }
567 | },
568 | "7ba42e6dc0f5417dbf5773dc85365a1f": {
569 | "model_module": "@jupyter-widgets/output",
570 | "model_name": "OutputModel",
571 | "state": {
572 | "_view_name": "OutputView",
573 | "msg_id": "",
574 | "_dom_classes": [],
575 | "_model_name": "OutputModel",
576 | "outputs": [
577 | {
578 | "output_type": "display_data",
579 | "metadata": {
580 | "tags": []
581 | },
582 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYUAAAEJCAYAAAB7UTvrAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjIsIGh0\ndHA6Ly9tYXRwbG90bGliLm9yZy8li6FKAAAgAElEQVR4nOzdd3hUddbA8e+5U9PoAUJHBcGGKFXs\nioi9d119Laura93XdXWb7q6rrquuvb+rrr27KmLBgg0FC1Wl9xJKgNQp97x/3GHIJDNhUkjAnM/z\n5CG59UyAOXN/5fxEVTHGGGMAnJYOwBhjzLbDkoIxxpgkSwrGGGOSLCkYY4xJsqRgjDEmyd/SATRG\np06dtE+fPi0dhjHGbFemTJmyWlUL0+3brpNCnz59mDx5ckuHYYwx2xURWZhpnzUfGWOMSbKkYIwx\nJsmSgjHGmCRLCsYYY5IsKRhjjEmypGCMMSbJkoIxxpgkSwrGGGOSLCkYY4xJsqRgjDEmyZKCMcaY\nJEsKxhhjkiwpGGOMSbKkYIwxJsmSgjHGmCRLCsYYY5KaLSmIyAIRmSYi34lIrZVxxHO3iMwRkaki\nsldzxWaMMcbT3CuvHaSqqzPsGwv0S3wNBx5I/GmMMaaZbEvNR8cCT6rnS6CdiBS1dFDGGNOaNGdS\nUOBdEZkiIhel2d8dWFzt5yWJbSlE5CIRmSwik4uLi7dSqMYY0zo1Z1LYV1X3wmsmulRE9m/IRVT1\nYVUdoqpDCgsLmzZCY4xp5ZotKajq0sSfq4BXgWE1DlkK9Kz2c4/ENmOMMc2kWZKCiOSJSMGm74HD\ngOk1DnsDOCcxCmkEsF5VlzdHfMYYYzzNNfqoC/CqiGy65zOq+o6IXAygqg8CbwNHAHOAcuC8ZorN\nGGNMQrMkBVWdBwxKs/3Bat8rcGlzxGOMMSa9bWlIqjHGmBZmScEYY0ySJQVjjDFJlhSMMcYkWVIw\nxhiTZEnBGGNMkiUFY4wxSZYUjDHGJFlSMMYYk2RJwRhjTJIlBWOMMUmWFIwxxiRZUjDGGJNkScEY\nY0ySJQVjjDFJlhSMMcYkWVIwxhiT1KxJQUR8IvKtiLyZZt+5IlIsIt8lvi5oztiMMcY03xrNm1wB\nzALaZNj/vKpe1ozxGGOMqabZnhREpAdwJPBoc93TGGNM/TRn89FdwLWAW8cxJ4rIVBF5SUR6pjtA\nRC4SkckiMrm4uHirBGqMMa1VsyQFETkKWKWqU+o47L9AH1XdA3gPeCLdQar6sKoOUdUhhYWFWyFa\nY0xzm/PdfD54eiI/TZnb0qG0es3VpzAKOEZEjgDCQBsR+Y+qnrXpAFVdU+34R4Hbmik2Y0wLqSir\n5PdH/Z0fv56L4wiqyg6DevP3cb8ntyCnpcNrlZrlSUFVf6eqPVS1D3AaMKF6QgAQkaJqPx6D1yFt\njPkZe/S6/zDry9lUlVdRUVpJZVkVs6fM44Gr/t3SobVaLTpPQURuEpFjEj9eLiIzROR74HLg3JaL\nzBjTHN574mOiVdGUbdGqGBOemYiqtlBUrVtzD0lFVT8CPkp8/8dq238H/K654zHGtJxoVSz99kj6\n7WbrsxnNxpgWM/iQ3RFHUraJCIMO2AURyXCW2ZosKRhjWsyld59Hfrs8gjlBAII5QfLa5vLr+y5s\n4char2ZvPjLGmE2671TEv3+8m3GPfcDsKfPYcc8+jL3gENoVtm3p0FotSwrGmBbVpmMBp157XEuH\nYRKs+cgYY0ySJQVjzFZnw0u3H5YUjDFbzVuPvMep3S/iMN8pnNnnEiY892lLh2S2wPoUjDFbxZsP\nvcuD1zxJVXkVAKsWreaOCx4gEPCz34kjWjg6k8kWk4KI7IVX8noQ0A4oAb4Hxqnq5K0bnjFme6Sq\nPPGnF5IJYZOq8giP3/CsJYVtWMbmIxEZIyKTgWeBnsBnwHOJP3sCTydKWB/eLJEaY7YbsWiM9cUb\n0u5bsWBVM0dj6qOuJ4ULgUtU9etMB4jIUOC3wDtNHZgxZvvlD/hp36Uta1eU1NpXtEOXFojIZCvj\nk4KqnlRXQkgc87WqntT0YRljtiWlJWU8+rv/cM5Ol3Hh7lfz+n3jiMfjGY8XEc7962mEcoMp20O5\nQS74+5lbO1zTCPXqaBaRNkB+9W2quqxJIzLGbFMilRF+PeJ3rFxYnCxg98hvn2baxB/4/XNXZTxv\n7P8cgj/g54k/Pk/xkjUU7diFC285i32OHdpcoZsGyCopiMihwMNAb6B6lSoFfFshLmPMNuLD5z5j\n9dK1KRVNq8qr+PK/k1k4awm9B/bIeO7osw9g9NkHNEeYpolkO0/hMeBmoC0QqPYVrOskY8z27/uP\nZlBZVlVruzjCD5Nmt0BEZmvKtvkoDPyfqmZuRDTG/Cx17duZQMhfa+0DcYTCHh1bKCqztWT7pHAn\ncK1YgXNjWp2x5x+Cz5/aSuz4HAo65DPooF2b/H6qypT3vufmM//FX0+9gy/+O9nKZDQjyeaXLSL9\ngPFAJ2B19X2qukPWNxPxAZOBpap6VI19IeBJYG9gDXCqqi6o63pDhgzRyZNt/pwxW9u0ibO45ey7\nWb96A25c2WFQb/7w/NV06V3Y5Pe674rHeefxCckmq3BeiH2OHcp1T11uC+80ERGZoqpD0u3Ltvno\nJWAi8CJQ0YhYrgBmAW3S7DsfWKeqO4nIacCtwKmNuJcxponsvt9A/jP/flYsWEUwHKRjUfutcp+F\ns5bw9qMfEKmIJLdVllXx+etfM+PzH9lt1ICtcl+zWbZJoS8wWFXdht5IRHrglcv4G3B1mkOOBf6c\n+P4l4F4REbXnRmO2CSJCUd+tO/FsyvjvUbf220xleRVfvf2NJYVmkG2fwuvAwY28113AtUCmxNId\nWAygqjFgPVCrF0tELkqU15hcXFzcyJCMMduS3DY5tfovAAIBP3ltc1sgotYn26QQAt4QkfEi8mT1\nr2xOFpGjgFWqOqXBkSao6sOqOkRVhxQWNn17pjGm4Sa/9z1/PPZWbjr5n/w0eU69zx91/DDStQ2I\nz+Gg0/dtggjNlmTbfDQj8dVQo4BjROQIvOGtbUTkP6p6VrVjluIV2lsiIn68ORFrGnFPY0wzuvbQ\nG/l2wvTkzxNf/pKxFx7C1Q9dnPU1Ctrnc+Or/8tNJ/8zuS0ec/ntE5fRuWenJo3XpJfV6KMmvaHI\ngcBv0ow+uhTYXVUvTnQ0n6Cqp9R1LRt9ZMy24eMXPuevp92Zdt8j0+6gz64963W9SGWE7z6cQTwW\nZ8+DdyMnL9wUYZqEukYfZdV8JCLXJSqiVt82TESubWRgN4nIMYkfHwM6isgcvI7o6xpzbWNM83nl\nX29l3Pf8ba/X+3rBcJBhYwcz8ughlhCaWbbNR1cA99TYNhN4DbitPjdU1Y+AjxLf/7Ha9krg5Ppc\nyxizbXDranGwAYTblWw7moNAt
Ma2CF7/gDHmZ+ilO//LzWfcxVfjv9viscddNjbjvpN/c/QWz18+\nbyWT3prC0jnL6xWjqR/VCFr1RZ3HZPukMAX4Fd6w0k0uBr5pWGjGmG3Vj1/N4df7XI+63if8D5/7\njLx2ebyw4mGCwfQ1MA85Yz/euO8dZn7xU+r2M/djhz36ZLxXpCrK3067k8njv/PqK0Xi7HnQbvzx\nxasJ5YSa7DUZ0KrP0JJfb/G4bMtc7Aq8BywH5gI7Al2B0ao6s3GhNpx1NBvT9MaGTycWidXavvPQ\nHbl30i11nvvpq5N44/7x+IN+Tvvtseyxf921kR6+9klev298ygzmYDjA2AsO4bK7z2/YCzC1qLsW\nXXUgUAmAr2h248pcqOoMEekPHIU3bPQV4E1VLW2SiI0x24Q5381PmxAAfvx67hbP3/f44ex7/PCs\n7/f2I6klLQAilVHeefxDSwpNqXJc1odmvfJaIgE815B4jDHbh1WLmrdKQLp1GsAbkqqqVgCvqbgb\n8LqBtyxjR7OIvFJzGGqaY4aKyCv1i84Ys60afuTeGfelKzNRVVHFqkXFxKKbny7WrljH+tUbsrrf\n7vulr2W0y4j+lhCaUmgUSHZ9NHU9KTwE3J9Yl/lj4EdgI1AA9AcOBEqA3zcmVmPMtsPn83HYLw7k\n3Sc+qrXv+mrrMcdjcR7+3yd56+H3QcAf8HPERYfy1VvfsGzuClSh3147cP0zV9C1T+eM97v07vO5\nYtQNRCqjxCIx/EE/gZCfy+61pqOmJIE90NBoqHyPLRW6zpgUVHU8MF5EhgBjgeFAO2AdMBU4TVW/\nbbKojTHbhEDIjy/gIx6Np2yLV+treOz6p3nrkfepSvQHVBHhxX+8kXKdH7+azZX7/YH/zLsPfyD9\nW02fXXvy6PQ7ee2eccyeMpcdB/fl+F+PpXMvq2vW1KTtbRB+D614Bci8jGqzl7loSjb6yJimVVFW\nyUmF/0Oksua0JNh56E7cO+nvxKIxjmt/LlXl6fsDqsspCHPdk5ezz7F1tkSbZtboMhfGmNZhw+qN\niJO+LX/VYm/RxfKNFcRj2S3XHovEk+eZ7YMlBWNMUqfuHQgEA7W2iwgDhu0EQH67PAra52d1PZ/f\nof+QHZs0RrN1WVIwZjs1f9pCrh19E0flncmp3S/iuVtfJR6v+xO8qvLaveM4o9fFHJV3Jlft/wc+\ne/0r/nT8bRydfxandruQgSP6EcrdPHNZRAjlhjjvL6cB4DgOF99xTo1jvON8gc0L5IRyggwc0Z+B\nw/s18Sv/+VJV3PIXcYsPxl2xO+6ak9DI180ag/UpGLMdWj5vJb8c/BsqNlYmt4VyQ4w+e3+ueOCi\njOc9fsMzvPqvt6ms2R8gQOKtIBgO0Ge3XgAUL17NgOH9OO8vp9F3994pp3z9zrc8deOLrFiwip0G\n78Bp1x3Ll29O4cPnPsMf8DP2/IM56ZpjCIZqP3mY9NzSx6DsbtDqI4TCSIenkOCgJrtPXX0KWScF\nEdkZGASkPDeq6uONjrCBLCmY1upflzzMuMc+IB5LXd02EA7wzMIHaFfYttY5FWWVnNz5/OSIobqE\ncoLcP+U2eg3o3mQxm7qpRtFVw0DLau8MjsTp8EST3auupJDVjGYRuR74I/A9UF5tlwItlhSMaa1+\n+GpOrYQAEAwFWPLjsrRJYeWC4rTrH6fjC/hYMH2RJYXm5K4BzdD8F/ux2cLItszFlcAwVZ26NYMx\nxmSnz249mTd1IW48NTFEq6J03aFL2nMKe3RImXlcl3jMpXu/okbHaerBaY/XjpeGr3f67VsjjCyP\nqwB+2JqBGGOyd+q1x9Vqqw/mBBhx9BA6deuQ9py8tnmMPueAlA7idBxHaNe5Db8b+zeOyDmDC/e4\nmu8+nMYLt7/OJUOu5ar9/8CEZyaSTdPzvKkLufmMu7hoj2v4x3n3sfjHpdm/yFZGJAS5ZwM5NfaE\nkfzLmy+OTH+xIlI9YZwFjAL+DKysfpyq1n6GrX2tMPAJEMJ7OnlJVf9U45hzgX8Am/7V3Kuqj9Z1\nXetTMK3Z1E9mcvevHmHxD0vxhwKM/Z+Duej2c+rs2I3H4jx+wzO8cb9XrrqwZyHFS1bXeuJIJxDy\nE63ynjTCeSEOPmNfrnro4ozHf//xDG448u9ecTtXcXwOwZwgd3x0I/322qH+L7gVUHXRsvuh7HHQ\ncnC6QMENODmHNel9GtTRLCIuyfEIyWea6gcLoKq6xUZK8Spb5alqqYgEgE+BK1T1y2rHnAsMUdXL\ntnS9TSwpGONVFPUH/ThO9iPMXdclFonxzwseYMIznzbovsFwgEem3UG3Hbum3X/B7lezcMbiWtv3\n2H8X/vnRjQ26Z2vhfdaOAKGtUhiwoR3NfZsqAPUyz6a1FwKJr+13LKwx25BguO7moHQcxyEYDvLt\nhOkNvq/jc5g2cVbapBCLxlg0c0na82ZN+intdrOZ11DTMqsdZ/xooaoLN30BJ1f/udr2E7O9kYj4\nROQ7YBXwnqpOSnPYiSIyVUReEpGeGa5zkYhMFpHJxcXNW/vdmJ+btoVtGnyu4zi079Iu7T6f35ex\n7yK/XV6D72m2vmyX49ygqrX+9YjIWlVN36uV+VrtgFeBX6vq9GrbOwKlqlolIr8ETlXVg+u6ljUf\nmdYsHovz6atfMfnd7+hY1J4x5x1EUd/UkUeRSJT//PlFJr46iYL2+Zxz4ykMGb15EtRnr3/Nn4+/\nrUH379C1Hc8sejA5zNV1XV69+23GPTYBf8BHh67tmPrxzJR5EaHcIGf/8WROvfa4Bt2zupnFq3hl\n1kwqYzHG9uvHPj161Wpq0ehsryqoliLh0WhgKFL1Dhr5Cnw9kZwTEV/60Vo/Zw2epyAim96UfSJy\nEKnjpXbAW1+hXlS1REQ+BA4HplfbvqbaYY8CDfuXakwrEKmMcM1Bf2bhjMVUlFbiD/p46Y43+cPz\nVyUXyqksr+S0Hr+krGTz1KLfjfkrJ1xxBJfceR4AQ8fuSTDsJ1JZ91BVEUkZbSQCh//PQSkJ4fxd\nrmLJT8uSx8zFSxyuq8lO6jHnHcTJvzmm0a//kSlfc+ekz4nE4rgor/0wk8N23Il/HjY2mRjcsudg\n481AFIijFW8AgqJ4AyqDaNnD0P4xJJh5caHWZkvzFB5L/BkmdZKaAiuAX2dzExEpBKKJhJADjAZu\nrXFMkaouT/x4DDArm2sb0xq99cj7zJ+2kKpy71N4LBInFolzyzn38OKKR/EH/Nx3+f+lJIRNXvnX\n25z5h5No06GAf9/w7BYTAlBr+KkqvPjPNznu8iNp37ktbz/yQUpC2GTtihL+8sZvade5Ld126kqb\nDgUNfMWbrSwt5Y4vP6OqWp2n8liUd+fN4cslixnZsxfqroONfwOql/OoubhMBDSClvwGCifYSm8J\ndQ5XUNW+qtoXeHrT94mvHVR1H1V9o67zqykCPhSRqcDXeH0Kb4rITSKy6WPD5SIyQ0S+By
4Hzm3g\nazLmZ2/CM58mE0J1bsxl9jfzAfjsta8ynv/OYxMA+PC5zxocgz/gY8q73wMw7rEP6ox1wLB+TZIQ\nAD5eOB+f1H7rqohGeWduYvGYyJcgWc7NdddAPH2neGuU1W9NVc9pzE0SM6EHp9n+x2rf/w74XWPu\nY0xrEcpJ34nrum5ynz+Y+b93ON8b2RJoRLE6ESEYDtQZD3iF+ppS2O8n3Yd6R4Qc/6bXHCLj7OBa\n3KzXL24NMj4piMhiEVm0pa/mDNYY4zn64sMI59V+I2vXuS19d/cqnB59yZi054ojHH6+1114/BVH\nNDgG11WGjvU+6532u8wdx2fccEKD75HOQX12wE0zQCbg83HcgF28H0KjsrqW4oB/AOLLvI50Jm7l\nx7glv8HdcCuuu6He52+r6mo+Ogs4O/F1N7Ae+AtwQeLPdcC/tnaAxpja9j95JIectR/BcIBwXojc\nghzaFrbhL29cl2wbP/OGE+i3d42ZwwLX/vtSgomFdI7/9REU9upU+wY1PmSH80L4g37C+SFyCsKE\n88Lc+Nq15OR5TxzDDt+Lg06v/UZ81h9OqjUiqrEKQiEeOPJYcvwB8gJBcgMBQj4f1+6zHwM6eWs7\ni4SQ9g+C5CW+clGClMeCqJL8cl1llXNpve7vui5u8RgouRAq34Dyx2DVUNyKt5v0dbaUbIekTgfG\nqOrSatt6AO+o6m5bMb462ZBU09otmb2c6RNn0bawDUPGDEq7atq0T2fxzuMTaN+lHaf/7njy2uQm\n933/8Qx+f9TfqSyre73lUE6QE648ih79iwjlBBl2xGBy8mvW6IGFs5bwyl1vEQgFOPXaYyns0bHx\nLzKD0kiEjxbMoyoeZ//efSjMrT3/Qd1yiHwMbgVTlnzNwNzXyfFv7liPuzC/tIj+/T/O+r7uhpuh\n/N9p9jjQeWa9Zpa3lEavpyAia4G+qrq+2rZ2wHxVbd9kkdaTJQVjGufWX9zDB//5hGyWVSnasQtP\nzr536we1lcyfM5Le+Wtqba+K+ygteJvCNtkVcXBXDgHN0FzU9nacnMYPud3a6koK2aa0N4A3RGS0\niAwUkcPwJqBlO/rIGLMNilRGs0oIALGq7Mpub6v8Tvq1ClyFmFuZdl96dSx56tYc9rr9yTYpXAx8\nATwIfJP4c1JiuzFmKyjfWMGMz39k+byVGY+pKKtkxuc/smT28uS2bz6YypsPvce6VSVbvMeBp45K\n22Fdkz/oZ/9TRmYX+FaytqKcycuWsqK03nNmAVhSNYrKeO36nesieVRFS/lx+XtURb1Vz9QtQyPf\noLE0Y2mCB2a+SWBP7zw3cR2tQiPforG5m+PYsJ4py5eysSpzk53Gl6GRKai7PuMxW4ut0WzMNuj5\n217jqRtfxB/0E43E6L/3Dtz46rW06bh5rP/r943jkd8+jc/vEI/GKdqxC8vnrkwpKzHq+GH8+eX/\nzXgf13X59Yjr+Wny5jctxJuD4Ph8RCoihPPDdOrWnnu+/HuL1C1yVbnx4wk8P2MaIZ+PSDzOQX12\n4M4xRxDyZ7tOGGysXMPqJUfTKbyePH+UypgPV4WSaB7tg2XE1UFQ1ukIegS/9uY5aBQCuyLt70cc\nr6KP65ZB8X6gpak3cLqCW5I4LwahQyEyAXBAY8SdHlz91bF8sKiSgOMQibtcOnQYlw3bnGzVLUVL\nroDIVyBB0Ajk/gIpuKZJJ9c1tHT2/qr6SeL7jDWIVHVCk0TZAJYUzM/RF/+dzM2n30Vl+eZPkv6A\nj932Hcg/PvCWIfnuw+n8/uhbqCqvu4MY4Pybz+C0645Pu2/+9EVcNvw6IhXRzRsFCrt35NjLDmfF\nglXsus8A9j95ZJ3rNGxNj34zmTu//IyK2Obmq5DPx0kDd+MvBx9ar2tFYhVMXfQkscqvUKeInsHx\ndA5vwO9sfh9UpcY8CD8EBuN0fDq5xXUjUHYPVE4Ap4OXDOJzgcxNbHFXWFjalsPeOYVNw7ty/H7+\nMXosR/Tr71133RVQ9QFe2exNcqDNH3Fys64/ukUN7VO4v9r3j2X4qnMRHGNM/b14+xspCQEgFo0z\n44sfWb1sLQAv3/lmVgkB4MV//jfjvv8+MJ5YpEYbuULp+jIGjujPFfdfxKFn7d9iCQHg8W+npCQE\ngKp4nJdmzSDmbnlxoOqC/hyG7PBLRuzyGB3aHUTbQHlKQoCaCQEgBtGpaHxzE53jBHEKrsEpfAtp\ndwvEF1BXQgDwOUqXnFJ2a786ua0iFuORb74GvKcEqt4nNSEAVEDZYzSXjM9e1YeaJkpdGGOawbpV\n6duR/QEfG1ZvpFO3DqxZvi7r61WWZe5EXbNsXdpV10SE9cXbxoSsDRna3mNunEg8jr+BQ0Arqlbg\nOlk2yYgf3HXgS7NutbsWJAC65SQdV6FDKLUzek1Foj6VlpLxc7pm//fdWFn9NkXkmMQQVGPMVjb0\n8D3xB2p3iIoIPQd0A2D4kXtlXaJip8F9qSirJBbd/Ek2Uhlh9bK1DB07OG1Hc7Qqxi777NzAV9C0\nhnTrnrZgRe+27cgNNPwJplfHAwikGZGUsZvVv1OtTcWlq6iIdyfbNcOCPpepazfPnvY7Dvv3Snzm\ndjqD0zbNWQ4E98nq+k0h216a3wDPichPwMeJr09UdXXdpxlj6uu03x7Hh89+RllJGdGI90Yeyg3x\nq7vOS05OO/7yI/jvA++yfnU0ddHcmu9NAiXFGzi+/S9wHIf9Th7BgumLmff9Qm+3I7TpWIDrKpFE\nB3U4L8Rxl42lY1GLTUFKcf1+B3DiC8uoikWJqeKIEPL5+MtB9etPqKlDfg8mLR7D4Lbjkk1IbmKm\nsyN+JNkclAMFNyCyub7T2zOeon/gHnrlryemDlPXF7F7hzjCpqeyIKpRQJPNUa7C03N2Z30kUXfK\ncSgIhrhs2HAgsdpamxvRkqvwqrsq4AfJRfKvbNRrrY+sRx+JSBgYARwA7A8MBxbYjGZjmt66Vet5\n5a43mTz+ezr36sTJ1xzNbvsOTO6fP20hl424PvlGDl7zUs8B3Vm3cj0VGyvotUsPFs5cknJMJvse\nP5xVS1aT3zaPYy87nJFHD9mmSkkvXr+eh6Z8xfcrV7BTh478cu+hyZIWjeGuORM38g2ObH5iUMJI\nzgkQ/R58XZC885Hg0OT+z+a9z57By1NmRlfEfPy4vgt7dhsA8ZWUuT3Jjb+T0j+hCqWxMNdN/QtL\nNmxgVM9enD94CIV5qSO6NDodLX0E4gshOATJuwDxpV8Hu6EaPaM5cZE8YBRwIF5i6A18oaonN1Gc\n9WZJwbRWt5x9Nx8++ymum/r/NxgO8MTse+jUvSP3XPYobz38PvFYHZOtEtp0KuDlVY9v8bifE43O\nRtecCNTscwlA7pk4ba5Pe964ycdzSLeZtTqoK+M+5jnPslvRniybfxBdwktrdVqrwsrA7XTr1LKz\nnhs9o1lEvgJ+AP4HWARcqKo9WjIhGNOaLZy5p
FZCAK8U9ooF3trlC2YsziohAGkX4/nZiy/yOohr\niUJsTsbTuuUW10oIAJG4j8XrZgKQ71+Ttrw3QGXFdw2Jttlk222/HggA7RNf7USyXcHCGNPUdhnZ\nP7kUZnXRqig9d+6WPCYQyu6/aYeiVjiOxN/fmxxWSwgCtZZ/SZpf1otIvPZbZ9AXp39n78N3SaxH\nxg7rvPw6l55vcdkusjM6kQT2xutPuA4YJiLTVXWLvT2J/ohP8Fa+8AMvqeqfahwTAp5M3GMNcKqq\nLqjHazFmuxSPx3nt7rd5/b7xVJRWMOyIvTjsFwfy6t1vM33iLNp1bstp1x3PIWful2znP/k3x/D+\nU59QUVqRfPMJ5YYYc96BtO3UBoDjfn0Er907jmgWNYt69C/i5K7nE84Lc8yvxnDClUfi89VOOnVZ\nX1nJXZM+5+3ZP+F3HE7aZVd+NWR4yqzjlaWl/PKt15i+ahUAe3TuwsVDhvPot5OZu3YNfdu15+qR\n+7JPz14p1/5x+XtE1t9Ot5zlrK7qQFX4QnbrsA7KXwSiEBrDgoqhbCy5jx45i1kbaUdp4EIG9zk7\nY7zi74mGD/EmoCWbkBzvq+wh3LJ7vE2+oTy74hoe+/Y7NlRVsm+3Azm463cEHDf5NFAVF34oKWLP\nzlfgrlxLYV4/iKdOhFOFtZH2dCnaNxmDxovR0rugagJIDuScAb4eUPYAxFdAYDdvNnNgl3r9XQBo\n1WfetWMLvZFT+Zd4K9JVvOwf28YAACAASURBVF7nefXpU+iAlxAOwutX2BmYoqpbXM1CvH/Jeapa\nKiIB4FPgClX9stoxvwL2UNWLReQ04HhVPbWu61qfgvk5uPXce5n40pfJyWiOT3BdRZDk2sih3BCn\nXXccZ/3+pOR5C2cu5qH/fYrpE2eR3z6PE688kuOvODJZunnBjMVcuMfVWY2WdPwObsxN3CvIPscO\n4/qnr8j6NUTiccY+/QRLNmwg6npNViGfnz27duWZE05BRIjEYgx66N6UtZXTCfv93Dv2aA7u660F\nMXPZOHq719QoeS0gDr5EB7GLD9T7ftPUg4qYn+kVFzK831UZ76UaRcsehvJnQMvBtwPEptU4BpaW\n5XPg22cC0DWnjHGHP0+eP5q8lxcP+GTTL9vBJUBpxE9BoAwXYWnlLvTo+TR+v1e6XN2N6Oqx3jyH\n5EgnP+AmvjbJQTo+W6/EoJUfJEYxVe8vEcAHxPAVzW50n8JUYAlwFbABuAbokE1CAFDPpkIhgcRX\nzX+qxwJPJL5/CThEtqXhD8ZsBSsWrOKTFz5PmZ3sxhUUqn9gqyqv4rlbXqOi2kS03rv05Oa3rueN\nDU/xzMIHOfGqo1Nq+f/rV49kO3w+mRC8e0X47NVJLJu7IuvXMX7ubFaWlSYTAkBVPMa0VSv5doU3\nE/jeyZO2mBAAKmMx/jbxo+TPsvEfKQkBvNnBvmojhhziCJsTAkCOP0a/0P8Rj0fJRCSAk38pTufP\ncLp8C7H5aY6B7nml9Mj1JhWe228qISeeci8vnuq/bBeHOG3aHo2v6CcCRT/Sp++ryYQAoBUvg7uB\n1JnQMVITAkAluvGujK8hHd34d2p3oCtbmnUN2fcpXI6XBA5Q1T+o6vuqWq+eKRHxich3wCrgPVWd\nVOOQ7sBiAFWN4fVjbL0VOozZBsz9bkGdaylX5/M7LJuT/Rv1/KkLGxoW/qCfOd/WfoPM5NvlyyiP\n1n7zjbsu01Z5MX+9dEnW15tfsi6ZFHvmZfea032EDPuirC3P/r5QmnHP0b28zudhhcsJ+rIprxGD\nSB0tGZGvqf3GnY5CbGoWxyWO1rjXid5AWSUFVf1IVetTcDzdNeKquifQA68/okHzG0TkIhGZLCKT\ni4uLGxOSMS2uS+/CtGUm0olWxejUvUPW1+7QiMlnbtylS5/s1y3u3bYd4TQVS/2Ojx4FbZPHZKtD\nOCfZf7K2qmALR2emKrQJ12f95cwzpCcVeyUu5m1s6zUXbZGAv0/m3f6+dd4vhdMtu+O8g0HSzYzO\n+uzmpaolwIfA4TV2LQV6AiQ6tdvidTjXPP9hVR2iqkMKCxs/ecWYlrTT4L70GtgDf7DuTt1AyM8+\nxw1NdiJn48LbMney1sUf8NG9XxH9a67vXIfjBuxCwEl9DT4R2oRCHNDHK+Pwm332S1uuoqaQz8fF\nQ4Ylf17JOZTHUhOOq97XJpvWXK6uPObn8zUjuHz8+5z5ygs8O+17qiKrcTfe401aW/9bNDqLLxYv\n4ldvvcEZr7zA9LLDa7W4qXpzEL5Z470xP/bTIKrc1Ncad1PjSbwSJO+XGV+n5J6RZkisUGuBbMIQ\n2At37bm46y5GKz+krr5gEYG8i7yO69p7M563SbMkBREp3FQ7SURygNF48x6qewP4ReL7k4AJuj0v\n9mBMlv7+zg0MGTMYf9BPIOSna59CcgrCKcfEYy4jjty7XtcdedTeXHjrWTj+zf/NOxS148BT9yEY\nDhAMB2jfpS3n/PkUuu/UlUDIjz/oZ+/DBnHru3+o14zmtuEwz590KgM7FRJwHAKOw15F3Xjh5NOS\nBes65uSwV1HtT7w17+KqMrx7j+TPQ3e8lOllZ7IhEqQy5qM85mfc0iFMXt2NSNwhEneYvaED987c\ni3VVISpjPipiPp6fN4CrPtuN9+bN4Ysli3nw67coXTHa61iOfg0VrxMpPpmnvr6Dd+bO5sslizl9\nfC++WLVTSmKIumFGv3V68udZJZ247usjWVlRQFXcR1XcYer6QbjBsUAQCHl1jNr+EwkOyvg7E183\npP3j4OuTOC8AgVGQczIQ9q4j7cApgooXIfI5VE1AS65EN95S59+H5F0AeReA5Caukw+5F4B/d7b0\ndNIsi+yIyB54ncg+vET0gqreJCI3AZNV9Y3EsNWngMHAWuA0VZ1X13Vt9JH5OSnfWEFVRYQX/vE6\nr98zLln3aJP8dnm8uPJR/IH6TRFyXZeFM5fQtlMBHbp6TUpVFVWUrS+nXee2OI6DqlJSvIFQTpDc\ngnSfMLO3rqICnyO0CaUmtinLl3LOqy9TEcvc8bvJ4K5FvHzKGSnbYvEI68qXUlIV4pjnX6AqHic/\nUEVAXNZFvJgdcekYqmB9JETETf09/XmviZza9wcCNfoDVlfmMPKNs9Fqaxz8fr99OLV/jAq3G0P/\n7zUqa5TuDvt8XDl8JCcMKCI32JbcoNdco1oBbik4Hb1aRllQ1USl1RDi5Ce2VYG7Ho18Cxt+642M\nShFCOr2N+Htu4doRb60Hpz2SeCpRtwTH1z7j6KOM/7pEJKtnxy29cSeOmYr3Zl9z+x+rfV8J2Axp\n02rlFuSQW5DDpLe+qZUQAOKxOIt/WErf3XvX67qO49B3t9Rx/6GcEKGczdVRRYT2nRveDl1d+5z0\nSeXzxYuozCIhAHy3YjmqmvK04vcFKSzoy4SFU3ES20ujqRVeXXUorky/OtyBRYtqJQSAXH+Unnkb\nWFTm
vf6KWIzx8xZx+u4nMn3JYgKOU6s7uDIe593587hoyPCU7SI54KtfUhUR8HWssS0Evs5o5JM0\nCQEQH0QmwRaSgkgQfKl9KuLU3bdT10eOOXhjmOp6hlS8T//GmCZS0CE/7fZ4LE5++/T7tgdtQ2FC\nfn+tT93p5AQCGZuv2oTDyaRQH+sjIXrk1R5d5BOXjdHNFVAF6JSTm7xXLE05EYCOOblptzcppwPe\n23TN35lkKLPdeHUtstPsndDGGDjpqqO47fsFVJZtnrvg8/vot/cOFPbYfkdpH9lvZ2797BP6tVnL\nmB7zUYV3luzA3I2po6SCjsPFgwpxN94PxJHwaCQwILn/4D474MtyYZ2i3I0c1XMOef4YHyzrTd+C\ndeT6Nz8tRF1hxrpO3DLsQ9oEIry1aCdeWrgHuxZ25h+fT6QwN5eu+fksXF+CW62pPcfv5xeDUhs/\n1laU89+ffmBVWRnDu/dk3169G5S8qpOck9CyJ6iVFNSHumvQjbcj/l0gfGhKae9MNDobrRxf9z23\n575c61MwP0eqyr//9Dwv3f4GgVCAWDROz527cfPb19O+y/Zdo2jBkr/RWZ7Gn5h4FnMd7p6xNw//\nuPkN9hf9pnLDnlMS5awVCELeeTgFm2cmT1u1kgveeIXyaAwRcF2lvEbT1JE953Dr0I9xxMXnuFTF\nfQR9Dn7ZfJwitYYtFVfmc9g751AajRP2+RHxmsRKKitxRIjGXa4ZOYoL9trcJD952VLOe/1l4qpU\nxmLkBgLs1rkLTx53EsF6lgupya14BzZch9cdq3jVgmKgUaACyAVfZ6TjC3U2DbmlD0DpA0AUX9EP\njSudnRgi+iu8ktmdqNakpKr7Z/3qmpglBfNztmHNRn6aMo+ORe3q3Y+wLdLYXHT1cXgLyGxWGfNx\nxPiTWVTWlqLcjbw39nnCvpozn8Pem161J4a46/LdyuVEYnH+OvFDZq3evOZXfqCKL45+ihx/dlVi\nU+JUeOSHPbht2sjkti55eTxy9PGsq6xgUJci2oQ292W4quzz+EOsKitLjdjv57ej9q/1RNEQqpUQ\n+RYkjG68FaLfkTrzOQA5J+K0vSn9+bH56Opj2PS7b3SZC+BO4Jd4Re32Bl4GOgMTsjzfGFNPbToW\nMOSwQT+LhABA5ftA7TdpEWV09wUAHNptYYbqohG08t2ULT7HYe+i7ozs2Ysf16ROaRrVeSmxBraA\ni8BxfWanbNsYiRD0+divV5+UhADw45rVlEZqV1utjMV4eeb0BsVQO6YwEhoJgQHe4j+1SmFEoXJc\n5gtUvp/mnPSy/a2dAIxV1X8BscSfx+EVxzPGmC0TH+nGragKcfW2e3+ma4ffVMwtO5uu11BujfNV\nFV+G/gGfSMYy2dn2fWQv0+8H6nw7F6eO87K+SopcEnWJgAoRyVXVH0gzzNQY8/NUUlnB27N/YsL8\neVRlMYKoltBhpH3LERi/xBsB/97SPoike4f1IzmpRRCqYjE+mD+XcXN+YvfCLin7PlvZAyftdbZM\nFZ6fNyBlW2FeHju0T19ipF+HjnRIMww3x+/n1F13b1AMmYiEITiM2r/HIOQcm/nE0BiyTQrZzoKZ\nBQwFvgImA38WkQ14pSmMMT9zT0/7nr9+8qFXykJAEB475niGdOue9TXE34uZlefRN/BI4pO1IKL8\n9dtRLK/whtoWV+bx0bKejOmxIOXchRU92MG/Y/LnSUsWc+GbryWrwEbdOAHHIep6TSQV8QBXfj6a\nB/eb4H1a103NVj68ztlkVFQvJavAxmgej/40goCjBH0+/I7DA0cck3GIrIjw4JHHcOarLxJ3XSLx\nOH7HYd9evTl5l6Zfwl7a3oKuORV0A2gVSBB8fZH8yzOf4++BFlwHG29hS8kh247moUBcVb8RkX7A\nA0AB8BtVnVifF9SUrKPZmK3vh9XFnPDCM7XmF+QHg3x1wcWE/dkVdVtZWspBTz5G+0AJh3ZfgALv\nLe1LSaQNvxg0mDlr17Br+wiX7/THtGsbz+Y+BhSNpjwaZfijD1BWoypr2O/n3EGD+WnNavq268AV\nI/Yhz18Gle+CVqDuRih7hC1XJs1hLrfy8bJOdMrNY8yOO5ET2PJrLI9GeXfuHFaXlzG0ew8Gdema\n1e+lIVSjUPUhxBeDfwAER2Y1g1rjK6DyfZz8s+s/ozk1AP262vezgS2utmaM+Xl4aeYMIunWQVD4\neOECxuzYL6vrvDX7R1SV5RX5PDVn8yfosF/o3qYt1+17AF/NPCPj+RUl/4Ki0UyYP5d0n3Zj8Tgg\nPHrMCdW2BiHXW6tLVx9JdqWqK9kx/C799rojq9e1SW4gwHEDBtbrnIYSCUD4sPqf5+sKeWcBmYsl\nZl1ERUQOBk4HugHLgOdU9YN6R2WM2a5sjFSlTNzaxEUpj2RXtgK8T9Kbmneqi7ku5VFv9I5PMr9p\nBx2v2acsGk0bT0w17SigzQFXZN6XQsHdmOWxPz/Zrrx2DfAcXqG6t/BKWj+T2G6M+Rk7bMedyE3T\nfBJzXUb16pXmjFQbqqr4ac1qhnXvQSjNRC6/4zC0Ww9+WrOaePCkNFfwlDpHALBvr964mqaGUSDA\nmB17eLN23ZLaFwgfRnbrF+QgOUdkcdz2R1XRWN0L8GT7pHA1cLCqJgfdishTwHvAPxscoTFmm3dQ\nnx0Y0b0nXy5dTHk0iuC13182dASd8zLXYoq5Ljd9PIEXZ07H7/iIq0ufdu1ZWFKSrJSa4w/Qt107\nznzlRfyOQ8yN8+LBhQxs5y2gJYkJx0vL2zC0r7dmdPeCNlwyZBgPTfmaylgMxUsINw2dy8jQKeha\nBzSKhg9H2t6cLP8g+RejVeMhvhavs9nP5maoON44/lwIDITwkVvhN9myNDoLLbkc4ivrPC7bjual\nwI7VV19LrIswR1WzH37QxKyj2ZjmEXdd3p8/l7dn/0RuIMApu+zG4DRrI1R3++ef8vh3U1I6qMM+\nHycM3JWKWAwBAo7D6z/OorJGn8V5/b7nzJ1m4ojLy/MH8O85w5l43kW0C28e+jl52VJenDmdqliM\n8wcuY9fQvaSOLApBztE4bW9OblG3DK14DSJfgr8nknM66Ea0/AXQEiR0KITHJMtM/1yoW4oWHwDq\nNYvVNaM526RwIXAg8GdgCd4KaX8APgYeT95Y0zzTbUWWFIzZNqkqezx4T60RQgCd8/L48vyLARjy\nyP2srdhyW3/Y7+e6UftzToaSEe7qIyE2O82eINJlsje+vxXT8pfQDX8FvDLcdSWFbJuPHkr8eTqp\n5bTPTOzbNNjXymgbYxKdx+k7oUsqN3cmr6/Mbun3yliMFaV1dP7GV2fYIV6nsa91JwXcVWQ38ir7\npNC3wcEYY1qdgM9H33btmVeyrta+3Qo3L/oysLAz01fV3cYNXp/B3nVNlAvuBVUToOYKy04BONtv\nufEmExgMEk6/YE8N2c5TWAgg3uyILqq6vD7xiEhP4EmgC97f2
sOJ+knVjzkQeB2Yn9j0iqqmL/ln\njGkQVRctexDK/s+bEevfGWnzeyQ4rNoxyqPfTuahKV+zrqKCHTt04A/7H8R+vfrU615/OvBgLnjj\n1ZRhqAHHIejzs/O9dwKwd1F3ju8zjyt2/YJuuaWsqMjjjqnDeHvpwOTciLDfT78OHTmwd+bPplJw\nDRr5ArSSzYXfwlBwQ8qkrulLXie38jZ65RWzIRLmh4pjGNH/Rhynfo0cGp2GbrgJotO8dZBzzkAK\nrth2+yKCI8C/qxfvFp4Ysu1TaAfcD5wERFU1T0SOAYap6u+zOL8IKErMiC4ApgDHqerMascciDdD\n+qgtBpRgfQrG1I+74W9Q/gKpHbJhpOMzSMCbUHbnl5/z6DdfU1G9g9jv54njTmRotx5Z3+vZ6VO5\n6eMJVNXoRHbY/LZ9dK+53DzkI3L8m+/lEuKzkvO4ZUpHYq7LCQN34bw999rizGmNzUdL74Pot+Dr\nieT/KiXZ/bjiA3rEfp1yr/KYn6kbD2WfgXdn/bo0tgBdcyxo6u+Q8GicdtvuYEzVCFr2FFS8jK/z\nuEb3KTwIrAN6A5veyL/AG466xaSQeLJYnvh+o4jMArpXu5YxZitTtxTKn6PmegZQhZbeh7R/gKpY\njEe/mZySEMBr07/zi8955sRTsruXKnd++VmthACpBZyv2m1Syps0gEMV+3V4jQPO/CSre20i/r5I\nu9sz7q8ouZNQ29R75fpjDCp4n/LIenKD2S1vqWWPgdacJFcJlePR+LWIr0va81qaSBDJPx/yz6eu\n+kfZVkk9BLg88eauAKpajLemQj0Dkz541VUnpdk9UkS+F5FxIrJrhvMvEpHJIjK5uLi4vrc3pvWK\nLwdJ9zlQIfYTAKvLy6nVLp8wZ92atNvTiboua8q33H7dLbf2mskAuCto6lUhu4SW46R5L3RVWFO6\nIPsLRWeSbl0IJATxuieGbQ+yTQrr8VZcSxKRXiQ+/WdLRPLxFui5UlU31Nj9DdBbVQcB9wCvpbuG\nqj6sqkNUdUhhYWF9bm9M6+YrAk1X8lrAvzMAnXJzyfQpsl+H7DtsA45Dx9wtL2y/rDzD5Dena8aq\npA21sqoIN02ecUTplF+PsTSBXUg70FKrwLf9L4iUbVJ4FHhZRA4CHBEZCTyB16yUFfF6YF4GnlbV\nV2ruV9UNqlqa+P5tICAinWoeZ4xpGHHyIfd04pq6cphLCMm/FICQ38+Few0hx5/6RBH2+7lyxD7Z\n30uEq0eMIpBmkZnqW+6YPpzKeM2nlzDkX531vbKV2+5qqmrcqzzm5/uNo8kJtsn6OpJ3vvdUkCIM\n4cMRX70bT7Y52SaFW4Hngfvwioc8jjdS6F91nbSJeCn/MWCWqqYtPSgiXRPHISKbVpHI/nnVGLNF\nT88fwz0zBrO2KkxchRnrOnLRxCNZWrG5A/mK4SO5esQoOuXk4ojQv0NHHj7quHp1MgOsrahIWwBv\nl8LOBByft6B96AhieX8FXw9AwOkGbf6Ck3tcY19qLf27HswCbmHexs7EVVhblcO0spMZsXP9qqGK\nvw/S4T/eME8ckALIOxdp+/cmj7klZDX6qNE3EdkXmAhMY3M/0/VALwBVfVBELgMuAWJ4QyOuVtXP\n67qujT4yJntVsRhDHrm/1ixjnwgnDNyVWw8d06T363/vncTSJIXcQIDpl2ReEMZsfSLSuNFHiWaj\nBao6X0S64j05uMDvVHXFls5X1U/ZwnI/qnovcG828Rhj6m/R+vVpt8dVmbR0cdp9DVUaiaRNCEDG\nmc5m25Bt89H9bO5uvwOvCckFHt4aQRljml7H3Jy0zTkARfkFTXqvXH/mz5tOE3cgm6aV7TyF7qq6\nSET8wBi8+QoRvMV2jDHNYN7Uhbz50LuUrFzPyGOGcsCp+xAMZT+DtkNOLgf27stHC+enrKQW9vkY\n3LUbl779Bm1CYU7fbQ/2aORSko7jsHdRN6Ysr/0WcegOO6Y5o3mURSK8NGsGXyxeRM+2bTl7jz3p\n1bZdi8WzLcp2RvMSYG9gN+DPqrqfeEXKi1U1uxkfW4H1KZjW4v3/fMxdv3yYaCSGG3cJ54XoOaA7\nd038C8FwMOvrlEUi/Oa9cXy4YD5+x0HwqpauLCujPBrFESHo8/HbUfvxi0F7NSrmqSuWc/wLz6TM\nevCJ8N5Z59KnfYdGXbsh1lVUcMxz/2FtRTkVsRgBx8HvODx89HGM6rn9DyWtj7r6FLJtProH+Bp4\nGm8EEsAo4IfGh2eMqUtleRX/uuQRqioiuHGv+aeyrIpFs5Yy/v8+rNe18oJBHjjyWL74n1/y+qln\ncuOBh7CytCzZzu+qUhmLccunn2RdwTSTmz75sNY0OAVu/rR+M5Wbyv2TJ7GqrDQ5WzvqulTEYvzv\nu+80+US57VlWSUFVbwUOBUap6nOJzUuBC7ZWYMYYzw+TZuP4av9XrSqv4qPn6xygl1H7nBx27NCR\n9+bNoTxWu+M34PPxZSM6n2Ouy3cra49BcVWZuGhhg6/bGOPnzE7bp7K+qpLFG9J3wrdG2fYpoKo/\n1fWzMWbryCnIQdNNxQXy2m151nBd2oTCycVQUijkBbJvlqrJEcHvOCl9F5uE/S2z7EpeMP3riauS\nk2YN6tYq2+YjY0wL6b/3DrTpWEDNQTvhvBBHX9K4uQWn7bo74TQjhYJ+HyN69GzwdR0Rju63M8Ea\nM5pDPh+n7Lp7g69b3erSRXw5+za+/PEvLF77fdpj1laU8+z0qTz27RQO37FfrZnaPhH26NyVwty8\nJonp5yDrJwVjTMsQEf729vX8dvRNlG+sRASiVTFOuuYYho7Zs1HXHlzUjatH7svtn08kkFhTIODz\n8e9jT8SfpkRFffzpwENYuL6EGcWr8IlDTF2Gd+/B1SNGNeq6AFPmP8bAwO3snuPVLtKKZ/hi1lGM\nHPiP5DET5s/j1+P+C3jNWY4Ivdq2Y9H6EvyOD0Upyi/gnrFZV+tvFZplRvPWYqOPTGsSj8eZ+vFM\nNqwpZff9BtCha/smu/a6igq+WLKYgmCQET16EvA1XRPPjFUrmV+yjv4dO9G/Y+PLma0vX4V/7QHk\n+FObpipifpYFHqRfl/0pj0YZ9ugDtSbK5fgD3HLIaBxx6FqQz15duzV54b3tQaNnNBtjWp7P52Pw\nwU3T9FJT+5wcjujXf6tce9fOXdi1c9OtMfDT8hcZEHSoWb466MRYvfY5+nXZn08XLcBJU0ShMhbl\n08WLmrykx8+J9SkYY7Yz6WdlA6Beooirpi2so0A8w6xu47GkYIzZruzY5Xh8Tu039ojrp337EwAY\n1bN3+mJ8/gDH7jxwq8e4PbOkYIzZrnTI78G0il9SGfMRiTvEXa8/YeqG/RlQ5DULtQmFuO3QMYR9\nfoKODwfI8fs5sv/O7Nurdc1eri/raDbGbJeWrZvJwlXPgltJYYdj2KnLfrWP2biBt2b/SGkkwsF9\ndmCPLk2/otv2qK6O
[... base64-encoded PNG of the interactive iris scatter-plot widget output omitted ...]
583 | "text/plain": ""
584 | }
585 | ],
586 | "_view_module": "@jupyter-widgets/output",
587 | "_model_module_version": "1.0.0",
588 | "_view_count": null,
589 | "_view_module_version": "1.0.0",
590 | "layout": "IPY_MODEL_ca31125412e849c7b43b75a955125c5d",
591 | "_model_module": "@jupyter-widgets/output"
592 | }
593 | }
594 | }
595 | }
596 | },
597 | "cells": [
598 | {
599 | "cell_type": "code",
600 | "metadata": {
601 | "id": "1E01gS82my8h",
602 | "colab_type": "code",
603 | "colab": {}
604 | },
605 | "source": [
606 | "from ipywidgets import widgets"
607 | ],
608 | "execution_count": 0,
609 | "outputs": []
610 | },
611 | {
612 | "cell_type": "code",
613 | "metadata": {
614 | "id": "V6lo2FCgm0AB",
615 | "colab_type": "code",
616 | "outputId": "6c5cf3e8-ea50-4416-930c-3bd14deaaad2",
617 | "colab": {
618 | "base_uri": "https://localhost:8080/",
619 | "height": 79,
620 | "referenced_widgets": [
621 | "b6962ffde86b4b3e8c30f3426520a179",
622 | "c78b7b2541b846df94facbe158282a42",
623 | "b9e256133b804ece927ecaf8ed7126cb",
624 | "cd883016a8b147ffa62f1e86bc91eccb",
625 | "d8cc6d5614a343e892a10b3759e3976c",
626 | "496afabf093244ae98a74dd5357c868f"
627 | ]
628 | }
629 | },
630 | "source": [
631 | "x_range = widgets.FloatRangeSlider(value=[2,7],\n",
632 | " min=0,\n",
633 | " max=10.,\n",
634 | " step=0.1,\n",
635 | " description='X-Range: ',\n",
636 | " readout_format='.1f')\n",
637 | "display(x_range)\n",
638 | "\n",
639 | "y_range = widgets.FloatRangeSlider(value=[2,7],\n",
640 | " min=0,\n",
641 | " max=10.,\n",
642 | " step=0.1,\n",
643 | " description='Y-Range: ',\n",
644 | " readout_format='.1f')\n",
645 | "display(y_range)"
646 | ],
647 | "execution_count": 0,
648 | "outputs": [
649 | {
650 | "output_type": "display_data",
651 | "data": {
652 | "application/vnd.jupyter.widget-view+json": {
653 | "model_id": "b6962ffde86b4b3e8c30f3426520a179",
654 | "version_minor": 0,
655 | "version_major": 2
656 | },
657 | "text/plain": [
658 | "FloatRangeSlider(value=(2.0, 7.0), description='X-Range: ', max=10.0, readout_format='.1f')"
659 | ]
660 | },
661 | "metadata": {
662 | "tags": []
663 | }
664 | },
665 | {
666 | "output_type": "display_data",
667 | "data": {
668 | "application/vnd.jupyter.widget-view+json": {
669 | "model_id": "cd883016a8b147ffa62f1e86bc91eccb",
670 | "version_minor": 0,
671 | "version_major": 2
672 | },
673 | "text/plain": [
674 | "FloatRangeSlider(value=(2.0, 7.0), description='Y-Range: ', max=10.0, readout_format='.1f')"
675 | ]
676 | },
677 | "metadata": {
678 | "tags": []
679 | }
680 | }
681 | ]
682 | },
683 | {
684 | "cell_type": "code",
685 | "metadata": {
686 | "id": "nIFE2VuTm0DJ",
687 | "colab_type": "code",
688 | "outputId": "57584e10-af96-42f2-e1eb-205c73ebc7d1",
689 | "colab": {
690 | "base_uri": "https://localhost:8080/",
691 | "height": 49,
692 | "referenced_widgets": [
693 | "8fa927c081a047dbb0315941398367a8",
694 | "a9fe084dd17444559d69eef288272469",
695 | "39342e7201cc45b68146035c7c11ad81"
696 | ]
697 | }
698 | },
699 | "source": [
700 | "species_button = widgets.ToggleButton(\n",
701 | " value=False,\n",
702 | " description='Show Species')\n",
703 | "display(species_button)"
704 | ],
705 | "execution_count": 0,
706 | "outputs": [
707 | {
708 | "output_type": "display_data",
709 | "data": {
710 | "application/vnd.jupyter.widget-view+json": {
711 | "model_id": "8fa927c081a047dbb0315941398367a8",
712 | "version_minor": 0,
713 | "version_major": 2
714 | },
715 | "text/plain": [
716 | "ToggleButton(value=False, description='Show Species')"
717 | ]
718 | },
719 | "metadata": {
720 | "tags": []
721 | }
722 | }
723 | ]
724 | },
725 | {
726 | "cell_type": "code",
727 | "metadata": {
728 | "id": "oDuvClksm0Gx",
729 | "colab_type": "code",
730 | "outputId": "668a599d-473a-4a21-9ac7-0b492723c4e8",
731 | "colab": {
732 | "base_uri": "https://localhost:8080/",
733 | "height": 79,
734 | "referenced_widgets": [
735 | "93c1de18d0244c1184d7b7ac05e11522",
736 | "c9da6a4df77642da918fff628e36aa83",
737 | "bc33d9ac837b4a1c98402f6904780b44",
738 | "9fde7fa1a82c4e4f94cdbc0d88385c61",
739 | "047f57b3565e491697c20fb4c0a4089e",
740 | "730a7f4234c54faab80e609cf524fe11"
741 | ]
742 | }
743 | },
744 | "source": [
745 | "feature_x_select = widgets.Dropdown(\n",
746 | " value=2,\n",
747 | " options=[('Sepal Length',0), ('Sepal Width',1),\n",
748 | " ('Petal Length',2), ('Petal Width',3)],\n",
749 | " description='X-Axis:')\n",
750 | "\n",
751 | "display(feature_x_select)\n",
752 | "\n",
753 | "feature_y_select = widgets.Dropdown(\n",
754 | " value=2,\n",
755 | " options=[('Sepal Length',0), ('Sepal Width',1),\n",
756 | " ('Petal Length',2), ('Petal Width',3)],\n",
757 | " description='Y-Axis:')\n",
758 | "\n",
759 | "display(feature_y_select)"
760 | ],
761 | "execution_count": 0,
762 | "outputs": [
763 | {
764 | "output_type": "display_data",
765 | "data": {
766 | "application/vnd.jupyter.widget-view+json": {
767 | "model_id": "93c1de18d0244c1184d7b7ac05e11522",
768 | "version_minor": 0,
769 | "version_major": 2
770 | },
771 | "text/plain": [
772 | "Dropdown(description='X-Axis:', index=2, options=(('Sepal Length', 0), ('Sepal Width', 1), ('Petal Length', 2)…"
773 | ]
774 | },
775 | "metadata": {
776 | "tags": []
777 | }
778 | },
779 | {
780 | "output_type": "display_data",
781 | "data": {
782 | "application/vnd.jupyter.widget-view+json": {
783 | "model_id": "9fde7fa1a82c4e4f94cdbc0d88385c61",
784 | "version_minor": 0,
785 | "version_major": 2
786 | },
787 | "text/plain": [
788 | "Dropdown(description='Y-Axis:', index=2, options=(('Sepal Length', 0), ('Sepal Width', 1), ('Petal Length', 2)…"
789 | ]
790 | },
791 | "metadata": {
792 | "tags": []
793 | }
794 | }
795 | ]
796 | },
797 | {
798 | "cell_type": "code",
799 | "metadata": {
800 | "id": "n-IBoE8-mcU5",
801 | "colab_type": "code",
802 | "colab": {}
803 | },
804 | "source": [
805 | "from ipywidgets import interactive, widgets, fixed\n",
806 | "\n",
807 | "import numpy as np\n",
808 | "import matplotlib.pyplot as plt\n",
809 | "\n",
810 | "from sklearn import datasets\n",
811 | "iris = datasets.load_iris()"
812 | ],
813 | "execution_count": 0,
814 | "outputs": []
815 | },
816 | {
817 | "cell_type": "code",
818 | "metadata": {
819 | "id": "szCPB6ZjmcVC",
820 | "colab_type": "code",
821 | "colab": {}
822 | },
823 | "source": [
824 | "def plot_iris_data(iris,\n",
825 | " feature_x, feature_y,\n",
826 | " x_limits, y_limits,\n",
827 | " colorby):\n",
828 | " if colorby:\n",
829 | " marker_color = iris.target\n",
830 | " \n",
831 | " else:\n",
832 | " marker_color = 'k'\n",
833 | " \n",
834 | " fig, ax = plt.subplots()\n",
835 | " \n",
836 | " ax.scatter(iris.data[:, feature_x],\n",
837 | " iris.data[:, feature_y],\n",
838 | " c=marker_color,\n",
839 | " cmap=plt.cm.get_cmap('viridis', 3))\n",
840 | " \n",
841 | " ax.set(xlim=x_limits,\n",
842 | " ylim=y_limits)\n",
843 | " \n",
844 | " ax.set_xlabel(iris.feature_names[feature_x],\n",
845 | " fontsize=12)\n",
846 | " ax.set_ylabel(iris.feature_names[feature_y],\n",
847 | " fontsize=12)"
848 | ],
849 | "execution_count": 0,
850 | "outputs": []
851 | },
852 | {
853 | "cell_type": "code",
854 | "metadata": {
855 | "id": "hAaV2oJbmcVJ",
856 | "colab_type": "code",
857 | "outputId": "31d1bad1-6e61-44de-abc2-47cc4994f312",
858 | "colab": {
859 | "base_uri": "https://localhost:8080/",
860 | "height": 442,
861 | "referenced_widgets": [
862 | "49f3d05fc367424c8030eaff3177118e",
863 | "ae91ef85ddaf4827b7da06f49097fe73",
864 | "93c1de18d0244c1184d7b7ac05e11522",
865 | "9fde7fa1a82c4e4f94cdbc0d88385c61",
866 | "b6962ffde86b4b3e8c30f3426520a179",
867 | "cd883016a8b147ffa62f1e86bc91eccb",
868 | "8fa927c081a047dbb0315941398367a8",
869 | "7ba42e6dc0f5417dbf5773dc85365a1f",
870 | "c9da6a4df77642da918fff628e36aa83",
871 | "bc33d9ac837b4a1c98402f6904780b44",
872 | "047f57b3565e491697c20fb4c0a4089e",
873 | "730a7f4234c54faab80e609cf524fe11",
874 | "c78b7b2541b846df94facbe158282a42",
875 | "b9e256133b804ece927ecaf8ed7126cb",
876 | "d8cc6d5614a343e892a10b3759e3976c",
877 | "496afabf093244ae98a74dd5357c868f",
878 | "a9fe084dd17444559d69eef288272469",
879 | "39342e7201cc45b68146035c7c11ad81"
880 | ]
881 | }
882 | },
883 | "source": [
884 | "interactive(plot_iris_data,\n",
885 | " iris=fixed(iris),\n",
886 | " feature_x=feature_x_select,\n",
887 | " feature_y=feature_y_select,\n",
888 | " x_limits=x_range,\n",
889 | " y_limits=y_range,\n",
890 | " colorby=species_button)"
891 | ],
892 | "execution_count": 0,
893 | "outputs": [
894 | {
895 | "output_type": "display_data",
896 | "data": {
897 | "application/vnd.jupyter.widget-view+json": {
898 | "model_id": "49f3d05fc367424c8030eaff3177118e",
899 | "version_minor": 0,
900 | "version_major": 2
901 | },
902 | "text/plain": [
903 | "interactive(children=(Dropdown(description='X-Axis:', index=2, options=(('Sepal Length', 0), ('Sepal Width', 1…"
904 | ]
905 | },
906 | "metadata": {
907 | "tags": []
908 | }
909 | }
910 | ]
911 | },
912 | {
913 | "cell_type": "code",
914 | "metadata": {
915 | "id": "xFBOcrQumcVM",
916 | "colab_type": "code",
917 | "colab": {}
918 | },
919 | "source": [
920 | ""
921 | ],
922 | "execution_count": 0,
923 | "outputs": []
924 | },
925 | {
926 | "cell_type": "code",
927 | "metadata": {
928 | "id": "etZHbh8OmcVO",
929 | "colab_type": "code",
930 | "colab": {}
931 | },
932 | "source": [
933 | ""
934 | ],
935 | "execution_count": 0,
936 | "outputs": []
937 | }
938 | ]
939 | }
--------------------------------------------------------------------------------
/Chapter05/keras_neural_network_architecture.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {
6 | "colab_type": "text",
7 | "id": "view-in-github"
8 | },
9 | "source": [
10 | ""
11 | ]
12 | },
13 | {
14 | "cell_type": "markdown",
15 | "metadata": {
16 | "colab_type": "text",
17 | "id": "6WBKm7OZBZez"
18 | },
19 | "source": [
20 | "# Designing Your Neural Networks:\n",
21 | "## A Step by Step Walkthrough\n",
22 | "\n",
23 | "Training neural networks can be very confusing! What’s a good learning rate? How many hidden layers should your network have? Is dropout actually useful? Why are your gradients vanishing?\n",
24 | "\n",
25 | "In this post we'll pull back the curtain on some of the more confusing aspects of neural nets, and help you make smart decisions about your neural network architecture.\n",
26 | "\n",
27 | "We’ll also see how we can use Weights and Biases inside Kaggle kernels to monitor performance and pick the best architecture for our neural network!\n",
28 | "\n",
29 | "**I highly recommend forking this kernel and playing with the different building blocks to hone your intuition.**\n",
30 | "\n",
31 | "I made a quick **demo** to walk you through this kernel: https://www.loom.com/share/fb64035e4576467489cf0f2ad9cff92a.\n",
32 | "\n",
33 | "If you have any more questions or feedback, please don't hesitate to [message me](https://twitter.com/lavanyaai)!"
34 | ]
35 | },
36 | {
37 | "cell_type": "markdown",
38 | "metadata": {
39 | "colab_type": "text",
40 | "id": "Plg-zAxDBZe1"
41 | },
42 | "source": [
43 | "# The Goal"
44 | ]
45 | },
46 | {
47 | "cell_type": "code",
48 | "execution_count": 8,
49 | "metadata": {
50 | "_kg_hide-input": false,
51 | "_kg_hide-output": true,
52 | "colab": {
53 | "base_uri": "https://localhost:8080/",
54 | "height": 34
55 | },
56 | "colab_type": "code",
57 | "id": "zq0t850YBZe2",
58 | "outputId": "e916242c-810e-44e2-e884-f211ff877faa"
59 | },
60 | "outputs": [],
61 | "source": [
62 | "!pip install 'wandb==0.8.13' -q"
63 | ]
64 | },
65 | {
66 | "cell_type": "code",
67 | "execution_count": 10,
68 | "metadata": {
69 | "colab": {
70 | "base_uri": "https://localhost:8080/",
71 | "height": 114
72 | },
73 | "colab_type": "code",
74 | "id": "NgI30p0wqtMI",
75 | "outputId": "c8c4e791-f3b7-4528-96ed-69f8e967270f"
76 | },
77 | "outputs": [
78 | {
79 | "data": {
80 | "text/html": [
81 | "\n",
82 | " Notebook configured with W&B. You can open the run page, or call %%wandb\n",
83 | " in a cell containing your training loop to display live results. Learn more in our docs.\n",
84 | " "
85 | ],
86 | "text/plain": [
87 | ""
88 | ]
89 | },
90 | "metadata": {},
91 | "output_type": "display_data"
92 | },
93 | {
94 | "data": {
95 | "text/plain": [
96 | "W&B Run: https://app.wandb.ai/lrichman/building-neural-nets/runs/g109i1jw"
97 | ]
98 | },
99 | "execution_count": 10,
100 | "metadata": {},
101 | "output_type": "execute_result"
102 | }
103 | ],
104 | "source": [
105 | "from __future__ import print_function\n",
106 | "import keras\n",
107 | "from keras.datasets import cifar10\n",
108 | "from keras.preprocessing.image import ImageDataGenerator\n",
109 | "from keras.models import Sequential\n",
110 | "from keras.layers import Dense, Dropout, Activation, Flatten, BatchNormalization, AlphaDropout\n",
111 | "from keras.layers import Conv2D, MaxPooling2D\n",
112 | "import os\n",
113 | "\n",
114 | "# Essentials\n",
115 | "import numpy as np\n",
116 | "import pandas as pd\n",
117 | "\n",
118 | "# Models\n",
119 | "import tensorflow as tf\n",
120 | "\n",
121 | "# Ignore useless warnings\n",
122 | "import warnings\n",
123 | "warnings.filterwarnings(action=\"ignore\")\n",
124 | "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' \n",
125 | "import tensorflow as tf\n",
126 | "\n",
127 | "tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\n",
128 | "\n",
129 | "# Set random state for numpy\n",
130 | "np.random.seed(42)\n",
131 | "\n",
132 | "# WandB\n",
133 | "import wandb\n",
134 | "from wandb.keras import WandbCallback\n",
135 | "# You can change your project name here. For more config options, see https://docs.wandb.com/docs/init.html\n",
136 | "wandb.init(project=\"building-neural-nets\", name=\"basic_neural_network\")\n",
137 | "\n",
138 | "# Go to https://app.wandb.ai/authorize to get your WandB key"
139 | ]
140 | },
141 | {
142 | "cell_type": "markdown",
143 | "metadata": {
144 | "colab_type": "text",
145 | "id": "bZD1pC5TBZfB",
146 | "toc": true
147 | },
148 | "source": [
149 | "Table of Contents\n",
150 | ""
151 | ]
152 | },
153 | {
154 | "cell_type": "markdown",
155 | "metadata": {
156 | "colab_type": "text",
157 | "id": "2Q_W2TXKBZfC"
158 | },
159 | "source": [
160 | "## Initialize Hyperparameters"
161 | ]
162 | },
163 | {
164 | "cell_type": "code",
165 | "execution_count": 13,
166 | "metadata": {
167 | "_kg_hide-input": false,
168 | "_kg_hide-output": true,
169 | "colab": {
170 | "base_uri": "https://localhost:8080/",
171 | "height": 51
172 | },
173 | "colab_type": "code",
174 | "id": "7T_zKW8CBZfC",
175 | "outputId": "1e7bc3ec-77cc-4cc0-972c-50348c5d6adf"
176 | },
177 | "outputs": [
178 | {
179 | "name": "stderr",
180 | "output_type": "stream",
181 | "text": [
182 | "wandb: Wandb version 0.8.13 is available! To upgrade, please run:\n",
183 | "wandb: $ pip install wandb --upgrade\n"
184 | ]
185 | }
186 | ],
187 | "source": [
188 | "config = wandb.config # Config is a variable that holds and saves hyperparameters and inputs\n",
189 | "config.dropout = 0.2\n",
190 | "config.conv_layer_1_size = 32\n",
191 | "config.conv_layer_2_size = 64\n",
192 | "config.conv_layer_3_size = 128\n",
193 | "config.hidden_layer_size = 512\n",
194 | "config.learn_rate = 0.01\n",
195 | "config.learn_rate_low = 0.001\n",
196 | "config.kernel_size = 3\n",
197 | "config.pool_size = 2\n",
198 | "config.decay = 1e-6\n",
199 | "config.momentum = 0.9\n",
200 | "config.n_epochs = 25\n",
201 | "\n",
202 | "config.img_width=28\n",
203 | "config.img_height=28\n",
204 | "config.num_classes = 10\n",
205 | "config.batch_size = 128\n",
206 | "config.validation_size = 5000"
207 | ]
208 | },
209 | {
210 | "cell_type": "code",
211 | "execution_count": 14,
212 | "metadata": {
213 | "colab": {
214 | "base_uri": "https://localhost:8080/",
215 | "height": 34
216 | },
217 | "colab_type": "code",
218 | "id": "4IJnYEvTULJy",
219 | "outputId": "649aede0-c51f-4411-9ecc-0ac00b33de3b"
220 | },
221 | "outputs": [
222 | {
223 | "data": {
224 | "text/plain": [
225 | "'0.8.12'"
226 | ]
227 | },
228 | "execution_count": 14,
229 | "metadata": {},
230 | "output_type": "execute_result"
231 | }
232 | ],
233 | "source": [
234 | "wandb.__version__"
235 | ]
236 | },
237 | {
238 | "cell_type": "markdown",
239 | "metadata": {
240 | "colab_type": "text",
241 | "id": "DIE34bHLBZfE"
242 | },
243 | "source": [
244 | "## Load dataset"
245 | ]
246 | },
247 | {
248 | "cell_type": "code",
249 | "execution_count": 15,
250 | "metadata": {
251 | "_kg_hide-input": false,
252 | "_kg_hide-output": false,
253 | "colab": {
254 | "base_uri": "https://localhost:8080/",
255 | "height": 34
256 | },
257 | "colab_type": "code",
258 | "id": "fFPrREl5BZfF",
259 | "outputId": "024a9dbe-a431-4f86-d76a-491c21b4e476"
260 | },
261 | "outputs": [
262 | {
263 | "data": {
264 | "text/plain": [
265 | "((60000, 28, 28, 1), (55000, 28, 28, 1), (5000, 28, 28, 1), (10000, 28, 28, 1))"
266 | ]
267 | },
268 | "execution_count": 15,
269 | "metadata": {},
270 | "output_type": "execute_result"
271 | }
272 | ],
273 | "source": [
274 | "(X_train_full, y_train_full), (X_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()\n",
275 | "labels=[\"T-shirt/top\",\"Trouser\",\"Pullover\",\"Dress\",\"Coat\",\n",
276 | " \"Sandal\",\"Shirt\",\"Sneaker\",\"Bag\",\"Ankle boot\"]\n",
277 | "\n",
278 | "# Normalize pixel values\n",
279 | "X_train_full, X_test = X_train_full / 255.0, X_test / 255.0\n",
280 | "\n",
281 | "#reshape input data\n",
282 | "X_train_full = X_train_full.reshape(X_train_full.shape[0], config.img_width, config.img_height, 1)\n",
283 | "X_test = X_test.reshape(X_test.shape[0], config.img_width, config.img_height, 1)\n",
284 | "\n",
285 | "# one hot encode outputs\n",
286 | "y_train_full = tf.keras.utils.to_categorical(y_train_full)\n",
287 | "y_test = tf.keras.utils.to_categorical(y_test)\n",
288 | "num_classes = y_test.shape[1]\n",
289 | "\n",
290 | "# Split into validation, and training sets\n",
291 | "X_valid, X_train = X_train_full[:config.validation_size], X_train_full[config.validation_size:]\n",
292 | "y_valid, y_train = y_train_full[:config.validation_size], y_train_full[config.validation_size:]\n",
293 | "X_train_full.shape, X_train.shape, X_valid.shape, X_test.shape"
294 | ]
295 | },
296 | {
297 | "cell_type": "markdown",
298 | "metadata": {
299 | "colab_type": "text",
300 | "id": "R87GnDDgBZfH"
301 | },
302 | "source": [
303 | "## Explore Dataset"
304 | ]
305 | },
306 | {
307 | "cell_type": "markdown",
308 | "metadata": {
309 | "colab_type": "text",
310 | "id": "a9sTfZfWBZfI"
311 | },
312 | "source": [
313 | "\n",
314 | "\n",
315 | "\n",
316 | "Fashion-MNIST is a dataset that shares the same structure as MNIST (60,000 images in the training set, 10,000 in the test set, with each image being a 28x28 grayscale image, and 10 label classes to predict). Fashion-MNIST is slightly more complicated than MNIST and hence serves as a good dataset for benchmarking machine learning algorithms.\n",
317 | "\n",
318 | "A selection of the images in the dataset is shown below."
319 | ]
320 | },
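The sample-image figure is not embedded in this copy of the notebook, so here is a minimal sketch of how you could preview the data yourself. It is not part of the original kernel and assumes the `X_train_full`, `y_train_full` and `labels` variables from the "Load dataset" cell above (images reshaped to 28x28x1, targets one-hot encoded).

```python
# Hypothetical preview cell (not the author's code): show ten training images with their class names.
import numpy as np
import matplotlib.pyplot as plt

fig, axes = plt.subplots(2, 5, figsize=(10, 4))
for idx, ax in enumerate(axes.flat):
    ax.imshow(X_train_full[idx].reshape(28, 28), cmap='gray')             # drop the channel axis for imshow
    ax.set_title(labels[int(np.argmax(y_train_full[idx]))], fontsize=8)   # undo the one-hot encoding
    ax.axis('off')
plt.tight_layout()
plt.show()
```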
321 | {
322 | "cell_type": "markdown",
323 | "metadata": {
324 | "colab_type": "text",
325 | "id": "XU8RE9V1BZfI"
326 | },
327 | "source": [
328 | "# Model Training"
329 | ]
330 | },
331 | {
332 | "cell_type": "markdown",
333 | "metadata": {
334 | "colab_type": "text",
335 | "id": "F9uTV8CnBZfJ"
336 | },
337 | "source": [
338 | "# Basic Neural Network"
339 | ]
340 | },
341 | {
342 | "cell_type": "markdown",
343 | "metadata": {
344 | "colab_type": "text",
345 | "id": "Tb_LsxEWBZfJ"
346 | },
347 | "source": [
348 | "**1. Input neurons**\n",
349 | "The input vector needs one input neuron per feature. For tabular data, this is the number of relevant features in your dataset. You want to carefully select these features and remove any that may contain patterns that won't generalize beyond the training set (and cause overfitting). For images, this is the number of pixels in your image (28*28=784 in the case of MNIST).\n",
350 | "\n",
351 | " \n",
352 | "**2. Output neurons**\n",
353 | "This is the number of predictions you want to make.\n",
354 | "- **Regression:** For regression tasks, this can be one value (e.g. housing price). For multi-variate regression, it is one neuron per predicted value (e.g. for bounding boxes it can be 4 neurons – one each for bounding box height, width, x-coordinate, y-coordinate).\n",
355 | "- **Classification:** For binary classification (spam-not spam), we use one output neuron per positive class, wherein the output represents the probability of the positive class. For multi-class classification (e.g. in object detection where an instance can be classified as a car, a dog, a house etc.), we have one output neuron per class, and use the softmax activation function on the output layer to ensure the final probabilities sum to 1.\n",
356 | " \n",
357 | " \n",
358 | "**3. Hidden Layers and Neurons per Hidden Layers**\n",
359 | " The number of hidden layers is highly dependent on the problem and the architecture of your neural network. You're essentially trying to Goldilocks your way into the perfect neural network architecture - not too big, not too small, just right.\n",
360 | " \n",
361 | "Generally, 1-5 hidden layers will serve you well for most problems. When working with image or speech data, you'd want your network to have dozens-hundreds of layers, not all of which might be fully connected. For these use cases, there are pre-trained models (YOLO, ResNet, VGG) that allow you to use large parts of their networks, and train your model on top of these networks to learn only the higher order features. In this case, your model will still have only a few layers to train.\n",
362 | "\n",
363 | "In general using the same number of neurons for all hidden layers will suffice. For some datasets, having a large first layer and following it up with smaller layers will lead to better performance as the first layer can learn a lot of lower-level features that can feed into a few higher order features in the subsequent layers.\n",
364 | "\n",
365 | "Usually you will get more of a performance boost from adding more layers than adding more neurons in each layer.\n",
366 | "\n",
367 | "I'd recommend starting with 1-5 layers and 1-100 neurons and slowly adding more layers and neurons until you start overfitting. You can track your loss and accuracy within your Weights and Biases dashboard to see which hidden layers + hidden neurons combo leads to the best loss.\n",
368 | "\n",
369 | "Something to keep in mind with choosing a smaller number of layers/neurons is that if this number is too small, your network will not be able to learn the underlying patterns in your data and thus be useless. An approach to counteract this is to start with a huge number of hidden layers + hidden neurons and then use dropout and early stopping to let the neural network size itself down for you. Again, I'd recommend trying a few combinations and tracking the performance in your Weights and Biases dashboard to determine the perfect network size for your problem.\n",
370 | "\n",
371 | "Andrej Karpathy also recommends the [overfit then regularize](http://karpathy.github.io/2019/04/25/recipe/) approach – \"first get a model large enough that it can overfit (i.e. focus on training loss) and then regularize it appropriately (give up some training loss to improve the validation loss).\"\n",
372 | "\n",
373 | " \n",
374 | "**4. Loss function**\n",
375 | "\n",
376 | " - **Regression:** Mean squared error is the most common loss function to optimize for, unless there are a significant number of outliers. In this case, use mean absolute error or Huber loss. \n",
377 | " - **Classification:** Cross-entropy will serve you well in most cases.\n",
378 | "\n",
379 | "\n",
380 | "**5. Batch Size**\n",
381 | "Large batch sizes can be great because they can harness the power of GPUs to process more training instances per unit of time. [OpenAI has found](https://openai.com/blog/science-of-ai/) that larger batch sizes (tens of thousands for image classification and language modeling, and millions in the case of RL agents) serve well for scaling and parallelizability.\n",
382 | "\n",
383 | "There's a case to be made for smaller batch sizes too, however. According to [this paper](https://arxiv.org/abs/1804.07612) by Masters and Luschi, the advantage gained from the increased parallelism of running large batches is offset by the better generalization and smaller memory footprint achieved with smaller batches. They show that increased batch sizes reduce the acceptable range of learning rates that provide stable convergence. Their takeaway is that smaller is, in fact, better; and that the best performance is obtained with mini-batch sizes between 2 and 32.\n",
384 | "\n",
385 | "If you're not operating at massive scales, I would recommend starting with lower batch sizes and slowly increasing the size and monitoring performance in your [Weights and Biases](https://www.wandb.com/) dashboard to determine the best fit.\n",
386 | " \n",
387 | "**6. Number of epochs**\n",
388 | " I'd recommend starting with a large number of epochs and using early stopping to halt training when performance stops improving.\n",
389 | " \n",
390 | " \n",
391 | "**7. Scaling your features**\n",
392 | " A quick note: Make sure all your features have similar scale before using them as inputs to your neural network. This ensures faster convergence. When your features have different scales (e.g. salaries in thousands and years of experience in tens), the cost function will look like an elongated bowl, which means your optimization algorithm will take a long time to traverse the valley compared to using normalized features.\n",
393 | " \n",
394 | " "
395 | ]
396 | },
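To make the checklist above concrete, here is a minimal sketch of where each choice lands in Keras. It is not the kernel's own training cell; it assumes the `config`, `X_train`/`y_train` and `X_valid`/`y_valid` objects defined earlier, and the layer sizes and patience are illustrative values only.

```python
# Sketch only: mapping the checklist above onto Keras calls.
from keras.models import Sequential
from keras.layers import Flatten, Dense
from keras.callbacks import EarlyStopping

mlp = Sequential([
    Flatten(input_shape=(config.img_width, config.img_height, 1)),   # 1. one input per pixel (28*28=784)
    Dense(100, activation='relu'),                                    # 3. start with 1 hidden layer, ~100 neurons
    Dense(config.num_classes, activation='softmax'),                  # 2. one output neuron per class
])
mlp.compile(loss='categorical_crossentropy',                          # 4. cross-entropy for classification
            optimizer='sgd', metrics=['accuracy'])
mlp.fit(X_train, y_train,
        batch_size=config.batch_size,                                 # 5. start small, then scale up and compare
        epochs=100,                                                   # 6. large cap; early stopping halts sooner
        validation_data=(X_valid, y_valid),                           # 7. pixel values already scaled to [0, 1]
        callbacks=[EarlyStopping(patience=5, restore_best_weights=True)])
```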
397 | {
398 | "cell_type": "code",
399 | "execution_count": 16,
400 | "metadata": {
401 | "colab": {
402 | "base_uri": "https://localhost:8080/",
403 | "height": 51
404 | },
405 | "colab_type": "code",
406 | "id": "gri3_qNyB0VA",
407 | "outputId": "1483e69f-27d0-4f3c-e915-7c64a315166a"
408 | },
409 | "outputs": [
410 | {
411 | "data": {
412 | "text/html": [
413 | "\n",
414 | " Notebook configured with W&B. You can open the run page, or call %%wandb\n",
415 | " in a cell containing your training loop to display live results. Learn more in our docs.\n",
416 | " "
417 | ],
418 | "text/plain": [
419 | ""
420 | ]
421 | },
422 | "metadata": {},
423 | "output_type": "display_data"
424 | },
425 | {
426 | "data": {
427 | "text/plain": [
428 | "W&B Run: https://app.wandb.ai/lrichman/building-neural-nets/runs/quej8hq0"
429 | ]
430 | },
431 | "execution_count": 16,
432 | "metadata": {},
433 | "output_type": "execute_result"
434 | }
435 | ],
436 | "source": [
437 | "wandb.init(project=\"building-neural-nets\", name=\"basic_neural_network\")"
438 | ]
439 | },
440 | {
441 | "cell_type": "code",
442 | "execution_count": 17,
443 | "metadata": {
444 | "_kg_hide-output": false,
445 | "colab": {
446 | "base_uri": "https://localhost:8080/",
447 | "height": 560
448 | },
449 | "colab_type": "code",
450 | "id": "M6ICCxCuBZfK",
451 | "outputId": "7ef6fc07-0abb-46d2-c31b-82ff953fb31f"
452 | },
453 | "outputs": [
454 | {
455 | "data": {
456 | "text/html": [
457 | ""
459 | ],
460 | "text/plain": [
461 | ""
462 | ]
463 | },
464 | "metadata": {},
465 | "output_type": "display_data"
466 | },
467 | {
468 | "name": "stdout",
469 | "output_type": "stream",
470 | "text": [
471 | "Train on 55000 samples, validate on 5000 samples\n",
472 | "Epoch 1/25\n",
473 | " 32/55000 [..............................] - ETA: 13:44 - loss: 2.2993 - accuracy: 0.1562"
474 | ]
475 | },
476 | {
477 | "name": "stderr",
478 | "output_type": "stream",
479 | "text": [
480 | "wandb: Wandb version 0.8.13 is available! To upgrade, please run:\n",
481 | "wandb: $ pip install wandb --upgrade\n"
482 | ]
483 | },
484 | {
485 | "name": "stdout",
486 | "output_type": "stream",
487 | "text": [
488 | "55000/55000 [==============================] - 132s 2ms/step - loss: 1.0627 - accuracy: 0.6178 - val_loss: 0.6272 - val_accuracy: 0.7630\n",
489 | "Epoch 2/25\n",
490 | "55000/55000 [==============================] - 160s 3ms/step - loss: 0.5271 - accuracy: 0.8031 - val_loss: 0.4597 - val_accuracy: 0.8398\n",
491 | "Epoch 3/25\n",
492 | "55000/55000 [==============================] - 158s 3ms/step - loss: 0.4472 - accuracy: 0.8363 - val_loss: 0.4128 - val_accuracy: 0.8526\n",
493 | "Epoch 4/25\n",
494 | "55000/55000 [==============================] - 163s 3ms/step - loss: 0.4025 - accuracy: 0.8518 - val_loss: 0.3726 - val_accuracy: 0.8658\n",
495 | "Epoch 5/25\n",
496 | "55000/55000 [==============================] - 133s 2ms/step - loss: 0.3709 - accuracy: 0.8641 - val_loss: 0.3596 - val_accuracy: 0.8746\n",
497 | "Epoch 6/25\n",
498 | "55000/55000 [==============================] - 115s 2ms/step - loss: 0.3474 - accuracy: 0.8750 - val_loss: 0.3487 - val_accuracy: 0.8754\n",
499 | "Epoch 7/25\n",
500 | "55000/55000 [==============================] - 128s 2ms/step - loss: 0.3256 - accuracy: 0.8813 - val_loss: 0.3344 - val_accuracy: 0.8824\n",
501 | "Epoch 8/25\n",
502 | "55000/55000 [==============================] - 108s 2ms/step - loss: 0.3094 - accuracy: 0.8876 - val_loss: 0.3249 - val_accuracy: 0.8792\n",
503 | "Epoch 9/25\n",
504 | "55000/55000 [==============================] - 111s 2ms/step - loss: 0.2951 - accuracy: 0.8917 - val_loss: 0.3155 - val_accuracy: 0.8832\n",
505 | "Epoch 10/25\n",
506 | "55000/55000 [==============================] - 121s 2ms/step - loss: 0.2796 - accuracy: 0.8973 - val_loss: 0.3225 - val_accuracy: 0.8858\n",
507 | "Epoch 11/25\n",
508 | "55000/55000 [==============================] - 106s 2ms/step - loss: 0.2681 - accuracy: 0.9001 - val_loss: 0.3081 - val_accuracy: 0.8884\n",
509 | "Epoch 12/25\n",
510 | "55000/55000 [==============================] - 117s 2ms/step - loss: 0.2560 - accuracy: 0.9050 - val_loss: 0.3200 - val_accuracy: 0.8820\n",
511 | "Epoch 13/25\n",
512 | "55000/55000 [==============================] - 120s 2ms/step - loss: 0.2460 - accuracy: 0.9084 - val_loss: 0.3241 - val_accuracy: 0.8804\n",
513 | "Epoch 14/25\n",
514 | "55000/55000 [==============================] - 133s 2ms/step - loss: 0.2355 - accuracy: 0.9113 - val_loss: 0.2946 - val_accuracy: 0.8944\n",
515 | "Epoch 15/25\n",
516 | "55000/55000 [==============================] - 118s 2ms/step - loss: 0.2252 - accuracy: 0.9155 - val_loss: 0.3173 - val_accuracy: 0.8854\n",
517 | "Epoch 16/25\n",
518 | "55000/55000 [==============================] - 122s 2ms/step - loss: 0.2175 - accuracy: 0.9179 - val_loss: 0.3078 - val_accuracy: 0.8874\n",
519 | "Epoch 17/25\n",
520 | "55000/55000 [==============================] - 104s 2ms/step - loss: 0.2086 - accuracy: 0.9205 - val_loss: 0.3023 - val_accuracy: 0.8938\n",
521 | "Epoch 18/25\n",
522 | "55000/55000 [==============================] - 121s 2ms/step - loss: 0.1991 - accuracy: 0.9255 - val_loss: 0.2915 - val_accuracy: 0.8972\n",
523 | "Epoch 19/25\n",
524 | "55000/55000 [==============================] - 116s 2ms/step - loss: 0.1923 - accuracy: 0.9274 - val_loss: 0.3247 - val_accuracy: 0.8864\n",
525 | "Epoch 20/25\n",
526 | "55000/55000 [==============================] - 126s 2ms/step - loss: 0.1841 - accuracy: 0.9314 - val_loss: 0.2861 - val_accuracy: 0.8994\n",
527 | "Epoch 21/25\n",
528 | "55000/55000 [==============================] - 122s 2ms/step - loss: 0.1761 - accuracy: 0.9329 - val_loss: 0.3052 - val_accuracy: 0.8968\n",
529 | "Epoch 22/25\n",
530 | "55000/55000 [==============================] - 115s 2ms/step - loss: 0.1678 - accuracy: 0.9364 - val_loss: 0.3300 - val_accuracy: 0.8882\n",
531 | "Epoch 23/25\n",
532 | "55000/55000 [==============================] - 114s 2ms/step - loss: 0.1611 - accuracy: 0.9403 - val_loss: 0.3126 - val_accuracy: 0.8954\n",
533 | "Epoch 24/25\n",
534 | "55000/55000 [==============================] - 104s 2ms/step - loss: 0.1544 - accuracy: 0.9417 - val_loss: 0.3088 - val_accuracy: 0.8972\n",
535 | "Epoch 25/25\n",
536 | "55000/55000 [==============================] - 125s 2ms/step - loss: 0.1462 - accuracy: 0.9447 - val_loss: 0.3189 - val_accuracy: 0.8984\n"
537 | ]
538 | },
539 | {
540 | "data": {
541 | "text/plain": [
542 | ""
543 | ]
544 | },
545 | "execution_count": 17,
546 | "metadata": {},
547 | "output_type": "execute_result"
548 | }
549 | ],
550 | "source": [
551 | "%%wandb \n",
552 | "# build model\n",
553 | "model = keras.Sequential()\n",
554 | "# Conv2D(number of output filters in the convolution, (length of the convolution window, ...))\n",
555 | "model.add(Conv2D(config.conv_layer_1_size, kernel_size=(config.kernel_size, config.kernel_size), activation='relu', input_shape=(config.img_width, config.img_height, 1)))\n",
556 | "model.add(Conv2D(config.conv_layer_1_size, kernel_size=(config.kernel_size, config.kernel_size), activation='relu'))\n",
557 | "model.add(MaxPooling2D((config.pool_size, config.pool_size)))\n",
558 | "model.add(Conv2D(config.conv_layer_2_size, kernel_size=(config.kernel_size, config.kernel_size), activation='relu'))\n",
559 | "model.add(Conv2D(config.conv_layer_2_size, kernel_size=(config.kernel_size, config.kernel_size), activation='relu'))\n",
560 | "model.add(MaxPooling2D(pool_size=(config.pool_size, config.pool_size)))\n",
561 | "model.add(Conv2D(config.conv_layer_3_size, (config.kernel_size, config.kernel_size), activation='relu'))\n",
562 | "model.add(Flatten())\n",
563 | "model.add(Dense(config.hidden_layer_size, activation='relu'))\n",
564 | "model.add(Dense(config.num_classes, activation='softmax'))\n",
565 | "\n",
566 | "model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.SGD(lr=config.learn_rate), metrics=['accuracy'])\n",
567 | "model.fit(X_train, y_train, verbose=1, validation_data=(X_valid, y_valid), epochs=config.n_epochs,\n",
568 | " callbacks=[WandbCallback(data_type=\"image\", validation_data=(X_valid, y_valid), labels=labels)])"
569 | ]
570 | },
571 | {
572 | "cell_type": "markdown",
573 | "metadata": {
574 | "colab_type": "text",
575 | "id": "mJwi6z8nBZfM"
576 | },
577 | "source": [
578 | "# Learning Rate\n",
579 | "\n",
580 | "\n",
581 | "\n",
582 | "\n",
583 | "\n",
584 | "Picking the learning rate is very important, and you want to make sure you get this right! Ideally you want to re-tweak the learning rate when you tweak the other hyper-parameters of your network.\n",
585 | " \n",
586 | "To find the best learning rate, start with a very low values (10^-6) and slowly multiply it by a constant until it reaches a very high value (e.g. 10). Measure your model performance (vs the log of your learning rate) in your Weights and Biases dashboard to determine which rate served you well for your problem. You can then retrain your model using this optimal learning rate.\n",
587 | " \n",
588 | "The best learning rate is usually half of the learning rate that causes the model to diverge. Feel free to set different values for learn_rate in the code and see how it affects model performance to develop your intuition around learning rates.\n",
589 | "\n",
590 | "I'd also recommend using the [Learning Rate finder](https://arxiv.org/abs/1506.01186) method proposed by Leslie Smith. It's an excellent way to find a good learning rate for most gradient optimizers (most variants of SGD) and works with most network architectures.\n",
591 | " \n",
592 | "Also, see the section on learning rate scheduling below."
593 | ]
594 | },
595 | {
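The learning-rate search described above is easy to script. The following is a minimal sketch rather than the kernel's code: a hypothetical `LRFinder` callback that grows the learning rate geometrically after every batch and records the loss, assuming a compiled `model` like the ones above. You would then plot loss against learning rate and pick a value just below where the curve blows up.

```python
# Hypothetical LR-range-test callback (not part of the original kernel).
import keras
import keras.backend as K

class LRFinder(keras.callbacks.Callback):
    """Multiply the learning rate by `factor` after every batch and record the loss."""
    def __init__(self, start_lr=1e-6, factor=1.1, max_lr=10.0):
        super(LRFinder, self).__init__()
        self.lr, self.factor, self.max_lr = start_lr, factor, max_lr
        self.lrs, self.losses = [], []

    def on_train_begin(self, logs=None):
        K.set_value(self.model.optimizer.lr, self.lr)

    def on_batch_end(self, batch, logs=None):
        self.lrs.append(self.lr)
        self.losses.append(logs.get('loss'))
        self.lr *= self.factor                           # geometric sweep upwards
        if self.lr > self.max_lr:
            self.model.stop_training = True              # stop once the sweep reaches a very high rate
        K.set_value(self.model.optimizer.lr, self.lr)

finder = LRFinder()
model.fit(X_train, y_train, epochs=1, callbacks=[finder])   # one (partial) pass is enough
# plt.plot(finder.lrs, finder.losses); plt.xscale('log')     # then read off a good learning rate
```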
596 | "cell_type": "code",
597 | "execution_count": null,
598 | "metadata": {
599 | "_kg_hide-output": false,
600 | "colab": {},
601 | "colab_type": "code",
602 | "id": "spbkx4snBZfM"
603 | },
604 | "outputs": [],
605 | "source": [
606 | "wandb.init(anonymous='allow', project=\"building-neural-nets\", name=\"lower_learning_rate\")"
607 | ]
608 | },
609 | {
610 | "cell_type": "code",
611 | "execution_count": null,
612 | "metadata": {
613 | "_kg_hide-output": false,
614 | "colab": {},
615 | "colab_type": "code",
616 | "id": "yuax-DmxBZfO"
617 | },
618 | "outputs": [],
619 | "source": [
620 | "%%wandb \n",
621 | "# build model\n",
622 | "model = keras.Sequential()\n",
623 | "model.add(Conv2D(config.conv_layer_1_size, kernel_size=(config.kernel_size, config.kernel_size), activation='relu', input_shape=(config.img_width, config.img_height, 1)))\n",
624 | "model.add(Conv2D(config.conv_layer_1_size, kernel_size=(config.kernel_size, config.kernel_size), activation='relu'))\n",
625 | "model.add(MaxPooling2D((config.pool_size, config.pool_size)))\n",
626 | "model.add(Conv2D(config.conv_layer_2_size, kernel_size=(config.kernel_size, config.kernel_size), activation='relu'))\n",
627 | "model.add(Conv2D(config.conv_layer_2_size, kernel_size=(config.kernel_size, config.kernel_size), activation='relu'))\n",
628 | "model.add(MaxPooling2D(pool_size=(config.pool_size, config.pool_size)))\n",
629 | "model.add(Conv2D(config.conv_layer_3_size, (config.kernel_size, config.kernel_size), activation='relu'))\n",
630 | "model.add(Flatten())\n",
631 | "model.add(Dense(config.hidden_layer_size, activation='relu'))\n",
632 | "model.add(Dense(config.num_classes, activation='softmax'))\n",
633 | "\n",
634 | "model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.SGD(lr=config.learn_rate_low), metrics=['accuracy'])\n",
635 | "model.fit(X_train, y_train, verbose=1, validation_data=(X_valid, y_valid), epochs=config.n_epochs,\n",
636 | " callbacks=[WandbCallback(data_type=\"image\", validation_data=(X_valid, y_valid), labels=labels)])"
637 | ]
638 | },
639 | {
640 | "cell_type": "markdown",
641 | "metadata": {
642 | "colab_type": "text",
643 | "id": "nRplGkWTBZfR"
644 | },
645 | "source": [
646 | "# Momentum\n",
647 | "\n",
648 | "\n",
649 | "Gradient Descent takes tiny, consistent steps towards a local minimum, and when the gradients are tiny it can take a lot of time to converge. Momentum, on the other hand, takes into account the previous gradients, and accelerates convergence by pushing over valleys faster and avoiding local minima.\n",
650 | " \n",
651 | "In general you want your momentum value to be very close to one. 0.9 is a good place to start for smaller datasets, and you want to move progressively closer to one (0.999) the larger your dataset gets. (Setting nesterov=True lets momentum take into account the gradient of the cost function a few steps ahead of the current point, which makes it slightly more accurate and faster.)"
652 | ]
653 | },
654 | {
655 | "cell_type": "code",
656 | "execution_count": null,
657 | "metadata": {
658 | "_kg_hide-output": false,
659 | "colab": {},
660 | "colab_type": "code",
661 | "id": "P86wJabYBZfS"
662 | },
663 | "outputs": [],
664 | "source": [
665 | "wandb.init(project=\"building-neural-nets\", name=\"momentum\")"
666 | ]
667 | },
668 | {
669 | "cell_type": "code",
670 | "execution_count": null,
671 | "metadata": {
672 | "_kg_hide-output": false,
673 | "colab": {},
674 | "colab_type": "code",
675 | "id": "Z0FZg4s8BZfU"
676 | },
677 | "outputs": [],
678 | "source": [
679 | "%%wandb \n",
680 | "# build model\n",
681 | "model = keras.Sequential()\n",
682 | "model.add(Conv2D(config.conv_layer_1_size, kernel_size=(config.kernel_size, config.kernel_size), activation='relu', input_shape=(config.img_width, config.img_height, 1)))\n",
683 | "model.add(Conv2D(config.conv_layer_1_size, kernel_size=(config.kernel_size, config.kernel_size), activation='relu'))\n",
684 | "model.add(MaxPooling2D((config.pool_size, config.pool_size)))\n",
685 | "model.add(Conv2D(config.conv_layer_2_size, kernel_size=(config.kernel_size, config.kernel_size), activation='relu'))\n",
686 | "model.add(Conv2D(config.conv_layer_2_size, kernel_size=(config.kernel_size, config.kernel_size), activation='relu'))\n",
687 | "model.add(MaxPooling2D(pool_size=(config.pool_size, config.pool_size)))\n",
688 | "model.add(Conv2D(config.conv_layer_3_size, (config.kernel_size, config.kernel_size), activation='relu'))\n",
689 | "model.add(Flatten())\n",
690 | "model.add(Dense(config.hidden_layer_size, activation='relu'))\n",
691 | "model.add(Dense(config.num_classes, activation='softmax'))\n",
692 | "\n",
693 | "model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.SGD(lr=config.learn_rate_low, decay=config.decay, momentum=config.momentum,\n",
694 | " nesterov=True), metrics=['accuracy'])\n",
695 | "model.fit(X_train, y_train, verbose=1, validation_data=(X_valid, y_valid), epochs=config.n_epochs,\n",
696 | " callbacks=[WandbCallback(data_type=\"image\", validation_data=(X_valid, y_valid), labels=labels)])"
697 | ]
698 | },
699 | {
700 | "cell_type": "markdown",
701 | "metadata": {
702 | "colab_type": "text",
703 | "id": "6QcROk-BBZfZ"
704 | },
705 | "source": [
706 | "# Dropout\n",
707 | "\n",
708 | "Dropout is a fantastic regularization technique that gives you a massive performance boost (~2% for state-of-the-art models) for how simple the technique actually is. All dropout does is randomly turn off a percentage of neurons at each layer, at each training step. This makes the network more robust because it can't rely on any particular set of input neurons for making predictions. The knowledge is distributed amongst the whole network. Around 2^n (where n is the number of neurons in the architecture) slightly-unique neural networks are generated during the training process, and ensembled together to make predictions.\n",
709 | "\n",
710 | "A good dropout rate is between 0.1 and 0.5; 0.3 for RNNs, and 0.5 for CNNs. Use larger rates for bigger layers. Increasing the dropout rate decreases overfitting, and decreasing the rate is helpful to combat under-fitting.\n",
711 | "\n",
712 | "You want to experiment with different dropout rates in the earlier layers of your network, and check your Weights and Biases dashboard to pick the best-performing one. You definitely don’t want to use dropout in the output layer.\n",
713 | "\n",
714 | "Read [this paper](https://arxiv.org/abs/1801.05134) before using Dropout in conjunction with BatchNorm.\n",
715 | "\n",
716 | "In this kernel I used AlphaDropout, a flavor of the vanilla dropout that works well with SELU activation functions by preserving the input's mean and standard deviations."
717 | ]
718 | },
719 | {
720 | "cell_type": "code",
721 | "execution_count": null,
722 | "metadata": {
723 | "_kg_hide-output": false,
724 | "colab": {},
725 | "colab_type": "code",
726 | "id": "iRhxttOJBZfa"
727 | },
728 | "outputs": [],
729 | "source": [
730 | "wandb.init(project=\"building-neural-nets\", name=\"dropout\")"
731 | ]
732 | },
733 | {
734 | "cell_type": "code",
735 | "execution_count": null,
736 | "metadata": {
737 | "_kg_hide-output": false,
738 | "colab": {},
739 | "colab_type": "code",
740 | "id": "22hCtE45BZfc"
741 | },
742 | "outputs": [],
743 | "source": [
744 | "%%wandb\n",
745 | "# build model\n",
746 | "model = keras.Sequential()\n",
747 | "model.add(Conv2D(config.conv_layer_1_size, kernel_size=(config.kernel_size, config.kernel_size), activation='selu', kernel_initializer='lecun_normal',\n",
748 | " input_shape=(config.img_width, config.img_height, 1)))\n",
749 | "model.add(BatchNormalization())\n",
750 | "model.add(Conv2D(config.conv_layer_1_size, kernel_size=(config.kernel_size, config.kernel_size), activation='selu', kernel_initializer='lecun_normal'))\n",
751 | "model.add(BatchNormalization())\n",
752 | "model.add(MaxPooling2D((config.pool_size, config.pool_size)))\n",
753 | "model.add(AlphaDropout(rate=config.dropout))\n",
754 | "model.add(Conv2D(config.conv_layer_2_size, kernel_size=(config.kernel_size, config.kernel_size), activation='selu', kernel_initializer='lecun_normal'))\n",
755 | "model.add(BatchNormalization())\n",
756 | "model.add(Conv2D(config.conv_layer_2_size, kernel_size=(config.kernel_size, config.kernel_size), activation='selu', kernel_initializer='lecun_normal'))\n",
757 | "model.add(BatchNormalization())\n",
758 | "model.add(MaxPooling2D(pool_size=(config.pool_size, config.pool_size)))\n",
759 | "model.add(AlphaDropout(rate=config.dropout))\n",
760 | "model.add(Conv2D(config.conv_layer_3_size, (config.kernel_size, config.kernel_size), activation='selu', kernel_initializer='lecun_normal'))\n",
761 | "model.add(AlphaDropout(rate=config.dropout))\n",
762 | "model.add(Flatten())\n",
763 | "model.add(Dense(config.hidden_layer_size, activation='selu', kernel_initializer='lecun_normal'))\n",
764 | "model.add(BatchNormalization())\n",
765 | "model.add(AlphaDropout(rate=config.dropout))\n",
766 | "model.add(Dense(config.num_classes, activation='softmax'))\n",
767 | "\n",
768 | "model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.SGD(lr=config.learn_rate, clipnorm=1.0),\n",
769 | " metrics=['accuracy'])\n",
770 | "model.fit(X_train, y_train, verbose=1, validation_data=(X_test, y_test), epochs=config.n_epochs,\n",
771 | " callbacks=[WandbCallback(data_type=\"image\", validation_data=(X_valid, y_valid), labels=labels), tf.keras.callbacks.EarlyStopping(patience=10, restore_best_weights=True)])"
772 | ]
773 | },
774 | {
775 | "cell_type": "markdown",
776 | "metadata": {
777 | "colab_type": "text",
778 | "id": "G20kwvnVBZfV"
779 | },
780 | "source": [
781 | "# Vanishing + Exploding Gradients\n",
782 | "\n",
783 | "\n",
784 | "\n",
785 | "Just like people, not all neural network layers learn at the same speed. So when the backprop algorithm propagates the error gradient from the output layer to the first layers, the gradients get smaller and smaller until they're almost negligible when they reach the first layers. This means the weights of the first layers aren't updated significantly at each step.\n",
786 | " \n",
787 | "This is the problem of vanishing gradients. (A similar problem of exploding gradients occurs when the gradients for certain layers get progressively larger, leading to massive weight updates for some layers as opposed to the others.)\n",
788 | " \n",
789 | "There are a few ways to counteract vanishing gradients. Let's take a look at them now!\n",
790 | " \n",
791 | "**1. Activation functions (non-saturating functions)**\n",
792 | "- **Hidden Layer Activation**\n",
793 | " In general, the performance from using different [activation functions](https://isaacchanghau.github.io/post/activation_functions/) improves in this order (from lowest→highest performing): logistic → tanh → ReLU → Leaky ReLU → ELU → SELU. ReLU is the most popular activation function and if you don't want to tweak your activation function, ReLU is a great place to start. But, keep in mind ReLU is becoming increasingly less effective than [ELU](https://arxiv.org/pdf/1511.07289.pdf) or [GELU](https://arxiv.org/pdf/1606.08415.pdf). If you’re feeling more adventurous, you can try the following:\n",
794 | " - to combat neural network overfitting: RReLU\n",
795 | " - reduce latency at runtime: leaky ReLU\n",
796 | " - for massive training sets: PReLU\n",
797 | " - for fast inference times: leaky ReLU\n",
798 | " - if your network doesn't self-normalize: ELU\n",
799 | " - for an overall robust activation function: SELU\n",
800 | " \n",
801 | " [This](https://arxiv.org/pdf/1811.03378.pdf) is an excellent paper that dives deeper into the comparison of various activation functions for neural networks.\n",
802 | "\n",
803 | " As always, don't be afraid to experiment with a few different activation functions, and turn to your Weights and Biases dashboard to help you pick the one that works best for you! \n",
804 | "\n",
805 | "- **Output Layer Activation**\n",
806 | "  - **Regression:** Regression problems don't require activation functions for their output neurons because we want the output to take on any value. In cases where we want our values to be bounded into a certain range, we can use tanh for -1→1 values and the logistic function for 0→1 values. In cases where we're only looking for positive output, we can use the softplus activation.\n",
807 | "\n",
808 | " - **Classification:** Use the sigmoid activation function for binary classification to ensure the output is between 0 and 1. Use softmax for multi-class classification to ensure the output probabilities add up to 1.\n",
809 | " \n",
810 | " \n",
811 | "**2. Weight initialization method**\n",
812 | " The right weight initialization method can speed up time-to-convergence considerably. The choice of your initialization method depends on your activation function. Some things to try:\n",
813 | "- When using ReLU or leaky ReLU, use [He initialization](https://arxiv.org/pdf/1502.01852.pdf)\n",
814 | "- When using SELU or ELU, use [LeCun initialization](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)\n",
815 | "- When using softmax, logistic, or tanh, use [Glorot initialization](http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf)\n",
816 | "\n",
817 | "Most initialization methods come in uniform and normal distribution flavors.\n",
818 | " \n",
819 | " \n",
820 | "**3. BatchNorm**\n",
821 | "BatchNorm simply learns the optimal means and scales of each layer's inputs. It does so by zero-centering and normalizing its input vectors, then scaling and shifting them. It also acts like a regularizer, which often reduces how much dropout or L2 regularization you need.\n",
822 | "\n",
823 | "Using BatchNorm lets us use larger learning rates (which result in faster convergence) and leads to huge improvements in most neural networks by reducing the vanishing gradients problem. The only downside is that it slightly increases training times because of the extra computations required at each layer.\n",
824 | " \n",
825 | " \n",
826 | "**4. Gradient Clipping**\n",
827 | "A great way to keep gradients from exploding, especially when training RNNs, is to simply clip them when they exceed a certain value. I'd recommend trying clipnorm instead of clipvalue, which allows you to keep the direction of your gradient vector consistent. Clipnorm rescales any gradient whose L2 norm is greater than a certain threshold.\n",
828 | "\n",
829 | "Try a few different threshold values to find one that works best for you.\n",
830 | " \n",
831 | " \n",
832 | "**5. Early Stopping**\n",
833 | "Early Stopping lets you live it up by training a model with more hidden layers, more hidden neurons, and for more epochs than you need, and just stopping training when performance stops improving for n consecutive epochs. It also keeps the best-performing weights for you. You can enable Early Stopping by setting up a callback when you fit your model and setting `restore_best_weights=True`."
834 | ]
835 | },
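A compact sketch (an assumption, not the author's code) of how the activation/initializer pairings and gradient clipping described above are expressed in Keras:

```python
# Sketch: pairing each activation with the weight initializer suggested above.
from keras.layers import Dense
from keras.optimizers import SGD

relu_layer = Dense(128, activation='relu', kernel_initializer='he_normal')       # He init for ReLU / leaky ReLU
selu_layer = Dense(128, activation='selu', kernel_initializer='lecun_normal')    # LeCun init for SELU / ELU
tanh_layer = Dense(128, activation='tanh', kernel_initializer='glorot_uniform')  # Glorot init for tanh / logistic / softmax

# Gradient clipping is configured on the optimizer, not on the layers:
sgd = SGD(lr=0.01, clipnorm=1.0)  # rescale any gradient whose L2 norm exceeds 1.0
```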
836 | {
837 | "cell_type": "code",
838 | "execution_count": null,
839 | "metadata": {
840 | "_kg_hide-output": false,
841 | "colab": {},
842 | "colab_type": "code",
843 | "id": "YlbnCRdKBZfW"
844 | },
845 | "outputs": [],
846 | "source": [
847 | "wandb.init(project=\"building-neural-nets\", name=\"vanishing_gradients\")"
848 | ]
849 | },
850 | {
851 | "cell_type": "code",
852 | "execution_count": null,
853 | "metadata": {
854 | "_kg_hide-output": false,
855 | "colab": {},
856 | "colab_type": "code",
857 | "id": "jW8rr0JzBZfY"
858 | },
859 | "outputs": [],
860 | "source": [
861 | "%%wandb\n",
862 | "# build model\n",
863 | "model = keras.Sequential()\n",
864 | "model.add(Conv2D(config.conv_layer_1_size, kernel_size=(config.kernel_size, config.kernel_size), activation='relu', input_shape=(config.img_width, config.img_height, 1)))\n",
865 | "model.add(BatchNormalization())\n",
866 | "model.add(Conv2D(config.conv_layer_1_size, kernel_size=(config.kernel_size, config.kernel_size), activation='selu', kernel_initializer='lecun_normal'))\n",
867 | "model.add(BatchNormalization())\n",
868 | "model.add(MaxPooling2D((config.pool_size, config.pool_size)))\n",
869 | "model.add(Conv2D(config.conv_layer_2_size, kernel_size=(config.kernel_size, config.kernel_size), activation='selu', kernel_initializer='lecun_normal'))\n",
870 | "model.add(BatchNormalization())\n",
871 | "model.add(Conv2D(config.conv_layer_2_size, kernel_size=(config.kernel_size, config.kernel_size), activation='selu', kernel_initializer='lecun_normal'))\n",
872 | "model.add(BatchNormalization())\n",
873 | "model.add(MaxPooling2D(pool_size=(config.pool_size, config.pool_size)))\n",
874 | "model.add(Conv2D(config.conv_layer_3_size, (config.kernel_size, config.kernel_size), activation='selu', kernel_initializer='lecun_normal'))\n",
875 | "model.add(Flatten())\n",
876 | "model.add(Dense(config.hidden_layer_size, activation='selu', kernel_initializer='lecun_normal'))\n",
877 | "model.add(BatchNormalization())\n",
878 | "model.add(Dense(config.num_classes, activation='softmax'))\n",
879 | "\n",
880 | "model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.SGD(lr=config.learn_rate, clipnorm=1.0),\n",
881 | " metrics=['accuracy'])\n",
882 | "model.fit(X_train, y_train, verbose=1, validation_data=(X_test, y_test), epochs=config.n_epochs,\n",
883 | " callbacks=[WandbCallback(data_type=\"image\", validation_data=(X_valid, y_valid), labels=labels), keras.callbacks.EarlyStopping(patience=10, restore_best_weights=True)])"
884 | ]
885 | },
886 | {
887 | "cell_type": "markdown",
888 | "metadata": {
889 | "colab_type": "text",
890 | "id": "X7cp0uceBZff"
891 | },
892 | "source": [
893 | "# Optimizers\n",
894 | "\n",
895 | "\n",
896 | "Gradient Descent isn't the only optimizer game in town! There are a few different ones to choose from. This post does a good job of describing some of the optimizers you can choose from.\n",
897 | "\n",
898 | "\n",
899 | "My general advice is to use Stochastic Gradient Descent if you care deeply about quality of convergence and if time is not of the essence.\n",
900 | "\n",
901 | "If you care about time-to-convergence and a point close to optimal convergence will suffice, experiment with Adam, Nadam, RMSProp, and Adamax optimizers. Your Weights and Biases dashboard will guide you to the optimizer that works best for you!\n",
902 | "\n",
903 | "Adam/Nadam are usually good starting points, and tend to be quite forgiving of a bad learning rate and other non-optimal hyperparameters.\n",
904 | "\n",
905 | "[According to Andrej Karpathy](http://karpathy.github.io/2019/04/25/recipe/), \"a well-tuned SGD will almost always slightly outperform Adam\" in the case of ConvNets.\n",
906 | "\n",
907 | "In this kernel, I got the best performance from Nadam, which is just your regular Adam optimizer with the Nesterov trick, and thus converges faster than Adam."
908 | ]
909 | },
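One way to act on this advice is to hold the architecture fixed and launch one W&B run per optimizer. The sketch below is an assumption rather than the kernel's code, and `build_model()` is a hypothetical helper that would recreate the CNN defined above.

```python
# Sketch: one W&B run per optimizer, everything else held constant.
# build_model() is a hypothetical helper that recreates the CNN defined above.
from keras import optimizers

candidates = {
    'sgd':   optimizers.SGD(lr=config.learn_rate, momentum=config.momentum, nesterov=True),
    'adam':  optimizers.Adam(lr=config.learn_rate_low),
    'nadam': optimizers.Nadam(lr=config.learn_rate_low),
}

for name, opt in candidates.items():
    wandb.init(project="building-neural-nets", name="optimizer_" + name, reinit=True)
    m = build_model()
    m.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
    m.fit(X_train, y_train, validation_data=(X_valid, y_valid), epochs=5,
          callbacks=[WandbCallback()])
```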
910 | {
911 | "cell_type": "code",
912 | "execution_count": null,
913 | "metadata": {
914 | "_kg_hide-output": false,
915 | "colab": {},
916 | "colab_type": "code",
917 | "id": "6ChRevtuBZfg"
918 | },
919 | "outputs": [],
920 | "source": [
921 | "wandb.init(project=\"building-neural-nets\", name=\"nadam_optimizer\")"
922 | ]
923 | },
924 | {
925 | "cell_type": "code",
926 | "execution_count": null,
927 | "metadata": {
928 | "_kg_hide-output": false,
929 | "colab": {},
930 | "colab_type": "code",
931 | "id": "3EchP1hxBZfi"
932 | },
933 | "outputs": [],
934 | "source": [
935 | "%%wandb\n",
936 | "# build model\n",
937 | "model = keras.Sequential()\n",
938 | "model.add(Conv2D(config.conv_layer_1_size, kernel_size=(config.kernel_size, config.kernel_size), activation='selu', kernel_initializer='lecun_normal',\n",
939 | " input_shape=(config.img_width, config.img_height, 1)))\n",
940 | "model.add(BatchNormalization())\n",
941 | "model.add(Conv2D(config.conv_layer_1_size, kernel_size=(config.kernel_size, config.kernel_size), activation='selu', kernel_initializer='lecun_normal'))\n",
942 | "model.add(BatchNormalization())\n",
943 | "model.add(MaxPooling2D((config.pool_size, config.pool_size)))\n",
944 | "model.add(AlphaDropout(rate=config.dropout))\n",
945 | "model.add(Conv2D(config.conv_layer_2_size, kernel_size=(config.kernel_size, config.kernel_size), activation='selu', kernel_initializer='lecun_normal'))\n",
946 | "model.add(BatchNormalization())\n",
947 | "model.add(Conv2D(config.conv_layer_2_size, kernel_size=(config.kernel_size, config.kernel_size), activation='selu', kernel_initializer='lecun_normal'))\n",
948 | "model.add(BatchNormalization())\n",
949 | "model.add(MaxPooling2D(pool_size=(config.pool_size, config.pool_size)))\n",
950 | "model.add(AlphaDropout(rate=config.dropout))\n",
951 | "model.add(Conv2D(config.conv_layer_3_size, (config.kernel_size, config.kernel_size), activation='selu', kernel_initializer='lecun_normal'))\n",
952 | "model.add(AlphaDropout(rate=config.dropout))\n",
953 | "model.add(Flatten())\n",
954 | "model.add(Dense(config.hidden_layer_size, activation='selu', kernel_initializer='lecun_normal'))\n",
955 | "BatchNormalization(),\n",
956 | "AlphaDropout(rate=config.dropout),\n",
957 | "model.add(Dense(config.num_classes, activation='softmax'))\n",
958 | "\n",
959 | "model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Nadam(lr=config.learn_rate, beta_1=0.9, beta_2=0.999, clipnorm=1.0), metrics=['accuracy'])\n",
960 | "model.fit(X_train, y_train, verbose=1, validation_data=(X_test, y_test), epochs=config.n_epochs,\n",
961 | " callbacks=[WandbCallback(data_type=\"image\", labels=labels, validation_data=(X_valid, y_valid)), tf.keras.callbacks.EarlyStopping(patience=10, restore_best_weights=True)])"
962 | ]
963 | },
964 | {
965 | "cell_type": "markdown",
966 | "metadata": {
967 | "colab_type": "text",
968 | "id": "lYGWaWImBZfk"
969 | },
970 | "source": [
971 | "# Learning Rate Scheduling\n",
972 | "\n",
973 | "\n",
974 | "\n",
975 | "\n",
976 | "We talked about the importance of a good learning rate already – we don't want it to be too high, lest the cost function dance around the optimum value and diverge. We also don't want it to be too low because that means convergence will take a very long time.\n",
977 | "\n",
978 | "Babysitting the learning rate can be tough, because both higher and lower learning rates have their advantages. The great news is that we don't have to commit to one learning rate! With learning rate scheduling we can start with a higher rate to move quickly down the steep slopes of the loss surface, and slow down when we reach a flatter valley in parameter space that requires smaller steps.\n",
979 | "\n",
980 | "There are many ways to schedule learning rates, including decaying the learning rate exponentially, using a step function, reducing it when performance stops improving, or using 1cycle scheduling. In this kernel, I show you how to use the ReduceLROnPlateau callback to reduce the learning rate by a constant factor whenever the monitored metric has stopped improving for n epochs.\n",
981 | "\n",
982 | "I would highly recommend also trying out 1cycle scheduling.\n",
983 | "\n",
984 | "Use a constant learning rate until you've tuned all the other hyper-parameters, and add learning rate decay scheduling at the end.\n",
985 | "\n",
986 | "As with most things, I'd recommend running a few different experiments with different scheduling strategies and using your Weights and Biases dashboard to pick the one that leads to the best model."
987 | ]
988 | },
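{
"cell_type": "markdown",
"metadata": {},
"source": [
"Before the ReduceLROnPlateau run below, here is a minimal sketch of the exponential-decay alternative mentioned above, using Keras's LearningRateScheduler callback. It assumes the same `config.learn_rate` and imports used throughout this notebook; the decay constant is just an illustrative choice, not a tuned value."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch: exponential learning rate decay via a LearningRateScheduler callback.\n",
"# Starts at config.learn_rate and divides it by 10 every 50 epochs\n",
"# (a per-epoch multiplier of roughly 0.955).\n",
"def exponential_decay(epoch):\n",
"    return config.learn_rate * 0.1 ** (epoch / 50)\n",
"\n",
"exp_lr_scheduler = keras.callbacks.LearningRateScheduler(exponential_decay)\n",
"\n",
"# Pass it alongside the other callbacks when calling model.fit, e.g.:\n",
"# model.fit(..., callbacks=[WandbCallback(...), exp_lr_scheduler])"
]
},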
989 | {
990 | "cell_type": "code",
991 | "execution_count": null,
992 | "metadata": {
993 | "_kg_hide-output": false,
994 | "colab": {},
995 | "colab_type": "code",
996 | "id": "eKyYXeKmBZfk"
997 | },
998 | "outputs": [],
999 | "source": [
1000 | "wandb.init(project=\"building-neural-nets\", name=\"learningrate\")"
1001 | ]
1002 | },
1003 | {
1004 | "cell_type": "code",
1005 | "execution_count": null,
1006 | "metadata": {
1007 | "_kg_hide-output": false,
1008 | "colab": {},
1009 | "colab_type": "code",
1010 | "id": "3t3TkMRmBZfl"
1011 | },
1012 | "outputs": [],
1013 | "source": [
1014 | "%%wandb\n",
1015 | "# build model\n",
1016 | "model = keras.Sequential()\n",
1017 | "model.add(Conv2D(config.conv_layer_1_size, kernel_size=(config.kernel_size, config.kernel_size), activation='selu', kernel_initializer='lecun_normal',\n",
1018 | " input_shape=(config.img_width, config.img_height, 1)))\n",
1019 | "model.add(BatchNormalization())\n",
1020 | "model.add(Conv2D(config.conv_layer_1_size, kernel_size=(config.kernel_size, config.kernel_size), activation='selu', kernel_initializer='lecun_normal'))\n",
1021 | "model.add(BatchNormalization())\n",
1022 | "model.add(MaxPooling2D((config.pool_size, config.pool_size)))\n",
1023 | "model.add(AlphaDropout(rate=config.dropout))\n",
1024 | "model.add(Conv2D(config.conv_layer_2_size, kernel_size=(config.kernel_size, config.kernel_size), activation='selu', kernel_initializer='lecun_normal'))\n",
1025 | "model.add(BatchNormalization())\n",
1026 | "model.add(Conv2D(config.conv_layer_2_size, kernel_size=(config.kernel_size, config.kernel_size), activation='selu', kernel_initializer='lecun_normal'))\n",
1027 | "model.add(BatchNormalization())\n",
1028 | "model.add(MaxPooling2D(pool_size=(config.pool_size, config.pool_size)))\n",
1029 | "model.add(AlphaDropout(rate=config.dropout))\n",
1030 | "model.add(Conv2D(config.conv_layer_3_size, (config.kernel_size, config.kernel_size), activation='selu', kernel_initializer='lecun_normal'))\n",
1031 | "model.add(AlphaDropout(rate=config.dropout))\n",
1032 | "model.add(Flatten())\n",
1033 | "model.add(Dense(config.hidden_layer_size, activation='selu', kernel_initializer='lecun_normal'))\n",
1034 | "model.add(BatchNormalization())\n",
1035 | "model.add(AlphaDropout(rate=config.dropout))\n",
1036 | "model.add(Dense(config.num_classes, activation='softmax'))\n",
1037 | "\n",
1038 | "lr_scheduler = keras.callbacks.ReduceLROnPlateau(factor=0.5, patience=5)\n",
1039 | "model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Nadam(lr=config.learn_rate, beta_1=0.9, beta_2=0.999, clipnorm=1.0), metrics=['accuracy'])\n",
1040 | "model.fit(X_train, y_train, verbose=1, validation_data=(X_test, y_test), epochs=config.n_epochs,\n",
1041 | " callbacks=[WandbCallback(data_type=\"image\", labels=labels, validation_data=(X_valid, y_valid)), keras.callbacks.EarlyStopping(patience=10, restore_best_weights=True), lr_scheduler])"
1042 | ]
1043 | },
1044 | {
1045 | "cell_type": "markdown",
1046 | "metadata": {
1047 | "colab_type": "text",
1048 | "id": "kLJjQltwBZfn"
1049 | },
1050 | "source": [
1051 | "# Make Predictions"
1052 | ]
1053 | },
1054 | {
1055 | "cell_type": "code",
1056 | "execution_count": null,
1057 | "metadata": {
1058 | "_kg_hide-output": true,
1059 | "colab": {},
1060 | "colab_type": "code",
1061 | "id": "B5C-a3KjBZfo"
1062 | },
1063 | "outputs": [],
1064 | "source": [
1065 | "model.evaluate(X_test, y_test)"
1066 | ]
1067 | },
1068 | {
1069 | "cell_type": "code",
1070 | "execution_count": null,
1071 | "metadata": {
1072 | "_kg_hide-output": true,
1073 | "colab": {},
1074 | "colab_type": "code",
1075 | "id": "LsFAgV_pBZfp"
1076 | },
1077 | "outputs": [],
1078 | "source": [
1079 | "model.save(\"fashion_mnist_model.h5\")"
1080 | ]
1081 | },
1082 | {
1083 | "cell_type": "markdown",
1084 | "metadata": {
1085 | "colab_type": "text",
1086 | "id": "6_cOCMQFBZfq"
1087 | },
1088 | "source": [
1089 | "# Let's make things more interesting - CIFAR10"
1090 | ]
1091 | },
1092 | {
1093 | "cell_type": "code",
1094 | "execution_count": null,
1095 | "metadata": {
1096 | "colab": {},
1097 | "colab_type": "code",
1098 | "id": "PiY-5K9mBZfr"
1099 | },
1100 | "outputs": [],
1101 | "source": [
1102 | "from __future__ import print_function\n",
1103 | "import keras\n",
1104 | "from keras.datasets import cifar10\n",
1105 | "from keras.preprocessing.image import ImageDataGenerator\n",
1106 | "from keras.models import Sequential\n",
1107 | "from keras.layers import Dense, Dropout, Activation, Flatten\n",
1108 | "from keras.layers import Conv2D, MaxPooling2D\n",
1109 | "import os"
1110 | ]
1111 | },
1112 | {
1113 | "cell_type": "code",
1114 | "execution_count": null,
1115 | "metadata": {
1116 | "colab": {},
1117 | "colab_type": "code",
1118 | "id": "n1cnVznDBZfs"
1119 | },
1120 | "outputs": [],
1121 | "source": [
1122 | "batch_size = 32\n",
1123 | "num_classes = 10\n",
1124 | "epochs = 100\n",
1125 | "data_augmentation = True\n",
1126 | "num_predictions = 20\n",
1127 | "\n",
1128 | "# The data, split between train and test sets:\n",
1129 | "(x_train, y_train), (x_test, y_test) = cifar10.load_data()\n",
1130 | "print('x_train shape:', x_train.shape)\n",
1131 | "print(x_train.shape[0], 'train samples')\n",
1132 | "print(x_test.shape[0], 'test samples')\n",
1133 | "\n",
1134 | "# Convert class vectors to binary class matrices.\n",
1135 | "y_train = keras.utils.to_categorical(y_train, num_classes)\n",
1136 | "y_test = keras.utils.to_categorical(y_test, num_classes)\n",
1137 | "x_train = x_train.astype('float32') / 255.0\n",
1138 | "x_test = x_test.astype('float32') / 255.0\n",
1139 | "\n",
1140 | "labels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']"
1141 | ]
1142 | },
1143 | {
1144 | "cell_type": "code",
1145 | "execution_count": null,
1146 | "metadata": {
1147 | "colab": {},
1148 | "colab_type": "code",
1149 | "id": "5SEXIPctBZfu"
1150 | },
1151 | "outputs": [],
1152 | "source": [
1153 | "wandb.init(project=\"building-neural-nets\", name=\"cifar-10\")"
1154 | ]
1155 | },
1156 | {
1157 | "cell_type": "code",
1158 | "execution_count": null,
1159 | "metadata": {
1160 | "colab": {},
1161 | "colab_type": "code",
1162 | "id": "b07ZGnImBZfv"
1163 | },
1164 | "outputs": [],
1165 | "source": [
1166 | "%%wandb\n",
1167 | "model = Sequential()\n",
1168 | "model.add(Conv2D(32, (3, 3), padding='same', input_shape=x_train.shape[1:], activation='relu'))\n",
1169 | "model.add(Conv2D(32, (3, 3), activation='relu'))\n",
1170 | "model.add(MaxPooling2D(pool_size=(2, 2)))\n",
1171 | "model.add(Dropout(0.25))\n",
1172 | "model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))\n",
1173 | "model.add(Conv2D(64, (3, 3), activation='relu'))\n",
1174 | "model.add(MaxPooling2D(pool_size=(2, 2)))\n",
1175 | "model.add(Dropout(0.25))\n",
1176 | "model.add(Flatten())\n",
1177 | "model.add(Dense(512, activation='relu'))\n",
1178 | "model.add(Dropout(0.5))\n",
1179 | "model.add(Dense(num_classes, activation='softmax'))\n",
1180 | "\n",
1181 | "# Let's train the model using Nadam\n",
1182 | "model.compile(loss='categorical_crossentropy',\n",
1183 | " optimizer=keras.optimizers.Nadam(lr=0.0001, beta_1=0.9, beta_2=0.999, clipnorm=1.0),\n",
1184 | " metrics=['accuracy'])\n",
1185 | "\n",
1186 | "model.fit(x_train, y_train,\n",
1187 | " batch_size=batch_size,\n",
1188 | " epochs=epochs,\n",
1189 | " validation_data=(x_test, y_test),\n",
1190 | " callbacks=[WandbCallback(data_type=\"image\", labels=labels, validation_data=(x_test, y_test)), keras.callbacks.EarlyStopping(patience=10, restore_best_weights=True)],\n",
1191 | " shuffle=True)"
1192 | ]
1193 | },
1194 | {
1195 | "cell_type": "markdown",
1196 | "metadata": {
1197 | "colab_type": "text",
1198 | "id": "OqxZ-74nBZfw"
1199 | },
1200 | "source": [
1201 | "# A Few More Things\n",
1202 | "- Try [EfficientNets](https://arxiv.org/pdf/1905.11946.pdf) to scale your network in an optimal way.\n",
1203 | "- Read [this paper](https://arxiv.org/pdf/1803.09820.pdf) for an overview of additional learning rate, batch size, momentum, and weight decay techniques.\n",
1204 | "- And [this one](https://arxiv.org/abs/1803.05407) on Stochastic Weight Averaging (SWA). It shows that better generalization can be achieved by averaging multiple points along SGD's trajectory, with a cyclical or constant learning rate.\n",
1205 | "- Read Andrej Karpathy's [excellent guide](http://karpathy.github.io/2019/04/25/recipe/) on getting the most juice out of your neural networks."
1206 | ]
1207 | },
1208 | {
1209 | "cell_type": "markdown",
1210 | "metadata": {
1211 | "colab_type": "text",
1212 | "id": "-yuH21GbBZfx"
1213 | },
1214 | "source": [
1215 | "# That was fun, yeah?\n",
1216 | "\n",
1217 | "We've explored a lot of different facets of neural networks in this post!\n",
1218 | "\n",
1219 | "We've looked at how to set up a basic neural network (including choosing the number of hidden layers, hidden neurons, batch sizes, etc.).\n",
1220 | "\n",
1221 | "We've learnt about the role momentum and learning rates play in influencing model performance.\n",
1222 | "\n",
1223 | "And finally we've explored the problem of vanishing gradients and how to tackle it using non-saturating activation functions, BatchNorm, better weight initialization techniques and early stopping.\n",
1224 | "\n",
1225 | "You can compare the accuracy and loss curves for the various techniques we tried in a single chart by visiting your [Weights and Biases](https://app.wandb.ai/home) dashboard.\n",
1226 | "\n",
1227 | "Neural networks are powerful beasts that give you a lot of levers to tweak to get the best performance for the problems you're trying to solve! The sheer number of customizations they offer can overwhelm even seasoned practitioners. Tools like Weights and Biases are your best friends in navigating the land of hyper-parameters, trying different experiments and picking the most powerful models.\n",
1228 | "\n",
1229 | "I hope this guide will serve as a good starting point in your adventures.\n",
1230 | "\n",
1231 | "If you have any questions or feedback, please don't hesitate to [message me](https://twitter.com/lavanyaai)!\n",
1232 | "\n",
1233 | "Good luck!\n",
1234 | "\n",
1235 | "\n",
1236 | "----------\n",
1237 | "## Weights & Biases\n",
1238 | "\n",
1239 | "We're building lightweight, flexible experiment tracking tools for deep learning. Add a couple of lines to your Python script, and we'll keep track of your hyperparameters and output metrics, making it easy to compare runs and see the whole history of your progress. Think of us like GitHub for deep learning. Find out more at [wandb.com](http://wandb.com).\n"
1240 | ]
1241 | },
1242 | {
1243 | "cell_type": "code",
1244 | "execution_count": null,
1245 | "metadata": {
1246 | "colab": {},
1247 | "colab_type": "code",
1248 | "id": "LHVSc1BmBZfx"
1249 | },
1250 | "outputs": [],
1251 | "source": []
1252 | }
1253 | ],
1254 | "metadata": {
1255 | "accelerator": "GPU",
1256 | "colab": {
1257 | "include_colab_link": true,
1258 | "name": "keras_neural_network_architecture.ipynb",
1259 | "provenance": []
1260 | },
1261 | "kernelspec": {
1262 | "display_name": "Python 3",
1263 | "language": "python",
1264 | "name": "python3"
1265 | },
1266 | "language_info": {
1267 | "codemirror_mode": {
1268 | "name": "ipython",
1269 | "version": 3
1270 | },
1271 | "file_extension": ".py",
1272 | "mimetype": "text/x-python",
1273 | "name": "python",
1274 | "nbconvert_exporter": "python",
1275 | "pygments_lexer": "ipython3",
1276 | "version": "3.7.3"
1277 | },
1278 | "toc": {
1279 | "base_numbering": 1,
1280 | "nav_menu": {
1281 | "height": "85px",
1282 | "width": "179px"
1283 | },
1284 | "number_sections": true,
1285 | "sideBar": true,
1286 | "skip_h1_title": false,
1287 | "title_cell": "Table of Contents",
1288 | "title_sidebar": "Contents",
1289 | "toc_cell": true,
1290 | "toc_position": {},
1291 | "toc_section_display": true,
1292 | "toc_window_display": true
1293 | }
1294 | },
1295 | "nbformat": 4,
1296 | "nbformat_minor": 4
1297 | }
1298 |
--------------------------------------------------------------------------------