├── .github
│   ├── ISSUE_TEMPLATE
│   │   └── feature_request.md
│   └── workflows
│       └── action.yml
├── .gitignore
├── LICENSE
├── RCV1 Dataset Visualization
│   ├── SOM with package.ipynb
│   ├── Self Organizing Map.ipynb
│   └── model
│       └── README.md
├── README.md
├── Seeds Dataset Classifier
│   ├── classifier.ipynb
│   ├── data
│   │   └── seeds_dataset.txt
│   ├── logs
│   │   ├── train
│   │   │   ├── events.out.tfevents.1659337186.farshids-MacBook-Pro.local.15779.4.v2
│   │   │   ├── events.out.tfevents.1659369894.farshids-MacBook-Pro.local.2214.0.v2
│   │   │   ├── events.out.tfevents.1659370512.farshids-MacBook-Pro.local.5988.0.v2
│   │   │   ├── events.out.tfevents.1659371074.farshids-MacBook-Pro.local.5988.2.v2
│   │   │   ├── events.out.tfevents.1659371095.farshids-MacBook-Pro.local.5988.4.v2
│   │   │   ├── events.out.tfevents.1659371108.farshids-MacBook-Pro.local.5988.6.v2
│   │   │   ├── events.out.tfevents.1659371844.farshids-MacBook-Pro.local.12364.0.v2
│   │   │   ├── events.out.tfevents.1659371871.farshids-MacBook-Pro.local.12364.2.v2
│   │   │   ├── events.out.tfevents.1659371974.farshids-MacBook-Pro.local.12364.4.v2
│   │   │   ├── events.out.tfevents.1659372079.farshids-MacBook-Pro.local.13268.0.v2
│   │   │   ├── events.out.tfevents.1659372097.farshids-MacBook-Pro.local.13268.2.v2
│   │   │   ├── events.out.tfevents.1659372166.farshids-MacBook-Pro.local.13268.4.v2
│   │   │   ├── events.out.tfevents.1659372179.farshids-MacBook-Pro.local.13268.6.v2
│   │   │   ├── events.out.tfevents.1659372277.farshids-MacBook-Pro.local.13268.8.v2
│   │   │   ├── events.out.tfevents.1659372301.farshids-MacBook-Pro.local.13268.10.v2
│   │   │   ├── events.out.tfevents.1659372388.farshids-MacBook-Pro.local.13268.12.v2
│   │   │   ├── events.out.tfevents.1659372459.farshids-MacBook-Pro.local.13268.14.v2
│   │   │   ├── events.out.tfevents.1659372468.farshids-MacBook-Pro.local.13268.16.v2
│   │   │   ├── events.out.tfevents.1659372594.farshids-MacBook-Pro.local.15171.0.v2
│   │   │   ├── events.out.tfevents.1659372612.farshids-MacBook-Pro.local.15171.2.v2
│   │   │   ├── events.out.tfevents.1659375153.farshids-MacBook-Pro.local.21272.0.v2
│   │   │   └── events.out.tfevents.1659375170.farshids-MacBook-Pro.local.21272.2.v2
│   │   └── validation
│   │       ├── events.out.tfevents.1659337186.farshids-MacBook-Pro.local.15779.5.v2
│   │       ├── events.out.tfevents.1659369894.farshids-MacBook-Pro.local.2214.1.v2
│   │       ├── events.out.tfevents.1659370512.farshids-MacBook-Pro.local.5988.1.v2
│   │       ├── events.out.tfevents.1659371075.farshids-MacBook-Pro.local.5988.3.v2
│   │       ├── events.out.tfevents.1659371095.farshids-MacBook-Pro.local.5988.5.v2
│   │       ├── events.out.tfevents.1659371108.farshids-MacBook-Pro.local.5988.7.v2
│   │       ├── events.out.tfevents.1659371845.farshids-MacBook-Pro.local.12364.1.v2
│   │       ├── events.out.tfevents.1659371872.farshids-MacBook-Pro.local.12364.3.v2
│   │       ├── events.out.tfevents.1659371974.farshids-MacBook-Pro.local.12364.5.v2
│   │       ├── events.out.tfevents.1659372080.farshids-MacBook-Pro.local.13268.1.v2
│   │       ├── events.out.tfevents.1659372097.farshids-MacBook-Pro.local.13268.3.v2
│   │       ├── events.out.tfevents.1659372166.farshids-MacBook-Pro.local.13268.5.v2
│   │       ├── events.out.tfevents.1659372180.farshids-MacBook-Pro.local.13268.7.v2
│   │       ├── events.out.tfevents.1659372277.farshids-MacBook-Pro.local.13268.9.v2
│   │       ├── events.out.tfevents.1659372301.farshids-MacBook-Pro.local.13268.11.v2
│   │       ├── events.out.tfevents.1659372388.farshids-MacBook-Pro.local.13268.13.v2
│   │       ├── events.out.tfevents.1659372459.farshids-MacBook-Pro.local.13268.15.v2
│   │       ├── events.out.tfevents.1659372468.farshids-MacBook-Pro.local.13268.17.v2
│   │       ├── events.out.tfevents.1659372594.farshids-MacBook-Pro.local.15171.1.v2
│   │       ├── events.out.tfevents.1659372612.farshids-MacBook-Pro.local.15171.3.v2
│   │       ├── events.out.tfevents.1659375153.farshids-MacBook-Pro.local.21272.1.v2
│   │       └── events.out.tfevents.1659375170.farshids-MacBook-Pro.local.21272.3.v2
│   ├── model.png
│   └── models
│       ├── model_non_overfitting.h5
│       └── model_tensorboard.h5
└── Tensorflow Developer
    ├── Convolutional Neural Networks in TensorFlow
    │   ├── README.md
    │   ├── W1
    │   │   ├── assignment
    │   │   │   └── C2W1_Assignment.ipynb
    │   │   └── ungraded_lab
    │   │       └── C2_W1_Lab_1_cats_vs_dogs.ipynb
    │   ├── W2
    │   │   ├── assignment
    │   │   │   └── C2W2_Assignment.ipynb
    │   │   └── ungraded_labs
    │   │       ├── C2_W2_Lab_1_cats_v_dogs_augmentation.ipynb
    │   │       └── C2_W2_Lab_2_horses_v_humans_augmentation.ipynb
    │   ├── W3
    │   │   ├── assignment
    │   │   │   └── C2W3_Assignment.ipynb
    │   │   └── ungraded_lab
    │   │       └── C2_W3_Lab_1_transfer_learning.ipynb
    │   └── W4
    │       ├── assignment
    │       │   └── C2W4_Assignment.ipynb
    │       └── ungraded_lab
    │           └── C2_W4_Lab_1_multi_class_classifier.ipynb
    ├── Introduction to TensorFlow for Artificial Intelligence, Machine Learning, and Deep Learning
    │   ├── README.md
    │   ├── W1
    │   │   ├── assinment
    │   │   │   └── C1W1_Assignment.ipynb
    │   │   └── ungraded_lab
    │   │       └── C1_W1_Lab_1_hello_world_nn.ipynb
    │   ├── W2
    │   │   ├── assignment
    │   │   │   └── C1W2_Assignment.ipynb
    │   │   └── ungraded_labs
    │   │       ├── C1_W2_Lab_1_beyond_hello_world.ipynb
    │   │       └── C1_W2_Lab_2_callbacks.ipynb
    │   ├── W3
    │   │   ├── assignment
    │   │   │   └── C1W3_Assignment.ipynb
    │   │   └── ungraded_labs
    │   │       ├── C1_W3_Lab_1_improving_accuracy_using_convolutions.ipynb
    │   │       └── C1_W3_Lab_2_exploring_convolutions.ipynb
    │   └── W4
    │       ├── assignment
    │       │   └── C1W4_Assignment.ipynb
    │       └── ungraded_labs
    │           ├── C1_W4_Lab_1_image_generator_no_validation.ipynb
    │           ├── C1_W4_Lab_2_image_generator_with_validation.ipynb
    │           └── C1_W4_Lab_3_compacted_images.ipynb
    ├── Natural Language Processing in TensorFlow
    │   ├── README.md
    │   ├── W1
    │   │   ├── assignment
    │   │   │   └── C3W1_Assignment.ipynb
    │   │   └── ungraded_labs
    │   │       ├── C3_W1_Lab_1_tokenize_basic.ipynb
    │   │       ├── C3_W1_Lab_2_sequences_basic.ipynb
    │   │       └── C3_W1_Lab_3_sarcasm.ipynb
    │   ├── W2
    │   │   ├── assignment
    │   │   │   └── C3W2_Assignment.ipynb
    │   │   └── ungraded_labs
    │   │       ├── C3_W2_Lab_1_imdb.ipynb
    │   │       ├── C3_W2_Lab_2_sarcasm_classifier.ipynb
    │   │       └── C3_W2_Lab_3_imdb_subwords.ipynb
    │   ├── W3
    │   │   ├── assignment
    │   │   │   ├── C3W3_Assignment.ipynb
    │   │   │   └── images
    │   │   │       ├── invalid-1.jpg
    │   │   │       ├── valid-1.png
    │   │   │       ├── valid-2.jpg
    │   │   │       └── valid-3.jpg
    │   │   └── ungraded_labs
    │   │       ├── C3_W3_Lab_1_single_layer_LSTM.ipynb
    │   │       ├── C3_W3_Lab_2_multiple_layer_LSTM.ipynb
    │   │       ├── C3_W3_Lab_3_Conv1D.ipynb
    │   │       ├── C3_W3_Lab_4_imdb_reviews_with_GRU_LSTM_Conv1D.ipynb
    │   │       ├── C3_W3_Lab_5_sarcasm_with_bi_LSTM.ipynb
    │   │       └── C3_W3_Lab_6_sarcasm_with_1D_convolutional.ipynb
    │   └── W4
    │       ├── assignment
    │       │   └── C3W4_Assignment.ipynb
    │       ├── misc
    │       │   └── Laurences_generated_poetry.txt
    │       └── ungraded_labs
    │           ├── C3_W4_Lab_1.ipynb
    │           └── C3_W4_Lab_2_irish_lyrics.ipynb
    ├── README.md
    └── Sequences, Time Series and Prediction
        ├── README.md
        ├── W1
        │   ├── assignment
        │   │   ├── C4W1_Assignment.ipynb
        │   │   └── images
        │   │       ├── diff.png
        │   │       ├── diff_moving.png
        │   │       ├── moving_avg.png
        │   │       ├── naive.png
        │   │       ├── naive_zoom.png
        │   │       ├── plus_past.png
        │   │       ├── plus_smooth.png
        │   │       ├── train_series.png
        │   │       └── val_series.png
        │   └── ungraded_labs
        │       ├── C4_W1_Lab_1_time_series.ipynb
        │       └── C4_W1_Lab_2_forecasting.ipynb
        ├── W2
        │   ├── assignment
        │   │   ├── C4W2_Assignment.ipynb
        │   │   └── images
        │   │       └── forecast.png
        │   └── ungraded_labs
        │       ├── C4_W2_Lab_1_features_and_labels.ipynb
        │       ├── C4_W2_Lab_2_single_layer_NN.ipynb
        │       └── C4_W2_Lab_3_deep_NN.ipynb
        ├── W3
        │   ├── assignment
        │   │   ├── C4W3_Assignment.ipynb
        │   │   └── images
        │   │       └── expected.png
        │   └── ungraded_labs
        │       ├── C4_W3_Lab_1_RNN.ipynb
        │       └── C4_W3_Lab_2_LSTM.ipynb
        └── W4
            ├── assignment
            │   ├── C4W4_Assignment.ipynb
            │   ├── data
            │   │   └── daily-min-temperatures.csv
            │   └── images
            │       └── temp-series.png
            └── ungraded_labs
                ├── C4_W4_Lab_1_LSTM.ipynb
                ├── C4_W4_Lab_2_Sunspots_DNN.ipynb
                └── C4_W4_Lab_3_Sunspots_CNN_RNN_DNN.ipynb
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Suggest an idea for this project
4 | title: ''
5 | labels: documentation, enhancement
6 | assignees: FarshidNooshi
7 |
8 | ---
9 |
10 | **Is your feature request related to a problem? Please describe.**
11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
12 |
13 | **Describe the solution you'd like**
14 | A clear and concise description of what you want to happen.
15 |
16 | **Describe alternatives you've considered**
17 | A clear and concise description of any alternative solutions or features you've considered.
18 |
19 | **Additional context**
20 | Add any other context or screenshots about the feature request here.
21 |
--------------------------------------------------------------------------------
/.github/workflows/action.yml:
--------------------------------------------------------------------------------
1 | # This is a basic workflow to help you get started with Actions
2 |
3 | name: CI
4 |
5 | # Controls when the workflow will run
6 | on:
7 | # Triggers the workflow on push or pull request events but only for the "master" branch
8 | push:
9 | branches: [ "master" ]
10 | pull_request:
11 | branches: [ "master" ]
12 |
13 | # Allows you to run this workflow manually from the Actions tab
14 | workflow_dispatch:
15 |
16 | # A workflow run is made up of one or more jobs that can run sequentially or in parallel
17 | jobs:
18 | # This workflow contains a single job called "build"
19 | build:
20 | # The type of runner that the job will run on
21 | runs-on: ubuntu-latest
22 |
23 | # Steps represent a sequence of tasks that will be executed as part of the job
24 | steps:
25 | # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
26 | - uses: actions/checkout@v3
27 |
 28 |       # Runs a single command using the runner's shell
29 | - name: Run a one-line script
30 | run: echo Hello, world!
31 |
32 | # - name: An easy to use blogging platform with support for Jupyter Notebooks.
33 | # uses: fastai/fastpages@v2.1.5
34 |
35 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.pkl
2 |
3 |
4 | .vscode/
5 | __pycache__
6 | .ipynb_checkpoints
7 | .DS_Store
8 |
9 | .*
10 |
11 | # Created by https://www.toptal.com/developers/gitignore/api/visualstudiocode,python,pycharm+all,macos,windows,linux
12 | # Edit at https://www.toptal.com/developers/gitignore?templates=visualstudiocode,python,pycharm+all,macos,windows,linux
13 |
14 | ### Linux ###
15 | *~
16 |
17 | # temporary files which can be created if a process still has a handle open of a deleted file
18 | .fuse_hidden*
19 |
20 | # KDE directory preferences
21 | .directory
22 |
23 | # Linux trash folder which might appear on any partition or disk
24 | .Trash-*
25 |
26 | # .nfs files are created when an open file is removed but is still being accessed
27 | .nfs*
28 |
29 | ### macOS ###
30 | # General
31 | .DS_Store
32 | .AppleDouble
33 | .LSOverride
34 |
35 | # Icon must end with two \r
36 | Icon
37 |
38 |
39 | # Thumbnails
40 | ._*
41 |
42 | # Files that might appear in the root of a volume
43 | .DocumentRevisions-V100
44 | .fseventsd
45 | .Spotlight-V100
46 | .TemporaryItems
47 | .Trashes
48 | .VolumeIcon.icns
49 | .com.apple.timemachine.donotpresent
50 |
51 | # Directories potentially created on remote AFP share
52 | .AppleDB
53 | .AppleDesktop
54 | Network Trash Folder
55 | Temporary Items
56 | .apdisk
57 |
58 | ### macOS Patch ###
59 | # iCloud generated files
60 | *.icloud
61 |
62 | ### PyCharm+all ###
63 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
64 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
65 |
66 | # User-specific stuff
67 | .idea/**/workspace.xml
68 | .idea/**/tasks.xml
69 | .idea/**/usage.statistics.xml
70 | .idea/**/dictionaries
71 | .idea/**/shelf
72 |
73 | # AWS User-specific
74 | .idea/**/aws.xml
75 |
76 | # Generated files
77 | .idea/**/contentModel.xml
78 |
79 | # Sensitive or high-churn files
80 | .idea/**/dataSources/
81 | .idea/**/dataSources.ids
82 | .idea/**/dataSources.local.xml
83 | .idea/**/sqlDataSources.xml
84 | .idea/**/dynamic.xml
85 | .idea/**/uiDesigner.xml
86 | .idea/**/dbnavigator.xml
87 |
88 | # Gradle
89 | .idea/**/gradle.xml
90 | .idea/**/libraries
91 |
92 | # Gradle and Maven with auto-import
93 | # When using Gradle or Maven with auto-import, you should exclude module files,
94 | # since they will be recreated, and may cause churn. Uncomment if using
95 | # auto-import.
96 | # .idea/artifacts
97 | # .idea/compiler.xml
98 | # .idea/jarRepositories.xml
99 | # .idea/modules.xml
100 | # .idea/*.iml
101 | # .idea/modules
102 | # *.iml
103 | # *.ipr
104 |
105 | # CMake
106 | cmake-build-*/
107 |
108 | # Mongo Explorer plugin
109 | .idea/**/mongoSettings.xml
110 |
111 | # File-based project format
112 | *.iws
113 |
114 | # IntelliJ
115 | out/
116 |
117 | # mpeltonen/sbt-idea plugin
118 | .idea_modules/
119 |
120 | # JIRA plugin
121 | atlassian-ide-plugin.xml
122 |
123 | # Cursive Clojure plugin
124 | .idea/replstate.xml
125 |
126 | # SonarLint plugin
127 | .idea/sonarlint/
128 |
129 | # Crashlytics plugin (for Android Studio and IntelliJ)
130 | com_crashlytics_export_strings.xml
131 | crashlytics.properties
132 | crashlytics-build.properties
133 | fabric.properties
134 |
135 | # Editor-based Rest Client
136 | .idea/httpRequests
137 |
138 | # Android studio 3.1+ serialized cache file
139 | .idea/caches/build_file_checksums.ser
140 |
141 | ### PyCharm+all Patch ###
142 | # Ignore everything but code style settings and run configurations
143 | # that are supposed to be shared within teams.
144 |
145 | .idea/*
146 |
147 | !.idea/codeStyles
148 | !.idea/runConfigurations
149 |
150 | ### Python ###
151 | # Byte-compiled / optimized / DLL files
152 | __pycache__/
153 | *.py[cod]
154 | *$py.class
155 |
156 | # C extensions
157 | *.so
158 |
159 | # Distribution / packaging
160 | .Python
161 | build/
162 | develop-eggs/
163 | dist/
164 | downloads/
165 | eggs/
166 | .eggs/
167 | lib/
168 | lib64/
169 | parts/
170 | sdist/
171 | var/
172 | wheels/
173 | share/python-wheels/
174 | *.egg-info/
175 | .installed.cfg
176 | *.egg
177 | MANIFEST
178 |
179 | # PyInstaller
180 | # Usually these files are written by a python script from a template
181 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
182 | *.manifest
183 | *.spec
184 |
185 | # Installer logs
186 | pip-log.txt
187 | pip-delete-this-directory.txt
188 |
189 | # Unit test / coverage reports
190 | htmlcov/
191 | .tox/
192 | .nox/
193 | .coverage
194 | .coverage.*
195 | .cache
196 | nosetests.xml
197 | coverage.xml
198 | *.cover
199 | *.py,cover
200 | .hypothesis/
201 | .pytest_cache/
202 | cover/
203 |
204 | # Translations
205 | *.mo
206 | *.pot
207 |
208 | # Django stuff:
209 | *.log
210 | local_settings.py
211 | db.sqlite3
212 | db.sqlite3-journal
213 |
214 | # Flask stuff:
215 | instance/
216 | .webassets-cache
217 |
218 | # Scrapy stuff:
219 | .scrapy
220 |
221 | # Sphinx documentation
222 | docs/_build/
223 |
224 | # PyBuilder
225 | .pybuilder/
226 | target/
227 |
228 | # Jupyter Notebook
229 | .ipynb_checkpoints
230 |
231 | # IPython
232 | profile_default/
233 | ipython_config.py
234 |
235 | # pyenv
236 | # For a library or package, you might want to ignore these files since the code is
237 | # intended to run in multiple environments; otherwise, check them in:
238 | # .python-version
239 |
240 | # pipenv
241 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
242 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
243 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
244 | # install all needed dependencies.
245 | #Pipfile.lock
246 |
247 | # poetry
248 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
249 | # This is especially recommended for binary packages to ensure reproducibility, and is more
250 | # commonly ignored for libraries.
251 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
252 | #poetry.lock
253 |
254 | # pdm
255 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
256 | #pdm.lock
257 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
258 | # in version control.
259 | # https://pdm.fming.dev/#use-with-ide
260 | .pdm.toml
261 |
262 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
263 | __pypackages__/
264 |
265 | # Celery stuff
266 | celerybeat-schedule
267 | celerybeat.pid
268 |
269 | # SageMath parsed files
270 | *.sage.py
271 |
272 | # Environments
273 | .env
274 | .venv
275 | env/
276 | venv/
277 | ENV/
278 | env.bak/
279 | venv.bak/
280 |
281 | # Spyder project settings
282 | .spyderproject
283 | .spyproject
284 |
285 | # Rope project settings
286 | .ropeproject
287 |
288 | # mkdocs documentation
289 | /site
290 |
291 | # mypy
292 | .mypy_cache/
293 | .dmypy.json
294 | dmypy.json
295 |
296 | # Pyre type checker
297 | .pyre/
298 |
299 | # pytype static type analyzer
300 | .pytype/
301 |
302 | # Cython debug symbols
303 | cython_debug/
304 |
305 | # PyCharm
306 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
307 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
308 | # and can be added to the global gitignore or merged into this file. For a more nuclear
309 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
310 | #.idea/
311 |
312 | ### VisualStudioCode ###
313 | .vscode/*
314 | !.vscode/settings.json
315 | !.vscode/tasks.json
316 | !.vscode/launch.json
317 | !.vscode/extensions.json
318 | !.vscode/*.code-snippets
319 |
320 | # Local History for Visual Studio Code
321 | .history/
322 |
323 | # Built Visual Studio Code Extensions
324 | *.vsix
325 |
326 | ### VisualStudioCode Patch ###
327 | # Ignore all local history of files
328 | .history
329 | .ionide
330 |
331 | # Support for Project snippet scope
332 | .vscode/*.code-snippets
333 |
334 | # Ignore code-workspaces
335 | *.code-workspace
336 |
337 | ### Windows ###
338 | # Windows thumbnail cache files
339 | Thumbs.db
340 | Thumbs.db:encryptable
341 | ehthumbs.db
342 | ehthumbs_vista.db
343 |
344 | # Dump file
345 | *.stackdump
346 |
347 | # Folder config file
348 | [Dd]esktop.ini
349 |
350 | # Recycle Bin used on file shares
351 | $RECYCLE.BIN/
352 |
353 | # Windows Installer files
354 | *.cab
355 | *.msi
356 | *.msix
357 | *.msm
358 | *.msp
359 |
360 | # Windows shortcuts
361 | *.lnk
362 |
363 | # End of https://www.toptal.com/developers/gitignore/api/visualstudiocode,python,pycharm+all,macos,windows,linux
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/RCV1 Dataset Visualization/model/README.md:
--------------------------------------------------------------------------------
1 |
2 | This directory is left empty so that the repository's storage does not grow to an unreasonable size.
3 |
4 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 | In The Name Of GOD
3 |
4 |
5 |
6 | [](https://github.com/FarshidNooshi/TensorFlow-Notebooks/actions/workflows/action.yml)
7 |
8 | # TensorFlow Notebooks
9 |
10 | This repository hosts my extra work and projects on machine learning and deep learning problems with the **TensorFlow platform**. The repository contains several folders, each of which is dedicated to a specific course (or specialization) or project.
11 |
12 | ## TensorFlow Developer
13 |
14 | This folder contains my work (assignments & labs) for the TensorFlow Developer Coursera Specialization and the courses I have taken for that specialization. Below is the list of all the specializations and courses I have completed, with their respective certificates.
15 |
16 | - [**Machine Learning**](https://www.coursera.org/account/accomplishments/certificate/8YFX6GGF8PB9) by Stanford University
17 | - [**TensorFlow Developer Specialization**](https://www.coursera.org/account/accomplishments/specialization/certificate/GS2KGD5NEU3D) by DeepLearning.AI
18 | - Introduction to TensorFlow for Artificial Intelligence, Machine Learning, and Deep Learning
19 | - Convolutional Neural Networks in TensorFlow
20 | - Natural Language Processing in TensorFlow
21 | - Sequences, Time Series and Prediction
22 | - [**Deep Learning Specialization**](https://www.coursera.org/account/accomplishments/specialization/certificate/KAC9TXFGAVPA) by DeepLearning.AI
23 | - Neural Networks and Deep Learning
24 | - Improving Deep Neural Networks: Hyperparameter Tuning, Regularization and Optimization
25 | - Structuring Machine Learning Projects
26 | - Convolutional Neural Networks
27 | - Sequence Models
28 | - **Reinforcement Learning Specialization** by University of Alberta
29 | - Fundamentals of Reinforcement Learning
30 | - [**Introduction to Artificial Intelligence (AI)**](https://www.coursera.org/account/accomplishments/certificate/4BBSHBTDPSXR) by IBM
31 |
32 | ## Seeds Dataset Classifier
33 |
34 | This folder contains a classifier for the Seeds dataset from [here](https://archive.ics.uci.edu/ml/datasets/seeds). The data is first preprocessed with standard normalization and then fed to various neural-network architectures to study the overfitting effect and the learning curves.
35 | For a better understanding of the classifier, **TensorBoard** is used to analyze the training results, and other callbacks such as
36 | early stopping are also used when fitting the models. The _Pandas_ library was used for preprocessing the data; a minimal sketch of the pipeline is shown below.
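
The notebook itself holds the exact code; the following is only a rough, hedged sketch of what such a pipeline can look like. The layer sizes, validation split, and number of epochs here are illustrative assumptions, not the notebook's actual settings.

```python
import pandas as pd
import tensorflow as tf

# Load the whitespace-separated Seeds data: 7 features plus a class label in {1, 2, 3}.
df = pd.read_csv("data/seeds_dataset.txt", sep=r"\s+", header=None)
features = df.iloc[:, :7].astype("float32")
labels = df.iloc[:, 7].astype(int) - 1  # shift labels to 0..2

# Standard normalization with Pandas: zero mean, unit variance per feature.
features = (features - features.mean()) / features.std()

# A small illustrative architecture; the notebook compares several to study overfitting.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(32, activation="relu", input_shape=(7,)),
    tf.keras.layers.Dense(16, activation="relu"),
    tf.keras.layers.Dense(3, activation="softmax"),
])
model.compile(optimizer="adam",
              loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])

# Early stopping plus TensorBoard logging, mirroring the callbacks described above.
callbacks = [
    tf.keras.callbacks.EarlyStopping(monitor="val_loss", patience=10, restore_best_weights=True),
    tf.keras.callbacks.TensorBoard(log_dir="logs"),
]
model.fit(features.values, labels.values, validation_split=0.2, epochs=200, callbacks=callbacks)
```

The resulting event files (like the ones under `Seeds Dataset Classifier/logs/`) can then be inspected with `tensorboard --logdir logs`.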
37 |
38 | ## RCV1 Dataset Visualization
39 | In this project, the RCV1 dataset is used for data visualization.
40 | The dataset is available at the following link: [RCV1 Dataset](https://scikit-learn.org/0.18/datasets/rcv1.html).
41 | The dataset is a collection of Reuters news articles (Reuters Corpus Volume I). The articles are divided into 5 categories (Business, Entertainment, Politics, Sport, and Tech), and the visualization is done with a Self-Organizing Map (SOM) and K-Means clustering; a minimal sketch of such a pipeline follows.
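
The notebooks define the actual workflow; the snippet below is a hedged sketch only. It assumes the `minisom` package for the SOM, a 5,000-document random subsample, and a TruncatedSVD reduction to 50 dimensions, none of which are taken from the notebooks themselves.

```python
import numpy as np
from minisom import MiniSom              # third-party SOM package (assumed here)
from sklearn.cluster import KMeans
from sklearn.datasets import fetch_rcv1
from sklearn.decomposition import TruncatedSVD

# Fetch the RCV1 TF-IDF matrix (sparse) and take a manageable random sample of rows.
rcv1 = fetch_rcv1()
rng = np.random.default_rng(0)
idx = rng.choice(rcv1.data.shape[0], size=5000, replace=False)
X = TruncatedSVD(n_components=50, random_state=0).fit_transform(rcv1.data[idx])

# K-Means clustering of the reduced document vectors.
km_labels = KMeans(n_clusters=5, n_init=10, random_state=0).fit_predict(X)

# Train a small SOM and map each document to its best matching unit on the grid.
som = MiniSom(10, 10, X.shape[1], sigma=1.0, learning_rate=0.5, random_seed=0)
som.random_weights_init(X)
som.train_random(X, 1000)
bmus = np.array([som.winner(x) for x in X])  # (row, col) grid coordinates per document
```

Plotting the best matching units, colored by the K-Means labels, gives the kind of 2-D map the visualization notebooks aim for.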
42 |
43 | # Contribution
44 |
45 | If you find a bug or typo, please raise an issue :)
46 |
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/data/seeds_dataset.txt:
--------------------------------------------------------------------------------
1 | 15.26 14.84 0.871 5.763 3.312 2.221 5.22 1
2 | 14.88 14.57 0.8811 5.554 3.333 1.018 4.956 1
3 | 14.29 14.09 0.905 5.291 3.337 2.699 4.825 1
4 | 13.84 13.94 0.8955 5.324 3.379 2.259 4.805 1
5 | 16.14 14.99 0.9034 5.658 3.562 1.355 5.175 1
6 | 14.38 14.21 0.8951 5.386 3.312 2.462 4.956 1
7 | 14.69 14.49 0.8799 5.563 3.259 3.586 5.219 1
8 | 14.11 14.1 0.8911 5.42 3.302 2.7 5 1
9 | 16.63 15.46 0.8747 6.053 3.465 2.04 5.877 1
10 | 16.44 15.25 0.888 5.884 3.505 1.969 5.533 1
11 | 15.26 14.85 0.8696 5.714 3.242 4.543 5.314 1
12 | 14.03 14.16 0.8796 5.438 3.201 1.717 5.001 1
13 | 13.89 14.02 0.888 5.439 3.199 3.986 4.738 1
14 | 13.78 14.06 0.8759 5.479 3.156 3.136 4.872 1
15 | 13.74 14.05 0.8744 5.482 3.114 2.932 4.825 1
16 | 14.59 14.28 0.8993 5.351 3.333 4.185 4.781 1
17 | 13.99 13.83 0.9183 5.119 3.383 5.234 4.781 1
18 | 15.69 14.75 0.9058 5.527 3.514 1.599 5.046 1
19 | 14.7 14.21 0.9153 5.205 3.466 1.767 4.649 1
20 | 12.72 13.57 0.8686 5.226 3.049 4.102 4.914 1
21 | 14.16 14.4 0.8584 5.658 3.129 3.072 5.176 1
22 | 14.11 14.26 0.8722 5.52 3.168 2.688 5.219 1
23 | 15.88 14.9 0.8988 5.618 3.507 0.7651 5.091 1
24 | 12.08 13.23 0.8664 5.099 2.936 1.415 4.961 1
25 | 15.01 14.76 0.8657 5.789 3.245 1.791 5.001 1
26 | 16.19 15.16 0.8849 5.833 3.421 0.903 5.307 1
27 | 13.02 13.76 0.8641 5.395 3.026 3.373 4.825 1
28 | 12.74 13.67 0.8564 5.395 2.956 2.504 4.869 1
29 | 14.11 14.18 0.882 5.541 3.221 2.754 5.038 1
30 | 13.45 14.02 0.8604 5.516 3.065 3.531 5.097 1
31 | 13.16 13.82 0.8662 5.454 2.975 0.8551 5.056 1
32 | 15.49 14.94 0.8724 5.757 3.371 3.412 5.228 1
33 | 14.09 14.41 0.8529 5.717 3.186 3.92 5.299 1
34 | 13.94 14.17 0.8728 5.585 3.15 2.124 5.012 1
35 | 15.05 14.68 0.8779 5.712 3.328 2.129 5.36 1
36 | 16.12 15 0.9 5.709 3.485 2.27 5.443 1
37 | 16.2 15.27 0.8734 5.826 3.464 2.823 5.527 1
38 | 17.08 15.38 0.9079 5.832 3.683 2.956 5.484 1
39 | 14.8 14.52 0.8823 5.656 3.288 3.112 5.309 1
40 | 14.28 14.17 0.8944 5.397 3.298 6.685 5.001 1
41 | 13.54 13.85 0.8871 5.348 3.156 2.587 5.178 1
42 | 13.5 13.85 0.8852 5.351 3.158 2.249 5.176 1
43 | 13.16 13.55 0.9009 5.138 3.201 2.461 4.783 1
44 | 15.5 14.86 0.882 5.877 3.396 4.711 5.528 1
45 | 15.11 14.54 0.8986 5.579 3.462 3.128 5.18 1
46 | 13.8 14.04 0.8794 5.376 3.155 1.56 4.961 1
47 | 15.36 14.76 0.8861 5.701 3.393 1.367 5.132 1
48 | 14.99 14.56 0.8883 5.57 3.377 2.958 5.175 1
49 | 14.79 14.52 0.8819 5.545 3.291 2.704 5.111 1
50 | 14.86 14.67 0.8676 5.678 3.258 2.129 5.351 1
51 | 14.43 14.4 0.8751 5.585 3.272 3.975 5.144 1
52 | 15.78 14.91 0.8923 5.674 3.434 5.593 5.136 1
53 | 14.49 14.61 0.8538 5.715 3.113 4.116 5.396 1
54 | 14.33 14.28 0.8831 5.504 3.199 3.328 5.224 1
55 | 14.52 14.6 0.8557 5.741 3.113 1.481 5.487 1
56 | 15.03 14.77 0.8658 5.702 3.212 1.933 5.439 1
57 | 14.46 14.35 0.8818 5.388 3.377 2.802 5.044 1
58 | 14.92 14.43 0.9006 5.384 3.412 1.142 5.088 1
59 | 15.38 14.77 0.8857 5.662 3.419 1.999 5.222 1
60 | 12.11 13.47 0.8392 5.159 3.032 1.502 4.519 1
61 | 11.42 12.86 0.8683 5.008 2.85 2.7 4.607 1
62 | 11.23 12.63 0.884 4.902 2.879 2.269 4.703 1
63 | 12.36 13.19 0.8923 5.076 3.042 3.22 4.605 1
64 | 13.22 13.84 0.868 5.395 3.07 4.157 5.088 1
65 | 12.78 13.57 0.8716 5.262 3.026 1.176 4.782 1
66 | 12.88 13.5 0.8879 5.139 3.119 2.352 4.607 1
67 | 14.34 14.37 0.8726 5.63 3.19 1.313 5.15 1
68 | 14.01 14.29 0.8625 5.609 3.158 2.217 5.132 1
69 | 14.37 14.39 0.8726 5.569 3.153 1.464 5.3 1
70 | 12.73 13.75 0.8458 5.412 2.882 3.533 5.067 1
71 | 17.63 15.98 0.8673 6.191 3.561 4.076 6.06 2
72 | 16.84 15.67 0.8623 5.998 3.484 4.675 5.877 2
73 | 17.26 15.73 0.8763 5.978 3.594 4.539 5.791 2
74 | 19.11 16.26 0.9081 6.154 3.93 2.936 6.079 2
75 | 16.82 15.51 0.8786 6.017 3.486 4.004 5.841 2
76 | 16.77 15.62 0.8638 5.927 3.438 4.92 5.795 2
77 | 17.32 15.91 0.8599 6.064 3.403 3.824 5.922 2
78 | 20.71 17.23 0.8763 6.579 3.814 4.451 6.451 2
79 | 18.94 16.49 0.875 6.445 3.639 5.064 6.362 2
80 | 17.12 15.55 0.8892 5.85 3.566 2.858 5.746 2
81 | 16.53 15.34 0.8823 5.875 3.467 5.532 5.88 2
82 | 18.72 16.19 0.8977 6.006 3.857 5.324 5.879 2
83 | 20.2 16.89 0.8894 6.285 3.864 5.173 6.187 2
84 | 19.57 16.74 0.8779 6.384 3.772 1.472 6.273 2
85 | 19.51 16.71 0.878 6.366 3.801 2.962 6.185 2
86 | 18.27 16.09 0.887 6.173 3.651 2.443 6.197 2
87 | 18.88 16.26 0.8969 6.084 3.764 1.649 6.109 2
88 | 18.98 16.66 0.859 6.549 3.67 3.691 6.498 2
89 | 21.18 17.21 0.8989 6.573 4.033 5.78 6.231 2
90 | 20.88 17.05 0.9031 6.45 4.032 5.016 6.321 2
91 | 20.1 16.99 0.8746 6.581 3.785 1.955 6.449 2
92 | 18.76 16.2 0.8984 6.172 3.796 3.12 6.053 2
93 | 18.81 16.29 0.8906 6.272 3.693 3.237 6.053 2
94 | 18.59 16.05 0.9066 6.037 3.86 6.001 5.877 2
95 | 18.36 16.52 0.8452 6.666 3.485 4.933 6.448 2
96 | 16.87 15.65 0.8648 6.139 3.463 3.696 5.967 2
97 | 19.31 16.59 0.8815 6.341 3.81 3.477 6.238 2
98 | 18.98 16.57 0.8687 6.449 3.552 2.144 6.453 2
99 | 18.17 16.26 0.8637 6.271 3.512 2.853 6.273 2
100 | 18.72 16.34 0.881 6.219 3.684 2.188 6.097 2
101 | 16.41 15.25 0.8866 5.718 3.525 4.217 5.618 2
102 | 17.99 15.86 0.8992 5.89 3.694 2.068 5.837 2
103 | 19.46 16.5 0.8985 6.113 3.892 4.308 6.009 2
104 | 19.18 16.63 0.8717 6.369 3.681 3.357 6.229 2
105 | 18.95 16.42 0.8829 6.248 3.755 3.368 6.148 2
106 | 18.83 16.29 0.8917 6.037 3.786 2.553 5.879 2
107 | 18.85 16.17 0.9056 6.152 3.806 2.843 6.2 2
108 | 17.63 15.86 0.88 6.033 3.573 3.747 5.929 2
109 | 19.94 16.92 0.8752 6.675 3.763 3.252 6.55 2
110 | 18.55 16.22 0.8865 6.153 3.674 1.738 5.894 2
111 | 18.45 16.12 0.8921 6.107 3.769 2.235 5.794 2
112 | 19.38 16.72 0.8716 6.303 3.791 3.678 5.965 2
113 | 19.13 16.31 0.9035 6.183 3.902 2.109 5.924 2
114 | 19.14 16.61 0.8722 6.259 3.737 6.682 6.053 2
115 | 20.97 17.25 0.8859 6.563 3.991 4.677 6.316 2
116 | 19.06 16.45 0.8854 6.416 3.719 2.248 6.163 2
117 | 18.96 16.2 0.9077 6.051 3.897 4.334 5.75 2
118 | 19.15 16.45 0.889 6.245 3.815 3.084 6.185 2
119 | 18.89 16.23 0.9008 6.227 3.769 3.639 5.966 2
120 | 20.03 16.9 0.8811 6.493 3.857 3.063 6.32 2
121 | 20.24 16.91 0.8897 6.315 3.962 5.901 6.188 2
122 | 18.14 16.12 0.8772 6.059 3.563 3.619 6.011 2
123 | 16.17 15.38 0.8588 5.762 3.387 4.286 5.703 2
124 | 18.43 15.97 0.9077 5.98 3.771 2.984 5.905 2
125 | 15.99 14.89 0.9064 5.363 3.582 3.336 5.144 2
126 | 18.75 16.18 0.8999 6.111 3.869 4.188 5.992 2
127 | 18.65 16.41 0.8698 6.285 3.594 4.391 6.102 2
128 | 17.98 15.85 0.8993 5.979 3.687 2.257 5.919 2
129 | 20.16 17.03 0.8735 6.513 3.773 1.91 6.185 2
130 | 17.55 15.66 0.8991 5.791 3.69 5.366 5.661 2
131 | 18.3 15.89 0.9108 5.979 3.755 2.837 5.962 2
132 | 18.94 16.32 0.8942 6.144 3.825 2.908 5.949 2
133 | 15.38 14.9 0.8706 5.884 3.268 4.462 5.795 2
134 | 16.16 15.33 0.8644 5.845 3.395 4.266 5.795 2
135 | 15.56 14.89 0.8823 5.776 3.408 4.972 5.847 2
136 | 15.38 14.66 0.899 5.477 3.465 3.6 5.439 2
137 | 17.36 15.76 0.8785 6.145 3.574 3.526 5.971 2
138 | 15.57 15.15 0.8527 5.92 3.231 2.64 5.879 2
139 | 15.6 15.11 0.858 5.832 3.286 2.725 5.752 2
140 | 16.23 15.18 0.885 5.872 3.472 3.769 5.922 2
141 | 13.07 13.92 0.848 5.472 2.994 5.304 5.395 3
142 | 13.32 13.94 0.8613 5.541 3.073 7.035 5.44 3
143 | 13.34 13.95 0.862 5.389 3.074 5.995 5.307 3
144 | 12.22 13.32 0.8652 5.224 2.967 5.469 5.221 3
145 | 11.82 13.4 0.8274 5.314 2.777 4.471 5.178 3
146 | 11.21 13.13 0.8167 5.279 2.687 6.169 5.275 3
147 | 11.43 13.13 0.8335 5.176 2.719 2.221 5.132 3
148 | 12.49 13.46 0.8658 5.267 2.967 4.421 5.002 3
149 | 12.7 13.71 0.8491 5.386 2.911 3.26 5.316 3
150 | 10.79 12.93 0.8107 5.317 2.648 5.462 5.194 3
151 | 11.83 13.23 0.8496 5.263 2.84 5.195 5.307 3
152 | 12.01 13.52 0.8249 5.405 2.776 6.992 5.27 3
153 | 12.26 13.6 0.8333 5.408 2.833 4.756 5.36 3
154 | 11.18 13.04 0.8266 5.22 2.693 3.332 5.001 3
155 | 11.36 13.05 0.8382 5.175 2.755 4.048 5.263 3
156 | 11.19 13.05 0.8253 5.25 2.675 5.813 5.219 3
157 | 11.34 12.87 0.8596 5.053 2.849 3.347 5.003 3
158 | 12.13 13.73 0.8081 5.394 2.745 4.825 5.22 3
159 | 11.75 13.52 0.8082 5.444 2.678 4.378 5.31 3
160 | 11.49 13.22 0.8263 5.304 2.695 5.388 5.31 3
161 | 12.54 13.67 0.8425 5.451 2.879 3.082 5.491 3
162 | 12.02 13.33 0.8503 5.35 2.81 4.271 5.308 3
163 | 12.05 13.41 0.8416 5.267 2.847 4.988 5.046 3
164 | 12.55 13.57 0.8558 5.333 2.968 4.419 5.176 3
165 | 11.14 12.79 0.8558 5.011 2.794 6.388 5.049 3
166 | 12.1 13.15 0.8793 5.105 2.941 2.201 5.056 3
167 | 12.44 13.59 0.8462 5.319 2.897 4.924 5.27 3
168 | 12.15 13.45 0.8443 5.417 2.837 3.638 5.338 3
169 | 11.35 13.12 0.8291 5.176 2.668 4.337 5.132 3
170 | 11.24 13 0.8359 5.09 2.715 3.521 5.088 3
171 | 11.02 13 0.8189 5.325 2.701 6.735 5.163 3
172 | 11.55 13.1 0.8455 5.167 2.845 6.715 4.956 3
173 | 11.27 12.97 0.8419 5.088 2.763 4.309 5 3
174 | 11.4 13.08 0.8375 5.136 2.763 5.588 5.089 3
175 | 10.83 12.96 0.8099 5.278 2.641 5.182 5.185 3
176 | 10.8 12.57 0.859 4.981 2.821 4.773 5.063 3
177 | 11.26 13.01 0.8355 5.186 2.71 5.335 5.092 3
178 | 10.74 12.73 0.8329 5.145 2.642 4.702 4.963 3
179 | 11.48 13.05 0.8473 5.18 2.758 5.876 5.002 3
180 | 12.21 13.47 0.8453 5.357 2.893 1.661 5.178 3
181 | 11.41 12.95 0.856 5.09 2.775 4.957 4.825 3
182 | 12.46 13.41 0.8706 5.236 3.017 4.987 5.147 3
183 | 12.19 13.36 0.8579 5.24 2.909 4.857 5.158 3
184 | 11.65 13.07 0.8575 5.108 2.85 5.209 5.135 3
185 | 12.89 13.77 0.8541 5.495 3.026 6.185 5.316 3
186 | 11.56 13.31 0.8198 5.363 2.683 4.062 5.182 3
187 | 11.81 13.45 0.8198 5.413 2.716 4.898 5.352 3
188 | 10.91 12.8 0.8372 5.088 2.675 4.179 4.956 3
189 | 11.23 12.82 0.8594 5.089 2.821 7.524 4.957 3
190 | 10.59 12.41 0.8648 4.899 2.787 4.975 4.794 3
191 | 10.93 12.8 0.839 5.046 2.717 5.398 5.045 3
192 | 11.27 12.86 0.8563 5.091 2.804 3.985 5.001 3
193 | 11.87 13.02 0.8795 5.132 2.953 3.597 5.132 3
194 | 10.82 12.83 0.8256 5.18 2.63 4.853 5.089 3
195 | 12.11 13.27 0.8639 5.236 2.975 4.132 5.012 3
196 | 12.8 13.47 0.886 5.16 3.126 4.873 4.914 3
197 | 12.79 13.53 0.8786 5.224 3.054 5.483 4.958 3
198 | 13.37 13.78 0.8849 5.32 3.128 4.67 5.091 3
199 | 12.62 13.67 0.8481 5.41 2.911 3.306 5.231 3
200 | 12.76 13.38 0.8964 5.073 3.155 2.828 4.83 3
201 | 12.38 13.44 0.8609 5.219 2.989 5.472 5.045 3
202 | 12.67 13.32 0.8977 4.984 3.135 2.3 4.745 3
203 | 11.18 12.72 0.868 5.009 2.81 4.051 4.828 3
204 | 12.7 13.41 0.8874 5.183 3.091 8.456 5 3
205 | 12.37 13.47 0.8567 5.204 2.96 3.919 5.001 3
206 | 12.19 13.2 0.8783 5.137 2.981 3.631 4.87 3
207 | 11.23 12.88 0.8511 5.14 2.795 4.325 5.003 3
208 | 13.2 13.66 0.8883 5.236 3.232 8.315 5.056 3
209 | 11.84 13.21 0.8521 5.175 2.836 3.598 5.044 3
210 | 12.3 13.34 0.8684 5.243 2.974 5.637 5.063 3
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/logs/train/events.out.tfevents.1659337186.farshids-MacBook-Pro.local.15779.4.v2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/logs/train/events.out.tfevents.1659337186.farshids-MacBook-Pro.local.15779.4.v2
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/logs/train/events.out.tfevents.1659369894.farshids-MacBook-Pro.local.2214.0.v2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/logs/train/events.out.tfevents.1659369894.farshids-MacBook-Pro.local.2214.0.v2
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/logs/train/events.out.tfevents.1659370512.farshids-MacBook-Pro.local.5988.0.v2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/logs/train/events.out.tfevents.1659370512.farshids-MacBook-Pro.local.5988.0.v2
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/logs/train/events.out.tfevents.1659371074.farshids-MacBook-Pro.local.5988.2.v2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/logs/train/events.out.tfevents.1659371074.farshids-MacBook-Pro.local.5988.2.v2
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/logs/train/events.out.tfevents.1659371095.farshids-MacBook-Pro.local.5988.4.v2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/logs/train/events.out.tfevents.1659371095.farshids-MacBook-Pro.local.5988.4.v2
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/logs/train/events.out.tfevents.1659371108.farshids-MacBook-Pro.local.5988.6.v2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/logs/train/events.out.tfevents.1659371108.farshids-MacBook-Pro.local.5988.6.v2
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/logs/train/events.out.tfevents.1659371844.farshids-MacBook-Pro.local.12364.0.v2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/logs/train/events.out.tfevents.1659371844.farshids-MacBook-Pro.local.12364.0.v2
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/logs/train/events.out.tfevents.1659371871.farshids-MacBook-Pro.local.12364.2.v2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/logs/train/events.out.tfevents.1659371871.farshids-MacBook-Pro.local.12364.2.v2
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/logs/train/events.out.tfevents.1659371974.farshids-MacBook-Pro.local.12364.4.v2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/logs/train/events.out.tfevents.1659371974.farshids-MacBook-Pro.local.12364.4.v2
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/logs/train/events.out.tfevents.1659372079.farshids-MacBook-Pro.local.13268.0.v2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/logs/train/events.out.tfevents.1659372079.farshids-MacBook-Pro.local.13268.0.v2
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/logs/train/events.out.tfevents.1659372097.farshids-MacBook-Pro.local.13268.2.v2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/logs/train/events.out.tfevents.1659372097.farshids-MacBook-Pro.local.13268.2.v2
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/logs/train/events.out.tfevents.1659372166.farshids-MacBook-Pro.local.13268.4.v2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/logs/train/events.out.tfevents.1659372166.farshids-MacBook-Pro.local.13268.4.v2
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/logs/train/events.out.tfevents.1659372179.farshids-MacBook-Pro.local.13268.6.v2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/logs/train/events.out.tfevents.1659372179.farshids-MacBook-Pro.local.13268.6.v2
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/logs/train/events.out.tfevents.1659372277.farshids-MacBook-Pro.local.13268.8.v2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/logs/train/events.out.tfevents.1659372277.farshids-MacBook-Pro.local.13268.8.v2
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/logs/train/events.out.tfevents.1659372301.farshids-MacBook-Pro.local.13268.10.v2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/logs/train/events.out.tfevents.1659372301.farshids-MacBook-Pro.local.13268.10.v2
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/logs/train/events.out.tfevents.1659372388.farshids-MacBook-Pro.local.13268.12.v2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/logs/train/events.out.tfevents.1659372388.farshids-MacBook-Pro.local.13268.12.v2
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/logs/train/events.out.tfevents.1659372459.farshids-MacBook-Pro.local.13268.14.v2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/logs/train/events.out.tfevents.1659372459.farshids-MacBook-Pro.local.13268.14.v2
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/logs/train/events.out.tfevents.1659372468.farshids-MacBook-Pro.local.13268.16.v2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/logs/train/events.out.tfevents.1659372468.farshids-MacBook-Pro.local.13268.16.v2
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/logs/train/events.out.tfevents.1659372594.farshids-MacBook-Pro.local.15171.0.v2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/logs/train/events.out.tfevents.1659372594.farshids-MacBook-Pro.local.15171.0.v2
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/logs/train/events.out.tfevents.1659372612.farshids-MacBook-Pro.local.15171.2.v2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/logs/train/events.out.tfevents.1659372612.farshids-MacBook-Pro.local.15171.2.v2
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/logs/train/events.out.tfevents.1659375153.farshids-MacBook-Pro.local.21272.0.v2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/logs/train/events.out.tfevents.1659375153.farshids-MacBook-Pro.local.21272.0.v2
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/logs/train/events.out.tfevents.1659375170.farshids-MacBook-Pro.local.21272.2.v2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/logs/train/events.out.tfevents.1659375170.farshids-MacBook-Pro.local.21272.2.v2
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/logs/validation/events.out.tfevents.1659337186.farshids-MacBook-Pro.local.15779.5.v2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/logs/validation/events.out.tfevents.1659337186.farshids-MacBook-Pro.local.15779.5.v2
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/logs/validation/events.out.tfevents.1659369894.farshids-MacBook-Pro.local.2214.1.v2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/logs/validation/events.out.tfevents.1659369894.farshids-MacBook-Pro.local.2214.1.v2
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/logs/validation/events.out.tfevents.1659370512.farshids-MacBook-Pro.local.5988.1.v2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/logs/validation/events.out.tfevents.1659370512.farshids-MacBook-Pro.local.5988.1.v2
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/logs/validation/events.out.tfevents.1659371075.farshids-MacBook-Pro.local.5988.3.v2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/logs/validation/events.out.tfevents.1659371075.farshids-MacBook-Pro.local.5988.3.v2
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/logs/validation/events.out.tfevents.1659371095.farshids-MacBook-Pro.local.5988.5.v2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/logs/validation/events.out.tfevents.1659371095.farshids-MacBook-Pro.local.5988.5.v2
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/logs/validation/events.out.tfevents.1659371108.farshids-MacBook-Pro.local.5988.7.v2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/logs/validation/events.out.tfevents.1659371108.farshids-MacBook-Pro.local.5988.7.v2
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/logs/validation/events.out.tfevents.1659371845.farshids-MacBook-Pro.local.12364.1.v2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/logs/validation/events.out.tfevents.1659371845.farshids-MacBook-Pro.local.12364.1.v2
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/logs/validation/events.out.tfevents.1659371872.farshids-MacBook-Pro.local.12364.3.v2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/logs/validation/events.out.tfevents.1659371872.farshids-MacBook-Pro.local.12364.3.v2
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/logs/validation/events.out.tfevents.1659371974.farshids-MacBook-Pro.local.12364.5.v2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/logs/validation/events.out.tfevents.1659371974.farshids-MacBook-Pro.local.12364.5.v2
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/logs/validation/events.out.tfevents.1659372080.farshids-MacBook-Pro.local.13268.1.v2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/logs/validation/events.out.tfevents.1659372080.farshids-MacBook-Pro.local.13268.1.v2
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/logs/validation/events.out.tfevents.1659372097.farshids-MacBook-Pro.local.13268.3.v2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/logs/validation/events.out.tfevents.1659372097.farshids-MacBook-Pro.local.13268.3.v2
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/logs/validation/events.out.tfevents.1659372166.farshids-MacBook-Pro.local.13268.5.v2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/logs/validation/events.out.tfevents.1659372166.farshids-MacBook-Pro.local.13268.5.v2
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/logs/validation/events.out.tfevents.1659372180.farshids-MacBook-Pro.local.13268.7.v2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/logs/validation/events.out.tfevents.1659372180.farshids-MacBook-Pro.local.13268.7.v2
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/logs/validation/events.out.tfevents.1659372277.farshids-MacBook-Pro.local.13268.9.v2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/logs/validation/events.out.tfevents.1659372277.farshids-MacBook-Pro.local.13268.9.v2
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/logs/validation/events.out.tfevents.1659372301.farshids-MacBook-Pro.local.13268.11.v2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/logs/validation/events.out.tfevents.1659372301.farshids-MacBook-Pro.local.13268.11.v2
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/logs/validation/events.out.tfevents.1659372388.farshids-MacBook-Pro.local.13268.13.v2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/logs/validation/events.out.tfevents.1659372388.farshids-MacBook-Pro.local.13268.13.v2
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/logs/validation/events.out.tfevents.1659372459.farshids-MacBook-Pro.local.13268.15.v2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/logs/validation/events.out.tfevents.1659372459.farshids-MacBook-Pro.local.13268.15.v2
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/logs/validation/events.out.tfevents.1659372468.farshids-MacBook-Pro.local.13268.17.v2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/logs/validation/events.out.tfevents.1659372468.farshids-MacBook-Pro.local.13268.17.v2
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/logs/validation/events.out.tfevents.1659372594.farshids-MacBook-Pro.local.15171.1.v2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/logs/validation/events.out.tfevents.1659372594.farshids-MacBook-Pro.local.15171.1.v2
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/logs/validation/events.out.tfevents.1659372612.farshids-MacBook-Pro.local.15171.3.v2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/logs/validation/events.out.tfevents.1659372612.farshids-MacBook-Pro.local.15171.3.v2
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/logs/validation/events.out.tfevents.1659375153.farshids-MacBook-Pro.local.21272.1.v2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/logs/validation/events.out.tfevents.1659375153.farshids-MacBook-Pro.local.21272.1.v2
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/logs/validation/events.out.tfevents.1659375170.farshids-MacBook-Pro.local.21272.3.v2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/logs/validation/events.out.tfevents.1659375170.farshids-MacBook-Pro.local.21272.3.v2
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/model.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/model.png
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/models/model_non_overfitting.h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/models/model_non_overfitting.h5
--------------------------------------------------------------------------------
/Seeds Dataset Classifier/models/model_tensorboard.h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Seeds Dataset Classifier/models/model_tensorboard.h5
--------------------------------------------------------------------------------
/Tensorflow Developer/Convolutional Neural Networks in TensorFlow/README.md:
--------------------------------------------------------------------------------
1 |
2 | In The Name Of GOD
3 |
4 |
5 | # Tensorflow notebooks
6 |
7 | This directory contains my work (assignments & labs) for the Coursera TensorFlow Developer program, **Course 2**: **Convolutional Neural Networks in TensorFlow**. Below is the list of assignments and ungraded labs for this course.
8 |
9 | ## Convolutional Neural Networks in TensorFlow
10 |
11 | ## Week 1
12 |
13 | ### Assignment
14 |
15 | - Cats vs. Dogs _(C2W1_Assignment.ipynb)_
16 |
17 | ### Ungraded Labs
18 |
19 | 1. Using more sophisticated images with Convolutional Neural Networks _(C2_W1_Lab_1_cats_vs_dogs.ipynb)_
20 |
21 | ## Week 2
22 |
23 | ### Assignment
24 |
25 | - Cats vs. Dogs using Augmentation _(C2W2_Assignment.ipynb)_
26 |
27 | ### Ungraded Labs
28 |
29 | 1. Cats vs. Dogs with Augmentation _(C2_W2_Lab_1_cats_v_dogs_augmentation.ipynb)_
30 | 2. Horses vs. Humans with Augmentation _(C2_W2_Lab_2_horses_v_humans_augmentation.ipynb)_
31 |
32 | ## Week 3
33 |
34 | ### Assignment
35 |
36 | - Horses vs. Humans using Transfer Learning _(C2W3_Assignment.ipynb)_
37 |
38 | ### Ungraded Labs
39 |
40 | 1. Exploring Transfer Learning _(C2_W3_Lab_1_transfer_learning.ipynb)_
41 |
42 | ## Week 4
43 |
44 | ### Assignment
45 |
46 | - Multi-class Classifier _(C2W4_Assignment.ipynb)_
47 |
48 | ### Ungraded Labs
49 |
50 | 1. Classifying Rock, Paper, and Scissors _(C2_W4_Lab_1_multi_class_classifier.ipynb)_
51 |
--------------------------------------------------------------------------------
/Tensorflow Developer/Convolutional Neural Networks in TensorFlow/W2/ungraded_labs/C2_W2_Lab_2_horses_v_humans_augmentation.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "
"
8 | ]
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "metadata": {
13 | "id": "37v_yExZppEp"
14 | },
15 | "source": [
16 | "# Ungraded Lab: Data Augmentation on the Horses or Humans Dataset\n",
17 | "\n",
18 | "In the previous lab, you saw how data augmentation helped improve the model's performance on unseen data. By tweaking the cat and dog training images, the model was able to learn features that are also representative of the validation data. However, applying data augmentation requires good understanding of your dataset. Simply transforming it randomly will not always yield good results. \n",
19 | "\n",
20 | "In the next cells, you will apply the same techniques to the `Horses or Humans` dataset and analyze the results."
21 | ]
22 | },
23 | {
24 | "cell_type": "code",
25 | "execution_count": null,
26 | "metadata": {
27 | "id": "Lslf0vB3rQlU"
28 | },
29 | "outputs": [],
30 | "source": [
31 | "# Download the training set\n",
32 | "!wget https://storage.googleapis.com/tensorflow-1-public/course2/week3/horse-or-human.zip"
33 | ]
34 | },
35 | {
36 | "cell_type": "code",
37 | "execution_count": null,
38 | "metadata": {},
39 | "outputs": [],
40 | "source": [
41 | "# Download the validation set\n",
42 | "!wget https://storage.googleapis.com/tensorflow-1-public/course2/week3/validation-horse-or-human.zip"
43 | ]
44 | },
45 | {
46 | "cell_type": "code",
47 | "execution_count": null,
48 | "metadata": {
49 | "id": "RXZT2UsyIVe_"
50 | },
51 | "outputs": [],
52 | "source": [
53 | "import os\n",
54 | "import zipfile\n",
55 | "\n",
56 | "# Extract the archive\n",
57 | "zip_ref = zipfile.ZipFile('./horse-or-human.zip', 'r')\n",
58 | "zip_ref.extractall('tmp/horse-or-human')\n",
59 | "\n",
60 | "zip_ref = zipfile.ZipFile('./validation-horse-or-human.zip', 'r')\n",
61 | "zip_ref.extractall('tmp/validation-horse-or-human')\n",
62 | "\n",
63 | "zip_ref.close()\n",
64 | "\n",
65 | "# Directory with training horse pictures\n",
66 | "train_horse_dir = os.path.join('tmp/horse-or-human/horses')\n",
67 | "\n",
68 | "# Directory with training human pictures\n",
69 | "train_human_dir = os.path.join('tmp/horse-or-human/humans')\n",
70 | "\n",
71 | "# Directory with training horse pictures\n",
72 | "validation_horse_dir = os.path.join('tmp/validation-horse-or-human/horses')\n",
73 | "\n",
74 | "# Directory with training human pictures\n",
75 | "validation_human_dir = os.path.join('tmp/validation-horse-or-human/humans')"
76 | ]
77 | },
78 | {
79 | "cell_type": "code",
80 | "execution_count": null,
81 | "metadata": {
82 | "id": "PixZ2s5QbYQ3"
83 | },
84 | "outputs": [],
85 | "source": [
86 | "import tensorflow as tf\n",
87 | "\n",
88 | "# Build the model\n",
89 | "model = tf.keras.models.Sequential([\n",
90 | " # Note the input shape is the desired size of the image 300x300 with 3 bytes color\n",
91 | " # This is the first convolution\n",
92 | " tf.keras.layers.Conv2D(16, (3,3), activation='relu', input_shape=(300, 300, 3)),\n",
93 | " tf.keras.layers.MaxPooling2D(2, 2),\n",
94 | " # The second convolution\n",
95 | " tf.keras.layers.Conv2D(32, (3,3), activation='relu'),\n",
96 | " tf.keras.layers.MaxPooling2D(2,2),\n",
97 | " # The third convolution\n",
98 | " tf.keras.layers.Conv2D(64, (3,3), activation='relu'),\n",
99 | " tf.keras.layers.MaxPooling2D(2,2),\n",
100 | " # The fourth convolution\n",
101 | " tf.keras.layers.Conv2D(64, (3,3), activation='relu'),\n",
102 | " tf.keras.layers.MaxPooling2D(2,2),\n",
103 | " # The fifth convolution\n",
104 | " tf.keras.layers.Conv2D(64, (3,3), activation='relu'),\n",
105 | " tf.keras.layers.MaxPooling2D(2,2),\n",
106 | " # Flatten the results to feed into a DNN\n",
107 | " tf.keras.layers.Flatten(),\n",
108 | " # 512 neuron hidden layer\n",
109 | " tf.keras.layers.Dense(512, activation='relu'),\n",
110 | " # Only 1 output neuron. It will contain a value from 0-1 where 0 for 1 class ('horses') and 1 for the other ('humans')\n",
111 | " tf.keras.layers.Dense(1, activation='sigmoid')\n",
112 | "])"
113 | ]
114 | },
115 | {
116 | "cell_type": "code",
117 | "execution_count": null,
118 | "metadata": {
119 | "id": "8DHWhFP_uhq3"
120 | },
121 | "outputs": [],
122 | "source": [
123 | "from tensorflow.keras.optimizers import RMSprop\n",
124 | "\n",
125 | "# Set training parameters\n",
126 | "model.compile(loss='binary_crossentropy',\n",
127 | " optimizer=RMSprop(learning_rate=1e-4),\n",
128 | " metrics=['accuracy'])"
129 | ]
130 | },
131 | {
132 | "cell_type": "code",
133 | "execution_count": null,
134 | "metadata": {
135 | "id": "ClebU9NJg99G"
136 | },
137 | "outputs": [],
138 | "source": [
139 | "from tensorflow.keras.preprocessing.image import ImageDataGenerator\n",
140 | "\n",
141 | "# Apply data augmentation\n",
142 | "train_datagen = ImageDataGenerator(\n",
143 | " rescale=1./255,\n",
144 | " rotation_range=40,\n",
145 | " width_shift_range=0.2,\n",
146 | " height_shift_range=0.2,\n",
147 | " shear_range=0.2,\n",
148 | " zoom_range=0.2,\n",
149 | " horizontal_flip=True,\n",
150 | " fill_mode='nearest')\n",
151 | "\n",
152 | "validation_datagen = ImageDataGenerator(rescale=1/255)\n",
153 | "\n",
154 | "# Flow training images in batches of 128 using train_datagen generator\n",
155 | "train_generator = train_datagen.flow_from_directory(\n",
156 | " 'tmp/horse-or-human/', # This is the source directory for training images\n",
157 | " target_size=(300, 300), # All images will be resized to 150x150\n",
158 | " batch_size=128,\n",
159 | " # Since we use binary_crossentropy loss, we need binary labels\n",
160 | " class_mode='binary')\n",
161 | "\n",
162 | "# Flow training images in batches of 128 using train_datagen generator\n",
163 | "validation_generator = validation_datagen.flow_from_directory(\n",
164 | " 'tmp/validation-horse-or-human/', # This is the source directory for training images\n",
165 | " target_size=(300, 300), # All images will be resized to 150x150\n",
166 | " batch_size=32,\n",
167 | " # Since we use binary_crossentropy loss, we need binary labels\n",
168 | " class_mode='binary')"
169 | ]
170 | },
171 | {
172 | "cell_type": "code",
173 | "execution_count": null,
174 | "metadata": {
175 | "id": "Fb1_lgobv81m"
176 | },
177 | "outputs": [],
178 | "source": [
179 | "# Constant for epochs\n",
180 | "EPOCHS = 20\n",
181 | "\n",
182 | "# Train the model\n",
183 | "history = model.fit(\n",
184 | " train_generator,\n",
185 | " steps_per_epoch=8, \n",
186 | " epochs=EPOCHS,\n",
187 | " verbose=1,\n",
188 | " validation_data = validation_generator,\n",
189 | " validation_steps=8)"
190 | ]
191 | },
192 | {
193 | "cell_type": "code",
194 | "execution_count": null,
195 | "metadata": {
196 | "id": "7zNPRWOVJdOH"
197 | },
198 | "outputs": [],
199 | "source": [
200 | "import matplotlib.pyplot as plt\n",
201 | "\n",
202 | "# Plot the model results\n",
203 | "acc = history.history['accuracy']\n",
204 | "val_acc = history.history['val_accuracy']\n",
205 | "loss = history.history['loss']\n",
206 | "val_loss = history.history['val_loss']\n",
207 | "\n",
208 | "epochs = range(len(acc))\n",
209 | "\n",
210 | "plt.plot(epochs, acc, 'r', label='Training accuracy')\n",
211 | "plt.plot(epochs, val_acc, 'b', label='Validation accuracy')\n",
212 | "plt.title('Training and validation accuracy')\n",
213 | "\n",
214 | "plt.figure()\n",
215 | "\n",
216 | "plt.plot(epochs, loss, 'r', label='Training Loss')\n",
217 | "plt.plot(epochs, val_loss, 'b', label='Validation Loss')\n",
218 | "plt.title('Training and validation loss')\n",
219 | "plt.legend()\n",
220 | "\n",
221 | "plt.show()"
222 | ]
223 | },
224 | {
225 | "cell_type": "markdown",
226 | "metadata": {
227 | "id": "hwyabYvCsvtn"
228 | },
229 | "source": [
230 | "As you can see in the results, the preprocessing techniques used in augmenting the data did not help much in the results. The validation accuracy is fluctuating and not trending up like the training accuracy. This might be because the additional training data generated still do not represent the features in the validation data. For example, some human or horse poses in the validation set cannot be mimicked by the image processing techniques that `ImageDataGenerator` provides. It might also be that the background of the training images are also learned so the white background of the validation set is throwing the model off even with cropping. Try looking at the validation images in the `tmp/validation-horse-or-human` directory (note: if you are using Colab, you can use the file explorer on the left to explore the images) and see if you can augment the training images to match its characteristics. If this is not possible, then at this point you can consider other techniques and you will see that in next week's lessons."
231 | ]
232 | }
233 | ],
234 | "metadata": {
235 | "accelerator": "GPU",
236 | "colab": {
237 | "collapsed_sections": [],
238 | "name": "C2_W2_Lab_2_horses_v_humans_augmentation.ipynb",
239 | "private_outputs": true,
240 | "provenance": [
241 | {
242 | "file_id": "https://github.com/https-deeplearning-ai/tensorflow-1-public/blob/adding_C2/C2/W2/ungraded_labs/C2_W2_Lab_2_horses_v_humans_augmentation.ipynb",
243 | "timestamp": 1639648217641
244 | }
245 | ]
246 | },
247 | "kernelspec": {
248 | "display_name": "Python 3",
249 | "language": "python",
250 | "name": "python3"
251 | },
252 | "language_info": {
253 | "codemirror_mode": {
254 | "name": "ipython",
255 | "version": 3
256 | },
257 | "file_extension": ".py",
258 | "mimetype": "text/x-python",
259 | "name": "python",
260 | "nbconvert_exporter": "python",
261 | "pygments_lexer": "ipython3",
262 | "version": "3.7.4"
263 | }
264 | },
265 | "nbformat": 4,
266 | "nbformat_minor": 1
267 | }
268 |
--------------------------------------------------------------------------------
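The closing discussion in the lab above suggests comparing the validation images against what the augmentation pipeline actually produces. Below is a small illustrative sketch of one way to do that, not taken from the notebook itself; it assumes the `train_generator` defined in the lab is already in scope.

```python
import matplotlib.pyplot as plt

# Assumes `train_generator` from the augmentation lab above is already defined.
# Pull one augmented batch and plot a few samples to compare, by eye,
# against the images in tmp/validation-horse-or-human.
images, labels = next(train_generator)

fig, axes = plt.subplots(1, 4, figsize=(12, 3))
for ax, img in zip(axes, images[:4]):
    ax.imshow(img)      # pixel values are already rescaled to [0, 1]
    ax.axis('off')
fig.suptitle('Sample augmented training images')
plt.show()
```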
/Tensorflow Developer/Convolutional Neural Networks in TensorFlow/W3/ungraded_lab/C2_W3_Lab_1_transfer_learning.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "
"
8 | ]
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "metadata": {
13 | "id": "bT0to3TL2q7H"
14 | },
15 | "source": [
16 | "# Ungraded Lab: Transfer Learning\n",
17 | "\n",
18 | "In this lab, you will see how you can use a pre-trained model to achieve good results even with a small training dataset. This is called _transfer learning_ and you do this by leveraging the trained layers of an existing model and adding your own layers to fit your application. For example, you can:\n",
19 | "\n",
20 | "1. just get the convolution layers of one model\n",
21 | "2. attach some dense layers onto it\n",
22 | "3. train just the dense network\n",
23 | "4. evaluate the results\n",
24 | "\n",
25 | "Doing this will allow you to save time building your application because you will essentially skip weeks of training time of very deep networks. You will just use the features it has learned and tweak it for your dataset. Let's see how these are done in the next sections."
26 | ]
27 | },
28 | {
29 | "cell_type": "markdown",
30 | "metadata": {
31 | "id": "Qvrr8pLRzJMV"
32 | },
33 | "source": [
34 | "**IMPORTANT NOTE:** This notebook is designed to run as a Colab. Running the notebook on your local machine might result in some of the code blocks throwing errors."
35 | ]
36 | },
37 | {
38 | "cell_type": "markdown",
39 | "metadata": {
40 | "id": "-12slkPL6_JH"
41 | },
42 | "source": [
43 | "## Setup the pretrained model\n",
44 | "\n",
45 | "You will need to prepare pretrained model and configure the layers that you need. For this exercise, you will use the convolution layers of the [InceptionV3](https://arxiv.org/abs/1512.00567) architecture as your base model. To do that, you need to:\n",
46 | "\n",
47 | "1. Set the input shape to fit your application. In this case. set it to `150x150x3` as you've been doing in the last few labs.\n",
48 | "\n",
49 | "2. Pick and freeze the convolution layers to take advantage of the features it has learned already.\n",
50 | "\n",
51 | "3. Add dense layers which you will train.\n",
52 | "\n",
53 | "Let's see how to do these in the next cells."
54 | ]
55 | },
56 | {
57 | "cell_type": "markdown",
58 | "metadata": {
59 | "id": "3VqhFEK2Y-PK"
60 | },
61 | "source": [
62 | "First, in preparing the input to the model, you want to fetch the pretrained weights of the `InceptionV3` model and remove the fully connected layer at the end because you will be replacing it later. You will also specify the input shape that your model will accept. Lastly, you want to freeze the weights of these layers because they have been trained already."
63 | ]
64 | },
65 | {
66 | "cell_type": "code",
67 | "execution_count": null,
68 | "metadata": {
69 | "id": "1xJZ5glPPCRz"
70 | },
71 | "outputs": [],
72 | "source": [
73 | "# Download the pre-trained weights. No top means it excludes the fully connected layer it uses for classification.\n",
74 | "!wget --no-check-certificate \\\n",
75 | " https://storage.googleapis.com/mledu-datasets/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5 \\\n",
76 | " -O /tmp/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5"
77 | ]
78 | },
79 | {
80 | "cell_type": "code",
81 | "execution_count": null,
82 | "metadata": {
83 | "id": "KsiBCpQ1VvPp"
84 | },
85 | "outputs": [],
86 | "source": [
87 | "from tensorflow.keras.applications.inception_v3 import InceptionV3\n",
88 | "from tensorflow.keras import layers\n",
89 | "\n",
90 | "# Set the weights file you downloaded into a variable\n",
91 | "local_weights_file = '/tmp/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5'\n",
92 | "\n",
93 | "# Initialize the base model.\n",
94 | "# Set the input shape and remove the dense layers.\n",
95 | "pre_trained_model = InceptionV3(input_shape = (150, 150, 3), \n",
96 | " include_top = False, \n",
97 | " weights = None)\n",
98 | "\n",
99 | "# Load the pre-trained weights you downloaded.\n",
100 | "pre_trained_model.load_weights(local_weights_file)\n",
101 | "\n",
102 | "# Freeze the weights of the layers.\n",
103 | "for layer in pre_trained_model.layers:\n",
104 | " layer.trainable = False"
105 | ]
106 | },
107 | {
108 | "cell_type": "markdown",
109 | "metadata": {
110 | "id": "1y2rEnqFaa9k"
111 | },
112 | "source": [
113 | "You can see the summary of the model below. You can see that it is a very deep network. You can then select up to which point of the network you want to use. As Laurence showed in the exercise, you will use up to `mixed_7` as your base model and add to that. This is because the original last layer might be too specialized in what it has learned so it might not translate well into your application. `mixed_7` on the other hand will be more generalized and you can start with that for your application. After the exercise, feel free to modify and use other layers to see what the results you get."
114 | ]
115 | },
116 | {
117 | "cell_type": "code",
118 | "execution_count": null,
119 | "metadata": {
120 | "id": "qeGP0Ust5kCR"
121 | },
122 | "outputs": [],
123 | "source": [
124 | "pre_trained_model.summary()\n"
125 | ]
126 | },
127 | {
128 | "cell_type": "code",
129 | "execution_count": null,
130 | "metadata": {
131 | "id": "jDmGO9tg5iPc"
132 | },
133 | "outputs": [],
134 | "source": [
135 | "# Choose `mixed_7` as the last layer of your base model\n",
136 | "last_layer = pre_trained_model.get_layer('mixed7')\n",
137 | "print('last layer output shape: ', last_layer.output_shape)\n",
138 | "last_output = last_layer.output"
139 | ]
140 | },
141 | {
142 | "cell_type": "markdown",
143 | "metadata": {
144 | "id": "UXT9SDMK7Ioa"
145 | },
146 | "source": [
147 | "## Add dense layers for your classifier\n",
148 | "\n",
149 | "Next, you will add dense layers to your model. These will be the layers that you will train and is tasked with recognizing cats and dogs. You will add a [Dropout](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dropout) layer as well to regularize the output and avoid overfitting."
150 | ]
151 | },
152 | {
153 | "cell_type": "code",
154 | "execution_count": null,
155 | "metadata": {
156 | "id": "BMXb913pbvFg"
157 | },
158 | "outputs": [],
159 | "source": [
160 | "from tensorflow.keras.optimizers import RMSprop\n",
161 | "from tensorflow.keras import Model\n",
162 | "\n",
163 | "# Flatten the output layer to 1 dimension\n",
164 | "x = layers.Flatten()(last_output)\n",
165 | "# Add a fully connected layer with 1,024 hidden units and ReLU activation\n",
166 | "x = layers.Dense(1024, activation='relu')(x)\n",
167 | "# Add a dropout rate of 0.2\n",
168 | "x = layers.Dropout(0.2)(x) \n",
169 | "# Add a final sigmoid layer for classification\n",
170 | "x = layers.Dense (1, activation='sigmoid')(x) \n",
171 | "\n",
172 | "# Append the dense network to the base model\n",
173 | "model = Model(pre_trained_model.input, x) \n",
174 | "\n",
175 | "# Print the model summary. See your dense network connected at the end.\n",
176 | "model.summary()"
177 | ]
178 | },
179 | {
180 | "cell_type": "code",
181 | "execution_count": null,
182 | "metadata": {
183 | "id": "SAwTTkWr56uC"
184 | },
185 | "outputs": [],
186 | "source": [
187 | "# Set the training parameters\n",
188 | "model.compile(optimizer = RMSprop(learning_rate=0.0001), \n",
189 | " loss = 'binary_crossentropy', \n",
190 | " metrics = ['accuracy'])"
191 | ]
192 | },
193 | {
194 | "cell_type": "markdown",
195 | "metadata": {
196 | "id": "aYLGw_RO7Z_X"
197 | },
198 | "source": [
199 | "## Prepare the dataset\n",
200 | "\n",
201 | "Now you will prepare the dataset. This is basically the same code as the one you used in the data augmentation lab."
202 | ]
203 | },
204 | {
205 | "cell_type": "code",
206 | "execution_count": null,
207 | "metadata": {
208 | "id": "O4s8HckqGlnb"
209 | },
210 | "outputs": [],
211 | "source": [
212 | "# Download the dataset\n",
213 | "!wget https://storage.googleapis.com/tensorflow-1-public/course2/cats_and_dogs_filtered.zip"
214 | ]
215 | },
216 | {
217 | "cell_type": "code",
218 | "execution_count": null,
219 | "metadata": {
220 | "id": "WOV8jON3c3Jv"
221 | },
222 | "outputs": [],
223 | "source": [
224 | "import os\n",
225 | "import zipfile\n",
226 | "from tensorflow.keras.preprocessing.image import ImageDataGenerator\n",
227 | "\n",
228 | "# Extract the archive\n",
229 | "zip_ref = zipfile.ZipFile(\"./cats_and_dogs_filtered.zip\", 'r')\n",
230 | "zip_ref.extractall(\"tmp/\")\n",
231 | "zip_ref.close()\n",
232 | "\n",
233 | "# Define our example directories and files\n",
234 | "base_dir = 'tmp/cats_and_dogs_filtered'\n",
235 | "\n",
236 | "train_dir = os.path.join( base_dir, 'train')\n",
237 | "validation_dir = os.path.join( base_dir, 'validation')\n",
238 | "\n",
239 | "# Directory with training cat pictures\n",
240 | "train_cats_dir = os.path.join(train_dir, 'cats') \n",
241 | "\n",
242 | "# Directory with training dog pictures\n",
243 | "train_dogs_dir = os.path.join(train_dir, 'dogs') \n",
244 | "\n",
245 | "# Directory with validation cat pictures\n",
246 | "validation_cats_dir = os.path.join(validation_dir, 'cats') \n",
247 | "\n",
248 | "# Directory with validation dog pictures\n",
249 | "validation_dogs_dir = os.path.join(validation_dir, 'dogs')\n",
250 | "\n",
251 | "# Add our data-augmentation parameters to ImageDataGenerator\n",
252 | "train_datagen = ImageDataGenerator(rescale = 1./255.,\n",
253 | " rotation_range = 40,\n",
254 | " width_shift_range = 0.2,\n",
255 | " height_shift_range = 0.2,\n",
256 | " shear_range = 0.2,\n",
257 | " zoom_range = 0.2,\n",
258 | " horizontal_flip = True)\n",
259 | "\n",
260 | "# Note that the validation data should not be augmented!\n",
261 | "test_datagen = ImageDataGenerator( rescale = 1.0/255. )\n",
262 | "\n",
263 | "# Flow training images in batches of 20 using train_datagen generator\n",
264 | "train_generator = train_datagen.flow_from_directory(train_dir,\n",
265 | " batch_size = 20,\n",
266 | " class_mode = 'binary', \n",
267 | " target_size = (150, 150)) \n",
268 | "\n",
269 | "# Flow validation images in batches of 20 using test_datagen generator\n",
270 | "validation_generator = test_datagen.flow_from_directory( validation_dir,\n",
271 | " batch_size = 20,\n",
272 | " class_mode = 'binary', \n",
273 | " target_size = (150, 150))"
274 | ]
275 | },
276 | {
277 | "cell_type": "markdown",
278 | "metadata": {
279 | "id": "3m3S6AZb7h-B"
280 | },
281 | "source": [
282 | "## Train the model\n",
283 | "\n",
284 | "With that, you can now train the model. You will do 20 epochs and plot the results afterwards."
285 | ]
286 | },
287 | {
288 | "cell_type": "code",
289 | "execution_count": null,
290 | "metadata": {
291 | "id": "Blhq2MAUeyGA"
292 | },
293 | "outputs": [],
294 | "source": [
295 | "# Train the model.\n",
296 | "history = model.fit(\n",
297 | " train_generator,\n",
298 | " validation_data = validation_generator,\n",
299 | " steps_per_epoch = 100,\n",
300 | " epochs = 20,\n",
301 | " validation_steps = 50,\n",
302 | " verbose = 2)"
303 | ]
304 | },
305 | {
306 | "cell_type": "markdown",
307 | "metadata": {
308 | "id": "RwcB2bPj7lIx"
309 | },
310 | "source": [
311 | "## Evaluate the results\n",
312 | "\n",
313 | "You will use the same code to plot the results. As you can see, the validation accuracy is also trending upwards as your training accuracy improves. This is a good sign that your model is no longer overfitting!"
314 | ]
315 | },
316 | {
317 | "cell_type": "code",
318 | "execution_count": null,
319 | "metadata": {
320 | "id": "C2Fp6Se9rKuL"
321 | },
322 | "outputs": [],
323 | "source": [
324 | "import matplotlib.pyplot as plt\n",
325 | "acc = history.history['accuracy']\n",
326 | "val_acc = history.history['val_accuracy']\n",
327 | "loss = history.history['loss']\n",
328 | "val_loss = history.history['val_loss']\n",
329 | "\n",
330 | "epochs = range(len(acc))\n",
331 | "\n",
332 | "plt.plot(epochs, acc, 'r', label='Training accuracy')\n",
333 | "plt.plot(epochs, val_acc, 'b', label='Validation accuracy')\n",
334 | "plt.title('Training and validation accuracy')\n",
335 | "plt.legend(loc=0)\n",
336 | "plt.figure()\n",
337 | "\n",
338 | "\n",
339 | "plt.show()"
340 | ]
341 | }
342 | ],
343 | "metadata": {
344 | "accelerator": "GPU",
345 | "colab": {
346 | "collapsed_sections": [],
347 | "name": "C2_W3_Lab_1_transfer_learning.ipynb",
348 | "private_outputs": true,
349 | "provenance": [
350 | {
351 | "file_id": "https://github.com/https-deeplearning-ai/tensorflow-1-public/blob/adding_C2/C2/W3/ungraded_labs/C2_W3_Lab_1_transfer_learning.ipynb",
352 | "timestamp": 1639668234563
353 | }
354 | ],
355 | "toc_visible": true
356 | },
357 | "kernelspec": {
358 | "display_name": "Python 3",
359 | "language": "python",
360 | "name": "python3"
361 | },
362 | "language_info": {
363 | "codemirror_mode": {
364 | "name": "ipython",
365 | "version": 3
366 | },
367 | "file_extension": ".py",
368 | "mimetype": "text/x-python",
369 | "name": "python",
370 | "nbconvert_exporter": "python",
371 | "pygments_lexer": "ipython3",
372 | "version": "3.7.4"
373 | }
374 | },
375 | "nbformat": 4,
376 | "nbformat_minor": 1
377 | }
378 |
--------------------------------------------------------------------------------
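The transfer-learning lab above cuts the InceptionV3 base at `mixed7` and invites you to experiment with other layers. The following is a hedged sketch, not from the notebook itself, of how you might explore alternative cut points; it assumes `pre_trained_model` from the lab is in scope, and the choice of `mixed5` is purely illustrative.

```python
# Assumes `pre_trained_model` (the InceptionV3 base) from the lab above is defined.

# The mixed* blocks are the natural cut points in InceptionV3; list them first.
mixed_blocks = [layer.name for layer in pre_trained_model.layers
                if layer.name.startswith('mixed')]
print(mixed_blocks)

# Pick an earlier, more generic block as an illustrative alternative to mixed7.
last_layer = pre_trained_model.get_layer('mixed5')
last_output = last_layer.output
print('chosen layer output shape:', last_layer.output_shape)
```

Earlier blocks produce larger, more generic feature maps, so the dense head you attach to `last_output` may need more aggressive pooling or dropout; later blocks are smaller but more specialized to ImageNet classes.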
/Tensorflow Developer/Introduction to TensorFlow for Artificial Intelligence, Machine Learning, and Deep Learning/README.md:
--------------------------------------------------------------------------------
1 |
2 | In The Name Of GOD
3 |
4 |
5 | # Tensorflow notebooks
6 |
7 | This directory contains my work (assignments & labs) for the Coursera TensorFlow Developer program, **Course 1**: **Introduction to TensorFlow for AI & ML and Deep Learning**. Below is the list of assignments and ungraded labs for this course.
8 |
9 | ## Introduction to TensorFlow for Artificial Intelligence, Machine Learning, and Deep Learning
10 |
11 | ## Week 1
12 |
13 | ### Assignment
14 |
15 | - Housing Prices _(C1W1_Assignment.ipynb)_
16 |
17 | ### Ungraded Labs
18 |
19 | 1. Hello World Neural Network _(C1_W1_Lab_1_hello_world_nn.ipynb)_
20 |
21 | ## Week 2
22 |
23 | ### Assignment
24 |
25 | - Handwriting Recognition _(C1W2_Assignment.ipynb)_
26 |
27 | ### Ungraded Labs
28 |
29 | 1. Beyond Hello World, A Computer Vision Example _(C1_W2_Lab_1_beyond_hello_world.ipynb)_
30 | 2. Callbacks _(C1_W2_Lab_2_callbacks.ipynb)_
31 |
32 | ## Week 3
33 |
34 | ### Assignment
35 |
36 | - Improve MNIST with Convolutions _(C1W3_Assignment.ipynb)_
37 |
38 | ### Ungraded Labs
39 |
40 | 1. Improving Accuracy with Convolutions _(C1_W3_Lab_1_improving_accuracy_using_convolutions.ipynb)_
41 | 2. Exploring Convolutions _(C1_W3_Lab_2_exploring_convolutions.ipynb)_
42 |
43 | ## Week 4
44 |
45 | ### Assignment
46 |
47 | - Handling Complex Images _(C1W4_Assignment.ipynb)_
48 |
49 | ### Ungraded Labs
50 |
51 | 1. Image Generator _(C1_W4_Lab_1_image_generator_no_validation.ipynb)_
52 | 2. Image Generator with Validation _(C1_W4_Lab_2_image_generator_with_validation.ipynb)_
53 | 3. Compacted Images _(C1_W4_Lab_3_compacted_images.ipynb)_
54 |
--------------------------------------------------------------------------------
/Tensorflow Developer/Introduction to TensorFlow for Artificial Intelligence, Machine Learning, and Deep Learning/W1/ungraded_lab/C1_W1_Lab_1_hello_world_nn.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "
"
8 | ]
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "metadata": {
13 | "id": "ZIAkIlfmCe1B"
14 | },
15 | "source": [
16 | "# Ungraded Lab: The Hello World of Deep Learning with Neural Networks"
17 | ]
18 | },
19 | {
20 | "cell_type": "markdown",
21 | "metadata": {
22 | "id": "fA93WUy1zzWf"
23 | },
24 | "source": [
25 | "Like every first app, you should start with something super simple that shows the overall scaffolding for how your code works. In the case of creating neural networks, one simple case is where it learns the relationship between two numbers. So, for example, if you were writing code for a function like this, you already know the 'rules': \n",
26 | "\n",
27 | "\n",
28 | "```\n",
29 | "def hw_function(x):\n",
30 | " y = (2 * x) - 1\n",
31 | " return y\n",
32 | "```\n",
33 | "\n",
34 | "So how would you train a neural network to do the equivalent task? By using data! By feeding it with a set of x's and y's, it should be able to figure out the relationship between them. \n",
35 | "\n",
36 | "This is obviously a very different paradigm from what you might be used to. So let's step through it piece by piece.\n"
37 | ]
38 | },
39 | {
40 | "cell_type": "markdown",
41 | "metadata": {
42 | "id": "DzbtdRcZDO9B"
43 | },
44 | "source": [
45 | "## Imports\n",
46 | "\n",
47 | "Let's start with the imports. Here, you are importing [TensorFlow](https://www.tensorflow.org/) and calling it `tf` for convention and ease of use.\n",
48 | "\n",
49 | "You then import a library called [`numpy`](https://numpy.org) which helps to represent data as arrays easily and to optimize numerical operations.\n",
50 | "\n",
51 | "The framework you will use to build a neural network as a sequence of layers is called [`keras`](https://keras.io/) so you will import that too.\n"
52 | ]
53 | },
54 | {
55 | "cell_type": "code",
56 | "execution_count": null,
57 | "metadata": {
58 | "id": "X9uIpOS2zx7k"
59 | },
60 | "outputs": [],
61 | "source": [
62 | "import tensorflow as tf\n",
63 | "import numpy as np\n",
64 | "from tensorflow import keras\n",
65 | "\n",
66 | "print(tf.__version__)"
67 | ]
68 | },
69 | {
70 | "cell_type": "markdown",
71 | "metadata": {
72 | "id": "wwJGmDrQ0EoB"
73 | },
74 | "source": [
75 | "## Define and Compile the Neural Network\n",
76 | "\n",
77 | "Next, you will create the simplest possible neural network. It has 1 layer with 1 neuron, and the input shape to it is just 1 value. You will build this model using Keras' [Sequential](https://keras.io/api/models/sequential/) class which allows you to define the network as a sequence of [layers](https://keras.io/api/layers/). You can use a single [Dense](https://keras.io/api/layers/core_layers/dense/) layer to build this simple network as shown below."
78 | ]
79 | },
80 | {
81 | "cell_type": "code",
82 | "execution_count": null,
83 | "metadata": {
84 | "id": "kQFAr_xo0M4T"
85 | },
86 | "outputs": [],
87 | "source": [
88 | "# Build a simple Sequential model\n",
89 | "model = tf.keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])"
90 | ]
91 | },
92 | {
93 | "cell_type": "markdown",
94 | "metadata": {
95 | "id": "KhjZjZ-c0Ok9"
96 | },
97 | "source": [
98 | "Now, you will compile the neural network. When you do so, you have to specify 2 functions: a [loss](https://keras.io/api/losses/) and an [optimizer](https://keras.io/api/optimizers/).\n",
99 | "\n",
100 | "If you've seen lots of math for machine learning, here's where it's usually used. But in this case, it's nicely encapsulated in functions and classes for you. But what happens here? Let's explain...\n",
101 | "\n",
102 | "You know that in the function declared at the start of this notebook, the relationship between the numbers is `y=2x-1`. When the computer is trying to 'learn' that, it makes a guess... maybe `y=10x+10`. The `loss` function measures the guessed answers against the known correct answers and measures how well or how badly it did.\n",
103 | "\n",
104 | "It then uses the `optimizer` function to make another guess. Based on how the loss function went, it will try to minimize the loss. At that point maybe it will come up with something like `y=5x+5`, which, while still pretty bad, is closer to the correct result (i.e. the loss is lower).\n",
105 | "\n",
106 | "It will repeat this for the number of _epochs_ which you will see shortly. But first, here's how you will tell it to use [mean squared error](https://keras.io/api/losses/regression_losses/#meansquarederror-function) for the loss and [stochastic gradient descent](https://keras.io/api/optimizers/sgd/) for the optimizer. You don't need to understand the math for these yet, but you can see that they work!\n",
107 | "\n",
108 | "Over time, you will learn the different and appropriate loss and optimizer functions for different scenarios. \n"
109 | ]
110 | },
111 | {
112 | "cell_type": "code",
113 | "execution_count": null,
114 | "metadata": {
115 | "id": "m8YQN1H41L-Y"
116 | },
117 | "outputs": [],
118 | "source": [
119 | "# Compile the model\n",
120 | "model.compile(optimizer='sgd', loss='mean_squared_error')"
121 | ]
122 | },
123 | {
124 | "cell_type": "markdown",
125 | "metadata": {
126 | "id": "5QyOUhFw1OUX"
127 | },
128 | "source": [
129 | "## Providing the Data\n",
130 | "\n",
131 | "Next up, you will feed in some data. In this case, you are taking 6 X's and 6 Y's. You can see that the relationship between these is `y=2x-1`, so where `x = -1`, `y=-3` etc. \n",
132 | "\n",
133 | "The de facto standard way of declaring model inputs and outputs is to use `numpy`, a Python library that provides lots of array type data structures. You can specify these values by building numpy arrays with [`np.array()`](https://numpy.org/doc/stable/reference/generated/numpy.array.html)."
134 | ]
135 | },
136 | {
137 | "cell_type": "code",
138 | "execution_count": null,
139 | "metadata": {
140 | "id": "4Dxk4q-jzEy4"
141 | },
142 | "outputs": [],
143 | "source": [
144 | "# Declare model inputs and outputs for training\n",
145 | "xs = np.array([-1.0, 0.0, 1.0, 2.0, 3.0, 4.0], dtype=float)\n",
146 | "ys = np.array([-3.0, -1.0, 1.0, 3.0, 5.0, 7.0], dtype=float)"
147 | ]
148 | },
149 | {
150 | "cell_type": "markdown",
151 | "metadata": {
152 | "id": "n_YcWRElnM_b"
153 | },
154 | "source": [
155 | "# Training the Neural Network\n",
156 | "\n",
157 | "The process of training the neural network, where it 'learns' the relationship between the x's and y's is in the [`model.fit()`](https://keras.io/api/models/model_training_apis/#fit-method) call. This is where it will go through the loop we spoke about above: making a guess, measuring how good or bad it is (aka the loss), using the optimizer to make another guess etc. It will do it for the number of `epochs` you specify. When you run this code, you'll see the loss on the right hand side."
158 | ]
159 | },
160 | {
161 | "cell_type": "code",
162 | "execution_count": null,
163 | "metadata": {
164 | "id": "lpRrl7WK10Pq"
165 | },
166 | "outputs": [],
167 | "source": [
168 | "# Train the model\n",
169 | "model.fit(xs, ys, epochs=500)"
170 | ]
171 | },
172 | {
173 | "cell_type": "markdown",
174 | "metadata": {
175 | "id": "kaFIr71H2OZ-"
176 | },
177 | "source": [
178 | "Ok, now you have a model that has been trained to learn the relationship between `x` and `y`. You can use the [`model.predict()`](https://keras.io/api/models/model_training_apis/#predict-method) method to have it figure out the `y` for a previously unknown `x`. So, for example, if `x=10`, what do you think `y` will be? Take a guess before you run this code:"
179 | ]
180 | },
181 | {
182 | "cell_type": "code",
183 | "execution_count": null,
184 | "metadata": {
185 | "id": "oxNzL4lS2Gui"
186 | },
187 | "outputs": [],
188 | "source": [
189 | "# Make a prediction\n",
190 | "print(model.predict([10.0]))"
191 | ]
192 | },
193 | {
194 | "cell_type": "markdown",
195 | "metadata": {
196 | "id": "btF2CSFH2iEX"
197 | },
198 | "source": [
199 | "You might have thought `19`, right? But it ended up being a little under. Why do you think that is? \n",
200 | "\n",
201 | "Remember that neural networks deal with probabilities. So given the data that we fed the model with, it calculated that there is a very high probability that the relationship between `x` and `y` is `y=2x-1`, but with only 6 data points we can't know for sure. As a result, the result for 10 is very close to 19, but not necessarily 19.\n",
202 | "\n",
203 | "As you work with neural networks, you'll see this pattern recurring. You will almost always deal with probabilities, not certainties, and will do a little bit of coding to figure out what the result is based on the probabilities, particularly when it comes to classification.\n"
204 | ]
205 | }
206 | ],
207 | "metadata": {
208 | "colab": {
209 | "collapsed_sections": [],
210 | "name": "C1_W1_Lab_1_hello_world_nn.ipynb",
211 | "private_outputs": true,
212 | "provenance": [
213 | {
214 | "file_id": "https://github.com/https-deeplearning-ai/tensorflow-1-public/blob/main/C1/W1/ungraded_lab/C1_W1_Lab_1_hello_world_nn.ipynb",
215 | "timestamp": 1637670538744
216 | }
217 | ],
218 | "toc_visible": true
219 | },
220 | "kernelspec": {
221 | "display_name": "Python 3",
222 | "language": "python",
223 | "name": "python3"
224 | },
225 | "language_info": {
226 | "codemirror_mode": {
227 | "name": "ipython",
228 | "version": 3
229 | },
230 | "file_extension": ".py",
231 | "mimetype": "text/x-python",
232 | "name": "python",
233 | "nbconvert_exporter": "python",
234 | "pygments_lexer": "ipython3",
235 | "version": "3.7.4"
236 | }
237 | },
238 | "nbformat": 4,
239 | "nbformat_minor": 1
240 | }
241 |
--------------------------------------------------------------------------------
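Since the hello-world lab above frames training as estimating the relationship `y = 2x - 1`, one instructive follow-up is to inspect the single weight and bias the Dense layer actually learned; they should land close to 2 and -1. This is a small sketch, not part of the notebook, assuming the trained `model` from the lab is in scope.

```python
# Assumes the trained one-neuron `model` from the hello-world lab above.
kernel, bias = model.layers[0].get_weights()
print('learned weight:', kernel[0][0])  # expected to be close to 2
print('learned bias:', bias[0])         # expected to be close to -1
```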
/Tensorflow Developer/Introduction to TensorFlow for Artificial Intelligence, Machine Learning, and Deep Learning/W2/assignment/C1W2_Assignment.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {
6 | "id": "_2s0EJ5Fy4u2"
7 | },
8 | "source": [
9 | "# Week 2: Implementing Callbacks in TensorFlow using the MNIST Dataset\n",
10 | "\n",
11 | "In the course you learned how to do classification using Fashion MNIST, a data set containing items of clothing. There's another, similar dataset called MNIST which has items of handwriting -- the digits 0 through 9.\n",
12 | "\n",
13 | "Write an MNIST classifier that trains to 99% accuracy and stops once this threshold is achieved. In the lecture you saw how this was done for the loss but here you will be using accuracy instead.\n",
14 | "\n",
15 | "Some notes:\n",
16 | "1. Your network should succeed in less than 9 epochs.\n",
17 | "2. When it reaches 99% or greater it should print out the string \"Reached 99% accuracy so cancelling training!\" and stop training.\n",
18 | "3. If you add any additional variables, make sure you use the same names as the ones used in the class. This is important for the function signatures (the parameters and names) of the callbacks."
19 | ]
20 | },
21 | {
22 | "cell_type": "code",
23 | "execution_count": 1,
24 | "metadata": {
25 | "tags": [
26 | "graded"
27 | ]
28 | },
29 | "outputs": [],
30 | "source": [
31 | "import os\n",
32 | "import tensorflow as tf\n",
33 | "from tensorflow import keras"
34 | ]
35 | },
36 | {
37 | "cell_type": "markdown",
38 | "metadata": {},
39 | "source": [
40 | "## Load and inspect the data\n",
41 | "\n",
42 | "Begin by loading the data. A couple of things to notice:\n",
43 | "\n",
44 | "- The file `mnist.npz` is already included in the current workspace under the `data` directory. By default the `load_data` from Keras accepts a path relative to `~/.keras/datasets` but in this case it is stored somewhere else, as a result of this, you need to specify the full path.\n",
45 | "\n",
46 | "- `load_data` returns the train and test sets in the form of the tuples `(x_train, y_train), (x_test, y_test)` but in this exercise you will be needing only the train set so you can ignore the second tuple."
47 | ]
48 | },
49 | {
50 | "cell_type": "code",
51 | "execution_count": 2,
52 | "metadata": {
53 | "tags": [
54 | "graded"
55 | ]
56 | },
57 | "outputs": [],
58 | "source": [
59 | "# Load the data\n",
60 | "\n",
61 | "# Get current working directory\n",
62 | "current_dir = os.getcwd()\n",
63 | "\n",
64 | "# Append data/mnist.npz to the previous path to get the full path\n",
65 | "data_path = os.path.join(current_dir, \"data/mnist.npz\")\n",
66 | "\n",
67 | "# Discard test set\n",
68 | "(x_train, y_train), _ = tf.keras.datasets.mnist.load_data(path=data_path)\n",
69 | " \n",
70 | "# Normalize pixel values\n",
71 | "x_train = x_train / 255.0"
72 | ]
73 | },
74 | {
75 | "cell_type": "markdown",
76 | "metadata": {},
77 | "source": [
78 | "Now take a look at the shape of the training data:"
79 | ]
80 | },
81 | {
82 | "cell_type": "code",
83 | "execution_count": 3,
84 | "metadata": {
85 | "tags": [
86 | "graded"
87 | ]
88 | },
89 | "outputs": [
90 | {
91 | "name": "stdout",
92 | "output_type": "stream",
93 | "text": [
94 | "There are 60000 examples with shape (28, 28)\n"
95 | ]
96 | }
97 | ],
98 | "source": [
99 | "data_shape = x_train.shape\n",
100 | "\n",
101 | "print(f\"There are {data_shape[0]} examples with shape ({data_shape[1]}, {data_shape[2]})\")"
102 | ]
103 | },
104 | {
105 | "cell_type": "markdown",
106 | "metadata": {},
107 | "source": [
108 | "## Defining your callback\n",
109 | "\n",
110 | "Now it is time to create your own custom callback. For this complete the `myCallback` class and the `on_epoch_end` method in the cell below. If you need some guidance on how to proceed, check out this [link](https://www.tensorflow.org/guide/keras/custom_callback)."
111 | ]
112 | },
113 | {
114 | "cell_type": "code",
115 | "execution_count": 13,
116 | "metadata": {
117 | "tags": [
118 | "graded"
119 | ]
120 | },
121 | "outputs": [],
122 | "source": [
123 | "# GRADED CLASS: myCallback\n",
124 | "### START CODE HERE\n",
125 | "\n",
126 | "# Remember to inherit from the correct class\n",
127 | "class myCallback(tf.keras.callbacks.Callback):\n",
128 | " # Define the correct function signature for on_epoch_end\n",
129 | " def on_epoch_end(self, epoch, logs={}):\n",
130 | " if logs.get('accuracy') is not None and logs.get('accuracy') > 0.99:\n",
131 | " print(\"\\nReached 99% accuracy so cancelling training!\") \n",
132 | " \n",
133 | " self.model.stop_training = True\n",
134 | "\n",
135 | "### END CODE HERE\n"
136 | ]
137 | },
138 | {
139 | "cell_type": "markdown",
140 | "metadata": {},
141 | "source": [
142 | "## Create and train your model\n",
143 | "\n",
144 | "Now that you have defined your callback it is time to complete the `train_mnist` function below. \n",
145 | "\n",
146 | "**You must set your model to train for 10 epochs and the callback should fire before the 9th epoch for you to pass this assignment.**\n",
147 | "\n",
148 | "**Hint:**\n",
149 | "- Feel free to try the architecture for the neural network that you see fit but in case you need extra help you can check out an architecture that works pretty well at the end of this notebook."
150 | ]
151 | },
152 | {
153 | "cell_type": "code",
154 | "execution_count": 16,
155 | "metadata": {
156 | "id": "rEHcB3kqyHZ6",
157 | "tags": [
158 | "graded"
159 | ]
160 | },
161 | "outputs": [],
162 | "source": [
163 | "# GRADED FUNCTION: train_mnist\n",
164 | "def train_mnist(x_train, y_train):\n",
165 | "\n",
166 | " ### START CODE HERE\n",
167 | " \n",
168 | " # Instantiate the callback class\n",
169 | " callbacks = myCallback()\n",
170 | " \n",
171 | " # Define the model\n",
172 | " model = tf.keras.models.Sequential([ \n",
173 | " tf.keras.layers.Flatten(),\n",
174 | " tf.keras.layers.Dense(1024, activation=tf.nn.sigmoid),\n",
175 | " tf.keras.layers.Dense(10, activation=tf.nn.sigmoid)\n",
176 | " ]) \n",
177 | "\n",
178 | " # Compile the model\n",
179 | " model.compile(optimizer='adam', \n",
180 | " loss='sparse_categorical_crossentropy', \n",
181 | " metrics=['accuracy']) \n",
182 | " \n",
183 | " # Fit the model for 10 epochs adding the callbacks\n",
184 | " # and save the training history\n",
185 | " history = model.fit(x_train, y_train, epochs=10, callbacks=[callbacks])\n",
186 | "\n",
187 | " ### END CODE HERE\n",
188 | "\n",
189 | " return history"
190 | ]
191 | },
192 | {
193 | "cell_type": "markdown",
194 | "metadata": {},
195 | "source": [
196 | "Call the `train_mnist` passing in the appropiate parameters to get the training history:"
197 | ]
198 | },
199 | {
200 | "cell_type": "code",
201 | "execution_count": 17,
202 | "metadata": {
203 | "id": "sFgpwbGly4u4",
204 | "tags": [
205 | "graded"
206 | ]
207 | },
208 | "outputs": [
209 | {
210 | "name": "stdout",
211 | "output_type": "stream",
212 | "text": [
213 | "Epoch 1/10\n",
214 | "1875/1875 [==============================] - 14s 8ms/step - loss: 0.3283 - accuracy: 0.9015\n",
215 | "Epoch 2/10\n",
216 | "1875/1875 [==============================] - 14s 8ms/step - loss: 0.1647 - accuracy: 0.9506\n",
217 | "Epoch 3/10\n",
218 | "1875/1875 [==============================] - 14s 8ms/step - loss: 0.1064 - accuracy: 0.9676\n",
219 | "Epoch 4/10\n",
220 | "1875/1875 [==============================] - 14s 7ms/step - loss: 0.0733 - accuracy: 0.9768\n",
221 | "Epoch 5/10\n",
222 | "1875/1875 [==============================] - 14s 7ms/step - loss: 0.0539 - accuracy: 0.9830\n",
223 | "Epoch 6/10\n",
224 | "1875/1875 [==============================] - 14s 7ms/step - loss: 0.0389 - accuracy: 0.9882\n",
225 | "Epoch 7/10\n",
226 | "1867/1875 [============================>.] - ETA: 0s - loss: 0.0281 - accuracy: 0.9913\n",
227 | "Reached 99% accuracy so cancelling training!\n",
228 | "1875/1875 [==============================] - 14s 7ms/step - loss: 0.0283 - accuracy: 0.9912\n"
229 | ]
230 | }
231 | ],
232 | "source": [
233 | "hist = train_mnist(x_train, y_train)"
234 | ]
235 | },
236 | {
237 | "cell_type": "markdown",
238 | "metadata": {},
239 | "source": [
240 | "If you see the message `Reached 99% accuracy so cancelling training!` printed out after less than 9 epochs it means your callback worked as expected. "
241 | ]
242 | },
243 | {
244 | "cell_type": "markdown",
245 | "metadata": {},
246 | "source": [
247 | "## Need more help?\n",
248 | "\n",
249 | "Run the following cell to see an architecture that works well for the problem at hand:"
250 | ]
251 | },
252 | {
253 | "cell_type": "code",
254 | "execution_count": 10,
255 | "metadata": {},
256 | "outputs": [
257 | {
258 | "name": "stdout",
259 | "output_type": "stream",
260 | "text": [
261 | "\n",
262 | " - A Flatten layer that receives inputs with the same shape as the images\n",
263 | " - A Dense layer with 512 units and ReLU activation function\n",
264 | " - A Dense layer with 10 units and softmax activation function\n",
265 | "\n"
266 | ]
267 | }
268 | ],
269 | "source": [
270 | "# WE STRONGLY RECOMMEND YOU TO TRY YOUR OWN ARCHITECTURES FIRST\n",
271 | "# AND ONLY RUN THIS CELL IF YOU WISH TO SEE AN ANSWER\n",
272 | "\n",
273 | "import base64\n",
274 | "\n",
275 | "encoded_answer = \"CiAgIC0gQSBGbGF0dGVuIGxheWVyIHRoYXQgcmVjZWl2ZXMgaW5wdXRzIHdpdGggdGhlIHNhbWUgc2hhcGUgYXMgdGhlIGltYWdlcwogICAtIEEgRGVuc2UgbGF5ZXIgd2l0aCA1MTIgdW5pdHMgYW5kIFJlTFUgYWN0aXZhdGlvbiBmdW5jdGlvbgogICAtIEEgRGVuc2UgbGF5ZXIgd2l0aCAxMCB1bml0cyBhbmQgc29mdG1heCBhY3RpdmF0aW9uIGZ1bmN0aW9uCg==\"\n",
276 | "encoded_answer = encoded_answer.encode('ascii')\n",
277 | "answer = base64.b64decode(encoded_answer)\n",
278 | "answer = answer.decode('ascii')\n",
279 | "\n",
280 | "print(answer)"
281 | ]
282 | },
283 | {
284 | "cell_type": "markdown",
285 | "metadata": {},
286 | "source": [
287 | "**Congratulations on finishing this week's assignment!**\n",
288 | "\n",
289 | "You have successfully implemented a callback that gives you more control over the training loop for your model. Nice job!\n",
290 | "\n",
291 | "**Keep it up!**"
292 | ]
293 | }
294 | ],
295 | "metadata": {
296 | "jupytext": {
297 | "main_language": "python"
298 | },
299 | "kernelspec": {
300 | "display_name": "Python 3",
301 | "language": "python",
302 | "name": "python3"
303 | },
304 | "language_info": {
305 | "codemirror_mode": {
306 | "name": "ipython",
307 | "version": 3
308 | },
309 | "file_extension": ".py",
310 | "mimetype": "text/x-python",
311 | "name": "python",
312 | "nbconvert_exporter": "python",
313 | "pygments_lexer": "ipython3",
314 | "version": "3.8.8"
315 | }
316 | },
317 | "nbformat": 4,
318 | "nbformat_minor": 4
319 | }
320 |
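A quick way to confirm the behaviour described in the training log above is to inspect the `History` object returned by `train_mnist`. The snippet below is only a minimal sketch, assuming the training cell above has been run so that `hist` exists; the `0.99` threshold simply mirrors the printed callback message.

```python
# Minimal sketch: confirm the callback stopped training early (assumes `hist` from the cell above)
epochs_run = len(hist.epoch)                   # number of epochs actually executed
final_accuracy = hist.history['accuracy'][-1]  # accuracy of the last completed epoch

print(f"Trained for {epochs_run} epochs, final accuracy {final_accuracy:.4f}")
assert epochs_run < 10 and final_accuracy >= 0.99
```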
--------------------------------------------------------------------------------
/Tensorflow Developer/Introduction to TensorFlow for Artificial Intelligence, Machine Learning, and Deep Learning/W2/ungraded_labs/C1_W2_Lab_2_callbacks.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
 7 |     ""
8 | ]
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "metadata": {
13 | "id": "vBNo9JrZIYG6"
14 | },
15 | "source": [
16 | "# Ungraded Lab: Using Callbacks to Control Training\n",
17 | "\n",
18 | "In this lab, you will use the [Callbacks API](https://keras.io/api/callbacks/) to stop training when a specified metric is met. This is a useful feature so you won't need to complete all epochs when this threshold is reached. For example, if you set 1000 epochs and your desired accuracy is already reached at epoch 200, then the training will automatically stop. Let's see how this is implemented in the next sections.\n"
19 | ]
20 | },
21 | {
22 | "cell_type": "markdown",
23 | "metadata": {
24 | "id": "Mcwrn9AKKVb8"
25 | },
26 | "source": [
27 | "## Load and Normalize the Fashion MNIST dataset\n",
28 | "\n",
29 | "Like the previous lab, you will use the Fashion MNIST dataset again for this exercise. And also as mentioned before, you will normalize the pixel values to help optimize the training."
30 | ]
31 | },
32 | {
33 | "cell_type": "code",
34 | "execution_count": null,
35 | "metadata": {
36 | "id": "8LTaefqDJMIn"
37 | },
38 | "outputs": [],
39 | "source": [
40 | "import tensorflow as tf\n",
41 | "\n",
42 | "# Instantiate the dataset API\n",
43 | "fmnist = tf.keras.datasets.fashion_mnist\n",
44 | "\n",
45 | "# Load the dataset\n",
46 | "(x_train, y_train),(x_test, y_test) = fmnist.load_data()\n",
47 | "\n",
48 | "# Normalize the pixel values\n",
49 | "x_train, x_test = x_train / 255.0, x_test / 255.0"
50 | ]
51 | },
52 | {
53 | "cell_type": "markdown",
54 | "metadata": {
55 | "id": "Ia2OadhALJjS"
56 | },
57 | "source": [
58 | "## Creating a Callback class\n",
59 | "\n",
 60 |     "You can create a callback by defining a class that inherits the [tf.keras.callbacks.Callback](https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/Callback) base class. From there, you can override the available methods to define when the callback will be executed. For instance, below you will use the [on_epoch_end()](https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/Callback#on_epoch_end) method to check the loss at the end of each training epoch."
61 | ]
62 | },
63 | {
64 | "cell_type": "code",
65 | "execution_count": null,
66 | "metadata": {
67 | "id": "uuRmQZWVJAJH"
68 | },
69 | "outputs": [],
70 | "source": [
71 | "class myCallback(tf.keras.callbacks.Callback):\n",
72 | " def on_epoch_end(self, epoch, logs={}):\n",
73 | " '''\n",
 74 |     "    Halts the training when the loss falls below 0.4\n",
75 | "\n",
76 | " Args:\n",
77 | " epoch (integer) - index of epoch (required but unused in the function definition below)\n",
78 | " logs (dict) - metric results from the training epoch\n",
79 | " '''\n",
80 | "\n",
 81 |     "    # Check the loss\n",
82 | " if(logs.get('loss') < 0.4):\n",
83 | "\n",
84 | " # Stop if threshold is met\n",
85 | " print(\"\\nLoss is lower than 0.4 so cancelling training!\")\n",
86 | " self.model.stop_training = True\n",
87 | "\n",
88 | "# Instantiate class\n",
89 | "callbacks = myCallback()"
90 | ]
91 | },
92 | {
93 | "cell_type": "markdown",
94 | "metadata": {
95 | "id": "4xlXeLkFeMn8"
96 | },
97 | "source": [
98 | "## Define and compile the model\n",
99 | "\n",
100 | "Next, you will define and compile the model. The architecture will be similar to the one you built in the previous lab. Afterwards, you will set the optimizer, loss, and metrics that you will use for training."
101 | ]
102 | },
103 | {
104 | "cell_type": "code",
105 | "execution_count": null,
106 | "metadata": {
107 | "id": "7JXxMg3TpzER"
108 | },
109 | "outputs": [],
110 | "source": [
111 | "# Define the model\n",
112 | "model = tf.keras.models.Sequential([\n",
113 | " tf.keras.layers.Flatten(input_shape=(28, 28)),\n",
114 | " tf.keras.layers.Dense(512, activation=tf.nn.relu),\n",
115 | " tf.keras.layers.Dense(10, activation=tf.nn.softmax)\n",
116 | "])\n",
117 | "\n",
118 | "# Compile the model\n",
119 | "model.compile(optimizer=tf.optimizers.Adam(),\n",
120 | " loss='sparse_categorical_crossentropy',\n",
121 | " metrics=['accuracy'])\n",
122 | "\n"
123 | ]
124 | },
125 | {
126 | "cell_type": "markdown",
127 | "metadata": {
128 | "id": "6eLe4cPZe-ui"
129 | },
130 | "source": [
131 | "### Train the model\n",
132 | "\n",
133 | "Now you are ready to train the model. To set the callback, simply set the `callbacks` parameter to the `myCallback` instance you declared before. Run the cell below and observe what happens."
134 | ]
135 | },
136 | {
137 | "cell_type": "code",
138 | "execution_count": null,
139 | "metadata": {
140 | "id": "nLXTB32de3_e"
141 | },
142 | "outputs": [],
143 | "source": [
144 | "# Train the model with a callback\n",
145 | "model.fit(x_train, y_train, epochs=10, callbacks=[callbacks])"
146 | ]
147 | },
148 | {
149 | "cell_type": "markdown",
150 | "metadata": {
151 | "id": "fGBSkRQPff93"
152 | },
153 | "source": [
154 |     "You will notice that the training does not need to complete all 10 epochs. By running a callback at the end of each epoch, the model is able to check the training metrics against the threshold you set in the function definition. In this case, it simply stops once the loss falls below `0.40` at the end of an epoch.\n",
155 |     "\n",
156 |     "*Optional Challenge: Modify the code to make the training stop when the accuracy metric exceeds 60% (one possible approach is sketched after this notebook listing).*\n",
157 | "\n",
158 | "That concludes this simple exercise on callbacks!"
159 | ]
160 | }
161 | ],
162 | "metadata": {
163 | "colab": {
164 | "collapsed_sections": [],
165 | "name": "C1_W2_Lab_2_callbacks.ipynb",
166 | "private_outputs": true,
167 | "provenance": [
168 | {
169 | "file_id": "https://github.com/https-deeplearning-ai/tensorflow-1-public/blob/adding_C1/C1/W2/ungraded_labs/C1_W2_Lab_2_callbacks.ipynb",
170 | "timestamp": 1638884482962
171 | }
172 | ],
173 | "toc_visible": true
174 | },
175 | "kernelspec": {
176 | "display_name": "Python 3",
177 | "language": "python",
178 | "name": "python3"
179 | },
180 | "language_info": {
181 | "codemirror_mode": {
182 | "name": "ipython",
183 | "version": 3
184 | },
185 | "file_extension": ".py",
186 | "mimetype": "text/x-python",
187 | "name": "python",
188 | "nbconvert_exporter": "python",
189 | "pygments_lexer": "ipython3",
190 | "version": "3.7.4"
191 | }
192 | },
193 | "nbformat": 4,
194 | "nbformat_minor": 1
195 | }
196 |
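One way to approach the optional challenge in this lab (stopping once the `accuracy` metric exceeds 60%) is sketched below. `myAccuracyCallback` is a hypothetical name; the pattern is the same as the loss-based callback in the lab, just reading a different key from `logs`.

```python
import tensorflow as tf

class myAccuracyCallback(tf.keras.callbacks.Callback):
    '''Hypothetical callback: halts training once accuracy exceeds 60%.'''
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        if logs.get('accuracy', 0) > 0.6:
            print("\nAccuracy is higher than 0.6 so cancelling training!")
            self.model.stop_training = True

# Usage mirrors the loss-based callback above:
# model.fit(x_train, y_train, epochs=10, callbacks=[myAccuracyCallback()])
```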
--------------------------------------------------------------------------------
/Tensorflow Developer/Introduction to TensorFlow for Artificial Intelligence, Machine Learning, and Deep Learning/W3/assignment/C1W3_Assignment.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {
6 | "id": "iQjHqsmTAVLU"
7 | },
8 | "source": [
9 | "# Week 3: Improve MNIST with Convolutions\n",
10 | "\n",
11 | "In the videos you looked at how you would improve Fashion MNIST using Convolutions. For this exercise see if you can improve MNIST to 99.5% accuracy or more by adding only a single convolutional layer and a single MaxPooling 2D layer to the model from the assignment of the previous week. \n",
12 | "\n",
13 | "You should stop training once the accuracy goes above this amount. It should happen in less than 10 epochs, so it's ok to hard code the number of epochs for training, but your training must end once it hits the above metric. If it doesn't, then you'll need to redesign your callback.\n",
14 | "\n",
15 | "When 99.5% accuracy has been hit, you should print out the string \"Reached 99.5% accuracy so cancelling training!\"\n"
16 | ]
17 | },
18 | {
19 | "cell_type": "code",
20 | "execution_count": 1,
21 | "metadata": {
22 | "id": "ZpztRwBouwYp",
23 | "tags": [
24 | "graded"
25 | ]
26 | },
27 | "outputs": [],
28 | "source": [
29 | "import os\n",
30 | "import numpy as np\n",
31 | "import tensorflow as tf\n",
32 | "from tensorflow import keras"
33 | ]
34 | },
35 | {
36 | "cell_type": "markdown",
37 | "metadata": {},
38 | "source": [
39 | "## Load the data\n",
40 | "\n",
41 | "Begin by loading the data. A couple of things to notice:\n",
42 | "\n",
 43 |     "- The file `mnist.npz` is already included in the current workspace under the `data` directory. By default, Keras's `load_data` accepts a path relative to `~/.keras/datasets`, but in this case the file is stored somewhere else, so you need to specify the full path.\n",
44 | "\n",
45 | "- `load_data` returns the train and test sets in the form of the tuples `(x_train, y_train), (x_test, y_test)` but in this exercise you will be needing only the train set so you can ignore the second tuple."
46 | ]
47 | },
48 | {
49 | "cell_type": "code",
50 | "execution_count": 2,
51 | "metadata": {
52 | "tags": [
53 | "graded"
54 | ]
55 | },
56 | "outputs": [],
57 | "source": [
58 | "# Load the data\n",
59 | "\n",
60 | "# Get current working directory\n",
61 | "current_dir = os.getcwd() \n",
62 | "\n",
63 | "# Append data/mnist.npz to the previous path to get the full path\n",
64 | "data_path = os.path.join(current_dir, \"data/mnist.npz\") \n",
65 | "\n",
66 | "# Get only training set\n",
67 | "(training_images, training_labels), _ = tf.keras.datasets.mnist.load_data(path=data_path) "
68 | ]
69 | },
70 | {
71 | "cell_type": "markdown",
72 | "metadata": {},
73 | "source": [
74 | "## Pre-processing the data\n",
75 | "\n",
 76 |     "One important step when dealing with image data is to preprocess it. During this preprocessing step you can apply transformations to the dataset that will be fed into your convolutional neural network.\n",
77 | "\n",
78 | "Here you will apply two transformations to the data:\n",
 79 |     "- Reshape the data so that it has an extra dimension. The reason for this \n",
 80 |     "is that image data is commonly represented as 3-dimensional arrays (without counting the batch dimension), where the third dimension holds the color channels. This dataset is in grayscale, so the extra dimension doesn't add information for the classification process, but it is good practice to include it regardless.\n",
81 | "\n",
82 | "\n",
83 | "- Normalize the pixel values so that these are values between 0 and 1. You can achieve this by dividing every value in the array by the maximum.\n",
84 | "\n",
85 | "Remember that these tensors are of type `numpy.ndarray` so you can use functions like [reshape](https://numpy.org/doc/stable/reference/generated/numpy.reshape.html) or [divide](https://numpy.org/doc/stable/reference/generated/numpy.divide.html) to complete the `reshape_and_normalize` function below:"
86 | ]
87 | },
88 | {
89 | "cell_type": "code",
90 | "execution_count": 7,
91 | "metadata": {
92 | "tags": [
93 | "graded"
94 | ]
95 | },
96 | "outputs": [],
97 | "source": [
98 | "# GRADED FUNCTION: reshape_and_normalize\n",
99 | "\n",
100 | "def reshape_and_normalize(images):\n",
101 | " \n",
102 | " ### START CODE HERE\n",
103 | "\n",
104 | " # Reshape the images to add an extra dimension\n",
105 | " images = images.reshape((images.shape + (1,)))\n",
106 | " \n",
107 | " # Normalize pixel values\n",
108 | " images = images / np.max(images)\n",
109 | " \n",
110 | " ### END CODE HERE\n",
111 | "\n",
112 | " return images"
113 | ]
114 | },
115 | {
116 | "cell_type": "markdown",
117 | "metadata": {},
118 | "source": [
119 | "Test your function with the next cell:"
120 | ]
121 | },
122 | {
123 | "cell_type": "code",
124 | "execution_count": 8,
125 | "metadata": {
126 | "tags": [
127 | "graded"
128 | ]
129 | },
130 | "outputs": [
131 | {
132 | "name": "stdout",
133 | "output_type": "stream",
134 | "text": [
135 | "Maximum pixel value after normalization: 1.0\n",
136 | "\n",
137 | "Shape of training set after reshaping: (60000, 28, 28, 1)\n",
138 | "\n",
139 | "Shape of one image after reshaping: (28, 28, 1)\n"
140 | ]
141 | }
142 | ],
143 | "source": [
144 | "# Reload the images in case you run this cell multiple times\n",
145 | "(training_images, _), _ = tf.keras.datasets.mnist.load_data(path=data_path) \n",
146 | "\n",
147 | "# Apply your function\n",
148 | "training_images = reshape_and_normalize(training_images)\n",
149 | "\n",
150 | "print(f\"Maximum pixel value after normalization: {np.max(training_images)}\\n\")\n",
151 | "print(f\"Shape of training set after reshaping: {training_images.shape}\\n\")\n",
152 | "print(f\"Shape of one image after reshaping: {training_images[0].shape}\")\n"
153 | ]
154 | },
155 | {
156 | "cell_type": "markdown",
157 | "metadata": {},
158 | "source": [
159 | "**Expected Output:**\n",
160 | "```\n",
161 | "Maximum pixel value after normalization: 1.0\n",
162 | "\n",
163 | "Shape of training set after reshaping: (60000, 28, 28, 1)\n",
164 | "\n",
165 | "Shape of one image after reshaping: (28, 28, 1)\n",
166 | "```"
167 | ]
168 | },
169 | {
170 | "cell_type": "markdown",
171 | "metadata": {},
172 | "source": [
173 | "## Defining your callback\n",
174 | "\n",
175 | "Now complete the callback that will ensure that training will stop after an accuracy of 99.5% is reached:"
176 | ]
177 | },
178 | {
179 | "cell_type": "code",
180 | "execution_count": 19,
181 | "metadata": {
182 | "tags": [
183 | "graded"
184 | ]
185 | },
186 | "outputs": [],
187 | "source": [
188 | "# GRADED CLASS: myCallback\n",
189 | "### START CODE HERE\n",
190 | "\n",
191 | "# Remember to inherit from the correct class\n",
192 | "class myCallback(tf.keras.callbacks.Callback):\n",
193 | " def on_epoch_end(self, epoch, logs={}):\n",
194 | " if 'accuracy' in logs and logs['accuracy'] >= 0.995:\n",
195 | " print(\"\\nReached 99.5% accuracy so cancelling training!\")\n",
196 | " self.model.stop_training = True\n",
197 | "\n",
198 | "### END CODE HERE\n"
199 | ]
200 | },
201 | {
202 | "cell_type": "markdown",
203 | "metadata": {},
204 | "source": [
205 | "## Convolutional Model\n",
206 | "\n",
207 | "Finally, complete the `convolutional_model` function below. This function should return your convolutional neural network.\n",
208 | "\n",
209 | "**Your model should achieve an accuracy of 99.5% or more before 10 epochs to pass this assignment.**\n",
210 | "\n",
211 | "**Hints:**\n",
212 | "- You can try any architecture for the network but try to keep in mind you don't need a complex one. For instance, only one convolutional layer is needed. \n",
213 | "\n",
214 | "- In case you need extra help you can check out an architecture that works pretty well at the end of this notebook."
215 | ]
216 | },
217 | {
218 | "cell_type": "code",
219 | "execution_count": 20,
220 | "metadata": {
221 | "tags": [
222 | "graded"
223 | ]
224 | },
225 | "outputs": [],
226 | "source": [
227 | "# GRADED FUNCTION: convolutional_model\n",
228 | "def convolutional_model():\n",
229 | " ### START CODE HERE\n",
230 | "\n",
231 | " # Define the model\n",
232 | " model = tf.keras.models.Sequential([ \n",
233 | " tf.keras.layers.Conv2D(64, (3, 3), activation='relu', input_shape=(28, 28, 1)),\n",
234 | " tf.keras.layers.MaxPooling2D((2, 2)),\n",
235 | " tf.keras.layers.Conv2D(32, (5, 5), activation='relu'),\n",
236 | " tf.keras.layers.MaxPooling2D((2, 2)),\n",
237 | " tf.keras.layers.Flatten(),\n",
238 | " tf.keras.layers.Dense(128, activation='sigmoid'),\n",
239 | " tf.keras.layers.Dense(10, activation='softmax')\n",
240 | " ])\n",
241 | "\n",
242 | " ### END CODE HERE\n",
243 | "\n",
244 | " # Compile the model\n",
245 | " model.compile(optimizer='adam', \n",
246 | " loss='sparse_categorical_crossentropy', \n",
247 | " metrics=['accuracy'])\n",
248 | " \n",
249 | " return model"
250 | ]
251 | },
252 | {
253 | "cell_type": "code",
254 | "execution_count": 21,
255 | "metadata": {
256 | "tags": [
257 | "graded"
258 | ]
259 | },
260 | "outputs": [
261 | {
262 | "name": "stdout",
263 | "output_type": "stream",
264 | "text": [
265 | "Epoch 1/10\n",
266 | "1875/1875 [==============================] - 92s 49ms/step - loss: 0.1736 - accuracy: 0.9512\n",
267 | "Epoch 2/10\n",
268 | "1875/1875 [==============================] - 88s 47ms/step - loss: 0.0470 - accuracy: 0.9857\n",
269 | "Epoch 3/10\n",
270 | "1875/1875 [==============================] - 91s 49ms/step - loss: 0.0324 - accuracy: 0.9902\n",
271 | "Epoch 4/10\n",
272 | "1875/1875 [==============================] - 91s 49ms/step - loss: 0.0231 - accuracy: 0.9931\n",
273 | "Epoch 5/10\n",
274 | "1875/1875 [==============================] - 91s 48ms/step - loss: 0.0179 - accuracy: 0.9948\n",
275 | "Epoch 6/10\n",
276 | "1874/1875 [============================>.] - ETA: 0s - loss: 0.0131 - accuracy: 0.9959\n",
277 | "Reached 99.5% accuracy so cancelling training!\n",
278 | "1875/1875 [==============================] - 90s 48ms/step - loss: 0.0131 - accuracy: 0.9959\n"
279 | ]
280 | }
281 | ],
282 | "source": [
283 | "# Save your untrained model\n",
284 | "model = convolutional_model()\n",
285 | "\n",
286 | "# Instantiate the callback class\n",
287 | "callbacks = myCallback()\n",
288 | "\n",
289 | "# Train your model (this can take up to 5 minutes)\n",
290 | "history = model.fit(training_images, training_labels, epochs=10, callbacks=[callbacks])"
291 | ]
292 | },
293 | {
294 | "cell_type": "markdown",
295 | "metadata": {},
296 | "source": [
297 |     "If you see the message that you defined in your callback printed out after fewer than 10 epochs, it means your callback worked as expected. You can also double-check by running the following cell:"
298 | ]
299 | },
300 | {
301 | "cell_type": "code",
302 | "execution_count": 22,
303 | "metadata": {
304 | "tags": [
305 | "graded"
306 | ]
307 | },
308 | "outputs": [
309 | {
310 | "name": "stdout",
311 | "output_type": "stream",
312 | "text": [
313 | "Your model was trained for 6 epochs\n"
314 | ]
315 | }
316 | ],
317 | "source": [
318 | "print(f\"Your model was trained for {len(history.epoch)} epochs\")"
319 | ]
320 | },
321 | {
322 | "cell_type": "markdown",
323 | "metadata": {},
324 | "source": [
325 | "## Need more help?\n",
326 | "\n",
327 | "Run the following cell to see an architecture that works well for the problem at hand:"
328 | ]
329 | },
330 | {
331 | "cell_type": "code",
332 | "execution_count": 23,
333 | "metadata": {},
334 | "outputs": [
335 | {
336 | "name": "stdout",
337 | "output_type": "stream",
338 | "text": [
339 | "\n",
340 | " - A Conv2D layer with 32 filters, a kernel_size of 3x3, ReLU activation function and an input shape that matches that of every image in the training set\n",
341 | " - A MaxPooling2D layer with a pool_size of 2x2\n",
342 | " - A Flatten layer with no arguments\n",
343 | " - A Dense layer with 128 units and ReLU activation function\n",
344 | " - A Dense layer with 10 units and softmax activation function\n",
345 | "\n"
346 | ]
347 | }
348 | ],
349 | "source": [
350 | "# WE STRONGLY RECOMMEND YOU TO TRY YOUR OWN ARCHITECTURES FIRST\n",
351 | "# AND ONLY RUN THIS CELL IF YOU WISH TO SEE AN ANSWER\n",
352 | "\n",
353 | "import base64\n",
354 | "\n",
355 | "encoded_answer = \"CiAgIC0gQSBDb252MkQgbGF5ZXIgd2l0aCAzMiBmaWx0ZXJzLCBhIGtlcm5lbF9zaXplIG9mIDN4MywgUmVMVSBhY3RpdmF0aW9uIGZ1bmN0aW9uIGFuZCBhbiBpbnB1dCBzaGFwZSB0aGF0IG1hdGNoZXMgdGhhdCBvZiBldmVyeSBpbWFnZSBpbiB0aGUgdHJhaW5pbmcgc2V0CiAgIC0gQSBNYXhQb29saW5nMkQgbGF5ZXIgd2l0aCBhIHBvb2xfc2l6ZSBvZiAyeDIKICAgLSBBIEZsYXR0ZW4gbGF5ZXIgd2l0aCBubyBhcmd1bWVudHMKICAgLSBBIERlbnNlIGxheWVyIHdpdGggMTI4IHVuaXRzIGFuZCBSZUxVIGFjdGl2YXRpb24gZnVuY3Rpb24KICAgLSBBIERlbnNlIGxheWVyIHdpdGggMTAgdW5pdHMgYW5kIHNvZnRtYXggYWN0aXZhdGlvbiBmdW5jdGlvbgo=\"\n",
356 | "encoded_answer = encoded_answer.encode('ascii')\n",
357 | "answer = base64.b64decode(encoded_answer)\n",
358 | "answer = answer.decode('ascii')\n",
359 | "\n",
360 | "print(answer)"
361 | ]
362 | },
363 | {
364 | "cell_type": "markdown",
365 | "metadata": {},
366 | "source": [
367 | "**Congratulations on finishing this week's assignment!**\n",
368 | "\n",
369 | "You have successfully implemented a CNN to assist you in the image classification task. Nice job!\n",
370 | "\n",
371 | "**Keep it up!**"
372 | ]
373 | }
374 | ],
375 | "metadata": {
376 | "jupytext": {
377 | "main_language": "python"
378 | },
379 | "kernelspec": {
380 | "display_name": "Python 3",
381 | "language": "python",
382 | "name": "python3"
383 | },
384 | "language_info": {
385 | "codemirror_mode": {
386 | "name": "ipython",
387 | "version": 3
388 | },
389 | "file_extension": ".py",
390 | "mimetype": "text/x-python",
391 | "name": "python",
392 | "nbconvert_exporter": "python",
393 | "pygments_lexer": "ipython3",
394 | "version": "3.8.8"
395 | }
396 | },
397 | "nbformat": 4,
398 | "nbformat_minor": 4
399 | }
400 |
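For reference, the decoded hint above can be written out as Keras code. This is only a sketch of that hinted architecture, assuming the `(28, 28, 1)` input shape produced by `reshape_and_normalize`; it is one workable option, not the required solution.

```python
import tensorflow as tf

# Sketch of the hinted architecture: a single Conv2D + MaxPooling2D before the dense head
hinted_model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
    tf.keras.layers.MaxPooling2D((2, 2)),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(10, activation='softmax')
])

hinted_model.compile(optimizer='adam',
                     loss='sparse_categorical_crossentropy',
                     metrics=['accuracy'])
```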
--------------------------------------------------------------------------------
/Tensorflow Developer/Introduction to TensorFlow for Artificial Intelligence, Machine Learning, and Deep Learning/W3/ungraded_labs/C1_W3_Lab_2_exploring_convolutions.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
 7 |     ""
8 | ]
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "metadata": {
13 | "id": "tJTHvE8Qe5nM"
14 | },
15 | "source": [
16 | "# Ungraded Lab: Exploring Convolutions\n",
17 | "\n",
 18 |     "# Ungraded Lab: Exploring Convolutions\n",
 19 |     "\n",
 20 |     "In this lab, you will explore how convolutions work by creating a basic convolution on a 2D grayscale image. First, you will load the image by taking the [ascent](https://docs.scipy.org/doc/scipy/reference/generated/scipy.misc.ascent.html) image from [SciPy](https://scipy.org/). It's a nice, built-in picture with lots of angles and lines. "
19 | ]
20 | },
21 | {
22 | "cell_type": "code",
23 | "execution_count": null,
24 | "metadata": {
25 | "executionInfo": {
26 | "elapsed": 784,
27 | "status": "ok",
28 | "timestamp": 1639058947063,
29 | "user": {
30 | "displayName": "Chris Favila",
31 | "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64",
32 | "userId": "17311369472417335306"
33 | },
34 | "user_tz": -480
35 | },
36 | "id": "DZ5OXYiolCUi"
37 | },
38 | "outputs": [],
39 | "source": [
40 | "from scipy import misc\n",
41 | "\n",
42 | "# load the ascent image\n",
43 | "ascent_image = misc.ascent()"
44 | ]
45 | },
46 | {
47 | "cell_type": "markdown",
48 | "metadata": {
49 | "id": "SRIzxjWWfJjk"
50 | },
51 | "source": [
52 | "You can use the pyplot library to draw the image so you'll know what it looks like."
53 | ]
54 | },
55 | {
56 | "cell_type": "code",
57 | "execution_count": null,
58 | "metadata": {
59 | "colab": {
60 | "base_uri": "https://localhost:8080/",
61 | "height": 248
62 | },
63 | "executionInfo": {
64 | "elapsed": 976,
65 | "status": "ok",
66 | "timestamp": 1639059000048,
67 | "user": {
68 | "displayName": "Chris Favila",
69 | "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64",
70 | "userId": "17311369472417335306"
71 | },
72 | "user_tz": -480
73 | },
74 | "id": "R4p0cfWcfIvi",
75 | "outputId": "4565e085-4fb0-4129-8e83-ee4dc6646250"
76 | },
77 | "outputs": [],
78 | "source": [
79 | "import matplotlib.pyplot as plt\n",
80 | "\n",
81 | "# Visualize the image\n",
82 | "plt.grid(False)\n",
83 | "plt.gray()\n",
84 | "plt.axis('off')\n",
85 | "plt.imshow(ascent_image)\n",
86 | "plt.show()"
87 | ]
88 | },
89 | {
90 | "cell_type": "markdown",
91 | "metadata": {
92 | "id": "C1mhZ_ZTfPWH"
93 | },
94 | "source": [
95 | "The image is stored as a numpy array so you can create the transformed image by first copying that array. You can also get the dimensions of the image so you can loop over it later. "
96 | ]
97 | },
98 | {
99 | "cell_type": "code",
100 | "execution_count": null,
101 | "metadata": {
102 | "executionInfo": {
103 | "elapsed": 353,
104 | "status": "ok",
105 | "timestamp": 1639059122348,
106 | "user": {
107 | "displayName": "Chris Favila",
108 | "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64",
109 | "userId": "17311369472417335306"
110 | },
111 | "user_tz": -480
112 | },
113 | "id": "o5pxGq1SmJMD"
114 | },
115 | "outputs": [],
116 | "source": [
117 | "import numpy as np\n",
118 | "\n",
119 | "# Copy image to a numpy array\n",
120 | "image_transformed = np.copy(ascent_image)\n",
121 | "\n",
122 | "# Get the dimensions of the image\n",
123 | "size_x = image_transformed.shape[0]\n",
124 | "size_y = image_transformed.shape[1]"
125 | ]
126 | },
127 | {
128 | "cell_type": "markdown",
129 | "metadata": {
130 | "id": "Y7PwNkiXfddd"
131 | },
132 | "source": [
133 | "Now you can create a filter as a 3x3 array. "
134 | ]
135 | },
136 | {
137 | "cell_type": "code",
138 | "execution_count": null,
139 | "metadata": {
140 | "executionInfo": {
141 | "elapsed": 544,
142 | "status": "ok",
143 | "timestamp": 1639059236890,
144 | "user": {
145 | "displayName": "Chris Favila",
146 | "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64",
147 | "userId": "17311369472417335306"
148 | },
149 | "user_tz": -480
150 | },
151 | "id": "sN3imZannN5J"
152 | },
153 | "outputs": [],
154 | "source": [
155 | "# Experiment with different values and see the effect\n",
156 | "filter = [ [0, 1, 0], [1, -4, 1], [0, 1, 0]]\n",
157 | "\n",
158 | "# A couple more filters to try for fun!\n",
159 | "# filter = [ [-1, -2, -1], [0, 0, 0], [1, 2, 1]]\n",
160 | "# filter = [ [-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]\n",
161 | "\n",
162 |     "# If all the values in the filter don't add up to 0 or 1, you\n",
163 |     "# should probably apply a weight to make them do so.\n",
164 |     "# For example, if your filter values are 1,1,1 1,2,1 1,1,1,\n",
165 |     "# they add up to 10, so you would set a weight of 0.1 to normalize them.\n",
166 | "weight = 1"
167 | ]
168 | },
169 | {
170 | "cell_type": "markdown",
171 | "metadata": {
172 | "id": "JQmm_iBufmCz"
173 | },
174 | "source": [
175 |     "Now you can create a convolution. You will iterate over the image, leaving a 1-pixel margin, and multiply each neighbor of the current pixel by the corresponding value in the filter (i.e. the current pixel's neighbor above and to the left will be multiplied by the top-left item in the filter, etc.).\n",
176 | "\n",
177 | "You'll then multiply the result by the weight, and then ensure the result is in the range 0-255.\n",
178 | "\n",
179 | "Finally you'll load the new value into the transformed image. "
180 | ]
181 | },
182 | {
183 | "cell_type": "code",
184 | "execution_count": null,
185 | "metadata": {
186 | "executionInfo": {
187 | "elapsed": 3511,
188 | "status": "ok",
189 | "timestamp": 1639059241813,
190 | "user": {
191 | "displayName": "Chris Favila",
192 | "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64",
193 | "userId": "17311369472417335306"
194 | },
195 | "user_tz": -480
196 | },
197 | "id": "299uU2jAr90h"
198 | },
199 | "outputs": [],
200 | "source": [
201 | "# Iterate over the image\n",
202 | "for x in range(1,size_x-1):\n",
203 | " for y in range(1,size_y-1):\n",
204 | " convolution = 0.0\n",
205 | " convolution = convolution + (ascent_image[x-1, y-1] * filter[0][0])\n",
206 | " convolution = convolution + (ascent_image[x-1, y] * filter[0][1]) \n",
207 | " convolution = convolution + (ascent_image[x-1, y+1] * filter[0][2]) \n",
208 | " convolution = convolution + (ascent_image[x, y-1] * filter[1][0]) \n",
209 | " convolution = convolution + (ascent_image[x, y] * filter[1][1]) \n",
210 | " convolution = convolution + (ascent_image[x, y+1] * filter[1][2]) \n",
211 | " convolution = convolution + (ascent_image[x+1, y-1] * filter[2][0]) \n",
212 | " convolution = convolution + (ascent_image[x+1, y] * filter[2][1]) \n",
213 | " convolution = convolution + (ascent_image[x+1, y+1] * filter[2][2]) \n",
214 | " \n",
215 | " # Multiply by weight\n",
216 | " convolution = convolution * weight \n",
217 | " \n",
218 | " # Check the boundaries of the pixel values\n",
219 | " if(convolution<0):\n",
220 | " convolution=0\n",
221 | " if(convolution>255):\n",
222 | " convolution=255\n",
223 | "\n",
224 | " # Load into the transformed image\n",
225 | " image_transformed[x, y] = convolution"
226 | ]
227 | },
228 | {
229 | "cell_type": "markdown",
230 | "metadata": {
231 | "id": "6XA--vgvgDEQ"
232 | },
233 | "source": [
234 | "After the loop, you can now plot the image to see the effect of the convolution!"
235 | ]
236 | },
237 | {
238 | "cell_type": "code",
239 | "execution_count": null,
240 | "metadata": {
241 | "colab": {
242 | "base_uri": "https://localhost:8080/",
243 | "height": 269
244 | },
245 | "executionInfo": {
246 | "elapsed": 899,
247 | "status": "ok",
248 | "timestamp": 1639059523867,
249 | "user": {
250 | "displayName": "Chris Favila",
251 | "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64",
252 | "userId": "17311369472417335306"
253 | },
254 | "user_tz": -480
255 | },
256 | "id": "7oPhUPNhuGWC",
257 | "outputId": "2aee35d3-e378-441c-e497-1c215722c34c"
258 | },
259 | "outputs": [],
260 | "source": [
261 | "# Plot the image. Note the size of the axes -- they are 512 by 512\n",
262 | "plt.gray()\n",
263 | "plt.grid(False)\n",
264 | "plt.imshow(image_transformed)\n",
265 | "plt.show() "
266 | ]
267 | },
268 | {
269 | "cell_type": "markdown",
270 | "metadata": {
271 | "id": "xF0FPplsgHNh"
272 | },
273 | "source": [
274 | "## Effect of Max Pooling\n",
275 | "\n",
276 |     "The next cell will show a (2, 2) pooling. The idea here is to iterate over the image, and look at the pixel and its immediate neighbors to the right, beneath, and right-beneath. It will take the largest of these and load it into the new image. Thus, the new image will be 1/4 the size of the old -- with the dimensions on X and Y being halved by this process. You'll see that the features get maintained despite this compression!"
277 | ]
278 | },
279 | {
280 | "cell_type": "code",
281 | "execution_count": null,
282 | "metadata": {
283 | "colab": {
284 | "base_uri": "https://localhost:8080/",
285 | "height": 269
286 | },
287 | "executionInfo": {
288 | "elapsed": 1881,
289 | "status": "ok",
290 | "timestamp": 1639059312953,
291 | "user": {
292 | "displayName": "Chris Favila",
293 | "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64",
294 | "userId": "17311369472417335306"
295 | },
296 | "user_tz": -480
297 | },
298 | "id": "kDHjf-ehaBqm",
299 | "outputId": "3d0837c6-11d6-44e0-a470-8c7a2f139d88"
300 | },
301 | "outputs": [],
302 | "source": [
303 | "# Assign dimensions half the size of the original image\n",
304 | "new_x = int(size_x/2)\n",
305 | "new_y = int(size_y/2)\n",
306 | "\n",
307 | "# Create blank image with reduced dimensions\n",
308 | "newImage = np.zeros((new_x, new_y))\n",
309 | "\n",
310 | "# Iterate over the image\n",
311 | "for x in range(0, size_x, 2):\n",
312 | " for y in range(0, size_y, 2):\n",
313 | " \n",
314 | " # Store all the pixel values in the (2,2) pool\n",
315 | " pixels = []\n",
316 | " pixels.append(image_transformed[x, y])\n",
317 | " pixels.append(image_transformed[x+1, y])\n",
318 | " pixels.append(image_transformed[x, y+1])\n",
319 | " pixels.append(image_transformed[x+1, y+1])\n",
320 | "\n",
321 | " # Get only the largest value and assign to the reduced image\n",
322 | " newImage[int(x/2),int(y/2)] = max(pixels)\n",
323 | "\n",
324 | "# Plot the image. Note the size of the axes -- it is now 256 pixels instead of 512\n",
325 | "plt.gray()\n",
326 | "plt.grid(False)\n",
327 | "plt.imshow(newImage)\n",
328 | "plt.show() "
329 | ]
330 | }
331 | ],
332 | "metadata": {
333 | "accelerator": "GPU",
334 | "colab": {
335 | "collapsed_sections": [],
336 | "name": "C1_W3_Lab_2_exploring_convolutions.ipynb",
337 | "provenance": [
338 | {
339 | "file_id": "https://github.com/https-deeplearning-ai/tensorflow-1-public/blob/12_sep_2021_fixes/C1/W3/ungraded_labs/C1_W3_Lab_2_exploring_convolutions.ipynb",
340 | "timestamp": 1639058610295
341 | }
342 | ]
343 | },
344 | "kernelspec": {
345 | "display_name": "Python 3",
346 | "language": "python",
347 | "name": "python3"
348 | },
349 | "language_info": {
350 | "codemirror_mode": {
351 | "name": "ipython",
352 | "version": 3
353 | },
354 | "file_extension": ".py",
355 | "mimetype": "text/x-python",
356 | "name": "python",
357 | "nbconvert_exporter": "python",
358 | "pygments_lexer": "ipython3",
359 | "version": "3.7.4"
360 | }
361 | },
362 | "nbformat": 4,
363 | "nbformat_minor": 1
364 | }
365 |
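The explicit loops in this lab make the mechanics of convolution and pooling visible. As a cross-check, the same two operations can be written in vectorized form. The sketch below assumes SciPy's `signal` module is acceptable here (SciPy already supplies the `ascent` image used in the lab); `correlate2d` is used rather than `convolve2d` because the manual loop does not flip the kernel, and the zero-padded border differs slightly from the loop, which leaves the original 1-pixel margin untouched.

```python
import numpy as np
from scipy import misc, signal

# Vectorized cross-check of the loop-based convolution and pooling above
image = misc.ascent().astype(np.float64)
kernel = np.array([[0, 1, 0], [1, -4, 1], [0, 1, 0]])
weight = 1

# Cross-correlation matches the manual loop (no kernel flip); borders are zero-padded here
convolved = np.clip(signal.correlate2d(image, kernel, mode='same', boundary='fill') * weight, 0, 255)

# (2, 2) max pooling: group pixels into 2x2 blocks and keep the maximum of each block
h, w = convolved.shape
pooled = convolved[:h // 2 * 2, :w // 2 * 2].reshape(h // 2, 2, w // 2, 2).max(axis=(1, 3))

print(convolved.shape, pooled.shape)  # expected: (512, 512) (256, 256)
```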
--------------------------------------------------------------------------------
/Tensorflow Developer/Natural Language Processing in TensorFlow/README.md:
--------------------------------------------------------------------------------
1 |
 2 | In The Name Of GOD
 3 | 
4 |
5 | # Tensorflow notebooks
6 |
 7 | This directory contains my work (assignments & labs) for the Coursera TensorFlow Developer program, **Course 3: Natural Language Processing in TensorFlow**. Below is the list of assignments and ungraded labs for this course.
8 |
9 | ## C3 - Natural Language Processing in TensorFlow
10 |
11 | ## Week 1
12 |
13 | ### Assignment
14 |
15 | - Explore the BBC News Archive _(C3W1_Assignment.ipynb)_
16 |
17 | ### Ungraded Labs
18 |
19 | 1. Simple Tokenizing _(C3_W1_Lab_1_tokenize_basic.ipynb)_
20 | 2. Simple Sequences _(C3_W1_Lab_2_sequences_basic.ipynb)_
21 | 3. Sarcasm _(C3_W1_Lab_3_sarcasm.ipynb)_
22 |
23 | ## Week 2
24 |
25 | ### Assignment
26 |
27 | - Categorizing the BBC News Archive _(C3W2_Assignment.ipynb)_
28 |
29 | ### Ungraded Labs
30 |
31 | 1. Positive or Negative IMDB Reviews _(C3_W2_Lab_1_imdb.ipynb)_
32 | 2. Sarcasm Classifier _(C3_W2_Lab_2_sarcasm_classifier.ipynb)_
33 | 3. IMDB Review Subwords _(C3_W2_Lab_3_imdb_subwords.ipynb)_
34 |
35 | ## Week 3
36 |
37 | ### Assignment
38 |
39 | - Exploring Overfitting in NLP _(C3W3_Assignment.ipynb)_
40 |
41 | ### Ungraded Labs
42 |
43 | 1. IMDB Subwords 8K with Single Layer LSTM _(C3_W3_Lab_1_single_layer_LSTM.ipynb)_
44 | 2. IMDB Subwords 8K with Multi Layer LSTM _(C3_W3_Lab_2_multiple_layer_LSTM.ipynb)_
45 | 3. IMDB Subwords 8K with 1D Convolutional Layer _(C3_W3_Lab_3_Conv1D.ipynb)_
46 | 4. IMDB Reviews with GRU (and optional LSTM and Conv1D) _(C3_W3_Lab_4_imdb_reviews_with_GRU_LSTM_Conv1D.ipynb)_
47 | 5. Sarcasm with Bidirectional LSTM _(C3_W3_Lab_5_sarcasm_with_bi_LSTM.ipynb)_
48 | 6. Sarcasm with 1D Convolutional Layer _(C3_W3_Lab_6_sarcasm_with_1D_convolutional.ipynb)_
49 |
50 | ## Week 4
51 |
52 | ### Assignment
53 |
54 | - Writing Shakespeare with LSTMs _(C3W4_Assignment.ipynb)_
55 |
56 | ### Ungraded Labs
57 |
58 | 1. NLP with Irish Music _(C3_W4_Lab_1.ipynb)_
59 | 2. Generating Poetry from Irish Lyrics _(C3_W4_Lab_2_irish_lyrics.ipynb)_
60 |
--------------------------------------------------------------------------------
/Tensorflow Developer/Natural Language Processing in TensorFlow/W1/ungraded_labs/C3_W1_Lab_1_tokenize_basic.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
 7 |     ""
8 | ]
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "metadata": {
13 | "id": "rL-LzAqpoGLC"
14 | },
15 | "source": [
16 | "# Ungraded Lab: Tokenizer Basics\n",
17 | "\n",
 18 |     "In most NLP tasks, the initial step in preparing your data is to extract a vocabulary of words from your *corpus* (i.e. input texts). You will need to define how to turn the texts into numerical representations which can be used to train a neural network. These representations are called *tokens*, and TensorFlow and Keras make it easy to generate them using their APIs. You will see how to do that in the next cells."
19 | ]
20 | },
21 | {
22 | "cell_type": "markdown",
23 | "metadata": {
24 | "id": "-nt3uR9TPrUt"
25 | },
26 | "source": [
27 | "## Generating the vocabulary\n",
28 | "\n",
 29 |     "In this notebook, you will first look at how you can provide a lookup dictionary for each word. The code below takes a list of sentences, then takes each word in those sentences and assigns it to an integer. This is done using the [fit_on_texts()](https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/text/Tokenizer#fit_on_texts) method and you can get the result by looking at the `word_index` property. More frequent words have a lower index."
30 | ]
31 | },
32 | {
33 | "cell_type": "code",
34 | "execution_count": null,
35 | "metadata": {
36 | "id": "zaCMcjMQifQc"
37 | },
38 | "outputs": [],
39 | "source": [
40 | "from tensorflow.keras.preprocessing.text import Tokenizer\n",
41 | "\n",
42 | "# Define input sentences\n",
43 | "sentences = [\n",
44 | " 'i love my dog',\n",
45 | " 'I, love my cat'\n",
46 | " ]\n",
47 | "\n",
48 | "# Initialize the Tokenizer class\n",
49 | "tokenizer = Tokenizer(num_words = 100)\n",
50 | "\n",
51 | "# Generate indices for each word in the corpus\n",
52 | "tokenizer.fit_on_texts(sentences)\n",
53 | "\n",
54 | "# Get the indices and print it\n",
55 | "word_index = tokenizer.word_index\n",
56 | "print(word_index)"
57 | ]
58 | },
59 | {
60 | "cell_type": "markdown",
61 | "metadata": {
62 | "id": "uTPWesNaRdX2"
63 | },
64 | "source": [
65 | "The `num_words` parameter used in the initializer specifies the maximum number of words minus one (based on frequency) to keep when generating sequences. You will see this in a later exercise. For now, the important thing to note is it does not affect how the `word_index` dictionary is generated. You can try passing `1` instead of `100` as shown on the next cell and you will arrive at the same `word_index`. \n",
66 | "\n",
67 | "Also notice that by default, all punctuation is ignored and words are converted to lower case. You can override these behaviors by modifying the `filters` and `lower` arguments of the `Tokenizer` class as described [here](https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/text/Tokenizer#arguments). You can try modifying these in the next cell below and compare the output to the one generated above."
68 | ]
69 | },
70 | {
71 | "cell_type": "code",
72 | "execution_count": null,
73 | "metadata": {
74 | "id": "VX1A1pDNoVKm"
75 | },
76 | "outputs": [],
77 | "source": [
78 | "# Define input sentences\n",
79 | "sentences = [\n",
80 | " 'i love my dog',\n",
81 | " 'I, love my cat',\n",
82 | " 'You love my dog!'\n",
83 | "]\n",
84 | "\n",
85 | "# Initialize the Tokenizer class\n",
86 | "tokenizer = Tokenizer(num_words = 1)\n",
87 | "\n",
88 | "# Generate indices for each word in the corpus\n",
89 | "tokenizer.fit_on_texts(sentences)\n",
90 | "\n",
91 | "# Get the indices and print it\n",
92 | "word_index = tokenizer.word_index\n",
93 | "print(word_index)"
94 | ]
95 | },
96 | {
97 | "cell_type": "markdown",
98 | "metadata": {
99 | "id": "c9LFfwBffDaj"
100 | },
101 | "source": [
102 | "That concludes this short exercise on tokenizing input texts!"
103 | ]
104 | }
105 | ],
106 | "metadata": {
107 | "colab": {
108 | "collapsed_sections": [],
109 | "name": "C3_W1_Lab_1_tokenize_basic.ipynb",
110 | "private_outputs": true,
111 | "provenance": [
112 | {
113 | "file_id": "https://github.com/https-deeplearning-ai/tensorflow-1-public/blob/adding_C3/C3/W1/ungraded_labs/C3_W1_Lab_1_tokenize_basic.ipynb",
114 | "timestamp": 1642431620601
115 | }
116 | ]
117 | },
118 | "kernelspec": {
119 | "display_name": "Python 3",
120 | "language": "python",
121 | "name": "python3"
122 | },
123 | "language_info": {
124 | "codemirror_mode": {
125 | "name": "ipython",
126 | "version": 3
127 | },
128 | "file_extension": ".py",
129 | "mimetype": "text/x-python",
130 | "name": "python",
131 | "nbconvert_exporter": "python",
132 | "pygments_lexer": "ipython3",
133 | "version": "3.7.4"
134 | }
135 | },
136 | "nbformat": 4,
137 | "nbformat_minor": 1
138 | }
139 |
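The lab above suggests experimenting with the `filters` and `lower` arguments of the `Tokenizer`, but leaves the modified call to the reader. A minimal sketch of that experiment, reusing the same sentences, could look like this:

```python
from tensorflow.keras.preprocessing.text import Tokenizer

sentences = [
    'i love my dog',
    'I, love my cat',
    'You love my dog!'
]

# Keep punctuation (empty filter string) and preserve letter case
tokenizer = Tokenizer(num_words=100, filters='', lower=False)
tokenizer.fit_on_texts(sentences)

# 'i' and 'I,' are now separate entries, and so are 'dog' and 'dog!'
print(tokenizer.word_index)
```

With an empty filter string, punctuation stays attached to the tokens, and with `lower=False` the original casing is preserved, so the resulting `word_index` is larger than the one generated in the lab above.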
--------------------------------------------------------------------------------
/Tensorflow Developer/Natural Language Processing in TensorFlow/W1/ungraded_labs/C3_W1_Lab_2_sequences_basic.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
 7 |     ""
8 | ]
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "metadata": {
13 | "id": "1SmE2CODfmmL"
14 | },
15 | "source": [
16 | "# Ungraded Lab: Generating Sequences and Padding\n",
17 | "\n",
18 | "In this lab, you will look at converting your input sentences into a sequence of tokens. Similar to images in the previous course, you need to prepare text data with uniform size before feeding it to your model. You will see how to do these in the next sections."
19 | ]
20 | },
21 | {
22 | "cell_type": "markdown",
23 | "metadata": {
24 | "id": "JiFUJg-lmTm6"
25 | },
26 | "source": [
27 | "## Text to Sequences\n",
28 | "\n",
 29 |     "In the previous lab, you saw how to generate a `word_index` dictionary to generate tokens for each word in your corpus. You can then use the result to convert each of the input sentences into a sequence of tokens. That is done using the [`texts_to_sequences()`](https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/text/Tokenizer#texts_to_sequences) method as shown below."
30 | ]
31 | },
32 | {
33 | "cell_type": "code",
34 | "execution_count": null,
35 | "metadata": {
36 | "id": "ArOPfBwyZtln"
37 | },
38 | "outputs": [],
39 | "source": [
40 | "from tensorflow.keras.preprocessing.text import Tokenizer\n",
41 | "from tensorflow.keras.preprocessing.sequence import pad_sequences\n",
42 | "\n",
43 | "# Define your input texts\n",
44 | "sentences = [\n",
45 | " 'I love my dog',\n",
46 | " 'I love my cat',\n",
47 | " 'You love my dog!',\n",
48 | " 'Do you think my dog is amazing?'\n",
49 | "]\n",
50 | "\n",
51 | "# Initialize the Tokenizer class\n",
 52 |     "tokenizer = Tokenizer(num_words = 100, oov_token=\"<OOV>\")\n",
53 | "\n",
54 | "# Tokenize the input sentences\n",
55 | "tokenizer.fit_on_texts(sentences)\n",
56 | "\n",
57 | "# Get the word index dictionary\n",
58 | "word_index = tokenizer.word_index\n",
59 | "\n",
60 | "# Generate list of token sequences\n",
61 | "sequences = tokenizer.texts_to_sequences(sentences)\n",
62 | "\n",
63 | "# Print the result\n",
64 | "print(\"\\nWord Index = \" , word_index)\n",
65 | "print(\"\\nSequences = \" , sequences)"
66 | ]
67 | },
68 | {
69 | "cell_type": "markdown",
70 | "metadata": {
71 | "id": "z56pEkF2p8c-"
72 | },
73 | "source": [
74 | "## Padding\n",
75 | "\n",
76 | "As mentioned in the lecture, you will usually need to pad the sequences into a uniform length because that is what your model expects. You can use the [pad_sequences](https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/sequence/pad_sequences) for that. By default, it will pad according to the length of the longest sequence. You can override this with the `maxlen` argument to define a specific length. Feel free to play with the [other arguments](https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/sequence/pad_sequences#args) shown in class and compare the result."
77 | ]
78 | },
79 | {
80 | "cell_type": "code",
81 | "execution_count": null,
82 | "metadata": {
83 | "id": "qljgx1eSlEse"
84 | },
85 | "outputs": [],
86 | "source": [
87 | "# Pad the sequences to a uniform length\n",
88 | "padded = pad_sequences(sequences, maxlen=5)\n",
89 | "\n",
90 | "# Print the result\n",
91 | "print(\"\\nPadded Sequences:\")\n",
92 | "print(padded)"
93 | ]
94 | },
95 | {
96 | "cell_type": "markdown",
97 | "metadata": {
98 | "id": "btEb9jI0k7Ip"
99 | },
100 | "source": [
101 | "## Out-of-vocabulary tokens\n",
102 | "\n",
103 | "Notice that you defined an `oov_token` when the `Tokenizer` was initialized earlier. This will be used when you have input words that are not found in the `word_index` dictionary. For example, you may decide to collect more text after your initial training and decide to not re-generate the `word_index`. You will see this in action in the cell below. Notice that the token `1` is inserted for words that are not found in the dictionary."
104 | ]
105 | },
106 | {
107 | "cell_type": "code",
108 | "execution_count": null,
109 | "metadata": {
110 | "id": "4fW1NWTok72V"
111 | },
112 | "outputs": [],
113 | "source": [
114 | "# Try with words that the tokenizer wasn't fit to\n",
115 | "test_data = [\n",
116 | " 'i really love my dog',\n",
117 | " 'my dog loves my manatee'\n",
118 | "]\n",
119 | "\n",
120 | "# Generate the sequences\n",
121 | "test_seq = tokenizer.texts_to_sequences(test_data)\n",
122 | "\n",
123 | "# Print the word index dictionary\n",
124 | "print(\"\\nWord Index = \" , word_index)\n",
125 | "\n",
126 | "# Print the sequences with OOV\n",
127 | "print(\"\\nTest Sequence = \", test_seq)\n",
128 | "\n",
129 | "# Print the padded result\n",
130 | "padded = pad_sequences(test_seq, maxlen=10)\n",
131 | "print(\"\\nPadded Test Sequence: \")\n",
132 | "print(padded)"
133 | ]
134 | },
135 | {
136 | "cell_type": "markdown",
137 | "metadata": {
138 | "id": "UBlQIPBqskAJ"
139 | },
140 | "source": [
141 | "This concludes another introduction to text data preprocessing. So far, you've just been using dummy data. In the next exercise, you will be applying the same concepts to a real-world and much larger dataset."
142 | ]
143 | }
144 | ],
145 | "metadata": {
146 | "colab": {
147 | "collapsed_sections": [],
148 | "name": "C3_W1_Lab_2_sequences_basic.ipynb",
149 | "private_outputs": true,
150 | "provenance": [
151 | {
152 | "file_id": "https://github.com/https-deeplearning-ai/tensorflow-1-public/blob/adding_C3/C3/W1/ungraded_labs/C3_W1_Lab_2_sequences_basic.ipynb",
153 | "timestamp": 1642431659610
154 | }
155 | ],
156 | "toc_visible": true
157 | },
158 | "kernelspec": {
159 | "display_name": "Python 3",
160 | "language": "python",
161 | "name": "python3"
162 | },
163 | "language_info": {
164 | "codemirror_mode": {
165 | "name": "ipython",
166 | "version": 3
167 | },
168 | "file_extension": ".py",
169 | "mimetype": "text/x-python",
170 | "name": "python",
171 | "nbconvert_exporter": "python",
172 | "pygments_lexer": "ipython3",
173 | "version": "3.7.4"
174 | }
175 | },
176 | "nbformat": 4,
177 | "nbformat_minor": 1
178 | }
179 |
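The lab above points to the other `pad_sequences` arguments without showing them in use. Here is a small self-contained sketch contrasting the defaults (pre-padding and pre-truncating) with `padding='post'` and `truncating='post'`:

```python
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences

sentences = [
    'I love my dog',
    'Do you think my dog is amazing?'
]

tokenizer = Tokenizer(num_words=100, oov_token="<OOV>")
tokenizer.fit_on_texts(sentences)
sequences = tokenizer.texts_to_sequences(sentences)

# Defaults pad and truncate at the *front* of each sequence
print(pad_sequences(sequences, maxlen=5))

# padding='post' and truncating='post' pad and cut at the *end* instead
print(pad_sequences(sequences, maxlen=5, padding='post', truncating='post'))
```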
--------------------------------------------------------------------------------
/Tensorflow Developer/Natural Language Processing in TensorFlow/W1/ungraded_labs/C3_W1_Lab_3_sarcasm.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
 7 |     ""
8 | ]
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "metadata": {
13 | "id": "SdNGfEo2u-r7"
14 | },
15 | "source": [
16 | "# Ungraded Lab: Tokenizing the Sarcasm Dataset\n",
17 | "\n",
18 | "In this lab, you will be applying what you've learned in the past two exercises to preprocess the [News Headlines Dataset for Sarcasm Detection](https://www.kaggle.com/rmisra/news-headlines-dataset-for-sarcasm-detection/home). This contains news headlines which are labeled as sarcastic or not. You will revisit this dataset in later labs so it is good to be acquainted with it now."
19 | ]
20 | },
21 | {
22 | "cell_type": "markdown",
23 | "metadata": {
24 | "id": "Twhyfjg0xTkg"
25 | },
26 | "source": [
27 | "## Download and inspect the dataset\n",
28 | "\n",
29 | "First, you will fetch the dataset and preview some of its elements."
30 | ]
31 | },
32 | {
33 | "cell_type": "code",
34 | "execution_count": null,
35 | "metadata": {
36 | "id": "33W129a7xgoJ"
37 | },
38 | "outputs": [],
39 | "source": [
40 | "# Download the dataset\n",
41 | "!wget https://storage.googleapis.com/tensorflow-1-public/course3/sarcasm.json"
42 | ]
43 | },
44 | {
45 | "cell_type": "markdown",
46 | "metadata": {
47 | "id": "zJHdzh9FyWa2"
48 | },
49 | "source": [
50 | "The dataset is saved as a [JSON](https://www.json.org/json-en.html) file and you can use Python's [`json`](https://docs.python.org/3/library/json.html) module to load it into your workspace. The cell below unpacks the JSON file into a list."
51 | ]
52 | },
53 | {
54 | "cell_type": "code",
55 | "execution_count": null,
56 | "metadata": {
57 | "id": "OkaBMeNDwMel"
58 | },
59 | "outputs": [],
60 | "source": [
61 | "import json\n",
62 | "\n",
63 | "# Load the JSON file\n",
64 | "with open(\"./sarcasm.json\", 'r') as f:\n",
65 | " datastore = json.load(f)"
66 | ]
67 | },
68 | {
69 | "cell_type": "markdown",
70 | "metadata": {
71 | "id": "D2aSBvJVzRNV"
72 | },
73 | "source": [
74 | "You can inspect a few of the elements in the list. You will notice that each element consists of a dictionary with a URL link, the actual headline, and a label named `is_sarcastic`. Printed below are two elements with contrasting labels."
75 | ]
76 | },
77 | {
78 | "cell_type": "code",
79 | "execution_count": null,
80 | "metadata": {
81 | "id": "RiiFcWU2xnMJ"
82 | },
83 | "outputs": [],
84 | "source": [
85 | "# Non-sarcastic headline\n",
86 | "print(datastore[0])\n",
87 | "\n",
88 | "# Sarcastic headline\n",
89 | "print(datastore[20000])"
90 | ]
91 | },
92 | {
93 | "cell_type": "markdown",
94 | "metadata": {
95 | "id": "dPuH0bBiz8LJ"
96 | },
97 | "source": [
 98 |     "With that, you can collect all URLs, headlines, and labels for easier processing when using the tokenizer. For this lab, you will only need the headlines but we included the code to collect the URLs and labels as well."
99 | ]
100 | },
101 | {
102 | "cell_type": "code",
103 | "execution_count": null,
104 | "metadata": {
105 | "id": "9pxLUQJCxkNB"
106 | },
107 | "outputs": [],
108 | "source": [
109 | "# Initialize lists\n",
110 | "sentences = [] \n",
111 | "labels = []\n",
112 | "urls = []\n",
113 | "\n",
114 | "# Append elements in the dictionaries into each list\n",
115 | "for item in datastore:\n",
116 | " sentences.append(item['headline'])\n",
117 | " labels.append(item['is_sarcastic'])\n",
118 | " urls.append(item['article_link'])"
119 | ]
120 | },
121 | {
122 | "cell_type": "markdown",
123 | "metadata": {
124 | "id": "lBHSXJ5V0qqK"
125 | },
126 | "source": [
127 | "## Preprocessing the headlines\n",
128 | "\n",
129 | "You can convert the `sentences` list above into padded sequences by using the same methods you've been using in the past exercises. The cell below generates the `word_index` dictionary and generates the list of padded sequences for each of the 26,709 headlines."
130 | ]
131 | },
132 | {
133 | "cell_type": "code",
134 | "execution_count": null,
135 | "metadata": {
136 | "id": "5OSTw3uJuvmY"
137 | },
138 | "outputs": [],
139 | "source": [
140 | "from tensorflow.keras.preprocessing.text import Tokenizer\n",
141 | "from tensorflow.keras.preprocessing.sequence import pad_sequences\n",
142 | "\n",
143 | "# Initialize the Tokenizer class\n",
144 |     "tokenizer = Tokenizer(oov_token=\"<OOV>\")\n",
145 | "\n",
146 | "# Generate the word index dictionary\n",
147 | "tokenizer.fit_on_texts(sentences)\n",
148 | "\n",
149 | "# Print the length of the word index\n",
150 | "word_index = tokenizer.word_index\n",
151 | "print(f'number of words in word_index: {len(word_index)}')\n",
152 | "\n",
153 | "# Print the word index\n",
154 | "print(f'word_index: {word_index}')\n",
155 | "print()\n",
156 | "\n",
157 | "# Generate and pad the sequences\n",
158 | "sequences = tokenizer.texts_to_sequences(sentences)\n",
159 | "padded = pad_sequences(sequences, padding='post')\n",
160 | "\n",
161 | "# Print a sample headline\n",
162 | "index = 2\n",
163 | "print(f'sample headline: {sentences[index]}')\n",
164 | "print(f'padded sequence: {padded[index]}')\n",
165 | "print()\n",
166 | "\n",
167 | "# Print dimensions of padded sequences\n",
168 | "print(f'shape of padded sequences: {padded.shape}')"
169 | ]
170 | },
171 | {
172 | "cell_type": "markdown",
173 | "metadata": {
174 | "id": "4wyLF5T036W8"
175 | },
176 | "source": [
177 | "This concludes the short demo on using text data preprocessing APIs on a relatively large dataset. Next week, you will start building models that can be trained on these output sequences. See you there!"
178 | ]
179 | }
180 | ],
181 | "metadata": {
182 | "colab": {
183 | "collapsed_sections": [],
184 | "name": "C3_W1_Lab_3_sarcasm.ipynb",
185 | "private_outputs": true,
186 | "provenance": [
187 | {
188 | "file_id": "https://github.com/https-deeplearning-ai/tensorflow-1-public/blob/c3_w1_l1_missing_link/C3/W1/ungraded_labs/C3_W1_Lab_3_sarcasm.ipynb",
189 | "timestamp": 1642431645591
190 | }
191 | ],
192 | "toc_visible": true
193 | },
194 | "kernelspec": {
195 | "display_name": "Python 3",
196 | "language": "python",
197 | "name": "python3"
198 | },
199 | "language_info": {
200 | "codemirror_mode": {
201 | "name": "ipython",
202 | "version": 3
203 | },
204 | "file_extension": ".py",
205 | "mimetype": "text/x-python",
206 | "name": "python",
207 | "nbconvert_exporter": "python",
208 | "pygments_lexer": "ipython3",
209 | "version": "3.7.4"
210 | }
211 | },
212 | "nbformat": 4,
213 | "nbformat_minor": 1
214 | }
215 |
--------------------------------------------------------------------------------
/Tensorflow Developer/Natural Language Processing in TensorFlow/W3/assignment/images/invalid-1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Tensorflow Developer/Natural Language Processing in TensorFlow/W3/assignment/images/invalid-1.jpg
--------------------------------------------------------------------------------
/Tensorflow Developer/Natural Language Processing in TensorFlow/W3/assignment/images/valid-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Tensorflow Developer/Natural Language Processing in TensorFlow/W3/assignment/images/valid-1.png
--------------------------------------------------------------------------------
/Tensorflow Developer/Natural Language Processing in TensorFlow/W3/assignment/images/valid-2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Tensorflow Developer/Natural Language Processing in TensorFlow/W3/assignment/images/valid-2.jpg
--------------------------------------------------------------------------------
/Tensorflow Developer/Natural Language Processing in TensorFlow/W3/assignment/images/valid-3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Tensorflow Developer/Natural Language Processing in TensorFlow/W3/assignment/images/valid-3.jpg
--------------------------------------------------------------------------------
/Tensorflow Developer/Natural Language Processing in TensorFlow/W3/ungraded_labs/C3_W3_Lab_1_single_layer_LSTM.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
 7 |     ""
8 | ]
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "metadata": {
13 | "id": "rFiCyWQ-NC5D"
14 | },
15 | "source": [
16 | "# Ungraded Lab: Single Layer LSTM\n",
17 | "\n",
 18 |     "So far in this course, you've been using mostly basic dense layers and embeddings to build your models. These detect how the combination of words (or subwords) in the input text determines the output class. In the labs this week, you will look at other layers you can use to build your models. Most of these will deal with *Recurrent Neural Networks*, a kind of model that takes the ordering of inputs into account. This makes them suitable for applications such as parts-of-speech tagging, music composition, language translation, and the like. For example, you may want your model to differentiate sentiments even if the words used in two sentences are the same:\n",
19 | "\n",
20 | "```\n",
21 | "1: My friends do like the movie but I don't. --> negative review\n",
22 | "2: My friends don't like the movie but I do. --> positive review\n",
23 | "```\n",
24 | "\n",
25 | "The first layer you will be looking at is the [*LSTM (Long Short-Term Memory)*](https://www.tensorflow.org/api_docs/python/tf/keras/layers/LSTM). In a nutshell, it computes the state of a current timestep and passes it on to the next timesteps where this state is also updated. The process repeats until the final timestep where the output computation is affected by all previous states. Not only that, it can be configured to be bidirectional so you can get the relationship of later words to earlier ones. If you want to go in-depth of how these processes work, you can look at the [Sequence Models](https://www.coursera.org/learn/nlp-sequence-models) course of the Deep Learning Specialization. For this lab, you can take advantage of Tensorflow's APIs that implements the complexities of these layers for you. This makes it easy to just plug it in to your model. Let's see how to do that in the next sections below. "
26 | ]
27 | },
28 | {
29 | "cell_type": "markdown",
30 | "metadata": {
31 | "id": "tfp2tBZYnE5b"
32 | },
33 | "source": [
34 | "## Download the dataset\n",
35 | "\n",
36 | "For this lab, you will use the `subwords8k` pre-tokenized [IMDB Reviews dataset](https://www.tensorflow.org/datasets/catalog/imdb_reviews). You will load it via Tensorflow Datasets as you've done last week:"
37 | ]
38 | },
39 | {
40 | "cell_type": "code",
41 | "execution_count": null,
42 | "metadata": {
43 | "id": "AW-4Vo4TMUHb"
44 | },
45 | "outputs": [],
46 | "source": [
47 | "import tensorflow_datasets as tfds\n",
48 | "\n",
49 | "# Download the subword encoded pretokenized dataset\n",
50 | "dataset, info = tfds.load('imdb_reviews/subwords8k', with_info=True, as_supervised=True)\n",
51 | "\n",
52 | "# Get the tokenizer\n",
53 | "tokenizer = info.features['text'].encoder"
54 | ]
55 | },
56 | {
57 | "cell_type": "markdown",
58 | "metadata": {
59 | "id": "YfL_2x3SoXeu"
60 | },
61 | "source": [
62 | "## Prepare the dataset\n",
63 | "\n",
64 | "You can then get the train and test splits and generate padded batches. \n",
65 | "\n",
66 | "*Note: To make the training go faster in this lab, you will increase the batch size that Laurence used in the lecture. In particular, you will use `256` and this takes roughly a minute to train per epoch. In the video, Laurence used `16` which takes around 4 minutes per epoch.*"
67 | ]
68 | },
69 | {
70 | "cell_type": "code",
71 | "execution_count": null,
72 | "metadata": {
73 | "id": "ffvRUI0_McDS"
74 | },
75 | "outputs": [],
76 | "source": [
77 | "BUFFER_SIZE = 10000\n",
78 | "BATCH_SIZE = 256\n",
79 | "\n",
80 | "# Get the train and test splits\n",
81 | "train_data, test_data = dataset['train'], dataset['test'], \n",
82 | "\n",
83 | "# Shuffle the training data\n",
84 | "train_dataset = train_data.shuffle(BUFFER_SIZE)\n",
85 | "\n",
86 | "# Batch and pad the datasets to the maximum length of the sequences\n",
87 | "train_dataset = train_dataset.padded_batch(BATCH_SIZE)\n",
88 | "test_dataset = test_data.padded_batch(BATCH_SIZE)"
89 | ]
90 | },
91 | {
92 | "cell_type": "markdown",
93 | "metadata": {
94 | "id": "4HkUeYNWoi9j"
95 | },
96 | "source": [
97 | "## Build and compile the model\n",
98 | "\n",
99 | "Now you will build the model. You will simply swap the `Flatten` or `GlobalAveragePooling1D` from before with an `LSTM` layer. Moreover, you will nest it inside a [Biderectional](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Bidirectional) layer so the passing of the sequence information goes both forwards and backwards. These additional computations will naturally make the training go slower than the models you built last week. You should take this into account when using RNNs in your own applications."
100 | ]
101 | },
102 | {
103 | "cell_type": "code",
104 | "execution_count": null,
105 | "metadata": {
106 | "id": "FxQooMEkMgur"
107 | },
108 | "outputs": [],
109 | "source": [
110 | "import tensorflow as tf\n",
111 | "\n",
112 | "# Hyperparameters\n",
113 | "embedding_dim = 64\n",
114 | "lstm_dim = 64\n",
115 | "dense_dim = 64\n",
116 | "\n",
117 | "# Build the model\n",
118 | "model = tf.keras.Sequential([\n",
119 | " tf.keras.layers.Embedding(tokenizer.vocab_size, embedding_dim),\n",
120 | " tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(lstm_dim)),\n",
121 | " tf.keras.layers.Dense(dense_dim, activation='relu'),\n",
122 | " tf.keras.layers.Dense(1, activation='sigmoid')\n",
123 | "])\n",
124 | "\n",
125 | "# Print the model summary\n",
126 | "model.summary()"
127 | ]
128 | },
129 | {
130 | "cell_type": "code",
131 | "execution_count": null,
132 | "metadata": {
133 | "id": "Uip7QOVzMoMq"
134 | },
135 | "outputs": [],
136 | "source": [
137 | "# Set the training parameters\n",
138 | "model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])"
139 | ]
140 | },
141 | {
142 | "cell_type": "markdown",
143 | "metadata": {
144 | "id": "EEKm-MzDs59w"
145 | },
146 | "source": [
147 | "## Train the model\n",
148 | "\n",
149 | "Now you can start training. Using the default parameters above, you should reach around 98% training accuracy and 82% validation accuracy. You can visualize the results using the same plot utilities. See if you can still improve on this by modifying the hyperparameters or by training with more epochs."
150 | ]
151 | },
152 | {
153 | "cell_type": "code",
154 | "execution_count": null,
155 | "metadata": {
156 | "id": "7mlgzaRDMtF6"
157 | },
158 | "outputs": [],
159 | "source": [
160 | "NUM_EPOCHS = 10\n",
161 | "\n",
162 | "history = model.fit(train_dataset, epochs=NUM_EPOCHS, validation_data=test_dataset)"
163 | ]
164 | },
165 | {
166 | "cell_type": "code",
167 | "execution_count": null,
168 | "metadata": {
169 | "id": "Mp1Z7P9pYRSK"
170 | },
171 | "outputs": [],
172 | "source": [
173 | "import matplotlib.pyplot as plt\n",
174 | "\n",
175 | "# Plot utility\n",
176 | "def plot_graphs(history, string):\n",
177 | " plt.plot(history.history[string])\n",
178 | " plt.plot(history.history['val_'+string])\n",
179 | " plt.xlabel(\"Epochs\")\n",
180 | " plt.ylabel(string)\n",
181 | " plt.legend([string, 'val_'+string])\n",
182 | " plt.show()\n",
183 | "\n",
184 | "# Plot the accuracy and results \n",
185 | "plot_graphs(history, \"accuracy\")\n",
186 | "plot_graphs(history, \"loss\")"
187 | ]
188 | },
189 | {
190 | "cell_type": "markdown",
191 | "metadata": {
192 | "id": "c1pnGOV9ur9Y"
193 | },
194 | "source": [
195 | "## Wrap Up\n",
196 | "\n",
197 | "In this lab, you got a first look at using LSTM layers to build Recurrent Neural Networks. You only used a single LSTM layer but this can be stacked as well to build deeper networks. You will see how to do that in the next lab."
198 | ]
199 | }
200 | ],
201 | "metadata": {
202 | "accelerator": "GPU",
203 | "colab": {
204 | "collapsed_sections": [],
205 | "name": "C3_W3_Lab_1_single_layer_LSTM.ipynb",
206 | "private_outputs": true,
207 | "provenance": [],
208 | "toc_visible": true
209 | },
210 | "kernelspec": {
211 | "display_name": "Python 3",
212 | "language": "python",
213 | "name": "python3"
214 | },
215 | "language_info": {
216 | "codemirror_mode": {
217 | "name": "ipython",
218 | "version": 3
219 | },
220 | "file_extension": ".py",
221 | "mimetype": "text/x-python",
222 | "name": "python",
223 | "nbconvert_exporter": "python",
224 | "pygments_lexer": "ipython3",
225 | "version": "3.7.4"
226 | }
227 | },
228 | "nbformat": 4,
229 | "nbformat_minor": 1
230 | }
231 |
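As an aside to the lab above: a minimal standalone sketch (not part of the original notebook) of how the `Bidirectional` wrapper doubles an LSTM's output size. It assumes TensorFlow 2.x; the shapes are arbitrary toy values, not the lab's hyperparameters.

```python
import numpy as np
import tensorflow as tf

# Toy batch: 2 sequences, 10 timesteps, 8 features each (illustrative shapes only).
x = np.random.rand(2, 10, 8).astype("float32")

# A plain LSTM with 16 units returns one 16-dim vector per sequence.
print(tf.keras.layers.LSTM(16)(x).shape)                                  # (2, 16)

# Bidirectional runs a forward and a backward pass and concatenates them by
# default, so the output size doubles.
print(tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(16))(x).shape)   # (2, 32)
```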
--------------------------------------------------------------------------------
/Tensorflow Developer/Natural Language Processing in TensorFlow/W3/ungraded_labs/C3_W3_Lab_2_multiple_layer_LSTM.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "
"
8 | ]
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "metadata": {
13 | "id": "rFiCyWQ-NC5D"
14 | },
15 | "source": [
16 | "# Ungraded Lab: Multiple LSTMs\n",
17 | "\n",
18 | "In this lab, you will look at how to build a model with multiple LSTM layers. Since you know the preceding steps already (e.g. downloading datasets, preparing the data, etc.), we won't expound on it anymore so you can just focus on the model building code."
19 | ]
20 | },
21 | {
22 | "cell_type": "markdown",
23 | "metadata": {
24 | "id": "xqmDNHeByJqr"
25 | },
26 | "source": [
27 | "## Download and Prepare the Dataset"
28 | ]
29 | },
30 | {
31 | "cell_type": "code",
32 | "execution_count": null,
33 | "metadata": {
34 | "id": "AW-4Vo4TMUHb"
35 | },
36 | "outputs": [],
37 | "source": [
38 | "import tensorflow_datasets as tfds\n",
39 | "\n",
40 | "# Download the subword encoded pretokenized dataset\n",
41 | "dataset, info = tfds.load('imdb_reviews/subwords8k', with_info=True, as_supervised=True)\n",
42 | "\n",
43 | "# Get the tokenizer\n",
44 | "tokenizer = info.features['text'].encoder"
45 | ]
46 | },
47 | {
48 | "cell_type": "markdown",
49 | "metadata": {
50 | "id": "fF8bUh_5Ff7y"
51 | },
52 | "source": [
53 | "Like the previous lab, we increased the `BATCH_SIZE` here to make the training faster. If you are doing this on your local machine and have a powerful processor, feel free to use the value used in the lecture (i.e. 64) to get the same results as Laurence."
54 | ]
55 | },
56 | {
57 | "cell_type": "code",
58 | "execution_count": null,
59 | "metadata": {
60 | "id": "ffvRUI0_McDS"
61 | },
62 | "outputs": [],
63 | "source": [
64 | "BUFFER_SIZE = 10000\n",
65 | "BATCH_SIZE = 256\n",
66 | "\n",
67 | "# Get the train and test splits\n",
68 | "train_data, test_data = dataset['train'], dataset['test'], \n",
69 | "\n",
70 | "# Shuffle the training data\n",
71 | "train_dataset = train_data.shuffle(BUFFER_SIZE)\n",
72 | "\n",
73 | "# Batch and pad the datasets to the maximum length of the sequences\n",
74 | "train_dataset = train_dataset.padded_batch(BATCH_SIZE)\n",
75 | "test_dataset = test_data.padded_batch(BATCH_SIZE)"
76 | ]
77 | },
78 | {
79 | "cell_type": "markdown",
80 | "metadata": {
81 | "id": "xcZEiG9ayNZr"
82 | },
83 | "source": [
84 | "## Build and Compile the Model\n",
85 | "\n",
86 | "You can build multiple layer LSTM models by simply appending another `LSTM` layer in your `Sequential` model and enabling the `return_sequences` flag to `True`. This is because an `LSTM` layer expects a sequence input so if the previous layer is also an LSTM, then it should output a sequence as well. See the code cell below that demonstrates this flag in action. You'll notice that the output dimension is in 3 dimensions `(batch_size, timesteps, features)` when when `return_sequences` is True."
87 | ]
88 | },
89 | {
90 | "cell_type": "code",
91 | "execution_count": null,
92 | "metadata": {
93 | "id": "18MsI2LU75kH"
94 | },
95 | "outputs": [],
96 | "source": [
97 | "import tensorflow as tf\n",
98 | "import numpy as np\n",
99 | "\n",
100 | "# Hyperparameters\n",
101 | "batch_size = 1\n",
102 | "timesteps = 20\n",
103 | "features = 16\n",
104 | "lstm_dim = 8\n",
105 | "\n",
106 | "print(f'batch_size: {batch_size}')\n",
107 | "print(f'timesteps (sequence length): {timesteps}')\n",
108 | "print(f'features (embedding size): {features}')\n",
109 | "print(f'lstm output units: {lstm_dim}')\n",
110 | "\n",
111 | "# Define array input with random values\n",
112 | "random_input = np.random.rand(batch_size,timesteps,features)\n",
113 | "print(f'shape of input array: {random_input.shape}')\n",
114 | "\n",
115 | "# Define LSTM that returns a single output\n",
116 | "lstm = tf.keras.layers.LSTM(lstm_dim)\n",
117 | "result = lstm(random_input)\n",
118 | "print(f'shape of lstm output(return_sequences=False): {result.shape}')\n",
119 | "\n",
120 | "# Define LSTM that returns a sequence\n",
121 | "lstm_rs = tf.keras.layers.LSTM(lstm_dim, return_sequences=True)\n",
122 | "result = lstm_rs(random_input)\n",
123 | "print(f'shape of lstm output(return_sequences=True): {result.shape}')"
124 | ]
125 | },
126 | {
127 | "cell_type": "markdown",
128 | "metadata": {
129 | "id": "6Was3BX6_50C"
130 | },
131 | "source": [
132 | "The next cell implements the stacked LSTM architecture."
133 | ]
134 | },
135 | {
136 | "cell_type": "code",
137 | "execution_count": null,
138 | "metadata": {
139 | "id": "VPNwU1SVyTjm"
140 | },
141 | "outputs": [],
142 | "source": [
143 | "import tensorflow as tf\n",
144 | "\n",
145 | "# Hyperparameters\n",
146 | "embedding_dim = 64\n",
147 | "lstm1_dim = 64\n",
148 | "lstm2_dim = 32\n",
149 | "dense_dim = 64\n",
150 | "\n",
151 | "# Build the model\n",
152 | "model = tf.keras.Sequential([\n",
153 | " tf.keras.layers.Embedding(tokenizer.vocab_size, embedding_dim),\n",
154 | " tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(lstm1_dim, return_sequences=True)),\n",
155 | " tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(lstm2_dim)),\n",
156 | " tf.keras.layers.Dense(dense_dim, activation='relu'),\n",
157 | " tf.keras.layers.Dense(1, activation='sigmoid')\n",
158 | "])\n",
159 | "\n",
160 | "# Print the model summary\n",
161 | "model.summary()"
162 | ]
163 | },
164 | {
165 | "cell_type": "code",
166 | "execution_count": null,
167 | "metadata": {
168 | "id": "Uip7QOVzMoMq"
169 | },
170 | "outputs": [],
171 | "source": [
172 | "# Set the training parameters\n",
173 | "model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])"
174 | ]
175 | },
176 | {
177 | "cell_type": "markdown",
178 | "metadata": {
179 | "id": "uh39GlZP79DY"
180 | },
181 | "source": [
182 | "## Train the Model\n",
183 | "\n",
184 | "The additional LSTM layer will lengthen the training time compared to the previous lab. Given the default parameters we set, it will take around 2 minutes per epoch with the Colab GPU enabled. "
185 | ]
186 | },
187 | {
188 | "cell_type": "code",
189 | "execution_count": null,
190 | "metadata": {
191 | "id": "7mlgzaRDMtF6"
192 | },
193 | "outputs": [],
194 | "source": [
195 | "NUM_EPOCHS = 10\n",
196 | "\n",
197 | "# Train the model\n",
198 | "history = model.fit(train_dataset, epochs=NUM_EPOCHS, validation_data=test_dataset)"
199 | ]
200 | },
201 | {
202 | "cell_type": "code",
203 | "execution_count": null,
204 | "metadata": {
205 | "id": "Mp1Z7P9pYRSK"
206 | },
207 | "outputs": [],
208 | "source": [
209 | "import matplotlib.pyplot as plt\n",
210 | "\n",
211 | "# Plot utility\n",
212 | "def plot_graphs(history, string):\n",
213 | " plt.plot(history.history[string])\n",
214 | " plt.plot(history.history['val_'+string])\n",
215 | " plt.xlabel(\"Epochs\")\n",
216 | " plt.ylabel(string)\n",
217 | " plt.legend([string, 'val_'+string])\n",
218 | " plt.show()\n",
219 | "\n",
220 | "# Plot the accuracy and results \n",
221 | "plot_graphs(history, \"accuracy\")\n",
222 | "plot_graphs(history, \"loss\")"
223 | ]
224 | },
225 | {
226 | "cell_type": "markdown",
227 | "metadata": {
228 | "id": "txQdN63vBlTK"
229 | },
230 | "source": [
231 | "## Wrap Up\n",
232 | "\n",
233 | "This lab showed how you can build deep networks by stacking LSTM layers. In the next labs, you will continue exploring other architectures you can use to implement your sentiment classification model."
234 | ]
235 | }
236 | ],
237 | "metadata": {
238 | "accelerator": "GPU",
239 | "colab": {
240 | "collapsed_sections": [],
241 | "name": "C3_W3_Lab_2_multiple_layer_LSTM.ipynb",
242 | "private_outputs": true,
243 | "provenance": []
244 | },
245 | "kernelspec": {
246 | "display_name": "Python 3",
247 | "language": "python",
248 | "name": "python3"
249 | },
250 | "language_info": {
251 | "codemirror_mode": {
252 | "name": "ipython",
253 | "version": 3
254 | },
255 | "file_extension": ".py",
256 | "mimetype": "text/x-python",
257 | "name": "python",
258 | "nbconvert_exporter": "python",
259 | "pygments_lexer": "ipython3",
260 | "version": "3.7.4"
261 | }
262 | },
263 | "nbformat": 4,
264 | "nbformat_minor": 1
265 | }
266 |
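A minimal sketch (again assuming TensorFlow 2.x, with illustrative shapes rather than the lab's data) of why, when stacking, every LSTM except the last one needs `return_sequences=True`: the next LSTM still expects a 3D `(batch, timesteps, features)` input, while the final one collapses the sequence to 2D for the Dense layers.

```python
import tensorflow as tf

# Functional-API toy model mirroring the stacked architecture above.
inputs = tf.keras.Input(shape=(None, 64))                                 # (batch, timesteps, 64)
x = tf.keras.layers.Bidirectional(
    tf.keras.layers.LSTM(64, return_sequences=True))(inputs)             # (batch, timesteps, 128)
x = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32))(x)           # (batch, 64)
outputs = tf.keras.layers.Dense(1, activation='sigmoid')(x)

# The summary shows the 3D output of the first LSTM and the 2D output of the second.
tf.keras.Model(inputs, outputs).summary()
```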
--------------------------------------------------------------------------------
/Tensorflow Developer/Natural Language Processing in TensorFlow/W3/ungraded_labs/C3_W3_Lab_3_Conv1D.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "
"
8 | ]
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "metadata": {
13 | "id": "rFiCyWQ-NC5D"
14 | },
15 | "source": [
16 | "# Ungraded Lab: Using Convolutional Neural Networks\n",
17 | "\n",
18 | "In this lab, you will look at another way of building your text classification model and this will be with a convolution layer. As you learned in Course 2 of this specialization, convolutions extract features by applying filters to the input. Let's see how you can use that for text data in the next sections."
19 | ]
20 | },
21 | {
22 | "cell_type": "markdown",
23 | "metadata": {
24 | "id": "djvGxIRDHT5e"
25 | },
26 | "source": [
27 | "## Download and prepare the dataset"
28 | ]
29 | },
30 | {
31 | "cell_type": "code",
32 | "execution_count": null,
33 | "metadata": {
34 | "id": "Y20Lud2ZMBhW"
35 | },
36 | "outputs": [],
37 | "source": [
38 | "import tensorflow_datasets as tfds\n",
39 | "\n",
40 | "# Download the subword encoded pretokenized dataset\n",
41 | "dataset, info = tfds.load('imdb_reviews/subwords8k', with_info=True, as_supervised=True)\n",
42 | "\n",
43 | "# Get the tokenizer\n",
44 | "tokenizer = info.features['text'].encoder"
45 | ]
46 | },
47 | {
48 | "cell_type": "code",
49 | "execution_count": null,
50 | "metadata": {
51 | "id": "AW-4Vo4TMUHb"
52 | },
53 | "outputs": [],
54 | "source": [
55 | "BUFFER_SIZE = 10000\n",
56 | "BATCH_SIZE = 256\n",
57 | "\n",
58 | "# Get the train and test splits\n",
59 | "train_data, test_data = dataset['train'], dataset['test'], \n",
60 | "\n",
61 | "# Shuffle the training data\n",
62 | "train_dataset = train_data.shuffle(BUFFER_SIZE)\n",
63 | "\n",
64 | "# Batch and pad the datasets to the maximum length of the sequences\n",
65 | "train_dataset = train_dataset.padded_batch(BATCH_SIZE)\n",
66 | "test_dataset = test_data.padded_batch(BATCH_SIZE)\n"
67 | ]
68 | },
69 | {
70 | "cell_type": "markdown",
71 | "metadata": {
72 | "id": "nfatNr6-IAcd"
73 | },
74 | "source": [
75 | "## Build the Model\n",
76 | "\n",
77 | "In Course 2, you were using 2D convolution layers because you were applying it on images. For temporal data such as text sequences, you will use [Conv1D](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv1D) instead so the convolution will happen over a single dimension. You will also append a pooling layer to reduce the output of the convolution layer. For this lab, you will use [GlobalMaxPooling1D](https://www.tensorflow.org/api_docs/python/tf/keras/layers/GlobalMaxPool1D) to get the max value across the time dimension. You can also use average pooling and you will do that in the next labs. See how these layers behave as standalone layers in the cell below."
78 | ]
79 | },
80 | {
81 | "cell_type": "code",
82 | "execution_count": null,
83 | "metadata": {
84 | "id": "Ay87qbqwIJaV"
85 | },
86 | "outputs": [],
87 | "source": [
88 | "import tensorflow as tf\n",
89 | "import numpy as np\n",
90 | "\n",
91 | "# Hyperparameters\n",
92 | "batch_size = 1\n",
93 | "timesteps = 20\n",
94 | "features = 20\n",
95 | "filters = 128\n",
96 | "kernel_size = 5\n",
97 | "\n",
98 | "print(f'batch_size: {batch_size}')\n",
99 | "print(f'timesteps (sequence length): {timesteps}')\n",
100 | "print(f'features (embedding size): {features}')\n",
101 | "print(f'filters: {filters}')\n",
102 | "print(f'kernel_size: {kernel_size}')\n",
103 | "\n",
104 | "# Define array input with random values\n",
105 | "random_input = np.random.rand(batch_size,timesteps,features)\n",
106 | "print(f'shape of input array: {random_input.shape}')\n",
107 | "\n",
108 | "# Pass array to convolution layer and inspect output shape\n",
109 | "conv1d = tf.keras.layers.Conv1D(filters=filters, kernel_size=kernel_size, activation='relu')\n",
110 | "result = conv1d(random_input)\n",
111 | "print(f'shape of conv1d output: {result.shape}')\n",
112 | "\n",
113 | "# Pass array to max pooling layer and inspect output shape\n",
114 | "gmp = tf.keras.layers.GlobalMaxPooling1D()\n",
115 | "result = gmp(result)\n",
116 | "print(f'shape of global max pooling output: {result.shape}')"
117 | ]
118 | },
119 | {
120 | "cell_type": "markdown",
121 | "metadata": {
122 | "id": "lNNYF7tqO7it"
123 | },
124 | "source": [
125 | "You can build the model by simply appending the convolution and pooling layer after the embedding layer as shown below."
126 | ]
127 | },
128 | {
129 | "cell_type": "code",
130 | "execution_count": null,
131 | "metadata": {
132 | "id": "jo1jjO3vn0jo"
133 | },
134 | "outputs": [],
135 | "source": [
136 | "import tensorflow as tf\n",
137 | "\n",
138 | "# Hyperparameters\n",
139 | "embedding_dim = 64\n",
140 | "filters = 128\n",
141 | "kernel_size = 5\n",
142 | "dense_dim = 64\n",
143 | "\n",
144 | "# Build the model\n",
145 | "model = tf.keras.Sequential([\n",
146 | " tf.keras.layers.Embedding(tokenizer.vocab_size, embedding_dim),\n",
147 | " tf.keras.layers.Conv1D(filters=filters, kernel_size=kernel_size, activation='relu'),\n",
148 | " tf.keras.layers.GlobalMaxPooling1D(),\n",
149 | " tf.keras.layers.Dense(dense_dim, activation='relu'),\n",
150 | " tf.keras.layers.Dense(1, activation='sigmoid')\n",
151 | "])\n",
152 | "\n",
153 | "# Print the model summary\n",
154 | "model.summary()"
155 | ]
156 | },
157 | {
158 | "cell_type": "code",
159 | "execution_count": null,
160 | "metadata": {
161 | "id": "Uip7QOVzMoMq"
162 | },
163 | "outputs": [],
164 | "source": [
165 | "# Set the training parameters\n",
166 | "model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])"
167 | ]
168 | },
169 | {
170 | "cell_type": "markdown",
171 | "metadata": {
172 | "id": "iLJu8HEvPG0L"
173 | },
174 | "source": [
175 | "## Train the model\n",
176 | "\n",
177 | "Training will take around 30 seconds per epoch and you will notice that it reaches higher accuracies than the previous models you've built."
178 | ]
179 | },
180 | {
181 | "cell_type": "code",
182 | "execution_count": null,
183 | "metadata": {
184 | "id": "7mlgzaRDMtF6"
185 | },
186 | "outputs": [],
187 | "source": [
188 | "NUM_EPOCHS = 10\n",
189 | "\n",
190 | "# Train the model\n",
191 | "history = model.fit(train_dataset, epochs=NUM_EPOCHS, validation_data=test_dataset)"
192 | ]
193 | },
194 | {
195 | "cell_type": "code",
196 | "execution_count": null,
197 | "metadata": {
198 | "id": "Mp1Z7P9pYRSK"
199 | },
200 | "outputs": [],
201 | "source": [
202 | "import matplotlib.pyplot as plt\n",
203 | "\n",
204 | "# Plot utility\n",
205 | "def plot_graphs(history, string):\n",
206 | " plt.plot(history.history[string])\n",
207 | " plt.plot(history.history['val_'+string])\n",
208 | " plt.xlabel(\"Epochs\")\n",
209 | " plt.ylabel(string)\n",
210 | " plt.legend([string, 'val_'+string])\n",
211 | " plt.show()\n",
212 | "\n",
213 | "# Plot the accuracy and results \n",
214 | "plot_graphs(history, \"accuracy\")\n",
215 | "plot_graphs(history, \"loss\")"
216 | ]
217 | },
218 | {
219 | "cell_type": "markdown",
220 | "metadata": {
221 | "id": "0rD7ZS84PlUp"
222 | },
223 | "source": [
224 | "## Wrap Up\n",
225 | "\n",
226 | "In this lab, you explored another model architecture you can use for text classification. In the next lessons, you will revisit full word encoding of the IMDB reviews and compare which model works best when the data is prepared that way."
227 | ]
228 | }
229 | ],
230 | "metadata": {
231 | "accelerator": "GPU",
232 | "colab": {
233 | "collapsed_sections": [],
234 | "name": "C3_W3_Lab_3_Conv1D.ipynb",
235 | "private_outputs": true,
236 | "provenance": []
237 | },
238 | "kernelspec": {
239 | "display_name": "Python 3",
240 | "language": "python",
241 | "name": "python3"
242 | },
243 | "language_info": {
244 | "codemirror_mode": {
245 | "name": "ipython",
246 | "version": 3
247 | },
248 | "file_extension": ".py",
249 | "mimetype": "text/x-python",
250 | "name": "python",
251 | "nbconvert_exporter": "python",
252 | "pygments_lexer": "ipython3",
253 | "version": "3.7.4"
254 | }
255 | },
256 | "nbformat": 4,
257 | "nbformat_minor": 1
258 | }
259 |
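A small sketch, assuming TensorFlow 2.x and the same toy dimensions as the lab's demo cell, of the output-length arithmetic behind `Conv1D` with its default `'valid'` padding: a kernel of size 5 sliding over 20 timesteps produces 20 - 5 + 1 = 16 output steps.

```python
import numpy as np
import tensorflow as tf

# Toy input: 1 sequence, 20 timesteps, 20 features (illustrative values only).
x = np.random.rand(1, 20, 20).astype("float32")

# 'valid' padding: output steps = timesteps - kernel_size + 1 = 20 - 5 + 1 = 16,
# with one 128-dim feature vector per step.
conv_out = tf.keras.layers.Conv1D(filters=128, kernel_size=5, activation='relu')(x)
print(conv_out.shape)                                                     # (1, 16, 128)

# GlobalMaxPooling1D keeps the maximum over those 16 steps for each filter.
print(tf.keras.layers.GlobalMaxPooling1D()(conv_out).shape)               # (1, 128)
```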
--------------------------------------------------------------------------------
/Tensorflow Developer/Natural Language Processing in TensorFlow/W3/ungraded_labs/C3_W3_Lab_5_sarcasm_with_bi_LSTM.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "
"
8 | ]
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "metadata": {
13 | "id": "Q2MY4-M1zuhV"
14 | },
15 | "source": [
16 | "# Ungraded Lab: Training a Sarcasm Detection Model using Bidirectional LSTMs\n",
17 | "\n",
18 | "In this lab, you will revisit the [News Headlines Dataset for Sarcasm Detection](https://www.kaggle.com/rmisra/news-headlines-dataset-for-sarcasm-detection/home) dataset and use it to train a Bi-LSTM Model.\n"
19 | ]
20 | },
21 | {
22 | "cell_type": "markdown",
23 | "metadata": {
24 | "id": "S-AgItE6z80t"
25 | },
26 | "source": [
27 | "## Download the Dataset\n",
28 | "\n",
29 | "First, you will download the JSON file and extract the contents into lists."
30 | ]
31 | },
32 | {
33 | "cell_type": "code",
34 | "execution_count": null,
35 | "metadata": {
36 | "id": "k_Wlz9i10Dmn"
37 | },
38 | "outputs": [],
39 | "source": [
40 | "# Download the dataset\n",
41 | "!wget https://storage.googleapis.com/tensorflow-1-public/course3/sarcasm.json"
42 | ]
43 | },
44 | {
45 | "cell_type": "code",
46 | "execution_count": null,
47 | "metadata": {
48 | "id": "Pr4R0I240GOh"
49 | },
50 | "outputs": [],
51 | "source": [
52 | "import json\n",
53 | "\n",
54 | "# Load the JSON file\n",
55 | "with open(\"./sarcasm.json\", 'r') as f:\n",
56 | " datastore = json.load(f)\n",
57 | "\n",
58 | "# Initialize the lists\n",
59 | "sentences = []\n",
60 | "labels = []\n",
61 | "\n",
62 | "# Collect sentences and labels into the lists\n",
63 | "for item in datastore:\n",
64 | " sentences.append(item['headline'])\n",
65 | " labels.append(item['is_sarcastic'])"
66 | ]
67 | },
68 | {
69 | "cell_type": "markdown",
70 | "metadata": {
71 | "id": "zN9-ojV55UCR"
72 | },
73 | "source": [
74 | "## Split the Dataset\n",
75 | "\n",
76 | "You will then split the lists into train and test sets."
77 | ]
78 | },
79 | {
80 | "cell_type": "code",
81 | "execution_count": null,
82 | "metadata": {
83 | "id": "50H0ZrJf035i"
84 | },
85 | "outputs": [],
86 | "source": [
87 | "training_size = 20000\n",
88 | "\n",
89 | "# Split the sentences\n",
90 | "training_sentences = sentences[0:training_size]\n",
91 | "testing_sentences = sentences[training_size:]\n",
92 | "\n",
93 | "# Split the labels\n",
94 | "training_labels = labels[0:training_size]\n",
95 | "testing_labels = labels[training_size:]"
96 | ]
97 | },
98 | {
99 | "cell_type": "markdown",
100 | "metadata": {
101 | "id": "MYVNY4tE5YbN"
102 | },
103 | "source": [
104 | "## Data preprocessing\n",
105 | "\n",
106 | "Next, you will generate the vocabulary and padded sequences."
107 | ]
108 | },
109 | {
110 | "cell_type": "code",
111 | "execution_count": null,
112 | "metadata": {
113 | "id": "hodsUZib1Ce7"
114 | },
115 | "outputs": [],
116 | "source": [
117 | "import numpy as np\n",
118 | "from tensorflow.keras.preprocessing.text import Tokenizer\n",
119 | "from tensorflow.keras.preprocessing.sequence import pad_sequences\n",
120 | "\n",
121 | "vocab_size = 10000\n",
122 | "max_length = 120\n",
123 | "trunc_type='post'\n",
124 | "padding_type='post'\n",
125 | "oov_tok = \"\"\n",
126 | "\n",
127 | "# Initialize the Tokenizer class\n",
128 | "tokenizer = Tokenizer(num_words=vocab_size, oov_token=oov_tok)\n",
129 | "\n",
130 | "# Generate the word index dictionary\n",
131 | "tokenizer.fit_on_texts(training_sentences)\n",
132 | "word_index = tokenizer.word_index\n",
133 | "\n",
134 | "# Generate and pad the training sequences\n",
135 | "training_sequences = tokenizer.texts_to_sequences(training_sentences)\n",
136 | "training_padded = pad_sequences(training_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)\n",
137 | "\n",
138 | "# Generate and pad the testing sequences\n",
139 | "testing_sequences = tokenizer.texts_to_sequences(testing_sentences)\n",
140 | "testing_padded = pad_sequences(testing_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)\n",
141 | "\n",
142 | "# Convert the labels lists into numpy arrays\n",
143 | "training_labels = np.array(training_labels)\n",
144 | "testing_labels = np.array(testing_labels)"
145 | ]
146 | },
147 | {
148 | "cell_type": "markdown",
149 | "metadata": {
150 | "id": "o23gJhj95el5"
151 | },
152 | "source": [
153 | "## Build and Compile the Model\n",
154 | "\n",
155 | "The architecture here is almost identical to the one you used in the previous lab with the IMDB Reviews. Try to tweak the parameters and see how it affects the training time and accuracy (both training and validation)."
156 | ]
157 | },
158 | {
159 | "cell_type": "code",
160 | "execution_count": null,
161 | "metadata": {
162 | "id": "jGwXGIXvFhXW"
163 | },
164 | "outputs": [],
165 | "source": [
166 | "import tensorflow as tf\n",
167 | "\n",
168 | "# Parameters\n",
169 | "embedding_dim = 16\n",
170 | "lstm_dim = 32\n",
171 | "dense_dim = 24\n",
172 | "\n",
173 | "# Model Definition with LSTM\n",
174 | "model_lstm = tf.keras.Sequential([\n",
175 | " tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),\n",
176 | " tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(lstm_dim)),\n",
177 | " tf.keras.layers.Dense(dense_dim, activation='relu'),\n",
178 | " tf.keras.layers.Dense(1, activation='sigmoid')\n",
179 | "])\n",
180 | "\n",
181 | "# Set the training parameters\n",
182 | "model_lstm.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])\n",
183 | "\n",
184 | "# Print the model summary\n",
185 | "model_lstm.summary()"
186 | ]
187 | },
188 | {
189 | "cell_type": "markdown",
190 | "metadata": {
191 | "id": "krcQGm7B5g9A"
192 | },
193 | "source": [
194 | "## Train the Model"
195 | ]
196 | },
197 | {
198 | "cell_type": "code",
199 | "execution_count": null,
200 | "metadata": {
201 | "colab": {
202 | "background_save": true
203 | },
204 | "id": "nEKV8EMj11BW"
205 | },
206 | "outputs": [],
207 | "source": [
208 | "NUM_EPOCHS = 10\n",
209 | "\n",
210 | "# Train the model\n",
211 | "history_lstm = model_lstm.fit(training_padded, training_labels, epochs=NUM_EPOCHS, validation_data=(testing_padded, testing_labels))"
212 | ]
213 | },
214 | {
215 | "cell_type": "code",
216 | "execution_count": null,
217 | "metadata": {
218 | "id": "g9DC6dmLF8DC"
219 | },
220 | "outputs": [],
221 | "source": [
222 | "import matplotlib.pyplot as plt\n",
223 | "\n",
224 | "# Plot Utility\n",
225 | "def plot_graphs(history, string):\n",
226 | " plt.plot(history.history[string])\n",
227 | " plt.plot(history.history['val_'+string])\n",
228 | " plt.xlabel(\"Epochs\")\n",
229 | " plt.ylabel(string)\n",
230 | " plt.legend([string, 'val_'+string])\n",
231 | " plt.show()\n",
232 | "\n",
233 | "# Plot the accuracy and loss history\n",
234 | "plot_graphs(history_lstm, 'accuracy')\n",
235 | "plot_graphs(history_lstm, 'loss')"
236 | ]
237 | }
238 | ],
239 | "metadata": {
240 | "accelerator": "GPU",
241 | "colab": {
242 | "collapsed_sections": [],
243 | "name": "C3_W3_Lab_5_sarcasm_with_bi_LSTM.ipynb",
244 | "private_outputs": true,
245 | "provenance": []
246 | },
247 | "kernelspec": {
248 | "display_name": "Python 3",
249 | "language": "python",
250 | "name": "python3"
251 | },
252 | "language_info": {
253 | "codemirror_mode": {
254 | "name": "ipython",
255 | "version": 3
256 | },
257 | "file_extension": ".py",
258 | "mimetype": "text/x-python",
259 | "name": "python",
260 | "nbconvert_exporter": "python",
261 | "pygments_lexer": "ipython3",
262 | "version": "3.7.4"
263 | }
264 | },
265 | "nbformat": 4,
266 | "nbformat_minor": 1
267 | }
268 |
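A minimal sketch of the `Tokenizer` OOV and post-padding behaviour used in the preprocessing cell above, assuming the legacy `tensorflow.keras.preprocessing` API and a made-up one-sentence corpus (not the sarcasm data).

```python
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences

# Tiny illustrative corpus; the oov_token is reserved index 1 in the word index.
tokenizer = Tokenizer(num_words=100, oov_token="<OOV>")
tokenizer.fit_on_texts(["granny starting to fear spiders in the garden"])

# Words never seen during fitting map to the OOV index.
seq = tokenizer.texts_to_sequences(["my dog fears spiders in the garden"])
print(seq)

# padding='post' appends zeros after the sequence up to maxlen.
print(pad_sequences(seq, maxlen=12, padding='post'))
```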
--------------------------------------------------------------------------------
/Tensorflow Developer/Natural Language Processing in TensorFlow/W3/ungraded_labs/C3_W3_Lab_6_sarcasm_with_1D_convolutional.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "
"
8 | ]
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "metadata": {
13 | "id": "YEdilk144fzb"
14 | },
15 | "source": [
16 | "# Ungraded Lab: Training a Sarcasm Detection Model using a Convolution Layer\n",
17 | "\n",
18 | "You will be doing the same steps here as the previous lab but will be using a convolution layer instead. As usual, try tweaking the parameters and observe how it affects the results. \n"
19 | ]
20 | },
21 | {
22 | "cell_type": "markdown",
23 | "metadata": {
24 | "id": "pmokcpHc5u1R"
25 | },
26 | "source": [
27 | "## Download the Dataset"
28 | ]
29 | },
30 | {
31 | "cell_type": "code",
32 | "execution_count": null,
33 | "metadata": {
34 | "id": "dxezdGoV29Yz"
35 | },
36 | "outputs": [],
37 | "source": [
38 | "# Download the dataset\n",
39 | "!wget https://storage.googleapis.com/tensorflow-1-public/course3/sarcasm.json"
40 | ]
41 | },
42 | {
43 | "cell_type": "code",
44 | "execution_count": null,
45 | "metadata": {
46 | "id": "BTcGA2Po2_nN"
47 | },
48 | "outputs": [],
49 | "source": [
50 | "import json\n",
51 | "\n",
52 | "# Load the JSON file\n",
53 | "with open(\"./sarcasm.json\", 'r') as f:\n",
54 | " datastore = json.load(f)\n",
55 | "\n",
56 | "# Initialize the lists\n",
57 | "sentences = []\n",
58 | "labels = []\n",
59 | "\n",
60 | "# Collect sentences and labels into the lists\n",
61 | "for item in datastore:\n",
62 | " sentences.append(item['headline'])\n",
63 | " labels.append(item['is_sarcastic'])"
64 | ]
65 | },
66 | {
67 | "cell_type": "markdown",
68 | "metadata": {
69 | "id": "F2zXSds45s2P"
70 | },
71 | "source": [
72 | "## Split the Dataset"
73 | ]
74 | },
75 | {
76 | "cell_type": "code",
77 | "execution_count": null,
78 | "metadata": {
79 | "id": "baDwTn9S3ENB"
80 | },
81 | "outputs": [],
82 | "source": [
83 | "training_size = 20000\n",
84 | "\n",
85 | "# Split the sentences\n",
86 | "training_sentences = sentences[0:training_size]\n",
87 | "testing_sentences = sentences[training_size:]\n",
88 | "\n",
89 | "# Split the labels\n",
90 | "training_labels = labels[0:training_size]\n",
91 | "testing_labels = labels[training_size:]"
92 | ]
93 | },
94 | {
95 | "cell_type": "markdown",
96 | "metadata": {
97 | "id": "NdpLY-or5pTP"
98 | },
99 | "source": [
100 | "## Data preprocessing"
101 | ]
102 | },
103 | {
104 | "cell_type": "code",
105 | "execution_count": null,
106 | "metadata": {
107 | "id": "RHjZR4oi3LOq"
108 | },
109 | "outputs": [],
110 | "source": [
111 | "import numpy as np\n",
112 | "from tensorflow.keras.preprocessing.text import Tokenizer\n",
113 | "from tensorflow.keras.preprocessing.sequence import pad_sequences\n",
114 | "\n",
115 | "vocab_size = 10000\n",
116 | "max_length = 120\n",
117 | "trunc_type='post'\n",
118 | "padding_type='post'\n",
119 | "oov_tok = \"\"\n",
120 | "\n",
121 | "# Initialize the Tokenizer class\n",
122 | "tokenizer = Tokenizer(num_words=vocab_size, oov_token=oov_tok)\n",
123 | "\n",
124 | "# Generate the word index dictionary\n",
125 | "tokenizer.fit_on_texts(training_sentences)\n",
126 | "word_index = tokenizer.word_index\n",
127 | "\n",
128 | "# Generate and pad the training sequences\n",
129 | "training_sequences = tokenizer.texts_to_sequences(training_sentences)\n",
130 | "training_padded = pad_sequences(training_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)\n",
131 | "\n",
132 | "# Generate and pad the testing sequences\n",
133 | "testing_sequences = tokenizer.texts_to_sequences(testing_sentences)\n",
134 | "testing_padded = pad_sequences(testing_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)\n",
135 | "\n",
136 | "# Convert the labels lists into numpy arrays\n",
137 | "training_labels = np.array(training_labels)\n",
138 | "testing_labels = np.array(testing_labels)"
139 | ]
140 | },
141 | {
142 | "cell_type": "markdown",
143 | "metadata": {
144 | "id": "HQBjPv_A5m1x"
145 | },
146 | "source": [
147 | "## Build and Compile the Model"
148 | ]
149 | },
150 | {
151 | "cell_type": "code",
152 | "execution_count": null,
153 | "metadata": {
154 | "id": "jGwXGIXvFhXW"
155 | },
156 | "outputs": [],
157 | "source": [
158 | "import tensorflow as tf\n",
159 | "\n",
160 | "# Parameters\n",
161 | "embedding_dim = 16\n",
162 | "filters = 128\n",
163 | "kernel_size = 5\n",
164 | "dense_dim = 6\n",
165 | "\n",
166 | "# Model Definition with Conv1D\n",
167 | "model_conv = tf.keras.Sequential([\n",
168 | " tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),\n",
169 | " tf.keras.layers.Conv1D(filters, kernel_size, activation='relu'),\n",
170 | " tf.keras.layers.GlobalMaxPooling1D(),\n",
171 | " tf.keras.layers.Dense(dense_dim, activation='relu'),\n",
172 | " tf.keras.layers.Dense(1, activation='sigmoid')\n",
173 | "])\n",
174 | "\n",
175 | "# Set the training parameters\n",
176 | "model_conv.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])\n",
177 | "\n",
178 | "# Print the model summary\n",
179 | "model_conv.summary()"
180 | ]
181 | },
182 | {
183 | "cell_type": "markdown",
184 | "metadata": {
185 | "id": "PcXC5QG45kM7"
186 | },
187 | "source": [
188 | "## Train the Model"
189 | ]
190 | },
191 | {
192 | "cell_type": "code",
193 | "execution_count": null,
194 | "metadata": {
195 | "id": "oB6C55FO3z3q"
196 | },
197 | "outputs": [],
198 | "source": [
199 | "NUM_EPOCHS = 10\n",
200 | "\n",
201 | "# Train the model\n",
202 | "history_conv = model_conv.fit(training_padded, training_labels, epochs=NUM_EPOCHS, validation_data=(testing_padded, testing_labels))"
203 | ]
204 | },
205 | {
206 | "cell_type": "code",
207 | "execution_count": null,
208 | "metadata": {
209 | "id": "g9DC6dmLF8DC"
210 | },
211 | "outputs": [],
212 | "source": [
213 | "import matplotlib.pyplot as plt\n",
214 | "\n",
215 | "# Plot Utility\n",
216 | "def plot_graphs(history, string):\n",
217 | " plt.plot(history.history[string])\n",
218 | " plt.plot(history.history['val_'+string])\n",
219 | " plt.xlabel(\"Epochs\")\n",
220 | " plt.ylabel(string)\n",
221 | " plt.legend([string, 'val_'+string])\n",
222 | " plt.show()\n",
223 | "\n",
224 | "# Plot the accuracy and loss history\n",
225 | "plot_graphs(history_conv, 'accuracy')\n",
226 | "plot_graphs(history_conv, 'loss')"
227 | ]
228 | }
229 | ],
230 | "metadata": {
231 | "colab": {
232 | "collapsed_sections": [],
233 | "name": "C3_W3_Lab_6_sarcasm_with_1D_convolutional.ipynb",
234 | "private_outputs": true,
235 | "provenance": []
236 | },
237 | "kernelspec": {
238 | "display_name": "Python 3",
239 | "language": "python",
240 | "name": "python3"
241 | },
242 | "language_info": {
243 | "codemirror_mode": {
244 | "name": "ipython",
245 | "version": 3
246 | },
247 | "file_extension": ".py",
248 | "mimetype": "text/x-python",
249 | "name": "python",
250 | "nbconvert_exporter": "python",
251 | "pygments_lexer": "ipython3",
252 | "version": "3.7.4"
253 | }
254 | },
255 | "nbformat": 4,
256 | "nbformat_minor": 1
257 | }
258 |
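A back-of-the-envelope check, assuming the lab's hyperparameters (`embedding_dim = 16`, `filters = 128`, `kernel_size = 5`), of the parameter count that `model.summary()` reports for the `Conv1D` layer: `kernel_size * input_channels * filters + filters` bias terms, i.e. 5 * 16 * 128 + 128 = 10368.

```python
import tensorflow as tf

# Hand computation of the Conv1D parameter count under the stated assumptions.
kernel_size, embedding_dim, filters = 5, 16, 128
expected = kernel_size * embedding_dim * filters + filters
print(expected)  # 10368

# Cross-check against Keras by building the layer on a matching input shape.
layer = tf.keras.layers.Conv1D(filters, kernel_size)
layer.build(input_shape=(None, 120, embedding_dim))
print(layer.count_params())  # 10368
```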
--------------------------------------------------------------------------------
/Tensorflow Developer/README.md:
--------------------------------------------------------------------------------
1 |
2 | In The Name Of GOD
3 |
4 |
5 |
6 | ## DeepLearning.AI TensorFlow Developer
7 |
8 | The specialization itself is **[here](https://www.coursera.org/professional-certificates/tensorflow-in-practice)**\
9 | For this specialization, I took the courses listed below; each entry links to that course's directory.
10 |
11 | - [Introduction to TensorFlow for Artificial Intelligence, Machine Learning, and Deep Learning](https://github.com/FarshidNooshi/Tensorflow-Notebooks/tree/master/Tensorflow%20Developer/Introduction%20to%20TensorFlow%20for%20Artificial%20Intelligence%2C%20Machine%20Learning%2C%20and%20Deep%20Learning)
12 | - [Convolutional Neural Networks in TensorFlow](https://github.com/FarshidNooshi/Tensorflow-notebooks/tree/master/Tensorflow%20Developer/Convolutional%20Neural%20Networks%20in%20TensorFlow)
13 | - [Natural Language Processing in TensorFlow](https://github.com/FarshidNooshi/Tensorflow-notebooks/tree/master/Tensorflow%20Developer/Natural%20Language%20Processing%20in%20TensorFlow)
14 | - [Sequences, Time Series and Prediction](https://github.com/FarshidNooshi/Tensorflow-notebooks/tree/master/Tensorflow%20Developer/Sequences%2C%20Time%20Series%20and%20Prediction)
15 |
--------------------------------------------------------------------------------
/Tensorflow Developer/Sequences, Time Series and Prediction/README.md:
--------------------------------------------------------------------------------
1 |
2 | In The Name Of GOD
3 |
4 |
5 | # Tensorflow notebooks
6 |
7 | This directory contains my work (assignments & labs) for **Course 4: Sequences, Time Series and Prediction** of the Coursera TensorFlow Developer program. Below is the list of assignments and ungraded labs for this course.
8 |
9 | ## C4 - Sequences, Time Series and Prediction
10 |
11 | ## Week 1
12 |
13 | ### Assignment
14 |
15 | - Create and Predict Synthetic Data _(C4W1_Assignment.ipynb)_
16 |
17 | ### Ungraded Labs
18 |
19 | 1. Time Series _(C4_W1_Lab_1_time_series.ipynb)_
20 | 2. Forecasting _(C4_W1_Lab_2_forecasting.ipynb)_
21 |
22 | ## Week 2
23 |
24 | ### Assignment
25 |
26 | - Predict with a DNN _(C4W2_Assignment.ipynb)_
27 |
28 | ### Ungraded Labs
29 |
30 | 1. Preparing Features and Labels _(C4_W2_Lab_1_features_and_labels.ipynb)_
31 | 2. Single Layer Neural Network _(C4_W2_Lab_2_single_layer_NN.ipynb)_
32 | 3. Deep Neural Network _(C4_W2_Lab_3_deep_NN.ipynb)_
33 |
34 | ## Week 3
35 |
36 | ### Assignment
37 |
38 | - Using RNN's and LSTM's for time series _(C4W3_Assignment.ipynb)_
39 |
40 | ### Ungraded Labs
41 |
42 | 1. Recurrent Neural Network (RNN) _(C4_W3_Lab_1_RNN.ipynb)_
43 | 2. Long Short-Term Memory (LSTM) _(C4_W3_Lab_2_LSTM.ipynb)_
44 |
45 | ## Week 4
46 |
47 | ### Assignment
48 |
49 | - Daily Minimum Temperatures in Melbourne - Real Life Data _(C4W4_Assignment.ipynb)_
50 |
51 | ### Ungraded Labs
52 |
53 | 1. Long Short-Term Memory (LSTM) _(C4_W4_Lab_1_LSTM.ipynb)_
54 | 2. Sunspots _(C4_W4_Lab_2_Sunspots.ipynb)_
55 | 3. Sunspots - DNN Only _(C4_W4_Lab_3_DNN_only.ipynb)_
56 |
--------------------------------------------------------------------------------
/Tensorflow Developer/Sequences, Time Series and Prediction/W1/assignment/images/diff.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Tensorflow Developer/Sequences, Time Series and Prediction/W1/assignment/images/diff.png
--------------------------------------------------------------------------------
/Tensorflow Developer/Sequences, Time Series and Prediction/W1/assignment/images/diff_moving.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Tensorflow Developer/Sequences, Time Series and Prediction/W1/assignment/images/diff_moving.png
--------------------------------------------------------------------------------
/Tensorflow Developer/Sequences, Time Series and Prediction/W1/assignment/images/moving_avg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Tensorflow Developer/Sequences, Time Series and Prediction/W1/assignment/images/moving_avg.png
--------------------------------------------------------------------------------
/Tensorflow Developer/Sequences, Time Series and Prediction/W1/assignment/images/naive.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Tensorflow Developer/Sequences, Time Series and Prediction/W1/assignment/images/naive.png
--------------------------------------------------------------------------------
/Tensorflow Developer/Sequences, Time Series and Prediction/W1/assignment/images/naive_zoom.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Tensorflow Developer/Sequences, Time Series and Prediction/W1/assignment/images/naive_zoom.png
--------------------------------------------------------------------------------
/Tensorflow Developer/Sequences, Time Series and Prediction/W1/assignment/images/plus_past.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Tensorflow Developer/Sequences, Time Series and Prediction/W1/assignment/images/plus_past.png
--------------------------------------------------------------------------------
/Tensorflow Developer/Sequences, Time Series and Prediction/W1/assignment/images/plus_smooth.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Tensorflow Developer/Sequences, Time Series and Prediction/W1/assignment/images/plus_smooth.png
--------------------------------------------------------------------------------
/Tensorflow Developer/Sequences, Time Series and Prediction/W1/assignment/images/train_series.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Tensorflow Developer/Sequences, Time Series and Prediction/W1/assignment/images/train_series.png
--------------------------------------------------------------------------------
/Tensorflow Developer/Sequences, Time Series and Prediction/W1/assignment/images/val_series.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Tensorflow Developer/Sequences, Time Series and Prediction/W1/assignment/images/val_series.png
--------------------------------------------------------------------------------
/Tensorflow Developer/Sequences, Time Series and Prediction/W2/assignment/images/forecast.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Tensorflow Developer/Sequences, Time Series and Prediction/W2/assignment/images/forecast.png
--------------------------------------------------------------------------------
/Tensorflow Developer/Sequences, Time Series and Prediction/W3/assignment/images/expected.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Tensorflow Developer/Sequences, Time Series and Prediction/W3/assignment/images/expected.png
--------------------------------------------------------------------------------
/Tensorflow Developer/Sequences, Time Series and Prediction/W4/assignment/images/temp-series.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FarshidNooshi/TensorFlow-Notebooks/5e76e5f6e98a077dd45012438f65c8b81a85ffd2/Tensorflow Developer/Sequences, Time Series and Prediction/W4/assignment/images/temp-series.png
--------------------------------------------------------------------------------