├── .github
│   ├── ISSUE_TEMPLATE
│   │   └── upgrade.md
│   └── workflows
│       ├── ci.yaml
│       ├── gh-page.yaml
│       ├── setup.yaml
│       └── upgrade.yaml
├── .gitignore
├── Gemfile
├── Gemfile.lock
├── LICENSE
├── Makefile
├── README.md
├── _action_files
│   ├── Dockerfile
│   ├── __init__.py
│   ├── action.yml
│   ├── action_entrypoint.sh
│   ├── fast_template.py
│   ├── fastpages-jekyll.Dockerfile
│   ├── fastpages-nbdev.Dockerfile
│   ├── fastpages.tpl
│   ├── hide.tpl
│   ├── nb2post.py
│   ├── parse_netlify.py
│   ├── pr_comment.sh
│   ├── settings.ini
│   ├── word2post.py
│   └── word2post.sh
├── _config.yml
├── _fastpages_docs
│   ├── CONTRIBUTING.md
│   ├── DEVELOPMENT.md
│   ├── NOTEBOOK_FOOTNOTES.md
│   ├── README_TEMPLATE.md
│   ├── UPGRADE.md
│   ├── _checkbox.png
│   ├── _manual_setup.md
│   ├── _paginate.png
│   ├── _post_tags.png
│   ├── _setup_pr_template.md
│   ├── _show_image_true.png
│   ├── _upgrade_pr.md
│   ├── highlight_dracula.png
│   ├── highlight_original.png
│   ├── upgrade_step1.png
│   ├── upgrade_step2.png
│   ├── upgrade_step3.png
│   └── version.txt
├── _includes
│   ├── alert.html
│   ├── custom-head.html
│   ├── favicons.html
│   ├── google-analytics.html
│   ├── head.html
│   ├── image-r
│   ├── image.html
│   ├── important.html
│   ├── info.html
│   ├── note.html
│   ├── notebook_binder_link.html
│   ├── notebook_colab_link.html
│   ├── notebook_github_link.html
│   ├── post_list.html
│   ├── post_list_image_card.html
│   ├── reading_time.html
│   ├── screenshot
│   ├── tip.html
│   ├── toc.html
│   ├── twitter.html
│   ├── utterances.html
│   ├── warning.html
│   └── youtube.html
├── _layouts
│   ├── categories.html
│   ├── home.html
│   ├── notebook.html
│   └── post.html
├── _notebooks
│   ├── 2020-03-18-A_Gentle_Introduction_to_PyTorch_1_2.ipynb
│   ├── 2020-03-18-RNN_PT.ipynb
│   ├── 2020-03-18-pytorch_hello_world.ipynb
│   ├── 2020-03-18-pytorch_logistic_regression.ipynb
│   ├── 2020-03-18-pytorch_quick_start.ipynb
│   ├── 2020-03-19-Writing_Primer_for_Data_Scientists.ipynb
│   ├── 2020-03-19-nlp_basics_tokenization_segmentation.ipynb
│   ├── 2020-03-19-nn.ipynb
│   ├── README.md
│   └── my_icons
│       └── fastai_logo.png
├── _pages
│   ├── 404.html
│   ├── about.md
│   ├── search.html
│   ├── submit.html
│   └── tags.html
├── _plugins
│   ├── footnote-detail.rb
│   └── footnote.rb
├── _posts
│   └── README.md
├── _sass
│   └── minima
│       ├── custom-styles.scss
│       ├── fastpages-dracula-highlight.scss
│       └── fastpages-styles.scss
├── assets
│   ├── badges
│   │   ├── binder.svg
│   │   ├── colab.svg
│   │   └── github.svg
│   └── js
│       ├── search-data.json
│       ├── search.js
│       └── vendor
│           └── lunr.min.js
├── docker-compose.yml
├── images
│   ├── copied_from_nb
│   │   └── README.md
│   ├── data-science.png
│   ├── diagram.png
│   ├── favicon.ico
│   ├── favicon.png
│   ├── fon-1.png
│   ├── front-matter.png
│   ├── intro-pytorch.png
│   ├── logistic-regression.png
│   ├── logo.png
│   ├── model-nn.png
│   ├── nn.png
│   ├── notebooks.png
│   ├── pytorch-quick.png
│   └── rnn-pt.png
└── index.html
/.github/ISSUE_TEMPLATE/upgrade.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: "[fastpages] Automated Upgrade"
3 | about: "Trigger a PR for upgrading fastpages"
4 | title: "[fastpages] Automated Upgrade"
5 | labels: fastpages-automation
6 | assignees: ''
7 |
8 | ---
9 |
10 | Opening this issue will trigger GitHub Actions to fetch the latest version of [fastpages](https://github.com/fastai/fastpages). More information will be provided in forthcoming comments below.
11 |
--------------------------------------------------------------------------------
/.github/workflows/ci.yaml:
--------------------------------------------------------------------------------
1 | name: CI
2 | on:
3 | push:
4 | branches:
5 | - master # need to filter here so we only deploy when there is a push to master
6 | # no filters on pull requests, so intentionally left blank
7 | pull_request:
8 |
9 | jobs:
10 | build-site:
11 | if: ( github.event.commits[0].message != 'Initial commit' ) || github.run_number > 1
12 | runs-on: ubuntu-latest
13 | steps:
14 |
15 | - name: Copy Repository Contents
16 | uses: actions/checkout@master
17 | with:
18 | persist-credentials: false
19 |
20 | - name: convert notebooks and word docs to posts
21 | uses: ./_action_files
22 |
23 | - name: setup directories for Jekyll build
24 | run: |
25 | rm -rf _site
26 | sudo chmod -R 777 .
27 |
28 | - name: Jekyll build
29 | uses: docker://hamelsmu/fastpages-jekyll
30 | with:
31 | args: bash -c "gem install bundler && jekyll build -V"
32 | env:
33 | JEKYLL_ENV: 'production'
34 |
35 | - name: copy CNAME file into _site if CNAME exists
36 | run: |
37 | sudo chmod -R 777 _site/
38 | cp CNAME _site/ 2>/dev/null || :
39 |
40 | - name: Deploy
41 | if: github.event_name == 'push'
42 | uses: peaceiris/actions-gh-pages@v3
43 | with:
44 | deploy_key: ${{ secrets.SSH_DEPLOY_KEY }}
45 | publish_dir: ./_site
46 |
--------------------------------------------------------------------------------
/.github/workflows/gh-page.yaml:
--------------------------------------------------------------------------------
1 | name: GH-Pages Status
2 | on:
3 | page_build
4 |
5 | jobs:
6 | see-page-build-payload:
7 | runs-on: ubuntu-latest
8 | steps:
9 | - name: check status
10 | run: |
11 | import os
12 | status, errormsg = os.getenv('STATUS'), os.getenv('ERROR')
13 | assert status == 'built', 'There was an error building the page on GitHub pages.\n\nStatus: {}\n\nError message: {}'.format(status, errormsg)
14 | shell: python
15 | env:
16 | STATUS: ${{ github.event.build.status }}
17 | ERROR: ${{ github.event.build.error.message }}
18 |
--------------------------------------------------------------------------------
/.github/workflows/setup.yaml:
--------------------------------------------------------------------------------
1 | name: Setup
2 | on: push
3 |
4 | jobs:
5 | setup:
6 | if: (github.event.commits[0].message == 'Initial commit') && (github.run_number == 1)
7 | runs-on: ubuntu-latest
8 | steps:
9 |
10 | - name: Set up Python
11 | uses: actions/setup-python@v1
12 | with:
13 | python-version: 3.6
14 |
15 | - name: Copy Repository Contents
16 | uses: actions/checkout@v2
17 |
18 | - name: modify files
19 | run: |
20 | import re, os
21 | from pathlib import Path
22 | from configparser import ConfigParser
23 | config = ConfigParser()
24 |
25 | nwo = os.getenv('GITHUB_REPOSITORY')
26 | username, repo_name = nwo.split('/')
27 | readme_template_path = Path('_fastpages_docs/README_TEMPLATE.md')
28 | readme_path = Path('README.md')
29 | config_path = Path('_config.yml')
30 | pr_msg_path = Path('_fastpages_docs/_setup_pr_template.md')
31 | settings = Path('_action_files/settings.ini')
32 |
33 | assert readme_template_path.exists(), 'Did not find _fastpages_docs/README_TEMPLATE.md in the current directory!'
34 | assert readme_path.exists(), 'Did not find README.md in the current directory!'
35 | assert config_path.exists(), 'Did not find _config.yml in the current directory!'
36 | assert pr_msg_path.exists(), 'Did not find _fastpages_docs/_setup_pr_template.md in the current directory!'
37 | assert settings.exists(), 'Did not find _action_files/settings.ini in the current directory!'
38 |
39 | # edit settings.ini file to inject baseurl
40 | config.read(settings)
41 | config['DEFAULT']['baseurl'] = f'/{repo_name}'
42 | with open('_action_files/settings.ini', 'w') as configfile:
43 | config.write(configfile)
44 |
45 | # replace content of README with template
46 | readme = readme_template_path.read_text().replace('{_username_}', username).replace('{_repo_name_}', repo_name)
47 | readme_path.write_text(readme)
48 |
49 | # update _config.yml
50 | cfg = config_path.read_text()
51 | cfg = re.sub(r'^(github_username: )(fastai)', r'\1{}'.format(username), cfg, flags=re.MULTILINE)
52 | cfg = re.sub(r'^(baseurl: )("")', r'\1"/{}"'.format(repo_name), cfg, flags=re.MULTILINE)
53 | cfg = re.sub(r'^(github_repo: ")(fastpages)', r'\1{}'.format(repo_name), cfg, flags=re.MULTILINE)
54 | cfg = re.sub(r'^(url: "https://)(fastpages.fast.ai)(")', r'\1{}.github.io\3'.format(username), cfg, flags=re.MULTILINE)
55 | cfg = re.sub('UA-57531313-5', '', cfg, flags=re.MULTILINE)
56 | config_path.write_text(cfg)
57 |
58 | # prepare the pr message
59 | pr = pr_msg_path.read_text().replace('{_username_}', username).replace('{_repo_name_}', repo_name)
60 | pr_msg_path.write_text(pr)
61 | shell: python
62 |
63 | - name: commit changes
64 | run: |
65 | git config --global user.email "${GH_EMAIL}"
66 | git config --global user.name "${GH_USERNAME}"
67 | git checkout -B fastpages-automated-setup
68 | git rm CNAME action.yml
69 | git rm _notebooks/2020-02-21-introducing-fastpages.ipynb
70 | git rm _posts/2020-03-06-fastpages-actions.md
71 | git rm -rf images/fastpages_posts
72 | git rm .github/workflows/chatops.yaml
73 | git rm .github/workflows/docker.yaml
74 | git rm .github/workflows/docker-nbdev.yaml
75 | git rm .github/ISSUE_TEMPLATE/bug.md
76 | git rm .github/ISSUE_TEMPLATE/feature_request.md
77 | git add _config.yml README.md _fastpages_docs/ _action_files/settings.ini
78 | git commit -m'setup repo'
79 | git push -f --set-upstream origin fastpages-automated-setup
80 | env:
81 | GH_EMAIL: ${{ github.event.commits[0].author.email }}
82 | GH_USERNAME: ${{ github.event.commits[0].author.username }}
83 |
84 | - name: Open a PR
85 | uses: actions/github-script@0.5.0
86 | with:
87 | github-token: ${{secrets.GITHUB_TOKEN}}
88 | script: |
89 | var fs = require('fs');
90 | var contents = fs.readFileSync('_fastpages_docs/_setup_pr_template.md', 'utf8');
91 | github.pulls.create({
92 | owner: context.repo.owner,
93 | repo: context.repo.repo,
94 | title: 'Initial Setup',
95 | head: 'fastpages-automated-setup',
96 | base: 'master',
97 | body: `${contents}`
98 | })
99 |
--------------------------------------------------------------------------------
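For reference, a standalone sketch of the `re.MULTILINE` substitutions performed by the "modify files" step above on `_config.yml`; the sample config text and the placeholder names are hypothetical, not taken from this repo:

```python
import re

# hypothetical sample of the two _config.yml lines the workflow rewrites
cfg = 'github_username: fastai\nbaseurl: ""\n'
username, repo_name = 'your-user', 'your-repo'  # placeholders

# re.MULTILINE makes ^ anchor at the start of every line, not just the string
cfg = re.sub(r'^(github_username: )(fastai)', r'\1{}'.format(username), cfg, flags=re.MULTILINE)
cfg = re.sub(r'^(baseurl: )("")', r'\1"/{}"'.format(repo_name), cfg, flags=re.MULTILINE)

print(cfg)
# github_username: your-user
# baseurl: "/your-repo"
```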
/.github/workflows/upgrade.yaml:
--------------------------------------------------------------------------------
1 | name: Upgrade fastpages
2 | on:
3 | issues:
4 | types: [opened]
5 |
6 | jobs:
7 | check_credentials:
8 | if: |
9 | (github.repository != 'fastai/fastpages') &&
10 | (github.event.issue.title == '[fastpages] Automated Upgrade')
11 | runs-on: ubuntu-latest
12 | steps:
13 |
14 | - name: see payload
15 | run: |
16 | echo "FULL PAYLOAD:\n${PAYLOAD}\n"
17 | echo "PR_PAYLOAD PAYLOAD:\n${PR_PAYLOAD}"
18 | env:
19 | PAYLOAD: ${{ toJSON(github.event) }}
20 | PR_PAYLOAD: ${{ github.event.pull_request }}
21 |
22 | - name: Comment on issue if sufficient access does not exist
23 | if: |
24 | (github.event.issue.author_association != 'OWNER') &&
25 | (github.event.issue.author_association != 'COLLABORATOR') &&
26 | (github.event.issue.author_association != 'MEMBER')
27 | uses: actions/github-script@0.6.0
28 | with:
29 | github-token: ${{secrets.GITHUB_TOKEN}}
30 | script: |
31 | var permission_level = process.env.permission_level;
32 | var url = 'https://help.github.com/en/github/setting-up-and-managing-your-github-user-account/permission-levels-for-a-user-account-repository#collaborator-access-on-a-repository-owned-by-a-user-account'
33 | var msg = `You must have the [permission level](${url}) of either an **OWNER**, **COLLABORATOR** or **MEMBER** to instantiate an upgrade request. Your permission level is ${permission_level}`
34 | github.issues.createComment({
35 | issue_number: context.issue.number,
36 | owner: context.repo.owner,
37 | repo: context.repo.repo,
38 | body: msg
39 | })
40 | github.issues.update({
41 | issue_number: context.issue.number,
42 | owner: context.repo.owner,
43 | repo: context.repo.repo,
44 | state: 'closed'
45 | })
46 | throw msg;
47 | env:
48 | permission_level: ${{ github.event.issue.author_association }}
49 |
50 | upgrade:
51 | needs: [check_credentials]
52 | if: |
53 | (github.repository != 'fastai/fastpages') &&
54 | (github.event.issue.title == '[fastpages] Automated Upgrade') &&
55 | (github.event.issue.author_association == 'OWNER' || github.event.issue.author_association == 'COLLABORATOR' || github.event.issue.author_association == 'MEMBER')
56 | runs-on: ubuntu-latest
57 | steps:
58 |
59 | - name: Set up Python
60 | uses: actions/setup-python@v1
61 | with:
62 | python-version: 3.7
63 |
64 | - name: checkout latest fastpages
65 | uses: actions/checkout@v2
66 | with:
67 | repository: 'fastai/fastpages'
68 | path: 'new_files'
69 | persist-credentials: false
70 |
71 | - name: copy this repo's contents
72 | uses: actions/checkout@v2
73 | with:
74 | path: 'current_files'
75 | persist-credentials: false
76 |
77 | - name: compare versions
78 | id: check_version
79 | run: |
80 | from pathlib import Path
81 | new_version = Path('new_files/_fastpages_docs/version.txt')
82 | old_version = Path('current_files/_fastpages_docs/version.txt')
83 |
84 | if old_version.exists():
85 | old_num = old_version.read_text().strip()
86 | new_num = new_version.read_text().strip()
87 | print(f'Old version: {old_num}')
88 | print(f'New version: {new_num}')
89 | if old_num == new_num:
90 | print('::set-output name=vbump::false')
91 | else:
92 | print('::set-output name=vbump::true')
93 | else:
94 | print('::set-output name=vbump::true')
95 | shell: python
96 |
97 | - name: copy new files
98 | if: steps.check_version.outputs.vbump == 'true'
99 | run: |
100 | # remove files you don't want to copy from current version of fastpages
101 | cd new_files
102 | rm -rf _posts _notebooks _word images
103 | rm *.md CNAME action.yml _config.yml index.html LICENSE
104 | rm .github/workflows/chatops.yaml
105 | rm .github/workflows/docker-nbdev.yaml
106 | rm .github/workflows/docker.yaml
107 | rm .github/ISSUE_TEMPLATE/bug.md .github/ISSUE_TEMPLATE/feature_request.md
108 |
109 | # copy new files from fastpages into your repo
110 | for file in $(ls | egrep -v "(assets|_sass)"); do
111 | if [[ -f "$file" ]] || [[ -d "$file" ]]
112 | then
113 | echo "copying $file";
114 | cp -r $file ../current_files;
115 | fi
116 | done
117 |
118 | # copy select files in assets and _sass
119 | cp -r assets/js ../current_files/assets
120 | cp -r assets/badges ../current_files/assets
121 | cp _sass/minima/fastpages-styles.scss ../current_files/_sass/minima/
122 | cp _sass/minima/fastpages-dracula-highlight.scss ../current_files/_sass/minima/
123 |
124 | # copy action workflows
125 | cp -r .github ../current_files
126 |
127 | # install dependencies
128 | pip3 install pyyaml
129 |
130 | - name: sync baseurl
131 | if: steps.check_version.outputs.vbump == 'true'
132 | run: |
133 | import re, os, yaml
134 | from pathlib import Path
135 | from configparser import ConfigParser
136 | settings = ConfigParser()
137 |
138 | # specify location of config files
139 | nwo = os.getenv('GITHUB_REPOSITORY')
140 | username, repo_name = nwo.split('/')
141 | settings_path = Path('current_files/_action_files/settings.ini')
142 | config_path = Path('current_files/_config.yml')
143 | setup_pr_path = Path('current_files/_fastpages_docs/_setup_pr_template.md')
144 | upgrade_pr_path = Path('current_files/_fastpages_docs/_upgrade_pr.md')
145 |
146 | assert settings_path.exists(), 'Did not find _action_files/settings.ini in your repository!'
147 | assert config_path.exists(), 'Did not find _config.yml in your repository!'
148 | assert setup_pr_path.exists(), 'Did not find _fastpages_docs/_setup_pr_template.md in your repository!'
149 | assert upgrade_pr_path.exists(), 'Did not find _fastpages_docs/_upgrade_pr.md in your repository!'
150 |
151 | # read data from config files
152 | settings.read(settings_path)
153 | with open(config_path, 'r') as cfg:
154 | config = yaml.safe_load(cfg)
155 |
156 | # sync value for baseurl b/w config.yml and settings.ini
157 | settings['DEFAULT']['baseurl'] = config['baseurl']
158 | with open(settings_path, 'w') as stg:
159 | settings.write(stg)
160 |
161 | # update PR templates
162 | setup_pr = setup_pr_path.read_text().replace('{_username_}', username).replace('{_repo_name_}', repo_name)
163 | setup_pr_path.write_text(setup_pr)
164 | upgrade_pr = upgrade_pr_path.read_text().replace('{_username_}', username).replace('{_repo_name_}', repo_name)
165 | upgrade_pr_path.write_text(upgrade_pr)
166 | shell: python
167 |
168 | - uses: webfactory/ssh-agent@v0.2.0
169 | if: steps.check_version.outputs.vbump == 'true'
170 | with:
171 | ssh-private-key: ${{ secrets.SSH_DEPLOY_KEY }}
172 |
173 | - name: push changes to branch
174 | if: steps.check_version.outputs.vbump == 'true'
175 | run: |
176 | # commit changes
177 | cd current_files
178 | git config --global user.email "${GH_USERNAME}@users.noreply.github.com"
179 | git config --global user.name "${GH_USERNAME}"
180 | git remote remove origin
181 | git remote add origin "git@github.com:${GITHUB_REPOSITORY}.git"
182 |
183 | git add _action_files/settings.ini
184 | git checkout -b fastpages-automated-upgrade
185 | git add -A
186 | git commit -m'upgrade fastpages'
187 | git push -f --set-upstream origin fastpages-automated-upgrade master
188 | env:
189 | GH_USERNAME: ${{ github.event.issue.user.login }}
190 |
191 | - name: Open a PR
192 | if: steps.check_version.outputs.vbump == 'true'
193 | id: pr
194 | uses: actions/github-script@0.6.0
195 | with:
196 | github-token: ${{secrets.GITHUB_TOKEN}}
197 | script: |
198 | var fs = require('fs');
199 | var contents = fs.readFileSync('current_files/_fastpages_docs/_upgrade_pr.md', 'utf8');
200 | github.pulls.create({
201 | owner: context.repo.owner,
202 | repo: context.repo.repo,
203 | title: '[fastpages] Update repo with changes from fastpages',
204 | head: 'fastpages-automated-upgrade',
205 | base: 'master',
206 | body: `${contents}`
207 | })
208 | .then(result => console.log(`::set-output name=pr_num::${result.data.number}`))
209 |
210 | - name: Comment on issue if failure
211 | if: failure() && (steps.check_version.outputs.vbump == 'true')
212 | uses: actions/github-script@0.6.0
213 | with:
214 | github-token: ${{secrets.GITHUB_TOKEN}}
215 | script: |
216 | var pr_num = process.env.PR_NUM;
217 | var repo = process.env.REPO
218 | github.issues.createComment({
219 | issue_number: context.issue.number,
220 | owner: context.repo.owner,
221 | repo: context.repo.repo,
222 | body: `An error occurred when attempting to open a PR to update fastpages. See the [Actions tab of your repo](https://github.com/${repo}/actions) for more details.`
223 | })
224 | env:
225 | PR_NUM: ${{ steps.pr.outputs.pr_num }}
226 | REPO: ${{ github.repository }}
227 |
228 | - name: Comment on issue
229 | if: steps.check_version.outputs.vbump == 'true'
230 | uses: actions/github-script@0.6.0
231 | with:
232 | github-token: ${{secrets.GITHUB_TOKEN}}
233 | script: |
234 | var pr_num = process.env.PR_NUM;
235 | var repo = process.env.REPO
236 | github.issues.createComment({
237 | issue_number: context.issue.number,
238 | owner: context.repo.owner,
239 | repo: context.repo.repo,
240 | body: `Opened PR https://github.com/${repo}/pull/${pr_num} to assist with updating fastpages.`
241 | })
242 | env:
243 | PR_NUM: ${{ steps.pr.outputs.pr_num }}
244 | REPO: ${{ github.repository }}
245 |
246 | - name: Comment on issue if version has not changed
247 | if: steps.check_version.outputs.vbump == 'false'
248 | uses: actions/github-script@0.6.0
249 | with:
250 | github-token: ${{secrets.GITHUB_TOKEN}}
251 | script: |
252 | github.issues.createComment({
253 | issue_number: context.issue.number,
254 | owner: context.repo.owner,
255 | repo: context.repo.repo,
256 | body: `Your version of fastpages is up to date. There is nothing to change.`
257 | })
258 |
259 | - name: Close Issue
260 | if: always()
261 | uses: actions/github-script@0.6.0
262 | with:
263 | github-token: ${{secrets.GITHUB_TOKEN}}
264 | script: |
265 | github.issues.update({
266 | issue_number: context.issue.number,
267 | owner: context.repo.owner,
268 | repo: context.repo.repo,
269 | state: 'closed'
270 | })
271 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.swp
2 | ~*
3 | *~
4 | _site
5 | .sass-cache
6 | .jekyll-cache
7 | .jekyll-metadata
8 | vendor
9 | _notebooks/.ipynb_checkpoints
10 | # Local Netlify folder
11 | .netlify
12 | .tweet-cache
13 | __pycache__
14 |
--------------------------------------------------------------------------------
/Gemfile:
--------------------------------------------------------------------------------
1 | source "https://rubygems.org"
2 | # Hello! This is where you manage which Jekyll version is used to run.
3 | # When you want to use a different version, change it below, save the
4 | # file and run `bundle install`. Run Jekyll with `bundle exec`, like so:
5 | #
6 | # bundle exec jekyll serve
7 | #
8 | # This will help ensure the proper Jekyll version is running.
9 | # Happy Jekylling!
10 | gem "jekyll", "~> 4.0.0"
11 | # This is the default theme for new Jekyll sites. You may change this to anything you like.
12 | gem "minima"
13 | # To upgrade, run `bundle update github-pages`.
14 | # gem "github-pages", group: :jekyll_plugins
15 | # If you have any plugins, put them here!
16 | group :jekyll_plugins do
17 | gem "jekyll-feed", "~> 0.12"
18 | gem 'jekyll-octicons'
19 | gem 'jekyll-remote-theme'
20 | gem "jekyll-twitter-plugin"
21 | gem 'jekyll-relative-links'
22 | gem 'jekyll-seo-tag'
23 | gem 'jekyll-toc'
24 | gem 'jekyll-gist'
25 | gem 'jekyll-paginate'
26 | end
27 |
28 | gem "kramdown-math-katex"
29 |
30 | # Windows and JRuby does not include zoneinfo files, so bundle the tzinfo-data gem
31 | # and associated library.
32 | install_if -> { RUBY_PLATFORM =~ %r!mingw|mswin|java! } do
33 | gem "tzinfo", "~> 1.2"
34 | gem "tzinfo-data"
35 | end
36 |
37 | # Performance-booster for watching directories on Windows
38 | gem "wdm", "~> 0.1.1", :install_if => Gem.win_platform?
39 |
40 | gem "faraday", "< 1.0"
41 |
42 |
--------------------------------------------------------------------------------
/Gemfile.lock:
--------------------------------------------------------------------------------
1 | GEM
2 | remote: https://rubygems.org/
3 | specs:
4 | addressable (2.7.0)
5 | public_suffix (>= 2.0.2, < 5.0)
6 | colorator (1.1.0)
7 | concurrent-ruby (1.1.6)
8 | em-websocket (0.5.1)
9 | eventmachine (>= 0.12.9)
10 | http_parser.rb (~> 0.6.0)
11 | eventmachine (1.2.7)
12 | execjs (2.7.0)
13 | faraday (0.17.3)
14 | multipart-post (>= 1.2, < 3)
15 | ffi (1.12.2)
16 | forwardable-extended (2.6.0)
17 | http_parser.rb (0.6.0)
18 | i18n (1.8.2)
19 | concurrent-ruby (~> 1.0)
20 | jekyll (4.0.0)
21 | addressable (~> 2.4)
22 | colorator (~> 1.0)
23 | em-websocket (~> 0.5)
24 | i18n (>= 0.9.5, < 2)
25 | jekyll-sass-converter (~> 2.0)
26 | jekyll-watch (~> 2.0)
27 | kramdown (~> 2.1)
28 | kramdown-parser-gfm (~> 1.0)
29 | liquid (~> 4.0)
30 | mercenary (~> 0.3.3)
31 | pathutil (~> 0.9)
32 | rouge (~> 3.0)
33 | safe_yaml (~> 1.0)
34 | terminal-table (~> 1.8)
35 | jekyll-feed (0.13.0)
36 | jekyll (>= 3.7, < 5.0)
37 | jekyll-gist (1.5.0)
38 | octokit (~> 4.2)
39 | jekyll-octicons (9.5.0)
40 | jekyll (>= 3.6, < 5.0)
41 | octicons (= 9.5.0)
42 | jekyll-paginate (1.1.0)
43 | jekyll-relative-links (0.6.1)
44 | jekyll (>= 3.3, < 5.0)
45 | jekyll-remote-theme (0.4.2)
46 | addressable (~> 2.0)
47 | jekyll (>= 3.5, < 5.0)
48 | jekyll-sass-converter (>= 1.0, <= 3.0.0, != 2.0.0)
49 | rubyzip (>= 1.3.0, < 3.0)
50 | jekyll-sass-converter (2.1.0)
51 | sassc (> 2.0.1, < 3.0)
52 | jekyll-seo-tag (2.6.1)
53 | jekyll (>= 3.3, < 5.0)
54 | jekyll-toc (0.13.1)
55 | jekyll (>= 3.7)
56 | nokogiri (~> 1.9)
57 | jekyll-twitter-plugin (2.1.0)
58 | jekyll-watch (2.2.1)
59 | listen (~> 3.0)
60 | katex (0.6.0)
61 | execjs (~> 2.7)
62 | kramdown (2.1.0)
63 | kramdown-math-katex (1.0.1)
64 | katex (~> 0.4)
65 | kramdown (~> 2.0)
66 | kramdown-parser-gfm (1.1.0)
67 | kramdown (~> 2.0)
68 | liquid (4.0.3)
69 | listen (3.2.1)
70 | rb-fsevent (~> 0.10, >= 0.10.3)
71 | rb-inotify (~> 0.9, >= 0.9.10)
72 | mercenary (0.3.6)
73 | mini_portile2 (2.4.0)
74 | minima (2.5.1)
75 | jekyll (>= 3.5, < 5.0)
76 | jekyll-feed (~> 0.9)
77 | jekyll-seo-tag (~> 2.1)
78 | multipart-post (2.1.1)
79 | nokogiri (1.10.9)
80 | mini_portile2 (~> 2.4.0)
81 | octicons (9.5.0)
82 | nokogiri (>= 1.6.3.1)
83 | octokit (4.16.0)
84 | faraday (>= 0.9)
85 | sawyer (~> 0.8.0, >= 0.5.3)
86 | pathutil (0.16.2)
87 | forwardable-extended (~> 2.6)
88 | public_suffix (4.0.3)
89 | rb-fsevent (0.10.3)
90 | rb-inotify (0.10.1)
91 | ffi (~> 1.0)
92 | rouge (3.16.0)
93 | rubyzip (2.2.0)
94 | safe_yaml (1.0.5)
95 | sassc (2.2.1)
96 | ffi (~> 1.9)
97 | sawyer (0.8.2)
98 | addressable (>= 2.3.5)
99 | faraday (> 0.8, < 2.0)
100 | terminal-table (1.8.0)
101 | unicode-display_width (~> 1.1, >= 1.1.1)
102 | thread_safe (0.3.6)
103 | tzinfo (1.2.6)
104 | thread_safe (~> 0.1)
105 | tzinfo-data (1.2019.3)
106 | tzinfo (>= 1.0.0)
107 | unicode-display_width (1.6.1)
108 | wdm (0.1.1)
109 |
110 | PLATFORMS
111 | ruby
112 |
113 | DEPENDENCIES
114 | faraday (< 1.0)
115 | jekyll (~> 4.0.0)
116 | jekyll-feed (~> 0.12)
117 | jekyll-gist
118 | jekyll-octicons
119 | jekyll-paginate
120 | jekyll-relative-links
121 | jekyll-remote-theme
122 | jekyll-seo-tag
123 | jekyll-toc
124 | jekyll-twitter-plugin
125 | kramdown-math-katex
126 | minima
127 | tzinfo (~> 1.2)
128 | tzinfo-data
129 | wdm (~> 0.1.1)
130 |
131 | BUNDLED WITH
132 | 2.1.4
133 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright 2020 onwards, fast.ai, Inc
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | help:
2 | cat Makefile
3 |
4 | # start (or restart) the services
5 | server: .FORCE
6 | docker-compose down --remove-orphans || true;
7 | docker-compose up
8 |
9 | # start (or restart) the services in detached mode
10 | server-detached: .FORCE
11 | docker-compose down || true;
12 | docker-compose up -d
13 |
14 | # build or rebuild the services WITHOUT cache
15 | build: .FORCE
16 | docker-compose stop || true; docker-compose rm || true;
17 | docker build -t hamelsmu/fastpages-jekyll -f _action_files/fastpages-jekyll.Dockerfile .
18 | docker-compose build --force-rm --no-cache
19 |
20 | # rebuild the services WITH cache
21 | quick-build: .FORCE
22 | docker-compose stop || true;
23 | docker build -t hamelsmu/fastpages-jekyll -f _action_files/fastpages-jekyll.Dockerfile .
24 | docker-compose build
25 |
26 | # convert word & nb without Jekyll services
27 | convert: .FORCE
28 | docker-compose up converter
29 |
30 | # stop all containers
31 | stop: .FORCE
32 | docker-compose stop
33 |
34 | # remove all containers
35 | remove: .FORCE
36 | docker-compose stop || true; docker-compose rm || true;
37 |
38 | # get shell inside the notebook converter service (Must already be running)
39 | bash-nb: .FORCE
40 | docker-compose exec watcher /bin/bash
41 |
42 | # get shell inside jekyll service (Must already be running)
43 | bash-jekyll: .FORCE
44 | docker-compose exec jekyll /bin/bash
45 |
46 | # restart just the Jekyll server
47 | restart-jekyll: .FORCE
48 | docker-compose restart jekyll
49 |
50 | .FORCE:
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [//]: # (This template replaces README.md when someone creates a new repo with the fastpages template.)
2 |
3 | 
4 | 
5 | [](https://github.com/fastai/fastpages)
6 |
7 |
8 | # Notebooks by dair.ai
9 | This is a place to host and share data science notebooks that range from beginner tutorials on deep learning to complete walkthroughs of complex topics such as Transformers for NLP and object detection for CV.
10 |
11 | Sharing is easy! Just upload your notebooks to the `_notebooks` folder and they will be featured on our [website](https://dair.ai/notebooks/). The notebooks will also be featured in our weekly newsletter which is read by thousands of avid learners.
12 |
13 | https://dair.ai/notebooks/
14 |
15 | ### How to Contribute
16 |
17 | [](https://sourcerer.io/fame/omarsar/omarsar/notebooks/links/0)[](https://sourcerer.io/fame/omarsar/omarsar/notebooks/links/1)[](https://sourcerer.io/fame/omarsar/omarsar/notebooks/links/2)[](https://sourcerer.io/fame/omarsar/omarsar/notebooks/links/3)[](https://sourcerer.io/fame/omarsar/omarsar/notebooks/links/4)[](https://sourcerer.io/fame/omarsar/omarsar/notebooks/links/5)[](https://sourcerer.io/fame/omarsar/omarsar/notebooks/links/6)[](https://sourcerer.io/fame/omarsar/omarsar/notebooks/links/7)
18 |
19 | 1) Have your Jupyter notebook ready for publication. Make sure you check out this [guideline](https://fastpages.fast.ai/jupyter/2020/02/20/test.html) to leverage all the amazing features of fastpages. In order to properly format the notebook when rendered as a web page, you need to include a markdown cell at the beginning of the notebook as shown in the example below:
20 |
21 | 
22 |
23 | 2) You can then upload your notebook directly to the [`_notebooks`](https://github.com/dair-ai/notebooks/tree/master/_notebooks) folder and submit it as a pull request (PR). Ensure that the notebook follows this naming convention: `YYYY-MM-DD-Name-of-your-notebook.ipynb`. Check the examples [here](https://github.com/dair-ai/notebooks/tree/master/_notebooks). If you face any issues or need help submitting the PR, email me directly at ellfae@gmail.com or [DM on Twitter](https://twitter.com/omarsar0).
24 |
25 | 3) Notebooks will then be reviewed and published. This will also be followed by a special feature in the upcoming [NLP Newsletter](https://github.com/dair-ai/nlp_newsletter) where educational resources are shared with thousands of avid readers.
26 |
27 | ---
28 | ### Credits
29 | _powered by [fastpages](https://github.com/fastai/fastpages)_
30 |
--------------------------------------------------------------------------------
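The `YYYY-MM-DD-` naming convention the README above asks for is the same one enforced by `_action_files/fast_template.py` later in this repo; a minimal sketch, using that file's date regex, for checking a candidate filename before opening a PR (the helper name is hypothetical):

```python
import re

# same date pattern as _re_blog_date in _action_files/fast_template.py
_re_blog_date = re.compile(r'([12]\d{3}-(0[1-9]|1[0-2])-(0[1-9]|[12]\d|3[01])-)')

def is_jekyll_compliant(filename: str) -> bool:
    """Return True if the filename starts with a valid YYYY-MM-DD- prefix."""
    return bool(_re_blog_date.match(filename))

print(is_jekyll_compliant('2020-03-18-pytorch_hello_world.ipynb'))  # True
print(is_jekyll_compliant('pytorch_hello_world.ipynb'))             # False
```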
/_action_files/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM hamelsmu/fastpages-nbdev
2 |
3 | WORKDIR /fastpages
4 | COPY . .
5 | RUN chmod u+x action_entrypoint.sh
6 | RUN chmod u+x word2post.sh
7 |
8 | CMD [ "/fastpages/action_entrypoint.sh" ]
9 |
--------------------------------------------------------------------------------
/_action_files/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dair-ai/notebooks/7fc69278099f100d39489181d5a7763dfc835eca/_action_files/__init__.py
--------------------------------------------------------------------------------
/_action_files/action.yml:
--------------------------------------------------------------------------------
1 | name: 'fastpages: An easy to use blogging platform with support for Jupyter Notebooks.'
2 | description: Converts Jupyter notebooks and Word docs into Jekyll blog posts.
3 | author: Hamel Husain
4 | inputs:
5 | BOOL_SAVE_MARKDOWN:
6 | description: Either 'true' or 'false'. Whether or not to commit converted markdown files from notebooks and word documents into the _posts directory in your repo. This is useful for debugging.
7 | required: false
8 | default: false
9 | SSH_DEPLOY_KEY:
10 | description: an SSH deploy key is required if BOOL_SAVE_MARKDOWN = 'true'
11 | required: false
12 | branding:
13 | color: 'blue'
14 | icon: 'book'
15 | runs:
16 | using: 'docker'
17 | image: 'Dockerfile'
18 |
--------------------------------------------------------------------------------
/_action_files/action_entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | # setup ssh: allow key to be used without a prompt and start ssh agent
5 | export GIT_SSH_COMMAND="ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
6 | eval "$(ssh-agent -s)"
7 |
8 | ######## Run notebook/word converter ########
9 | # word converter using pandoc
10 | /fastpages/word2post.sh
11 | # notebook converter using nbdev
12 | cp /fastpages/settings.ini .
13 | python /fastpages/nb2post.py
14 |
15 |
16 | ######## Optionally save files and build GitHub Pages ########
17 | if [[ "$INPUT_BOOL_SAVE_MARKDOWN" == "true" ]];then
18 |
19 | if [ -z "$INPUT_SSH_DEPLOY_KEY" ];then
20 | echo "You must set the SSH_DEPLOY_KEY input if BOOL_SAVE_MARKDOWN is set to true.";
21 | exit 1;
22 | fi
23 |
24 | # Get user's email from commit history
25 | if [[ "$GITHUB_EVENT_NAME" == "push" ]];then
26 | USER_EMAIL=`cat $GITHUB_EVENT_PATH | jq '.commits | .[0] | .author.email'`
27 | else
28 | USER_EMAIL="actions@github.com"
29 | fi
30 |
31 | # Setup Git credentials if we are planning to change the data in the repo
32 | git config --global user.name "$GITHUB_ACTOR"
33 | git config --global user.email "$USER_EMAIL"
34 | git remote add fastpages-origin "git@github.com:$GITHUB_REPOSITORY.git"
35 | echo "${INPUT_SSH_DEPLOY_KEY}" > _mykey
36 | chmod 400 _mykey
37 | ssh-add _mykey
38 |
39 | # Optionally save intermediate markdown
40 | if [[ "$INPUT_BOOL_SAVE_MARKDOWN" == "true" ]]; then
41 | git pull fastpages-origin ${GITHUB_REF} --ff-only
42 | git add _posts
43 | git commit -m "[Bot] Update $INPUT_FORMAT blog posts" --allow-empty
44 | git push fastpages-origin HEAD:${GITHUB_REF}
45 | fi
46 | fi
47 |
48 |
49 |
--------------------------------------------------------------------------------
/_action_files/fast_template.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime
2 | import re, os
3 | from pathlib import Path
4 | from typing import Tuple, Set
5 |
6 | # Check for YYYY-MM-DD
7 | _re_blog_date = re.compile(r'([12]\d{3}-(0[1-9]|1[0-2])-(0[1-9]|[12]\d|3[01])-)')
8 | # Check for leading dashes or numbers
9 | _re_numdash = re.compile(r'(^[-\d]+)')
10 |
11 | def rename_for_jekyll(nb_path: Path, warnings: Set[Tuple[str, str]]=None) -> str:
12 | """
13 | Return a Path's filename string appended with its modified time in YYYY-MM-DD format.
14 | """
15 | assert nb_path.exists(), f'{nb_path} could not be found.'
16 |
17 | # Checks if filename is compliant with Jekyll blog posts
18 | if _re_blog_date.match(nb_path.name): return nb_path.with_suffix('.md').name.replace(' ', '-')
19 |
20 | else:
21 | clean_name = _re_numdash.sub('', nb_path.with_suffix('.md').name).replace(' ', '-')
22 |
23 | # Get the file's last modified time and prepend YYYY-MM-DD- to the filename
24 | mdate = os.path.getmtime(nb_path) - 86400 # subtract one day b/c dates in the future break Jekyll
25 | dtnm = datetime.fromtimestamp(mdate).strftime("%Y-%m-%d-") + clean_name
26 | assert _re_blog_date.match(dtnm), f'{dtnm} is not a valid name, filenames must be prepended with YYYY-MM-DD-'
27 | # push this into a set b/c _nb2htmlfname gets called multiple times per conversion
28 | if warnings is not None: warnings.add((nb_path, dtnm))
29 | return dtnm
30 |
--------------------------------------------------------------------------------
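A brief usage sketch for `rename_for_jekyll` above; the notebook paths are hypothetical and must exist on disk, since the function asserts `nb_path.exists()`:

```python
from pathlib import Path
from fast_template import rename_for_jekyll

# a name that already carries a YYYY-MM-DD- prefix only has its suffix swapped
print(rename_for_jekyll(Path('_notebooks/2020-03-18-RNN_PT.ipynb')))
# -> '2020-03-18-RNN_PT.md'

# a non-compliant name gets the file's modified date (minus one day) prepended,
# and the rename is recorded in the supplied warnings set
warnings = set()
print(rename_for_jekyll(Path('_notebooks/My Draft.ipynb'), warnings=warnings))
# -> e.g. '2020-03-19-My-Draft.md'
```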
/_action_files/fastpages-jekyll.Dockerfile:
--------------------------------------------------------------------------------
1 | # Defines https://hub.docker.com/repository/docker/hamelsmu/fastpages-jekyll
2 | FROM jekyll/jekyll:4.0.0
3 |
4 | COPY . .
5 |
6 | # Pre-load all gems into the environment
7 | RUN gem install bundler
8 | RUN jekyll build
9 |
--------------------------------------------------------------------------------
/_action_files/fastpages-nbdev.Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3-slim-stretch
2 |
3 | RUN pip install --upgrade pip
4 | RUN apt-get update; apt-get -y install wget git jq
5 | RUN wget https://github.com/jgm/pandoc/releases/download/2.9.1.1/pandoc-2.9.1.1-1-amd64.deb
6 | RUN dpkg -i pandoc-2.9.1.1-1-amd64.deb
7 | RUN pip install jupyter watchdog[watchmedo] jupyter_client ipykernel jupyter
8 | RUN python3 -m ipykernel install --user
9 | RUN pip install nbdev==0.2.13
10 |
--------------------------------------------------------------------------------
/_action_files/fastpages.tpl:
--------------------------------------------------------------------------------
1 | {%- extends 'hide.tpl' -%}
2 | {%- block body -%}
3 | {%- set internals = ["metadata", "output_extension", "inlining",
4 | "raw_mimetypes", "global_content_filter"] -%}
5 | ---
6 | {%- for k in resources |reject("in", internals) %}
7 | {% if k == "summary" and "description" not in resources %}description{% else %}{{ k }}{% endif %}: {{ resources[k] }}
8 | {%- endfor %}
9 | layout: notebook
10 | ---
11 |
12 |
18 |
19 |
36 | {%- else -%}
37 | {{ super() }}
38 | {%- endif -%}
39 | {% endblock output_area_prompt %}
--------------------------------------------------------------------------------
/_action_files/nb2post.py:
--------------------------------------------------------------------------------
1 | """Converts Jupyter Notebooks to Jekyll compliant blog posts"""
2 | from datetime import datetime
3 | import re, os, logging
4 | from nbdev import export2html
5 | from nbdev.export2html import Config, Path, _re_digits, _to_html, _re_block_notes
6 | from fast_template import rename_for_jekyll
7 |
8 | warnings = set()
9 |
10 | # Modify the naming process such that destination files get named properly for Jekyll _posts
11 | def _nb2htmlfname(nb_path, dest=None):
12 | fname = rename_for_jekyll(nb_path, warnings=warnings)
13 | if dest is None: dest = Config().doc_path
14 | return Path(dest)/fname
15 |
16 | ## apply monkey patches
17 | export2html._nb2htmlfname = _nb2htmlfname
18 | export2html.notebook2html(fname='_notebooks/*.ipynb', dest='_posts/', template_file='/fastpages/fastpages.tpl')
19 | 
20 | # TODO: Open a GitHub Issue in addition to printing warnings
21 | for original, new in warnings:
22 |     print(f'{original} has been renamed to {new} to be compliant with Jekyll naming conventions.\n')
23 |
--------------------------------------------------------------------------------
/_action_files/parse_netlify.py:
--------------------------------------------------------------------------------
1 | import sys, re
2 | logs = sys.stdin.read()
3 |
4 | matches = re.findall(r'Live Draft URL: .*(https://.*)', logs)
5 | assert matches, 'Was not able to find Draft URL in the logs:\n{}'.format(logs)
6 | print("::set-output name=draft_url::{}".format(matches[0]))
7 |
8 |
--------------------------------------------------------------------------------
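A small sketch of what `parse_netlify.py` extracts; the log line below is a hypothetical example of Netlify CLI output, not captured from a real deploy:

```python
import re

logs = 'Deploying to draft URL...\nLive Draft URL: https://5e7-example--mysite.netlify.com\n'
matches = re.findall(r'Live Draft URL: .*(https://.*)', logs)
print(matches[0])  # https://5e7-example--mysite.netlify.com
```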
/_action_files/pr_comment.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | # Make a comment on a PR.
4 | # Usage:
5 | # > pr_comment.sh <MESSAGE>
6 |
7 | set -e
8 |
9 | # This is populated by our secret from the Workflow file.
10 | if [[ -z "${GITHUB_TOKEN}" ]]; then
11 | echo "Set the GITHUB_TOKEN env variable."
12 | exit 1
13 | fi
14 |
15 | if [[ -z "${ISSUE_NUMBER}" ]]; then
16 | echo "Set the ISSUE_NUMBER env variable."
17 | exit 1
18 | fi
19 |
20 | if [ -z "$1" ]
21 | then
22 | echo "No MESSAGE argument supplied. Usage: issue_comment.sh "
23 | exit 1
24 | fi
25 |
26 | MESSAGE=$1
27 |
28 | ## Set Vars
29 | URI=https://api.github.com
30 | API_VERSION=v3
31 | API_HEADER="Accept: application/vnd.github.${API_VERSION}+json"
32 | AUTH_HEADER="Authorization: token ${GITHUB_TOKEN}"
33 |
34 | # Create a comment with APIv3 # POST /repos/:owner/:repo/issues/:issue_number/comments
35 | curl -XPOST -sSL \
36 | -d "{\"body\": \"$MESSAGE\"}" \
37 | -H "${AUTH_HEADER}" \
38 | -H "${API_HEADER}" \
39 | "${URI}/repos/${GITHUB_REPOSITORY}/issues/${ISSUE_NUMBER}/comments"
40 |
--------------------------------------------------------------------------------
/_action_files/settings.ini:
--------------------------------------------------------------------------------
1 | [DEFAULT]
2 | lib_name = nbdev
3 | user = fastai
4 | branch = master
5 | version = 0.2.10
6 | description = Writing a library entirely in notebooks
7 | keywords = jupyter notebook
8 | author = Sylvain Gugger and Jeremy Howard
9 | author_email = info@fast.ai
10 | baseurl = /notebooks
11 | title = nbdev
12 | copyright = fast.ai
13 | license = apache2
14 | status = 2
15 | min_python = 3.6
16 | audience = Developers
17 | language = English
18 | requirements = nbformat>=4.4.0 nbconvert>=5.6.1 pyyaml fastscript packaging
19 | console_scripts = nbdev_build_lib=nbdev.cli:nbdev_build_lib
20 | nbdev_update_lib=nbdev.cli:nbdev_update_lib
21 | nbdev_diff_nbs=nbdev.cli:nbdev_diff_nbs
22 | nbdev_test_nbs=nbdev.cli:nbdev_test_nbs
23 | nbdev_build_docs=nbdev.cli:nbdev_build_docs
24 | nbdev_nb2md=nbdev.cli:nbdev_nb2md
25 | nbdev_trust_nbs=nbdev.cli:nbdev_trust_nbs
26 | nbdev_clean_nbs=nbdev.clean:nbdev_clean_nbs
27 | nbdev_read_nbs=nbdev.cli:nbdev_read_nbs
28 | nbdev_fix_merge=nbdev.cli:nbdev_fix_merge
29 | nbdev_install_git_hooks=nbdev.cli:nbdev_install_git_hooks
30 | nbdev_bump_version=nbdev.cli:nbdev_bump_version
31 | nbdev_new=nbdev.cli:nbdev_new
32 | nbdev_detach=nbdev.cli:nbdev_detach
33 | nbs_path = nbs
34 | doc_path = images/copied_from_nb
35 | doc_host = https://nbdev.fast.ai
36 | doc_baseurl = %(baseurl)s/images/copied_from_nb/
37 | git_url = https://github.com/fastai/nbdev/tree/master/
38 | lib_path = nbdev
39 | tst_flags = fastai2
40 | custom_sidebar = False
41 | cell_spacing = 1
42 | monospace_docstrings = False
43 | jekyll_styles = note,warning,tip,important,youtube,twitter
44 |
45 |
--------------------------------------------------------------------------------
/_action_files/word2post.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from pathlib import Path
3 | from fast_template import rename_for_jekyll
4 |
5 | if __name__ == '__main__':
6 | file_path = Path(sys.argv[1])
7 | new_name = rename_for_jekyll(file_path)
8 | print(new_name)
9 |
--------------------------------------------------------------------------------
/_action_files/word2post.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | # This sets the environment variable when testing locally and not in a GitHub Action
4 | if [ -z "$GITHUB_ACTIONS" ]; then
5 | GITHUB_WORKSPACE='/data'
6 | echo "=== Running Locally: All assets expected to be in the directory /data ==="
7 | fi
8 |
9 | # Loops through directory of *.docx files and converts to markdown
10 | # markdown files are saved in _posts, media assets are saved in assets/img/<post-name>/media
11 | for FILENAME in ${GITHUB_WORKSPACE}/_word/*.docx; do
12 | [ -e "$FILENAME" ] || continue # skip when glob doesn't match
13 | NAME=${FILENAME##*/} # Get filename without the directory
14 | NEW_NAME=`python3 "/fastpages/word2post.py" "${FILENAME}"` # clean filename to be Jekyll compliant for posts
15 | BASE_NEW_NAME=${NEW_NAME%.md} # Strip the file extension
16 |
17 | if [ -z "$NEW_NAME" ]; then
18 | echo "Unable To Rename: ${FILENAME} to a Jekyll complaint filename for blog posts"
19 | exit 1
20 | fi
21 |
22 | echo "Converting: ${NAME} ---to--- ${NEW_NAME}"
23 | cd ${GITHUB_WORKSPACE}
24 | pandoc --from docx --to gfm --output "${GITHUB_WORKSPACE}/_posts/${NEW_NAME}" --columns 9999 \
25 | --extract-media="assets/img/${BASE_NEW_NAME}" --standalone "${FILENAME}"
26 |
27 | # Inject correction to image links in markdown
28 | sed -i.bak 's/!\[\](assets/!\[\]({{ site.url }}{{ site.baseurl }}\/assets/g' "_posts/${NEW_NAME}"
29 | # Remove intermediate files
30 | rm _posts/*.bak
31 | done
32 |
--------------------------------------------------------------------------------
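The `sed` call near the end of `word2post.sh` rewrites relative image links so they respect the site's base URL; a hedged Python equivalent of that one correction, with a hypothetical post path:

```python
from pathlib import Path

post = Path('_posts/2020-01-01-example.md')  # hypothetical converted post
text = post.read_text()
# '![](assets' -> '![]({{ site.url }}{{ site.baseurl }}/assets', as in the sed expression
post.write_text(text.replace('![](assets', '![]({{ site.url }}{{ site.baseurl }}/assets'))
```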
/_config.yml:
--------------------------------------------------------------------------------
1 | # Welcome to Jekyll!
2 | #
3 | # This config file is meant for settings that affect your whole blog.
4 | #
5 | # If you need help with YAML syntax, here are some quick references for you:
6 | # https://learn-the-web.algonquindesign.ca/topics/markdown-yaml-cheat-sheet/#yaml
7 | # https://learnxinyminutes.com/docs/yaml/
8 |
9 | title: Notebooks by dair.ai
10 | description: Sharing data science notebooks made easy.
11 | github_username: dair-ai
12 | # you can comment the line below out if your repo name is not different from your baseurl
13 | github_repo: "notebooks"
14 |
15 | # OPTIONAL: override baseurl and url if using a custom domain
16 | # Note: leave out the trailing / from this value.
17 | url: "https://dair-ai.github.io" # the base hostname & protocol for your site, e.g. http://example.com
18 |
19 | ###########################################################
20 | ######### Special Instructions for baseurl ###############
21 | #
22 | #### Scenario One: If you do not have a Custom Domain #####
23 | # - if you are not using a custom domain, the baseurl *must* be set to your repo name
24 | #
25 | #### Scenario Two: If you have a Custom Domain #####
26 | # 1. If your domain does NOT have a subpath, leave this value as ""
27 | # 2. If your domain does have a subpath, you must precede the value with a / and NOT have a / at the end.
28 | # For example:
29 | # "" is valid
30 | # "/blog" is valid
31 | # "/blog/site/" is invalid ( / at the end)
32 | # "/blog/site" is valid
33 | # "blog/site" is invalid ( because doesn't begin with a /)
34 | #
35 | # 3. You must replace the parameter `baseurl` in _action_files/settings.ini with the same value as you set here but WITHOUT QUOTES.
36 | #
37 | baseurl: "/notebooks" # the subpath of your site, e.g. "/blog".
38 |
39 | # Github and twitter are optional:
40 | minima:
41 | social_links:
42 | twitter: dair_ai
43 | github: dair-ai
44 |
45 | # Set this to true to get LaTeX math equation support
46 | use_math:
47 |
48 | # Set this to true to display the summary of your blog post under your title on the Home page.
49 | show_description: true
50 |
51 | # Set this to true to display image previews on home page, if they exist
52 | show_image: false
53 |
54 | # Set this to true to display tags on each post
55 | show_tags: true
56 |
57 | # Add your Google Analytics ID here if you have one and want to use it
58 | google_analytics:
59 |
60 | exclude:
61 | - docker-compose.yml
62 | - action.yml
63 | - Makefile
64 |
65 | # this setting allows you to keep pages organized in the _pages folder
66 | include:
67 | - _pages
68 |
69 | # This specifies what badges are turned on by default for notebook posts.
70 | default_badges:
71 | github: true
72 | binder: true
73 | colab: true
74 |
75 | # Everything below here should be left alone. Modifications may break fastpages
76 | future: true
77 | theme: minima
78 | plugins:
79 | - jekyll-feed
80 | - jekyll-gist
81 | - jekyll-octicons
82 | - jekyll-toc
83 | - jekyll-twitter-plugin
84 | - jekyll-relative-links
85 | - jekyll-seo-tag
86 | - jekyll-remote-theme
87 | - jekyll-paginate
88 |
89 | # See https://jekyllrb.com/docs/pagination/
90 | # For pagination to work, you cannot have index.md at the root of your repo; instead, you must rename that file to index.html
91 | paginate: 15
92 | paginate_path: /page:num/
93 |
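As a quick illustration of what these two settings produce (a sketch, not fastpages code): with `paginate: 15` and `paginate_path: /page:num/`, jekyll-paginate serves the first 15 posts at the site root and subsequent pages at `/page2/`, `/page3/`, and so on, under the baseurl. The post count below is made up for the example.

```python
import math

# Illustration only: the page URLs jekyll-paginate generates for this config.
baseurl, per_page = "/notebooks", 15
total_posts = 47  # hypothetical post count, just for the example
pages = math.ceil(total_posts / per_page)
urls = [f"{baseurl}/"] + [f"{baseurl}/page{n}/" for n in range(2, pages + 1)]
print(urls)  # ['/notebooks/', '/notebooks/page2/', '/notebooks/page3/', '/notebooks/page4/']
```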
94 | remote_theme: jekyll/minima
95 |
96 | titles_from_headings:
97 | enabled: true
98 | strip_title: true
99 | collections: true
100 |
101 | highlighter: rouge
102 | markdown: kramdown
103 | kramdown:
104 | math_engine: katex
105 | input: GFM
106 | auto_ids: true
107 | hard_wrap: false
108 | syntax_highlighter: rouge
109 |
--------------------------------------------------------------------------------
/_fastpages_docs/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | _Adapted from [fastai/nbdev/CONTRIBUTING.md](https://github.com/fastai/nbdev/blob/master/CONTRIBUTING.md)_
2 |
3 | # How to contribute to fastpages
4 |
5 | First, thanks a lot for wanting to help! Some things to keep in mind:
6 |
7 | - The jupyter to blog post conversion functionality relies on [fastai/nbdev](https://github.com/fastai/nbdev). For idiosyncratic, blog-only uses of nbdev that would require a large refactor of nbdev, it might be acceptable to apply a [monkey patch](https://stackoverflow.com/questions/5626193/what-is-monkey-patching) in `fastpages`. However, you are encouraged to contribute to `nbdev` where possible if a change there could unlock a new feature. If you are unsure, please open an issue in this repo to discuss.
8 |
9 |
10 | ## Note for new contributors from Jeremy
11 |
12 | It can be tempting to jump into a new project by questioning the stylistic decisions that have been made, such as naming, formatting, and so forth. This can be especially so for Python programmers contributing to this project, which is unusual in following a number of conventions that are common in other programming communities, but not in Python. However, please don't do this, for (amongst others) the following reasons:
13 |
14 | - Contributing to [Parkinson’s law of triviality](https://www.wikiwand.com/en/Law_of_triviality) has negative consequences for a project. Let’s focus on deep learning!
15 | - It’s exhausting to repeat the same discussion over and over again, especially when it’s been well documented already. When you have a question about the project, please check the pages in the docs website linked here.
16 | - You’re likely to get a warmer welcome from the community if you start out by contributing something that’s been requested on the forum, since you’ll be solving someone’s current problem.
17 | - If you start out by just telling us your point of view, rather than studying the background behind the decisions that have been made, you’re unlikely to be contributing anything new or useful.
18 | - I’ve been writing code for nearly 40 years now, across dozens of languages, and other folks involved have quite a bit of experience too - the approaches used are based on significant experience and research. Whilst there’s always room for improvement, it’s much more likely you’ll be making a positive contribution if you spend a few weeks studying and working within the current framework before suggesting wholesale changes.
19 |
20 |
21 | ## Did you find a bug?
22 |
23 | * Nobody is perfect, especially not us. But first, please double-check that the bug doesn't come from something on your side. The [forum](http://forums.fast.ai/) is a tremendous source of help, and we'd advise using it as a first step. Be sure to include as much code as you can so that other people can easily help you.
24 | * Then, ensure the bug was not already reported by searching on GitHub under [Issues](https://github.com/fastai/fastpages/issues).
25 | * If you're unable to find an open issue addressing the problem, [open a new one](https://github.com/fastai/fastpages/issues/new). Be sure to include a title and clear description, as much relevant information as possible, and a code sample or an executable test case demonstrating the expected behavior that is not occurring.
26 | * Be sure to add the complete error messages.
27 |
28 | #### Did you write a patch that fixes a bug?
29 |
30 | * Open a new GitHub pull request with the patch.
31 | * Ensure that your PR includes a test that fails without your patch, and passes with it.
32 | * Ensure the PR description clearly describes the problem and solution. Include the relevant issue number if applicable.
33 | * Before submitting, please be sure you abide by our [coding style](https://docs.fast.ai/dev/style.html) (where appropriate) and [the guide on abbreviations](https://docs.fast.ai/dev/abbr.html) and clean-up your code accordingly.
34 |
35 | ## Do you intend to add a new feature or change an existing one?
36 |
37 | * You can suggest your change on the [fastai forum](http://forums.fast.ai/) to see if others are interested or want to help.
38 | * Once your approach has been discussed and confirmed on the forum, you are welcome to push a PR, including a complete description of the new feature and an example of how it's used. Be sure to document your code in the notebook.
39 | * Ensure that your code includes tests that exercise not only your feature, but also any other code that might be impacted.
40 |
41 | ## PR submission guidelines
42 |
43 | Some general rules of thumb that will make your life easier.
44 |
45 | * Test locally before opening a pull request. See [the development guide](_fastpages_docs/DEVELOPMENT.md) for instructions on how to run fastpages on your local machine.
46 | * When you do open a pull request, please request a draft build of your PR by making a **comment with the magic command `/preview` in the pull request.** This will allow reviewers to see a live-preview of your changes without having to clone your branch.
47 | * You can do this multiple times, if necessary, to rebuild your preview after changes. But please do not abuse this; test locally before requesting another preview.
48 |
49 | * Keep each PR focused. While it's more convenient, do not combine several unrelated fixes together. Create as many branches as needed to keep each PR focused.
50 | * Do not mix style changes/fixes with "functional" changes. It's very difficult to review such PRs, and they will most likely be rejected.
51 | * Do not add/remove vertical whitespace. Preserve the original style of the file you edit as much as you can.
52 | * Do not turn an already submitted PR into your development playground. If, after you submit a PR, you discover that more work is needed, close the PR, do the required work, and then submit a new PR. Otherwise each of your commits requires attention from the maintainers of the project.
53 | * If, however, you submitted a PR and received a request for changes, you should proceed with commits inside that PR, so that the maintainer can see the incremental fixes and won't need to review the whole PR again. In the exceptional case where you realize it will take many commits to complete the requests, it's probably best to close the PR, do the work, and then submit it again. Use common sense when choosing one way over the other.
54 | * When you open a pull request, you can generate a live preview build of how the blog site will look by making a comment in the PR that contains this command: `/preview`. GitHub will build your site and drop a temporary link for everyone to review. You can do this multiple times if necessary; however, as mentioned previously, do not turn an already submitted PR into a development playground.
55 |
56 | ## Do you have questions about the source code?
57 |
58 | * Please ask on the [fastai forum](http://forums.fast.ai/) (after a quick search to make sure the same question hasn't been asked before). We'd rather have the maximum of discussions there so that the largest number of people can benefit from them.
59 |
60 | ## Do you want to contribute to the documentation?
61 |
62 | * PRs are welcome for this. For any confusion about the documentation, please feel free to open an issue on this repo.
63 |
64 |
--------------------------------------------------------------------------------
/_fastpages_docs/DEVELOPMENT.md:
--------------------------------------------------------------------------------
1 | # Development Guide
2 | - [Seeing All Commands In The Terminal](#seeing-all-commands-in-the-terminal)
3 | - [Basic usage: viewing your blog](#basic-usage-viewing-your-blog)
4 | - [Converting the pages locally](#converting-the-pages-locally)
5 | - [Visual Studio Code integration](#visual-studio-code-integration)
6 | - [Advanced usage](#advanced-usage)
7 | - [Rebuild all the containers](#rebuild-all-the-containers)
8 | - [Removing all the containers](#removing-all-the-containers)
9 | - [Attaching a shell to a container](#attaching-a-shell-to-a-container)
10 | - [Running a Jupyter Server](#running-a-jupyter-server)
11 |
12 | You can run your fastpages blog on your local machine, and view any changes you make to your posts, including Jupyter Notebooks and Word documents, live.
13 | The live preview requires that you have Docker installed on your machine. [Follow the instructions on this page if you need to install Docker.](https://www.docker.com/products/docker-desktop)
14 |
15 | ## Seeing All Commands In The Terminal
16 |
17 | There are many different `docker-compose` commands that are necessary to manage the lifecycle of the fastpages Docker containers. To make this easier, we aliased common commands in a [Makefile](https://www.gnu.org/software/make/manual/html_node/Introduction.html).
18 |
19 | You can quickly see all available commands by running this command in the root of your repository:
20 |
21 | `make`
22 |
23 | ## Basic usage: viewing your blog
24 |
25 | All of the commands in this block assume that you're in your blog root directory.
26 | To run the blog with live preview:
27 |
28 | ```bash
29 | make server
30 | ```
31 |
32 | When you run this command for the first time, it'll build the required Docker images, and the process might take a couple minutes.
33 |
34 | This command will build all the necessary containers and run the following services:
35 | 1. A service that monitors any changes in `./_notebooks/*.ipynb` and `./_word/*.docx;*.doc`, and rebuilds the blog on change.
36 | 2. A Jekyll server on http://127.0.0.1:4000 — use this to preview your blog.
37 |
38 | The services will output to your terminal. If you close the terminal or hit `Ctrl-C`, the services will stop.
39 | If you want to run the services in the background:
40 |
41 | ```bash
42 | # run all services in the background
43 | make server-detached
44 |
45 | # stop the services
46 | make stop
47 | ```
48 |
49 | If you need to restart just the Jekyll server, and it's running in the background — you can do `make restart-jekyll`.
50 |
51 | _Note that the blog won't autoreload on change; you'll have to refresh your browser manually._
52 |
53 | **If containers won't start**: try `make build` first; this rebuilds all the containers from scratch and might fix the majority of update problems.
54 |
55 | ## Converting the pages locally
56 |
57 | If you just want to convert your notebooks and word documents to `.md` posts in `_posts`, this command will do it for you:
58 |
59 | ```bash
60 | make convert
61 | ```
62 |
63 | You can then launch the Jekyll server with `make server`.
64 |
65 | ## Visual Studio Code integration
66 |
67 | If you're using VSCode with the Docker extension, you can run these containers from the sidebar: `fastpages_watcher_1` and `fastpages_jekyll_1`.
68 | The containers will only show up in the list after you run or build them for the first time. So if they're not in the list — try `make build` in the console.
69 |
70 | ## Advanced usage
71 |
72 | ### Rebuild all the containers
73 | If you changed files in `_action_files` directory, you might need to rebuild the containers manually, without cache.
74 |
75 | ```bash
76 | make build
77 | ```
78 |
79 | ### Removing all the containers
80 | Want to start from scratch and remove all the containers?
81 |
82 | ```
83 | make remove
84 | ```
85 |
86 | ### Attaching a shell to a container
87 | You can attach a terminal to a running service:
88 |
89 | ```bash
90 |
91 | # If the container is already running:
92 |
93 | # attach to a bash shell in the jekyll service
94 | make bash-jekyll
95 |
96 | # attach to a bash shell in the watcher service.
97 | make bash-nb
98 | ```
99 |
100 | _Note: you can use `docker-compose run` instead of `make bash-nb` or `make bash-jekyll` to start a service and then attach to it.
101 | Or you can run all your services in the background, `make server-detached`, and then use `make bash-nb` or `make bash-jekyll` as in the examples above._
102 |
103 | ## Running A Jupyter Server
104 |
105 | The fastpages development environment does not provide a Jupyter server for you. This is intentional so that you are free to run Jupyter Notebooks or Jupyter Lab in a manner that is familiar to you, and manage dependencies (requirements.txt, conda, etc.) the way you wish. Some tips that may make your life easier:
106 |
107 | - Provide instructions in your README and your blog posts on how to install the dependencies required to run your notebooks. This will make it easier for your audience to reproduce your notebooks.
108 | - Do not edit the Dockerfile in `/_action_files`, as that may interfere with the blogging environment. Furthermore, any changes you make to these files may get lost in future upgrades if [upgrading automatically](UPGRADE.md). Instead, if you wish to manage your Jupyter server with Docker, we recommend that you maintain a separate Dockerfile at the root of your repository.
109 |
--------------------------------------------------------------------------------
/_fastpages_docs/NOTEBOOK_FOOTNOTES.md:
--------------------------------------------------------------------------------
1 | # Detailed Guide To Footnotes in Notebooks
2 |
3 | Notebook -> HTML footnotes don't work the same way as Markdown footnotes. There isn't a good built-in solution, so we made these Jekyll plugins as a workaround:
4 |
5 | ```
6 | This adds a linked superscript {% fn 15 %}
7 |
8 | {{ "This is the actual footnote" | fndetail: 15 }}
9 | ```
10 |
11 | 
12 |
13 | You can have links, but then you have to use **single quotes** around the footnote to escape the link.
14 | ```
15 | This adds a linked superscript {% fn 20 %}
16 |
17 | {{ 'This is the actual footnote with a [link](www.github.com) as well!' | fndetail: 20 }}
18 | ```
19 | 
20 |
21 | However, what if you want a single quote in your footnote? There is not an easy way to escape that. Fortunately, you can use the special HTML entity `&#39;` (you must keep the semicolon!). For example, you can include a single quote like this:
22 |
23 |
24 | ```
25 | This adds a linked superscript {% fn 20 %}
26 |
27 | {{ 'This is the actual footnote; with a [link](www.github.com) as well! and a single quote &#39; too!' | fndetail: 20 }}
28 | ```
29 |
30 | 
31 |
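If you build footnote strings programmatically (for example, in a notebook cell), the escaping rule above is easy to automate. A small sketch, illustrative only:

```python
# Illustration only: escape single quotes so a footnote can be wrapped in the
# single-quoted fndetail syntax shown above.
footnote = "This is the actual footnote with a [link](www.github.com) and a single quote ' too!"
escaped = footnote.replace("'", "&#39;")
print("{{ '" + escaped + "' | fndetail: 20 }}")
```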
--------------------------------------------------------------------------------
/_fastpages_docs/README_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | [//]: # (This template replaces README.md when someone creates a new repo with the fastpages template.)
2 |
3 | 
4 | 
5 | [](https://github.com/fastai/fastpages)
6 |
7 | https://{_username_}.github.io/{_repo_name_}/
8 |
9 | # My Blog
10 |
11 |
12 | _powered by [fastpages](https://github.com/fastai/fastpages)_
13 |
14 |
15 | ## What To Do Next?
16 |
17 | Great! You have set up your repo. Now it's time to start writing content. Some helpful links:
18 |
19 | - [Writing Blogs With Jupyter](https://github.com/fastai/fastpages#writing-blog-posts-with-jupyter)
20 |
21 | - [Writing Blogs With Markdown](https://github.com/fastai/fastpages#writing-blog-posts-with-markdown)
22 |
23 | - [Writing Blog Posts With Word](https://github.com/fastai/fastpages#writing-blog-posts-with-microsoft-word)
24 |
25 | - [(Optional) Preview Your Blog Locally](_fastpages_docs/DEVELOPMENT.md)
26 |
27 | Note: you may want to remove the example blog posts from the `_posts`, `_notebooks` or `_word` folders if you don't want them to appear on your site (empty these folders, but don't delete them).
28 |
29 | Please use the [nbdev & blogging channel](https://forums.fast.ai/c/fastai-users/nbdev/48) in the fastai forums for any questions or feature requests.
30 |
--------------------------------------------------------------------------------
/_fastpages_docs/UPGRADE.md:
--------------------------------------------------------------------------------
1 | # Upgrading fastpages
2 |
3 |
4 |
5 | - [Automated Upgrade](#automated-upgrade)
6 | - [Step 1: Open An Issue With The Upgrade Template.](#step-1-open-an-issue-with-the-upgrade-template)
7 | - [Step 2: Click `Submit new issue`](#step-2-click-submit-new-issue)
8 | - [Step 3: A Link to the Pull Request Will Appear](#step-3-a-link-to-the-pull-request-will-appear)
9 | - [Step 4: Review & Merge PR](#step-4-review-merge-pr)
10 | - [Manual Upgrade](#manual-upgrade)
11 | - [Easy Way (Recommended)](#easy-way-recommended)
12 | - [Advanced](#advanced)
13 | - [Additional Resources](#additional-resources)
14 |
15 |
16 | There are two ways to upgrade fastpages. One is an automated way that assumes you have made no changes to the HTML of your site. Alternatively, you may [upgrade manually](#manual-upgrade) and determine which changes to accept or reject. For most people we recommend upgrading fastpages automatically.
17 |
18 | ## Automated Upgrade
19 |
20 | - This method is appropriate for those who have not customized the HTML of their site.
21 | - **If you are unsure, try the Automated approach and review which files are changed in the automated PR** to see if this is appropriate for you.
22 |
23 | ### Step 1: Open An Issue With The Upgrade Template.
24 |
25 | - Open a new issue in your repository, and push the "Get Started" button for the `[fastpages] Automated Upgrade` Issue template, which looks like this:
26 | - **IF YOU DON'T SEE THIS**: you have an older version of fastpages and you **must [manually upgrade](#manual-upgrade) once** to get this new functionality.
27 |
28 | 
29 |
30 | ### Step 2: Click `Submit new issue`
31 |
32 | - Be careful not to change anything before clicking the button.
33 |
34 | 
35 |
36 | ### Step 3: A Link to the Pull Request Will Appear
37 |
38 | - This issue will trigger GitHub to open a PR making changes to your repository for the upgrade to take place. A comment with the link to the PR will be made in the issue, and will look like this:
39 |
40 | 
41 |
42 | It is possible that you might receive an error message instead of this comment. You can follow the instructions in the comment to troubleshoot the issue. Common reasons for receiving an error are:
43 |
44 | - You are up to date, therefore no upgrade is possible. You will see an error that there is "nothing to commit".
45 | - You already have a PR from a previous upgrade open that you never merged.
46 |
47 | Please [ask on the forums](https://forums.fast.ai/) if you encounter another problem that is unclear.
48 |
49 | ### Step 4: Review & Merge PR
50 |
51 | - Ensure that you read the instructions in the PR carefully. Furthermore, carefully review which files will be changed to determine whether this interferes with any customizations you have made to your site. When ready, select `Merge pull request`.
52 | - If the PR is making undesired changes to files you can use the manual upgrade approach instead.
53 |
54 | ## Manual Upgrade
55 |
56 | ### Easy Way (Recommended)
57 |
58 | Create a new repo with the current `fastpages` template by following the [setup instructions](https://github.com/fastai/fastpages#setup-instructions) in the README, and copy all of your blog posts from `_notebooks`, `_word`, and `_posts` into the new template. This is very similar to what the automated process does.
59 |
60 | ### Advanced
61 |
62 | - This method is appropriate for those who made customizations to the HTML of fastpages.
63 | - You must proceed with caution, as new versions of fastpages may not be compatible with your customizations.
64 | - You can use git to perform the upgrade by [following this approach](https://stackoverflow.com/questions/56577184/github-pull-changes-from-a-template-repository/56577320) instead. A step-by-step companion to this stack overflow post with screenshots is [written up here](https://github.com/fastai/fastpages/issues/163#issuecomment-593766189).
65 | - Be careful to not duplicate files, as files in fastpages have been reorganized several times.
66 |
67 |
68 | ## Additional Resources
69 |
70 | - [This Actions workflow](/.github/workflows/upgrade.yaml) defines the automated upgrade process.
71 | - You can get more help with upgrading in the [fastai forums - nbdev & blogging category](https://forums.fast.ai/c/fastai-users/nbdev/48).
72 |
--------------------------------------------------------------------------------
/_fastpages_docs/_checkbox.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dair-ai/notebooks/7fc69278099f100d39489181d5a7763dfc835eca/_fastpages_docs/_checkbox.png
--------------------------------------------------------------------------------
/_fastpages_docs/_manual_setup.md:
--------------------------------------------------------------------------------
1 | # Manual Setup Instructions
2 |
3 | These are the setup steps that are automated by [setup.yaml](.github/workflows/setup.yaml)
4 |
5 | 1. Click the [](https://github.com/fastai/fastpages/generate) button to create a copy of this repo in your account.
6 |
7 | 2. [Follow these instructions to create an ssh-deploy key](https://developer.github.com/v3/guides/managing-deploy-keys/#deploy-keys). Make sure you **select Allow write access** when adding this key to your GitHub account.
8 |
9 | 3. [Follow these instructions to upload your deploy key](https://help.github.com/en/actions/configuring-and-managing-workflows/creating-and-storing-encrypted-secrets#creating-encrypted-secrets) as an encrypted secret on GitHub. Make sure you name your key `SSH_DEPLOY_KEY`. Note: The deploy key secret is your **private key** (NOT the public key).
10 |
11 | 4. [Create a branch](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-and-deleting-branches-within-your-repository#creating-a-branch) named `gh-pages`.
12 |
13 | 5. Change the badges on this README to point to **your** repository instead of `fastai/fastpages`. Badges are organized in a section at the beginning of this README. For example, you should replace `fastai` and `fastpages` in the below url:
14 |
15 | ``
16 |
17 | to
18 |
19 | ``
20 |
21 | 6. Change `baseurl:` in `_config.yml` to the name of your repository. For example, instead of
22 |
23 | `baseurl: "/fastpages"`
24 |
25 | this should be
26 |
27 | `baseurl: "/your-repo-name"`
28 |
29 | 7. Similarly, change the `url:` parameter in `_config.yml` to the URL your blog will be served on. For example, instead of
30 |
31 | `url: "https://fastpages.fast.ai/"`
32 |
33 | this should be
34 |
35 | `url: "https://<your-username>.github.io"`
36 |
37 | 8. Read through `_config.yaml` carefully as there may be other options that must be set. The comments in this file will provide instructions.
38 |
39 | 9. Delete the `CNAME` file from the root of your `master` branch (or change it if you are using a custom domain)
40 |
41 | 10. Go to your [repository settings and enable GitHub Pages](https://help.github.com/en/enterprise/2.13/user/articles/configuring-a-publishing-source-for-github-pages) with the `gh-pages` branch you created earlier.
--------------------------------------------------------------------------------
/_fastpages_docs/_paginate.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dair-ai/notebooks/7fc69278099f100d39489181d5a7763dfc835eca/_fastpages_docs/_paginate.png
--------------------------------------------------------------------------------
/_fastpages_docs/_post_tags.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dair-ai/notebooks/7fc69278099f100d39489181d5a7763dfc835eca/_fastpages_docs/_post_tags.png
--------------------------------------------------------------------------------
/_fastpages_docs/_setup_pr_template.md:
--------------------------------------------------------------------------------
1 | Hello :wave: @dair-ai! Thank you for using fastpages!
2 |
3 | ## Before you merge this PR
4 |
5 | 1. Create an ssh key-pair. Open this utility. Select: `RSA` and `4096` and leave `Passphrase` blank. Click the blue button `Generate-SSH-Keys`.
6 |
7 | 2. Navigate to this link and click `Add a new secret`. Copy and paste the **Private Key** into the `Value` field. In the `Name` field, name the secret `SSH_DEPLOY_KEY`.
8 |
9 | 3. Navigate to this link and click the `Add deploy key` button. Paste your **Public Key** from step 1 into the `Key` box. In the `Title`, name the key anything you want, for example `fastpages-key`. Finally, **make sure you click the checkbox next to `Allow write access`** (pictured below), and click `Add key` to save the key.
10 |
11 | 
12 |
13 |
14 | ### What to Expect After Merging This PR
15 |
16 | - GitHub Actions will build your site, which will take 2-3 minutes to complete. **This will happen anytime you push changes to the master branch of your repository.** You can monitor the logs of this if you like on the [Actions tab of your repo](https://github.com/dair-ai/notebooks/actions).
17 | - Your GH-Pages Status badge on your README will eventually appear and be green, indicating your first successful build.
18 | - You can monitor the status of your site in the GitHub Pages section of your [repository settings](https://github.com/dair-ai/notebooks/settings).
19 |
20 | If you are not using a custom domain, your website will appear at:
21 |
22 | #### https://dair-ai.github.io/notebooks
23 |
24 |
25 | ## Optional: Using a Custom Domain
26 |
27 | 1. After merging this PR, add a file named `CNAME` at the root of your repo. For example, the `fastpages` blog is hosted at `https://fastpages.fast.ai`, which means [our CNAME](https://github.com/fastai/fastpages/blob/master/CNAME) contains the following contents:
28 |
29 |
30 | >`fastpages.fast.ai`
31 |
32 |
33 | 2. Change the `url` and `baseurl` parameters in your `/_config.yml` file to reflect your custom domain.
34 |
35 |
36 | Wondering how to set up a custom domain? See [this article](https://dev.to/trentyang/how-to-setup-google-domain-for-github-pages-1p58). You must add a CNAME file to the root of your master branch for the instructions in the article to work correctly.
37 |
38 |
39 | ## Questions
40 |
41 | Please use the [nbdev & blogging channel](https://forums.fast.ai/c/fastai-users/nbdev/48) in the fastai forums for any questions or feature requests.
42 |
--------------------------------------------------------------------------------
/_fastpages_docs/_show_image_true.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dair-ai/notebooks/7fc69278099f100d39489181d5a7763dfc835eca/_fastpages_docs/_show_image_true.png
--------------------------------------------------------------------------------
/_fastpages_docs/_upgrade_pr.md:
--------------------------------------------------------------------------------
1 | Hello :wave: @{_username_}!
2 |
3 | This PR pulls the most recent files from [fastpages](https://github.com/fastai/fastpages), and attempts to replace relevant files in your repository, without changing the content of your blog posts. This allows you to receive bug fixes and feature updates.
4 |
5 | ## Warning
6 |
7 | If you have applied **customizations to the HTML or styling of your site, they may be lost if you merge this PR. Please review the changes this PR makes carefully before merging!** However, for people who only write content and don't change the styling of their site, this method is recommended.
8 |
9 | If you would like more fine-grained control over what changes to accept or decline, consider [following this approach](https://stackoverflow.com/questions/56577184/github-pull-changes-from-a-template-repository/56577320) instead.
10 |
11 | ### What to Expect After Merging This PR
12 |
13 | - GitHub Actions will build your site, which will take 3-4 minutes to complete. **This will happen anytime you push changes to the master branch of your repository.** You can monitor the logs of this if you like on the [Actions tab of your repo](https://github.com/{_username_}/{_repo_name_}/actions).
14 | - You can monitor the status of your site in the GitHub Pages section of your [repository settings](https://github.com/{_username_}/{_repo_name_}/settings).
15 |
--------------------------------------------------------------------------------
/_fastpages_docs/highlight_dracula.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dair-ai/notebooks/7fc69278099f100d39489181d5a7763dfc835eca/_fastpages_docs/highlight_dracula.png
--------------------------------------------------------------------------------
/_fastpages_docs/highlight_original.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dair-ai/notebooks/7fc69278099f100d39489181d5a7763dfc835eca/_fastpages_docs/highlight_original.png
--------------------------------------------------------------------------------
/_fastpages_docs/upgrade_step1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dair-ai/notebooks/7fc69278099f100d39489181d5a7763dfc835eca/_fastpages_docs/upgrade_step1.png
--------------------------------------------------------------------------------
/_fastpages_docs/upgrade_step2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dair-ai/notebooks/7fc69278099f100d39489181d5a7763dfc835eca/_fastpages_docs/upgrade_step2.png
--------------------------------------------------------------------------------
/_fastpages_docs/upgrade_step3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dair-ai/notebooks/7fc69278099f100d39489181d5a7763dfc835eca/_fastpages_docs/upgrade_step3.png
--------------------------------------------------------------------------------
/_fastpages_docs/version.txt:
--------------------------------------------------------------------------------
1 | 2.1.11
2 |
--------------------------------------------------------------------------------
/_includes/alert.html:
--------------------------------------------------------------------------------
1 |
2 | {% octicon alert %}
3 | {{include.text}}
4 |
5 |
--------------------------------------------------------------------------------
/_includes/custom-head.html:
--------------------------------------------------------------------------------
1 | {% comment %}
2 | Placeholder to allow defining custom head, in principle, you can add anything here, e.g. favicons:
3 |
4 | 1. Head over to https://realfavicongenerator.net/ to add your own favicons.
5 | 2. Customize default _includes/custom-head.html in your source directory and insert the given code snippet.
6 | {% endcomment %}
7 |
8 |
9 | {%- include favicons.html -%}
10 | {% seo %}
11 |
12 |
13 | {%- feed_meta -%}
14 | {%- if jekyll.environment == 'production' and site.google_analytics -%}
15 | {%- include google-analytics.html -%}
16 | {%- endif -%}
17 |
18 | {% if site.use_math %}
19 |
20 |
21 |
22 |
23 |
34 | {% endif %}
35 |
36 |
57 |
58 |
65 |
--------------------------------------------------------------------------------
/_includes/favicons.html:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/_includes/google-analytics.html:
--------------------------------------------------------------------------------
1 |
2 |
3 | {% if site.google_analytics %}
4 |
5 |
6 | {% endif %}
7 |
--------------------------------------------------------------------------------
/_includes/head.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | {%- seo -%}
6 |
7 | {%- feed_meta -%}
8 | {%- if jekyll.environment == 'production' and site.google_analytics -%}
9 | {%- include google-analytics.html -%}
10 | {%- endif -%}
11 |
12 | {%- include custom-head.html -%}
13 |
14 |
--------------------------------------------------------------------------------
/_includes/image-r:
--------------------------------------------------------------------------------
1 |
2 |
7 | {% if include.caption %}
8 | {{ include.caption }}
9 | {% endif %}
10 |
11 |
12 |
--------------------------------------------------------------------------------
/_includes/image.html:
--------------------------------------------------------------------------------
1 |
2 | {% if {{include.url}} %}{% endif %}
3 |
4 | {% if {{include.url}} %}{% endif %}
5 | {% if {{include.caption}} %}
6 | {{include.caption}}
7 | {% endif %}
8 |
9 |
--------------------------------------------------------------------------------
/_includes/important.html:
--------------------------------------------------------------------------------
1 |
56 | {%- if page.comments -%}
57 | {%- include utterances.html -%}
58 | {%- endif -%}
59 | {%- if site.disqus.shortname -%}
60 | {%- include disqus_comments.html -%}
61 | {%- endif -%}
62 |
63 |
--------------------------------------------------------------------------------
/_notebooks/2020-03-18-pytorch_hello_world.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "nbformat": 4,
3 | "nbformat_minor": 0,
4 | "metadata": {
5 | "colab": {
6 | "name": "pytorch_hello_world.ipynb",
7 | "provenance": []
8 | },
9 | "kernelspec": {
10 | "name": "python3",
11 | "display_name": "Python 3"
12 | }
13 | },
14 | "cells": [
15 | {
16 | "cell_type": "markdown",
17 | "metadata": {
18 | "id": "H7gQFbUxOQtb",
19 | "colab_type": "text"
20 | },
21 | "source": [
22 | "# A First Shot at Deep Learning with PyTorch\n",
23 | "\n",
24 | "> \"Create a hello world for deep learning using PyTorch.\"\n",
25 | "\n",
26 | "- toc: false\n",
27 | "- branch: master\n",
28 | "- author: Elvis Saravia\n",
29 | "- badges: true\n",
30 | "- comments: true\n",
31 | "- categories: [deep learning, beginner, neural network]\n",
32 | "- image: images/model-nn.png\n",
33 | "- hide: false"
34 | ]
35 | },
36 | {
37 | "cell_type": "markdown",
38 | "metadata": {
39 | "id": "9CzZNY9mqFaY",
40 | "colab_type": "text"
41 | },
42 | "source": [
43 | "## About\n",
44 | "\n",
45 | "In this notebook, we are going to take a baby step into the world of deep learning using PyTorch. There are a ton of notebooks out there that teach you the fundamentals of deep learning and PyTorch, so here the idea is to give you some basic introduction to deep learning and PyTorch at a very high level. Therefore, this notebook is targeting beginners but it can also serve as a review for more experienced developers.\n",
46 | "\n",
47 | "After completion of this notebook, you are expected to know the basic components of training a basic neural network with PyTorch. I have also left a couple of exercises towards the end with the intention of encouraging more research and practise of your deep learning skills. \n",
48 | "\n",
49 | "---\n",
50 | "\n",
51 | "**Author:** Elvis Saravia - [Twitter](https://twitter.com/omarsar0) | [LinkedIn](https://www.linkedin.com/in/omarsar/)\n",
52 | "\n",
53 | "**Complete Code Walkthrough:** [Blog post](https://medium.com/dair-ai/a-first-shot-at-deep-learning-with-pytorch-4a8252d30c75)"
54 | ]
55 | },
56 | {
57 | "cell_type": "markdown",
58 | "metadata": {
59 | "id": "CkzttrQCwaSQ",
60 | "colab_type": "text"
61 | },
62 | "source": [
63 | "## Importing the libraries\n",
64 | "\n",
65 | "Like with any other programming exercise, the first step is to import the necessary libraries. As we are going to be using Google Colab to program our neural network, we need to install and import the necessary PyTorch libraries."
66 | ]
67 | },
68 | {
69 | "cell_type": "code",
70 | "metadata": {
71 | "id": "7Exoj-CDskQD",
72 | "colab_type": "code",
73 | "outputId": "44612825-dbec-4d7a-94bc-147e069e72a9",
74 | "colab": {
75 | "base_uri": "https://localhost:8080/",
76 | "height": 102
77 | }
78 | },
79 | "source": [
80 | "!pip3 install torch torchvision"
81 | ],
82 | "execution_count": 0,
83 | "outputs": [
84 | {
85 | "output_type": "stream",
86 | "text": [
87 | "Requirement already satisfied: torch in /usr/local/lib/python3.6/dist-packages (1.4.0)\n",
88 | "Requirement already satisfied: torchvision in /usr/local/lib/python3.6/dist-packages (0.5.0)\n",
89 | "Requirement already satisfied: pillow>=4.1.1 in /usr/local/lib/python3.6/dist-packages (from torchvision) (7.0.0)\n",
90 | "Requirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from torchvision) (1.18.1)\n",
91 | "Requirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from torchvision) (1.12.0)\n"
92 | ],
93 | "name": "stdout"
94 | }
95 | ]
96 | },
97 | {
98 | "cell_type": "code",
99 | "metadata": {
100 | "id": "FuhJIaeXO2W9",
101 | "colab_type": "code",
102 | "outputId": "d1650745-da7f-4d09-dd0f-45cdf05b3473",
103 | "colab": {
104 | "base_uri": "https://localhost:8080/",
105 | "height": 34
106 | }
107 | },
108 | "source": [
109 | "## The usual imports\n",
110 | "import torch\n",
111 | "import torch.nn as nn\n",
112 | "\n",
113 | "## print out the pytorch version used\n",
114 | "print(torch.__version__)"
115 | ],
116 | "execution_count": 0,
117 | "outputs": [
118 | {
119 | "output_type": "stream",
120 | "text": [
121 | "1.4.0\n"
122 | ],
123 | "name": "stdout"
124 | }
125 | ]
126 | },
127 | {
128 | "cell_type": "markdown",
129 | "metadata": {
130 | "id": "0a2C_nneO_wp",
131 | "colab_type": "text"
132 | },
133 | "source": [
134 | "## The Neural Network\n",
135 | "\n",
136 | "\n",
137 | "\n",
138 | "Before building and training a neural network the first step is to process and prepare the data. In this notebook, we are going to use syntethic data (i.e., fake data) so we won't be using any real world data. \n",
139 | "\n",
140 | "For the sake of simplicity, we are going to use the following input and output pairs converted to tensors, which is how data is typically represented in the world of deep learning. The x values represent the input of dimension `(6,1)` and the y values represent the output of similar dimension. The example is taken from this [tutorial](https://github.com/lmoroney/dlaicourse/blob/master/Course%201%20-%20Part%202%20-%20Lesson%202%20-%20Notebook.ipynb). \n",
141 | "\n",
142 | "The objective of the neural network model that we are going to build and train is to automatically learn patterns that better characterize the relationship between the `x` and `y` values. Essentially, the model learns the relationship that exists between inputs and outputs which can then be used to predict the corresponding `y` value for any given input `x`."
143 | ]
144 | },
145 | {
146 | "cell_type": "code",
147 | "metadata": {
148 | "id": "JWFtgUX85iwO",
149 | "colab_type": "code",
150 | "colab": {}
151 | },
152 | "source": [
153 | "## our data in tensor form\n",
154 | "x = torch.tensor([[-1.0], [0.0], [1.0], [2.0], [3.0], [4.0]], dtype=torch.float)\n",
155 | "y = torch.tensor([[-3.0], [-1.0], [1.0], [3.0], [5.0], [7.0]], dtype=torch.float)"
156 | ],
157 | "execution_count": 0,
158 | "outputs": []
159 | },
160 | {
161 | "cell_type": "code",
162 | "metadata": {
163 | "id": "NcQUjR_95z5J",
164 | "colab_type": "code",
165 | "outputId": "50e919a8-34b7-4be5-f504-7998c540eb3f",
166 | "colab": {
167 | "base_uri": "https://localhost:8080/",
168 | "height": 34
169 | }
170 | },
171 | "source": [
172 | "## print size of the input tensor\n",
173 | "x.size()"
174 | ],
175 | "execution_count": 0,
176 | "outputs": [
177 | {
178 | "output_type": "execute_result",
179 | "data": {
180 | "text/plain": [
181 | "torch.Size([6, 1])"
182 | ]
183 | },
184 | "metadata": {
185 | "tags": []
186 | },
187 | "execution_count": 4
188 | }
189 | ]
190 | },
191 | {
192 | "cell_type": "markdown",
193 | "metadata": {
194 | "id": "9CJXO5WX1QtQ",
195 | "colab_type": "text"
196 | },
197 | "source": [
198 | "## The Neural Network Components\n",
199 | "As said earlier, we are going to first define and build out the components of our neural network before training the model.\n",
200 | "\n",
201 | "### Model\n",
202 | "\n",
203 | "Typically, when building a neural network model, we define the layers and weights which form the basic components of the model. Below we show an example of how to define a hidden layer named `layer1` with size `(1, 1)`. For the purpose of this tutorial, we won't explicitly define the `weights` and allow the built-in functions provided by PyTorch to handle that part for us. By the way, the `nn.Linear(...)` function applies a linear transformation ($y = xA^T + b$) to the data that was provided as its input. We ignore the bias for now by setting `bias=False`.\n",
204 | "\n",
205 | "\n",
206 | "\n"
207 | ]
208 | },
209 | {
210 | "cell_type": "code",
211 | "metadata": {
212 | "id": "N1Ii5JRz3Jud",
213 | "colab_type": "code",
214 | "colab": {}
215 | },
216 | "source": [
217 | "## Neural network with 1 hidden layer\n",
218 | "layer1 = nn.Linear(1,1, bias=False)\n",
219 | "model = nn.Sequential(layer1)"
220 | ],
221 | "execution_count": 0,
222 | "outputs": []
223 | },
224 | {
225 | "cell_type": "markdown",
226 | "metadata": {
227 | "id": "9HTWYD4aMBXQ",
228 | "colab_type": "text"
229 | },
230 | "source": [
231 | "### Loss and Optimizer\n",
232 | "The loss function, `nn.MSELoss()`, is in charge of letting the model know how good it has learned the relationship between the input and output. The optimizer (in this case an `SGD`) primary role is to minimize or lower that loss value as it tunes its weights."
233 | ]
234 | },
235 | {
236 | "cell_type": "code",
237 | "metadata": {
238 | "id": "3hglFpejArxx",
239 | "colab_type": "code",
240 | "colab": {}
241 | },
242 | "source": [
243 | "## loss function\n",
244 | "criterion = nn.MSELoss()\n",
245 | "\n",
246 | "## optimizer algorithm\n",
247 | "optimizer = torch.optim.SGD(model.parameters(), lr=0.01)"
248 | ],
249 | "execution_count": 0,
250 | "outputs": []
251 | },
252 | {
253 | "cell_type": "markdown",
254 | "metadata": {
255 | "id": "FKj6jvZTUtGh",
256 | "colab_type": "text"
257 | },
258 | "source": [
259 | "## Training the Neural Network Model\n",
260 | "We have all the components we need to train our model. Below is the code used to train our model. \n",
261 | "\n",
262 | "In simple terms, we train the model by feeding it the input and output pairs for a couple of rounds (i.e., `epoch`). After a series of forward and backward steps, the model somewhat learns the relationship between x and y values. This is notable by the decrease in the computed `loss`. For a more detailed explanation of this code check out this [tutorial](https://medium.com/dair-ai/a-simple-neural-network-from-scratch-with-pytorch-and-google-colab-c7f3830618e0). "
263 | ]
264 | },
265 | {
266 | "cell_type": "code",
267 | "metadata": {
268 | "id": "JeOr9i-aBzRv",
269 | "colab_type": "code",
270 | "outputId": "2dc9ea2f-f9d7-4153-ede8-da5144698e9f",
271 | "colab": {
272 | "base_uri": "https://localhost:8080/",
273 | "height": 1000
274 | }
275 | },
276 | "source": [
277 | "## training\n",
278 | "for i in range(150):\n",
279 | " model = model.train()\n",
280 | "\n",
281 | " ## forward\n",
282 | " output = model(x)\n",
283 | " loss = criterion(output, y)\n",
284 | " optimizer.zero_grad()\n",
285 | "\n",
286 | " ## backward + update model params \n",
287 | " loss.backward()\n",
288 | " optimizer.step()\n",
289 | "\n",
290 | " model.eval()\n",
291 | " print('Epoch: %d | Loss: %.4f' %(i, loss.detach().item()))"
292 | ],
293 | "execution_count": 0,
294 | "outputs": [
295 | {
296 | "output_type": "stream",
297 | "text": [
298 | "Epoch: 0 | Loss: 25.5853\n",
299 | "Epoch: 1 | Loss: 20.6815\n",
300 | "Epoch: 2 | Loss: 16.7388\n",
301 | "Epoch: 3 | Loss: 13.5688\n",
302 | "Epoch: 4 | Loss: 11.0201\n",
303 | "Epoch: 5 | Loss: 8.9709\n",
304 | "Epoch: 6 | Loss: 7.3234\n",
305 | "Epoch: 7 | Loss: 5.9987\n",
306 | "Epoch: 8 | Loss: 4.9337\n",
307 | "Epoch: 9 | Loss: 4.0774\n",
308 | "Epoch: 10 | Loss: 3.3889\n",
309 | "Epoch: 11 | Loss: 2.8353\n",
310 | "Epoch: 12 | Loss: 2.3903\n",
311 | "Epoch: 13 | Loss: 2.0325\n",
312 | "Epoch: 14 | Loss: 1.7448\n",
313 | "Epoch: 15 | Loss: 1.5134\n",
314 | "Epoch: 16 | Loss: 1.3275\n",
315 | "Epoch: 17 | Loss: 1.1779\n",
316 | "Epoch: 18 | Loss: 1.0577\n",
317 | "Epoch: 19 | Loss: 0.9610\n",
318 | "Epoch: 20 | Loss: 0.8833\n",
319 | "Epoch: 21 | Loss: 0.8208\n",
320 | "Epoch: 22 | Loss: 0.7706\n",
321 | "Epoch: 23 | Loss: 0.7302\n",
322 | "Epoch: 24 | Loss: 0.6977\n",
323 | "Epoch: 25 | Loss: 0.6716\n",
324 | "Epoch: 26 | Loss: 0.6506\n",
325 | "Epoch: 27 | Loss: 0.6338\n",
326 | "Epoch: 28 | Loss: 0.6202\n",
327 | "Epoch: 29 | Loss: 0.6093\n",
328 | "Epoch: 30 | Loss: 0.6005\n",
329 | "Epoch: 31 | Loss: 0.5935\n",
330 | "Epoch: 32 | Loss: 0.5878\n",
331 | "Epoch: 33 | Loss: 0.5832\n",
332 | "Epoch: 34 | Loss: 0.5796\n",
333 | "Epoch: 35 | Loss: 0.5766\n",
334 | "Epoch: 36 | Loss: 0.5742\n",
335 | "Epoch: 37 | Loss: 0.5723\n",
336 | "Epoch: 38 | Loss: 0.5708\n",
337 | "Epoch: 39 | Loss: 0.5696\n",
338 | "Epoch: 40 | Loss: 0.5686\n",
339 | "Epoch: 41 | Loss: 0.5678\n",
340 | "Epoch: 42 | Loss: 0.5671\n",
341 | "Epoch: 43 | Loss: 0.5666\n",
342 | "Epoch: 44 | Loss: 0.5662\n",
343 | "Epoch: 45 | Loss: 0.5659\n",
344 | "Epoch: 46 | Loss: 0.5656\n",
345 | "Epoch: 47 | Loss: 0.5654\n",
346 | "Epoch: 48 | Loss: 0.5652\n",
347 | "Epoch: 49 | Loss: 0.5651\n",
348 | "Epoch: 50 | Loss: 0.5650\n",
349 | "Epoch: 51 | Loss: 0.5649\n",
350 | "Epoch: 52 | Loss: 0.5648\n",
351 | "Epoch: 53 | Loss: 0.5648\n",
352 | "Epoch: 54 | Loss: 0.5647\n",
353 | "Epoch: 55 | Loss: 0.5647\n",
354 | "Epoch: 56 | Loss: 0.5646\n",
355 | "Epoch: 57 | Loss: 0.5646\n",
356 | "Epoch: 58 | Loss: 0.5646\n",
357 | "Epoch: 59 | Loss: 0.5646\n",
358 | "Epoch: 60 | Loss: 0.5646\n",
359 | "Epoch: 61 | Loss: 0.5646\n",
360 | "Epoch: 62 | Loss: 0.5645\n",
361 | "Epoch: 63 | Loss: 0.5645\n",
362 | "Epoch: 64 | Loss: 0.5645\n",
363 | "Epoch: 65 | Loss: 0.5645\n",
364 | "Epoch: 66 | Loss: 0.5645\n",
365 | "Epoch: 67 | Loss: 0.5645\n",
366 | "Epoch: 68 | Loss: 0.5645\n",
367 | "Epoch: 69 | Loss: 0.5645\n",
368 | "Epoch: 70 | Loss: 0.5645\n",
369 | "Epoch: 71 | Loss: 0.5645\n",
370 | "Epoch: 72 | Loss: 0.5645\n",
371 | "Epoch: 73 | Loss: 0.5645\n",
372 | "Epoch: 74 | Loss: 0.5645\n",
373 | "Epoch: 75 | Loss: 0.5645\n",
374 | "Epoch: 76 | Loss: 0.5645\n",
375 | "Epoch: 77 | Loss: 0.5645\n",
376 | "Epoch: 78 | Loss: 0.5645\n",
377 | "Epoch: 79 | Loss: 0.5645\n",
378 | "Epoch: 80 | Loss: 0.5645\n",
379 | "Epoch: 81 | Loss: 0.5645\n",
380 | "Epoch: 82 | Loss: 0.5645\n",
381 | "Epoch: 83 | Loss: 0.5645\n",
382 | "Epoch: 84 | Loss: 0.5645\n",
383 | "Epoch: 85 | Loss: 0.5645\n",
384 | "Epoch: 86 | Loss: 0.5645\n",
385 | "Epoch: 87 | Loss: 0.5645\n",
386 | "Epoch: 88 | Loss: 0.5645\n",
387 | "Epoch: 89 | Loss: 0.5645\n",
388 | "Epoch: 90 | Loss: 0.5645\n",
389 | "Epoch: 91 | Loss: 0.5645\n",
390 | "Epoch: 92 | Loss: 0.5645\n",
391 | "Epoch: 93 | Loss: 0.5645\n",
392 | "Epoch: 94 | Loss: 0.5645\n",
393 | "Epoch: 95 | Loss: 0.5645\n",
394 | "Epoch: 96 | Loss: 0.5645\n",
395 | "Epoch: 97 | Loss: 0.5645\n",
396 | "Epoch: 98 | Loss: 0.5645\n",
397 | "Epoch: 99 | Loss: 0.5645\n",
398 | "Epoch: 100 | Loss: 0.5645\n",
399 | "Epoch: 101 | Loss: 0.5645\n",
400 | "Epoch: 102 | Loss: 0.5645\n",
401 | "Epoch: 103 | Loss: 0.5645\n",
402 | "Epoch: 104 | Loss: 0.5645\n",
403 | "Epoch: 105 | Loss: 0.5645\n",
404 | "Epoch: 106 | Loss: 0.5645\n",
405 | "Epoch: 107 | Loss: 0.5645\n",
406 | "Epoch: 108 | Loss: 0.5645\n",
407 | "Epoch: 109 | Loss: 0.5645\n",
408 | "Epoch: 110 | Loss: 0.5645\n",
409 | "Epoch: 111 | Loss: 0.5645\n",
410 | "Epoch: 112 | Loss: 0.5645\n",
411 | "Epoch: 113 | Loss: 0.5645\n",
412 | "Epoch: 114 | Loss: 0.5645\n",
413 | "Epoch: 115 | Loss: 0.5645\n",
414 | "Epoch: 116 | Loss: 0.5645\n",
415 | "Epoch: 117 | Loss: 0.5645\n",
416 | "Epoch: 118 | Loss: 0.5645\n",
417 | "Epoch: 119 | Loss: 0.5645\n",
418 | "Epoch: 120 | Loss: 0.5645\n",
419 | "Epoch: 121 | Loss: 0.5645\n",
420 | "Epoch: 122 | Loss: 0.5645\n",
421 | "Epoch: 123 | Loss: 0.5645\n",
422 | "Epoch: 124 | Loss: 0.5645\n",
423 | "Epoch: 125 | Loss: 0.5645\n",
424 | "Epoch: 126 | Loss: 0.5645\n",
425 | "Epoch: 127 | Loss: 0.5645\n",
426 | "Epoch: 128 | Loss: 0.5645\n",
427 | "Epoch: 129 | Loss: 0.5645\n",
428 | "Epoch: 130 | Loss: 0.5645\n",
429 | "Epoch: 131 | Loss: 0.5645\n",
430 | "Epoch: 132 | Loss: 0.5645\n",
431 | "Epoch: 133 | Loss: 0.5645\n",
432 | "Epoch: 134 | Loss: 0.5645\n",
433 | "Epoch: 135 | Loss: 0.5645\n",
434 | "Epoch: 136 | Loss: 0.5645\n",
435 | "Epoch: 137 | Loss: 0.5645\n",
436 | "Epoch: 138 | Loss: 0.5645\n",
437 | "Epoch: 139 | Loss: 0.5645\n",
438 | "Epoch: 140 | Loss: 0.5645\n",
439 | "Epoch: 141 | Loss: 0.5645\n",
440 | "Epoch: 142 | Loss: 0.5645\n",
441 | "Epoch: 143 | Loss: 0.5645\n",
442 | "Epoch: 144 | Loss: 0.5645\n",
443 | "Epoch: 145 | Loss: 0.5645\n",
444 | "Epoch: 146 | Loss: 0.5645\n",
445 | "Epoch: 147 | Loss: 0.5645\n",
446 | "Epoch: 148 | Loss: 0.5645\n",
447 | "Epoch: 149 | Loss: 0.5645\n"
448 | ],
449 | "name": "stdout"
450 | }
451 | ]
452 | },
453 | {
454 | "cell_type": "markdown",
455 | "metadata": {
456 | "id": "Bp50Q7J0Xkiw",
457 | "colab_type": "text"
458 | },
459 | "source": [
460 | "## Testing the Model\n",
461 | "After training the model we have the ability to test the model predictive capability by passing it an input. Below is a simple example of how you could achieve this with our model. The result we obtained aligns with the results obtained in this [notebook](https://github.com/lmoroney/dlaicourse/blob/master/Course%201%20-%20Part%202%20-%20Lesson%202%20-%20Notebook.ipynb), which inspired this entire tutorial. "
462 | ]
463 | },
464 | {
465 | "cell_type": "code",
466 | "metadata": {
467 | "id": "V1odfZpGFoBi",
468 | "colab_type": "code",
469 | "outputId": "305ca82f-a71f-4ec0-8177-a7eced1b69b8",
470 | "colab": {
471 | "base_uri": "https://localhost:8080/",
472 | "height": 34
473 | }
474 | },
475 | "source": [
476 | "## test the model\n",
477 | "sample = torch.tensor([10.0], dtype=torch.float)\n",
478 | "predicted = model(sample)\n",
479 | "print(predicted.detach().item())"
480 | ],
481 | "execution_count": 0,
482 | "outputs": [
483 | {
484 | "output_type": "stream",
485 | "text": [
486 | "17.096769332885742\n"
487 | ],
488 | "name": "stdout"
489 | }
490 | ]
491 | },
492 | {
493 | "cell_type": "markdown",
494 | "metadata": {
495 | "id": "ozX4V1GhPLyr",
496 | "colab_type": "text"
497 | },
498 | "source": [
499 | "## Final Words\n",
500 | "\n",
501 | "Congratulations! In this tutorial you learned how to train a simple neural network using PyTorch. You also learned about the basic components that make up a neural network model such as the linear transformation layer, optimizer, and loss function. We then trained the model and tested its predictive capabilities. You are well on your way to become more knowledgeable about deep learning and PyTorch. I have provided a bunch of references below if you are interested in practising and learning more. \n",
502 | "\n",
503 | "*I would like to thank Laurence Moroney for his excellent [tutorial](https://github.com/lmoroney/dlaicourse/blob/master/Course%201%20-%20Part%202%20-%20Lesson%202%20-%20Notebook.ipynb) which I used as an inspiration for this tutorial.*"
504 | ]
505 | },
506 | {
507 | "cell_type": "markdown",
508 | "metadata": {
509 | "id": "LAABGiMHeDOr",
510 | "colab_type": "text"
511 | },
512 | "source": [
513 | "## Exercises\n",
514 | "- Add more examples in the input and output tensors. In addition, try to change the dimensions of the data, say by adding an extra value in each array. What needs to be changed to successfully train the network with the new data?\n",
515 | "- The model converged really fast, which means it learned the relationship between x and y values after a couple of iterations. Do you think it makes sense to continue training? How would you automate the process of stopping the training after the model loss doesn't subtantially change?\n",
516 | "- In our example, we used a single hidden layer. Try to take a look at the PyTorch documentation to figure out what you need to do to get a model with more layers. What happens if you add more hidden layers?\n",
517 | "- We did not discuss the learning rate (`lr-0.001`) and the optimizer in great detail. Check out the [PyTorch documentation](https://pytorch.org/docs/stable/optim.html) to learn more about what other optimizers you can use.\n"
518 | ]
519 | },
520 | {
521 | "cell_type": "markdown",
522 | "metadata": {
523 | "id": "4-o4w9vpPHZz",
524 | "colab_type": "text"
525 | },
526 | "source": [
527 | "## References\n",
528 | "- [The Hello World of Deep Learning with Neural Networks](https://github.com/lmoroney/dlaicourse/blob/master/Course%201%20-%20Part%202%20-%20Lesson%202%20-%20Notebook.ipynb)\n",
529 | "- [A Simple Neural Network from Scratch with PyTorch and Google Colab](https://medium.com/dair-ai/a-simple-neural-network-from-scratch-with-pytorch-and-google-colab-c7f3830618e0?source=collection_category---4------1-----------------------)\n",
530 | "- [PyTorch Official Docs](https://pytorch.org/docs/stable/nn.html)\n",
531 | "- [PyTorch 1.2 Quickstart with Google Colab](https://medium.com/dair-ai/pytorch-1-2-quickstart-with-google-colab-6690a30c38d)\n",
532 | "- [A Gentle Intoduction to PyTorch](https://medium.com/dair-ai/pytorch-1-2-introduction-guide-f6fa9bb7597c)"
533 | ]
534 | }
535 | ]
536 | }
--------------------------------------------------------------------------------
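The exercises in the notebook above ask what changes when you add more hidden layers and how you might stop training once the loss plateaus. A minimal sketch of both, reusing the notebook's synthetic data (illustrative only, not part of the notebook itself):

```python
import torch
import torch.nn as nn

# The same synthetic data as in the notebook above (it follows y = 2x - 1).
x = torch.tensor([[-1.0], [0.0], [1.0], [2.0], [3.0], [4.0]])
y = torch.tensor([[-3.0], [-1.0], [1.0], [3.0], [5.0], [7.0]])

# A deeper model than the notebook's single bias-free linear layer.
model = nn.Sequential(nn.Linear(1, 8), nn.ReLU(), nn.Linear(8, 1))
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

prev_loss = float("inf")
for epoch in range(1000):
    optimizer.zero_grad()
    loss = criterion(model(x), y)
    loss.backward()
    optimizer.step()
    # Simple early stopping: quit once the loss stops changing meaningfully.
    if abs(prev_loss - loss.item()) < 1e-6:
        break
    prev_loss = loss.item()

print(f"stopped at epoch {epoch}, loss {loss.item():.4f}")
print(model(torch.tensor([[10.0]])).item())  # should move toward 19.0 (y = 2x - 1)
```

Note that the notebook's single `nn.Linear(1, 1, bias=False)` model has no intercept term, so it cannot fit y = 2x - 1 exactly; that is consistent with the loss plateau at 0.5645 and the prediction of roughly 17.1 for x = 10 seen in the notebook's output.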
/_notebooks/2020-03-19-Writing_Primer_for_Data_Scientists.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "nbformat": 4,
3 | "nbformat_minor": 0,
4 | "metadata": {
5 | "colab": {
6 | "name": "Writing Primer for Data Scientists.ipynb",
7 | "provenance": []
8 | }
9 | },
10 | "cells": [
11 | {
12 | "cell_type": "markdown",
13 | "metadata": {
14 | "id": "oPzdNvIMKK1m",
15 | "colab_type": "text"
16 | },
17 | "source": [
18 | "# Writing Primer for Data Scientists\n",
19 | "> \"A guide for writing outstanding data science tutorials.\"\n",
20 | "\n",
21 | "- toc: false\n",
22 | "- branch: master\n",
23 | "- author: Elvis Saravia\n",
24 | "- badges: true\n",
25 | "- comments: true\n",
26 | "- categories: [data science]\n",
27 | "- image: images/data-science.png\n",
28 | "- hide: false\n"
29 | ]
30 | },
31 | {
32 | "cell_type": "markdown",
33 | "metadata": {
34 | "id": "tD-GvSpBxCDj",
35 | "colab_type": "text"
36 | },
37 | "source": [
38 | "## [Headline]\n",
39 | "\n",
40 | "Typically, when you are writing something that's technical, you want a headline that's *appealing*. This will help you to reach a wider audience and let your work have more visibility and impact as well. The headline or topics of your tutorial should include a short title (typically 10~15 maximum words). The title can include the technology and technique you are talking about, combined with a nice action verb that appeals to a wider audience. For instance, I titled one of my [articles](https://medium.com/dair-ai/building-rnns-is-fun-with-pytorch-and-google-colab-3903ea9a3a79) as follows: \"Building RNNs is Fun with PyTorch and Google Colab\". You can get pretty creative with the headline, but keep in mind that this is the face of your article and it should be given plenty of tinkering before you finalize it. "
41 | ]
42 | },
43 | {
44 | "cell_type": "markdown",
45 | "metadata": {
46 | "id": "91oJq77cyui_",
47 | "colab_type": "text"
48 | },
49 | "source": [
50 | "## [Project Description]\n",
51 | "The project description marks the beginning of the tutorial you are writing. It should be clear, concise, and interesting. Here I suggest you to briefly explain what the following notebook tutorial does (usually one sentence)? Then you can explain what technologies you will be using in the tutorial (usually one sentence)? You can also briefly explain what value or knowledge the user will obtain after finishing the tutorial (a short list or two sentences will do)? In addition, you can give credit to any of the notable resources you are utilizing, and also briefly introduce yourself if the project description is not too lengthy? "
52 | ]
53 | },
54 | {
55 | "cell_type": "code",
56 | "metadata": {
57 | "id": "q_e7rSozyvon",
58 | "colab_type": "code",
59 | "colab": {}
60 | },
61 | "source": [
62 | "### Here you usually import your libraries"
63 | ],
64 | "execution_count": 0,
65 | "outputs": []
66 | },
67 | {
68 | "cell_type": "markdown",
69 | "metadata": {
70 | "id": "qvp3U7N41RBT",
71 | "colab_type": "text"
72 | },
73 | "source": [
74 | "If there is an important clarification that you need to make about any of the libraries imported, now is the right time to do so."
75 | ]
76 | },
77 | {
78 | "cell_type": "markdown",
79 | "metadata": {
80 | "id": "Rylpdsb21Wo_",
81 | "colab_type": "text"
82 | },
83 | "source": [
84 | "## [Data Loading]\n",
85 | "The first step of the pipeline with any data science related tutorial is usually the data loading component. Besides visually describing the dataset in use to your audience, also try to briefly explain (in one or two sentences) where the data came from, i.e., the source of the data. Other specifications like dimensions and attribute type are important but can be neatly explained with examples using code and tools such as `pandas`."
86 | ]
87 | },
88 | {
89 | "cell_type": "code",
90 | "metadata": {
91 | "id": "g1_euxvq1XQZ",
92 | "colab_type": "code",
93 | "colab": {}
94 | },
95 | "source": [
96 | "### code for importing or downloading data"
97 | ],
98 | "execution_count": 0,
99 | "outputs": []
100 | },
101 | {
102 | "cell_type": "markdown",
103 | "metadata": {
104 | "id": "Ia6remU_fV7W",
105 | "colab_type": "text"
106 | },
107 | "source": [
108 | "## [Data Exploration] \n",
109 | "Since you are teaching through writing and not actually live coding, resist the temptation to write code that does anything with the data like transformation or feature engineering before actually exploring it. It's a common mistake or practice that should be minimized. You want to give the readers some idea about the data through basic statistics, plots, and figures. Practise this as much as you can, and it will become an important habit in your data science work flow. Your readers will also appreciate the courtesy."
110 | ]
111 | },
112 | {
113 | "cell_type": "markdown",
114 | "metadata": {
115 | "id": "MwVV-MGG14UL",
116 | "colab_type": "text"
117 | },
118 | "source": [
119 | "## [Data Preprocessing]\n",
120 | "Although sometimes not necessary, as some datasets already come preprocessed, I believe it is important to slightly mention what type of preprocessing steps the data has undergone -- even if you need to do this through code examples. It should clarify any confusion that can present itself during the modeling section of the tutorial. Remember, your audience wants to get a broad understanding of the data before the modeling component of the tutorial, so try to explain this part of the tutorial as clear as possible with examples. Take advantage of your notebook features and other tools such as `matplotlib` and `pandas`."
121 | ]
122 | },
123 | {
124 | "cell_type": "code",
125 | "metadata": {
126 | "id": "05S4Z52q156M",
127 | "colab_type": "code",
128 | "colab": {}
129 | },
130 | "source": [
131 | "### code for preprocessing"
132 | ],
133 | "execution_count": 0,
134 | "outputs": []
135 | },
136 | {
137 | "cell_type": "markdown",
138 | "metadata": {
139 | "id": "uZ6eXpGl2WBt",
140 | "colab_type": "text"
141 | },
142 | "source": [
143 | "## [Constructing Model]\n",
144 | "If you are using tools such as PyTorch or TensorFlow for your data science projects, this section is reserved for the computation graph. Here you usually just state very briefly what you are building. No need to go into details just yet!"
145 | ]
146 | },
147 | {
148 | "cell_type": "code",
149 | "metadata": {
150 | "id": "l7tEeylE2kSR",
151 | "colab_type": "code",
152 | "colab": {}
153 | },
154 | "source": [
155 | "### code for model"
156 | ],
157 | "execution_count": 0,
158 | "outputs": []
159 | },
160 | {
161 | "cell_type": "markdown",
162 | "metadata": {
163 | "id": "GGGakX-l2o0s",
164 | "colab_type": "text"
165 | },
166 | "source": [
167 | "## [Testing Model]\n",
168 | "One of the things I have learned over the years is that everything in data science is better understood with examples, rather than just using plain code or pictures. Before you begin training your models make sure to explain to the reader what the model is expecting as input and what it is expected to output. Rendering code here with nice descriptions help to prepare the reader on what to expect during training the model, especially since the training code is usually longer than most sections of the tutorial. With libraries like [PyTorch](https://pytorch.org/) and [DyNet](http://dynet.io/) this is fairly easy since they are dynamic computing libraries. TensorFlow also offers an [eager](https://www.tensorflow.org/guide/eager) execution command, `tf.enable_eager_execution()` to evaluate operations immediately. This is what's called imperative programming and I am glad they have it. It makes it easy to teach others about the beautiful things these tools are able to accomplish. I like to think that data science is about storytelling and discovery, and it should remain that way. Clear writing helps!"
169 | ]
170 | },
171 | {
172 | "cell_type": "markdown",
173 | "metadata": {
174 | "id": "NtF8ipN93JTj",
175 | "colab_type": "text"
176 | },
177 | "source": [
178 | "## [Training Model]\n",
179 | "When training the models you would specify what kind of optimization, hyperparameters, and data iterating methods you are using. To be honest, the training code is usually self-explanatory. If you did your job at the beginning, explaining your dataset and testing the model, this part of the tutorial is probably the one that needs less explanation. In my experience, most data computing libraries use similar training strategies, thus the training structure has become ubiquitous in some sense. If there is still any clarification in your training that you need the reader to know, you can always explain it beforehand. "
180 | ]
181 | },
182 | {
183 | "cell_type": "code",
184 | "metadata": {
185 | "id": "P2b6vtcU3ddJ",
186 | "colab_type": "code",
187 | "colab": {}
188 | },
189 | "source": [
190 | "### Hyperparameters\n",
191 | "\n",
192 | "### Training code"
193 | ],
194 | "execution_count": 0,
195 | "outputs": []
196 | },
197 | {
198 | "cell_type": "markdown",
199 | "metadata": {
200 | "id": "C3Hf7M1r3isG",
201 | "colab_type": "text"
202 | },
203 | "source": [
204 | "## [Evaluating Model]\n",
205 | "And lastly, it is good practice to evaluate your models on some held out samples of the dataset. This helps the reader to get a gist of what the tutorial you just showed him/her contains. It also helps to re-emphasize on the values the tutorial is providing for the reader. This part of the tutorial also helps to finalize your final thoughts and share insights with your readers. Readers love insights. You can share plots, a lot of examples, and even explore the parameters of the model. "
206 | ]
207 | },
208 | {
209 | "cell_type": "code",
210 | "metadata": {
211 | "id": "GiRH0DTd3u4N",
212 | "colab_type": "code",
213 | "colab": {}
214 | },
215 | "source": [
216 | "### Evaluation code"
217 | ],
218 | "execution_count": 0,
219 | "outputs": []
220 | },
221 | {
222 | "cell_type": "markdown",
223 | "metadata": {
224 | "id": "plZ38XRC3zJu",
225 | "colab_type": "text"
226 | },
227 | "source": [
228 | "## [Final Words]\n",
229 | "You are not writing a book, so it is not necessary to have a conclusion section. In my experience, you use the final section to summarize all your findings and the future ideas you are working on. This is also a great time to congratualte the reader for making it to the end of the tutorial -- that's a huge achievement. You show that you appreciate the readers. Then you can end the section with your favorite quote. \n",
230 | "\n",
231 | "And that's it! Congratulations for reaching the end of this primer. You are now more than equipped to deliver excellent tutorials to the whole data science community and to a wider audience. With this short primer, you should reach thousands, and hopefully millions, but most importantly, with it, you should be able to bring value to your readers and keep expanding the human knowledge base. "
232 | ]
233 | },
234 | {
235 | "cell_type": "markdown",
236 | "metadata": {
237 | "id": "Ng8pGkkw8XB3",
238 | "colab_type": "text"
239 | },
240 | "source": [
241 | "## [References]\n",
242 | "Remember to always give credit where it is due. It shows you are responsible and care for the long-term success of the community. Papers, other implementations, video, code repositories, etc., are some of the things are you looking to reference. If you don't want to include this very formal reference section, make sure to embed links throughout the tutorial as an alternative. "
243 | ]
244 | },
245 | {
246 | "cell_type": "markdown",
247 | "metadata": {
248 | "id": "vvQvwkAx4u5r",
249 | "colab_type": "text"
250 | },
251 | "source": [
252 | "Written with ❤️ by [dair.ai](https://medium.com/dair-ai)"
253 | ]
254 | },
255 | {
256 | "cell_type": "markdown",
257 | "metadata": {
258 | "id": "uvcxsIkL89AB",
259 | "colab_type": "text"
260 | },
261 | "source": [
262 | "## [Other Tips]\n",
263 | "- Try to ensure that your notebook-based tutorials have a very nice flow. If you are using a lot of functions, it will be nice if you can create seperate python files for them and import them here. You don't want your notebooks to be too detailed, but you also don't want it to be too flat.\n",
264 | "- Remember! You are teaching not dictating. Ask questions and immerse the reader, challenge them. There are various ways to do so.\n",
265 | "- Be sure to add comments in your code. These should be very short and concise intruction -- user a lot of action verbs, and avoid abstract nouns wherever possible. This tend help with those readers that prefer code rather than text. Another suggestion, is to specify the different dimensions of the data transformation steps you are applying in the different steps of the computation graph. \n",
266 | "- More coming soon!"
267 | ]
268 | }
269 | ]
270 | }
--------------------------------------------------------------------------------
/_notebooks/2020-03-19-nn.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "nbformat": 4,
3 | "nbformat_minor": 0,
4 | "metadata": {
5 | "colab": {
6 | "name": "nn.ipynb",
7 | "provenance": [],
8 | "collapsed_sections": []
9 | },
10 | "kernelspec": {
11 | "display_name": "Python 3",
12 | "language": "python",
13 | "name": "python3"
14 | }
15 | },
16 | "cells": [
17 | {
18 | "cell_type": "markdown",
19 | "metadata": {
20 | "id": "Ee4B4v5tAp1C",
21 | "colab_type": "text"
22 | },
23 | "source": [
24 | "# A Simple Neural Network from Scratch with PyTorch and Google Colab\n",
25 | "\n",
26 | "> \"In this tutorial we implement a simple neural network from scratch using PyTorch.\"\n",
27 | "\n",
28 | "- toc: false\n",
29 | "- branch: master\n",
30 | "- author: Elvis Saravia\n",
31 | "- badges: true\n",
32 | "- comments: true\n",
33 | "- categories: [machine learning, beginner, pytorch, neural network]\n",
34 | "- image: images/nn.png\n",
35 | "- hide: false"
36 | ]
37 | },
38 | {
39 | "cell_type": "markdown",
40 | "metadata": {
41 | "id": "w4cEhtf_Ap1E",
42 | "colab_type": "text"
43 | },
44 | "source": [
45 | "## About\n",
46 | "\n",
47 | "In this tutorial we will implement a simple neural network from scratch using PyTorch. The idea of the tutorial is to teach you the basics of PyTorch and how it can be used to implement a neural network from scratch. I will go over some of the basic functionalities and concepts available in PyTorch that will allow you to build your own neural networks. \n",
48 | "\n",
49 | "This tutorial assumes you have prior knowledge of how a neural network works. Don’t worry! Even if you are not so sure, you will be okay. For advanced PyTorch users, this tutorial may still serve as a refresher. This tutorial is heavily inspired by this [Neural Network implementation](https://repl.it/talk/announcements/Build-a-Neural-Network-in-Python/5457) coded purely using Numpy. In fact, I tried re-implementing the code using PyTorch instead and added my own intuitions and explanations. Thanks to [Samay](https://repl.it/@shamdasani) for his phenomenal work, I hope this inspires many others as it did with me."
50 | ]
51 | },
52 | {
53 | "cell_type": "markdown",
54 | "metadata": {
55 | "id": "MP9ewMSlC7JU",
56 | "colab_type": "text"
57 | },
58 | "source": [
59 | "\n",
60 | "The `torch` module provides all the necessary **tensor** operators you will need to implement your first neural network from scratch in PyTorch. That's right! In PyTorch everything is a Tensor, so this is the first thing you will need to get used to. Let's import the libraries we will need for this tutorial."
61 | ]
62 | },
63 | {
64 | "cell_type": "code",
65 | "metadata": {
66 | "id": "bKmXKSQnAp1G",
67 | "colab_type": "code",
68 | "colab": {}
69 | },
70 | "source": [
71 | "import torch\n",
72 | "import torch.nn as nn"
73 | ],
74 | "execution_count": 0,
75 | "outputs": []
76 | },
77 | {
78 | "cell_type": "markdown",
79 | "metadata": {
80 | "id": "1EWBBl1nAp1M",
81 | "colab_type": "text"
82 | },
83 | "source": [
84 | "## Data\n",
85 | "Let's start by creating some sample data using the `torch.tensor` command. In Numpy, this could be done with `np.array`. Both functions serve the same purpose, but in PyTorch everything is a Tensor as opposed to a vector or matrix. We define types in PyTorch using the `dtype=torch.xxx` command. \n",
86 | "\n",
87 | "In the data below, `X` represents the amount of hours studied and how much time students spent sleeping, whereas `y` represent grades. The variable `xPredicted` is a single input for which we want to predict a grade using the parameters learned by the neural network. Remember, the neural network wants to learn a mapping between `X` and `y`, so it will try to take a guess from what it has learned from the training data. "
88 | ]
89 | },
90 | {
91 | "cell_type": "code",
92 | "metadata": {
93 | "id": "fsAVbHnjAp1P",
94 | "colab_type": "code",
95 | "colab": {}
96 | },
97 | "source": [
98 | "X = torch.tensor(([2, 9], [1, 5], [3, 6]), dtype=torch.float) # 3 X 2 tensor\n",
99 | "y = torch.tensor(([92], [100], [89]), dtype=torch.float) # 3 X 1 tensor\n",
100 | "xPredicted = torch.tensor(([4, 8]), dtype=torch.float) # 1 X 2 tensor"
101 | ],
102 | "execution_count": 0,
103 | "outputs": []
104 | },
105 | {
106 | "cell_type": "markdown",
107 | "metadata": {
108 | "id": "RC0ru9kCAp1U",
109 | "colab_type": "text"
110 | },
111 | "source": [
112 | "You can check the size of the tensors we have just created with the `size` command. This is equivalent to the `shape` command used in tools such as Numpy and Tensorflow. "
113 | ]
114 | },
115 | {
116 | "cell_type": "code",
117 | "metadata": {
118 | "id": "sfC-B1BEAp1W",
119 | "colab_type": "code",
120 | "outputId": "eba8424c-519d-48f7-ccd7-bc6a76035c61",
121 | "colab": {
122 | "base_uri": "https://localhost:8080/",
123 | "height": 51
124 | }
125 | },
126 | "source": [
127 | "print(X.size())\n",
128 | "print(y.size())"
129 | ],
130 | "execution_count": 3,
131 | "outputs": [
132 | {
133 | "output_type": "stream",
134 | "text": [
135 | "torch.Size([3, 2])\n",
136 | "torch.Size([3, 1])\n"
137 | ],
138 | "name": "stdout"
139 | }
140 | ]
141 | },
142 | {
143 | "cell_type": "markdown",
144 | "metadata": {
145 | "id": "zrND9MS9Ap1f",
146 | "colab_type": "text"
147 | },
148 | "source": [
149 | "## Scaling\n",
150 | "\n",
151 | "Below we are performing some scaling on the sample data. Notice that the `max` function returns both a tensor and the corresponding indices. So we use `_` to capture the indices which we won't use here because we are only interested in the max values to conduct the scaling. Perfect! Our data is now in a very nice format our neural network will appreciate later on. "
152 | ]
153 | },
154 | {
155 | "cell_type": "code",
156 | "metadata": {
157 | "id": "hlBvtfAmAp1i",
158 | "colab_type": "code",
159 | "outputId": "82145a5e-d662-44b8-c8ce-e80f9a381694",
160 | "colab": {
161 | "base_uri": "https://localhost:8080/",
162 | "height": 34
163 | }
164 | },
165 | "source": [
166 | "# scale units\n",
167 | "X_max, _ = torch.max(X, 0)\n",
168 | "xPredicted_max, _ = torch.max(xPredicted, 0)\n",
169 | "\n",
170 | "X = torch.div(X, X_max)\n",
171 | "xPredicted = torch.div(xPredicted, xPredicted_max)\n",
172 | "y = y / 100 # max test score is 100\n",
173 | "print(xPredicted)"
174 | ],
175 | "execution_count": 4,
176 | "outputs": [
177 | {
178 | "output_type": "stream",
179 | "text": [
180 | "tensor([0.5000, 1.0000])\n"
181 | ],
182 | "name": "stdout"
183 | }
184 | ]
185 | },
186 | {
187 | "cell_type": "markdown",
188 | "metadata": {
189 | "id": "R1kTs5S5Ap1m",
190 | "colab_type": "text"
191 | },
192 | "source": [
193 | "Notice that there are two functions `max` and `div` that I didn't discuss above. They do exactly what they imply: `max` finds the maximum value in a vector... I mean tensor; and `div` is basically a nice little function to divide two tensors. "
194 | ]
195 | },
196 | {
197 | "cell_type": "markdown",
198 | "metadata": {
199 | "id": "xRvMSpEFAp1n",
200 | "colab_type": "text"
201 | },
202 | "source": [
203 | "## Model (Computation Graph)\n",
204 | "Once the data has been processed and it is in the proper format, all you need to do now is to define your model. Here is where things begin to change a little as compared to how you would build your neural networks using, say, something like Keras or Tensorflow. However, you will realize quickly as you go along that PyTorch doesn't differ much from other deep learning tools. At the end of the day we are constructing a computation graph, which is used to dictate how data should flow and what type of operations are performed on this information. \n",
205 | "\n",
206 | "For illustration purposes, we are building the following neural network or computation graph:\n",
207 | "\n",
208 | "\n",
209 | ""
210 | ]
211 | },
212 | {
213 | "cell_type": "code",
214 | "metadata": {
215 | "id": "C7pDC5SfAp1p",
216 | "colab_type": "code",
217 | "colab": {}
218 | },
219 | "source": [
220 | "class Neural_Network(nn.Module):\n",
221 | " def __init__(self, ):\n",
222 | " super(Neural_Network, self).__init__()\n",
223 | " # parameters\n",
224 | " # TODO: parameters can be parameterized instead of declaring them here\n",
225 | " self.inputSize = 2\n",
226 | " self.outputSize = 1\n",
227 | " self.hiddenSize = 3\n",
228 | " \n",
229 | " # weights\n",
230 | " self.W1 = torch.randn(self.inputSize, self.hiddenSize) # 3 X 2 tensor\n",
231 | " self.W2 = torch.randn(self.hiddenSize, self.outputSize) # 3 X 1 tensor\n",
232 | " \n",
233 | " def forward(self, X):\n",
234 | " self.z = torch.matmul(X, self.W1) # 3 X 3 \".dot\" does not broadcast in PyTorch\n",
235 | " self.z2 = self.sigmoid(self.z) # activation function\n",
236 | " self.z3 = torch.matmul(self.z2, self.W2)\n",
237 | " o = self.sigmoid(self.z3) # final activation function\n",
238 | " return o\n",
239 | " \n",
240 | " def sigmoid(self, s):\n",
241 | " return 1 / (1 + torch.exp(-s))\n",
242 | " \n",
243 | " def sigmoidPrime(self, s):\n",
244 | " # derivative of sigmoid\n",
245 | " return s * (1 - s)\n",
246 | " \n",
247 | " def backward(self, X, y, o):\n",
248 | " self.o_error = y - o # error in output\n",
249 | " self.o_delta = self.o_error * self.sigmoidPrime(o) # derivative of sig to error\n",
250 | " self.z2_error = torch.matmul(self.o_delta, torch.t(self.W2))\n",
251 | " self.z2_delta = self.z2_error * self.sigmoidPrime(self.z2)\n",
252 | " self.W1 += torch.matmul(torch.t(X), self.z2_delta)\n",
253 | " self.W2 += torch.matmul(torch.t(self.z2), self.o_delta)\n",
254 | " \n",
255 | " def train(self, X, y):\n",
256 | " # forward + backward pass for training\n",
257 | " o = self.forward(X)\n",
258 | " self.backward(X, y, o)\n",
259 | " \n",
260 | " def saveWeights(self, model):\n",
261 | " # we will use the PyTorch internal storage functions\n",
262 | " torch.save(model, \"NN\")\n",
263 | " # you can reload model with all the weights and so forth with:\n",
264 | " # torch.load(\"NN\")\n",
265 | " \n",
266 | " def predict(self):\n",
267 | " print (\"Predicted data based on trained weights: \")\n",
268 | " print (\"Input (scaled): \\n\" + str(xPredicted))\n",
269 | " print (\"Output: \\n\" + str(self.forward(xPredicted)))\n",
270 | " "
271 | ],
272 | "execution_count": 0,
273 | "outputs": []
274 | },
275 | {
276 | "cell_type": "markdown",
277 | "metadata": {
278 | "id": "qm5gimnyAp1s",
279 | "colab_type": "text"
280 | },
281 | "source": [
282 | "For the purpose of this tutorial, we are not going to be talking math stuff, that's for another day. I just want you to get a gist of what it takes to build a neural network from scratch using PyTorch. Let's break down the model which was declared via the class above. \n",
283 | "\n",
284 | "## Class Header\n",
285 | "First, we defined our model via a class because that is the recommended way to build the computation graph. The class header contains the name of the class `Neural Network` and the parameter `nn.Module` which basically indicates that we are defining our own neural network. \n",
286 | "\n",
287 | "```python\n",
288 | "class Neural_Network(nn.Module):\n",
289 | "```\n",
290 | "\n",
291 | "## Initialization\n",
292 | "The next step is to define the initializations ( `def __init__(self,)`) that will be performed upon creating an instance of the customized neural network. You can declare the parameters of your model here, but typically, you would declare the structure of your network in this section -- the size of the hidden layers and so forth. Since we are building the neural network from scratch, we explicitly declared the size of the weights matrices: one that stores the parameters from the input to hidden layer; and one that stores the parameter from the hidden to output layer. Both weight matrices are initialized with values randomly chosen from a normal distribution via `torch.randn(...)`. Note that we are not using bias just to keep things as simple as possible. \n",
293 | "\n",
294 | "```python\n",
295 | "def __init__(self, ):\n",
296 | " super(Neural_Network, self).__init__()\n",
297 | " # parameters\n",
298 | " # TODO: parameters can be parameterized instead of declaring them here\n",
299 | " self.inputSize = 2\n",
300 | " self.outputSize = 1\n",
301 | " self.hiddenSize = 3\n",
302 | "\n",
303 | " # weights\n",
304 | " self.W1 = torch.randn(self.inputSize, self.hiddenSize) # 3 X 2 tensor\n",
305 | " self.W2 = torch.randn(self.hiddenSize, self.outputSize) # 3 X 1 tensor\n",
306 | "```\n",
307 | "\n",
308 | "## The Forward Function\n",
309 | "The `forward` function is where all the magic happens (see below). This is where the data enters and is fed into the computation graph (i.e., the neural network structure we have built). Since we are building a simple neural network with one hidden layer, our forward function looks very simple:\n",
310 | "\n",
311 | "```python\n",
312 | "def forward(self, X):\n",
313 | " self.z = torch.matmul(X, self.W1) \n",
314 | " self.z2 = self.sigmoid(self.z) # activation function\n",
315 | " self.z3 = torch.matmul(self.z2, self.W2)\n",
316 | " o = self.sigmoid(self.z3) # final activation function\n",
317 | " return o\n",
318 | "```\n",
319 | "\n",
320 | "The `forward` function above takes the input `X`and then performs a matrix multiplication (`torch.matmul(...)`) with the first weight matrix `self.W1`. Then the result is applied an activation function, `sigmoid`. The resulting matrix of the activation is then multiplied with the second weight matrix `self.W2`. Then another activation if performed, which renders the output of the neural network or computation graph. The process I described above is simply what's known as a `feedforward pass`. In order for the weights to optimize when training, we need a backpropagation algorithm. \n",
321 | "\n",
322 | "## The Backward Function\n",
323 | "The `backward` function contains the backpropagation algorithm, where the goal is to essentially minimize the loss with respect to our weights. In other words, the weights need to be updated in such a way that the loss decreases while the neural network is training (well, that is what we hope for). All this magic is possible with the gradient descent algorithm which is declared in the `backward` function. Take a minute or two to inspect what is happening in the code below:\n",
324 | "\n",
325 | "```python\n",
326 | "def backward(self, X, y, o):\n",
327 | " self.o_error = y - o # error in output\n",
328 | " self.o_delta = self.o_error * self.sigmoidPrime(o) \n",
329 | " self.z2_error = torch.matmul(self.o_delta, torch.t(self.W2))\n",
330 | " self.z2_delta = self.z2_error * self.sigmoidPrime(self.z2)\n",
331 | " self.W1 += torch.matmul(torch.t(X), self.z2_delta)\n",
332 | " self.W2 += torch.matmul(torch.t(self.z2), self.o_delta)\n",
333 | "```\n",
334 | "\n",
335 | "Notice that we are performing a lot of matrix multiplications along with the transpose operations via the `torch.matmul(...)` and `torch.t(...)` operations, respectively. The rest is simply gradient descent -- there is nothing to it."
336 | ]
337 | },
338 | {
339 | "cell_type": "markdown",
340 | "metadata": {
341 | "id": "9t26Dr5zAp1u",
342 | "colab_type": "text"
343 | },
344 | "source": [
345 | "## Training\n",
346 | "All that is left now is to train the neural network. First we create an instance of the computation graph we have just built:\n",
347 | "\n",
348 | "```python\n",
349 | "NN = Neural_Network()\n",
350 | "```\n",
351 | "\n",
352 | "Then we train the model for `1000` rounds. Notice that in PyTorch `NN(X)` automatically calls the `forward` function so there is no need to explicitly call `NN.forward(X)`. \n",
353 | "\n",
354 | "After we have obtained the predicted output for ever round of training, we compute the loss, with the following code:\n",
355 | "\n",
356 | "```python\n",
357 | "torch.mean((y - NN(X))**2).detach().item()\n",
358 | "```\n",
359 | "\n",
360 | "The next step is to start the training (foward + backward) via `NN.train(X, y)`. After we have trained the neural network, we can store the model and output the predicted value of the single instance we declared in the beginning, `xPredicted`. \n",
361 | "\n",
362 | "Let's train!"
363 | ]
364 | },
365 | {
366 | "cell_type": "code",
367 | "metadata": {
368 | "id": "9sTddOpLAp1w",
369 | "colab_type": "code",
370 | "outputId": "1c1beaf2-ace7-4ac1-c2f9-eb943c29c1f9",
371 | "colab": {
372 | "base_uri": "https://localhost:8080/",
373 | "height": 343
374 | }
375 | },
376 | "source": [
377 | "NN = Neural_Network()\n",
378 | "for i in range(1000): # trains the NN 1,000 times\n",
379 | " if (i % 100) == 0:\n",
380 | " print (\"#\" + str(i) + \" Loss: \" + str(torch.mean((y - NN(X))**2).detach().item())) # mean sum squared loss\n",
381 | " NN.train(X, y)\n",
382 | "NN.saveWeights(NN)\n",
383 | "NN.predict()\n",
384 | "\n",
385 | "print(\"Finished training!\")"
386 | ],
387 | "execution_count": 7,
388 | "outputs": [
389 | {
390 | "output_type": "stream",
391 | "text": [
392 | "#0 Loss: 0.24544493854045868\n",
393 | "#100 Loss: 0.0026628002524375916\n",
394 | "#200 Loss: 0.0024748605210334063\n",
395 | "#300 Loss: 0.002363199135288596\n",
396 | "#400 Loss: 0.0022466194350272417\n",
397 | "#500 Loss: 0.0021235516760498285\n",
398 | "#600 Loss: 0.001996910898014903\n",
399 | "#700 Loss: 0.0018705682596191764\n",
400 | "#800 Loss: 0.0017485078424215317\n",
401 | "#900 Loss: 0.0016340742586180568\n",
402 | "Predicted data based on trained weights: \n",
403 | "Input (scaled): \n",
404 | "tensor([0.5000, 1.0000])\n",
405 | "Output: \n",
406 | "tensor([0.9529])\n",
407 | "Finished training!\n"
408 | ],
409 | "name": "stdout"
410 | },
411 | {
412 | "output_type": "stream",
413 | "text": [
414 | "/usr/local/lib/python3.6/dist-packages/torch/serialization.py:360: UserWarning: Couldn't retrieve source code for container of type Neural_Network. It won't be checked for correctness upon loading.\n",
415 | " \"type \" + obj.__name__ + \". It won't be checked \"\n"
416 | ],
417 | "name": "stderr"
418 | }
419 | ]
420 | },
421 | {
422 | "cell_type": "markdown",
423 | "metadata": {
424 | "id": "L9nBzkgdbjcA",
425 | "colab_type": "text"
426 | },
427 | "source": [
428 | "The loss keeps decreasing, which means that the neural network is learning something. That's it. Congratulations! You have just learned how to create and train a neural network from scratch using PyTorch. There are so many things you can do with the shallow network we have just implemented. You can add more hidden layers or try to incorporate the bias terms for practice. I would love to see what you will build from here. Reach me out on [Twitter](https://twitter.com/omarsar0) if you have any further questions or leave your comments here. Until next time!"
429 | ]
430 | },
431 | {
432 | "cell_type": "markdown",
433 | "metadata": {
434 | "id": "zcms4BCySKXj",
435 | "colab_type": "text"
436 | },
437 | "source": [
438 | "## References:\n",
439 | "- [PyTorch nn. Modules](https://pytorch.org/tutorials/beginner/pytorch_with_examples.html#pytorch-custom-nn-modules)\n",
440 | "- [Build a Neural Network with Numpy](https://enlight.nyc/neural-network)\n"
441 | ]
442 | }
443 | ]
444 | }
--------------------------------------------------------------------------------
/_notebooks/README.md:
--------------------------------------------------------------------------------
1 | # Notebooks by dair.ai
2 | This is a place to host and share data science notebooks that range from beginner tutorials for deep learning to complete walkthroughs of complex topics such as Transformers for NLP and object detection for CV.
3 |
4 | Sharing is easy! Just upload your notebooks to the `_notebooks` folder and it will be featured on our [website](https://dair.ai/notebooks/) and seen by thousands of avid learners.
5 |
6 |
7 | ### How to Contribute
8 | 1) Have your Jupyter notebook ready for publication. Make sure you check out this [guideline](https://fastpages.fast.ai/jupyter/2020/02/20/test.html) to leverage all the amazing features of fastpages. In order to properly format the notebook when rendered as a web page, you need to include a markdown cell at the beginning of the notebook as in the example below:
9 |
10 | 
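    | 
    | Concretely, that first markdown cell looks like this (the fields mirror the notebooks already in this repo; swap in your own values):
    | 
    | ```
    | # Title of Your Notebook
    | > "A one-sentence description of the notebook."
    | 
    | - toc: false
    | - branch: master
    | - author: Your Name
    | - badges: true
    | - comments: true
    | - categories: [your, categories]
    | - image: images/your-image.png
    | - hide: false
    | ```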
11 |
12 | 2) Then ask for an invite to be able to commit to this repo. Email me directly to ellfae@gmail.com or [DM on Twitter](https://twitter.com/omarsar0).
13 | 3) You can then upload your notebook directly to the [`_notebooks`](https://github.com/dair-ai/notebooks/tree/master/_notebooks) folder. Ensure that the notebook follows this naming convention: `YYYY-MM-DD-Name-of-your-notebook.ipynb`.
14 | 4) Notebooks will then be reviewed and published. This will also be followed by a special feature in the upcoming [NLP Newsletter](https://github.com/dair-ai/nlp_newsletter), where educational resources are shared with thousands of avid readers.
15 |
16 | ---
17 | ### Credits
18 | _powered by [fastpages](https://github.com/fastai/fastpages)_
19 |
--------------------------------------------------------------------------------
/_notebooks/my_icons/fastai_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dair-ai/notebooks/7fc69278099f100d39489181d5a7763dfc835eca/_notebooks/my_icons/fastai_logo.png
--------------------------------------------------------------------------------
/_pages/404.html:
--------------------------------------------------------------------------------
1 | ---
2 | permalink: /404.html
3 | layout: default
4 | search_exclude: true
5 | ---
6 |
7 |
20 |
21 | <div class="container">
22 |   <h1>404</h1>
23 |   <p><strong>Page not found :(</strong></p>
24 |   <p>The requested page could not be found.</p>
25 | </div>
26 |
--------------------------------------------------------------------------------
/_pages/about.md:
--------------------------------------------------------------------------------
1 | ---
2 | layout: page
3 | title: About
4 | permalink: /about/
5 | ---
6 |
7 | dair.ai is a community effort to democratize Artificial Intelligence (AI) research, education, and technologies.
8 |
9 | Notebooks is an effort to encourage data scientists of all levels to easily share their notebooks.
10 |
--------------------------------------------------------------------------------
/_pages/search.html:
--------------------------------------------------------------------------------
1 | ---
2 | layout: default
3 | permalink: /search/
4 | title: Search
5 | search_exclude: true
6 | ---
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
--------------------------------------------------------------------------------
/_pages/submit.html:
--------------------------------------------------------------------------------
1 | ---
2 | layout: page
3 | title: Submit Notebook
4 | permalink: /submit/
5 | ---
6 |
7 | Follow this guide before you upload your notebooks directly to our GitHub repo.
8 |
9 | fastpages takes care of everything else and automatically publishes your notebook to this website.
10 |
--------------------------------------------------------------------------------
/_pages/tags.html:
--------------------------------------------------------------------------------
1 | ---
2 | layout: categories
3 | permalink: /categories/
4 | title: Tags
5 | search_exclude: true
6 | ---
7 |
8 | Contents
9 |
10 | {% if site.categories.size > 0 %}
11 |
12 | {% for category in site.categories %}
13 | {% capture category_name %}{{ category | first }}{% endcapture %}
14 |
--------------------------------------------------------------------------------
/images/copied_from_nb/README.md:
--------------------------------------------------------------------------------
2 |
3 | Do not manually save images into this folder. This is used by GitHub Actions to automatically copy images. Any images you save into this folder could be deleted at build time.
--------------------------------------------------------------------------------
/images/data-science.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dair-ai/notebooks/7fc69278099f100d39489181d5a7763dfc835eca/images/data-science.png
--------------------------------------------------------------------------------
/images/diagram.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dair-ai/notebooks/7fc69278099f100d39489181d5a7763dfc835eca/images/diagram.png
--------------------------------------------------------------------------------
/images/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dair-ai/notebooks/7fc69278099f100d39489181d5a7763dfc835eca/images/favicon.ico
--------------------------------------------------------------------------------
/images/favicon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dair-ai/notebooks/7fc69278099f100d39489181d5a7763dfc835eca/images/favicon.png
--------------------------------------------------------------------------------
/images/fon-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dair-ai/notebooks/7fc69278099f100d39489181d5a7763dfc835eca/images/fon-1.png
--------------------------------------------------------------------------------
/images/front-matter.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dair-ai/notebooks/7fc69278099f100d39489181d5a7763dfc835eca/images/front-matter.png
--------------------------------------------------------------------------------
/images/intro-pytorch.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dair-ai/notebooks/7fc69278099f100d39489181d5a7763dfc835eca/images/intro-pytorch.png
--------------------------------------------------------------------------------
/images/logistic-regression.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dair-ai/notebooks/7fc69278099f100d39489181d5a7763dfc835eca/images/logistic-regression.png
--------------------------------------------------------------------------------
/images/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dair-ai/notebooks/7fc69278099f100d39489181d5a7763dfc835eca/images/logo.png
--------------------------------------------------------------------------------
/images/model-nn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dair-ai/notebooks/7fc69278099f100d39489181d5a7763dfc835eca/images/model-nn.png
--------------------------------------------------------------------------------
/images/nn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dair-ai/notebooks/7fc69278099f100d39489181d5a7763dfc835eca/images/nn.png
--------------------------------------------------------------------------------
/images/notebooks.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dair-ai/notebooks/7fc69278099f100d39489181d5a7763dfc835eca/images/notebooks.png
--------------------------------------------------------------------------------
/images/pytorch-quick.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dair-ai/notebooks/7fc69278099f100d39489181d5a7763dfc835eca/images/pytorch-quick.png
--------------------------------------------------------------------------------
/images/rnn-pt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dair-ai/notebooks/7fc69278099f100d39489181d5a7763dfc835eca/images/rnn-pt.png
--------------------------------------------------------------------------------
/index.html:
--------------------------------------------------------------------------------
1 | ---
2 | layout: home
3 | search_exclude: true
4 | ---
5 |
6 | 
7 |
8 | This is a place to host and share data science notebooks that range from beginner tutorials for deep learning to deep dives into complex topics such as Transformers for NLP and object detection for CV.
9 |
10 | Sharing is easy! Just upload your notebooks to the `_notebooks` folder of the [repo](https://github.com/dair-ai/notebooks/tree/master/_notebooks).
11 |
12 | ---
13 |
14 | This site is built with [fastpages](https://github.com/fastai/fastpages).
15 |
16 | ---
17 |
--------------------------------------------------------------------------------