├── .github
│   └── workflows
│       ├── DocCleanup.yml
│       ├── Documentation.yaml
│       └── TagBot.yml
├── .gitignore
├── LICENSE
├── Project.toml
├── README.md
├── docs
│   ├── Project.toml
│   ├── make.jl
│   └── src
│       ├── assets
│       │   ├── favicon.ico
│       │   ├── logo-dark.svg
│       │   └── logo.svg
│       ├── benchmarks.svg
│       ├── data
│       │   ├── MakeVideo.m
│       │   ├── Stations.mat
│       │   ├── Stations2.mat
│       │   ├── Video.gif
│       │   ├── borders
│       │   │   ├── borderdata.mat
│       │   │   ├── borders.m
│       │   │   ├── borders.png
│       │   │   ├── borders_documentation.m
│       │   │   ├── html
│       │   │   │   ├── borders_documentation.html
│       │   │   │   ├── borders_documentation.png
│       │   │   │   ├── borders_documentation_01.png
│       │   │   │   ├── borders_documentation_02.png
│       │   │   │   ├── borders_documentation_03.png
│       │   │   │   ├── borders_documentation_04.png
│       │   │   │   ├── borders_documentation_05.png
│       │   │   │   ├── borders_documentation_06.png
│       │   │   │   └── borders_documentation_07.png
│       │   │   └── labelborders.m
│       │   ├── conc_hysplit.mat
│       │   ├── data.bson
│       │   └── make_video.jl
│       ├── index.md
│       ├── installation
│       │   ├── installation.md
│       │   ├── juliapath_1.png
│       │   ├── juliapath_2.png
│       │   ├── tutorial.md
│       │   ├── tutorial_1.png
│       │   ├── tutorial_10.png
│       │   ├── tutorial_11.png
│       │   ├── tutorial_12.png
│       │   ├── tutorial_13.png
│       │   ├── tutorial_14.png
│       │   ├── tutorial_15.png
│       │   ├── tutorial_16.png
│       │   ├── tutorial_17.png
│       │   ├── tutorial_18.png
│       │   ├── tutorial_19.png
│       │   ├── tutorial_2.png
│       │   ├── tutorial_20.png
│       │   ├── tutorial_3.png
│       │   ├── tutorial_4.png
│       │   ├── tutorial_5.png
│       │   ├── tutorial_6.png
│       │   ├── tutorial_7.png
│       │   ├── tutorial_8.png
│       │   └── tutorial_9.png
│       ├── lecture_01
│       │   ├── arrays.md
│       │   ├── data_structures.md
│       │   ├── operators.md
│       │   ├── strings.md
│       │   ├── types.svg
│       │   └── variables.md
│       ├── lecture_02
│       │   ├── conditions.md
│       │   ├── exercises.md
│       │   ├── functions.md
│       │   ├── juliaset.gif
│       │   ├── juliaset.svg
│       │   ├── juliaset_ex2.svg
│       │   ├── juliaset_ex3_1.svg
│       │   ├── juliaset_ex3_2.svg
│       │   ├── juliaset_ex3_3.svg
│       │   ├── juliaset_ex3_4.svg
│       │   ├── juliasetalg.png
│       │   ├── loops.md
│       │   └── scope.md
│       ├── lecture_03
│       │   ├── DataFrames.md
│       │   ├── Plots.md
│       │   ├── animsincos.gif
│       │   ├── animsurf.gif
│       │   ├── ggplot.svg
│       │   ├── interaction.md
│       │   ├── otherpackages.md
│       │   ├── pkg.md
│       │   ├── plots_srs_ex1.svg
│       │   ├── plots_srs_ex2.svg
│       │   ├── plots_srs_ex3.svg
│       │   └── standardlibrary.md
│       ├── lecture_04
│       │   ├── exceptions.md
│       │   ├── exercises.md
│       │   ├── functions.md
│       │   ├── gameoflife.gif
│       │   ├── gameoflife_pulsar.gif
│       │   ├── methods.md
│       │   └── scope.md
│       ├── lecture_05
│       │   ├── compositetypes.md
│       │   ├── currencies.md
│       │   ├── gauss.svg
│       │   └── types.svg
│       ├── lecture_06
│       │   ├── compatibility.md
│       │   ├── image_1.svg
│       │   ├── image_2.svg
│       │   ├── image_3.svg
│       │   ├── image_4.svg
│       │   ├── modules.md
│       │   ├── structure.md
│       │   └── workflow.md
│       ├── lecture_07
│       │   ├── documentation.md
│       │   ├── extensions.md
│       │   ├── image_5.svg
│       │   ├── image_6.svg
│       │   ├── image_8.svg
│       │   └── tests.md
│       ├── lecture_08
│       │   ├── constrained.md
│       │   ├── data
│       │   │   └── auxiliary.jl
│       │   ├── exercises.md
│       │   ├── gradients.md
│       │   ├── minmax.svg
│       │   ├── theory.md
│       │   └── unconstrained.md
│       ├── lecture_09
│       │   ├── exercises.md
│       │   ├── iris.png
│       │   ├── linear.md
│       │   ├── logistic.md
│       │   └── theory.md
│       ├── lecture_10
│       │   ├── exercises.md
│       │   ├── nn.md
│       │   ├── nn.png
│       │   └── theory.md
│       ├── lecture_11
│       │   ├── Iris_acc.svg
│       │   ├── Iris_train_test_acc.svg
│       │   ├── Layers_0.svg
│       │   ├── Layers_1.svg
│       │   ├── Layers_9.svg
│       │   ├── data
│       │   │   ├── mnist.jl
│       │   │   ├── mnist.jld2
│       │   │   ├── mnist_gpu.jl
│       │   │   ├── mnist_sigmoid.jld2
│       │   │   ├── turtle.jpg
│       │   │   ├── turtles.jl
│       │   │   └── utilities.jl
│       │   ├── exercises.md
│       │   ├── iris.md
│       │   ├── miss.svg
│       │   ├── mnist_intro.svg
│       │   ├── mnist_intro2.svg
│       │   ├── nn.md
│       │   ├── nn.png
│       │   ├── nn_intro.svg
│       │   ├── theory.md
│       │   ├── turtles.png
│       │   └── utilities.jl
│       ├── lecture_12
│       │   ├── glm.md
│       │   ├── monte.md
│       │   └── sparse.md
│       └── why.md
├── src
│   └── JuliaCourseFNSPE.jl
└── test
    └── runtests.jl
/.github/workflows/DocCleanup.yml:
--------------------------------------------------------------------------------
1 | name: Doc Preview Cleanup
2 |
3 | on:
4 | pull_request:
5 | types: [closed]
6 |
7 | jobs:
8 | doc-preview-cleanup:
9 | runs-on: ubuntu-latest
10 | steps:
11 | - name: Checkout gh-pages branch
12 | uses: actions/checkout@v2
13 | with:
14 | ref: gh-pages
15 | - name: Delete preview and history + push changes
16 | run: |
17 | if [ -d "previews/PR$PRNUM" ]; then
18 | git config user.name "Documenter.jl"
19 | git config user.email "documenter@juliadocs.github.io"
20 | git rm -rf "previews/PR$PRNUM"
21 | git commit -m "delete preview"
22 | git branch gh-pages-new $(echo "delete history" | git commit-tree HEAD^{tree})
23 | git push --force origin gh-pages-new:gh-pages
24 | fi
25 | env:
26 | PRNUM: ${{ github.event.number }}
--------------------------------------------------------------------------------
/.github/workflows/Documentation.yaml:
--------------------------------------------------------------------------------
1 | name: Documentation
2 |
3 | on:
4 | push:
5 | branches:
6 | - master # update to match your development branch (master, main, dev, trunk, ...)
7 | tags: '*'
8 | pull_request:
9 |
10 | jobs:
11 | build:
12 | permissions:
13 | actions: write
14 | contents: write
15 | pull-requests: read
16 | statuses: write
17 | runs-on: ubuntu-latest
18 | steps:
19 | - uses: actions/checkout@v4
20 | - uses: julia-actions/setup-julia@v2
21 | with:
22 | version: '1.10'
23 | - uses: julia-actions/cache@v2
24 | - name: Install dependencies
25 | run: julia --project=docs/ -e 'using Pkg; Pkg.develop(PackageSpec(path=pwd())); Pkg.instantiate()'
26 | - name: Build and deploy
27 | env:
28 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # If authenticating with GitHub Actions token
29 | DOCUMENTER_KEY: ${{ secrets.DOCUMENTER_KEY }} # If authenticating with SSH deploy key
30 | run: julia --project=docs/ docs/make.jl
--------------------------------------------------------------------------------
/.github/workflows/TagBot.yml:
--------------------------------------------------------------------------------
1 | name: TagBot
2 | on:
3 | issue_comment:
4 | types:
5 | - created
6 | workflow_dispatch:
7 | jobs:
8 | TagBot:
9 | if: github.event_name == 'workflow_dispatch' || github.actor == 'JuliaTagBot'
10 | runs-on: ubuntu-latest
11 | steps:
12 | - uses: JuliaRegistries/TagBot@v1
13 | with:
14 | token: ${{ secrets.GITHUB_TOKEN }}
15 | ssh: ${{ secrets.DOCUMENTER_KEY }}
16 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.jl.*.cov
2 | *.jl.cov
3 | *.jl.mem
4 |
5 | Manifest.toml
6 |
7 | .vscode
8 | /_documents/
9 | /tmp/
10 | /_tikz/
11 | docs/build
12 | docs/data
13 | docs/src/assets/themes/*.css
14 |
15 | ################################################################################
16 | # LaTeX #
17 | ################################################################################
18 |
19 | ## Core latex/pdflatex auxiliary files:
20 | *.aux
21 | *.lof
22 | *.log
23 | *.lot
24 | *.fls
25 | *.out
26 | *.toc
27 | *.fmt
28 | *.fot
29 | *.cb
30 | *.cb2
31 | .*.lb
32 |
33 | ## Intermediate documents:
34 | *.dvi
35 | *.xdv
36 | *-converted-to.*
37 | # these rules will exclude image files for figures, et cetera...
38 | # *.ps
39 | # *.eps
40 | # *.pdf
41 |
42 | ## Generated if empty string given at "Please type another file name for output:"
43 | .pdf
44 |
45 | ## Bibliography auxiliary files (bibtex/biblatex/biber):
46 | *.bbl
47 | *.bcf
48 | *.blg
49 | *-blx.aux
50 | *-blx.bib
51 | *.run.xml
52 |
53 | ## Build tool auxiliary files:
54 | *.fdb_latexmk
55 | *.synctex
56 | *.synctex(busy)
57 | *.synctex.gz
58 | *.synctex.gz(busy)
59 | *.pdfsync
60 |
61 | ## Build tool directories for auxiliary files
62 | # latexrun
63 | latex.out/
64 |
65 | ## Auxiliary and intermediate files from other packages:
66 | # algorithms
67 | *.alg
68 | *.loa
69 |
70 | # achemso
71 | acs-*.bib
72 |
73 | # amsthm
74 | *.thm
75 |
76 | # beamer
77 | *.nav
78 | *.pre
79 | *.snm
80 | *.vrb
81 |
82 | # changes
83 | *.soc
84 |
85 | # comment
86 | *.cut
87 |
88 | # cprotect
89 | *.cpt
90 |
91 | # elsarticle (documentclass of Elsevier journals)
92 | *.spl
93 |
94 | # endnotes
95 | *.ent
96 |
97 | # fixme
98 | *.lox
99 |
100 | # feynmf/feynmp
101 | *.mf
102 | *.mp
103 | *.t[1-9]
104 | *.t[1-9][0-9]
105 | *.tfm
106 |
107 | #(r)(e)ledmac/(r)(e)ledpar
108 | *.end
109 | *.?end
110 | *.[1-9]
111 | *.[1-9][0-9]
112 | *.[1-9][0-9][0-9]
113 | *.[1-9]R
114 | *.[1-9][0-9]R
115 | *.[1-9][0-9][0-9]R
116 | *.eledsec[1-9]
117 | *.eledsec[1-9]R
118 | *.eledsec[1-9][0-9]
119 | *.eledsec[1-9][0-9]R
120 | *.eledsec[1-9][0-9][0-9]
121 | *.eledsec[1-9][0-9][0-9]R
122 |
123 | # glossaries
124 | *.acn
125 | *.acr
126 | *.glg
127 | *.glo
128 | *.gls
129 | *.glsdefs
130 |
131 | # gnuplottex
132 | *-gnuplottex-*
133 |
134 | # gregoriotex
135 | *.gaux
136 | *.gtex
137 |
138 | # htlatex
139 | *.4ct
140 | *.4tc
141 | *.idv
142 | *.lg
143 | *.trc
144 | *.xref
145 |
146 | # hyperref
147 | *.brf
148 |
149 | # knitr
150 | *-concordance.tex
151 |
152 | # listings
153 | *.lol
154 |
155 | # luatexja-ruby
156 | *.ltjruby
157 |
158 | # makeidx
159 | *.idx
160 | *.ilg
161 | *.ind
162 | *.ist
163 |
164 | # minitoc
165 | *.maf
166 | *.mlf
167 | *.mlt
168 | *.mtc[0-9]*
169 | *.slf[0-9]*
170 | *.slt[0-9]*
171 | *.stc[0-9]*
172 |
173 | # minted
174 | _minted*
175 | *.pyg
176 |
177 | # morewrites
178 | *.mw
179 |
180 | # nomencl
181 | *.nlg
182 | *.nlo
183 | *.nls
184 |
185 | # pax
186 | *.pax
187 |
188 | # pdfpcnotes
189 | *.pdfpc
190 |
191 | # sagetex
192 | *.sagetex.sage
193 | *.sagetex.py
194 | *.sagetex.scmd
195 |
196 | # scrwfile
197 | *.wrt
198 |
199 | # sympy
200 | *.sout
201 | *.sympy
202 | sympy-plots-for-*.tex/
203 |
204 | # pdfcomment
205 | *.upa
206 | *.upb
207 |
208 | # pythontex
209 | *.pytxcode
210 | pythontex-files-*/
211 |
212 | # tcolorbox
213 | *.listing
214 |
215 | # thmtools
216 | *.loe
217 |
218 | # TikZ & PGF
219 | *.dpth
220 | *.md5
221 | *.auxlock
222 |
223 | # todonotes
224 | *.tdo
225 |
226 | # vhistory
227 | *.hst
228 | *.ver
229 |
230 | # easy-todo
231 | *.lod
232 |
233 | # xcolor
234 | *.xcp
235 |
236 | # xmpincl
237 | *.xmpi
238 |
239 | # xindy
240 | *.xdy
241 |
242 | # xypic precompiled matrices
243 | *.xyc
244 |
245 | # endfloat
246 | *.ttt
247 | *.fff
248 |
249 | # Latexian
250 | TSWLatexianTemp*
251 |
252 | ## Editors:
253 | # WinEdt
254 | *.bak
255 | *.sav
256 |
257 | # Texpad
258 | .texpadtmp
259 |
260 | # LyX
261 | *.lyx~
262 |
263 | # Kile
264 | *.backup
265 |
266 | # KBibTeX
267 | *~[0-9]*
268 |
269 | # auto folder when using emacs and auctex
270 | ./auto/*
271 | *.el
272 |
273 | # expex forward references with \gathertags
274 | *-tags.tex
275 |
276 | # standalone packages
277 | *.sta
278 |
279 | ### LaTeX Patch ###
280 | # glossaries
281 | *.glstex
282 |
283 | ### ADA ###
284 | smazat*
285 | bordel*
286 |
287 | ################################################################################
288 | # MAC #
289 | ################################################################################
290 | .DS_Store
291 | .AppleDouble
292 | .LSOverride
293 |
294 | # Icon must end with two \r
295 | Icon
296 |
297 | # Thumbnails
298 | ._*
299 |
300 | # Files that might appear in the root of a volume
301 | .DocumentRevisions-V100
302 | .fseventsd
303 | .Spotlight-V100
304 | .TemporaryItems
305 | .Trashes
306 | .VolumeIcon.icns
307 | .com.apple.timemachine.donotpresent
308 |
309 | # Directories potentially created on remote AFP share
310 | .AppleDB
311 | .AppleDesktop
312 | Network Trash Folder
313 | Temporary Items
314 | .apdisk
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2020 Lukáš Adam and Václav Mácha
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/Project.toml:
--------------------------------------------------------------------------------
1 | name = "JuliaCourseFNSPE"
2 | uuid = "d7226185-11f2-4102-bf41-8fa93d2f6647"
3 | authors = ["Václav Mácha", "Lukáš Adam"]
4 | version = "0.1.0"
5 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | ---
6 |
7 | [](https://github.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/blob/master/LICENSE)
8 | [](https://juliateachingctu.github.io/Julia-for-Optimization-and-Learning/stable/)
9 | [](https://juliateachingctu.github.io/Julia-for-Optimization-and-Learning/dev/)
10 |
11 | This repository is supplementary material to the course **Julia for Optimization and Learning**, which is taught at the Czech Technical University in Prague. More information can be found on the [official course website](https://juliateachingctu.github.io/Julia-for-Optimization-and-Learning/stable/) or in the [course syllabus](http://bilakniha.cvut.cz/en/predmet6985806.html).
12 |
--------------------------------------------------------------------------------
/docs/Project.toml:
--------------------------------------------------------------------------------
1 | [deps]
2 | BSON = "fbb218c0-5317-5bc6-957e-2ee96dd4b1f0"
3 | BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
4 | CSV = "336ed68f-0bac-5ca0-87d4-7b16caf5d00b"
5 | DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
6 | Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
7 | Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4"
8 | Downloads = "f43a241f-c20a-4ad4-852c-f6b1247861c6"
9 | Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
10 | GLM = "38e38edf-8417-5370-95a0-9cbb8c7f171a"
11 | GLPK = "60bf3e95-4087-53dc-ae20-288a0d20c6a6"
12 | GR = "28b8d3ca-fb5f-59d9-8090-bfdbd6d07a71"
13 | HypothesisTests = "09f84164-cd44-5f33-b23f-e6b0d136a0d5"
14 | Ipopt = "b6b21f68-93f8-5de0-b562-5493be1d77c9"
15 | JLD2 = "033835bb-8acc-5ee8-8aae-3f567f8a3819"
16 | JuMP = "4076af6c-e467-56ae-b986-b466b2749572"
17 | LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
18 | MLDatasets = "eb30cadb-4394-5ae3-aed4-317e484a6458"
19 | MLUtils = "f1d291b0-491e-4a28-83b9-f70985020b54"
20 | Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80"
21 | ProgressMeter = "92933f4c-e287-5a05-a399-4b506db050ca"
22 | Query = "1a8c2f83-1ff3-5112-b086-8aa67b057ba1"
23 | RDatasets = "ce6b1742-4840-55fa-b093-852dadbb1d8b"
24 | Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
25 | SpecialFunctions = "276daf66-3868-5448-9aa4-cd146d93841b"
26 | Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
27 | StatsPlots = "f3b207a7-027a-5e70-b257-86293d7955fd"
28 |
29 | [compat]
30 | BSON = "0.3"
31 | BenchmarkTools = "1.5"
32 | CSV = "0.10"
33 | DataFrames = "1.7"
34 | Distributions = "0.25"
35 | Documenter = "1.7"
36 | Flux = "0.14"
37 | GLM = "1.9"
38 | GLPK = "1.2"
39 | GR = "0.73"
40 | HypothesisTests = "0.11"
41 | Ipopt = "1.6"
42 | JLD2 = "0.5"
43 | JuMP = "1.23"
44 | MLDatasets = "0.7"
45 | MLUtils = "0.4"
46 | Plots = "1.40"
47 | ProgressMeter = "1.10"
48 | Query = "1.0"
49 | RDatasets = "0.7"
50 | SpecialFunctions = "2.4"
51 | StatsPlots = "0.15"
52 | julia = "1.10"
53 |
--------------------------------------------------------------------------------
/docs/make.jl:
--------------------------------------------------------------------------------
1 | using Documenter
2 | using Downloads: download
3 |
4 | # download and compile theme
5 | assetsdir(args...) = joinpath(@__DIR__, "src", "assets", args...)
6 | site = "https://github.com/JuliaTeachingCTU/JuliaCTUGraphics/raw/main/"
7 | force = true
8 |
9 | mv(download("$(site)logo/CTU-logo-dark.svg"), assetsdir("logo-dark.svg"); force)
10 | mv(download("$(site)logo/CTU-logo.svg"), assetsdir("logo.svg"); force)
11 | mv(download("$(site)icons/favicon.ico"), assetsdir("favicon.ico"); force)
12 |
13 | # outline
14 | installation = [
15 | "Installation" => "./installation/installation.md",
16 | "Quickstart guide" => "./installation/tutorial.md",
17 | ]
18 |
19 | lecture_01 = [
20 | "Variables" => "./lecture_01/variables.md",
21 | "Elementary functions" => "./lecture_01/operators.md",
22 | "Strings" => "./lecture_01/strings.md",
23 | "Arrays" => "./lecture_01/arrays.md",
24 | "Data structures" => "./lecture_01/data_structures.md",
25 | ]
26 |
27 | lecture_02 = [
28 | "Function basics" => "./lecture_02/functions.md",
29 | "Conditional evaluations" => "./lecture_02/conditions.md",
30 | "Loops and iterators" => "./lecture_02/loops.md",
31 | "Soft local scope" => "./lecture_02/scope.md",
32 | "Exercises" => "./lecture_02/exercises.md",
33 | ]
34 |
35 | lecture_03 = [
36 | "Standard library" => "./lecture_03/standardlibrary.md",
37 | "Package manager" => "./lecture_03/pkg.md",
38 | "Plots.jl" => "./lecture_03/Plots.md",
39 | "DataFrames.jl" => "./lecture_03/DataFrames.md",
40 | "Other useful packages" => "./lecture_03/otherpackages.md",
41 | "Interaction with other languages" => "./lecture_03/interaction.md",
42 | ]
43 |
44 | lecture_04 = [
45 | "Functions" => "./lecture_04/functions.md",
46 | "Methods" => "./lecture_04/methods.md",
47 | "Scope of variables" => "./lecture_04/scope.md",
48 | "Exception handling" => "./lecture_04/exceptions.md",
49 | "Exercises" => "./lecture_04/exercises.md",
50 | ]
51 |
52 | lecture_05 = [
53 | "Abstract and composite types" => "./lecture_05/compositetypes.md",
54 | "Generic programming" => "./lecture_05/currencies.md",
55 | ]
56 |
57 | lecture_06 = [
58 | "Files and modules" => "./lecture_06/modules.md",
59 | "Package structure" => "./lecture_06/structure.md",
60 | "Development workflow" => "./lecture_06/workflow.md",
61 | "Package dependencies" => "./lecture_06/compatibility.md",
62 | ]
63 |
64 | lecture_07 = [
65 | "Tests" => "./lecture_07/tests.md",
66 | "Documentation" => "./lecture_07/documentation.md",
67 | "Extensions" => "./lecture_07/extensions.md",
68 | ]
69 |
70 | lecture_08 = [
71 | "Introduction to continuous optimization" => "./lecture_08/theory.md",
72 | "Gradients" => "./lecture_08/gradients.md",
73 | "Unconstrained optimization" => "./lecture_08/unconstrained.md",
74 | "Constrained optimization" => "./lecture_08/constrained.md",
75 | "Exercises" => "./lecture_08/exercises.md",
76 | ]
77 |
78 | lecture_09 = [
79 | "Introduction to regression and classification" => "./lecture_09/theory.md",
80 | "Linear regression" => "./lecture_09/linear.md",
81 | "Logistic regression" => "./lecture_09/logistic.md",
82 | "Exercises" => "./lecture_09/exercises.md",
83 | ]
84 |
85 | lecture_10 = joinpath.("./lecture_10/", [
86 | "theory.md",
87 | "nn.md",
88 | "exercises.md",
89 | ])
90 |
91 | lecture_11 = joinpath.("./lecture_11/", [
92 | "theory.md",
93 | "iris.md",
94 | "nn.md",
95 | "exercises.md",
96 | ])
97 |
98 | lecture_12 = joinpath.("./lecture_12/", [
99 | "sparse.md",
100 | "monte.md",
101 | "glm.md",
102 | ])
103 |
104 | lecture_13 = joinpath.("./lecture_13/", [
105 | "theory.md",
106 | "ode.md",
107 | "diff_eq.md",
108 | "optimal_control.md",
109 | ])
110 |
111 | # make docs options
112 | makedocs(;
113 | authors="JuliaTeachingCTU",
114 | sitename="Julia for Optimization and Learning",
115 | format=Documenter.HTML(;
116 | prettyurls=get(ENV, "CI", "false") == "true",
117 | collapselevel=1,
118 | assets=[
119 | "assets/favicon.ico",
120 | ],
121 | ansicolor=true
122 | ),
123 | pages=[
124 | "Home" => "index.md",
125 | "Why Julia?" => "why.md",
126 | "Installation" => installation,
127 | "1: Basics I" => lecture_01,
128 | "2: Basics II" => lecture_02,
129 | "3: Packages" => lecture_03,
130 | "4: Functions and methods" => lecture_04,
131 | "5: Type system and generic programming" => lecture_05,
132 | "6: Code organization I" => lecture_06,
133 | "7: Code organization II" => lecture_07,
134 | "8: Optimization" => lecture_08,
135 | "9: Regression and classification" => lecture_09,
136 | "10: Neural networks I." => lecture_10,
137 | "11: Neural networks II." => lecture_11,
138 | "12: Statistics" => lecture_12,
139 | ],
140 | )
141 |
142 | deploydocs(;
143 | repo="github.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning.git"
144 | )
145 |
--------------------------------------------------------------------------------
/docs/src/assets/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/assets/favicon.ico
--------------------------------------------------------------------------------
/docs/src/data/MakeVideo.m:
--------------------------------------------------------------------------------
1 | function MakeVideo(X, t_max, filename)
2 | addpath('./borders');
3 |
4 | load('Stations.mat', 'lons', 'lats', 'mStart', 'y');
5 | load('Stations2.mat', 'outlon0', 'outlon1', 'outlat0', 'outlat1', 'numxgrid', 'numygrid');
6 | load('conc_hysplit.mat', 'data');
7 | data = permute(data, [3 4 2 1]);
8 |
9 | concExpected = zeros(280, 323, 38);
10 | for i=1:length(X)
11 | concExpected = concExpected + X(i)*data(:,:,:,i);
12 | end
13 |
14 | lonsTest = linspace(outlon0, outlon1, numxgrid)';
15 | latsTest = linspace(outlat0, outlat1, numygrid)';
16 |
17 | cropSmall = 0.01;
18 |
19 | colorMap = colormap(hsv);
20 | levels = [0.01; 0.3; 1; 2; 5; Inf];
21 | levelsC = floor((1:length(levels)-1)/(length(levels)-1)*size(colorMap,1));
22 | for timeInstant = 1:t_max
23 | yPred = squeeze(concExpected(:,:,timeInstant+8));
24 | measSmall = mStart == timeInstant & y < cropSmall*max(y(mStart == timeInstant));
25 | measBig = mStart == timeInstant & y >= cropSmall*max(y(mStart == timeInstant));
26 |
27 | figure('visible', 'off');
28 | hold on;
29 | for i=1:length(lonsTest)-1
30 | for j=1:length(latsTest)-1
31 | for k=1:length(levels)-1
32 | if yPred(i,j) >= levels(k) && yPred(i,j) < levels(k+1)
33 | rectangle('Position',[lonsTest(i), latsTest(j), lonsTest(i+1)-lonsTest(i), latsTest(j+1)-latsTest(j)], 'FaceColor', colorMap(levelsC(k),:), 'EdgeColor', 'none');
34 | end
35 | end
36 | end
37 | end
38 |
39 | for k=1:length(levels)-1
40 | measBigPart = measBig & y >= levels(k) & y < levels(k+1);
41 | scatter(lons(measBigPart), lats(measBigPart), [], colorMap(levelsC(k),:), 'filled');
42 | end
43 | for k=1:length(levels)-1
44 | rectangle('Position',[outlon0 + (k-1)*2, outlat0 + 1, 2, 1], 'FaceColor', colorMap(levelsC(k),:), 'EdgeColor', 'none');
45 | end
46 | scatter(lons(measBig), lats(measBig), 'k');
47 | scatter(lons(measSmall), lats(measSmall), 'xk');
48 | xlim([outlon0 outlon1]);
49 | ylim([outlat0 outlat1]);
50 | borders('countries','nomap','k');
51 | set(gcf,'color','w');
52 |
53 | frame = getframe(gcf);
54 | [imind, cm] = rgb2ind(frame2im(frame),256);
55 | if timeInstant == 1
56 | imwrite(imind, cm, filename, 'gif', 'Loopcount', inf, 'DelayTime', 0.25);
57 | else
58 | imwrite(imind, cm, filename, 'gif', 'WriteMode', 'append', 'DelayTime', 0.25);
59 | end
60 | end
61 | end
62 |
--------------------------------------------------------------------------------
/docs/src/data/Stations.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/data/Stations.mat
--------------------------------------------------------------------------------
/docs/src/data/Stations2.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/data/Stations2.mat
--------------------------------------------------------------------------------
/docs/src/data/Video.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/data/Video.gif
--------------------------------------------------------------------------------
/docs/src/data/borders/borderdata.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/data/borders/borderdata.mat
--------------------------------------------------------------------------------
/docs/src/data/borders/borders.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/data/borders/borders.png
--------------------------------------------------------------------------------
/docs/src/data/borders/borders_documentation.m:
--------------------------------------------------------------------------------
1 | %% |borders| documentation
2 | % This function plots borders of nations or US states. This function does
3 | % *not* require Matlab's Mapping Toolbox. Data are compiled from a
5 | % border dataset.
7 | %
8 | %% Syntax
9 | %
10 | % borders
11 | % borders(place)
12 | % borders(...,LineProperty,LineValue)
13 | % borders(...,PatchProperty,PatchValue)
14 | % borders(...,'NoMappingToolbox')
15 | % h = borders(...)
16 | % [lat,lon] = borders(place)
17 | %
18 | %% Description
19 | %
20 | % |borders| plots national borders.
21 | %
22 | % |borders(place)| plots the borders of a |place|, which can be any country or US state. |place| may also be
23 | % |'countries'| to plot all national borders, |'states'| to plot all US state borders, or |'Continental US'| to
24 | % plot only the continental United States (sorry Guam). Note: to plot the nation of Georgia, use |'Georgia'|.
25 | % To plot the US state of Georgia, specify |'Georgia.'| with a period.
26 | %
27 | % |borders(...,LineProperty,LineValue)| specifies linestyle or markerstyle.
28 | %
29 | % |borders(...,PatchProperty,PatchValue)| outlines states or nations as patch objects if any property begins
30 | % with |'face'|, (e.g., |'facecolor','red'|). Note that plotting all countries as patches can be a bit slow.
31 | %
32 | % |borders(...,'NoMappingToolbox')| plots data in plain old unprojected cartesian coordinates by |plot(lon,lat)|.
33 | % If you do not have Matlab's Mapping Toolbox, this option is selected automatically. If you do have Matlab's Mapping
34 | % Toolbox, but you do not want to plot in map axes, include |'NoMappingToolbox'| or simply |'nomap'|.
35 | %
36 | % |h = borders(...)| returns a handle |h| of plotted object(s).
37 | %
38 | % |[lat,lon] = borders(place)| does not plot any borders, but returns arrays of their geographic coordinates.
39 | %
40 | %% Example 1: Very simple
41 | % To plot all national borders, just type |borders|:
42 |
43 | borders
44 |
45 | %% Example 2: Red Russia, red Russia
46 | % Add Russia to the map as a big red patch:
47 |
48 | borders('russia','facecolor','red')
49 |
50 | %% Example 3: Continental US
51 | % Open a new figure and plot the continental United States outlined in
52 | % black:
53 |
54 | figure
55 | borders('continental us','k')
56 |
57 | %% Example 4: Fancy formatting
58 | % Let's turn Texas blue (that'll be the day), give it a thick magenta
59 | % outline, and give Nebraska a thick green outline. The |labelborders|
60 | % function works just like the |borders| function.
61 |
62 | borders('texas','facecolor','blue','edgecolor','m','linewidth',2)
63 | borders('nebraska','g','linewidth',2)
64 | labelborders('continental us','fontsize',6);
65 | labelborders('Texas','color','r','backgroundcolor','y',...
66 | 'fontangle','italic','fontsize',16)
67 |
68 | %% Example 5: Georgia vs. Georgia.
69 | % There are two Georgias. To distinguish between them, I've placed a
70 | % period at the end of the US state called Georgia. Let us compare:
71 |
72 | figure
73 | subplot(121)
74 | borders 'georgia'
75 | labelborders 'Georgia'
76 | subplot(122)
77 | borders 'georgia.'
78 | labelborders 'Georgia.'
79 |
80 | %% Example 6: Data only
81 | % Just want the outline of a country or state without plotting it? Using
82 | % |borders| with two outputs returns lat, lon arrays without plotting.
83 |
84 | [lat,lon] = borders('kenya');
85 |
86 | %% Example 7: No Mapping Toolbox? No problem!
87 | % You don't need Matlab's Mapping Toolbox to use this function. If you do
88 | % not have the Mapping Toolbox, the |'nomap'| option is selected by
89 | % default. I do have the Mapping Toolbox, so if I don't want data plotted
90 | % in map axes, I have to specify |'nomap'| like this:
91 |
92 | figure
93 | borders('countries','nomap')
94 | axis tight
95 |
96 | %% Example 8: Formatting without the Mapping Toolbox
97 | % With or without Matlab's Mapping Toolbox, formatting patches and country
98 | % outlines is easy:
99 |
100 | figure
101 | borders('mexico','r:','linewidth',2,'nomap')
102 | hold on
103 | borders('belize','facecolor','b','linestyle','-','linewidth',1,'nomap')
104 | labelborders('Mexico','color','r','nomap')
105 | labelborders('Belize','color','m','nomap')
106 |
107 | %% Author Info
108 | % The |borders| and |labelborders| functions were written by Chad A. Greene
110 | % of the University of Texas at Austin's Institute for Geophysics (UTIG), April 2015.
111 |
--------------------------------------------------------------------------------
/docs/src/data/borders/html/borders_documentation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/data/borders/html/borders_documentation.png
--------------------------------------------------------------------------------
/docs/src/data/borders/html/borders_documentation_01.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/data/borders/html/borders_documentation_01.png
--------------------------------------------------------------------------------
/docs/src/data/borders/html/borders_documentation_02.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/data/borders/html/borders_documentation_02.png
--------------------------------------------------------------------------------
/docs/src/data/borders/html/borders_documentation_03.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/data/borders/html/borders_documentation_03.png
--------------------------------------------------------------------------------
/docs/src/data/borders/html/borders_documentation_04.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/data/borders/html/borders_documentation_04.png
--------------------------------------------------------------------------------
/docs/src/data/borders/html/borders_documentation_05.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/data/borders/html/borders_documentation_05.png
--------------------------------------------------------------------------------
/docs/src/data/borders/html/borders_documentation_06.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/data/borders/html/borders_documentation_06.png
--------------------------------------------------------------------------------
/docs/src/data/borders/html/borders_documentation_07.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/data/borders/html/borders_documentation_07.png
--------------------------------------------------------------------------------
/docs/src/data/conc_hysplit.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/data/conc_hysplit.mat
--------------------------------------------------------------------------------
/docs/src/data/data.bson:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/data/data.bson
--------------------------------------------------------------------------------
/docs/src/data/make_video.jl:
--------------------------------------------------------------------------------
1 | ENV["MATLAB_HOME"] = "D:\\Matlab\\R2020b"  # point MATLAB.jl to the local MATLAB installation
2 | using MATLAB
3 | using BSON
4 |
5 | s = MSession()  # start a MATLAB session
6 |
7 | X = BSON.load("data.bson")[:X]  # load the vector X stored in data.bson
8 | X = mxarray(X)                  # convert it to a MATLAB-compatible array
9 | @mput X                        # send X to the MATLAB workspace
10 |
11 | eval_string(s, "MakeVideo(X, 30, \"Video.gif\");")  # render 30 time steps into Video.gif
12 |
--------------------------------------------------------------------------------
/docs/src/index.md:
--------------------------------------------------------------------------------
1 | ```@raw html
2 |
3 |
4 | ```
5 | ---
6 |
7 | ```@setup grsetup
8 | using Plots
9 | ENV["GKSwstype"] = "100"
10 | gr()
11 | ```
12 |
13 | Welcome to our course *Julia for Optimization and Learning*. This course consists of two parts:
14 | - *Basics of Julia*: [Julia](https://julialang.org/) is a fast programming language for scientific computing. Designed and developed at MIT, it has been quickly gaining popularity and ranked 25th among programming languages in the [PYPL rating](https://pypl.github.io/PYPL.html) (as of September 2024).
15 | - *Applications*: The second part of the course will be dedicated to applications. The main emphasis will be given to machine learning, but we will also go through statistics and differential equations.
16 |
17 | This course is taught at the [Czech Technical University](https://www.cvut.cz/en/) in Prague. It is part of the [prg.ai minor](https://prg.ai/minor/), a study programme combining top courses from four faculties of two Prague universities.
18 |
19 |
20 | ## What will we emphasize?
21 |
22 | The main goals of the course are the following:
23 | - You will learn the *connections between theory and coding*. There are many lectures which teach either only theory or only coding. We will show you both.
24 | - You will learn how to *code efficiently*. We will teach you to split code into small parts that are simpler to debug or optimize. We will often show you several ways of writing the same code and comment on the differences.
25 | - You will learn about *machine learning and neural networks*. You will understand neural networks by writing a simple one from scratch. Then you will learn how to use packages to write simple code for complicated networks.
26 | - You will learn *independence*. The problem formulation of many exercises is intentionally general, which simulates real tasks where no step-by-step procedure is provided.
27 |
28 |
29 | ## What will you learn?
30 |
31 | Selected examples of what you will be able to write at the end of the course include:
32 | - *Efficient coding*: The following plot can be created in twenty lines of code
33 | 
34 | - *Numerical techniques*: You will learn many techniques to minimize functions
35 | 
36 | - *Neural networks*: And apply techniques to train neural networks
37 | 
38 | - Figure 1 contains digit 5 with probability 0.999683.
39 | - Figure 2 contains digit 0 with probability 1.000000.
40 | - Figure 3 contains digit 4 with probability 0.974734.
41 | - *Connection to Matlab, R or Python*: Do you have Matlab code which you need to run from Julia? No problem: write five lines of code to get
42 | 
43 |
44 |
45 | ## Technical details
46 |
47 | Scripts for each lecture are available in a dedicated [GitHub repository](https://github.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning-Scripts). For students attending the course at CTU, we list the requirements for completing the course in [Czech](https://cw.fel.cvut.cz/b231/courses/b0b36jul/start) and [English](https://cw.fel.cvut.cz/b231/courses/b0b36jul/en/start).
48 |
49 |
50 | ## Useful materials
51 |
52 | - [Official documentation](https://docs.julialang.org/en/v1/)
53 | - [Cheatsheet for differences between Julia, Matlab, and Python](https://cheatsheets.quantecon.org/)
54 | - [Cheatsheet of basic functions](https://cheatsheets.quantecon.org/julia-cheatsheet.html)
55 | - [Cheatsheet of advanced functions](https://juliadocs.github.io/Julia-Cheat-Sheet/)
56 | - [Think Julia: How to Think Like a Computer Scientist](https://benlauwens.github.io/ThinkJulia.jl/latest/book.html#chap01)
57 | - [From Zero to Julia!](https://techytok.com/from-zero-to-julia/)
58 |
--------------------------------------------------------------------------------
/docs/src/installation/installation.md:
--------------------------------------------------------------------------------
1 | ## Installation
2 |
3 | There are multiple ways to install Julia and its version manager Juliaup. We recommend following the [official documentation](https://julialang.org/downloads/):
4 |
5 | - **Windows** users can install Julia together with Juliaup directly from the [Windows Store](https://apps.microsoft.com/detail/9njnww8pvkmn?hl=cs-cz&gl=CZ), or equivalently use the following command
6 |
7 | ```powershell
8 | winget install julia -s msstore
9 | ```
10 |
11 | - **Linux** and **macOS** users can install Julia together with Juliaup by running the following command
12 |
13 | ```shell
14 | curl -fsSL https://install.julialang.org | sh
15 | ```
16 |
17 | Once finished, the `julia` and `juliaup` commands should be available from the command line.
18 |
19 | !!! info "Other installation options:"
20 | For more options on how to install Julia and Juliaup, see the [Juliaup GitHub repository](https://github.com/JuliaLang/juliaup).
21 |
22 | For the upcoming course, we recommend installing Julia version 1.10 and setting it as the default version. This can be done in the following way
23 |
24 | ```shell
25 | > juliaup add 1.10
26 |
27 | > juliaup default 1.10
28 | Configured the default Julia version to be '1.10'.
29 | ```
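
As a quick sanity check (an optional step, not part of the original instructions), you can start `julia` and inspect the built-in `VERSION` constant to confirm that the default channel is the one just configured:

```julia
julia> VERSION  # version of the running Julia session
v"1.10.4"       # the exact patch number may differ
```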
30 |
31 | ### Git
32 |
33 | [Git](https://git-scm.com/) is a distributed version control system for tracking changes in any set of text files. It is designed for coordinating work among cooperating programmers during software development. The Git installer can be downloaded from the official [download page](https://git-scm.com/downloads). Download the proper installer, run it, and follow the instructions. Before using Git, we need to make a few necessary settings. This can be done easily from the command line with the two following commands
34 |
35 | ```shell
36 | > git config --global user.name ""
37 |
38 | > git config --global user.email ""
39 | ```
40 |
41 | The commands above set the user name and email for Git. Because Git is designed for collaboration between multiple people, this information is used to track who made which changes.
42 |
43 | !!! info "GitHub Account:"
44 | The Julia package system is based on Git, and the Julia project is hosted on [GitHub](https://github.com/). GitHub is a service that provides internet hosting for software development and version control using Git. We use GitHub to host all the materials and final projects in this course. Therefore, every student needs to create a GitHub account to be able to finish the course. It can be done in a few steps on the official [GitHub page](https://github.com/).
45 |
46 | ### Visual Studio Code
47 |
48 | It is possible to write Julia code in any text editor and run it directly from the terminal. However, it is usually better to use an IDE that provides additional features such as syntax highlighting or code suggestions. We recommend using [Visual Studio Code](https://code.visualstudio.com/), a free source-code editor made by Microsoft. It supports many programming languages (Julia, Python, LaTeX, ...) via extensions. The editor is available at the official [download page](https://code.visualstudio.com/download). Download the proper installer, run it, and follow the instructions.
49 |
50 | To use VS Code as an IDE for Julia, we have to install the [Julia extension](https://marketplace.visualstudio.com/items?itemName=julialang.language-julia). This can be done directly from VS Code. Open the `Extensions MarketPlace` by pressing its button in the `Activity bar` (the left bar). Type `julia` in the search bar and select the Julia extension. Then press the `Install` button to install the extension. For more information, see the [official documentation](https://www.julia-vscode.org/docs/stable/#).
51 |
52 |
--------------------------------------------------------------------------------
/docs/src/installation/juliapath_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/installation/juliapath_1.png
--------------------------------------------------------------------------------
/docs/src/installation/juliapath_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/installation/juliapath_2.png
--------------------------------------------------------------------------------
/docs/src/installation/tutorial.md:
--------------------------------------------------------------------------------
1 | ## Creating new project
2 |
3 | This section provides a step-by-step tutorial showing how to create a new project, add a new file, initialize a git repository, and publish the repository on Github.
4 |
5 | The first thing we have to do when creating a new project is to select a folder where we want to store the project. Open the file `Explorer` in VS Code by pressing its icon in the activity bar, and press the `Open Folder` button. Alternatively, use the keyboard shortcut `Ctrl + K Ctrl + O`.
6 |
7 | 
8 |
9 | A system file explorer should open, so find and select the folder you want to use as a project folder. In our case, it is a `Tutorial` folder in `Documents`.
10 |
11 | 
12 |
13 | Now go to the project manager by pressing the appropriate button in the activity bar. Since we are creating our first project, the project manager tab should be empty. Press the `Project Manager: Save Project` button, type a project name in the pop-up bar, and then press `Enter`.
14 |
15 | 
16 |
17 | It will add a new project to the project manager. In our case, it is a project called `Tutorial`.
18 |
19 | 
20 |
21 | Now go back to the file explorer. In the sidebar, under the project name, there should be an empty space. Press the `New File` button next to the project name and type a new file name with the `.jl` extension. Alternatively, use the keyboard shortcut `Ctrl + N` to create a new file and then `Ctrl + S` to save it.
22 |
23 | 
24 |
25 | The new file will open in the editor to the right of the File Explorer sidebar. Type `println("Hello, world.")` in the new file and press `Ctrl + S` to save the change. Now select the code and press `Ctrl + Enter` to execute it. This shortcut starts a new Julia session and sends the selected code to it. Congratulations, you have just created and run a **Hello, world** program in Julia.
26 |
27 | 
28 |
29 | ## Initialize Git repository
30 |
31 | Now that we have created a new project, it is time to initialize the git repository to track the project's changes. Go to the `Source Control` bar by pressing the appropriate button in the activity bar. Then press the `Initialize Repository` button, which will create a new Git repository in the project folder.
32 |
33 | 
34 |
35 | We can check if the Git repository is initialized in the system file explorer. Go to the project folder, and in the file explorer, in the top bar under the `View` tab, select the `Hidden items` option. Now you should see the `.git` folder in the project directory.
36 |
37 | 
38 |
39 | With the Git repository initialized, we can start tracking changes to our work. Note the number 1 on the source control icon. It indicates one change against the last version of the project.
40 |
41 | Git provides the `commit` command to capture changes in the project. To create a new git commit, we must first select which changes we want to capture. In our case, it is trivial since there is only one change. In the source control under the `Changes` section, you should see the `test.jl` file. Press the `Stage Changes` button located to the right of the file name (the `+` icon).
42 |
43 | 
44 |
45 | This moves the file under the `Staged Changes` section. The next step is to add a summary to the git commit. Type any message that describes changes made in the project. It is good to use short but descriptive messages since it will help navigate the project history. We use the `Initial commit` message. To finish the git commit, press the `Commit` button above the message bar or use the keyboard shortcut `Ctrl + Enter` in the message bar.
46 |
47 | 
48 |
49 | Now return to the file explorer bar and open the `Timeline` drop-down menu at the bottom. The `Timeline` bar shows the history of the currently open file. In our case, we can see the history of the `test.jl` file: one git commit created by user `User Name` and described by the `Initial commit` message. If you click on that git commit, it shows the changes made to the current file: on the left-hand side, the file's state before the commit, and on the right-hand side, its state after. This allows us to see all the changes made in the file; here, we added one line of code.
50 |
51 | 
52 |
53 |
54 | ## Publish on GitHub
55 |
56 | Now it's time to publish our work on GitHub. With GitHub, it's easy to share and collaborate on a project with multiple people. If you did not create a GitHub account in the previous section about Git installation, please do it now.
57 |
58 | Firstly we have to create a new empty repository on GitHub. Press the `+` button in the GitHub website's top right corner and select the `New repository` option.
59 |
60 | 
61 |
62 | Enter the desired repository name, select whether the repository should be private or public, and press `Enter`. In our case, we name the repository `Tutorial.jl`, since the `.jl` suffix is an easy way to show that the project is written in Julia.
63 |
64 | 
65 |
66 | When the new repository is created, select the repository URL and copy it. We will need it in the next step.
67 |
68 | 
69 |
70 | The next step is to add a new remote repository to the local Git repository. It can be done directly from the VS Code. Go to the `Source Control` bar by pressing the appropriate button in the activity bar. Then press the `...` button to open the pop-up window and select the option `Remote` and then `Add Remote...`.
71 |
72 | 
73 |
74 | In the pop-up window, enter the URL of the GitHub repository created in the previous steps and press enter.
75 |
76 | 
77 |
78 | The next step is to choose a name for the remote repository. Git uses this name to recognize which remote repository we want to work with, since multiple remote repositories can be linked to the local one. If there is only one remote repository, the usual name is `origin`.
79 |
80 | 
81 |
82 | The last step is to push local changes to the remote repository. To do that, press the `Publish on GitHub` button in the bottom left corner.
83 |
84 | 
85 |
86 | And that's all. Now that you've published your first repository on GitHub, you can easily share your project with others.
87 |
88 | 
89 |
--------------------------------------------------------------------------------
/docs/src/installation/tutorial_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/installation/tutorial_1.png
--------------------------------------------------------------------------------
/docs/src/installation/tutorial_10.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/installation/tutorial_10.png
--------------------------------------------------------------------------------
/docs/src/installation/tutorial_11.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/installation/tutorial_11.png
--------------------------------------------------------------------------------
/docs/src/installation/tutorial_12.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/installation/tutorial_12.png
--------------------------------------------------------------------------------
/docs/src/installation/tutorial_13.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/installation/tutorial_13.png
--------------------------------------------------------------------------------
/docs/src/installation/tutorial_14.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/installation/tutorial_14.png
--------------------------------------------------------------------------------
/docs/src/installation/tutorial_15.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/installation/tutorial_15.png
--------------------------------------------------------------------------------
/docs/src/installation/tutorial_16.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/installation/tutorial_16.png
--------------------------------------------------------------------------------
/docs/src/installation/tutorial_17.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/installation/tutorial_17.png
--------------------------------------------------------------------------------
/docs/src/installation/tutorial_18.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/installation/tutorial_18.png
--------------------------------------------------------------------------------
/docs/src/installation/tutorial_19.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/installation/tutorial_19.png
--------------------------------------------------------------------------------
/docs/src/installation/tutorial_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/installation/tutorial_2.png
--------------------------------------------------------------------------------
/docs/src/installation/tutorial_20.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/installation/tutorial_20.png
--------------------------------------------------------------------------------
/docs/src/installation/tutorial_3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/installation/tutorial_3.png
--------------------------------------------------------------------------------
/docs/src/installation/tutorial_4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/installation/tutorial_4.png
--------------------------------------------------------------------------------
/docs/src/installation/tutorial_5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/installation/tutorial_5.png
--------------------------------------------------------------------------------
/docs/src/installation/tutorial_6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/installation/tutorial_6.png
--------------------------------------------------------------------------------
/docs/src/installation/tutorial_7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/installation/tutorial_7.png
--------------------------------------------------------------------------------
/docs/src/installation/tutorial_8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/installation/tutorial_8.png
--------------------------------------------------------------------------------
/docs/src/installation/tutorial_9.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/installation/tutorial_9.png
--------------------------------------------------------------------------------
/docs/src/lecture_01/data_structures.md:
--------------------------------------------------------------------------------
1 | ## Tuples
2 |
3 | A tuple is an immutable, ordered, fixed-size group of elements. Therefore, it is impossible to add new elements or to change the value of any element of a tuple. Tuples are created using the following syntax:
4 |
5 | ```jldoctest tuples
6 | julia> t = (1, 2.0, "3")
7 | (1, 2.0, "3")
8 | ```
9 |
10 | It is also possible to omit the parentheses.
11 |
12 | ```jldoctest tuples
13 | julia> t = 1, 2.0, "3"
14 | (1, 2.0, "3")
15 | ```
16 |
17 | The same syntax is used in [function definitions](@ref Functions) to return multiple values at once. The tuple type consists of the types of all its elements.
18 |
19 | ```jldoctest tuples
20 | julia> typeof(t)
21 | Tuple{Int64, Float64, String}
22 | ```
23 |
24 | In this case, we have a tuple that contains three elements of types `Int64`, `Float64`, and `String`.
25 |
26 | To access elements of a tuple, we can use the same syntax as for arrays.
27 |
28 | ```jldoctest tuples
29 | julia> t[1] # the first element
30 | 1
31 |
32 | julia> t[end] # the last element
33 | "3"
34 |
35 | julia> t[1:2] # the first two elements
36 | (1, 2.0)
37 | ```
38 |
39 | A handy feature is the possibility to unpack a tuple into multiple variables.
40 |
41 | ```jldoctest tuples
42 | julia> a, b, c = t
43 | (1, 2.0, "3")
44 |
45 | julia> println("The values stored in the tuple are: $a, $b and $c")
46 | The values stored in the tuple are: 1, 2.0 and 3
47 | ```
48 |
49 | Arrays can be unpacked in the same way, as the example below shows. However, tuples are usually used for storing a small number of values, while arrays are typically large. Recall that while tuples are immutable, arrays are mutable.
50 |
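For instance, the same syntax unpacks a vector (a minimal sketch):

```jldoctest
julia> a, b, c = [1, 2.0, "3"];

julia> println("The values stored in the array are: $a, $b and $c")
The values stored in the array are: 1, 2.0 and 3
```
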
51 | !!! warning "Exercise:"
52 | Create a tuple that contains the first four letters of the alphabet (these letters should be of type `String`). Then unpack this tuple into four variables `a`, `b`, `c` and `d`.
53 |
54 | !!! details "Solution:"
55 | Such a tuple can be created easily using the standard syntax:
56 |
57 | ```jldoctest tuples_ex
58 | julia> t = ("a", "b", "c", "d")
59 | ("a", "b", "c", "d")
60 | ```
61 |
62 | We can use the four variables and the `=` sign to unpack the tuple.
63 |
64 | ```jldoctest tuples_ex
65 | julia> a, b, c, d = t
66 | ("a", "b", "c", "d")
67 | ```
68 |
69 | ## Named tuples
70 |
71 | Named tuples are similar to tuples, i.e., a named tuple is an immutable, ordered, fixed-size group of elements. The only difference is that each element consists of a name (identifier) and a value. Named tuples are created using the following syntax:
72 |
73 | ```jldoctest named_tuples
74 | julia> t = (a = 1, b = 2.0, c = "3")
75 | (a = 1, b = 2.0, c = "3")
76 | ```
77 |
78 | It is also possible to create a named tuple directly from variables.
79 |
80 | ```jldoctest named_tuples
81 | julia> a = 1;
82 |
83 | julia> b = 2.0;
84 |
85 | julia> c = "3";
86 |
87 | julia> t = (; a, b, c)
88 | (a = 1, b = 2.0, c = "3")
89 | ```
90 |
91 | Here the semicolon is mandatory: without it, the result would be a tuple instead of a named tuple. Similarly to tuples, the elements of a named tuple can be accessed via square brackets. However, as opposed to tuples, it is impossible to access multiple elements at once.
92 |
93 | ```jldoctest named_tuples
94 | julia> t[1] # the first element
95 | 1
96 |
97 | julia> t[end] # the last element
98 | "3"
99 |
100 | julia> t[1:2] # error
101 | ERROR: MethodError: no method matching getindex(::@NamedTuple{a::Int64, b::Float64, c::String}, ::UnitRange{Int64})
102 | [...]
103 | ```
104 |
105 | On the other hand, it is possible to get elements of a named tuple via their names or unpack elements directly to variables.
106 |
107 | ```jldoctest named_tuples
108 | julia> t.a
109 | 1
110 |
111 | julia> t.c
112 | "3"
113 |
114 | julia> a, b, c = t
115 | (a = 1, b = 2.0, c = "3")
116 |
117 | julia> println("The values stored in the tuple are: a = $a, b = $b")
118 | The values stored in the tuple are: a = 1, b = 2.0
119 | ```
120 |
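It is also possible to unpack only selected elements by their names using property destructuring, available since Julia 1.7 (a small sketch):

```jldoctest named_tuples
julia> (; c, a) = t;

julia> c
"3"
```
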
121 | ## Dictionaries
122 |
123 | Dictionaries are mutable, unordered collections of key-value pairs, i.e., the iteration order of their elements is not guaranteed. The syntax for creating a dictionary is:
124 |
125 | ```jldoctest dicts
126 | julia> d = Dict("a" => [1, 2, 3], "b" => 1)
127 | Dict{String, Any} with 2 entries:
128 | "b" => 1
129 | "a" => [1, 2, 3]
130 | ```
131 |
132 | Another possibility is to use symbols instead of strings as keys.
133 |
134 | ```jldoctest dicts
135 | julia> d = Dict(:a => [1, 2, 3], :b => 1)
136 | Dict{Symbol, Any} with 2 entries:
137 | :a => [1, 2, 3]
138 | :b => 1
139 | ```
140 |
141 | !!! info "Symbol vs String:"
142 |     The use of the `Symbol` type might be preferable in certain cases, since its core representation is different. The string `"foo"` is a string literal and evaluates to the string "foo". On the other hand, a `Symbol` is a construct used internally to represent a variable in [metaprogramming](https://docs.julialang.org/en/v1/manual/metaprogramming/). Thanks to this internal representation, some operations (like the comparison `==`) can be faster on symbols than on strings.
143 |
144 | It is possible to use almost any type as a key in a dictionary. Note the key and value types of the resulting dictionary: Julia infers the closest common type that can represent all keys and all values.
145 |
146 | ```jldoctest dicts
147 | julia> d_test = Dict(1 => "a", 2.0 => "b", 3.0f0 => "c")
148 | Dict{Real, String} with 3 entries:
149 | 2.0 => "b"
150 | 3.0 => "c"
151 | 1 => "a"
152 | ```
153 |
154 | !!! info "Ambiguous key values:"
155 |     Be aware of using correct keys. In the following definition, all three keys are numerically equal, so the resulting dictionary has only one key, which holds the last assigned value.
156 |
157 | ```jldoctest dicts
158 | julia> d_test = Dict(1 => "a", 1.0 => "b", 1.0f0 => "c")
159 | Dict{Real, String} with 1 entry:
160 | 1.0 => "c"
161 | ```
162 |
163 | The elements of a dictionary can be accessed via square brackets and a key.
164 |
165 | ```jldoctest dicts
166 | julia> d[:a]
167 | 3-element Vector{Int64}:
168 | 1
169 | 2
170 | 3
171 | ```
172 |
173 | If the key does not exist in the dictionary, an error will occur if we try to access it.
174 |
175 | ```jldoctest dicts
176 | julia> d[:c]
177 | ERROR: KeyError: key :c not found
178 |
179 | julia> haskey(d, :c)
180 | false
181 | ```
182 |
183 | The `haskey` function checks whether the dictionary has the `:c` key. To avoid such errors, we can use the `get` function, which accepts three arguments: a dictionary, a key, and a default value for this key, which is returned if the key does not exist in the dictionary.
184 |
185 | ```jldoctest dicts
186 | julia> get(d, :c, 42)
187 | 42
188 | ```
189 |
190 | There is also an in-place version of the `get` function: the `get!` function additionally stores the default value in the dictionary under the given key if the key does not exist.
191 |
192 | ```jldoctest dicts
193 | julia> get!(d, :c, 42)
194 | 42
195 |
196 | julia> get!(d, :d, ["hello", "world"])
197 | 2-element Vector{String}:
198 | "hello"
199 | "world"
200 |
201 | julia> d
202 | Dict{Symbol, Any} with 4 entries:
203 | :a => [1, 2, 3]
204 | :b => 1
205 | :d => ["hello", "world"]
206 | :c => 42
207 | ```
208 |
209 | Unwanted keys from the dictionary can be removed by the `delete!` function.
210 |
211 | ```jldoctest dicts
212 | julia> delete!(d, :d)
213 | Dict{Symbol, Any} with 3 entries:
214 | :a => [1, 2, 3]
215 | :b => 1
216 | :c => 42
217 |
218 | julia> haskey(d, :d)
219 | false
220 | ```
221 |
222 | An alternative is the `pop!` function, which removes the key from the dictionary and returns the corresponding value.
223 |
224 | ```jldoctest dicts
225 | julia> pop!(d, :c)
226 | 42
227 |
228 | julia> haskey(d, :c)
229 | false
230 | ```
231 |
232 | Optionally, the `pop!` function accepts a default value for a given key, which is returned if the key does not exist in the dictionary.
233 |
234 | ```jldoctest dicts
235 | julia> haskey(d, :c)
236 | false
237 |
238 | julia> pop!(d, :c, 444)
239 | 444
240 | ```
241 |
--------------------------------------------------------------------------------
/docs/src/lecture_01/variables.md:
--------------------------------------------------------------------------------
1 | # Variables
2 |
3 | In Julia (as in other languages), a variable is a name that refers to a value. Contrary to languages like C or C++, and similarly to Python or MATLAB, variables can be created without type specification, i.e., a variable can be declared simply by using the `=` sign
4 |
5 | ```jldoctest var_declaration
6 | julia> x = 2
7 | 2
8 | ```
9 |
10 | The type of the variable is inferred automatically and can be checked using the `typeof` function
11 |
12 | ```jldoctest var_declaration
13 | julia> typeof(x)
14 | Int64
15 | ```
16 |
17 | In this case, the variable `x` is of type `Int64`, which is a type that represents signed integers. Since `x` is a number, we can apply basic mathematical operations to it
18 |
19 | ```jldoctest var_declaration
20 | julia> y = x + 1
21 | 3
22 |
23 | julia> typeof(y)
24 | Int64
25 | ```
26 |
27 | The type of the variable `x` is preserved because the sum of two integers is also an integer. We can also reuse the name of the variable and assign a new value to it
28 |
29 | ```jldoctest var_declaration
30 | julia> x = 4
31 | 4
32 | ```
33 |
34 | The type of the variable `x` is still `Int64`, but it is also possible to assign a value of a different type to `x`
35 |
36 | ```jldoctest var_declaration
37 | julia> x = 3.1415
38 | 3.1415
39 |
40 | julia> typeof(x)
41 | Float64
42 | ```
43 |
44 | In this case, the variable `x` is of type `Float64`, which is a type that represents floating-point numbers.
45 |
46 | !!! warning "Exercise:"
47 | Create the following three variables:
48 | 1. Variable `x` with value `1.234`.
49 | 2. Variable `y` with value `1//2`.
50 | 3. Variable `z` with value `x + y*im`.
51 | What are the types of these three variables?
52 |
53 | !!! details "Solution:"
54 | All three variables can be declared simply by assigning the value to the given variable name
55 |
56 | ```jldoctest var_types
57 | julia> x = 1.234
58 | 1.234
59 |
60 | julia> y = 1//2
61 | 1//2
62 |
63 | julia> z = x + y*im
64 | 1.234 + 0.5im
65 | ```
66 |
67 | and types can be checked using the `typeof` function
68 |
69 | ```jldoctest var_types
70 | julia> typeof(x)
71 | Float64
72 |
73 | julia> typeof(y)
74 | Rational{Int64}
75 |
76 | julia> typeof(z)
77 | ComplexF64 (alias for Complex{Float64})
78 | ```
79 |
80 | ## Primitive numeric types
81 |
82 | There are many types in Julia. In fact, every value in Julia has a type. As an example, we can mention the hierarchy of primitive numeric types
83 |
84 | 
85 |
86 | All types shown in blue are abstract types, i.e., it is impossible to create an instance of such a type. Abstract types are useful for creating a logical type hierarchy. Types highlighted in green are concrete types. In many cases, it is useful to be able to choose which type to use. Consider floating-point numbers: there are four concrete types for them. If we want to maximize the precision of a calculation, we can use `BigFloat`. Using `BigFloat` increases precision but also increases computational time. On the other hand, if we want to speed up the code, we can use a type with lower precision, such as `Float32`. However, in most cases, the user does not have to take care of types and can simply use the defaults, as the comparison below illustrates.
87 |
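The precision trade-off can be sketched as follows (a minimal example; the digit counts are approximate):

```julia
x32 = Float32(1) / 3   # 0.33333334f0, about 7-8 significant digits
x64 = 1 / 3            # 0.3333333333333333, about 15-16 significant digits
xbig = BigFloat(1) / 3 # about 77 significant digits with the default 256-bit precision
```
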
88 | ## Variable names
89 |
90 | Julia provides an extremely flexible system for naming variables. Variable names are case-sensitive and have no semantic meaning, i.e., the language will not treat variables differently based on their names.
91 |
92 | ```jldoctest
93 | julia> I_am_float = 3.1415
94 | 3.1415
95 |
96 | julia> CALL_ME_RATIONAL = 1//3
97 | 1//3
98 |
99 | julia> MyString = "MyVariable"
100 | "MyVariable"
101 | ```
102 |
103 | Here `I_am_float` contains a floating-point number, `CALL_ME_RATIONAL` contains a rational number (useful when exact arithmetic is needed), and `MyString` contains a string (a piece of text).
104 |
105 | Moreover, in the Julia REPL and several other Julia editing environments, it is possible to use many Unicode (UTF-8 encoding) math symbols by typing the backslashed $\LaTeX$ symbol name followed by tab. It is also possible to use many other non-math symbols. For example, the variable name `δ` can be entered by typing `\delta`
106 |
107 | ```jldoctest
108 | julia> δ = 1
109 | 1
110 | ```
111 |
112 | or pizza symbol `🍕` can be entered by typing `\:pizza:`
113 |
114 | ```jldoctest
115 | julia> 🍕 = "It's time for pizza!!!"
116 | "It's time for pizza!!!"
117 | ```
118 |
119 | The list of all Unicode characters that can be entered via tab completion of $\LaTeX$-like abbreviations in the Julia REPL is provided in the official [manual](https://docs.julialang.org/en/v1/manual/unicode-input/).
120 |
121 | Julia will even let the user redefine built-in constants and functions if needed (although this is not recommended, to avoid potential confusion)
122 |
123 | ```jldoctest
124 | julia> π = 2
125 | 2
126 |
127 | julia> π
128 | 2
129 | ```
130 |
131 | However, if the user tries to redefine a built-in constant or function that is already in use, Julia will throw an error
132 |
133 | ```jldoctest
134 | julia> ℯ
135 | ℯ = 2.7182818284590...
136 |
137 | julia> ℯ = 2
138 | ERROR: cannot assign a value to imported variable Base.ℯ from module Main
139 | [...]
140 | ```
141 |
142 | The only explicitly disallowed names for variables are the names of built-in reserved keywords listed in the following table
143 |
144 | | Reserved words: | | | | | |
145 | | :--- | :--- | :--- | :--- | :--- | :--- |
146 | | `baremodule` | `begin` | `break` | `catch` | `const` | `continue` |
147 | | `do` | `else` | `elseif` | `end` | `export` | `false` |
148 | | `finally` | `for` | `function` | `global` | `if` | `import` |
149 | | `let` | `local` | `macro` | `module` | `quote` | `return` |
150 | | `struct` | `true` | `try` | `using` | `while` | |
151 |
152 | ```jldoctest
153 | julia> struct = 3
154 | ERROR: ParseError:
155 | # Error @ none:1:8
156 | struct = 3
157 | # ╙ ── unexpected `=`
158 | [...]
159 | ```
160 |
161 | In many cases, it is beneficial to have the option to use special symbols as variable names. It may increase the code's readability, especially when implementing mathematical algorithms, where it is common to use the Greek alphabet. However, excessive use of special symbols can be confusing.
162 |
163 | !!! info "Stylistic Conventions:"
164 | While there are almost no restrictions on valid names in Julia, it is useful to adopt the following conventions:
165 | - Names of variables are in lower case.
166 | - Word separation can be indicated by underscores (`_`), but the use of underscores is discouraged unless the name would be hard to read otherwise.
167 | - Do not overuse special symbols, i.e., avoid using symbols like `🍕` as variable names.
168 | For more information about stylistic conventions, see the official [style guide](https://docs.julialang.org/en/v1/manual/style-guide/#Style-Guide-1) or [Blue Style](https://github.com/invenia/BlueStyle).
169 |
--------------------------------------------------------------------------------
/docs/src/lecture_02/exercises.md:
--------------------------------------------------------------------------------
1 | # Julia set
2 |
3 | So far, we have used only the standard library shipped with Julia. However, the standard library provides only basic functionality. If we want additional functions, we have to use extra packages. For example, there is the [Plots](https://github.com/JuliaPlots/Plots.jl) package for creating plots. Packages can be installed via the Pkg REPL. To enter the Pkg REPL from the Julia REPL, press `]` and install the package by
4 |
5 | ```julia
6 | (@v1.10) pkg> add Plots
7 | ```
8 |
9 | We need to use the `using` keyword to load the package. For example, we can use the Plots package to visualize the `sin` and `cos` functions.
10 |
11 | ```@example plots
12 | using Plots
13 | x = 0:0.01π:2π
14 |
15 | plot(x, sin.(x); label = "sinus", linewidth = 2)
16 | plot!(x, cos.(x); label = "cosinus", linewidth = 2)
17 |
18 | savefig("sin.svg") # hide
19 | ```
20 |
21 | 
22 |
23 | There will be a whole [section](@ref Plots.jl) dedicated to the Plots package. However, we need some basic functionality to visualize the outputs of the following exercises.
24 |
25 | !!! warning "Exercise 1:"
26 | Every programmer should be able to rewrite pseudocode to actual code. The goal of this exercise is to rewrite the following pseudocode:
27 |
28 | 
29 |
30 | This pseudocode describes how to compute the [Julia set](https://en.wikipedia.org/wiki/Julia_set) for the following function
31 |
32 | ```math
33 | f_c(z) = z^2 + c,
34 | ```
35 |
36 | where ``c \in \mathbb{C}`` is a complex parameter. To test the resulting code, try the following settings of input parameters
37 | - ``x`` is a vector of 1500 evenly spaced numbers from `-1.5` to `1.5`.
38 | - ``y`` is a vector of 1000 evenly spaced numbers from `-1` to `1`.
39 | - ``c = - 0.4 + 0.61 \cdot i``
40 | - ``R = 2``
41 | - ``N = 1000``
42 |
43 |     Use the code given below to plot the heatmap of the matrix ``A``.
44 |
45 | ```julia
46 | using Plots
47 | heatmap(A;
48 | c=:viridis,
49 | clims=(0, 0.15),
50 | cbar=:none,
51 | axis=:none,
52 | ticks=:none
53 | )
54 | ```
55 |
56 | !!! details "Solution:"
57 | Firstly, we have to define all input parameters.
58 |
59 | ```julia
60 | c = - 0.4 + 0.61im
61 | R = 2
62 | N = 1000
63 | L = 1500
64 | K = 1000
65 | ```
66 |
67 | The second step is to define the vectors `x` and `y`. Since we know that these vectors contain evenly spaced numbers, and we also know the starting point, the stopping point, and the length of the vectors, we can use the `range` function.
68 |
69 | ```julia
70 | x = range(-1.5, 1.5; length = L)
71 | y = range(-1.0, 1.0; length = K)
72 | ```
73 |
74 | The next step is to define the `A` matrix of zeros by the `zeros` function.
75 |
76 | ```julia
77 | A = zeros(K, L)
78 | ```
79 |
80 | Now, we rewrite the for loops from the pseudocode. It is possible to rewrite the pseudocode in an almost identical way. However, in many cases, the code can be simplified. For example, we can use the shorter syntax for writing nested `for` loops.
81 |
82 | ```julia
83 | for k in 1:K, l in 1:L
84 | z = x[l] + y[k]*im
85 | for n in 0:N
86 | if abs(z) > R^2 - R
87 | A[k, l] = n/N
88 | break
89 | end
90 | z = z^2 + c
91 | end
92 | end
93 | ```
94 |
95 | Finally, we visualize the heatmap of the matrix `A`.
96 |
97 | ```julia
98 | using Plots
99 | heatmap(A;
100 | c = :viridis,
101 | clims = (0, 0.15),
102 | cbar = :none,
103 | axis = :none,
104 | ticks = :none,
105 | )
106 | ```
107 |
108 | 
109 |
110 |
111 | !!! warning "Exercise 2:"
112 |     In the previous exercise, we rewrote pseudocode into actual Julia code. This exercise will improve the central part of the code: the inner loop. Write a function that replaces the inner loop in the code from the exercise above. Use the following function definition
113 |
114 | ```julia
115 | function juliaset(z, c, R, N)
116 | ???
117 | return ???
118 | end
119 | ```
120 |
121 | where ``z, c \in \mathbb{C}``, ``R \in \mathbb{R}`` and ``N \in \mathbb{N}``. Use the `while` loop to replace the `for` loop in the original pseudocode. Visualize the resulting matrix by the same code as in the previous exercise.
122 |
123 |
124 | **Hint:** recall that the function should return `0` if `n > N` and `n/N` otherwise.
125 |
126 | !!! details "Solution:"
127 |     As suggested in the exercise description, we use a `while` loop, for which we have to define a stopping condition. In this case, we have two conditions:
128 |     1. the maximal number of iterations is `N + 1`,
129 |     2. the absolute value of `z` must be smaller than or equal to `R^2 - R`.
130 |     These two conditions can be merged into `n <= N && abs(z) <= R^2 - R`. Inside the `while` loop, we only have to update `n` and `z`.
131 |
132 | ```julia
133 | function juliaset(z, c, R, N)
134 | n = 0
135 | while n <= N && abs(z) <= R^2 - R
136 | n += 1
137 | z = z^2 + c
138 | end
139 | return n > N ? 0 : n/N
140 | end
141 | ```
142 |
143 | We use the ternary operator to decide which value is returned. Now we need to define all input parameters as in the previous exercise.
144 |
145 | ```julia
146 | c = - 0.4 + 0.61im
147 | R = 2
148 | N = 1000
149 | x = range(-1.5, 1.5; length = 1500)
150 | y = range(-1.0, 1.0; length = 1000)
151 | ```
152 |
153 |     We could use nested `for` loops to create `A`. However, a simpler way is to use a list comprehension or broadcasting to vectorize the `juliaset` function.
154 |
155 | ```julia
156 | A1 = [juliaset(xl + yk*im, c, R, N) for yk in y, xl in x]
157 | A2 = juliaset.(x' .+ y .* im, c, R, N)
158 | ```
159 |
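    Before comparing them, we can sanity-check the broadcasting shape on small hypothetical inputs:

    ```julia
    xs = range(-1.5, 1.5; length = 4)  # hypothetical small sizes
    ys = range(-1.0, 1.0; length = 3)
    size(xs' .+ ys .* im)              # (3, 4): rows correspond to y, columns to x
    ```
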
160 |     Both `A1` and `A2` are identical. In the second case, we have to pay attention to the correct form of the input: we use the transpose of `x` so that broadcasting produces a matrix. Finally, we can call the same code to create the same plot.
161 |
162 | ```julia
163 | using Plots
164 | heatmap(A1;
165 | c = :viridis,
166 | clims = (0, 0.15),
167 | cbar = :none,
168 | axis = :none,
169 | ticks = :none,
170 | size = (800, 600),
171 | )
172 | ```
173 |
174 | 
175 |
176 |
177 | !!! warning "Exercise 3:"
178 | Try different values of variable `c` to create different plots. For inspiration, check the Wikipedia page about [Julia set](https://en.wikipedia.org/wiki/Julia_set).
179 |
180 | - ``c = 0.285 + 0.01 \cdot i``
181 | 
182 |
183 | - ``c = - 0.835 - 0.2321 \cdot i``
184 | 
185 |
186 | - ``c = -0.8 + 0.156 \cdot i``
187 | 
188 |
189 | - ``c = -0.70176 + 0.3842 \cdot i``
190 | 
191 |
192 | ## Animation
193 |
194 | !!! danger "Warning:"
195 | It takes a lot of time to create the animation below, especially when using the default [GR](https://github.com/jheinen/GR.jl) backend for the Plots package. The plotting time can be reduced by using a different backend such as the [PyPlot](https://github.com/JuliaPy/PyPlot.jl) backend.
196 |
197 | ```julia
198 | using Plots, PyPlot
199 | pyplot()
200 | ```
201 |
202 | The PyPlot package must be installed first. An alternative way is to use the [Makie](https://github.com/JuliaPlots/Makie.jl) package instead of the Plots package.
203 |
204 | We will now create an animation of the Julia sets for `c` defined as follows
205 |
206 | ```math
207 | c_k = 0.7885 \exp \{ k \cdot i \}, \qquad k \in \left [\frac{\pi}{2}, \frac{3\pi}{2} \right ].
208 | ```
209 |
210 | Firstly, we create the vector of all values `c` by combining the `range` function and broadcasting.
211 |
212 | ```julia
213 | cs = 0.7885 .* exp.(range(π/2, 3π/2; length = 500) .* im)
214 | ```
215 |
216 | Note that we use the `length` keyword to specify the length of `cs`. To create an animation, it suffices to use the `for` loop in combination with the `@animate` macro.
217 |
218 | ```julia
219 | anim = @animate for c in cs
220 | A = juliaset.(x' .+ y .* im, c, R, N)
221 | heatmap(A;
222 | c = :viridis,
223 | clims = (0, 0.15),
224 | cbar = :none,
225 | axis = :none,
226 | ticks = :none,
227 | size = (800, 600),
228 | )
229 | end
230 | gif(anim, "juliaset.gif", fps = 20) # save animation as a gif
231 | ```
232 |
233 | The code inside the loop is the same as in the previous exercise.
234 |
235 | 
236 |
--------------------------------------------------------------------------------
/docs/src/lecture_02/functions.md:
--------------------------------------------------------------------------------
1 | ## Function basics
2 |
3 | So far, we have not shown how to define functions. In Julia, a function is an object that maps a tuple of argument values to a return value. There are multiple ways to create a function, and each of them is useful in different situations. The following example shows the basic way to define a function using the `function ... end` syntax
4 |
5 | ```jldoctest functions; output = false
6 | function quadratic(x::Real; a::Real = 1, b::Real = 1, c::Real = 1)
7 | value = a*x^2 + b*x + c
8 |     deriv = 2*a*x + b
9 |
10 | return value, deriv
11 | end
12 |
13 | # output
14 | quadratic (generic function with 1 method)
15 | ```
16 |
17 | The function definition consists of multiple parts:
18 |
19 | - keywords `function` and `end`
20 | - function name `quadratic`
21 | - positional argument `x` with type annotation
22 | - separator `;` of positional and keyword arguments
23 | - keyword arguments `a`, `b` and `c` with type annotations and default values
24 | - actual code that computes the output values
25 | - `return` keyword that specifies the function output, followed by a comma-separated list of output values
26 |
27 | Note that not all parts of the function definition above are mandatory. The following parts are optional:
28 |
29 | - type annotations
30 | - separator of positional and keyword arguments
31 | - `return` keyword
32 |
33 | However, we highly recommend using these optional features when writing your own functions, since they improve code readability and prevent unwanted errors.
34 |
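Since the function returns multiple values as a tuple, the result can be unpacked directly (a quick check using the definition above):

```jldoctest functions
julia> val, der = quadratic(2)
(7, 5)

julia> val
7
```
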
35 | !!! warning "Exercise:"
36 | Test how the function behaves with different input values. Test what happens when the input is not a real number, i.e. `x = "a"`.
37 |
38 | !!! details "Solution:"
39 | Positional arguments do not need to be named:
40 | ```jldoctest functions
41 | julia> quadratic(2)
42 | (7, 5)
43 | ```
44 |
45 |     but keyword arguments must be passed with their names. The order in which they are passed does not matter.
46 | ```jldoctest functions
47 | julia> quadratic(2; b=3, a=1)
48 | (11, 7)
49 | ```
50 |
51 |     Since the function has a type annotation, calling it with a string throws a `MethodError`: there is no method accepting a `String`, because `x` must be of type `Real`.
52 | ```jldoctest functions
53 | julia> quadratic("a")
54 | ERROR: MethodError: no method matching quadratic(::String)
55 |
56 | Closest candidates are:
57 | quadratic(!Matched::Real; a, b, c)
58 | @ Main none:1
59 |
60 | Stacktrace:
61 | [1] top-level scope
62 | @ none:1
63 | ```
64 |
65 |     This is different from the case when we define the function without the type annotation:
66 |
67 | ```jldoctest functions; output = false
68 | function quadratic2(x; a = 1, b = 1, c = 1)
69 | value = a*x^2 + b*x + c
70 |     deriv = 2*a*x + b
71 |
72 | return value, deriv
73 | end
74 |
75 | # output
76 | quadratic2 (generic function with 1 method)
77 | ```
78 |
79 | which returns a different error when calling for `x = "a"`:
80 |
81 | ```jldoctest functions
82 | julia> quadratic2("a")
83 | ERROR: MethodError: no method matching *(::Int64, ::String)
84 |
85 | Closest candidates are:
86 | *(::Any, ::Any, !Matched::Any, !Matched::Any...)
87 | @ Base operators.jl:587
88 | *(::Real, !Matched::Complex{Bool})
89 | @ Base complex.jl:327
90 | *(!Matched::Missing, ::Union{AbstractChar, AbstractString})
91 | @ Base missing.jl:184
92 | ...
93 |
94 | Stacktrace:
95 | [1] quadratic2(x::String; a::Int64, b::Int64, c::Int64)
96 | @ Main ./none:2
97 | [2] quadratic2(x::String)
98 | @ Main ./none:1
99 | [3] top-level scope
100 | @ none:1
101 | ```
102 |
103 | We will discuss the function declaration in more detail [later](@ref Functions).
--------------------------------------------------------------------------------
/docs/src/lecture_02/juliaset.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/lecture_02/juliaset.gif
--------------------------------------------------------------------------------
/docs/src/lecture_02/juliasetalg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/lecture_02/juliasetalg.png
--------------------------------------------------------------------------------
/docs/src/lecture_02/scope.md:
--------------------------------------------------------------------------------
1 | # Soft local scope
2 |
3 | The scope of a variable is the region of code where the variable is visible. There are two main types of scopes in Julia: **global** and **local**. We will discuss them in more detail [later](@ref Scope-of-variables). In this section, we only focus on loops.
4 |
5 | Every variable created inside a loop is local, i.e., it is possible to use it only inside the loop.
6 |
7 | ```jldoctest
8 | julia> for i in 1:2
9 | t = 1 + i
10 | @show t
11 | end
12 | t = 2
13 | t = 3
14 |
15 | julia> t
16 | ERROR: UndefVarError: `t` not defined
17 | ```
18 |
19 | The variable `i` in the example above is also local. A similar behaviour happens in nested loops:
20 |
21 | ```jldoctest
22 | julia> for j in 1:5
23 | for i in 1:2
24 | @show i + j
25 | end
26 | i
27 | end
28 | i + j = 2
29 | i + j = 3
30 | ERROR: UndefVarError: `i` not defined
31 | [...]
32 | ```
33 |
34 | Variable `j` is a local variable defined in the outer loop. This means that it is visible inside the inner loop and can be used there. On the other hand, variable `i` is a local variable from the inner loop and cannot be accessed in the outer loop.
35 |
36 | What happens if we use variables from the global scope inside loops? In this case, it depends on whether the loop is created in an *interactive* context (REPL, Jupyter notebook) or a *non-interactive* context (file, eval). In the interactive case (in the REPL in our case), global variables can be accessed and modified in local scopes without any restrictions.
37 |
38 | ```jldoctest
39 | julia> s = 0
40 | 0
41 |
42 | julia> for i = 1:10
43 | t = 1 + i # new local variable t
44 | s = t # assign a new value to the global variable
45 | end
46 |
47 | julia> s
48 | 11
49 | ```
50 |
51 | In this case, if we want to assign a value to a variable, there are two possibilities:
52 | - Variable `t`: there is no global variable with the same name. A new local variable is created.
53 | - Variable `s`: there is a global variable with the same name. A new value is assigned to the global variable.
54 |
55 | However, in the non-interactive case, the variables behave differently. In the following example, we create a Julia code as a string and then evaluate it using the `include_string` function.
56 |
57 | ```jldoctest
58 | julia> code = """
59 | s = 0
60 | for i = 1:10
61 | t = 1 + i # new local variable t
62 | s = t # new local variable s and warning
63 | end
64 | s
65 | """;
66 |
67 | julia> include_string(Main, code)
68 | ┌ Warning: Assignment to `s` in soft scope is ambiguous because a global variable by the same name exists: `s` will be treated as a new local. Disambiguate by using `local s` to suppress this warning or `global s` to assign to the existing global variable.
69 | └ @ string:4
70 | 0
71 | ```
72 |
73 | In this case, if we want to assign a value to a variable inside a loop, there are two possibilities:
74 | - Variable `t`: there is no global variable with the same name. A new local variable is created.
75 | - Variable `s`: there is a global variable with the same name. The assignment in the soft scope is ambiguous, and a new local variable is created.
76 |
77 | In our example, the variable `s` is defined before the loop as global. In the loop, we get a warning that the assignment to `s` in soft scope is ambiguous, and a new local variable `s` is created instead. The behaviour described above can be changed by specifying that variable `s` is `local`.
78 |
79 | ```jldoctest softscope; output = false
80 | code_local = """
81 | s = 0
82 | for i = 1:10
83 | t = 1 + i # new local variable t
84 | local s = t # assigning a new value to the local variable
85 | end
86 | s
87 | """
88 |
89 | # output
90 | "s = 0\nfor i = 1:10\n t = 1 + i # new local variable t\n local s = t # assigning a new value to the local variable\nend\ns\n"
91 | ```
92 |
93 | Another option is to specify that the variable `s` is `global`.
94 |
95 | ```jldoctest softscope; output = false
96 | code_global = """
97 | s = 0
98 | for i = 1:10
99 | t = 1 + i # new local variable t
100 | global s = t # assigning a new value to the global variable
101 | end
102 | s
103 | """
104 |
105 | # output
106 | "s = 0\nfor i = 1:10\n t = 1 + i # new local variable t\n global s = t # assigning a new value to the global variable\nend\ns\n"
107 | ```
108 |
109 | When we evaluate the strings, no warning is produced.
110 |
111 | ```jldoctest softscope
112 | julia> include_string(Main, code_global)
113 | 11
114 |
115 | julia> include_string(Main, code_local)
116 | 0
117 | ```
118 |
119 | There are two obvious questions:
120 | 1. Why does it not work like in the REPL everywhere?
121 | 2. Why does it not work like in files everywhere? In other words, why is a warning printed instead of silently treating `s` as local?
122 |
123 | Since the behaviour in the REPL approximates the behaviour inside a function body, it has the advantage of being intuitive and convenient. In particular, it makes it easy to move code back and forth between a function body and the REPL when trying to debug a function. However, it may easily lead to confusion and errors, especially if the code is long or split into multiple files. The intent of the following code is obvious: we want to modify the existing global variable `s` inside the loop.
124 |
125 | ```julia
126 | s = 0
127 | for i = 1:10
128 | s += i
129 | end
130 | ```
131 |
132 | However, real code is usually more complicated. Consider the following example:
133 |
134 | ```julia
135 | x = 200
136 |
137 | # much later, maybe in a different file
138 |
139 | for i = 1:10
140 | x = 1000
141 | println(x)
142 | end
143 |
144 | # much later, maybe in yet another file
145 |
146 | y = x + 234
147 | ```
148 |
149 | It is not clear what should happen here. Should the variable `x` inside the loop be considered local or global? If it is local inside the loop, then the variable `y` will be `434`. On the other hand, if it is global inside the loop, then we assign a new value to it, and the variable `y` will be `1234`. Because the same variable name is used in different scopes, we can accidentally change a variable's value and get incorrect results. Such mistakes are difficult to track down, since the code contains no error. To help users, Julia prints a warning about the ambiguity in such cases, and the ambiguity can be resolved explicitly, as sketched below. For more information, see the official [documentation](https://docs.julialang.org/en/v1/manual/variables-and-scoping/).
150 |
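A minimal sketch of the explicit fix in non-interactive code: marking the assignment as `global` states the intent directly.

```julia
x = 200

for i = 1:10
    global x = 1000  # explicitly assign to the global variable x
    println(x)
end

y = x + 234  # now unambiguously 1234
```
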
--------------------------------------------------------------------------------
/docs/src/lecture_03/DataFrames.md:
--------------------------------------------------------------------------------
1 | # DataFrames.jl
2 |
3 | [DataFrames](https://dataframes.juliadata.org/stable/) is a package that provides a set of tools for working with tabular data. Its design and functionality are similar to [pandas](https://pandas.pydata.org/) (in Python), `data.frame`, `data.table` and `dplyr` (in R), or `table` (in MATLAB). This makes it a great general-purpose data science tool, especially for people coming to Julia from other languages.
4 |
5 | ```@setup dfbasics
6 | using CSV
7 | using DataFrames
8 | ```
9 |
10 | The core of the package is the `DataFrame` structure that represents a data table. The simplest way of constructing a `DataFrame` is to pass column vectors using keyword arguments or pairs.
11 |
12 | ```@example dfbasics
13 | using DataFrames
14 | df = DataFrame(A = 1:4, B = ["M", "F", "F", "M"], C = rand(4))
15 | ```
16 |
17 | Since each column is stored in a `DataFrame` as a separate vector, it is possible to combine columns of different element types. Columns can be accessed directly without copying.
18 |
19 | ```@repl dfbasics
20 | df.A
21 | ```
22 |
23 | Another way is to use the indexing syntax similar to the one for arrays.
24 |
25 | ```@repl dfbasics
26 | df[!, :A]
27 | ```
28 |
29 | We use `!` to select all rows. This returns the underlying column vector without copying. If we use `:` instead, we get a copy of the column. Since vectors are mutable structures and accessing a column of a `DataFrame` via `!` does not make a copy, it is possible to change elements of the `DataFrame`.
30 |
31 | ```@example dfbasics
32 | df.A[1] = 5
33 | df
34 | ```
35 |
36 | On the other hand, the `:` creates a copy, which will not change the original `DataFrame`.
37 |
38 | ```@example dfbasics
39 | col = df[:, :A]
40 | col[1] = 4
41 | df
42 | ```
43 |
44 | !!! info "Column names:"
45 |     DataFrames allows using symbols (like `:A`) and strings (like `"A"`) for all column indexing operations. Using symbols is slightly faster and should generally be preferred. One exception is when column names are generated by string manipulation, as illustrated below.
46 |
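For instance, both indexing styles select the same column, and the string form is convenient when names are constructed programmatically (a small sketch):

```@repl dfbasics
df[!, :A] === df[!, "A"]
name = "A"
df[!, name]
```
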
47 | The standard format for storing table data is the `csv` file format. The [CSV](https://github.com/JuliaData/CSV.jl) package provides an interface for saving and loading `csv` files.
48 |
49 | ```@example dfbasics
50 | using CSV
51 | CSV.write("dataframe.csv", df)
52 | table = CSV.read("dataframe.csv", DataFrame; header = true)
53 | ```
54 |
55 | See the package [documentation](https://csv.juliadata.org/stable/) for more information.
56 |
57 | ## Adding columns and rows
58 |
59 | It is common for tables to be created column by column or row by row. `DataFrame`s provide an easy way to extend existing tables. We can add new columns to a `DataFrame` directly.
60 |
61 | ```@example dfbasics
62 | df.D = [:a, :b, :c, :d]
63 | df
64 | ```
65 |
66 | Alternatively, we can use the `insertcols!` function. This function can insert multiple columns at once and also provides advanced options for column manipulation. For example, we can specify the column index into which the columns are to be inserted.
67 |
68 | ```@example dfbasics
69 | insertcols!(df, 3, :B => rand(4), :B => 11:14; makeunique = true)
70 | ```
71 |
72 | New rows can be added to an existing `DataFrame` by the `push!` function. It is possible to append a new row in the form of a vector or a tuple of the correct length, or in the form of a dictionary or a `DataFrame` with the correct column names.
73 |
74 | ```@example dfbasics
75 | push!(df, [10, "F", 0.1, 15, 0.235, :f])
76 | push!(df, (10, "F", 0.1, 15, 0.235, :f))
77 | push!(df, Dict(:B_1 => 0.1, :B_2 => 15, :A => 10, :D => :f, :B => "F", :C => 0.235))
78 | df
79 | ```
80 |
81 | It is also possible to start with an empty `DataFrame` and build the table incrementally.
82 |
83 | ```@example dfbasics_empty
84 | using DataFrames
85 | df_empty = DataFrame()
86 | df_empty.A = 1:3
87 | df_empty.B = [:a, :b, :c]
88 | df_empty
89 | ```
90 |
91 | However, this approach will not work if the `DataFrame` is created row by row. In this case, the `DataFrame` must be initialized with empty columns of appropriate element types.
92 |
93 | ```@example dfbasics_empty
94 | df_empty = DataFrame(A = Int[], B = Symbol[])
95 | push!(df_empty, [1, :a])
96 | push!(df_empty, (2, :b))
97 | push!(df_empty, Dict(:A => 3, :B => :c))
98 | df_empty
99 | ```
100 |
101 | ## Renaming
102 |
103 | Two functions can be used to rename columns. The `names` function returns column names as a vector of strings, while the `propertynames` function returns a vector of symbols.
104 |
105 | ```@repl dfbasics
106 | names(df)
107 | propertynames(df)
108 | ```
109 |
110 | We use the `rename!` function to change column names. This function can be used to rename all columns at once.
111 |
112 | ```@example dfbasics
113 | rename!(df, [:a, :b, :c, :d, :e, :f])
114 | df
115 | ```
116 |
117 | Another option is to rename only some of the columns specified by their names.
118 |
119 | ```@example dfbasics
120 | rename!(df, :a => :A, :f => :F)
121 | df
122 | ```
123 |
124 | It is also possible to use a function to generate column names.
125 |
126 | ```@example dfbasics
127 | myname(x) = string("column_", uppercase(x))
128 | rename!(myname, df)
129 | df
130 | ```
131 |
132 | ## Working with `DataFrame`s
133 |
134 | ```@setup dfwork
135 | using DataFrames
136 | using RDatasets
137 | ```
138 |
139 | In the next part of the lecture, we will use the [RDatasets](https://github.com/JuliaStats/RDatasets.jl) package. The package provides an easy way for Julia users to use many standard datasets available in the core of the R programming language. We will use the [Iris dataset](https://en.wikipedia.org/wiki/Iris_flower_data_set).
140 |
141 | ```@example dfwork
142 | using RDatasets, DataFrames
143 | iris = dataset("datasets", "iris")
144 | first(iris, 6)
145 | ```
146 |
147 | We use the `first` function to print the first `n = 6` rows of a table. Similarly, the `last` function shows the last `n` rows. When working with a new dataset, it is helpful to get a basic description. DataFrames provides the `describe` function that returns descriptive statistics for each column.
148 |
149 | ```@example dfwork
150 | describe(iris)
151 | ```
152 |
153 | We can use the indexing syntax to get a specific subset of a `DataFrame`.
154 |
155 | ```@example dfwork
156 | iris[2:4, [:SepalLength, :Species]]
157 | ```
158 |
159 | Additionally, DataFrames provides `Not`, `Between`, `Cols` and `All` selectors for more complex column selection scenarios.
160 |
161 | ```@example dfwork
162 | iris[2:4, Not([:SepalLength, :Species])]
163 | ```
164 |
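The other selectors work similarly; for instance, `Between` selects a contiguous block of columns (a small sketch):

```@example dfwork
iris[2:4, Between(:SepalWidth, :PetalWidth)]
```
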
165 | The [Query](https://github.com/queryverse/Query.jl) package allows for advanced manipulation of `DataFrame`s. The code below selects only the rows with `SepalLength >= 6` and `SepalWidth >= 3.4`. Then we create a new `DataFrame`, where for each of the selected rows, we store the species, the sum of sepal length and width, and the sum of petal length and width.
166 |
167 | ```@example dfwork
168 | using Query
169 |
170 | table = @from row in iris begin
171 | @where row.SepalLength >= 6 && row.SepalWidth >= 3.4
172 | @select {
173 | row.Species,
174 | SepalSum = row.SepalLength + row.SepalWidth,
175 | PetalSum = row.PetalLength + row.PetalWidth,
176 | }
177 | @collect DataFrame
178 | end
179 | ```
180 |
181 | There are many topics related to DataFrames. However, there is not enough time to cover them all. We refer the reader to the excellent [documentation](https://dataframes.juliadata.org/stable/) with lots of examples.
182 |
183 |
184 | ## Visualizing using StatsPlots
185 |
186 | ```@setup dfplots
187 | using DataFrames
188 | using RDatasets
189 | using StatsPlots
190 | using Query
191 |
192 | iris = dataset("datasets", "iris")
193 | Core.eval(Main, :(using StatsPlots))
194 | ```
195 |
196 | The [StatsPlots](https://github.com/JuliaPlots/StatsPlots.jl) package provides recipes for plotting histograms, boxplots, and many other plots related to statistics. This package also provides the `@df` macro, which allows simple plotting of tabular data. As a simple example, we create a scatter plot of `SepalLength` and `SepalWidth` grouped by `Species`. Keyword arguments can be used in the same way as before.
197 |
198 | ```@example dfplots
199 | using StatsPlots
200 | @df iris scatter(
201 | :SepalLength,
202 | :SepalWidth;
203 | group = :Species,
204 | xlabel = "SepalLength",
205 | ylabel = "SepalWidth",
206 | marker = ([:d :h :star7], 8),
207 | )
208 | ```
209 |
210 | As another example, we mention the `marginalkde` function for plotting marginal kernel density estimations. In statistics, [kernel density estimation (KDE)](https://en.wikipedia.org/wiki/Kernel_density_estimation) is a non-parametric way to estimate the probability density function of a random variable. The `marginalkde` function can be used together with the `@df` macro.
211 |
212 | ```@example dfplots
213 | using StatsPlots: marginalkde # hide
214 | @df iris marginalkde(
215 | :SepalLength,
216 | :SepalWidth;
217 | xlabel = "SepalLength",
218 | ylabel = "SepalWidth",
219 | )
220 | ```
221 |
222 | Another example is the `corrplot` function, which shows the correlation between all variables.
223 |
224 | ```@example dfplots
225 | @df iris corrplot(
226 | cols(1:4);
227 | grid = false,
228 | nbins = 15,
229 | fillcolor = :viridis,
230 | markercolor = :viridis,
231 | )
232 | ```
233 |
234 | Because it is shorter, we use `cols(1:4)` instead of the column names.
235 |
--------------------------------------------------------------------------------
/docs/src/lecture_03/animsincos.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/lecture_03/animsincos.gif
--------------------------------------------------------------------------------
/docs/src/lecture_03/animsurf.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/lecture_03/animsurf.gif
--------------------------------------------------------------------------------
/docs/src/lecture_03/interaction.md:
--------------------------------------------------------------------------------
1 | # Interaction with other languages
2 |
3 | One of the most significant advantages of Julia is its speed. As we discussed in the section [Why julia?](@ref Why-julia?), Julia is fast out of the box without the need for any additional steps. As a result, Julia solves the so-called Two-Language problem:
4 |
5 | > Users are programming in a high-level language such as R and Python, but the performance-critical parts of the code have to be rewritten in C/Fortran for performance.
6 |
7 | Since Julia is fast enough, most of its libraries are written in pure Julia, and there is no need to use C/Fortran for performance. However, there are many high-quality, mature libraries for numerical computing already written in C and Fortran. It would be a shame if it were not possible to use them in Julia.
8 |
9 | To allow easy use of this existing code, Julia makes it simple and efficient to call C and Fortran functions. Julia has a **no boilerplate** philosophy: functions can be called directly from Julia without any glue code generation or compilation – even from the interactive prompt. This is accomplished just by making an appropriate call with the `ccall` syntax, which looks like an ordinary function call. Moreover, it is possible to pass Julia functions to native C functions that accept function pointer arguments. This section shows one example of the interaction between Julia and C. An extensive description of all provided functionality can be found in the [official manual](https://docs.julialang.org/en/v1/manual/calling-c-and-fortran-code/).
10 |
11 | The following example is taken from the official manual. Consider the situation that we want to use the `qsort` function from the standard C library. The `qsort` function sorts an array and is declared as follows.
12 |
13 | ```c
14 | void qsort(void *base, size_t nitems, size_t size,
15 | int (*compare)(const void*, const void*))
16 | ```
17 |
18 | The `base` argument is the pointer to the first element of the array to be sorted. The `nitems` argument is the number of elements in the array pointed to by `base`. The `size` argument is the size in bytes of each element in the array. Finally, `compare` is the function that compares two elements. It should return a negative integer if the first argument is less than the second, a positive integer if the first argument is greater than the second, and zero otherwise. Such a Julia function can be defined as follows.
19 |
20 | ```julia
21 | mycompare(a, b)::Cint = sign(a - b)
22 | ```
23 |
24 | Since the `qsort` function expects that the return type of the `compare` function is C `int`, we annotate the return type to be `Cint`. In order to pass this function to C, we obtain its address using the macro `@cfunction`.
25 |
26 | ```julia
27 | mycompare_c = @cfunction(mycompare, Cint, (Ref{Cdouble}, Ref{Cdouble}))
28 | ```
29 |
30 | The `@cfunction` macro requires three arguments: the Julia function, the return type, and the tuple of the input argument types. Finally, we can use the `ccall` function to call the `qsort` function.
31 |
32 | ```julia
33 | julia> A = [1.3, -2.7, 4.4, 3.1];
34 |
35 | julia> ccall(:qsort, Cvoid, (Ptr{Cdouble}, Csize_t, Csize_t, Ptr{Cvoid}),
36 | A, length(A), sizeof(eltype(A)), mycompare_c)
37 |
38 | julia> A
39 | 4-element Vector{Float64}:
40 | -2.7
41 | 1.3
42 | 3.1
43 | 4.4
44 | ```
45 |
46 | Besides C and Fortran, which can be called directly using the `ccall` function, it is possible to interact with other languages using third-party packages. The following table shows an overview of these packages.
47 |
48 | | Language | Calling from Julia | Calling Julia |
49 | | :--- | :--- | :--- |
50 | | C++ | [Cxx.jl package](https://github.com/JuliaInterop/Cxx.jl) | ??? |
51 | | Python | [PyCall.jl](https://github.com/JuliaPy/PyCall.jl) | [PyJulia](https://github.com/JuliaPy/pyjulia) |
52 | | R | [RCall.jl](https://github.com/JuliaInterop/RCall.jl) | [JuliaCall](https://github.com/Non-Contradiction/JuliaCall) |
53 | | Mathematica | [MathLink.jl](https://github.com/JuliaInterop/MathLink.jl) | [ExternalEvaluate](https://reference.wolfram.com/language/ref/ExternalEvaluate.html)|
54 | | MATLAB | [MATLAB.jl](https://github.com/JuliaInterop/MATLAB.jl) | [Mex.jl](https://github.com/jebej/Mex.jl/) |
55 | | Java | [JavaCall.jl](https://github.com/JuliaInterop/JavaCall.jl) | [JuliaCaller](https://github.com/jbytecode/juliacaller) |
56 |
57 | Moreover, other Julia packages provide a Julia interface for some well-known libraries from other languages. As an example, we can mention [ScikitLearn.jl](https://github.com/cstjean/ScikitLearn.jl), which provides an interface for the [scikit-learn](https://scikit-learn.org/stable/) library from Python, or [RDatasets.jl](https://github.com/JuliaStats/RDatasets.jl), which provides an easy way to load famous R datasets.
58 |
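As a brief illustration of the table above, calling into Python via PyCall might look as follows (a minimal sketch, assuming PyCall is installed and a Python interpreter is available):

```julia
using PyCall

math = pyimport("math")  # import Python's math module
math.sin(math.pi / 4)    # call a Python function directly from Julia
```
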
59 | ## RCall.jl
60 |
61 | The [RCall.jl](https://github.com/JuliaInterop/RCall.jl) package provides an interface for calling R functions from Julia and passing data between these two languages. The package provides an interactive REPL for the R language that can be accessed from the Julia REPL by typing the `$` symbol. Consequently, it is possible to easily switch between these languages and use functionality provided by both languages simultaneously.
62 |
63 | ```julia
64 | julia> using RCall, RDatasets
65 |
66 | julia> mtcars = dataset("datasets", "mtcars");
67 |
68 | R> library(ggplot2)
69 |
70 | R> ggplot($mtcars, aes(x = WT, y = MPG)) + geom_point()
71 | ```
72 |
73 | 
74 |
75 | The package also provides string syntax that allows non-interactive usage. The previous example can be rewritten as follows.
76 |
77 | ```julia
78 | using RCall, RDatasets
79 | mtcars = dataset("datasets", "mtcars");
80 |
81 | R"""
82 | library(ggplot2)
83 | ggplot($mtcars, aes(x = WT, y = MPG)) + geom_point()
84 | """
85 | ```
86 |
87 | Note that we use multiline string syntax, but it is also possible to use standard string syntax. This multiline string syntax is very useful, especially when we want to perform multiple operations in R at once and then just return the result to Julia.
88 |
89 | ## MATLAB.jl
90 |
91 | The [MATLAB.jl](https://github.com/JuliaInterop/MATLAB.jl) package provides an easy interface for calling Matlab functions and passing data between Julia and Matlab. Consider the situation where you have written a Matlab function that uses some special functionality not available in Julia. The MATLAB.jl package provides an interface to call this function directly from Julia, as can be seen in the following example.
92 |
93 | ```julia
94 | using MATLAB, BSON
95 |
96 | X = BSON.load("data.bson")[:X]
97 | mxcall(:MakeVideo, 0, X, "video.gif")
98 | ```
99 |
100 | The `mxcall` function accepts the name of the Matlab function as the first argument and the number of output arguments of that function as the second argument. All other inputs to the `mxcall` function are the input arguments of the Matlab function. The result is the following animation.
101 |
102 | 
103 |
104 | Like the RCall.jl package, the MATLAB.jl package also provides string syntax for writing Matlab code directly. The previous example can be rewritten as follows.
105 |
106 | ```julia
107 | using MATLAB, BSON
108 |
109 | X = BSON.load("data.bson")[:X]
110 | mat"""
111 | MakeVideo($(X), 30, "Video2.gif");
112 | """
113 | ```
114 |
--------------------------------------------------------------------------------
/docs/src/lecture_03/otherpackages.md:
--------------------------------------------------------------------------------
1 | ## Distributions.jl
2 |
3 | ```@setup distr
4 | using Distributions
5 | using StatsPlots
6 | ```
7 |
8 | The [Distributions](https://github.com/JuliaStats/Distributions.jl) package provides an extensive collection of probabilistic distributions and related functions. Each distribution is defined as a custom type, which allows creating instances of distributions.
9 |
10 | ```@repl distr
11 | using Distributions
12 | D = Normal(2, 0.5)
13 | ```
14 |
15 | The example above creates the [normal distribution](https://en.wikipedia.org/wiki/Normal_distribution) with mean `μ = 2` and standard deviation `σ = 0.5`. The `Distributions` package provides functions to compute mean, variance, or quantiles.
16 |
17 | ```@repl distr
18 | mean(D)
19 | var(D)
20 | quantile(D, 0.9)
21 | ```
22 |
23 | The package also provides ways to evaluate probability density or cumulative distribution functions.
24 |
25 | ```@repl distr
26 | pdf(D, 2)
27 | cdf(D, 2)
28 | ```
29 |
30 | In combination with the `StatsPlots` package, it is possible to plot probability density functions.
31 |
32 | ```@example distr
33 | using StatsPlots
34 | plot(
35 | plot(D; title = "pdf"),
36 | plot(D; func = cdf, title = "cdf");
37 | legend = false,
38 | xlabel = "x",
39 | ylabel = "f(x)",
40 | ylims = (0,1),
41 | linewidth = 2,
42 | layout = (1,2),
43 | size = (800, 400)
44 | )
45 | ```
46 |
47 | The `Distributions` package also provides methods to fit a distribution to a given set of samples.
48 |
49 | ```@repl distr
50 | x = rand(Normal(2, 0.5), 10000); # generate 10000 random numbers from Normal(2, 0.5)
51 | D = fit(Normal, x)
52 | ```
53 |
54 | The `fit` function chooses a reasonable way to fit the distribution, which, in most cases, is maximum likelihood estimation. However, this is not supported for all distributions. We can quickly check the fit by using a histogram.
55 |
56 | ```@example distr
57 | histogram(x; normalize = :pdf, legend = false, opacity = 0.5)
58 | plot!(D; linewidth = 2, xlabel = "x", ylabel = "pdf(x)")
59 | ```
60 |
61 | !!! warning "Exercise:"
62 | Create a figure that shows the gamma distributions with the following parameters: `(2, 2)`, `(9, 0.5)`, `(7.5, 1)` and `(0.5, 1)`.
63 |
64 |     **Hint:** to plot cumulative distribution functions, use the ability of Plots to plot functions.
65 |
66 | !!! details "Solution:"
67 | The easiest way to create multiple distributions is to use the broadcasting system.
68 |
69 | ```@example distr
70 | Ds = Gamma.([2, 9, 7.5, 0.5], [2, 0.5, 1, 1])
71 | nothing #hide
72 | ```
73 |
74 | Similarly, we use broadcasting to create a vector of labels.
75 |
76 | ```@example distr
77 | labels = reshape(string.("Gamma", params.(Ds)), 1, :)
78 | nothing #hide
79 | ```
80 |
81 |     We need to reshape the labels into a row vector, because we want to plot multiple distributions, and the Plots package expects the labels to be a row vector in that case. Now, we call the `plot` function to plot all distributions.
82 |
83 | ```@example distr
84 | plot(Ds;
85 | xaxis = ("x", (0, 20)),
86 | yaxis = ("pdf(x)", (0, 0.5)),
87 | labels = labels,
88 | linewidth = 2,
89 | legend = :topright,
90 | )
91 | ```
92 |
93 |     A plot of the cumulative distribution functions cannot be created in the same way. However, StatsPlots provides the `func` keyword argument that allows specifying which function should be plotted.
94 |
95 | ```@example distr
96 | plot(Ds;
97 | func = cdf,
98 | xaxis = ("x", (0, 20)),
99 | yaxis = ("cdf(x)", (0, 1.05)),
100 | labels = labels,
101 | linewidth = 2,
102 | legend = :bottomright,
103 | )
104 | ```
105 |
106 |     Another possibility is to use the Plots package directly. To do so, we need to define a function of one argument that returns the value of the cumulative distribution function at a given point. Such functions for all our distributions can be easily defined as anonymous functions.
107 |
108 | ```@example distr
109 | cdfs = [x -> cdf(D, x) for D in Ds]
110 | nothing # hide
111 | ```
112 |
113 |     The previous expression returns a vector of functions. Now we can use the `plot` function to create a curve for each element of the vector of cumulative distribution functions. The example below creates these curves for ``x`` from ``0`` to ``20``.
114 |
115 | ```@example distr
116 | plot(cdfs, 0, 20;
117 | xaxis = ("x", (0, 20)),
118 | yaxis = ("cdf(x)", (0, 1.05)),
119 | labels = labels,
120 | linewidth = 2,
121 | legend = :bottomright,
122 | )
123 |
124 | savefig("Gamma_cdf.svg") # hide
125 | ```
126 |
127 | 
128 |
129 | ## BSON.jl
130 |
131 | [BSON](https://github.com/JuliaIO/BSON.jl) is a package for working with the Binary JSON serialization format. It can be used as a general store for Julia's data structures. To save the data, BSON provides the `bson` function. The data can be passed to the function directly via keyword arguments
132 |
133 | ```@repl bson
134 | using BSON
135 | BSON.bson("test2.bson", a = [1+2im, 3+4im], b = "Hello, World!")
136 | ```
137 |
138 | or as a dictionary
139 |
140 | ```@repl bson
141 | data = Dict(:a => [1+2im, 3+4im], :b => "Hello, World!")
142 | BSON.bson("test1.bson", data)
143 | ```
144 |
145 | To load the data, BSON provides the `load` function that accepts the path to the data.
146 |
147 | ```@repl bson
148 | BSON.load("test1.bson")
149 | BSON.load("test2.bson")
150 | ```
151 |
152 | The package also provides an alternative way of saving and loading data: the `@save` and `@load` macros.
153 |
154 | ```@repl bson
155 | using BSON: @save, @load
156 |
157 | a = [1+2im, 3+4im];
158 | b = "Hello, World!";
159 |
160 | @save "test.bson" a b # Same as above
161 | @load "test.bson" a b # Loads `a` and `b` back into the workspace
162 | ```
163 |
164 | ## ProgressMeter.jl
165 |
166 | The [ProgressMeter](https://github.com/timholy/ProgressMeter.jl) package provides excellent utilities for printing progress bars for long-running computations. The `@showprogress` macro prints a progress bar for `for` loops.
167 |
168 | ```julia
169 | julia> using ProgressMeter
170 |
171 | julia> @showprogress 1 "Computing..." for i in 1:50
172 | sleep(0.1)
173 | end
174 | Computing... 20%|███████▊ | ETA: 0:00:04
175 | ```
176 |
177 | The same syntax can also be used with the `map`, `pmap` or `reduce` functions; see the sketch at the end of this section. Progress bars can also be created manually, which allows additional formatting of the output. For example, it is possible to print and update information related to the computation with the `showvalues` keyword argument.
178 |
179 | ```julia
180 | julia> x, n = 1, 10;
181 |
182 | julia> p = Progress(n);
183 |
184 | julia> for iter in 1:10
185 | x *= 2
186 | sleep(0.5)
187 | ProgressMeter.next!(p; showvalues = [(:iter, iter), (:x, x)])
188 | end
189 | Progress: 100%|█████████████████████████████████████████| Time: 0:00:10
190 | iter: 10
191 | x: 1024
192 | ```
193 |
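As mentioned above, the `@showprogress` macro can also wrap `map`-style calls directly. A minimal sketch, assuming a recent version of ProgressMeter:

```julia
using ProgressMeter

squares = @showprogress map(1:20) do i
    sleep(0.1) # simulates a long computation
    i^2
end
```
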
194 | ## BenchmarkTools.jl
195 |
196 | The [BenchmarkTools](https://github.com/JuliaCI/BenchmarkTools.jl) package provides a framework for writing and running groups of benchmarks as well as comparing benchmark results. The primary macro provided by BenchmarkTools is the `@benchmark` macro
197 |
198 | ```@repl benchmark
199 | using BenchmarkTools
200 | @benchmark sin(x) setup=(x=rand())
201 | ```
202 |
203 | The `setup` expression is run once per sample and is not included in the timing results. Another handy macro provided by the package is the `@btime` macro. The output of this macro is similar to the built-in `@time` macro.
204 |
205 | ```@repl benchmark
206 | A = rand(3,3);
207 | @btime inv($A);
208 | ```
209 |
210 | We use `$` to interpolate variable `A` into the benchmark expression. Any expression that is interpolated in such a way is "pre-computed" before the benchmarking begins.
211 |
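Interpolation matters mainly when the benchmarked expression references global variables. A minimal sketch of the difference, assuming BenchmarkTools is loaded (exact timings vary by machine):

```julia
x = rand(1000);

@btime sum(x);   # `x` is accessed as an untyped global; adds overhead
@btime sum($x);  # `x` is interpolated; only the call itself is measured
```
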
--------------------------------------------------------------------------------
/docs/src/lecture_03/standardlibrary.md:
--------------------------------------------------------------------------------
1 | # Useful packages
2 |
3 | Multiple standard packages are shipped together with Julia. These packages do not need to be installed. They include `Pkg` and all packages introduced on this page. However, we still need to load them to use them.
4 |
5 | ```julia
6 | julia> using Statistics
7 | ```
8 |
9 | ## Statistics
10 |
11 | The first package we mention is the `Statistics` package, which provides functions for statistical analysis, such as computing the mean, variance, or standard deviation.
12 |
13 | ```@repl
14 | using Statistics
15 | x = rand(10);
16 | mean(x)
17 | var(x)
18 | std(x)
19 | ```
20 |
21 | See the official [documentation](https://docs.julialang.org/en/v1/stdlib/Statistics/) for more information. More statistics-related functions can be found in the [StatsBase](https://juliastats.org/StatsBase.jl/stable/) package. This package provides functions for computing scalar statistics, high-order moments, counting, ranking, covariances, sampling, and empirical density estimation. This course dedicates one lecture to [statistics](@ref statistics).
22 |
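As a small taste of StatsBase (the package must be installed first), the following sketch uses two of its exported functions:

```julia
using StatsBase

countmap([1, 1, 2, 3])            # Dict with counts of each distinct value
sample(1:10, 3; replace = false)  # three distinct random elements
```
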
23 | ## LinearAlgebra
24 |
25 | Another package worth mentioning is the [LinearAlgebra](https://docs.julialang.org/en/v1/stdlib/LinearAlgebra/) package, which provides a native implementation of many linear algebra operations. The package provides functions for computing matrix determinant, inversion, norm, eigenvalues, or eigenvectors.
26 |
27 | ```@repl lingebra
28 | using LinearAlgebra
29 | A = [-4.0 -17.0; 2.0 2.0]
30 |
31 | det(A)
32 | inv(A)
33 | norm(A)
34 | eigvals(A)
35 | eigvecs(A)
36 | ```
37 |
38 | The package also provides implementations of multiple matrix types that represent matrices with special symmetries and structures, for example, `Symmetric`, `Hermitian` or `Diagonal` matrices. These special matrix types allow for fast computation because specialized algorithms can be used. Matrices of these types can be constructed via their constructors.
39 |
40 | ```@repl lingebra
41 | D = Diagonal([1,2,3])
42 | ```
43 |
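For instance, wrapping a matrix in the `Symmetric` type lets Julia dispatch to algorithms specialized for symmetric matrices. A small sketch:

```julia
using LinearAlgebra

S = Symmetric([1.0 2.0; 2.0 3.0]) # only the upper triangle is used
eigvals(S)                        # ≈ [2 - √5, 2 + √5]; uses a symmetric eigensolver
```
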
44 | Another useful feature provided by the package is the identity operator `I` representing the identity matrix. The operator `I` is defined as a constant and is an instance of `UniformScaling`. Its size is generic and matches the other matrix in the binary operations `+`, `-`, `*` and `\`.
45 |
46 | ```@repl lingebra
47 | D + I
48 | D - I
49 | ```
50 |
51 | Note that for `D+I` and `D-I`, the matrix `D` must be square.
52 |
53 | ## Random
54 |
55 | The last package that we will describe in more detail is the [Random](https://docs.julialang.org/en/v1/stdlib/Random/) package. This package provides advanced functionality for generating random numbers in Julia. It allows setting the seed of the random generator with the `seed!` function, which makes code that contains randomly generated values reproducible.
56 |
57 | ```@repl rand
58 | using Random
59 | using Random: seed!
60 |
61 | seed!(1234);
62 | rand(2)
63 | seed!(1234);
64 | rand(2)
65 | ```
66 |
67 | The `randperm` function constructs a random permutation of a given length.
68 |
69 | ```@repl rand
70 | randperm(4)
71 | ```
72 |
73 | The `shuffle` function returns a randomly permuted copy of a given array.
74 |
75 | ```@repl rand
76 | v = [1,2,3,4]
77 | shuffle(v)
78 | ```
79 |
80 | !!! info "Other useful standard packages:"
81 | There are other useful standard packages in Julia, but there is not enough space to present them all.
82 |     - `Test` provides simple unit testing functionality. Unit testing is a process that checks whether code is correct by verifying that the results are what we expect. It helps to ensure the code still works after changes. Unit tests can also be written during development to specify the expected behaviour before it is implemented. We will provide more details [later](@ref unit-testing).
83 | - `SparseArrays` provides special types to store and work with sparse arrays.
84 | - `Distributed` includes support for distributed computing.
85 |
86 | The section Standard Library in the official [documentation](https://docs.julialang.org/en/v1/) provides more information.
87 |
--------------------------------------------------------------------------------
/docs/src/lecture_04/exceptions.md:
--------------------------------------------------------------------------------
1 | # Exception handling
2 |
3 | Unexpected behaviour may occur while running code, leading to situations where a function cannot return a reasonable value. Such behaviour should be handled by either terminating the program with a proper diagnostic error message or by allowing the code to take appropriate action.
4 |
5 | In the following example, we define a factorial function in the same way as we did in the [Short-circuit evaluation](@ref Short-circuit-evaluation) section.
6 |
7 | ```jldoctest exceptions; output = false
8 | function fact(n)
9 | isinteger(n) && n >= 0 || error("argument must be non-negative integer")
10 | return n == 0 ? 1 : n * fact(n - 1)
11 | end
12 |
13 | # output
14 | fact (generic function with 1 method)
15 | ```
16 |
17 | We use the `error` function, which throws the `ErrorException` if the input argument does not meet the given conditions. This function works quite well and returns a reasonable error message for incorrect inputs.
18 |
19 | ```jldoctest exceptions
20 | julia> fact(1.4)
21 | ERROR: argument must be non-negative integer
22 | [...]
23 |
24 | julia> fact(-5)
25 | ERROR: argument must be non-negative integer
26 | [...]
27 | ```
28 |
29 | However, error messages should be as descriptive as possible. In the case above, the message could also include the argument value. Julia provides several predefined exception types that can be used to create more descriptive error messages. In our example, we want to check whether the argument is a non-negative integer, which the more specific `DomainError` can do.
30 |
31 | ```jldoctest exceptions; output = false
32 | function fact(n)
33 | isinteger(n) && n >= 0 || throw(DomainError(n, "argument must be non-negative integer"))
34 | return n == 0 ? 1 : n * fact(n - 1)
35 | end
36 |
37 | # output
38 | fact (generic function with 1 method)
39 | ```
40 |
41 | We must use the `throw` function because `DomainError(x, msg)` only creates an instance of the `DomainError` type; it does not raise an error.
42 |
43 | ```jldoctest exceptions
44 | julia> fact(1.4)
45 | ERROR: DomainError with 1.4:
46 | argument must be non-negative integer
47 | [...]
48 |
49 | julia> fact(-5)
50 | ERROR: DomainError with -5:
51 | argument must be non-negative integer
52 | [...]
53 | ```
54 |
55 | The error message now contains a short description, the input value, and the type of exception. Now imagine that, by mistake, the `fact` function is used to calculate the factorial of a string.
56 |
57 | ```julia
58 | julia> fact("a")
59 | ERROR: MethodError: no method matching isinteger(::String)
60 | Closest candidates are:
61 | isinteger(::BigFloat) at mpfr.jl:859
62 | isinteger(::Missing) at missing.jl:100
63 | isinteger(::Integer) at number.jl:20
64 | ...
65 | Stacktrace:
66 | [1] fact(::String) at ./REPL[1]:2
67 | [2] top-level scope at REPL[2]:1
68 | ```
69 |
70 | In this case, the `MethodError` is raised for the `isinteger` function. Since the `DomainError` is never thrown, the error message says nothing about the `fact` function. We can track that the error occurred while calling the `fact` function using the `Stacktrace` section located under the error message. The `Stacktrace` provides an ordered list of the function calls (starting from the last one) that preceded the error. In this case, the last function call before the error is `fact(::String)`, which tells us that the error occurred in the `fact` function with a string as the input argument. In this particular case, it makes sense to define the factorial function only for real numbers. This can be done by specifying the input type in the function declaration.
71 |
72 | ```jldoctest exceptions; output = false
73 | function fact_new(n::Real)
74 | isinteger(n) && n >= 0 || throw(DomainError(n, "argument must be non-negative integer"))
75 |     return n == 0 ? 1 : n * fact_new(n - 1)
76 | end
77 |
78 | # output
79 | fact_new (generic function with 1 method)
80 | ```
81 |
82 | This function declaration only works for subtypes of `Real`. Otherwise, a `MethodError` occurs.
83 |
84 | ```jldoctest exceptions
85 | julia> fact_new("aaa")
86 | ERROR: MethodError: no method matching fact_new(::String)
87 | [...]
88 | ```
89 |
90 | The `MethodError` provides two important pieces of information. First, it states that the `fact_new` function is not defined for arguments of type `String`. Second, it shows the list of methods closest to the one we called. In this case, the `fact_new` function has only one method, which works for any subtype of `Real`. This can be verified by using the `methods` function.
91 |
92 | ```jldoctest exceptions
93 | julia> methods(fact_new)
94 | # 1 method for generic function "fact_new" from Main:
95 | [1] fact_new(n::Real)
96 | @ none:1
97 | ```
98 |
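Exceptions do not have to terminate the program; the caller can handle them with the `try`/`catch` construct. A minimal sketch (the `val` and `msg` fields are part of `DomainError`):

```julia
try
    fact_new(-5)
catch e
    if e isa DomainError
        println("invalid input ", e.val, ": ", e.msg)
    else
        rethrow(e)
    end
end
```
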
99 | A more precise description and a list of all predefined exception types can be found in the official [documentation](https://docs.julialang.org/en/v1/manual/control-flow/#Exception-Handling).
100 |
--------------------------------------------------------------------------------
/docs/src/lecture_04/exercises.md:
--------------------------------------------------------------------------------
1 | # Conway's Game of Life
2 |
3 | The [Game of Life](https://en.wikipedia.org/wiki/Conway%27s_Game_of_Life) is a cellular automaton devised by the British mathematician John Horton Conway in 1970. It is a zero-player game, meaning that its evolution is determined by its initial state, requiring no further input. One only interacts with the Game of Life by creating an initial configuration.
4 |
5 | The universe of the Game of Life is an infinite, two-dimensional orthogonal grid of square cells, each of which is in one of two possible states: live or dead. Every cell interacts with its eight neighbours. The game evolves in discrete time steps; at each step, the following transitions occur:
6 |
7 | 1. Any live cell with exactly two or three live neighbours survives.
8 | 2. Any dead cell with exactly three live neighbours becomes a live cell.
9 | 3. All other live cells die in the next generation. All other dead cells stay dead.
10 |
11 | The first generation must be initialized. Every new generation is created by applying the above rules simultaneously to every cell of the previous generation; births and deaths occur simultaneously. The moment when this happens is called a tick. Since every generation depends only on the previous one, this process is a [Markov chain](https://en.wikipedia.org/wiki/Markov_chain).
12 |
13 | The following few exercises will implement the Game of Life. We will consider a finite universe with periodic boundary conditions.
14 |
15 | !!! warning "Exercise:"
16 |     Write a function `neighbours` that returns the number of live neighbours of a cell. The function should accept the `world` matrix of boolean values representing the states of all cells (`true` if the cell is alive and `false` otherwise) and the row and column indices of the cell.
17 |
18 | **Hint:** use the following properties of the `mod1` function to implement periodic boundaries.
19 |
20 | ```@repl
21 | mod1(1, 4)
22 | mod1(4, 4)
23 | mod1(5, 4)
24 | ```
25 |
26 |     **Bonus:** implement a more general function which computes the number of alive cells in a neighbourhood of a given size.
27 |
28 | !!! details "Solution:"
29 | One way to define the `neighbours` function is to check all neighbours manually.
30 |
31 | ```julia
32 | function neighbours(world, row, col)
33 | n, m = size(world)
34 |
35 | # this implements periodic boundaries
36 | down = mod1(row + 1, n)
37 | up = mod1(row - 1, n)
38 | left = mod1(col - 1, m)
39 | right = mod1(col + 1, m)
40 |
41 |         return (world[up, left]   + world[up, col]   + world[up, right]
42 |                 + world[row, left]                   + world[row, right]
43 |                 + world[down, left] + world[down, col] + world[down, right])
44 | end
45 | ```
46 |
47 |     The approach above cannot easily be extended to a general version of the `neighbours` function. In this case, we can use nested loops. First, we compute the proper row indices by combining `range` with the `mod1` function.
48 |
49 | ```julia
50 | rows = mod1.(row .+ (-r:r), size(world, 1))
51 | ```
52 |
53 |     Column indices can be computed similarly. Then we iterate over both rows and columns. Since the iteration includes the middle cell, we need to subtract its state.
54 |
55 | ```julia
56 | function neighbours(world, row, col; r = 1)
57 | rows = mod1.(row .+ (-r:r), size(world, 1))
58 | cols = mod1.(col .+ (-r:r), size(world, 2))
59 |
60 | return sum(world[i, j] for i in rows, j in cols) - world[row, col]
61 | end
62 | ```
63 |
64 | !!! warning "Exercise:"
65 |     Add a new method to the `neighbours` function that, for the `world` matrix, returns a matrix containing the numbers of living neighbours.
66 |
67 | !!! details "Solution:"
68 |     In the exercise above, we created a function that computes the number of living neighbours. One way to create a matrix with the numbers of living neighbours is:
69 |
70 | ```julia
71 | function neighbours(world)
72 | n, m = size(world)
73 | return [neighbours(world, row, col) for row in 1:n, col in 1:m]
74 | end
75 | ```
76 |
77 | This is an example of multiple dispatch. The function `neighbours` can have both one and three input arguments.
78 |
79 | !!! warning "Exercise:"
80 |     Write a function `willsurvive` that returns `true` if the cell survives based on the conditions described at the beginning of the section and `false` otherwise. This function should accept two arguments: the state of the cell (`true`/`false`) and the number of living neighbours.
81 |
82 | !!! details "Solution:"
83 | This function can be written using the `if-elseif-else` statement. Since `cell` is a boolean value, we do not need to compare with one as in `cell == 1`.
84 |
85 | ```julia
86 | function willsurvive(cell, k)
87 | if k == 3
88 | return true
89 | elseif k == 2 && cell
90 | return true
91 | else
92 | return false
93 | end
94 | end
95 | ```
96 |
97 |     We can write this function in a simpler form: short-circuit evaluation can merge the first two conditions, and since the function only returns `true` or `false`, we can write it on one line.
98 |
99 | ```julia
100 | willsurvive(cell, k) = k == 3 || k == 2 && cell
101 | ```
102 |
103 | !!! warning "Exercise:"
104 | Combine these functions to write a function `evolve!` that evolves the given `world` matrix into a new generation.
105 |
106 | !!! details "Solution:"
107 |     We first compute the matrix with the numbers of living neighbours. Then we iterate over all elements of the `world` matrix and compute their new states with the `willsurvive` function. Since we computed the numbers of living neighbours beforehand, we can safely overwrite the `world` matrix.
108 |
109 | ```julia
110 | function evolve!(world)
111 | ks = neighbours(world)
112 | for i in eachindex(world)
113 | world[i] = willsurvive(world[i], ks[i])
114 | end
115 | return
116 | end
117 | ```
118 |
119 | In the four exercises above, we defined functions sufficient to animate the Game of Life. Use the following code to initialize the `world`.
120 |
121 | ```julia
122 | world = zeros(Bool, 30, 30)
123 | row, col = 15, 15
124 |
125 | world[row, col] = 1
126 | world[row, col + 1] = 1
127 | world[row - 1, col + 6] = 1
128 | world[row + 1, col + 1] = 1
129 | world[row + 1, col + 5] = 1
130 | world[row + 1, col + 6] = 1
131 | world[row + 1, col + 7] = 1
132 | ```
133 |
134 | We use the Plots package introduced in the previous [lecture](@ref Julia-set) to create animations.
135 |
136 | ```julia
137 | using Plots
138 |
139 | anim = @animate for i in 1:150
140 | heatmap(world; axis = nothing, border = :none, cbar = false, ratio = :equal)
141 | evolve!(world)
142 | end
143 | gif(anim, "gameoflife.gif"; fps = 10)
144 | ```
145 |
146 | 
147 |
148 | Many different types of patterns occur in the Game of Life. For example, the following initialization is called a pulsar.
149 |
150 | ```julia
151 | world = zeros(Bool, 17, 17)
152 | line = zeros(17)
153 | line[5:7] .= 1
154 | line[11:13] .= 1
155 |
156 | for ind in [3,8,10,15]
157 | world[ind, :] .= line
158 | world[:, ind] .= line
159 | end
160 | ```
161 |
162 | 
163 |
--------------------------------------------------------------------------------
/docs/src/lecture_04/gameoflife.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/lecture_04/gameoflife.gif
--------------------------------------------------------------------------------
/docs/src/lecture_04/gameoflife_pulsar.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/lecture_04/gameoflife_pulsar.gif
--------------------------------------------------------------------------------
/docs/src/lecture_04/scope.md:
--------------------------------------------------------------------------------
1 | # Scope of variables
2 |
3 | The scope of a variable is the region of code where the variable is visible. There are two main scopes in Julia: **global** and **local**. The global scope can contain multiple local scope blocks, and local scope blocks can be nested. Julia also distinguishes between constructs that introduce a *hard scope* and those that introduce only a *soft scope*; this affects whether shadowing a global variable of the same name is allowed.
4 |
5 | The following table shows constructs that introduce scope blocks.
6 |
7 | | Construct | Scope type | Allowed within local|
8 | | :-- | :-- | :-: |
9 | | `module`, `baremodule` | global | ✗ |
10 | | `struct` | local (soft) | ✗ |
11 | | `macro` | local (hard) | ✗ |
12 | | `for`, `while`, `try` | local (soft) | ✔ |
13 | | `let`, `functions`, `comprehensions`, `generators` | local (hard) | ✔ |
14 |
15 | This table contains several constructs which we have not introduced yet. Modules and structures will be discussed later in the course. The rest is described in the official [documentation](https://docs.julialang.org/).
16 |
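The difference between soft and hard scope shows up when a block assigns to a variable that already exists in the global scope. A small illustration:

```julia
s = 0
for i in 1:3
    s += i # soft scope: in the REPL, this updates the global `s`
end
s # 6 in the REPL; in a script, Julia warns and treats `s` as a new local
```
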
17 | ## Local scope
18 |
19 | A function declaration introduces a new (hard) local scope. It means that all variables defined inside a function body can be accessed and modified inside the function body. Moreover, it is impossible to access these variables from outside the function.
20 |
21 | ```jldoctest local
22 | julia> function f()
23 | z = 42
24 | return
25 | end
26 | f (generic function with 1 method)
27 |
28 | julia> f()
29 |
30 | julia> z
31 | ERROR: UndefVarError: `z` not defined
32 | ```
33 |
34 | Thanks to this property, we can use the names most suitable for our variables (`i`, `x`, `y`, etc.) without the risk of clashing with declarations elsewhere. It is possible to declare a global variable inside a function with the `global` keyword.
35 |
36 | ```jldoctest
37 | julia> function f()
38 | global z = 42
39 | return
40 | end
41 | f (generic function with 1 method)
42 |
43 | julia> f()
44 |
45 | julia> z
46 | 42
47 | ```
48 |
49 | However, this is not recommended. If we need a variable defined inside a function, we should probably return it as an output of the function.
50 |
51 | ```jldoctest
52 | julia> function f()
53 | z = 42
54 | return z
55 | end
56 | f (generic function with 1 method)
57 |
58 | julia> z = f()
59 | 42
60 |
61 | julia> z
62 | 42
63 | ```
64 |
65 | In the example above, the `z` variable in the function is local, and the `z` variable outside of the function is global. These two variables are not the same.
66 |
67 | ## Global scope
68 |
69 | Each module introduces a new global scope, separate from the global scope of all other modules. The interactive prompt (aka REPL) is in the global scope of the module `Main`.
70 |
71 | ```jldoctest global
72 | julia> module A
73 | a = 1 # a global in A's scope
74 | b = 2 # b global in A's scope
75 | end
76 | A
77 |
78 | julia> a # errors as Main's global scope is separate from A's
79 | ERROR: UndefVarError: `a` not defined
80 | ```
81 |
82 | Modules can introduce variables of other modules into their scope through the `using` (or `import`) keyword. Variables can be accessed by the dot-notation.
83 |
84 | ```jldoctest global
85 | julia> using .A: b # make variable b from module A available
86 |
87 | julia> A.a
88 | 1
89 |
90 | julia> b
91 | 2
92 | ```
93 |
94 | While variables can be read externally, they can only be changed within the module they belong to.
95 |
96 | ```jldoctest global
97 | julia> b = 4
98 | ERROR: cannot assign a value to imported variable b
99 | [...]
100 | ```
101 |
102 | Global scope variables can be accessed anywhere inside the global scope, even in the local scopes defined in it. In the following example, we define a variable `c` in the `Main` global scope. Then we define a function `foo`, which introduces a new local scope inside the `Main` global scope, and we use the variable `c` inside this function.
103 |
104 | ```jldoctest
105 | julia> c = 10
106 | 10
107 |
108 | julia> foo(x) = x + c
109 | foo (generic function with 1 method)
110 |
111 | julia> foo(1)
112 | 11
113 | ```
114 |
115 | However, it is not recommended to use global variables in this way. The reason is that global variables can change their type and value at any time, and therefore they cannot be properly optimized by the compiler. We can see the performance drop in a simple test.
116 |
117 | ```@repl global_test
118 | x = rand(10);
119 | y = rand(10);
120 | f_global() = x .+ y
121 | f_local(x, y) = x .+ y
122 |
123 | hcat(f_global(), f_local(x, y))
124 | ```
125 |
126 | In the example above, we defined two functions that do the same thing. The first function has no arguments and returns a sum of two global variables, `x` and `y`. The second function also returns a sum of variables `x` and `y`. However, in this case, these variables are local since they are introduced as the inputs to the function. If we use the `@time` macro, we can measure the time needed to call these two functions.
127 |
128 | ```@repl global_test
129 | @time f_global();
130 | @time f_local(x, y);
131 | ```
132 |
133 | The second function is faster and also needs fewer allocations. The reason is that when we call the `f_local` function for the first time, the function is optimized for the given arguments. Each time a function is called for the first time with new types of arguments, it is compiled. This can be seen in the following example: the first call is slower due to the compilation.
134 |
135 | ```@repl global_test
136 | a, b = 1:10, 11:20;
137 |
138 | @time f_local(a, b);
139 | @time f_local(a, b);
140 | ```
141 |
142 | On the other hand, the `f_global` function cannot be optimized because it contains two global variables, and these two variables can change at any time.
143 |
--------------------------------------------------------------------------------
/docs/src/lecture_06/modules.md:
--------------------------------------------------------------------------------
1 | # Files and modules
2 |
3 | When writing code, it is essential to organize it effectively. There are three main ways of achieving this:
4 | 1. Split code into multiple files.
5 | 2. Use modules to create global scopes.
6 | 3. Create separate packages by extracting code with general functionality.
7 | These three approaches are often used together. This lecture describes how to use them in Julia.
8 |
9 | ## Files
10 |
11 | The first and most basic approach is to split code into multiple files. Such files have to be of an appropriate type, i.e., Julia files with the `.jl` extension. These files can be loaded into the global scope by the `include` function.
12 |
13 | ```julia
14 | include("/absolute/path/to/the/file/filename.jl")
15 | include("../relative/path/to/the/file/filename.jl")
16 | ```
17 |
18 | The `include` function evaluates the source file content in the global scope of the module, where the `include` call occurs. If a file is included multiple times, it is also evaluated multiple times.
19 |
20 | Even though using separate files to organize code can be very useful, this approach also has several disadvantages. For example, since all files are evaluated in the same global scope, we have to avoid clashes of variable/function names from different files. This problem can be solved by using modules as described in the following section.
21 |
22 | !!! info "Main module:"
23 | If we run a code in the REPL, the code is evaluated in the `Main` module, which serves as the default global scope. We can check this by the `@__MODULE__` macro that returns the module in which the macro is evaluated.
24 |
25 | ```julia
26 | julia> @__MODULE__
27 | Main
28 | ```
29 |
30 | The `parentmodule` function determines the module containing the (first) definition of a generic function.
31 |
32 | ```julia
33 | julia> foo() = 1
34 | foo (generic function with 1 method)
35 |
36 | julia> parentmodule(foo)
37 | Main
38 | ```
39 |
40 | ## Modules
41 |
42 | Modules allow users to specify which data from the module is visible outside of the module. In the section [Scope of variables](@ref Scope-of-variables), we briefly mentioned that modules in Julia introduce a new global scope. In other words, modules in Julia are separate variable workspaces that provide three key features. They all help to prevent unexpected name clashes.
43 |
44 | - They define top-level definitions (global variables) without worrying about name conflicts.
45 | - They control the visibility of variables/functions/types outside of the module via exporting.
46 | - They control the visibility of variables/functions/types from other modules via importing.
47 |
48 | The following example defines the module `Points`. We create it with the `module` keyword and load the `LinearAlgebra` package by the `using` keyword. Then we use the `export` keyword to export the `Point` type and the `distance` function. Finally, we write the actual content of the module.
49 |
50 | ```@example modules
51 | module Points
52 |
53 | using LinearAlgebra
54 |
55 | export Point, distance
56 |
57 | struct Point{T <: Real}
58 | x::T
59 | y::T
60 | end
61 |
62 | coordinates(p::Point) = (p.x, p.y)
63 | Base.show(io::IO, p::Point) = print(io, coordinates(p))
64 | distance(p::Point, q::Point) = norm(coordinates(q) .- coordinates(p), 2)
65 |
66 | end
67 | nothing # hide
68 | ```
69 |
70 | Assume now that we want to load this module from a different file. Since the core of each package is a module, packages are loaded in the same way as modules. We need to specify `using Main.Points` or `using .Points` because we defined the module in the `Main` scope. If we loaded an external package `Points`, we would use `using Points`. After loading a package, we can directly access all the exported data.
71 |
72 | ```@repl modules
73 | using .Points # alternatively using Main.Points
74 |
75 | p = Point(4,2)
76 | q = Point(2,2)
77 | distance(p, q)
78 | ```
79 |
80 | It is also possible to access all non-exported functions and types. To do so, we need to specify which module they are defined in. For example, we can call the non-exported `coordinates` function by the following syntax:
81 |
82 | ```@repl modules
83 | Points.coordinates(p)
84 | Points.coordinates(q)
85 | ```
86 |
87 | When writing a module, we have to decide which functions and types we want to export. The rule of thumb is that we export only the data end-users should use.
88 |
89 | To redefine or extend an imported function, we need to specify the module. We can use the following way to redefine the `distance` function:
90 |
91 | ```@example modules
92 | using .Points: coordinates
93 |
94 | function Points.distance(p::Point, q::Point)
95 | d = sqrt(sum(abs2, coordinates(q) .- coordinates(p)))
96 | return "Distance is $d"
97 | end
98 |
99 | nothing # hide
100 | ```
101 |
102 | We can see the same syntax in the `Points` module, where we extend the `show` function from the `Base` module. We used the `using .Points: coordinates` syntax to call the `coordinates` function without specifying the module name.
103 |
104 | ```@repl modules
105 | p = Point(4,2)
106 | q = Point(2,2)
107 | distance(p, q)
108 | ```
109 |
110 | Besides the `using` keyword, Julia also provides the `import` keyword to import modules and packages. Its behaviour is slightly different; for more information, see the [official documentation](https://docs.julialang.org/en/v1/manual/modules/#Summary-of-module-usage).
111 |
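One practical difference is that a function brought into scope by `import` can be extended with new methods without the module prefix. A small sketch using the `Points` module from above:

```julia
import .Points: distance

# add a new three-point method without the `Points.` prefix
distance(p::Point, q::Point, r::Point) = distance(p, q) + distance(q, r)
```
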
112 | !!! compat "Relative and absolute module paths:"
113 | In the previous section, we added a dot before the module name in the `using` keyword. The reason is that if we import a module, the system consults an internal table of top-level modules to find the given module name. If the module does not exist, the system attempts to `require(:ModuleName)`, which typically results in loading code from an installed package.
114 |     However, if we evaluate code in the REPL, the code is evaluated in the `Main` module. Then `Points` is not a top-level module but a submodule of `Main`.
115 |
116 | ```julia
117 | julia> Points
118 | Main.Points
119 |
120 | julia> parentmodule(Points)
121 | Main
122 | ```
123 |
124 | Non-top-level modules can be loaded by both absolute and relative paths.
125 |
126 | ```julia
127 | using Main.Points
128 | using .Points
129 | ```
130 |
131 |     Adding one more leading dot moves the path one additional level up in the module hierarchy. For example, `using ..Points` would look for `Points` in the module enclosing `Main` rather than in `Main` itself.
132 |
133 | !!! compat "Modules and files:"
134 | Since modules are associated only with module expressions, files are largely unrelated to modules. One can have multiple files in a module.
135 |
136 | ```julia
137 | module MyModule
138 |
139 | include("file1.jl")
140 | include("file2.jl")
141 |
142 | end
143 | ```
144 |
145 | It is also possible to have multiple modules in a file.
146 |
147 | ```julia
148 | module MyModule1
149 | ...
150 | end
151 |
152 | module MyModule2
153 | ...
154 | end
155 | ```
--------------------------------------------------------------------------------
/docs/src/lecture_06/structure.md:
--------------------------------------------------------------------------------
1 | # Package structure
2 |
3 | The cool thing about Julia is the simplicity of creating packages and sharing them with others. This section contains a step-by-step tutorial on how to build a package from scratch. Moreover, we will use this package later in the course.
4 |
5 | ## Built-in package generator
6 |
7 | We first generate an empty package `PackageName` by the built-in function `generate` in the Pkg REPL.
8 |
9 | ```julia
10 | (@v1.10) pkg> generate PackageName
11 | Generating project PackageName:
12 | PackageName/Project.toml
13 | PackageName/src/PackageName.jl
14 | ```
15 |
16 | This generates the new package in the working directory. However, we may also specify an absolute or relative path to generate it elsewhere. The `generate` function creates a new folder (with a name matching the package name) with the following content.
17 |
18 | ```julia
19 | ├── Project.toml
20 | └── src
21 | └── PackageName.jl
22 | ```
23 |
24 | The new package consists of the `Project.toml` file and the `src` folder with one `.jl` file. The `src/PackageName.jl` file contains a module `PackageName`. The package, the `.jl` file, and the module share the same name. **Since we will modify multiple files during this lecture, we will often specify which file we work with.**
25 |
26 | ```julia
27 | # /src/PackageName.jl
28 | module PackageName
29 |
30 | greet() = print("Hello World!")
31 |
32 | end # module
33 | ```
34 |
35 | Since the `generate` function creates an empty package, the `Project.toml` contains only information describing the package name, its unique UUID, version, and author list.
36 |
37 | ```toml
38 | name = "PackageName"
39 | uuid = "fa38fd22-11d6-48c8-ae38-ef06258216d8"
40 | authors = ["Author Name"]
41 | version = "0.1.0"
42 | ```
43 |
44 | Since the `Project.toml` file and the `src/*.jl` files are sufficient to define a package, packages are essentially modules with their own environment.
45 |
46 | ## PkgTemplates
47 |
48 | The built-in `generate` function provides only basic functionality for generating packages. Even though it is sufficient in many cases, the [PkgTemplates](https://github.com/invenia/PkgTemplates.jl) package offers a straightforward and customizable way for creating packages.
49 |
50 | !!! warning "Exercise:"
51 | The goal of this exercise is to create a new package by the PkgTemplates package. Install PkgTemplates and then use the following code to generate a new package template.
52 |
53 | ```julia
54 | using PkgTemplates
55 |
56 | template = Template(;
57 | user="GithubUserName", # github user name
58 | authors=["Author1", "Author2"], # list of authors
59 | dir="/Path/To/Dir/", # dir in which the package will be created
60 | host="github.com", # URL to the code hosting service where packages will reside
61 | julia=v"1.10", # compat version of Julia
62 | plugins=[
63 | ProjectFile(; version=v"0.1.0"), # Add version
64 | Readme(; inline_badges=true), # add readme file with badges
65 |         Tests(; project=false, aqua=true), # add unit test deps and Aqua
66 | Git(; manifest=false), # add manifest.toml to .gitignore
67 | License(; name="MIT"), # add MIT licence
68 | # disable other plugins
69 | !GitHubActions,
70 | !CompatHelper,
71 | !TagBot,
72 | !Dependabot,
73 | ],
74 | )
75 | ```
76 |
77 | Do not forget to change `user`, `authors` and `dir`.
78 |
79 | In the rest of the lecture, we will write code to visualize grayscale and colour images. Come up with a proper package name and use the following code to generate a new package.
80 |
81 | ```julia
82 | template("PackageName")
83 | ```
84 |
85 | For naming conventions, see the official [package naming guidelines](https://julialang.github.io/Pkg.jl/v1/creating-packages/#Package-naming-guidelines). Finally, create the folder `examples` in the main package folder.
86 |
87 | !!! details "Solution:"
88 |     There is no single correct way to choose a package name. We decided to use `ImageInspector` and create the package with the following code:
89 |
90 | ```julia
91 | template("ImageInspector")
92 | ```
93 |
94 | After creating the `ImageInspector` package, we can add the `examples` folder manually or use the `mkdir` function to create it. For the latter, we use the `joinpath` function to specify the correct path.
95 |
96 | ```julia
97 | mkdir(joinpath("/Path/To/Dir/", "ImageInspector", "examples"))
98 | ```
99 |
100 | The generated folder contains more files than the folder generated by the built-in `generate` function.
101 |
102 | ```julia
103 | ├── .git
104 | ├── .gitignore
105 | ├── LICENSE
106 | ├── Manifest.toml
107 | ├── Project.toml
108 | ├── README.md
109 | ├── examples
110 | ├── src
111 | │ └── ImageInspector.jl
112 | └── test
113 | └── runtests.jl
114 | ```
115 |
116 |     The PkgTemplates package also provides an interactive way to generate a template using the following command:
117 | PkgTemplates package also provides an interactive way to generate a template using the following command:
118 |
119 | ```julia
120 | Template(; interactive=true)
121 | ```
122 |
123 | The exercise above used a simple template. However, PkgTemplates provides many additional features to simplify the package generation process. Some plugins add documentation or integration with GitHub features. See the official [PkgTemplates documentation](https://invenia.github.io/PkgTemplates.jl/stable/) for more information.
--------------------------------------------------------------------------------
/docs/src/lecture_06/workflow.md:
--------------------------------------------------------------------------------
1 | # Development workflow
2 |
3 | In the previous section, we created a new empty package. In this section, we will fill the package with content. **Before we continue, open the main folder of the ImageInspector package in a new VS Code window.** One can access it from `File -> Open folder`.
4 |
5 | ## Development mode
6 |
7 | The content of the `ImageInspector` folder can be divided into four parts:
8 | - *Root folder* contains information about the package and git.
9 | - *Folder src* contains the package source code.
10 | - *Folder test* contains the testing scripts for verifying the code correctness.
11 | - *Folder examples* is used to run examples.
12 | The first three are standard, while we added the last folder manually. We can add more folders, such as `data`.
13 |
14 | We first activate a new environment in the `examples` folder.
15 |
16 | ```julia
17 | (ImageInspector) pkg> activate ./examples
18 |
19 | (examples) pkg>
20 | ```
21 |
22 | Then we use the `dev` (or `develop`) command to tell Julia that the `ImageInspector` folder is a package, and we want to start its development. The important thing to realize is that the *working directory* is `.../ImageInspector`, while the *working environment* is `.../ImageInspector/examples`. Since the dot in `dev .` specifies the working directory, this command will add the package from the working directory into the working environment.
23 |
24 | ```julia
25 | julia> pwd()
26 | .../ImageInspector
27 |
28 | (examples) pkg> dev . # or dev /absolute/or/relative/path/ImageInspector/
29 |
30 | (examples) pkg> st
31 | Status `.../ImageInspector/examples/Project.toml`
32 | [5c9991e7] ImageInspector v0.1.0 `..`
33 | ```
34 |
35 | Like the `add` command, the `dev` command allows us to load the package by `using` or `import`. The difference is that the `dev` command tracks the current state of the package folder, not a concrete git commit in some branch.
36 |
37 | !!! warning "Default Julia environment in VS Code:"
38 |     VS Code allows setting a default Julia environment that is activated when the Julia REPL is opened. We can do this by pressing `Julia env: ` located in the bottom info bar and selecting the desired environment.
39 |
40 | ## Revise.jl
41 |
42 | We now create a script `/examples/example.jl` for testing the package functionality. In the rest of the lecture, we will use relative paths from the main folder of the `ImageInspector` package to specify the code location.
43 |
44 | ```julia
45 | # /examples/example.jl
46 | using ImageInspector
47 | ```
48 |
49 | Julia can load a package only once per Julia session. If we load a package by the `using` or `import` commands and then make changes in the code, these changes will not be reloaded. This holds even if we try to reload the package by running `using` or `import` again. For example, we add the `greet` function to the `ImageInspector` package.
50 |
51 | ```julia
52 | # /src/ImageInspector.jl
53 | module ImageInspector
54 |
55 | export greet
56 |
57 | greet() = print("Hello World!")
58 |
59 | end
60 | ```
61 |
62 | Since we have already loaded the package, this change is not reloaded. If we call the `greet` function, we get the `UndefVarError` error.
63 |
64 | ```julia
65 | julia> greet()
66 | ERROR: UndefVarError: greet not defined
67 | ```
68 |
69 | In this case, we have to restart Julia. There are two ways to exit the Julia interactive session: the keyboard shortcut `ctrl + D` or the `exit()` function. Even though we could use the `greet()` function after the restart, we will not do it yet. The reason is that we would have to restart Julia again after making any further changes to the package. Since this is not a convenient way to code, we will use the [Revise](https://github.com/timholy/Revise.jl) package. Even though it provides lots of convenient features, we will present only its basic use. First, we install it.
70 |
71 | ```julia
72 | (examples) pkg> add Revise
73 | ```
74 |
75 | If we develop a package and load the Revise package first, all package changes will be reloaded without restarting Julia.
76 |
77 | ```julia
78 | # /examples/example.jl
79 | using Revise # this must come before `using ImageInspector`
80 | using ImageInspector
81 |
82 | greet()
83 | ```
84 |
85 | ```julia
86 | Hello World!
87 | ```
88 |
89 | We now add the `greet2` function.
90 |
91 | ```julia
92 | # /src/ImageInspector.jl
93 | module ImageInspector
94 |
95 | export greet, greet2
96 |
97 | greet() = print("Hello World!")
98 | greet2() = print("Hello World!!!!")
99 |
100 | end
101 | ```
102 |
103 | Since we are using the `Revise` package, it should be possible to call the `greet2` function without restarting the Julia session.
104 |
105 | ```julia
106 | julia> greet2()
107 | Hello World!!!!
108 | ```
109 |
110 | !!! info "Automatic Revise loading"
111 |     The `Revise` package can be loaded automatically at the start of every Julia session. The easiest way to achieve this is to use the `StartupCustomizer` package. Let's start by installing it into the default Julia environment.
112 |
113 | ```julia
114 | (@v1.10) pkg> add StartupCustomizer
115 | ```
116 |
117 |     Once the package is installed, we can run the following commands, which install `Revise` into the default Julia environment and modify the Julia startup file so that `Revise` is loaded at the beginning of every Julia session.
118 |
119 | ```julia
120 | julia> import StartupCustomizer
121 |
122 | julia> StartupCustomizer.add(StartupCustomizer.Revise())
123 | ```
124 |
125 |     We can check how the Julia startup file was modified.
126 |
127 | ```shell
128 | > cat ~/.julia/config/startup.jl
129 | # begin StartupCustomizer.Revise()
130 | try
131 | using Revise
132 | catch e
133 | @warn "Error initializing Revise" exception=(e, catch_backtrace())
134 | end
135 | # end StartupCustomizer.Revise()
136 | ```
137 |
138 |     `StartupCustomizer` also supports other plugins, such as `OhMyREPL`, which enables code highlighting in the REPL. We can add this plugin in the same way as the Revise plugin.
139 |
140 | ```julia
141 | julia> StartupCustomizer.add(StartupCustomizer.OhMyREPL())
142 | ```
--------------------------------------------------------------------------------
/docs/src/lecture_07/documentation.md:
--------------------------------------------------------------------------------
1 | # Documentation
2 |
3 |
4 | ## Docstrings
5 |
6 | Writing documentation is a good coding practice. It helps others to understand your code, and it may even help the author when returning to the code after an extended break. The most used documentation type is the [docstring](https://docs.julialang.org/en/v1/manual/documentation/), a multiline string describing the functionality.
7 |
8 | ````julia
9 | # /src/ImageInspector.jl
10 | """
11 | image(x::AbstractMatrix{T}; flip = true)
12 |
13 | Converts a matrix of real numbers to a matrix of `Gray` points. If the keyword argument
14 | `flip` is true, the matrix is transposed.
15 |
16 | # Example
17 |
18 | ```julia-repl
19 | julia> x = [0.1 0.25; 0.4 0.6]
20 | 2×2 Matrix{Float64}:
21 | 0.1 0.25
22 | 0.4 0.6
23 |
24 | julia> image(x)
25 | 2×2 Array{Gray{Float64},2} with eltype Gray{Float64}:
26 | Gray{Float64}(0.1) Gray{Float64}(0.4)
27 | Gray{Float64}(0.25) Gray{Float64}(0.6)
28 |
29 | julia> image(x; flip = false)
30 | 2×2 Array{Gray{Float64},2} with eltype Gray{Float64}:
31 | Gray{Float64}(0.1) Gray{Float64}(0.25)
32 | Gray{Float64}(0.4) Gray{Float64}(0.6)
33 | ```
34 | """
35 | function image(x::AbstractMatrix{T}; flip = true) where {T <: Real}
36 | xx = flip ? PermutedDimsArray(x, (2, 1)) : x
37 | return Gray.(xx)
38 | end
39 | ````
40 |
41 | We first wrote the function header, indented by one tab. Then we wrote a short description of the function and, finally, usage examples. To get a nicely formatted docstring, we use [markdown](https://en.wikipedia.org/wiki/Markdown): `# Example` represents a heading, and the `julia-repl` block contains example code. Now we type the function name into the Julia help.
42 |
43 |
44 | ```julia
45 | help?> image
46 | search: image imag
47 |
48 | image(x::AbstractMatrix{T}; flip = true)
49 |
50 | Converts a matrix of real numbers to a matrix of `Gray` points. If the keyword argument
51 | `flip` is true, the matrix is transposed.
52 |
53 | Example
54 | ≡≡≡≡≡≡≡≡≡
55 |
56 | julia> x = [0.1 0.25; 0.4 0.6]
57 | 2×2 Matrix{Float64}:
58 | 0.1 0.25
59 | 0.4 0.6
60 |
61 | julia> image(x)
62 | 2×2 Array{Gray{Float64},2} with eltype Gray{Float64}:
63 | Gray{Float64}(0.1) Gray{Float64}(0.4)
64 | Gray{Float64}(0.25) Gray{Float64}(0.6)
65 |
66 | julia> image(x; flip = false)
67 | 2×2 Array{Gray{Float64},2} with eltype Gray{Float64}:
68 | Gray{Float64}(0.1) Gray{Float64}(0.25)
69 | Gray{Float64}(0.4) Gray{Float64}(0.6)
70 | ```
71 |
72 | ## Creating reports
73 |
74 | Reports may be written externally in LaTeX. However, when we want to show some code, it may be advantageous to write them directly in Julia and export them to [Jupyter notebooks](https://jupyter.org/). The [Literate](https://fredrikekre.github.io/Literate.jl/v2/) package allows combining Julia code with the [Markdown syntax](https://www.markdownguide.org/cheat-sheet) in a script. As an example, consider the following code, which is best read with soft wrapping enabled:
75 |
76 | ```julia
77 | # # ImageInspector
78 | #
79 | # ImageInspector is a small package for educational purposes. Its main goal is not presenting functionality, but presenting package structure. This is its short documentation created in the package [Literate](https://fredrikekre.github.io/Literate.jl/v2) which uses the [Markdown](https://www.markdownguide.org/cheat-sheet) syntax.
80 | #
81 | # To use the package, we need to load first the required packages.
82 |
83 | using ImageInspector
84 | using Plots
85 |
86 |
87 | # ## Grayscale images
88 | #
89 | # As a test example, we create the real matrix `img1` representing a circle. We first discretize the domain $[-1,1]$ in `xs`. We assign black colour whenever $x^2 + y^2 \le 1$. Since the white colour is represented by `[1; 1; 1]` and the black colour by `[0; 0; 0]`, we can do it by the following code:
90 |
91 | xs = -1:0.001:1
92 | img1 = [x^2 + y^2 > 1 for x in xs, y in xs];
93 |
94 | # This is a two-dimensional matrix, which represents a grayscale image. We convert it to an image by calling `image` and then we plot it.
95 |
96 | plot(image(img1); axis=nothing, border=:none)
97 | ```
98 |
99 | The Markdown syntax starts with `#`. Among other things, it allows us to use:
100 | - Links such as `[Literate](https://fredrikekre.github.io/Literate.jl/v2)`.
101 | - Variables or LaTeX syntax such as `$[-1,1]$`.
102 |
103 | Exporting the script into a notebook is simple.
104 |
105 | ```julia
106 | julia> import Literate
107 |
108 | julia> Literate.notebook("report.jl"; execute=true)
109 | ```
110 |
111 | The resulting notebook can be found at our [Github](https://github.com/JuliaTeachingCTU/ImageInspector.jl/blob/master/reports/report.ipynb). All required data are in the [reports folder](https://github.com/JuliaTeachingCTU/ImageInspector.jl/tree/master/reports).
112 |
113 | ## Adding content
114 |
115 | We will add more functions to the `ImageInspector` package. To plot multiple images at once, we will define two functions. The first one computes an optimal grid size for a given number of images.
116 |
117 | ```julia
118 | # /src/ImageInspector.jl
119 | function gridsize(n::Int; nrows::Int = -1, ncols::Int = -1)
120 | if nrows < 1
121 | if ncols < 1
122 | nrows = round(Int, sqrt(n))
123 | ncols = ceil(Int, n / nrows)
124 | else
125 | nrows = ceil(Int, n / ncols)
126 | end
127 | else
128 | ncols = ceil(Int, n / nrows)
129 | end
130 | return nrows, ncols
131 | end
132 | ```
133 |
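For a quick check of the rounding logic, assuming the function above is loaded:

```julia
julia> gridsize(10) # nrows = round(sqrt(10)) = 3, ncols = ceil(10 / 3) = 4
(3, 4)

julia> gridsize(10; nrows = 2)
(2, 5)
```
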
134 | The second function consists of two methods and converts an array of real numbers to one big image of the appropriate colour type.
135 |
136 | ```julia
137 | # /src/ImageInspector.jl
138 | imagegrid(x, ind::Int; flip = true, kwargs...) = image(x, ind; flip)
139 |
140 | function imagegrid(x, inds; flip = true, sep = 1, kwargs...)
141 | imgs = image(x, inds; flip)
142 | n = length(imgs)
143 | nrows, ncols = gridsize(n; kwargs...)
144 |
145 | h, w = size(imgs[1])
146 | A = fill(
147 | eltype(imgs[1])(1), # white color in proper color type
148 |         nrows*h + (nrows + 1)*sep, # height of the resulting image
149 |         ncols*w + (ncols + 1)*sep, # width of the resulting image
150 | )
151 |
152 | for i in 1:nrows, j in 1:ncols
153 | k = j + (i - 1) * ncols
154 | k > n && break
155 |
156 | rows = (1:h) .+ (i - 1)*h .+ i*sep
157 | cols = (1:w) .+ (j - 1)*w .+ j*sep
158 | A[rows, cols] = imgs[k]
159 | end
160 | return A
161 | end
162 | ```
163 |
164 | We use the `sep` keyword argument to specify the separator width between images. With all functions defined, we can test them.
165 |
166 | ```julia
167 | # /examples/example.jl
168 | X = MLDatasets.FashionMNIST(Float64, :train)[:][1];
169 |
170 | plot(imagegrid(X, 1:10; nrows = 2, sep = 2); axis = nothing, border = :none)
171 | ```
172 |
173 | 
174 |
175 | !!! warning "Exercise:"
176 |     Add docstrings for all functions in the ImageInspector package.
--------------------------------------------------------------------------------
/docs/src/lecture_07/extensions.md:
--------------------------------------------------------------------------------
1 | # Extensions
2 |
3 | ## Extension for Plots
4 |
5 | We used the same settings for the `plot` function in all previous examples. Therefore, it makes sense to write an auxiliary function setting attributes for the `plot` function. However, this function will depend on the `Plots` package, and if we add `Plots` to `ImageInspector`, it will significantly slow the loading time.
6 |
7 | To define an extension, we first need to modify the `Project.toml` by adding two new sections. The first new section, `weakdeps`, specifies all the dependencies our extension needs. In our case, we only need `Plots`, so we add the following to the `Project.toml`:
8 |
9 | ```toml
10 | [weakdeps]
11 | Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80"
12 | ```
13 |
14 | The second new section is `extensions`; here, we specify the extension name and the dependencies that the extension uses. In our case, the name is `PlotsExt`, and we only need `Plots` (more dependencies can be specified as a list of their names).
15 |
16 | ```toml
17 | [extensions]
18 | PlotsExt = "Plots"
19 | ```
20 |
21 | We can also specify which versions of the `Plots` package our extension supports by adding a new record to the `compat` section.
22 |
23 | ```toml
24 | [compat]
25 | Aqua = "0.8"
26 | Colors = "0.12, 0.13"
27 | Plots = "1"
28 | Test = "1.9"
29 | julia = "1.9"
30 | ```
31 |
32 | Now, we define an empty function `imageplot` inside ImageInspector, i.e., we add the following code to `src/ImageInspector.jl`.
33 |
34 | ```julia
35 | # src/ImageInspector.jl
36 | function imageplot end
37 | ```
38 |
39 | This step is needed since we will add methods to this function inside our extension.
40 |
41 | The last step is to create the extension itself. The extension code must be stored in the `ext` folder in the package root directory, in a file with the same name as the extension. Therefore, we create a new file `ext/PlotsExt.jl` and add the following code to it.
42 |
43 | ```julia
44 | # ext/PlotsExt.jl
45 | module PlotsExt
46 |
47 | import Plots
48 | using ImageInspector
49 | using ImageInspector.Colors
50 |
51 | function ImageInspector.imageplot(x, ind; flip=true, nrows=-1, ncols=-1, sep=1, kwargs...)
52 | img = imagegrid(x, ind; flip, nrows, ncols, sep)
53 | return imageplot(img; kwargs...)
54 | end
55 |
56 | function ImageInspector.imageplot(x; flip=true, kwargs...)
57 | img = image(x; flip)
58 | return imageplot(img; kwargs...)
59 | end
60 |
61 | function ImageInspector.imageplot(
62 | x::AbstractMatrix{<:Color};
63 | legend=false,
64 | axis=nothing,
65 | border=:none,
66 | kwargs...
67 | )
68 | return Plots.plot(x; legend, axis, border, kwargs...)
69 | end
70 |
71 | end
72 | ```
73 |
74 | Note that we defined a new module with the same name as our extension. And that's all. Now we can test whether the extension works. We have to start a new Julia session and activate the `examples` environment. If we do not load `Plots`, the `imageplot` function has no methods, as can be seen below.
75 |
76 | ```julia
77 | julia> using ImageInspector, MLDatasets
78 |
79 | julia> x = CIFAR10(split=:train).features;
80 |
81 | julia> imageplot(x, 1:10; nrows = 2, sep = 1)
82 | ERROR: MethodError: no method matching imageplot(::Array{Float32, 4}, ::UnitRange{Int64}; nrows::Int64, sep::Int64)
83 | [...]
84 | ```
85 |
86 | After loading the `Plots` package, the `imageplot` function will start working.
87 |
88 | ```julia
89 | julia> using Plots
90 |
91 | julia> imageplot(x, 1:10; nrows = 2, sep = 1)
92 | ```
93 |
94 | 
95 |
96 |
97 | ## Extension for Makie
98 |
99 | We can create multiple extensions for one package. For example, we can also create an extension for `Makie.jl`, an alternative package for generating plots. To do so, we follow the same steps as for the `Plots` extension.
100 |
101 | The first step is to modify the `Project.toml` file in the following way
102 |
103 | ```toml
104 | [weakdeps]
105 | CairoMakie = "13f3f980-e62b-5c42-98c6-ff1f3baf88f0"
106 | Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80"
107 |
108 | [extensions]
109 | MakieExt = "CairoMakie"
110 | PlotsExt = "Plots"
111 |
112 | [compat]
113 | Aqua = "0.8"
114 | CairoMakie = "0.12"
115 | Colors = "0.12, 0.13"
116 | Plots = "1"
117 | Test = "1.9"
118 | julia = "1.9"
119 | ```
120 |
In other words, our extension for `Makie` is named `MakieExt` and depends on `CairoMakie`. Now we can create the extension itself by creating the file `ext/MakieExt.jl` and adding the following code into it
122 |
123 | ```julia
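# ext/MakieExt.jl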
124 | module MakieExt
125 |
126 | import CairoMakie
127 | using ImageInspector
128 | using ImageInspector.Colors
129 |
130 | function ImageInspector.imageplot(x, ind; flip=true, nrows=-1, ncols=-1, sep=1, kwargs...)
131 | img = imagegrid(x, ind; flip, nrows, ncols, sep)
132 | return imageplot(img; kwargs...)
133 | end
134 |
135 | function ImageInspector.imageplot(x; flip=true, kwargs...)
136 | img = image(x; flip)
137 | return imageplot(img; kwargs...)
138 | end
139 |
140 | function ImageInspector.imageplot(x::AbstractMatrix{<:Color}; kwargs...)
141 |
142 | f, ax = CairoMakie.image(reverse(x'; dims=2); kwargs...)
143 | CairoMakie.hidedecorations!(ax)
144 | CairoMakie.hidespines!(ax)
145 | return f
146 | end
147 |
148 | end
149 | ```
150 |
Now it's time to test the extension. To do so, we first have to install `CairoMakie` into the `examples` environment
152 |
153 | ```julia
154 | (ImageInspector) pkg> activate ./examples
155 |
156 | (examples) pkg> add CairoMakie
157 | ```
158 |
We have to start a new Julia session and activate the `examples` environment. If we do not load `CairoMakie`, the `imageplot` function will have no methods, as can be seen below
160 |
161 | ```julia
162 | julia> using ImageInspector, MLDatasets
163 |
164 | julia> x = CIFAR10(split=:train).features;
165 |
166 | julia> imageplot(x, 1:10; nrows = 2, sep = 1)
167 | ERROR: MethodError: no method matching imageplot(::Array{Float32, 4}, ::UnitRange{Int64}; nrows::Int64, sep::Int64)
168 | [...]
169 | ```
170 |
171 | After loading the `CairoMakie` package, the `imageplot` function will start working.
172 |
173 | ```julia
174 | julia> using CairoMakie
175 |
176 | julia> imageplot(x, 1:10; nrows = 2, sep = 1)
177 | ```
178 |
179 | 
--------------------------------------------------------------------------------
/docs/src/lecture_08/constrained.md:
--------------------------------------------------------------------------------
1 | ```@setup optim
2 | using Plots
3 | using Random
4 |
5 | function create_anim(
6 | f,
7 | path,
8 | xlims,
9 | ylims,
10 | file_name = joinpath(pwd(), randstring(12) * ".gif");
11 | xbounds = xlims,
12 | ybounds = ylims,
13 | fps = 15,
14 | )
15 | xs = range(xlims...; length = 100)
16 | ys = range(ylims...; length = 100)
17 | plt = contourf(xs, ys, f; color = :jet)
18 |
19 | # add constraints if provided
20 | if !(xbounds == xlims && ybounds == ylims)
21 | x_rect = [xbounds[1]; xbounds[2]; xbounds[2]; xbounds[1]; xbounds[1]]
22 | y_rect = [ybounds[1]; ybounds[1]; ybounds[2]; ybounds[2]; ybounds[1]]
23 |
24 | plot!(x_rect, y_rect; line = (2, :dash, :red), label="")
25 | end
26 |
27 | # add an empty plot
28 | plot!(Float64[], Float64[]; line = (4, :arrow, :black), label = "")
29 |
30 | # extract the last plot series
31 | plt_path = plt.series_list[end]
32 |
33 | # create the animation and save it
34 | anim = Animation()
35 | for x in eachcol(path)
36 | push!(plt_path, x[1], x[2]) # add a new point
37 | frame(anim)
38 | end
39 | gif(anim, file_name; fps = fps, show_msg = false)
40 | return nothing
41 | end
42 |
43 | f(x) = sin(x[1] + x[2]) + cos(x[1])^2
44 | g(x) = [cos(x[1] + x[2]) - 2*cos(x[1])*sin(x[1]); cos(x[1] + x[2])]
45 |
46 | f(x1,x2) = f([x1;x2])
47 | ```
48 |
49 | # [Constrained optimization](@id lagrangian)
50 |
51 | The usual formulation of constrained optimization is
52 |
53 | ```math
54 | \tag{P}
55 | \begin{aligned}
56 | \text{minimize}\qquad &f(x) \\
57 | \text{subject to}\qquad &g_i(x) \le 0,\ i=1,\dots,I, \\
58 | &h_j(x) = 0,\ j=1,\dots,J.
59 | \end{aligned}
60 | ```
61 |
62 | Functions ``g_i`` generate inequality constraints, while functions ``h_j`` generate equality constraints. Box constraints such as ``x\in[0,1]`` are the simplest case of the former. This optimization problem is also called the primal formulation. It is closely connected with the Lagrangian
63 |
64 | ```math
65 | L(x;\lambda,\mu) = f(x) + \sum_{i=1}^I \lambda_i g_i(x) + \sum_{j=1}^J \mu_j h_j(x).
66 | ```
67 |
68 | Namely, it is simple to show that the primal formulation (P) is equivalent to
69 |
70 | ```math
71 | \operatorname*{minimize}_x\quad \operatorname*{maximize}_{\lambda\ge 0,\mu}\quad L(x;\lambda,\mu).
72 | ```
73 |
74 | The dual problem then switches the minimization and maximization to arrive at
75 |
76 | ```math
77 | \tag{D} \operatorname*{maximize}_{\lambda\ge 0,\mu} \quad\operatorname*{minimize}_x\quad L(x;\lambda,\mu).
78 | ```
79 |
80 | Even though the primal and dual formulations are not generally equivalent, they are often used interchangeably.
81 |
82 | !!! info "Linear programming:"
83 | The linear program
84 |
85 | ```math
86 | \begin{aligned}
87 | \text{minimize}\qquad &c^\top x \\
88 | \text{subject to}\qquad &Ax=b, \\
89 | &x\ge 0
90 | \end{aligned}
91 | ```
92 |
93 | is equivalent to
94 |
95 | ```math
96 | \begin{aligned}
97 | \text{maximize}\qquad &b^\top \mu \\
98 | \text{subject to}\qquad &A^\top \mu\le c.
99 | \end{aligned}
100 | ```
101 |
102 | We can observe several things:
103 | 1. Primal and dual problems switch minimization and maximization.
104 | 2. Primal and dual problems switch variables and constraints.
105 |
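To see where the dual comes from, we sketch the derivation for the linear program above. Its Lagrangian (with multipliers ``\lambda\ge 0`` for ``-x\le 0`` and ``\mu`` for the equality constraint) reads

```math
L(x;\lambda,\mu) = c^\top x - \lambda^\top x + \mu^\top (b - Ax).
```

The inner minimization over ``x`` is ``-\infty`` unless ``c - \lambda - A^\top\mu = 0``, in which case the Lagrangian equals ``b^\top\mu``. Eliminating ``\lambda = c - A^\top\mu \ge 0`` yields precisely the constraint ``A^\top\mu\le c`` of the dual program.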
For unconstrained optimization, we showed that each local minimum satisfies the optimality condition ``\nabla f(x)=0``. This condition does not have to hold for constrained optimization, where the optimality conditions take a more complex form.
107 |
108 | !!! todo "Theorem: Karush-Kuhn-Tucker conditions"
Let ``f``, ``g_i`` and ``h_j`` be differentiable functions and let a constraint qualification hold. If ``x`` is a local minimum of the primal problem (P), then there are $\lambda\ge 0$ and $\mu$ such that
110 |
111 | ```math
112 | \begin{aligned}
113 | &\text{Optimality:} && \nabla_x L(x;\lambda,\mu) = 0, \\
114 | &\text{Feasibility:} && \nabla_\lambda L(x;\lambda,\mu)\le 0,\ \nabla_\mu L(x;\lambda,\mu) = 0, \\
115 | &\text{Complementarity:} && \lambda^\top g(x) = 0.
116 | \end{aligned}
117 | ```
118 |
119 | If $f$ and $g$ are convex and $h$ is linear, then every stationary point is a global minimum of (P).
120 |
121 | When there are no constraints, the Lagrangian ``L`` reduces to the objective ``f``, and the optimality conditions are equivalent. Therefore, the optimality conditions for constrained optimization generalize those for unconstrained optimization.
122 |
123 | ## Numerical method
124 |
We present only the simplest method for constrained optimization. Projected gradients
126 |
127 | ```math
128 | \begin{aligned}
129 | y^{k+1} &= x^k - \alpha^k\nabla f(x^k), \\
130 | x^{k+1} &= P_X(y^{k+1})
131 | \end{aligned}
132 | ```
133 |
134 | compute the gradient as for standard gradient descent, and then project the point onto the feasible set. Since the projection needs to be simple to calculate, projected gradients are used for simple ``X`` such as boxes or balls.
135 |
136 | We will use projected gradients to solve
137 |
138 | ```math
139 | \begin{aligned}
140 | \text{minimize}\qquad &\sin(x_1 + x_2) + \cos(x_1)^2 \\
141 | \text{subject to}\qquad &x_1, x_2\in [-1,1].
142 | \end{aligned}
143 | ```
144 |
The implementation of projected gradients is the same as gradient descent, but it needs the projection function ```P``` as an additional input. For plotting purposes, it returns both ``x`` and ``y``.
146 |
147 | ```@example optim
148 | function optim(f, g, P, x, α; max_iter=100)
149 | xs = zeros(length(x), max_iter+1)
150 | ys = zeros(length(x), max_iter)
151 | xs[:,1] = x
152 | for i in 1:max_iter
153 | ys[:,i] = xs[:,i] - α*g(xs[:,i])
154 | xs[:,i+1] = P(ys[:,i])
155 | end
156 | return xs, ys
157 | end
158 |
159 | nothing # hide
160 | ```
161 |
The projection function ```P``` computes the projection onto the box ```[x_min, x_max]```. Since the feasible set is a box, the projection is computed componentwise:
163 |
164 | ```@example optim
165 | P(x, x_min, x_max) = min.(max.(x, x_min), x_max)
166 |
167 | nothing # hide
168 | ```
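Boxes are not the only sets with a simple projection. As a small illustration (the function `P_ball` and its arguments are our own names, not used elsewhere in this lecture), the projection onto the Euclidean ball with centre `c` and radius `r` also has a closed form:

```julia
using LinearAlgebra

# project x onto the ball {z : ||z - c|| ≤ r}
function P_ball(x, c, r)
    d = norm(x - c)
    return d <= r ? x : c + r * (x - c) / d
end
```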
169 |
170 | Now we can call projected gradients from the same starting point as before.
171 |
172 | ```@example optim
173 | x_min = [-1; -1]
174 | x_max = [0; 0]
175 |
176 | xs, ys = optim(f, g, x -> P(x,x_min,x_max), [0;-1], 0.1)
177 |
178 | nothing # hide
179 | ```
180 |
181 | We use the keyword arguments `xbounds` and `ybounds` to plot the feasible region in the animation. First, we plot only the iterations `xs`.
182 |
183 | ```@example optim
184 | xlims = (-3, 1)
185 | ylims = (-2, 1)
186 |
187 | create_anim(f, xs, xlims, ylims, "anim6.gif";
188 | xbounds=(x_min[1], x_max[1]),
189 | ybounds=(x_min[2], x_max[2]),
190 | )
191 |
192 | nothing # hide
193 | ```
194 |
195 | 
196 |
197 |
To plot the path, we need to merge them by alternating one point from ```xs``` with one point from ```ys``` and so on. Since ```xs``` and ```ys``` have different numbers of entries, we can do it via
199 |
200 | ```@example optim
201 | xys = hcat(reshape([xs[:,1:end-1]; ys][:], 2, :), xs[:,end])
202 |
203 | nothing # hide
204 | ```
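To see what this one-liner does, consider a tiny sketch with made-up iterates (the values below are purely illustrative):

```julia
xs = [1 3 5; 1 3 5]  # three iterates x⁰, x¹, x²
ys = [2 4; 2 4]      # two intermediate points y¹, y²

# stack, flatten column-major, and reshape to interleave the columns
hcat(reshape([xs[:, 1:end-1]; ys][:], 2, :), xs[:, end])
# 2×5 Matrix: the columns are x⁰, y¹, x¹, y², x²
```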
205 |
206 | It is probably not the nicest thing to do, but it is Saturday evening, I am tired, and it works. Sorry :) The animation can now be created in the same way as before.
207 |
208 | ```@example optim
209 | create_anim(f, xys, xlims, ylims, "anim7.gif";
210 | xbounds=(x_min[1], x_max[1]),
211 | ybounds=(x_min[2], x_max[2]),
212 | )
213 |
214 | nothing # hide
215 | ```
216 |
217 | 
218 |
219 | The animation shows that projected gradients converge to the global minimum. Most of the iterations are outside of the feasible region, but they are projected back to the boundary. One can use the optimality conditions to verify that the gradient of the objective and the active constraint have the same direction.
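The following sketch only inspects this numerically; it shows the gradient at the final iterate and marks which box constraints are active, without asserting concrete values:

```julia
x_opt = xs[:, end]  # final iterate of projected gradients
g(x_opt)            # gradient of the objective at the solution

x_opt .== x_min     # active lower bounds
x_opt .== x_max     # active upper bounds
```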
220 |
--------------------------------------------------------------------------------
/docs/src/lecture_08/data/auxiliary.jl:
--------------------------------------------------------------------------------
1 | using Plots
2 |
3 | f(x) = (1+0.5*x)*sin(x)
4 |
5 | xs = range(3, 12, length=1000)
6 | fs = f.(xs)
# interior local minima/maxima: points smaller/larger than both neighbours
i_min = findall((fs[2:end-1] .< fs[1:end-2]) .& (fs[2:end-1] .< fs[3:end])) .+ 1
i_max = findall((fs[2:end-1] .> fs[1:end-2]) .& (fs[2:end-1] .> fs[3:end])) .+ 1
9 |
10 | plot(xs, fs, label="", ticks=false)
11 | scatter!(xs[i_min], fs[i_min], label="Local minimum")
12 | scatter!(xs[i_max], fs[i_max], label="Local maximum")
13 |
file_name = joinpath("src", "lecture_08", "minmax.svg")
15 | savefig(file_name)
16 |
--------------------------------------------------------------------------------
/docs/src/lecture_08/theory.md:
--------------------------------------------------------------------------------
1 | # Introduction to continuous optimization
2 |
3 | Optimization problems optimize (minimize or maximize) a given function on a given set. There are many applications:
4 | - Maximize profit under market forecasts.
5 | - Given a set of points, find a visiting order which minimizes the distance. This application includes various tasks ranging from delivery services to snow ploughing.
6 | - Make a prediction based on known data. Specific examples are whether a client gets a loan or whether an autonomous vehicle sees a pedestrian. Almost all tasks in machine learning minimize the difference between a prediction and a label.
7 | - Find the optimal shape of a machine so that a criterion is maximized. Specific examples are designing planes with minimal drag or optimizing engines to maximize power under a reasonable oil consumption.
8 |
These applications are very different from each other. They differ in their assumptions about the world, in their formulation, and in the way they are solved.
10 | - Profit maximization needs to model future uncertainty. The formulation will probably contain expectations and chance constraints, while the variables will be continuous.
- Finding the shortest route is often reformulated as finding the shortest path in a graph. Problems like this typically operate with binary variables and no uncertainty.
12 | - Machine learning requires loads of data and usually ignores any physical models. Due to the abundance of data, the objective function evaluations are lengthy, and specially designed optimization algorithms are used.
13 | - Topology optimization is based on complicated physical models. Since these often come in a black-box form, additional information such as gradient is often not available. Moreover, the optimizer needs to consider conflicting criteria such as speed and consumption.
14 |
15 | This short analysis implies that there is no single "optimization topic", and the theory of optimization contains many different subfields. In the following four lectures, we will study the field of continuous optimization, which assumes that all functions are (sub)differentiable and all variables are continuous. This includes most machine learning applications, to which we dedicate three lectures.
16 |
17 | ## Problem definition
18 |
19 | The goal of an optimization problem is to minimize or maximize a function ``f`` over a set ``X``:
20 |
21 | ```math
22 | \begin{aligned}
23 | \text{minimize}\qquad &f(x) \\
24 | \text{subject to}\qquad &x\in X.
25 | \end{aligned}
26 | ```
27 |
28 | Should we consider both minimization and maximization problems? No. Because
29 |
30 | ```math
31 | \text{maximize}\qquad f(x)
32 | ```
33 |
34 | is equivalent to
35 |
36 | ```math
37 | -\text{minimize}\qquad -f(x).
38 | ```
39 |
40 | This trick has a neat consequence: All numerical and theoretical results are derived only for minimization problems. If we deal with a maximization problem, we convert it first to a minimization problem and then use known results.
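A minimal sketch of this trick: to maximize ``f(x) = -(x-1)^2``, we run gradient descent on ``-f``, which amounts to gradient *ascent* on ``f`` (the function, stepsize and iteration count below are our own toy choices):

```julia
# gradient ascent on f = gradient descent on -f
function maximize(g, x; α=0.1, max_iter=100)
    for _ in 1:max_iter
        x += α * g(x)
    end
    return x
end

f(x) = -(x - 1)^2    # toy function to maximize
g(x) = -2 * (x - 1)  # its derivative

x_opt = maximize(g, 0.0)
x_opt, f(x_opt)      # ≈ (1.0, 0.0): the maximizer and the maximal value
```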
41 |
--------------------------------------------------------------------------------
/docs/src/lecture_09/exercises.md:
--------------------------------------------------------------------------------
1 | ```@setup ex_log
2 | using Plots
3 | using StatsPlots
4 | using RDatasets
5 | using Statistics
6 | using LinearAlgebra
7 | using Query
8 |
9 | function log_reg(X, y, w; max_iter=100)
10 | X_mult = [row*row' for row in eachrow(X)]
11 | for i in 1:max_iter
12 | y_hat = 1 ./(1 .+exp.(-X*w))
13 | grad = X'*(y_hat.-y) / size(X,1)
14 | hess = y_hat.*(1 .-y_hat).*X_mult |> mean
15 | w -= hess \ grad
16 | end
17 | return w
18 | end
19 |
20 | iris = dataset("datasets", "iris")
21 | iris_reduced = @from i in iris begin
22 | @where i.Species != "setosa"
23 | @select {
24 | i.PetalLength,
25 | i.PetalWidth,
26 | intercept = 1,
27 | i.Species,
28 | label = i.Species == "virginica",
29 | }
30 | @collect DataFrame
31 | end
32 |
33 | X = Matrix(iris_reduced[:, 1:3])
34 | y = iris_reduced.label
35 |
36 | w = log_reg(X, y, zeros(size(X,2)))
37 |
38 | σ(z) = 1/(1+exp(-z))
39 | ```
40 |
41 | # [Exercises](@id l8-exercises)
42 |
43 | !!! warning "Exercise 1:"
44 | The logistic regression on the iris dataset failed in 6 out of 100 samples. But the visualization shows the failure only in 5 cases. How is it possible?
45 |
46 | !!! details "Solution:"
47 | We use the `iris_reduced` dataframe and add the column `prediction` to it.
48 |
49 | ```@example ex_log
50 | df = iris_reduced
51 | df.prediction = σ.(X*w) .>= 0.5
52 |
53 | nothing # hide
54 | ```
55 |
56 | Now we show all misclassified samples.
57 |
58 | ```@example ex_log
59 | sort(df[df.label .!= df.prediction, :], [:PetalLength, :PetalWidth])
60 | ```
61 |
A quick look at the table shows that the point ``(4.8,1.8)`` is misclassified, but the image shows it as correctly classified. Let us show all samples with these values.
63 |
64 | ```@example ex_log
65 | df[(df.PetalLength .== 4.8) .& (df.PetalWidth .== 1.8), :]
66 | ```
67 |
As we can see, there are three samples with the same data. Two of them have label 1 and one has label 0. Since the incorrectly classified sample was overdrawn by the correctly classified ones, it was not visible in the plot.
69 |
70 | !!! warning "Exercise 2: Disadvantages of the sigmoid function"
71 | Show that Newton's method fails when started from the vector ``(1,2,3)``. Can you guess why it happened? What are the consequences for optimization? Is gradient descent going to suffer from the same problems?
72 |
73 | !!! details "Solution:"
74 | First, we run the logistic regression as before, only with a different starting point
75 |
76 | ```julia
77 | log_reg(X, y, [1;2;3])
78 | ```
79 | ```julia
80 | ERROR: SingularException(1)
81 | ```
82 |
83 | This resulted in an error (or possibly in NaNs for older versions of Julia). When something fails, it may be a good idea to run a step-by-step analysis. In this case, we will run the first iteration of Newton's method
84 |
85 | ```@repl ex_log
86 | w = [1;2;3];
87 | X_mult = [row*row' for row in eachrow(X)];
88 | y_hat = 1 ./(1 .+exp.(-X*w))
89 | grad = X'*(y_hat.-y) / size(X,1)
90 | hess = y_hat.*(1 .-y_hat).*X_mult |> mean
91 | w -= hess \ grad
92 | ```
93 |
Starting from the bottom, we can see that even though we started with a relatively small ``w``, the next iterate is four orders of magnitude larger. This happened because the Hessian ```hess``` is much smaller than the gradient ```grad```, which indicates some kind of numerical instability. The prediction ```y_hat``` should lie in the interval ``[0,1]``, but it seems that it is almost always close to 1. Let us verify this by showing the extrema of ```y_hat```
95 |
96 | ```@example ex_log
97 | extrema(y_hat)
98 | ```
99 |
They are indeed both very close to one.
101 |
102 | Now we explain the reason. We know that the prediction equals to
103 |
104 | ```math
105 | \hat y_i = \sigma(w^\top x_i),
106 | ```
107 |
where ``\sigma`` is the sigmoid function. Since the minimum of ``w^\top x_i``
109 |
110 | ```@example ex_log
111 | minimum(X*[1;2;3])
112 | ```
113 |
is large, all ``w^\top x_i`` are large. But plotting the sigmoid function
115 |
116 | ```@example ex_log
117 | xs = -10:0.01:10
118 | plot(xs, σ, label="", ylabel="Sigmoid function")
119 |
120 | savefig("sigmoid.svg") # hide
121 | ```
122 |
123 | 
124 |
it is clear that all ``w^\top x_i`` hit the part of the sigmoid which is flat. This means that the derivative is almost zero, and the second derivative is even closer to zero. Then the ratio of the gradient and the Hessian is huge.
126 |
127 | The gradient descent will probably run into the same difficulty. Since the gradient will be too small, it will take a huge number of iterations to escape the flat region of the sigmoid. This is a known problem of the sigmoid function. It is also the reason why it was replaced in neural networks by other activation functions.
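A quick numerical illustration of the flat region (a sketch; `dσ` is our own name for the sigmoid derivative ``\sigma'(z) = \sigma(z)(1-\sigma(z))``):

```julia
dσ(z) = σ(z) * (1 - σ(z))  # derivative of the sigmoid

dσ.([0, 10, 20, 30])       # ≈ 0.25, 4.5e-5, 2.1e-9, 9.4e-14
```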
128 |
129 | !!! warning "Exercise 3 (theory)"
130 | Show the details for the derivation of the loss function of the logistic regression.
131 |
132 | !!! details "Solution:"
133 | Since ``\hat y`` equals the probability of predicting ``1``, we have
134 |
135 | ```math
136 | \hat y = \frac{1}{1+e^{-w^\top x}}
137 | ```
138 |
139 | Then the cross-entropy loss reduces to
140 |
141 | ```math
142 | \begin{aligned}
143 | \operatorname{loss}(y,\hat y) &= - y\log \hat y - (1-y)\log(1-\hat y) \\
144 | &= y\log(1+e^{-w^\top x}) - (1-y)\log(e^{-w^\top x}) + (1-y)\log(1+e^{-w^\top x}) \\
145 | &= \log(1+e^{-w^\top x}) + (1-y)w^\top x.
146 | \end{aligned}
147 | ```
148 |
149 | Then it remains to sum this term over all samples.
150 |
151 | !!! warning "Exercise 4 (theory)"
152 | Show that if the Newton's method converged for the logistic regression, then it found a point globally minimizing the logistic loss.
153 |
154 | !!! details "Solution:"
155 | We derived that the Hessian of the objective function for logistic regression is
156 |
157 | ```math
158 | \nabla^2 L(w) = \frac 1n \sum_{i=1}^n\hat y_i(1-\hat y_i)x_i x_i^\top.
159 | ```
160 |
161 | For any vector ``a``, we have
162 |
163 | ```math
164 | a^\top x_i x_i^\top a = (x_i^\top a)^\top (x_i^\top a) = \|x_i^\top a\|^2 \ge 0,
165 | ```
166 |
which implies that ``x_i x_i^\top`` is a positive semidefinite matrix (it is known as a rank-1 matrix because its rank equals 1 whenever ``x_i`` is a non-zero vector). Since ``\hat y_i(1-\hat y_i)\ge 0``, it follows that ``\nabla^2 L(w)`` is a positive semidefinite matrix. If the Hessian of a function is positive semidefinite everywhere, the function is convex. Since Newton's method found a stationary point, this point is a global minimum.
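A minimal numerical check of this claim (a sketch; it assumes `using LinearAlgebra` for `rank` and `eigvals`):

```julia
using LinearAlgebra

x = randn(3)
A = x * x'           # a rank-1 matrix

rank(A), eigvals(A)  # rank 1; eigenvalues non-negative up to rounding errors
```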
--------------------------------------------------------------------------------
/docs/src/lecture_09/iris.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/lecture_09/iris.png
--------------------------------------------------------------------------------
/docs/src/lecture_09/linear.md:
--------------------------------------------------------------------------------
1 | ```@setup linear
2 | abstract type Step end
3 |
4 | struct GD <: Step
5 | α::Real
6 | end
7 |
8 | optim_step(s::GD, f, g, x) = -s.α*g(x)
9 |
10 | function optim(f, g, x, s::Step; max_iter=100)
11 | for i in 1:max_iter
12 | x += optim_step(s, f, g, x)
13 | end
14 | return x
15 | end
16 | ```
17 |
18 | # Linear regression
19 |
20 | We start with linear regression, where the labels are continuous variables.
21 |
22 | ## Theory of linear regression
23 |
24 | Linear regression uses the linear prediction function ``\operatorname{predict}(w;x) = w^\top x`` and the mean square error ``\operatorname{loss}(y, \hat y) = (y - \hat y)^2`` as the loss function. When we have a dataset with ``n`` data points (samples) ``x_i`` and labels ``y_i``, linear regression may be written as the following optimization problem.
25 |
26 | ```math
27 | \operatorname{minimize}_w\qquad \frac 1n\sum_{i=1}^n (w^\top x_i - y_i)^2.
28 | ```
29 |
30 | The objective function is minimal if the predictions ``w^\top x_i`` equal to the labels ``y_i`` for all samples ``i=1,\dots,n``.
31 |
Some algorithms use the sum instead of the mean in the objective function. These approaches are equivalent because the objectives differ only by a positive factor. For the sum version, it is simpler to work in the matrix notation, where we form a matrix ``X`` whose rows are the samples ``x_i``. It is not difficult to show that the previous problem is equivalent to
33 |
34 | ```math
35 | \operatorname{minimize}_w\qquad \|Xw - y\|^2,
36 | ```
37 |
38 | where the norm is the ``l_2`` norm. Since this is a convex quadratic problem, it is equivalent to its optimality conditions. Setting the derivative to zero yields
39 |
40 | ```math
41 | 2X^\top (Xw-y) = 0.
42 | ```
43 |
44 | From here, we obtain the closed-form solution to the linear regression
45 |
46 | ```math
47 | w = (X^\top X)^{-1}X^\top y.
48 | ```
49 |
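A quick numerical sanity check of this formula on random data (a sketch; the backslash operator is preferable to an explicit inverse, as discussed later in this lecture):

```julia
using LinearAlgebra

X = randn(100, 3)
y = randn(100)

w_formula = inv(X'*X)*X'*y  # the closed form with an explicit inverse
w_solve = (X'*X) \ (X'*y)   # the numerically preferable variant

norm(w_formula - w_solve)   # ≈ 0
```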
50 | !!! info "Closed-form solution:"
51 | Linear regression is probably the only machine learning model with a closed-form solution. All other models must be solved by iterative algorithms such as gradient descent. In some cases, it may be advantageous to use iterative algorithms even for linear regression. For example, this includes the case of a large number of features ``m`` because then ``X^\top X`` is an ``m\times m`` matrix that may be difficult to invert.
52 |
53 | ## UCI repository
54 |
Training a machine learning model requires data. Neural networks require lots of data. Since collecting data is difficult, many ready-made datasets are available at the [UCI Machine Learning Repository](http://archive.ics.uci.edu/ml/index.php). We will use the iris (kosatec in Czech) dataset, where the goal is to predict one of three species of iris based on sepal (kališní lístek in Czech) and petal (okvětní lístek in Czech) widths and lengths.
56 |
57 | 
58 |
59 | If you do not see any differences between these three species, machine learning to the rescue!
60 |
61 | ## Loading and preparing data
62 |
63 | To experiment with machine learning models, we use the ```RDatasets``` package, which stores many machine learning datasets, and we load the data by
64 |
65 | ```@example linear
66 | using Plots
67 | using StatsPlots
68 | using RDatasets
69 |
70 | iris = dataset("datasets", "iris")
71 |
72 | iris[1:5,:] # hide
73 | ```
74 |
75 | Printing the first five entries of the data shows that they are saved in DataFrame, and the columns (features) are sepal length, sepal width, petal length and petal width.
76 |
77 | When designing a classification method, a good practice is to perform at least a basic analysis of the data. That may include checking for NaNs, infinite values, obvious errors, standard deviations of features or others. Here, we only plot the data.
78 |
79 | !!! warning "Exercise:"
80 | We will simplify the goal and estimate the dependence of petal width on petal length. Create the data ``X`` (do not forget to add the bias) and the labels ``y``.
81 |
82 | Make a graph of the dependence of petal width on petal length.
83 |
84 | !!! details "Solution:"
Since the petal length and width are the third and fourth columns, we assign them to ```X``` and ```y```, respectively. We could use ```iris[:, 4]``` or ```iris[:, :PetalWidth]``` instead of ```iris.PetalWidth```, but the first possibility is vulnerable to errors. We need to concatenate ```X``` with a vector of ones to add the bias.
86 |
87 | ```@example linear
88 | y = iris.PetalWidth
89 | X = hcat(iris.PetalLength, ones(length(y)))
90 |
91 | nothing # hide
92 | ```
93 |
The best visualization is the scatter plot. We use the version from the `StatsPlots` package, but the one from the `Plots` package would naturally be sufficient.
95 |
96 | ```@example linear
97 | @df iris scatter(
98 | :PetalLength,
99 | :PetalWidth;
100 | label="",
101 | xlabel = "Petal length",
102 | ylabel = "Petal width"
103 | )
104 |
105 | savefig("iris_lin1.svg") # hide
106 |
107 | nothing # hide
108 | ```
109 |
110 | 
111 |
112 | The figure shows a positive correlation between length and width. This is natural as bigger petals mean both longer and wider petals. We will quantify this dependence by linear regression.
113 |
114 |
115 | ## Training the classifier
116 |
117 | !!! warning "Exercise:"
118 | Use the closed-form formula to get the coefficients ``w`` for the linear regression. Then use the ```optim``` method derived in the previous lecture to solve the optimization problem via gradient descent. The results should be identical.
119 |
120 | !!! details "Solution:"
121 | The closed-form expression is ``(X^\top X)^{-1}X^\top y``. In the [exercises](@ref l7-exercises) to the previous lecture, we explained that writing ```(X'*X) \ (X'*y)``` is better than `inv(X'*X)*X'*y` because the former does not compute the matrix inverse. As a side-note, can you guess the difference between `inv(X'*X)*X'*y` and `inv(X'*X)*(X'*y)`?
122 |
123 | ```@example linear
124 | w = (X'*X) \ (X'*y)
125 |
126 | nothing # hide
127 | ```
128 |
For the gradient descent, we first realize that the derivative of the objective is ``2X^\top (Xw-y)``; we drop the constant factor two because it only rescales the stepsize. Defining the derivative function in ```g```, we call the ```optim``` method in the same way as in the last lecture. Since we use the sum and not the mean in the objective, we need to use a much smaller stepsize.
130 |
131 | ```@example linear
132 | g(w) = X'*(X*w-y)
133 | w2 = optim([], g, zeros(size(X,2)), GD(1e-4); max_iter=10000)
134 |
135 | nothing # hide
136 | ```
137 |
138 | The difference between the solutions is
139 |
140 | ```@example linear
141 | using LinearAlgebra
142 |
143 | norm(w-w2)
144 | ```
145 |
146 | which is acceptable.
147 |
148 | The correct solution is
149 |
150 | ```@example linear
151 | w # hide
152 | ```
153 |
154 | Now we can estimate the petal width if only petal length is known.
155 |
156 | !!! warning "Exercise:"
Write the dependence of the petal width on the petal length. Plot it in the previous graph.
158 |
159 | !!! details "Solution:"
160 | The desired dependence is
161 |
162 | ```math
163 | \text{width} \approx -0.36 + 0.42*\text{length}.
164 | ```
165 |
166 | Before plotting the prediction, we save it into ```f_pred```.
167 |
168 | ```@example linear
169 | f_pred(x::Real, w) = w[1]*x + w[2]
170 |
171 | nothing # hide
172 | ```
173 |
Then we create the limits ```x_lims``` and finally plot the prediction function.
175 |
176 | ```@example linear
177 | x_lims = extrema(iris.PetalLength) .+ [-0.1, 0.1]
178 |
179 | @df iris scatter(
180 | :PetalLength,
181 | :PetalWidth;
182 | xlabel = "Petal length",
183 | ylabel = "Petal width",
184 | label = "",
185 | legend = :topleft,
186 | )
187 |
188 | plot!(x_lims, x -> f_pred(x,w); label = "Prediction", line = (:black,3))
189 |
190 | savefig("iris_lin2.svg") # hide
191 | ```
192 |
193 | 
194 |
--------------------------------------------------------------------------------
/docs/src/lecture_09/theory.md:
--------------------------------------------------------------------------------
1 | # Introduction to regression and classification
2 |
Regression and classification are parts of machine learning that predict certain variables based on labelled data. Both regression and classification operate on several premises:
4 | - We differentiate between datapoints ``x`` and labels ``y``. While data points are relatively simple to obtain, labels ``y`` are relatively hard to obtain.
5 | - We consider some parameterized function ``\operatorname{predict}(w;x)`` and try to find an unknown variable ``w`` to correctly predict the labels from samples (data points)
6 |
7 | ```math
8 | \operatorname{predict}(w;x) \approx y.
9 | ```
10 |
- We have a labelled dataset with ``n`` samples ``x_1,\dots,x_n`` and labels ``y_1,\dots,y_n``.
12 | - We use the labelled dataset to train the weights ``w``.
13 | - When an unlabelled sample arrives, we use the prediction function to predict its label.
14 |
15 | The [MNIST](https://en.wikipedia.org/wiki/MNIST_database) dataset contains ``n=50000`` images of grayscale digits. Each image ``x_i`` from the dataset has the size ``28\times 28`` and was manually labelled by ``y_i\in\{0,\dots,9\}``. When the weights ``w`` of a prediction function ``\operatorname{predict}(w;x)`` are trained on this dataset, the prediction function can predict which digit appears on images it has never seen before. This is an example where the images ``x`` are relatively simple to obtain, but the labels ``y`` are hard to obtain due to the need to do it manually.
16 |
17 | ## Regression and classification
18 |
19 | The difference between regression and classification is simple:
20 | - Regression predicts a continuous variable ``y`` (such as height based on weight).
- Classification predicts a variable ``y`` with a finite number of states (such as cat/dog/none from images).
22 |
23 | The body-mass index is used to measure fitness. It has a simple formula
24 |
25 | ```math
26 | \operatorname{BMI} = \frac{w}{h^2},
27 | ```
28 |
29 | where ``w`` is the weight and ``h`` is the height. If we do not know the formula, we may estimate it from data. We denote ``x=(w,h)`` the samples and ``y=\operatorname{BMI}`` the labels. Then *regression* considers the following data.
30 |
31 | | ``x^1`` | ``x^2`` | ``y`` |
32 | | :-- | :-- | :-- |
33 | | 94 | 1.8 | 29.0 |
34 | | 50 | 1.59 | 19.8 |
35 | | 70 | 1.7 | 24.2 |
36 | | 110 | 1.7 | 38.1 |
37 |
38 | The upper index denotes components while the lower index denotes samples. Sometimes it is not necessary to determine the exact BMI value but only whether a person is healthy, which is defined as any BMI value in the interval ``[18.5, 25]``. When we assign label ``0`` to underweight people, label ``1`` to normal people and label ``2`` to overweight people, then *classification* considers the following data.
39 |
40 | | ``x^1`` | ``x^2`` | ``y`` |
41 | | :-- | :-- | :-- |
42 | | 94 | 1.8 | 2 |
43 | | 50 | 1.59 | 1 |
44 | | 70 | 1.7 | 1 |
45 | | 110 | 1.7 | 2 |
46 |
47 | ## Mathematical formulation
48 |
49 | Recall that the samples are denoted ``x_i`` while the labels ``y_i``. Having ``n`` datapoints in the dataset, the training procedure finds weights ``w`` by solving
50 |
51 | ```math
52 | \operatorname{minimize}_w\qquad \frac 1n \sum_{i=1}^n\operatorname{loss}\big(y_i, \operatorname{predict}(w;x_i)\big).
53 | ```
54 |
55 | This minimizes the average discrepancy between labels and predictions. We need to specify the prediction function ``\operatorname{predict}`` and the loss function ``\operatorname{loss}``. This lecture considers linear predictions
56 |
57 | ```math
58 | \operatorname{predict}(w;x) = w^\top x,
59 | ```
60 |
61 | while non-linear predictions are considered in the following lecture.
62 |
63 | !!! info "Linear classifiers:"
64 | We realize that
65 |
66 | ```math
67 | w^\top x + b = (w, b)^\top \begin{pmatrix}x \\ 1\end{pmatrix}.
68 | ```
69 |
That means that if we append ``1`` to each sample ``x_i``, it is sufficient to consider the classifier in the form ``w^\top x`` without the bias (shift, intercept) ``b``. This allows for a simpler implementation.
71 |
72 | !!! compat "BONUS: Data transformation"
73 | Linear models have many advantages, such as simplicity or guaranteed convergence for optimization methods. Sometimes it is possible to transform non-linear dependences into linear ones. For example, the body-mass index
74 |
75 | ```math
76 | \operatorname{BMI} = \frac{w}{h^2}
77 | ```
78 |
79 | is equivalent to the linear dependence
80 |
81 | ```math
82 | \log \operatorname{BMI} = \log w - 2\log h
83 | ```
84 |
85 | in logarithmic variables. We show the same table as for regression but with logarithmic variable values.
86 |
87 | | ``\log x^1`` | ``\log x^2`` | ``\log y`` |
88 | | :-- | :-- | :-- |
89 | | 4.54 | 0.59 | 3.37 |
90 | | 3.91 | 0.46 | 2.99 |
91 | | 4.25 | 0.53 | 3.19 |
| 4.70 | 0.53 | 3.64 |
93 |
94 | It is not difficult to see the simple linear relation with coefficients ``1`` and ``-2``, namely ``\log y = \log x^1 - 2\log x^2.``
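A sketch verifying this on the table above (our own code; it reuses the closed-form least squares from the linear regression section):

```julia
logx1 = [4.54, 3.91, 4.25, 4.70]  # log weight
logx2 = [0.59, 0.46, 0.53, 0.53]  # log height
logy = [3.37, 2.99, 3.19, 3.64]   # log BMI

A = [logx1 logx2]
A \ logy  # ≈ [1.0, -2.0]
```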
95 |
--------------------------------------------------------------------------------
/docs/src/lecture_10/nn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/lecture_10/nn.png
--------------------------------------------------------------------------------
/docs/src/lecture_11/data/mnist.jl:
--------------------------------------------------------------------------------
1 | using Flux
2 | using Flux: onecold
3 | using MLDatasets
4 |
include(joinpath(@__DIR__, "utilities.jl"))
6 |
7 | T = Float32
8 | dataset = MLDatasets.MNIST
9 |
10 | X_train, y_train, X_test, y_test = load_data(dataset; T=T, onehot=true)
11 |
12 | model = Chain(
13 | Conv((2, 2), 1 => 16, sigmoid),
14 | MaxPool((2, 2)),
15 | Conv((2, 2), 16 => 8, sigmoid),
16 | MaxPool((2, 2)),
17 | Flux.flatten,
18 | Dense(288, size(y_train, 1)),
19 | softmax,
20 | )
21 |
22 | file_name = evaldir("mnist_sigmoid.jld2")
23 | train_or_load!(file_name, model, X_train, y_train)
24 |
# indices of the first five training images of digits 1 and 9
ii1 = findall(onecold(y_train, 0:9) .== 1)[1:5]
ii2 = findall(onecold(y_train, 0:9) .== 9)[1:5]
27 |
# for every digit, plot five samples: the raw image and the feature maps
# after the first and second convolutional blocks
for digit in 0:9
    ii0 = findall(onecold(y_train, 0:9) .== digit)[1:5]

    p0 = [plot_image(X_train[:, :, :, i:i][:, :, 1, 1]) for i in ii0]
    p1 = [plot_image((model[1:2](X_train[:, :, :, i:i]))[:, :, 1, 1]) for i in ii0]
    p2 = [plot_image((model[1:4](X_train[:, :, :, i:i]))[:, :, 1, 1]) for i in ii0]

    p = plot(p0..., p1..., p2...; layout=(3, 5))
    display(p)
end
38 |
39 | p0 = [plot_image(X_train[:, :, :, i:i][:, :, 1, 1]) for i in ii1]
40 | p1 = [plot_image((model[1:2](X_train[:, :, :, i:i]))[:, :, 1, 1]) for i in ii1]
41 | p2 = [plot_image((model[1:4](X_train[:, :, :, i:i]))[:, :, 1, 1]) for i in ii1]
42 |
43 | plot(p0..., p1..., p2...; layout=(3, 5))
44 |
45 |
46 | p0 = [plot_image(X_train[:, :, :, i:i][:, :, 1, 1]) for i in ii2]
47 | p1 = [plot_image((model[1:2](X_train[:, :, :, i:i]))[:, :, 1, 1]) for i in ii2]
48 | p2 = [plot_image((model[1:4](X_train[:, :, :, i:i]))[:, :, 1, 1]) for i in ii2]
49 |
50 | plot(p0..., p1..., p2...; layout=(3, 5))
51 |
# print the output size after each layer of the model
for i in 1:length(model)
    println(size(model[1:i](X_train[:, :, :, 1:1])))
end
55 |
--------------------------------------------------------------------------------
/docs/src/lecture_11/data/mnist.jld2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/lecture_11/data/mnist.jld2
--------------------------------------------------------------------------------
/docs/src/lecture_11/data/mnist_gpu.jl:
--------------------------------------------------------------------------------
1 | using MLDatasets
2 | using Flux
3 |
include(joinpath(@__DIR__, "utilities.jl"))
5 |
6 | dataset = MLDatasets.MNIST
7 | T = Float32
8 | X_train, y_train, X_test, y_test = load_data(dataset; T=T, onehot=true)
9 |
# alternative: the same architecture with sigmoid activations instead of relu
# model = Chain(
11 | # Conv((2, 2), 1 => 16, sigmoid),
12 | # MaxPool((2, 2)),
13 | # Conv((2, 2), 16 => 8, sigmoid),
14 | # MaxPool((2, 2)),
15 | # Flux.flatten,
16 | # Dense(288, size(y_train, 1)),
17 | # softmax,
18 | # ) |> gpu
19 |
20 | model = Chain(
21 | Conv((2, 2), 1 => 16, relu),
22 | MaxPool((2, 2)),
23 | Conv((2, 2), 16 => 8, relu),
24 | MaxPool((2, 2)),
25 | Flux.flatten,
26 | Dense(288, size(y_train, 1)),
27 | softmax,
) |> gpu  # move the model to the GPU if CUDA is available
29 |
30 | file_name = evaldir("mnist.jld2")
31 | train_or_load!(file_name, model, X_train, y_train; n_epochs=100, force=true)
32 |
accuracy(model, gpu(X_test), gpu(y_test))
34 |
--------------------------------------------------------------------------------
/docs/src/lecture_11/data/mnist_sigmoid.jld2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/lecture_11/data/mnist_sigmoid.jld2
--------------------------------------------------------------------------------
/docs/src/lecture_11/data/turtle.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/lecture_11/data/turtle.jpg
--------------------------------------------------------------------------------
/docs/src/lecture_11/data/turtles.jl:
--------------------------------------------------------------------------------
1 | using Images, ImageMagick, ImageFiltering
2 | using Plots
3 |
4 | file_name_in = joinpath("data", "turtle.jpg")
5 | file_name_out = joinpath("data", "turtles.png")
6 |
7 | img = load(file_name_in)
8 |
K1 = Float64.([1 1 1; 1 1 1; 1 1 1] ./ 9)  # smoothening (averaging) kernel
img1 = imfilter(img, K1)

K2 = Float64.([-1 -1 -1; -1 8 -1; -1 -1 -1])  # edge-detection kernel
img2 = imfilter(img, K2)
14 |
# clamp all RGB channels to [0, 1] so the images can be saved
function fix_bounds!(img)
    for i in 1:size(img, 1), j in 1:size(img, 2)
        img[i, j] = RGB(
            clamp(img[i, j].r, 0, 1),
            clamp(img[i, j].g, 0, 1),
            clamp(img[i, j].b, 0, 1),
        )
    end
end
25 |
26 | fix_bounds!(img1)
27 | save("turtle1.jpg", img1)
28 |
29 | fix_bounds!(img2)
30 | save("turtle2.jpg", img2)
31 |
32 | p = plot(
33 | axis = nothing,
34 | layout = @layout([a b c]),
35 | size=1.5.*(700,300)
36 | )
37 |
38 | plot!(p[1], img, ratio=1)
39 | plot!(p[2], img1, ratio=1)
40 | plot!(p[3], img2, ratio=1)
41 |
42 | savefig(p, file_name_out)
43 |
--------------------------------------------------------------------------------
/docs/src/lecture_11/data/utilities.jl:
--------------------------------------------------------------------------------
1 | using MLDatasets
2 | using Flux
3 | using JLD2
4 | using Random
5 | using Statistics
6 | using Base.Iterators: partition
7 | using Flux: crossentropy, onehotbatch, onecold
8 | using Plots
9 | using Pkg
10 |
11 | if haskey(Pkg.project().dependencies, "CUDA")
12 | using CUDA
13 | else
14 | gpu(x) = x
15 | end
16 |
17 | evaldir(args...) = joinpath(dirname(@__FILE__), args...)
18 |
19 | accuracy(model, x, y) = mean(onecold(cpu(model(x))) .== onecold(cpu(y)))
20 |
21 | function reshape_data(X::AbstractArray{T,3}, y::AbstractVector) where {T}
22 | s = size(X)
23 | return reshape(X, s[1], s[2], 1, s[3]), reshape(y, 1, :)
24 | end
25 |
26 | function load_data(dataset; onehot=false, T=Float32)
27 | classes = 0:9
28 | X_train, y_train = reshape_data(dataset(T, :train)[:]...)
29 | X_test, y_test = reshape_data(dataset(T, :test)[:]...)
30 | y_train = T.(y_train)
31 | y_test = T.(y_test)
32 |
33 | if onehot
34 | y_train = onehotbatch(y_train[:], classes)
35 | y_test = onehotbatch(y_test[:], classes)
36 | end
37 |
38 | return X_train, y_train, X_test, y_test
39 | end
40 |
41 | function train_model!(
42 | model,
43 | X,
44 | y;
45 | opt=Adam(0.001),
46 | batch_size=128,
47 | n_epochs=10,
48 | file_name="",
49 | )
50 |
51 | loss(x, y) = crossentropy(model(x), y)
52 |
53 | batches_train = map(partition(randperm(size(y, 2)), batch_size)) do inds
54 | return (gpu(X[:, :, :, inds]), gpu(y[:, inds]))
55 | end
56 |
57 | for epoch in 1:n_epochs
58 | @show epoch
59 | Flux.train!(loss, Flux.params(model), batches_train, opt)
60 | end
61 |
62 | !isempty(file_name) && jldsave(file_name; model_state=Flux.state(model) |> cpu)
63 |
64 | return
65 | end
66 |
67 | function train_or_load!(file_name, model, args...; force=false, kwargs...)
68 |
69 | !isdir(dirname(file_name)) && mkpath(dirname(file_name))
70 |
71 | if force || !isfile(file_name)
72 | train_model!(model, args...; file_name=file_name, kwargs...)
73 | else
74 | model_state = JLD2.load(file_name, "model_state")
75 | Flux.loadmodel!(model, model_state)
76 | end
77 | end
78 |
79 | plot_image(x::AbstractArray{T,2}) where {T} = plot(Gray.(x'), axis=nothing)
80 |
81 | function plot_image(x::AbstractArray{T,4}) where {T}
82 | @assert size(x, 4) == 1
83 | plot_image(x[:, :, :, 1])
84 | end
85 |
86 | function plot_image(x::AbstractArray{T,3}) where {T}
87 | @assert size(x, 3) == 1
88 | plot_image(x[:, :, 1])
89 | end
90 |
--------------------------------------------------------------------------------
/docs/src/lecture_11/iris.md:
--------------------------------------------------------------------------------
1 | # Introduction to Flux
2 |
Flux is a library for building and training neural networks. This part presents the basics of Flux on the Iris dataset from the previous lecture. We collected the auxiliary functions from the previous lecture in the `utilities.jl` file, which we load by
4 |
5 | ```@example iris
6 | include("utilities.jl")
7 |
8 | nothing # hide
9 | ```
10 |
11 | We set the seed and load the data in the same way as during the last lecture.
12 |
13 | ```@example iris
14 | using RDatasets
15 | using Random
16 |
17 | Random.seed!(666)
18 |
19 | iris = dataset("datasets", "iris")
20 |
21 | X = Matrix{Float32}(iris[:, 1:4])
22 | y = iris.Species
23 |
24 | X_train, y_train, X_test, y_test, classes = prepare_data(X', y; dims=2)
25 |
26 | nothing # hide
27 | ```
28 |
29 | ## Creating the network
30 |
We recall that machine learning minimizes the discrepancy between the predictions ``\operatorname{predict}(w; x_i)`` and labels ``y_i``. Mathematically, this amounts to minimizing the following objective function.
32 |
33 | ```math
34 | L(w) = \frac1n\sum_{i=1}^n \operatorname{loss}(y_i, \operatorname{predict}(w; x_i)).
35 | ```
36 |
To build the objective ``L``, we first specify the prediction function ``\operatorname{predict}``, which we denote by the model `m`. We create the same network as in the previous lecture by the function `Chain`. Its inputs are the individual layers. Dense layers are created by `Dense` with the correct number of input and output neurons. We also need to specify the activation functions.
38 |
39 | ```@example iris
40 | using Flux
41 |
42 | n_hidden = 5
43 | m = Chain(
44 | Dense(size(X_train,1) => n_hidden, relu),
45 | Dense(n_hidden => size(y_train,1), identity),
46 | softmax,
47 | )
48 |
49 | nothing # hide
50 | ```
51 |
52 | Since `identity` is the default argument, it is possible to remove it in the second layer. However, we recommend keeping it for clarity.
53 |
54 | We can evaluate the whole dataset.
55 |
56 | ```@example iris
57 | m(X_train)
58 | ```
59 |
60 | Because there are ``3`` classes and ``120`` samples in the training set, it returns an array of size ``3\times 120``. Each column corresponds to one sample and forms a vector of probabilities due to the last layer of softmax.
61 |
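To turn these probabilities into class predictions, we can take the column-wise argmax, for example via `Flux.onecold` (a short sketch; `classes` was returned by `prepare_data` above):

```julia
Flux.onecold(m(X_train), classes)  # vector of 120 predicted species
```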
We access the neural network parameters by `params(m)`. We can select the second layer of `m` by `m[2]`. Since the second layer has ``5`` input and ``3`` output neurons, its parameters are a matrix of size ``3\times 5`` and a vector of length ``3``. The parameters `params(m[2])` are a tuple of the matrix and the vector. The parameters are initialized randomly, so we do not need to take care of the initialization ourselves. We can also easily modify any parameters.
63 |
64 | ```@example iris
65 | using Flux: params
66 |
67 | params(m[2])[2] .= [-1;0;1]
68 |
69 | nothing # hide
70 | ```
71 |
72 | ## Training the network
73 |
74 | To train the network, we need to define the objective function ``L``. Since we already defined ``\operatorname{predict}``, it suffices to define the loss function ``\operatorname{loss}``. Since we work with a multi-class problem, the loss function is usually the cross-entropy.
75 |
76 | ```@example iris
77 | using Flux: crossentropy
78 |
79 | L(ŷ, y) = crossentropy(ŷ, y)
80 |
81 | nothing # hide
82 | ```
83 |
The `loss` function takes a prediction $\hat{y}$ and a true label $y$. Therefore, we can evaluate the objective function by
85 |
86 | ```@example iris
87 | L(m(X_train), y_train)
88 | ```
89 |
where `ŷ = m(X_train)` are the predictions for the whole training set.
91 |
92 | This computes the objective function on the whole training set. Since Flux is (unlike our implementation from the last lecture) smart, there is no need to take care of individual samples.
93 |
94 | !!! info "Notation:"
95 | While the [standard definition](https://en.wikipedia.org/wiki/Cross_entropy) of cross-entropy is ``\operatorname{loss}(y,\hat y)``, [Flux](https://fluxml.ai/Flux.jl/stable/models/losses/) uses ``\operatorname{loss}(\hat y,y)``.
96 |
97 | Since we have the model and the loss function, the only remaining thing is the gradient. Flux again provides a smart way to compute it.
98 |
99 | ```@example iris
100 | grads = Flux.gradient(m -> L(m(X_train), y_train), m)
101 |
102 | nothing # hide
103 | ```
104 |
105 | The function `gradient` takes as inputs a function to differentiate, and arguments that specify the parameters we want to differentiate with respect to. Since the argument is the model `m` itself, the gradient is taken with respect to the parameters of `m`. The `L` function needs to be evaluated at the correct points `m(X_train)` (predictions) and `y_train` (true labels).
106 |
107 | The `grads` structure is a tuple holding a named tuple with the `:layers` key. Each layer then holds the parameters of the model, in this case, the weights $W$, bias $b$, and optionally parameters of the activation function $\sigma$.
108 |
109 | ```julia
110 | julia> grads[1][:layers][2]
111 | (weight = Float32[0.30140522 0.007785671 … -0.070617765 0.014230583; 0.06814249 -0.07018863 … 0.17996183 -0.20995824; -0.36954764 0.062402964 … -0.10934405 0.19572766], bias = Float32[0.0154182855, 0.022615476, -0.03803377], σ = nothing)
112 | ```
113 |
Now, we train the classifier for 250 iterations. In each iteration, we compute the gradient with respect to all network parameters and perform gradient descent with stepsize ``0.1``. Since Flux@0.14, optimisers have changed from an implicit to an explicit definition. We therefore need to call `Flux.setup(optimiser, model)` to create an optimiser state for the model's parameters.
115 |
116 | ```@example iris
117 | opt = Descent(0.1)
118 | opt_state = Flux.setup(opt, m)
119 | max_iter = 250
120 |
121 | acc_train = zeros(max_iter)
122 | acc_test = zeros(max_iter)
123 | for i in 1:max_iter
124 | gs = Flux.gradient(m -> L(m(X_train), y_train), m)
125 | Flux.update!(opt_state, m, gs[1])
126 | acc_train[i] = accuracy(X_train, y_train)
127 | acc_test[i] = accuracy(X_test, y_test)
128 | end
129 |
130 | nothing # hide
131 | ```
132 |
Both the training and testing accuracy keep increasing as the training progresses. This is a good check that we are not over-fitting.
134 |
135 | ```@example iris
136 | using Plots
137 |
138 | plot(acc_train, xlabel="Iteration", ylabel="Accuracy", label="train", ylim=(-0.01,1.01))
139 | plot!(acc_test, xlabel="Iteration", label="test", ylim=(-0.01,1.01))
140 |
141 | savefig("Iris_train_test_acc.svg") # hide
142 | ```
143 |
144 | 
145 |
--------------------------------------------------------------------------------
/docs/src/lecture_11/nn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/lecture_11/nn.png
--------------------------------------------------------------------------------
/docs/src/lecture_11/theory.md:
--------------------------------------------------------------------------------
1 | # Theory of neural networks
2 |
3 | In the previous lecture, we presented an introduction to neural networks. We also showed how to train neural networks using gradient descent. This lecture is going to show more layers and a more sophisticated way of training.
4 |
5 | ## Convolutional layers
6 |
7 | The last lecture concentrated on the dense layer. Even though it is widely used due to its simplicity, it suffers from several disadvantages, especially in visual recognition. These disadvantages include:
8 | - *Large number of parameters*. For an image with ``500\times 500\times 3`` pixels and the output layer of only ``1000`` neurons, the dense layer would contain ``750`` million parameters. This is too much to optimize.
9 | - *No structural information*. Dense layers assign a weight to every pixel and then add the weighted values. This means that information from the top-leftmost and bottom-rightmost pixels of the image will be combined. Since a combination of these two pixels should carry no meaningful information, redundant computation is performed.
10 | Convolutional layers were designed to alleviate these issues.
11 |
12 | #### Motivation
13 |
14 | To understand the convolutional layers, we need to go back to the definition of convolution. Having a function ``f`` and a kernel ``g``, their convolution is defined by
15 |
16 | ```math
17 | (f\ast g)(x) = \int_{-\infty}^{\infty} f(x - t)g(t) dt.
18 | ```
19 |
20 | Let us consider the simplest case when
21 |
22 | ```math
23 | g(t) = \begin{cases} \frac{1}{2\varepsilon} &\text{if }t\in[-\varepsilon,\varepsilon], \\ 0 &\text{otherwise.} \end{cases}
24 | ```
25 |
26 | Then
27 |
28 | ```math
29 | (f\ast g)(x) = \int_{-\infty}^{\infty} f(x - t)g(t) dt = \frac{1}{2\varepsilon}\int_{-\varepsilon}^{\varepsilon}f(x - t)dt.
30 | ```
31 |
32 | Then ``(f\ast g)(x)`` does not take the value of ``f`` at ``x`` but integrates ``f`` over a small neighbourhood of ``x``. Applying this kernel results in a smoothening of ``f``.
33 |
34 | In image processing, the image ``f`` is not represented by a function but by a collection of pixels. The kernel ``g`` is represented by a small matrix. For the commonly used ``3\times 3`` kernel matrix, the convolution has the form
35 |
36 | ```math
(f\ast g)(x,y) = \sum_{i=-1}^1\sum_{j=-1}^1 f(x+i,y+j)g(i,j).
38 | ```
39 |
40 | The following kernels
41 |
42 | ```math
43 | K_1 = \begin{pmatrix} 0 & 0 & 0 \\ 0 & 1 & 0 \\ 0 & 0 & 0 \end{pmatrix}, \qquad
44 | K_2 = \frac 19\begin{pmatrix} 1 & 1 & 1 \\ 1 & 1 & 1 \\ 1 & 1 & 1 \end{pmatrix}, \qquad
45 | K_3 = \begin{pmatrix} -1 & -1 & -1 \\ -1 & 8 & -1 \\ -1 & -1 & -1 \end{pmatrix}
46 | ```
47 |
48 | perform identity, image smoothening and edge detection, respectively.
49 |
50 | 
51 |
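Applying such kernels is easy to try yourself. A sketch along the lines of the script that generated the figure (assuming the `Images` and `ImageFiltering` packages and some image file `turtle.jpg`):

```julia
using Images, ImageFiltering

img = load("turtle.jpg")            # any RGB image

K2 = fill(1/9, 3, 3)                # smoothening kernel
K3 = [-1 -1 -1; -1 8 -1; -1 -1 -1]  # edge-detection kernel

img_smooth = imfilter(img, K2)
img_edges = imfilter(img, K3)
```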
52 | #### Formulas
53 |
54 | Traditional techniques for image processing use multiple fixed kernels and combine their results. The idea of convolutional layers is to remove all human-made assumptions about which kernels to choose and learn the kernels' parameters based purely on data. Even though it gives superb results, it also removes any insight or interpretation humans may make.
55 |
56 | 
57 |
The input of a convolutional layer has dimension ``I_1\times J_1\times C_1``, where ``I_1\times J_1`` is the size of the image and ``C_1`` is the number of channels (1 for grayscale, 3 for coloured, anything for hidden layers). The layer is parameterized by the kernel ``K``. The output of the convolutional layer has dimension ``I_2\times J_2\times C_2``, and its value at some ``(i_0,j_0,c_0)`` equals
59 |
60 | ```math
\text{output}(i_0,j_0,c_0) = l\left(b(c_0) + \sum_{c=1}^{C_1}\sum_{i=-a}^{a}\sum_{j=-b}^b K_{c_0}(i,j,c)\, \text{input}(i_0+i,j_0+j,c)\right).
62 | ```
63 |
After the linear operation inside, an activation function ``l`` is applied. Without it, the whole network would be a composition of linear functions and, therefore, itself a linear function (written in a complicated form).
65 |
66 | The natural question is the interpretation of the linear operator and the number of parameters:
- The kernel matrix ``K`` contains ``(2a+1)(2b+1)C_1C_2`` parameters. What does it mean? First, there is a separate kernel for each output channel. Second, the kernel also averages (more precisely, computes a linear combination) over all input channels. However, the coefficients of this linear combination do not depend on the position ``(i_0,j_0)``.
68 | - The bias ``b`` has dimension ``C_2``. Again, it does not depend on the position ``(i_0,j_0)``.
The important thing to realize is that the number of parameters does not depend on the size of the image or the hidden layers. For example, even for an input image of size ``500\times 500\times 3``, a convolutional layer with a ``3\times 3`` kernel and ``16`` output channels contains only ``3\cdot 3\cdot 3\cdot 16 + 16 = 448`` parameters.
70 |
71 | This results in fixing the two issues mentioned above.
72 | - The number of parameters of convolutional layers stays relatively small.
73 | - Using kernels means that only local information from neighbouring pixels is propagated to subsequent layers.
74 |
75 | ## Network structure
76 |
77 | When an input is an image, the usual structure of the neural network is the following:
78 | - Convolutional layer followed by a pooling layer.
79 | - This is repeated many times.
80 | - Flatten layer (it reshapes the three-dimensional tensor into a vector).
81 | - Dense (fully connected) layer.
82 | - Softmax layer.
83 | - Cross-entropy loss function.
84 |
85 | !!! compat "BONUS: Additional layers"
Practical convolutional layers involve additional complexities such as kernels with even size (we showed only odd sizes), padding (should zeros be added, or should the output image be smaller) or stride (should there be any distance between convolutions). This goes, however, beyond the lecture.
87 |
88 | #### Recurrent layer
89 |
Recurrent layers are designed to handle one-dimensional data. They are similar to convolutional layers with ``J_1=J_2=C_1=C_2=1``. Unlike convolutional layers, they store additional hidden variables. The best-known representative is the long short-term memory (LSTM) cell.
91 |
92 | #### Pooling layer
93 |
The goal of pooling layers is to reduce the size of the network. They take a small (such as ``2\times 2``) window and perform a simple operation on it (such as maximum or mean). Since the pooled windows do not overlap, this halves the size of each spatial dimension. Pooling layers do not have any trainable parameters.
95 |
96 | #### Skip connections
97 |
98 | From the previous lecture, we know that the gradient is computed via the chain rule
99 | ```math
100 | \nabla f = \nabla f_M\nabla f_{M-1}\dots\nabla f_1.
101 | ```
Since the formula contains multiplication, if any of the gradients is too small, the whole gradient will be too small. Specifically, the deeper the network, the higher the chance that the initial point lies in a region with a small gradient, and the training will progress slowly. This phenomenon is called vanishing gradients.
103 |
104 | To mitigate vanishing gradients, skip connections are sometimes added. Even though a skip connection is not a layer, we include it here. Skip connections do precisely what their name suggests: they skip one or more layers. This makes the network more flexible: due to its deep structure, it can approximate complicated functions, and due to its shallow structure (created by the skip connections), the initial training can be fast.
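
Flux provides this pattern directly; a minimal sketch, assuming that the input and output dimensions of the skipped layer match:

```julia
using Flux

# output = dense(x) + x; the `+` combines the layer output with the skipped input
block = SkipConnection(Dense(32, 32, relu), +)
```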
105 |
106 | ## Stochastic gradient descent
107 |
108 | We recall that machine learning problems minimize the loss function
109 |
110 | ```math
111 | L(w) = \frac1n\sum_{i=1}^n \operatorname{loss}(y_i, f(w; x_i)).
112 | ```
113 |
114 | Its gradient equals
115 |
116 | ```math
117 | \nabla L(w) = \frac1n\sum_{i=1}^n \operatorname{loss}'(y_i, f(w; x_i))\nabla_w f(w; x_i).
118 | ```
119 |
120 | If the dataset contains many samples (``n`` is large), then it takes a long time to compute the gradient. Therefore, the full gradient is replaced by its stochastic (random) approximation
121 |
122 | ```math
123 | \frac1{|I|}\sum_{i\in I} \operatorname{loss}'(y_i, f(w; x_i))\nabla_w f(w; x_i).
124 | ```
125 |
126 | Here, the minibatch ``I`` is a small (``32, 64, \dots`` samples) subset of all samples ``\{1,\dots,n\}``. Sometimes plain gradient descent is replaced by other methods such as Adam or RMSprop, which take the history of the gradients into account.
127 |
128 | This technique is called stochastic gradient descent. During one epoch (the time in which the optimizer evaluates each sample once), it performs many gradient updates (unlike standard gradient descent, which performs only one update per epoch). Even though these updates are imprecise, numerical experiments show that stochastic gradient descent is much faster than standard gradient descent. The probable reason is that the dataset contains lots of redundant information, so the full gradient performs unnecessary computation, which slows it down.
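
A minimal sketch of the resulting training loop; `minibatch_grad` is a hypothetical function returning the stochastic approximation above:

```julia
using Random

# stochastic gradient descent: many small updates per epoch
function sgd(minibatch_grad, w, n; batchsize=32, epochs=10, α=0.01)
    for _ in 1:epochs
        # every sample is visited exactly once per epoch, in random order
        for I in Iterators.partition(randperm(n), batchsize)
            w -= α * minibatch_grad(w, I)
        end
    end
    return w
end
```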
129 |
--------------------------------------------------------------------------------
/docs/src/lecture_11/turtles.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/docs/src/lecture_11/turtles.png
--------------------------------------------------------------------------------
/docs/src/lecture_11/utilities.jl:
--------------------------------------------------------------------------------
1 | using Statistics, Random  # Random provides randperm used in split
2 |
3 | function split(X, y::AbstractVector; dims=1, ratio_train=0.8, kwargs...) # random train/test split along dimension dims
4 | n = length(y)
5 | size(X, dims) == n || throw(DimensionMismatch("..."))
6 |
7 | n_train = round(Int, ratio_train * n)
8 | i_rand = randperm(n)
9 | i_train = i_rand[1:n_train]
10 | i_test = i_rand[n_train+1:end]
11 |
12 | return selectdim(X, dims, i_train), y[i_train], selectdim(X, dims, i_test), y[i_test]
13 | end
14 |
15 | function normalize(X_train, X_test; dims=1, kwargs...) # standardize both sets by the train-set mean and std
16 | col_mean = mean(X_train; dims)
17 | col_std = std(X_train; dims)
18 |
19 | return (X_train .- col_mean) ./ col_std, (X_test .- col_mean) ./ col_std
20 | end
21 |
22 | function prepare_data(X, y; do_normal=true, do_onehot=true, kwargs...) # split, normalize and one-hot encode the data
23 | X_train, y_train, X_test, y_test = split(X, y; kwargs...)
24 |
25 | if do_normal
26 | X_train, X_test = normalize(X_train, X_test; kwargs...)
27 | end
28 |
29 | classes = unique(y)
30 |
31 | if do_onehot
32 | y_train = onehot(y_train, classes)
33 | y_test = onehot(y_test, classes)
34 | end
35 |
36 | return X_train, y_train, X_test, y_test, classes
37 | end
38 |
39 | function onehot(y, classes) # encode labels into a binary matrix with one row per class
40 | y_onehot = falses(length(classes), length(y))
41 | for (i, class) in enumerate(classes)
42 | y_onehot[i, y.==class] .= 1
43 | end
44 | return y_onehot
45 | end
46 |
47 | onecold(y, classes=1:size(y, 1)) = [classes[argmax(y_col)] for y_col in eachcol(y)] # inverse of onehot: most likely class per column
48 |
49 | accuracy(x, y) = mean(onecold(m(x)) .== onecold(y)) # assumes a model m defined in the surrounding lecture code
50 |
--------------------------------------------------------------------------------
/src/JuliaCourseFNSPE.jl:
--------------------------------------------------------------------------------
1 | module JuliaCourseFNSPE
2 |
3 | # Write your package code here.
4 |
5 | end
6 |
--------------------------------------------------------------------------------
/test/runtests.jl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaTeachingCTU/Julia-for-Optimization-and-Learning/2ceaa05d0bac5e43db96348c30d7f0176e0f81e4/test/runtests.jl
--------------------------------------------------------------------------------