├── .github
└── FUNDING.yml
├── .gitignore
├── .nbexports
├── introductory-tutorials
│ ├── broader-topics-and-ecosystem
│ │ ├── intro-to-julia-DataFrames
│ │ │ ├── 01_constructors.jl
│ │ │ ├── 02_basicinfo.jl
│ │ │ ├── 03_missingvalues.jl
│ │ │ ├── 04_loadsave.jl
│ │ │ ├── 05_columns.jl
│ │ │ ├── 06_rows.jl
│ │ │ ├── 07_factors.jl
│ │ │ ├── 08_joins.jl
│ │ │ ├── 09_reshaping.jl
│ │ │ ├── 10_transforms.jl
│ │ │ ├── 11_performance.jl
│ │ │ ├── 12_pitfalls.jl
│ │ │ └── 13_extras.jl
│ │ ├── intro-to-julia-for-data-science
│ │ │ ├── 1. Julia for Data Science - Data.jl
│ │ │ ├── 2. Julia for Data Science - Algorithms.jl
│ │ │ ├── 3. Julia for Data Science - Plotting.jl
│ │ │ └── short-version
│ │ │ │ ├── 01.Julia_for_data_science-Data.jl
│ │ │ │ ├── 02.Julia_for_data_science-Algorithms.jl
│ │ │ │ ├── 03.Julia_for_data_science-Plotting.jl
│ │ │ │ └── 04.Bonus_MNIST_example.jl
│ │ ├── intro-to-juliadb
│ │ │ ├── 1. JuliaDB Basics.jl
│ │ │ ├── 2. Table Usage.jl
│ │ │ ├── 2.5. NDSparse Usage.jl
│ │ │ ├── 3. Distributed Data.jl
│ │ │ └── 4. OnlineStats Integration.jl
│ │ ├── intro-to-ml
│ │ │ ├── 00. Motivation - Classifying fruit.jl
│ │ │ ├── 01. ML - Representing data in a computer.jl
│ │ │ ├── 02. Tools - Using arrays to store data.jl
│ │ │ ├── 03. ML - Representing data with models.jl
│ │ │ ├── 04. Tools - Functions.jl
│ │ │ ├── 05. ML - Building models.jl
│ │ │ ├── 06. Tools - Adding a function parameter.jl
│ │ │ ├── 07. ML - Model complexity.jl
│ │ │ ├── 08. Tools - Multiple function parameters.jl
│ │ │ ├── 09. ML - What is learning.jl
│ │ │ ├── 10. Tools - Minimizing functions - how a computer learns.jl
│ │ │ ├── 11. ML - Intro to neurons.jl
│ │ │ ├── 12. Tools - Learning with a single neuron.jl
│ │ │ ├── 13. ML - Intro to Flux.jl.jl
│ │ │ ├── 14. Tools - Learning with a single neuron using Flux.jl.jl
│ │ │ ├── 15. ML - Intro to neural networks.jl
│ │ │ ├── 16. Tools - Using Flux to build a single layer neural net.jl
│ │ │ ├── 17. ML - Introduction to deep learning.jl
│ │ │ ├── 18. Tools - Multi-layer neural networks with Flux.jl
│ │ │ ├── 19. Recognizing handwriting using a neural network.jl
│ │ │ ├── 20. Automatic Differentiation in 10 Minutes.jl
│ │ │ ├── 21. Express path to classifying images.jl
│ │ │ └── 22. Julia is fast.jl
│ │ ├── intro-to-solving-diffeq-in-julia
│ │ │ ├── 1.ODEIntroduction.jl
│ │ │ ├── 2.ChoosingODEAlgorithm.jl
│ │ │ ├── 3.OptimizingDiffEqCode.jl
│ │ │ ├── 4.CallbacksAndEvents.jl
│ │ │ └── 5.FormattingPlots.jl
│ │ ├── intro-to-the-queryverse
│ │ │ └── AnthoffQueryverseJune2018.jl
│ │ └── introduction-to-dynamicalsystems.jl
│ │ │ ├── 0. Introduction, Documentation and Prerequisites.jl
│ │ │ ├── 1. Creating a dynamical system.jl
│ │ │ ├── 2. Orbit diagrams, Surfaces of Section.jl
│ │ │ ├── 3. Quantifying Chaos (Lyapunov Exponents).jl
│ │ │ ├── 4. Entropies & Dimensions.jl
│ │ │ ├── 5. Delay Coordinates & Neighborhoods.jl
│ │ │ ├── 6. Miscellaneous Algorithms.jl
│ │ │ └── 7. Timeseries Prediction.jl
│ ├── intro-to-julia-ES
│ │ ├── 2. Cadenas.jl
│ │ ├── 3. Estructuras de datos.jl
│ │ ├── 4. Bucles.jl
│ │ ├── 5. Condicionales.jl
│ │ ├── 6. Funciones.jl
│ │ ├── 7. Paquetes.jl
│ │ ├── 8. Graficas.jl
│ │ ├── 9. Despacho multiple.jl
│ │ ├── 1. Calentando motores.jl
│ │ ├── 10. Algebra lineal basica.jl
│ │ ├── 11.Factorizaciones y otras diversiones.jl
│ │ ├── 12. Julia es rapido.jl
│ │ ├── Jupyter_notebooks.jl
│ │ └── Soluciones a ejercicios.jl
│ └── intro-to-julia
│ │ ├── 00. Jupyter_notebooks.jl
│ │ ├── 01. Getting started.jl
│ │ ├── 02. Strings.jl
│ │ ├── 03. Data structures.jl
│ │ ├── 04. Loops.jl
│ │ ├── 05. Conditionals.jl
│ │ ├── 06. Functions.jl
│ │ ├── 07. Packages.jl
│ │ ├── 08. Plotting.jl
│ │ ├── 09. Julia is fast.jl
│ │ ├── 10. Multiple dispatch.jl
│ │ ├── 10.1 Multiple dispatch.jl
│ │ ├── 11. Basic linear algebra.jl
│ │ ├── 12. Factorizations and other fun.jl
│ │ ├── AutoDiff.jl
│ │ ├── Exercise_solutions.jl
│ │ ├── Exploring_benchmarking_and_performance.jl
│ │ ├── Exploring_benchmarking_and_performance_solutions.jl
│ │ ├── Local_installations.jl
│ │ ├── Reduce and Parallel Prefix - JuliaBox.jl
│ │ ├── Working with matrices.jl
│ │ ├── calculate_pi.jl
│ │ ├── calculate_pi_solution.jl
│ │ ├── compressing_an_image.jl
│ │ ├── compressing_an_image_solutions.jl
│ │ ├── create_a_caesar_cipher.jl
│ │ ├── create_a_caesar_cipher_solutions.jl
│ │ ├── long-version
│ │ ├── 000 Jupyter Notebooks.jl
│ │ ├── 001 Variations on Sum.jl
│ │ ├── 010 Arithmetic, Assignment, and Strings.jl
│ │ ├── 020 Arrays and Loops.jl
│ │ ├── 030 Functions and Broadcasting.jl
│ │ ├── 040 Data structures.jl
│ │ ├── 050 Compressing an Image.jl
│ │ ├── 060 Packages.jl
│ │ ├── 070 Plotting.jl
│ │ ├── 100 Working with Matrices.jl
│ │ ├── 110 Multiple dispatch.jl
│ │ ├── 115 Multiple dispatch examples.jl
│ │ ├── 120 Types.jl
│ │ ├── 130 OneHot Vector.jl
│ │ ├── 140 ModInt.jl
│ │ ├── 150 Iterators.jl
│ │ ├── 160 AutoDiff.jl
│ │ ├── 170 Basic linear algebra.jl
│ │ └── 180 Factorizations and other fun.jl
│ │ └── short-version
│ │ ├── 00.Jupyter_notebooks.jl
│ │ ├── 01.Getting_to_know_Julia.jl
│ │ ├── 02.Linear_Algebra.jl
│ │ ├── 03.Using_packages.jl
│ │ ├── 04.Intro_to_plotting.jl
│ │ ├── 05.Julia_is_fast.jl
│ │ └── 06.Multiple_dispatch.jl
├── more-advanced-materials
│ ├── ML-demos
│ │ ├── Metalhead.jl
│ │ ├── TextAnalysis.jl
│ │ ├── knet-nlp
│ │ │ ├── charlm
│ │ │ │ ├── charlm.jl
│ │ │ │ ├── juliatrain.jl
│ │ │ │ └── shaketrain.jl
│ │ │ ├── imdb
│ │ │ │ ├── imdbdemo.jl
│ │ │ │ └── imdbtrain.jl
│ │ │ └── macnet
│ │ │ │ └── visualize.jl
│ │ └── knet-tutorial
│ │ │ ├── 00.Julia_is_fast.jl
│ │ │ ├── 10.Getting_to_know_Julia.jl
│ │ │ ├── 15.quickstart.jl
│ │ │ ├── 20.mnist.jl
│ │ │ ├── 23.learning.jl
│ │ │ ├── 25.iterators.jl
│ │ │ ├── 30.lin.jl
│ │ │ ├── 40.mlp.jl
│ │ │ ├── 50.cnn.jl
│ │ │ ├── 60.rnn.jl
│ │ │ ├── 70.imdb.jl
│ │ │ ├── 80.charlm.jl
│ │ │ ├── 90.s2s.jl
│ │ │ └── colab_install_julia.txt
│ ├── metaprogramming
│ │ └── Metaprogramming.jl
│ └── parallelism-demos
│ │ ├── 01. Distributed Arrays.jl
│ │ ├── 02. Parallelism Basics.jl
│ │ ├── 03. JuliaRun-parallel.jl
│ │ ├── 04. TracyWidom.jl
│ │ ├── 05. JuliaRun-parallel-batch.jl
│ │ └── 06. JuliaRun-http-service.jl
└── zh-cn
│ └── intro-to-julia-ZH
│ ├── 00.上手Jupyter_notebook.jl
│ ├── 01.新手入门.jl
│ ├── 02.字符串.jl
│ ├── 03.数据结构.jl
│ ├── 04.循环.jl
│ ├── 05.条件判断.jl
│ ├── 06.函数.jl
│ ├── 07.包(Packages).jl
│ ├── 08.绘图.jl
│ ├── 09.Julia很快.jl
│ ├── 10.1多重派发.jl
│ ├── 10.多重派发.jl
│ ├── 11.基本线性代数.jl
│ ├── 12. 矩阵分解与其他妙用.jl
│ └── 简短版
│ ├── 00.上手Jupyter_notebook.jl
│ ├── 01.了解Julia.jl
│ ├── 02.Julia中的线性代数.jl
│ ├── 03.包的使用.jl
│ ├── 04.Julia绘图简介.jl
│ ├── 05.Julia很快.jl
│ └── 06.多重派发.jl
├── CONTRIBUTING.md
├── LICENSE.md
├── README.md
├── README_DATALOSS_WARNING.md
├── es-es
└── intro-to-julia-ES
│ ├── 1. Calentando motores.ipynb
│ ├── 2. Cadenas.ipynb
│ ├── 3. Estructuras de datos.ipynb
│ ├── 4. Bucles.ipynb
│ ├── 5. Condicionales.ipynb
│ ├── 6. Funciones.ipynb
│ ├── 7. Paquetes.ipynb
│ ├── 8. Graficas.ipynb
│ ├── 9. Despacho multiple.ipynb
│ ├── 10. Algebra lineal basica.ipynb
│ ├── 11.Factorizaciones y otras diversiones.ipynb
│ ├── 12. Julia es rapido.ipynb
│ ├── README.md
│ └── Soluciones a ejercicios.ipynb
├── introductory-tutorials
├── broader-topics-and-ecosystem
│ ├── intro-to-julia-DataFrames
│ │ ├── 01_constructors.ipynb
│ │ ├── 02_basicinfo.ipynb
│ │ ├── 03_missingvalues.ipynb
│ │ ├── 04_loadsave.ipynb
│ │ ├── 05_columns.ipynb
│ │ ├── 06_rows.ipynb
│ │ ├── 07_factors.ipynb
│ │ ├── 08_joins.ipynb
│ │ ├── 09_reshaping.ipynb
│ │ ├── 10_transforms.ipynb
│ │ ├── 11_performance.ipynb
│ │ ├── 12_pitfalls.ipynb
│ │ ├── 13_extras.ipynb
│ │ ├── LICENSE
│ │ └── README.md
│ ├── intro-to-julia-for-data-science
│ │ ├── 1. Julia for Data Science - Data.ipynb
│ │ ├── 2. Julia for Data Science - Algorithms.ipynb
│ │ ├── 3. Julia for Data Science - Plotting.ipynb
│ │ ├── LICENSE.md
│ │ ├── Manifest.toml
│ │ ├── Project.toml
│ │ ├── README.md
│ │ └── short-version
│ │ │ ├── 01.Julia_for_data_science-Data.ipynb
│ │ │ ├── 02.Julia_for_data_science-Algorithms.ipynb
│ │ │ ├── 03.Julia_for_data_science-Plotting.ipynb
│ │ │ └── 04.Bonus_MNIST_example.ipynb
│ ├── intro-to-juliadb
│ │ ├── 1. JuliaDB Basics.ipynb
│ │ ├── 2. Table Usage.ipynb
│ │ ├── 2.5. NDSparse Usage.ipynb
│ │ ├── 3. Distributed Data.ipynb
│ │ ├── 4. OnlineStats Integration.ipynb
│ │ ├── LICENSE.md
│ │ ├── README.md
│ │ ├── diamonds.csv
│ │ ├── stocks
│ │ ├── stocks.jdb
│ │ └── stocksample
│ │ │ ├── aapl.us.txt
│ │ │ ├── amzn.us.txt
│ │ │ ├── dis.us.txt
│ │ │ ├── googl.us.txt
│ │ │ ├── ibm.us.txt
│ │ │ ├── msft.us.txt
│ │ │ ├── nflx.us.txt
│ │ │ └── tsla.us.txt
│ ├── intro-to-ml
│ │ ├── 00. Motivation - Classifying fruit.ipynb
│ │ ├── 01. ML - Representing data in a computer.ipynb
│ │ ├── 02. Tools - Using arrays to store data.ipynb
│ │ ├── 03. ML - Representing data with models.ipynb
│ │ ├── 04. Tools - Functions.ipynb
│ │ ├── 05. ML - Building models.ipynb
│ │ ├── 06. Tools - Adding a function parameter.ipynb
│ │ ├── 07. ML - Model complexity.ipynb
│ │ ├── 08. Tools - Multiple function parameters.ipynb
│ │ ├── 09. ML - What is learning.ipynb
│ │ ├── 10. Tools - Minimizing functions - how a computer learns.ipynb
│ │ ├── 11. ML - Intro to neurons.ipynb
│ │ ├── 12. Tools - Learning with a single neuron.ipynb
│ │ ├── 13. ML - Intro to Flux.jl.ipynb
│ │ ├── 14. Tools - Learning with a single neuron using Flux.jl.ipynb
│ │ ├── 15. ML - Intro to neural networks.ipynb
│ │ ├── 16. Tools - Using Flux to build a single layer neural net.ipynb
│ │ ├── 17. ML - Introduction to deep learning.ipynb
│ │ ├── 18. Tools - Multi-layer neural networks with Flux.ipynb
│ │ ├── 19. Recognizing handwriting using a neural network.ipynb
│ │ ├── 20. Automatic Differentiation in 10 Minutes.ipynb
│ │ ├── 21. Express path to classifying images.ipynb
│ │ ├── 22. Julia is fast.ipynb
│ │ ├── data
│ │ │ ├── 104_100.jpg
│ │ │ ├── 107_100.jpg
│ │ │ ├── 10_100.jpg
│ │ │ ├── 8_100.jpg
│ │ │ ├── Apple_Braeburn.dat
│ │ │ ├── Apple_Golden_1.dat
│ │ │ ├── Apple_Golden_2.dat
│ │ │ ├── Apple_Golden_3.dat
│ │ │ ├── Banana.dat
│ │ │ ├── Celeste.png
│ │ │ ├── Grape_White.dat
│ │ │ ├── Grape_White_2.dat
│ │ │ ├── apples.dat
│ │ │ ├── array2d.png
│ │ │ ├── array_cartoon.png
│ │ │ ├── array_comprehension.png
│ │ │ ├── bananas.dat
│ │ │ ├── data_flow.png
│ │ │ ├── deep-neural-net.png
│ │ │ ├── fruit-salad.png
│ │ │ ├── model_fitting.png
│ │ │ ├── philip.jpg
│ │ │ ├── single-layer.png
│ │ │ ├── single-neuron.png
│ │ │ ├── what_is_model.png
│ │ │ └── without_arrays.png
│ │ ├── draw_neural_net.jl
│ │ └── motivation.html
│ ├── intro-to-solving-diffeq-in-julia
│ │ ├── 1.ODEIntroduction.ipynb
│ │ ├── 2.ChoosingODEAlgorithm.ipynb
│ │ ├── 3.OptimizingDiffEqCode.ipynb
│ │ ├── 4.CallbacksAndEvents.ipynb
│ │ ├── 5.FormattingPlots.ipynb
│ │ ├── License.md
│ │ └── README.md
│ ├── intro-to-the-queryverse
│ │ ├── AnthoffQueryverseJune2018.ipynb
│ │ ├── README.md
│ │ ├── data
│ │ │ ├── cars.csv
│ │ │ ├── cars.dta
│ │ │ ├── cars.feather
│ │ │ ├── cars.sas7bdat
│ │ │ ├── cars.sav
│ │ │ └── cars.xlsx
│ │ ├── screenshots
│ │ │ ├── voyager1.png
│ │ │ ├── voyager2.png
│ │ │ └── voyager3.png
│ │ └── slides
│ │ │ ├── Slide1.PNG
│ │ │ ├── Slide10.PNG
│ │ │ ├── Slide11.PNG
│ │ │ ├── Slide12.PNG
│ │ │ ├── Slide13.PNG
│ │ │ ├── Slide14.PNG
│ │ │ ├── Slide15.PNG
│ │ │ ├── Slide16.PNG
│ │ │ ├── Slide17.PNG
│ │ │ ├── Slide18.PNG
│ │ │ ├── Slide19.PNG
│ │ │ ├── Slide2.PNG
│ │ │ ├── Slide20.PNG
│ │ │ ├── Slide21.PNG
│ │ │ ├── Slide22.PNG
│ │ │ ├── Slide23.PNG
│ │ │ ├── Slide24.PNG
│ │ │ ├── Slide25.PNG
│ │ │ ├── Slide26.PNG
│ │ │ ├── Slide27.PNG
│ │ │ ├── Slide28.PNG
│ │ │ ├── Slide29.PNG
│ │ │ ├── Slide3.PNG
│ │ │ ├── Slide30.PNG
│ │ │ ├── Slide31.PNG
│ │ │ ├── Slide32.PNG
│ │ │ ├── Slide33.PNG
│ │ │ ├── Slide4.PNG
│ │ │ ├── Slide5.PNG
│ │ │ ├── Slide6.PNG
│ │ │ ├── Slide7.PNG
│ │ │ ├── Slide8.PNG
│ │ │ └── Slide9.PNG
│ └── introduction-to-dynamicalsystems.jl
│ │ ├── 0. Introduction, Documentation and Prerequisites.ipynb
│ │ ├── 1. Creating a dynamical system.ipynb
│ │ ├── 2. Orbit diagrams, Surfaces of Section.ipynb
│ │ ├── 3. Quantifying Chaos (Lyapunov Exponents).ipynb
│ │ ├── 4. Entropies & Dimensions.ipynb
│ │ ├── 5. Delay Coordinates & Neighborhoods.ipynb
│ │ ├── 6. Miscellaneous Algorithms.ipynb
│ │ ├── 7. Timeseries Prediction.ipynb
│ │ ├── KS150_tr40000_D2_tau1_B5_k1.png
│ │ ├── KS150_tr40000_D2_τ1_B5_k1.png
│ │ ├── KS_NRMSE_L6_Q64_D1_tau1_B5_k1_nn4_nw3PndWDWSt.png
│ │ ├── KS_NRMSE_L6_Q64_D1_τ1_B5_k1_nn4_nw3PndWDWSt.png
│ │ ├── README.md
│ │ ├── barkley_crossprediction.gif
│ │ ├── barkley_stts_prediction.gif
│ │ ├── lyapunov.png
│ │ └── standardmap_fp.png
├── intro-to-julia-ES
│ ├── 1. Calentando motores.ipynb
│ └── Jupyter_notebooks.ipynb
└── intro-to-julia
│ ├── 00. Jupyter_notebooks.ipynb
│ ├── 01. Getting started.ipynb
│ ├── 02. Strings.ipynb
│ ├── 03. Data structures.ipynb
│ ├── 04. Loops.ipynb
│ ├── 05. Conditionals.ipynb
│ ├── 06. Functions.ipynb
│ ├── 07. Packages.ipynb
│ ├── 08. Plotting.ipynb
│ ├── 09. Julia is fast.ipynb
│ ├── 10. Multiple dispatch.ipynb
│ ├── 10.1 Multiple dispatch.ipynb
│ ├── 11. Basic linear algebra.ipynb
│ ├── 12. Factorizations and other fun.ipynb
│ ├── AutoDiff.ipynb
│ ├── Exercise_solutions.ipynb
│ ├── Exploring_benchmarking_and_performance.ipynb
│ ├── Exploring_benchmarking_and_performance_solutions.ipynb
│ ├── LICENSE.md
│ ├── Local_installations.ipynb
│ ├── README.md
│ ├── Reduce and Parallel Prefix - JuliaBox.ipynb
│ ├── Working with matrices.ipynb
│ ├── calculate_pi.ipynb
│ ├── calculate_pi_solution.ipynb
│ ├── compressing_an_image.ipynb
│ ├── compressing_an_image_solutions.ipynb
│ ├── create_a_caesar_cipher.ipynb
│ ├── create_a_caesar_cipher_solutions.ipynb
│ ├── derfunc.gif
│ ├── images
│ ├── 104_100.jpg
│ ├── area_ratio.png
│ ├── banana_10svals.png
│ ├── banana_30svals.png
│ ├── banana_3svals.png
│ ├── banana_5svals.png
│ └── hint.png
│ ├── long-version
│ ├── 000 Jupyter Notebooks.ipynb
│ ├── 001 Variations on Sum.ipynb
│ ├── 010 Arithmetic, Assignment, and Strings.ipynb
│ ├── 020 Arrays and Loops.ipynb
│ ├── 030 Functions and Broadcasting.ipynb
│ ├── 040 Data structures.ipynb
│ ├── 050 Compressing an Image.ipynb
│ ├── 060 Packages.ipynb
│ ├── 070 Plotting.ipynb
│ ├── 100 Working with Matrices.ipynb
│ ├── 110 Multiple dispatch.ipynb
│ ├── 115 Multiple dispatch examples.ipynb
│ ├── 120 Types.ipynb
│ ├── 130 OneHot Vector.ipynb
│ ├── 140 ModInt.ipynb
│ ├── 150 Iterators.ipynb
│ ├── 160 AutoDiff.ipynb
│ ├── 170 Basic linear algebra.ipynb
│ ├── 180 Factorizations and other fun.ipynb
│ └── images
│ │ ├── 104_100.jpg
│ │ ├── area_ratio.png
│ │ ├── banana_10svals.png
│ │ ├── banana_30svals.png
│ │ ├── banana_3svals.png
│ │ ├── banana_5svals.png
│ │ └── hint.png
│ └── short-version
│ ├── 00.Jupyter_notebooks.ipynb
│ ├── 01.Getting_to_know_Julia.ipynb
│ ├── 02.Linear_Algebra.ipynb
│ ├── 03.Using_packages.ipynb
│ ├── 04.Intro_to_plotting.ipynb
│ ├── 05.Julia_is_fast.ipynb
│ └── 06.Multiple_dispatch.ipynb
├── jupyter_notebook_config.py
├── jupyter_script_export_template.tpl
├── more-advanced-materials
├── ML-demos
│ ├── Metalhead.ipynb
│ ├── TextAnalysis.ipynb
│ ├── knet-nlp
│ │ ├── LICENSE
│ │ ├── README.md
│ │ ├── charlm
│ │ │ ├── charlm.ipynb
│ │ │ ├── charlm.jl
│ │ │ ├── juliatrain.ipynb
│ │ │ └── shaketrain.ipynb
│ │ ├── imdb
│ │ │ ├── imdb.jl
│ │ │ ├── imdbdemo.ipynb
│ │ │ └── imdbtrain.ipynb
│ │ └── macnet
│ │ │ ├── data
│ │ │ └── .gitignore
│ │ │ ├── demosetup.jl
│ │ │ ├── models
│ │ │ └── .gitignore
│ │ │ ├── src
│ │ │ ├── loss.jl
│ │ │ └── newmacnetwork.jl
│ │ │ └── visualize.ipynb
│ └── knet-tutorial
│ │ ├── 00.Julia_is_fast.ipynb
│ │ ├── 10.Getting_to_know_Julia.ipynb
│ │ ├── 15.quickstart.ipynb
│ │ ├── 20.mnist.ipynb
│ │ ├── 23.learning.ipynb
│ │ ├── 25.iterators.ipynb
│ │ ├── 30.lin.ipynb
│ │ ├── 40.mlp.ipynb
│ │ ├── 50.cnn.ipynb
│ │ ├── 60.rnn.ipynb
│ │ ├── 70.imdb.ipynb
│ │ ├── 80.charlm.ipynb
│ │ ├── 90.s2s.ipynb
│ │ ├── README.md
│ │ ├── colab_install_julia.ipynb
│ │ └── images
│ │ ├── LSTM3-chain.png
│ │ ├── LSTM3-var-GRU.png
│ │ ├── diags.png
│ │ ├── rnn-vs-mlp.png
│ │ ├── s2s-dims.png
│ │ └── seq2seq.png
├── metaprogramming
│ ├── LICENSE.md
│ └── Metaprogramming.ipynb
└── parallelism-demos
│ ├── 01. Distributed Arrays.ipynb
│ ├── 02. Parallelism Basics.ipynb
│ ├── 03. JuliaRun-parallel.ipynb
│ ├── 04. TracyWidom.ipynb
│ ├── 05. JuliaRun-parallel-batch.ipynb
│ ├── 06. JuliaRun-http-service.ipynb
│ └── LICENSE.md
└── zh-cn
├── README.md
└── intro-to-julia-ZH
├── 00.上手Jupyter_notebook.ipynb
├── 01.新手入门.ipynb
├── 02.字符串.ipynb
├── 03.数据结构.ipynb
├── 04.循环.ipynb
├── 05.条件判断.ipynb
├── 06.函数.ipynb
├── 07.包(Packages).ipynb
├── 08.绘图.ipynb
├── 09.Julia很快.ipynb
├── 10.1多重派发.ipynb
├── 10.多重派发.ipynb
├── 11.基本线性代数.ipynb
├── 12.矩阵分解与其他妙用.ipynb
└── 简短版
├── 00.上手Jupyter_notebook.ipynb
├── 01.了解Julia.ipynb
├── 02.Julia中的线性代数.ipynb
├── 03.包的使用.ipynb
├── 04.Julia绘图简介.ipynb
├── 05.Julia很快.ipynb
└── 06.多重派发.ipynb
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | # These are supported funding model platforms
2 |
3 | github: [JuliaLang]
4 | open_collective: julialang
5 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.ipynb_checkpoints
--------------------------------------------------------------------------------
/.nbexports/introductory-tutorials/broader-topics-and-ecosystem/intro-to-julia-DataFrames/08_joins.jl:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # # Introduction to DataFrames
3 | # **[Bogumił Kamiński](http://bogumilkaminski.pl/about/), Apr 21, 2017**
4 | # ------------------------------------------------------------------------------------------
5 |
6 | using DataFrames # load package
7 |
8 | # ------------------------------------------------------------------------------------------
9 | # ## Joining DataFrames
10 | # ------------------------------------------------------------------------------------------
11 |
12 | # ------------------------------------------------------------------------------------------
13 | # ### Preparing DataFrames for a join
14 | # ------------------------------------------------------------------------------------------
15 |
16 | x = DataFrame(ID=[1,2,3,4,missing], name = ["Alice", "Bob", "Conor", "Dave","Zed"])
17 | y = DataFrame(id=[1,2,5,6,missing], age = [21,22,23,24,99])
18 | x,y
19 |
20 | rename!(x, :ID=>:id) # names of columns on which we want to join must be the same
21 |
22 | # ------------------------------------------------------------------------------------------
23 | # ### Standard joins: inner, left, right, outer, semi, anti
24 | # ------------------------------------------------------------------------------------------
25 |
26 | join(x, y, on=:id) # :inner join by default, missing is joined
27 |
28 | join(x, y, on=:id, kind=:left)
29 |
30 | join(x, y, on=:id, kind=:right)
31 |
32 | join(x, y, on=:id, kind=:outer)
33 |
34 | join(x, y, on=:id, kind=:semi)
35 |
36 | join(x, y, on=:id, kind=:anti)
37 |
38 | # ------------------------------------------------------------------------------------------
39 | # ### Cross join
40 | # ------------------------------------------------------------------------------------------
41 |
42 | # cross-join does not require on argument
43 | # it produces a Cartesian product of arguments
44 | function expand_grid(;xs...) # a simple replacement for expand.grid in R
45 | reduce((x,y) -> join(x, DataFrame(Pair(y...)), kind=:cross),
46 | DataFrame(Pair(xs[1]...)), xs[2:end])
47 | end
48 |
49 | expand_grid(a=[1,2], b=["a","b","c"], c=[true,false])
50 |
51 | # ------------------------------------------------------------------------------------------
52 | # ### Complex cases of joins
53 | # ------------------------------------------------------------------------------------------
54 |
55 | x = DataFrame(id1=[1,1,2,2,missing,missing],
56 | id2=[1,11,2,21,missing,99],
57 | name = ["Alice", "Bob", "Conor", "Dave","Zed", "Zoe"])
58 | y = DataFrame(id1=[1,1,3,3,missing,missing],
59 | id2=[11,1,31,3,missing,999],
60 | age = [21,22,23,24,99, 100])
61 | x,y
62 |
63 | join(x, y, on=[:id1, :id2]) # joining on two columns
64 |
65 | join(x, y, on=[:id1], makeunique=true) # with duplicates all combinations are produced (here :inner join)
66 |
67 | join(x, y, on=[:id1], kind=:semi) # but not by :semi join (as it would duplicate rows)
68 |
--------------------------------------------------------------------------------
/.nbexports/introductory-tutorials/broader-topics-and-ecosystem/intro-to-julia-DataFrames/09_reshaping.jl:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # # Introduction to DataFrames
3 | # **[Bogumił Kamiński](http://bogumilkaminski.pl/about/), Apr 21, 2018**
4 | # ------------------------------------------------------------------------------------------
5 |
6 | using DataFrames # load package
7 |
8 | # ------------------------------------------------------------------------------------------
9 | # ## Reshaping DataFrames
10 | # ------------------------------------------------------------------------------------------
11 |
12 | # ------------------------------------------------------------------------------------------
13 | # ### Wide to long
14 | # ------------------------------------------------------------------------------------------
15 |
16 | x = DataFrame(id=[1,2,3,4], id2=[1,1,2,2], M1=[11,12,13,14], M2=[111,112,113,114])
17 |
18 | melt(x, :id, [:M1, :M2]) # first pass id-variables and then measure variables; meltdf makes a view
19 |
20 | # optionally you can rename columns; melt and stack are identical but order of arguments is reversed
21 | stack(x, [:M1, :M2], :id, variable_name=:key, value_name=:observed) # first measures and then id-s; stackdf creates view
22 |
23 | # if the second argument is omitted in melt or stack, all other columns are assumed to be the second argument
24 | # but measure variables are selected only if they are <: AbstractFloat
25 | melt(x, [:id, :id2])
26 |
27 | melt(x, [1, 2]) # you can use index instead of symbol
28 |
29 | bigx = DataFrame(rand(10^6, 10)) # a test comparing creation of new DataFrame and a view
30 | bigx[:id] = 1:10^6
31 | @time melt(bigx, :id)
32 | @time melt(bigx, :id)
33 | @time meltdf(bigx, :id)
34 | @time meltdf(bigx, :id);
35 |
36 | x = DataFrame(id = [1,1,1], id2=['a','b','c'], a1 = rand(3), a2 = rand(3))
37 |
38 | melt(x)
39 |
40 | melt(DataFrame(rand(3,2))) # by default stack and melt treats floats as value columns
41 |
42 | df = DataFrame(rand(3,2))
43 | df[:key] = [1,1,1]
44 | mdf = melt(df) # duplicates in key are silently accepted
45 |
46 | # ------------------------------------------------------------------------------------------
47 | # ### Long to wide
48 | # ------------------------------------------------------------------------------------------
49 |
50 | x = DataFrame(id = [1,1,1], id2=['a','b','c'], a1 = rand(3), a2 = rand(3))
51 |
52 | y = melt(x, [1,2])
53 | display(x)
54 | display(y)
55 |
56 | unstack(y, :id2, :variable, :value) # standard unstack with a unique key
57 |
58 | unstack(y, :variable, :value) # all other columns are treated as keys
59 |
60 | # by default :id, :variable and :value names are assumed; in this case it produces duplicate keys
61 | unstack(y)
62 |
63 | df = stack(DataFrame(rand(3,2)))
64 |
65 | unstack(df, :variable, :value) # unable to unstack when no key column is present
66 |
--------------------------------------------------------------------------------
/.nbexports/introductory-tutorials/broader-topics-and-ecosystem/intro-to-julia-DataFrames/10_transforms.jl:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # # Introduction to DataFrames
3 | # **[Bogumił Kamiński](http://bogumilkaminski.pl/about/), Apr 21, 2018**
4 | # ------------------------------------------------------------------------------------------
5 |
6 | using DataFrames # load package
7 |
8 | # ------------------------------------------------------------------------------------------
9 | # ## Split-apply-combine
10 | # ------------------------------------------------------------------------------------------
11 |
12 | x = DataFrame(id=[1,2,3,4,1,2,3,4], id2=[1,2,1,2,1,2,1,2], v=rand(8))
13 |
14 | gx1 = groupby(x, :id)
15 |
16 | gx2 = groupby(x, [:id, :id2])
17 |
18 | vcat(gx2...) # back to the original DataFrame
19 |
20 | x = DataFrame(id = [missing, 5, 1, 3, missing], x = 1:5)
21 |
22 | showall(groupby(x, :id)) # by default groups include missing values and are not sorted
23 |
24 | showall(groupby(x, :id, sort=true, skipmissing=true)) # but we can change it :)
25 |
26 | x = DataFrame(id=rand('a':'d', 100), v=rand(100));
27 | by(x, :id, y->mean(y[:v])) # apply a function to each group of a data frame
28 |
29 | by(x, :id, y->mean(y[:v]), sort=true) # we can sort the output
30 |
31 | by(x, :id, y->DataFrame(res=mean(y[:v]))) # this way we can set a name for a column - DataFramesMeta @by is better
32 |
33 | x = DataFrame(id=rand('a':'d', 100), x1=rand(100), x2=rand(100))
34 | aggregate(x, :id, sum) # apply a function over all columns of a data frame in groups given by id
35 |
36 | aggregate(x, :id, sum, sort=true) # also can be sorted
37 |
38 | # ------------------------------------------------------------------------------------------
39 | # *We omit the discussion of map/combine as I do not find them very useful (better to use
40 | # by)*
41 | # ------------------------------------------------------------------------------------------
42 |
43 | x = DataFrame(rand(3, 5))
44 |
45 | map(mean, eachcol(x)) # map a function over each column and return a data frame
46 |
47 | foreach(c -> println(c[1], ": ", mean(c[2])), eachcol(x)) # a raw iteration returns a tuple with column name and values
48 |
49 | colwise(mean, x) # colwise is similar, but produces a vector
50 |
51 | x[:id] = [1,1,2]
52 | colwise(mean,groupby(x, :id)) # and works on GroupedDataFrame
53 |
54 | map(r -> r[:x1]/r[:x2], eachrow(x)) # now the returned value is DataFrameRow which works similarly to a one-row DataFrame
55 |
--------------------------------------------------------------------------------
/.nbexports/introductory-tutorials/broader-topics-and-ecosystem/intro-to-julia-DataFrames/12_pitfalls.jl:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # # Introduction to DataFrames
3 | # **[Bogumił Kamiński](http://bogumilkaminski.pl/about/), Apr 21, 2018**
4 | # ------------------------------------------------------------------------------------------
5 |
6 | using DataFrames
7 |
8 | # ------------------------------------------------------------------------------------------
9 | # ## Possible pitfalls
10 | # ------------------------------------------------------------------------------------------
11 |
12 | # ------------------------------------------------------------------------------------------
13 | # ### Know what is copied when creating a `DataFrame`
14 | # ------------------------------------------------------------------------------------------
15 |
16 | x = DataFrame(rand(3, 5))
17 |
18 | y = DataFrame(x)
19 | x === y # no copying performed
20 |
21 | y = copy(x)
22 | x === y # not the same object
23 |
24 | all(x[i] === y[i] for i in ncol(x)) # but the columns are the same
25 |
26 | x = 1:3; y = [1, 2, 3]; df = DataFrame(x=x,y=y) # the same when creating arrays or assigning columns, except ranges
27 |
28 | y === df[:y] # the same object
29 |
30 | typeof(x), typeof(df[:x]) # range is converted to a vector
31 |
32 | # ------------------------------------------------------------------------------------------
33 | # ### Do not modify the parent of `GroupedDataFrame`
34 | # ------------------------------------------------------------------------------------------
35 |
36 | x = DataFrame(id=repeat([1,2], outer=3), x=1:6)
37 | g = groupby(x, :id)
38 |
39 | x[1:3, 1]=[2,2,2]
40 | g # well - it is wrong now, g is only a view
41 |
42 | # ------------------------------------------------------------------------------------------
43 | # ### Remember that you can filter columns of a `DataFrame` using booleans
44 | # ------------------------------------------------------------------------------------------
45 |
46 | srand(1)
47 | x = DataFrame(rand(5, 5))
48 |
49 | x[x[:x1] .< 0.25] # well - we have filtered columns not rows by accident as you can select columns using booleans
50 |
51 | x[x[:x1] .< 0.25, :] # probably this is what we wanted
52 |
53 | # ------------------------------------------------------------------------------------------
54 | # ### Column selection for DataFrame creates aliases unless explicitly copied
55 | # ------------------------------------------------------------------------------------------
56 |
57 | x = DataFrame(a=1:3)
58 | x[:b] = x[1] # alias
59 | x[:c] = x[:, 1] # also alias
60 | x[:d] = x[1][:] # copy
61 | x[:e] = copy(x[1]) # explicit copy
62 | display(x)
63 | x[1,1] = 100
64 | display(x)
65 |
--------------------------------------------------------------------------------
/.nbexports/introductory-tutorials/broader-topics-and-ecosystem/intro-to-ml/07. ML - Model complexity.jl:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # ## Model complexity
3 | #
4 | # In the last notebook, we saw that we could customize a model by adding a parameter. Doing
5 | # so, we were able to fit that model to a data point. This fit was perfect, insofar as
6 | # numerics would allow.
7 | #
8 | # In the next notebook, we'll see that as we add more data to our data set, fitting a model
9 | # to our data usually becomes more challenging and the result will be less perfect.
10 | #
11 | # For one thing, we will find that we can add complexity to our model to capture added
12 | # complexity in the data. We can do this by adding more parameters to our model. We'll see
13 | # that for a data set with two data points, we can again get a "perfect" fit to our model by
14 | # adding a second parameter to our model.
15 | #
16 | # However, we can't simply add a parameter to our model every time we add a data point to
17 | # our data set, since this will lead to a phenomenon called **overfitting**.
18 | #
19 | # In the image below, we depict a data set that is close to linear, and models that exhibit
20 | # underfitting, fitting well, and overfitting, from left to right:
21 | #
22 | #
23 | #
24 | #
25 | # In the first image, the model accounts for the slope along which the data falls, but not
26 | # the offset.
27 | #
28 | # In the second image, the model accounts for both the slope and offset of the data. Adding
29 | # this second parameter (the offset) to the model creates a much better fit.
30 | #
31 | # However, we can imagine that a model can have too many parameters, where we begin to fit
32 | # not only the high level features of the data, but also the noise. This overfitting is
33 | # depicted in the third image.
34 | # ------------------------------------------------------------------------------------------
35 |
36 | # ------------------------------------------------------------------------------------------
37 | # Our aim will be to fit the data well, but avoiding *over*fitting the data!
38 | # ------------------------------------------------------------------------------------------
39 |
--------------------------------------------------------------------------------
/.nbexports/introductory-tutorials/broader-topics-and-ecosystem/intro-to-ml/13. ML - Intro to Flux.jl.jl:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # # Intro to Flux.jl
3 | # ------------------------------------------------------------------------------------------
4 |
5 | # ------------------------------------------------------------------------------------------
6 | # We have learned how machine learning allows us to classify data as apples or bananas with
7 | # a single neuron. However, some of those details are pretty fiddly! Fortunately, Julia has
8 | # a powerful package that does much of the heavy lifting for us, called
9 | # [`Flux.jl`](https://fluxml.github.io/).
10 | #
11 | # *Using `Flux` will make classifying data and images much easier!*
12 | # ------------------------------------------------------------------------------------------
13 |
14 | # ------------------------------------------------------------------------------------------
15 | # ## Using `Flux.jl`
16 | #
17 | # In the next notebook, we are going to see how Flux allows us to redo the calculations from
18 | # the previous notebook in a simpler way. We can get started with `Flux.jl` via:
19 | # ------------------------------------------------------------------------------------------
20 |
21 | using Flux
22 |
23 | # ------------------------------------------------------------------------------------------
24 | # #### Helpful built-in functions
25 | #
26 | # When working with `Flux`, we'll make use of built-in functionality that we've had to
27 | # create for ourselves in previous notebooks.
28 | #
29 | # For example, the sigmoid function, σ, that we have been using already lives within `Flux`:
30 | # ------------------------------------------------------------------------------------------
31 |
32 | ?σ
33 |
34 | # ------------------------------------------------------------------------------------------
35 | # Importantly, `Flux` allows us to *automatically create neurons* with the **`Dense`**
36 | # function. For example, in the last notebook, we were looking at a neuron with 2 inputs and
37 | # 1 output:
38 | #
39 | #
40 | #
41 | # We could create a neuron with two inputs and one output via
42 | # ------------------------------------------------------------------------------------------
43 |
# Create a single "dense" (fully connected) layer: 2 inputs, 1 output,
# with the sigmoid `σ` as the activation function — i.e. one neuron.
model = Dense(2, 1, σ)
45 |
46 | # ------------------------------------------------------------------------------------------
47 | # This `model` object comes with places to store weights and biases:
48 | # ------------------------------------------------------------------------------------------
49 |
50 | model.W
51 |
52 | model.b
53 |
54 | typeof(model.W)
55 |
56 | # ------------------------------------------------------------------------------------------
57 | # Unlike in previous notebooks, note that `W` is no longer a `Vector` (1D `Array`) and `b`
58 | # is no longer a number! Both are now stored in so-called `TrackedArray`s and `W` is
59 | # effectively being treated as a matrix with a single row. We'll see why in the next
60 | # notebook.
61 | # ------------------------------------------------------------------------------------------
62 |
63 |
64 |
65 | # ------------------------------------------------------------------------------------------
66 | # Other helpful built-in functionality includes ways to automatically calculate gradients
67 | # and also the cost function that we've used in previous notebooks -
68 | #
69 | # $$L(w, b) = \sum_i \left[y_i - f(x_i, w, b) \right]^2$$
70 | #
71 | # This is the "mean square error" function, which in `Flux` is named **`Flux.mse`**.
72 | # ------------------------------------------------------------------------------------------
73 |
--------------------------------------------------------------------------------
/.nbexports/introductory-tutorials/broader-topics-and-ecosystem/intro-to-ml/17. ML - Introduction to deep learning.jl:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # ## Going deep: Deep neural networks
3 | #
4 | # So far, we've learned that if we want to classify more than two fruits, we'll need to go
5 | # beyond using a single neuron and use *multiple* neurons to get multiple outputs. We can
6 | # think of stacking these multiple neurons together in a single neural layer.
7 | #
8 | # Even so, we found that using a single neural layer was not enough to fully distinguish
9 | # between bananas, grapes, **and** apples. To do this properly, we'll need to add more
10 | # complexity to our model. We need not just a neural network, but a *deep neural network*.
11 | #
12 | # There is one step remaining to build a deep neural network. We have been saying that a
13 | # neural network takes in data and then spits out `0` or `1` predictions that together
14 | # declare what kind of fruit the picture is. However, what if we instead put the output of
15 | # one neural network layer into another neural network layer?
16 | #
17 | # This gets pictured like this below:
18 | #
19 | #
20 | #
21 | # On the left we have 3 data points in blue. Those 3 data points each get fed into 4 neurons
22 | # in purple. Each of those 4 neurons produces a single output, but those outputs are each fed
23 | # into three neurons (the second layer of purple). Each of those 3 neurons spits out a
24 | # single value, and those values are fed as inputs into the last layer of 6 neurons. The 6
25 | # values that those final neurons produce are the output of the neural network. This is a
26 | # deep neural network.
27 | # ------------------------------------------------------------------------------------------
28 |
29 | # ------------------------------------------------------------------------------------------
30 | # ### Why would a deep neural network be better?
31 | #
32 | # This is a little perplexing when you first see it. We used neurons to train the model
33 | # before: why would sticking the output from neurons into other neurons help us fit the data
34 | # better? The answer can be understood by drawing pictures. Geometrically, the matrix
35 | # multiplication inside of a layer of neurons is stretching and rotating the axis that we can
36 | # vary:
37 | #
38 | # [Show linear transformation of axis, with data]
39 | #
40 | # A nonlinear transformation, such as the sigmoid function, then adds a bump to the line:
41 | #
42 | # [Show the linear transformed axis with data, and then a bumped version that fits the data
43 | # better]
44 | #
45 | # Now let's repeat this process. When we send the data through another layer of neurons, we
46 | # get another rotation and another bump:
47 | #
48 | # [Show another rotation, then another bump]
49 | #
50 | # Visually, we see that if we keep doing this process we can make the axis line up with any
51 | # data. What this means is that **if we have enough layers, then our neural network can
52 | # approximate any model**.
53 | #
54 | # The trade-off is that with more layers we have more parameters, so it may be harder (i.e.
55 | # computationally intensive) to train the neural network. But we have the guarantee that the
56 | # model has enough freedom such that there are parameters that will give the correct output.
57 | #
58 | # Because this model is so flexible, the problem is reduced to that of learning: do the same
59 | # gradient descent method on this much larger model (but more efficiently!) and we can make
60 | # it classify our data correctly. This is the power of deep learning.
61 | # ------------------------------------------------------------------------------------------
62 |
--------------------------------------------------------------------------------
/.nbexports/introductory-tutorials/intro-to-julia-ES/ 5. Condicionales.jl:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # # Condicionales
3 | #
4 | # En Julia, la sintaxis
5 | #
6 | # ```julia
7 | # if *condición 1*
8 | # *opción 1*
9 | # elseif *condición 2*
10 | # *opción 2*
11 | # else
12 | # *opción 3*
13 | # end
14 | # ```
15 | #
16 | # Nos permite eventualmente evaluar una de nuestras opciones.
17 | #
18 | # Por ejemplo, tal vez queremos implementar la prueba de FizzBuzz: Dado un número N, imprime
19 | # "Fizz" si N es divisible entre 3, "Buzz" si N es divisible entre 5, y "FizzBuzz" si N es
20 | # divisible entre ambos 3 y 5. En cualquier otro caso, imprimo el número mismo.
21 | # ------------------------------------------------------------------------------------------
22 |
23 | N =
24 |
# FizzBuzz: `&&` is the idiomatic short-circuiting "and" for Bool
# conditions in Julia (the bitwise `&` evaluates both sides always).
if (N % 3 == 0) && (N % 5 == 0)
    println("FizzBuzz")
elseif N % 3 == 0
    println("Fizz")
elseif N % 5 == 0
    println("Buzz")
else
    println(N)
end
34 |
35 | # ------------------------------------------------------------------------------------------
36 | # Ahora digamos que queremos regresar el mayor número de ambos. Escoge tus propios x y y
37 | # ------------------------------------------------------------------------------------------
38 |
39 | x =
40 | y =
41 |
42 | if x > y
43 | x
44 | else
45 | y
46 | end
47 |
48 | # ------------------------------------------------------------------------------------------
49 | # Para el último bloque, podemos usar el operador ternario, con la sintaxis
50 | #
51 | # ```julia
52 | # a ? b : c
53 | # ```
54 | #
55 | # que equivale a
56 | #
57 | # ```julia
58 | # if a
59 | # b
60 | # else
61 | # c
62 | # end
63 | # ```
64 | # ------------------------------------------------------------------------------------------
65 |
66 | (x > y) ? x : y
67 |
68 | # ------------------------------------------------------------------------------------------
69 | # Un truco relacionado es la evaluación de corto-circuito
70 | #
71 | # ```julia
72 | # a && b
73 | # ```
74 | # ------------------------------------------------------------------------------------------
75 |
76 | (x > y) && println(x)
77 |
78 | (x < y) && println(y)
79 |
80 | # ------------------------------------------------------------------------------------------
81 | # Cuando escribimos `a && b`, `b` se ejecuta sólo si `a` se evalúa a `true`.
82 | #
83 | # Si `a` se evalúa a `false`, la expresión `a && b` regresa `false`
84 | # ------------------------------------------------------------------------------------------
85 |
86 | # ------------------------------------------------------------------------------------------
87 | # ### Ejercicios
88 | #
89 | # 5.1 Reescribe FizzBuzz sin usar `elseif`.
90 | # ------------------------------------------------------------------------------------------
91 |
92 |
93 |
94 | # ------------------------------------------------------------------------------------------
95 | # 5.2 Reescribe FizzBuzz usando el operador ternario.
96 | # ------------------------------------------------------------------------------------------
97 |
98 |
99 |
--------------------------------------------------------------------------------
/.nbexports/introductory-tutorials/intro-to-julia-ES/ 7. Paquetes.jl:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # # Paquetes
3 | #
4 | # Julia tiene más de 1686 paquetes registrados, conformando una gran parte del ecosistema de
5 | # Julia.
6 | #
7 | # Para ver todos los paquetes, visita
8 | #
9 | # https://pkg.julialang.org/
10 | #
11 | # o bien
12 | #
13 | # https://juliaobserver.com/
14 | #
15 | # Ahora vamos a aprender a usarlos
16 | # ------------------------------------------------------------------------------------------
17 |
18 | # ------------------------------------------------------------------------------------------
19 | # La primera vez que quieres usar un paquete en Julia, hay que agregarlo
20 | # ------------------------------------------------------------------------------------------
21 |
22 | #Pkg.add("Example")
23 |
24 | # ------------------------------------------------------------------------------------------
25 | # Cada vez que usas Julia (empezar una nueva sesión en el REPL, abrir un notebook por
26 | # primera vez, por ejemplo), tienes que cargar el paquete usando la palabra reservada
27 | # `using`
28 | # ------------------------------------------------------------------------------------------
29 |
30 | using Example
31 |
32 | # ------------------------------------------------------------------------------------------
33 | # En el código fuente de `Example.jl` en
34 | #
35 | # https://github.com/JuliaLang/Example.jl/blob/master/src/Example.jl
36 | #
37 | # Vemos una función declarada como
38 | #
39 | # ```
40 | # hello(who::String) = "Hello, $who"
41 | # ```
42 | #
43 | # Si cargamos `Example`, debemos poder llamar `hello`
44 | # ------------------------------------------------------------------------------------------
45 |
46 | hello("it's me. I was wondering if after all these years you'd like to meet.")
47 |
48 | # ------------------------------------------------------------------------------------------
49 | # Ahora vamos a jugar con el paquete de Colors
50 | # ------------------------------------------------------------------------------------------
51 |
52 | #Pkg.add("Colors")
53 |
54 | using Colors
55 |
56 | # ------------------------------------------------------------------------------------------
57 | # Creemos una bandeja de 100 colores
58 | # ------------------------------------------------------------------------------------------
59 |
60 | bandeja = distinguishable_colors(100)
61 |
62 | # ------------------------------------------------------------------------------------------
63 | # y podemos crear una matriz colorida aleatoriamente con rand
64 | # ------------------------------------------------------------------------------------------
65 |
66 | rand(bandeja, 3, 3)
67 |
68 | # ------------------------------------------------------------------------------------------
69 | # En el próximo notebook, vamos a usar un nuevo paquete para graficar datos
70 | # ------------------------------------------------------------------------------------------
71 |
72 | # ------------------------------------------------------------------------------------------
73 | # ### Ejercicios
74 | #
75 | # 7.1 Usa el paquete `Primes` (código fuente en https://github.com/JuliaMath/Primes.jl) para
76 | # encontrar el número primo más grande menor a 1,000,000
77 | # ------------------------------------------------------------------------------------------
78 |
79 |
80 |
--------------------------------------------------------------------------------
/.nbexports/introductory-tutorials/intro-to-julia-ES/Jupyter_notebooks.jl:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # ## Comenzando con Jupyter Notebooks
3 | #
4 | # Un documento de **Jupyter** está compuesto por celdas. Las celdas se delimitan por cajas
5 | # con bordes grises o verdes, dependiendo si se están editando o no.
6 | #
7 | # Las celdas se clasifican en dos tipos: celdas de código y de texto tipo *markdown*.
8 | #
9 | # ### Editando celdas tipo *Markdown*
10 | # Para ver el texto plano de esta celda, solamente debes hacer doble click sobre el texto.
11 | # Automáticamente podrás editar el texto y te darás cuenta de lo sencillo que es darle formato.
12 | #
13 | # Aquí encontrarás un [tutorial](https://www.markdowntutorial.com) y [manual de referencias
14 | # rápido](https://en.support.wordpress.com/markdown-quick-reference/) con la información
15 | # básica que vas a necesitar para darle un formato pulcro a tus archivos.
16 | #
17 | # ### Corriendo una celda de código
18 | # Para ejecutar una celda de código, selecciona la celda y (1) presiona `Shift` + `Enter` o
19 | # (2) haz click en el botón de correr que se encuentra en la barra de herramientas superior:
20 | # ------------------------------------------------------------------------------------------
21 |
22 | 1 + 1
23 | 2 + 2
24 |
25 | # ------------------------------------------------------------------------------------------
26 | # Si eres buen observador podrás notar que solamente la última línea de cada celda se
27 | # imprime a pantalla al ejecutarse. Se puede suprimir esta impresión a pantalla con un
28 | # semicolon.
29 | # ------------------------------------------------------------------------------------------
30 |
31 | 1 + 1
32 | 2 + 2;
33 |
34 | # ------------------------------------------------------------------------------------------
35 | # ### ¿Cómo buscar información sobre las funciones?
36 | #
37 | # ¡Igualito que en la REPL!
38 | #
39 | # ------------------------------------------------------------------------------------------
40 |
41 | ?println
42 |
43 | # ------------------------------------------------------------------------------------------
44 | # ### ¿Cómo usar comandos del shell?
45 | #
46 | # ¡Lo mismo!
47 | #
48 | # ------------------------------------------------------------------------------------------
49 |
50 | ;ls
51 |
52 | ;pwd
53 |
54 | # ------------------------------------------------------------------------------------------
55 | # Como ya podrás imaginarte, todos los comandos que hemos usado en la REPL se podrán usar
56 | # acá....
57 | #
58 | # Para una referencia completa sobre lo que se puede hacer en esta interfaz, como siempre,
59 | # ver la [documentación oficial](https://jupyter-
60 | # notebook.readthedocs.io/en/stable/examples/Notebook/Notebook%20Basics.html#)
61 | # ------------------------------------------------------------------------------------------
62 |
63 |
64 |
--------------------------------------------------------------------------------
/.nbexports/introductory-tutorials/intro-to-julia/00. Jupyter_notebooks.jl:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # ## Getting started with Jupyter notebooks
3 | #
4 | # ### Running a cell
5 | # To execute code within a cell, select that cell and either (1) hit `Shift` and `Enter` or
6 | # (2) hit the run button (the right pointing arrow) above.
7 | # ------------------------------------------------------------------------------------------
8 |
9 | 1 + 1
10 | 2 + 2
11 |
12 | # ------------------------------------------------------------------------------------------
13 | # If you're new to jupyter notebooks, note that only the last line of a cell prints by
14 | # default when you execute that cell and that you can suppress this output with a semicolon
15 | # ------------------------------------------------------------------------------------------
16 |
17 | 1 + 1
18 | 2 + 2;
19 |
20 | # ------------------------------------------------------------------------------------------
21 | # ### How to get docs for Julia functions
22 | #
23 | # To get docs for a function you're not familiar with, precede it with a question mark.
24 | # (This works at the REPL too!)
25 | # ------------------------------------------------------------------------------------------
26 |
27 | ?println
28 |
29 | # ------------------------------------------------------------------------------------------
30 | # ### How to use shell commands
31 | #
32 | # Type `;` and then you can use shell commands. For example,
33 | # ------------------------------------------------------------------------------------------
34 |
35 | ;ls
36 |
37 | ;pwd
38 |
39 | # ------------------------------------------------------------------------------------------
40 | # Shell commands also work at the REPL!
41 | # ------------------------------------------------------------------------------------------
42 |
43 |
44 |
--------------------------------------------------------------------------------
/.nbexports/introductory-tutorials/intro-to-julia/Local_installations.jl:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # ## Get Julia running locally
3 | #
4 | #
5 | # ## Local package installations
6 | #
7 | # If you'd like to run these tutorial notebooks locally, you'll want to install all the
8 | # packages used in them. Since installation can take some time, you may want to run this
9 | # notebook before getting started with the tutorial, rather than trying to install
10 | # everything as you go.
11 | #
12 | # #### Installations
13 | # ------------------------------------------------------------------------------------------
14 |
15 | using Pkg
16 | Pkg.add(["Example", "Colors", "Primes", "Plots", "BenchmarkTools"])
17 |
18 | # ------------------------------------------------------------------------------------------
19 | # #### Loading all packages
20 | # ------------------------------------------------------------------------------------------
21 |
22 | using Example, Colors, Plots, BenchmarkTools, Primes
23 |
24 | # ------------------------------------------------------------------------------------------
25 | # #### Tests
26 | #
27 | # `plot` should generate a plot,
28 | # ------------------------------------------------------------------------------------------
29 |
30 | plot(x -> x^2, -10:10)
31 |
32 | # ------------------------------------------------------------------------------------------
33 | # `RGB(0, 0, 0)` should return a black square,
34 | # ------------------------------------------------------------------------------------------
35 |
36 | RGB(0, 0, 0)
37 |
38 | # ------------------------------------------------------------------------------------------
39 | # and `@btime primes(1000000);` should report an execution time in ms and memory used. For
40 | # example, on one computer, this yielded "2.654 ms (5 allocations: 876.14 KiB)".
41 | # ------------------------------------------------------------------------------------------
42 |
43 | @btime primes(1000000);
44 |
--------------------------------------------------------------------------------
/.nbexports/introductory-tutorials/intro-to-julia/calculate_pi.jl:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # ## How can we calculate $\pi$?
3 | #
4 | # Given a square of length $2r$, the square's area is
5 | #
6 | # $$A_{square} = (2r)^2 = 4r^2$$
7 | #
8 | # whereas the area of a circle with radius $r$ is
9 | # $$A_{circle} = \pi r^2$$
10 | #
11 | #
12 | #
13 | # Therefore the ratio of the area of the circle to that of the square above is
14 | #
15 | # $$\frac{A_{circle}}{A_{square}} = \frac{\pi r^2}{4r^2} = \frac{\pi}{4}$$
16 | #
17 | # and we can define $\pi$ as
18 | #
19 | # $$\pi = 4\frac{A_{circle}}{A_{square}}$$
20 | #
21 | # This suggests a way to calculate $\pi$: if we have a square and the largest circle that
22 | # fits inside that square, we can determine the ratio of areas of a circle and a square. We
23 | # can calculate this ratio using a monte carlo simulation. We select random points inside a
24 | # square, and we keep track of how often those points also fall inside the circle that fits
25 | # perfectly inside that square.
26 | #
27 | # Given a large enough number of sampling points, $\frac{A_{circle}}{A_{square}}$ will be equal to the
28 | # fraction of randomly chosen points inside the square that also fall inside the circle.
29 | # Then we can figure out $\pi$!
30 | #
31 | # #### Pseudo-code
32 | #
33 | # Given the above, our algorithm for determining $\pi$ looks like this:
34 | #
35 | # 1. For each of $N$ iterations,
36 | # 1. Select a random point inside a square of area $4r^2$ as Cartesian, $(x, y)$,
37 | # coordinates.
38 | # 1. Determine if the point also falls inside the circle embedded within this square of
39 | # area $\pi r^2$.
40 | # 1. Keep track of whether or not this point fell inside the circle. At the end of $N$
41 | # iterations, you want to know $M$ -- the number of the $N$ random points that fell inside
42 | # the circle!
43 | # 1. Calculate $\pi$ as $4\frac{M}{N}$
44 | #
45 | # #### Exercise
46 | #
47 | # Write a function that calculates $\pi$ using Julia.
48 | #
49 | # The algorithm above should work for any value of $r$ that you choose to use. Make sure you
50 | # make $N$ big enough that the value of $\pi$ is correct to at least a couple numbers after
51 | # the decimal point!
52 | #
53 | # *Hint*:
54 | #
55 | # This will probably be easier if you center your circle and square at the coordinate (0, 0)
56 | # and use a radius of 1. For example, to choose random coordinates within your square at
57 | # position (x, y), you may want to choose x and y so that they are each a value between -1
58 | # and +1. Then any point within a distance of 1 from (0, 0) will fall inside the circle!
59 | #
60 | #
61 | #
62 | #
63 | # ------------------------------------------------------------------------------------------
64 |
65 |
66 |
--------------------------------------------------------------------------------
/.nbexports/introductory-tutorials/intro-to-julia/long-version/000 Jupyter Notebooks.jl:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # ## Getting started with Jupyter notebooks
3 | #
4 | # Jupyter notebooks are a convenient way to run, display, and present interactive code. The
5 | # main concept is a cell — a single chunk of text. Cells may be markdown (like this one) or
6 | # code (like the next).
7 | #
8 | # ### Running a cell
9 | # To execute code within a cell, select that cell and either (1) hit `Shift` and `Enter` or
10 | # (2) hit the run button (the right pointing arrow) above.
11 | # ------------------------------------------------------------------------------------------
12 |
13 | 1 + 1
14 | 2 + 2
15 |
16 | # ------------------------------------------------------------------------------------------
17 | # If you're new to jupyter notebooks, note that only the last line of a cell prints by
18 | # default when you execute that cell and that you can suppress this output with a semicolon
19 | # ------------------------------------------------------------------------------------------
20 |
21 | 1 + 1
22 | 2 + 2;
23 |
24 | # ------------------------------------------------------------------------------------------
25 | # ## Changing modes and inserting new cells
26 | #
27 | # You can use the menu above or key combinations — ESC will drop out of editing a cell and
28 | # into a command mode where there are special keyboard shortcuts to insert/cut/modify cells
29 | # themselves. Try `ESC` and then `a` or `b` for "above" and "below".
30 | #
31 | # See the Help menu for all available shortcuts
32 | # ------------------------------------------------------------------------------------------
33 |
34 | # ------------------------------------------------------------------------------------------
35 | # ## Special modes for cells
36 | #
37 | # Cells can change their behavior depending on their very first character
38 | # ------------------------------------------------------------------------------------------
39 |
40 | # ------------------------------------------------------------------------------------------
41 | # ### How to get docs for Julia functions
42 | #
43 | # To get docs for a function you're not familiar with, precede it with a question mark.
44 | # (This works at the REPL too!)
45 | # ------------------------------------------------------------------------------------------
46 |
47 | ?println
48 |
49 | # ------------------------------------------------------------------------------------------
50 | # ### How to use shell commands
51 | #
52 | # Type `;` and then you can use shell commands. For example,
53 | # ------------------------------------------------------------------------------------------
54 |
55 | ;ls
56 |
57 | ;pwd
58 |
59 | # ------------------------------------------------------------------------------------------
60 | # ### Interacting with the package manager
61 | #
62 | # Julia's package manager has a special "command" syntax mode — you can enter it with a `]`
63 | # character.
64 | # ------------------------------------------------------------------------------------------
65 |
66 | ]status
67 |
--------------------------------------------------------------------------------
/.nbexports/introductory-tutorials/intro-to-julia/long-version/130 OneHot Vector.jl:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # Often used in machine learning, a "one hot" vector is a vector of all zeros, except for a
3 | # single `1` entry.
4 | # Representing it as a standard vector is memory-inefficient, so it cries out for a special
5 | # implementation.
6 | # ------------------------------------------------------------------------------------------
7 |
# A "one hot" vector: all zeros except a single `1` at position `idx`.
# Storing only the hot index and the length uses O(1) memory instead of
# materializing `len` integers.
struct OneHotVector <: AbstractVector{Int}
    idx::Int   # position of the single 1 entry
    len::Int   # total length of the vector
end

Base.size(v::OneHotVector) = (v.len,)

# Return 1 at the hot index and 0 elsewhere. The original version skipped
# bounds checking, silently returning 0 for any out-of-range index; the
# AbstractArray contract expects out-of-bounds access to throw.
function Base.getindex(v::OneHotVector, i::Integer)
    @boundscheck checkbounds(v, i)
    return Int(i == v.idx)
end
16 |
17 | OneHotVector(3, 10)
18 |
19 | A = rand(5,5)
20 |
21 | A * OneHotVector(3, 5)
22 |
23 | Vector(OneHotVector(3,5))
24 |
25 | # ------------------------------------------------------------------------------------------
26 | # ## Exercise
27 | #
28 | # Generalize it to any element type.
29 | # ------------------------------------------------------------------------------------------
30 |
31 |
32 |
--------------------------------------------------------------------------------
/.nbexports/introductory-tutorials/intro-to-julia/long-version/140 ModInt.jl:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # # ModInt: a simple modular integer type
3 | # ------------------------------------------------------------------------------------------
4 |
# A modular integer type: an `Int` value kept reduced modulo the type
# parameter `n`, so the stored representative is always in 0:n-1.
struct ModInt{n} <: Integer
    k::Int  # canonical representative in 0:n-1

    # Inner constructor — note the left-hand side looks exactly like the
    # call it defines. `mod` maps any Int (including negatives) into 0:n-1.
    ModInt{n}(k::Int) where {n} = new(mod(k, n))
end
12 |
13 | a = ModInt{13}(1238279873492834)
14 |
15 | b = ModInt{13}(9872349827349827)
16 |
17 | a + b
18 |
19 | # ------------------------------------------------------------------------------------------
20 | # To extend standard functions we need to import them.
21 | # ------------------------------------------------------------------------------------------
22 |
23 | import Base: +
24 |
# Addition of two ModInts sharing the same modulus `n`: add the stored
# representatives and let the constructor reduce the sum back into 0:n-1.
function +(a::ModInt{n}, b::ModInt{n}) where {n}
    return ModInt{n}(a.k + b.k)
end
26 |
27 | a + b
28 |
29 | import Base: *, -
30 |
# Multiplication and subtraction for ModInts with matching modulus:
# operate on the representatives, then reduce mod n in the constructor.
# (Spacing of `where {n}` normalized for consistency with the sibling
# definitions above.)
*(a::ModInt{n}, b::ModInt{n}) where {n} = ModInt{n}(a.k * b.k)
-(a::ModInt{n}, b::ModInt{n}) where {n} = ModInt{n}(a.k - b.k)
# Unary negation: `mod` in the constructor maps -a.k back into 0:n-1.
-(a::ModInt{n}) where {n} = ModInt{n}(-a.k)
34 |
35 | a * b
36 |
37 | a - b
38 |
39 | -b
40 |
# Printing: compact contexts (e.g. elements inside an array display) show
# just the representative; otherwise show it with its modulus, "k mod n".
function Base.show(io::IO, a::ModInt{n}) where {n}
    if get(io, :compact, false)
        show(io, a.k)
    else
        print(io, "$(a.k) mod $n")
    end
end
43 |
44 | a
45 |
46 | b
47 |
48 | a + 1
49 |
# Enable mixed Int/ModInt arithmetic: promotion says a (ModInt{n}, Int)
# pair should meet in ModInt{n}...
Base.promote_rule(::Type{ModInt{n}}, ::Type{Int}) where {n} = ModInt{n}

# ...and conversion realizes that promotion by reducing the Int mod n.
Base.convert(::Type{ModInt{n}}, x::Int) where {n} = ModInt{n}(x)
53 |
54 | a + 1
55 |
56 | 1 + a
57 |
58 | A = map(ModInt{13}, rand(1:100, 5, 5))
59 |
60 | A^100000000
61 |
62 | 2A^100 .- 1
63 |
64 | # ------------------------------------------------------------------------------------------
65 | # ### Summary
66 | #
67 | # Here is all the code that defines the `ModInt` type:
68 | # ```jl
69 | # struct ModInt{n} <: Integer
70 | # k::Int
71 | #
72 | # # Constructor definition...
73 | # # note the left side looks like the call it defines
74 | # ModInt{n}(k::Int) where {n} = new(mod(k,n))
75 | # end
76 | #
77 | # import Base: +, *, -
78 | #
79 | # +(a::ModInt{n}, b::ModInt{n}) where {n} = ModInt{n}(a.k + b.k)
80 | # *(a::ModInt{n}, b::ModInt{n}) where{n} = ModInt{n}(a.k * b.k)
81 | # -(a::ModInt{n}, b::ModInt{n}) where {n} = ModInt{n}(a.k - b.k)
82 | # -(a::ModInt{n}) where {n} = ModInt{n}(-a.k)
83 | #
84 | # Base.show(io::IO, a::ModInt{n}) where {n} =
85 | # get(io, :compact, false) ? show(io, a.k) : print(io, "$(a.k) mod $n")
86 | #
87 | # Base.promote_rule(::Type{ModInt{n}}, ::Type{Int}) where {n} = ModInt{n}
88 | # Base.convert(::Type{ModInt{n}}, x::Int) where {n} = ModInt{n}(x)
89 | # ```
90 | # ------------------------------------------------------------------------------------------
91 |
92 | # ------------------------------------------------------------------------------------------
93 | # ### Exercise
94 | #
95 | # Add two methods that allows operations between modular integers with different modulus
96 | # using the rule that they should combine in the modulus that is the `lcm` (least common
97 | # multiple) of the moduli of the arguments.
98 | #
99 | # **Hint:** try something, see what fails, define something to make that work.
100 | # ------------------------------------------------------------------------------------------
101 |
102 | x = ModInt{12}(9)
103 |
104 | y = ModInt{15}(13)
105 |
106 | # two method definitions here...
107 |
108 | @assert x + y == ModInt{60}(22)
109 | @assert x * y == ModInt{60}(57)
110 |
--------------------------------------------------------------------------------
/.nbexports/introductory-tutorials/intro-to-julia/short-version/00.Jupyter_notebooks.jl:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # ## Getting started with Jupyter notebooks
3 | #
4 | # ### Running a cell
5 | # To execute code within a cell, select that cell and either (1) hit `Shift` and `Enter` or
6 | # (2) hit the run button (the right pointing arrow) above.
7 | # ------------------------------------------------------------------------------------------
8 |
9 | 1 + 1
10 | 2 + 2
11 |
12 | # ------------------------------------------------------------------------------------------
13 | # If you're new to Jupyter notebooks, note that only the last line of a cell prints by
14 | # default when you execute that cell and that you can suppress this output with a semicolon
15 | # ------------------------------------------------------------------------------------------
16 |
17 | 1 + 1
18 | 2 + 2;
19 |
20 | # ------------------------------------------------------------------------------------------
21 | # ### How to get docs for Julia functions
22 | #
23 | # To get docs for a function you're not familiar with, precede it with a question mark.
24 | # (This works in the terminal too!)
25 | # ------------------------------------------------------------------------------------------
26 |
27 | ?println
28 |
29 | # ------------------------------------------------------------------------------------------
30 | # ### How to use shell commands
31 | #
32 | # Type `;` and then you can use shell commands. For example,
33 | # ------------------------------------------------------------------------------------------
34 |
35 | ;ls
36 |
37 | ;pwd
38 |
--------------------------------------------------------------------------------
/.nbexports/introductory-tutorials/intro-to-julia/short-version/03.Using_packages.jl:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # # Packages
3 | #
4 | # Julia has over 2000 registered packages, making packages a huge part of the Julia
5 | # ecosystem.
6 | #
7 | # Even so, the package ecosystem still has some growing to do. Notably, we have first class
8 | # function calls to other languages, providing excellent foreign function interfaces. We
9 | # can easily call into Python or R, for example, with `PyCall` or `RCall`.
10 | #
11 | # This means that you don't have to wait until the Julia ecosystem is fully mature, and that
12 | # moving to Julia doesn't mean you have to give up your favorite package/library from
13 | # another language!
14 | #
15 | # To see all available packages, check out
16 | #
17 | # https://pkg.julialang.org/
18 | # or
19 | # https://juliaobserver.com/
20 | #
21 | # For now, let's learn how to use a package.
22 | # ------------------------------------------------------------------------------------------
23 |
24 | # ------------------------------------------------------------------------------------------
25 | # The first time you use a package on a given Julia installation, you need to use the
26 | # package manager to explicitly add it:
27 | # ------------------------------------------------------------------------------------------
28 |
29 | using Pkg
30 | Pkg.add("Example")
31 |
32 | # ------------------------------------------------------------------------------------------
33 | # Every time you use Julia (start a new session at the REPL, or open a notebook for the
34 | # first time, for example), you load the package with the `using` keyword
35 | # ------------------------------------------------------------------------------------------
36 |
37 | using Example
38 |
39 | # ------------------------------------------------------------------------------------------
40 | # In the source code of `Example.jl` at
41 | # https://github.com/JuliaLang/Example.jl/blob/master/src/Example.jl
42 | # we see the following function declared
43 | #
44 | # ```
45 | # hello(who::String) = "Hello, $who"
46 | # ```
47 | #
48 | # Having loaded `Example`, we should now be able to call `hello`
49 | # ------------------------------------------------------------------------------------------
50 |
51 | hello("it's me. I was wondering if after all these years you'd like to meet.")
52 |
53 | # ------------------------------------------------------------------------------------------
54 | # Now let's play with the Colors package
55 | # ------------------------------------------------------------------------------------------
56 |
57 | Pkg.add("Colors")
58 |
59 | using Colors
60 |
61 | # ------------------------------------------------------------------------------------------
62 | # Let's create a palette of 100 different colors
63 | # ------------------------------------------------------------------------------------------
64 |
65 | palette = distinguishable_colors(100)
66 |
67 | # ------------------------------------------------------------------------------------------
68 | # and then we can create a randomly checkered matrix using the `rand` command
69 | # ------------------------------------------------------------------------------------------
70 |
71 | rand(3, 3)
72 |
73 | rand(1:10, 3, 3)
74 |
75 | rand(palette, 3, 3)
76 |
77 |
78 |
--------------------------------------------------------------------------------
/.nbexports/more-advanced-materials/ML-demos/TextAnalysis.jl:
--------------------------------------------------------------------------------
1 | using TextAnalysis
2 |
3 | model = TextAnalysis.SentimentAnalyzer()
4 |
5 | model(StringDocument("hello world"))
6 |
7 | model(StringDocument("an incredibly boring film"))
8 |
9 | model(StringDocument("a highly enjoyable ride"))
10 |
11 | model(StringDocument("a great film"))
12 |
13 |
14 |
--------------------------------------------------------------------------------
/.nbexports/more-advanced-materials/ML-demos/knet-nlp/charlm/charlm.jl:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # # Character based RNN language model
3 | # Based on http://karpathy.github.io/2015/05/21/rnn-effectiveness
4 | # ------------------------------------------------------------------------------------------
5 |
6 | # ------------------------------------------------------------------------------------------
7 | # ## Setup
8 | # 1-Adds required packages to Julia.
9 | # 2-Loads the data and a pretrained model.
10 | # ------------------------------------------------------------------------------------------
11 |
12 | include("charlm.jl")
13 |
14 | # ------------------------------------------------------------------------------------------
15 | # ## Sample Data-1
16 | # A random subset of the Shakespeare training data
17 | # ------------------------------------------------------------------------------------------
18 |
19 | LEN = 500
20 | r = rand(1:length(shake_text)-LEN)
21 | println(shake_text[r:r+LEN])
22 | flush(STDOUT)
23 |
24 | # ------------------------------------------------------------------------------------------
25 | # ## Sample Generation-1
26 | # Random Shakespeare style text generated by the model
27 | # ------------------------------------------------------------------------------------------
28 |
29 | generate(shake_model, LEN)
30 |
31 | # ------------------------------------------------------------------------------------------
32 | # ## Sample Data-2
33 | # A random subset of the code in Julia base
34 | # ------------------------------------------------------------------------------------------
35 |
36 | r = rand(1:length(julia_text)-LEN)
37 | println(julia_text[r:r+LEN])
38 | flush(STDOUT)
39 |
40 | # ------------------------------------------------------------------------------------------
41 | # ## Sample Generation-2
42 | # Random Julia code generated by the model
43 | # ------------------------------------------------------------------------------------------
44 |
45 | generate(julia_model, LEN)
46 |
--------------------------------------------------------------------------------
/.nbexports/more-advanced-materials/ML-demos/knet-nlp/charlm/shaketrain.jl:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # # Character based RNN language model trained on 'The Complete Works of William
3 | # Shakespeare'
4 | # Based on http://karpathy.github.io/2015/05/21/rnn-effectiveness
5 | # ------------------------------------------------------------------------------------------
6 |
7 | # Load 'The Complete Works of William Shakespeare'
8 | using Knet
9 | include(Knet.dir("data","gutenberg.jl"))
10 | trn,tst,chars = shakespeare()
11 | map(summary,(trn,tst,chars))
12 |
13 | # Print a sample
14 | println(string(chars[trn[1020:1210]]...))
15 |
16 | RNNTYPE = :lstm
17 | BATCHSIZE = 256
18 | SEQLENGTH = 100
19 | INPUTSIZE = 168
20 | VOCABSIZE = 84
21 | HIDDENSIZE = 334
22 | NUMLAYERS = 1
23 | DROPOUT = 0.0
24 | LR=0.001
25 | BETA_1=0.9
26 | BETA_2=0.999
27 | EPS=1e-08
28 | EPOCHS = 30;
29 |
30 | # Minibatch data
# Minibatch a 1-D token array into (B,T) input/target blocks for the RNN.
function mb(a)
    # Number of full batch-columns that fit in the data.
    nrows = div(length(a), BATCHSIZE)
    # Lay the data out as (BATCHSIZE, nrows) so each batch row is contiguous.
    batched = reshape(a[1:nrows*BATCHSIZE], nrows, BATCHSIZE)'
    # Targets are the inputs shifted by one position; cut into SEQLENGTH blocks.
    return minibatch(batched[:, 1:nrows-1], batched[:, 2:nrows], SEQLENGTH)
end
36 | dtrn,dtst = mb(trn),mb(tst)
37 | map(length, (dtrn,dtst))
38 |
39 | # Define model
# Build the model: RNN spec + weights, input embedding, output projection, bias.
function initmodel()
    # GPU-resident weight/bias initializers.
    winit(dims...) = KnetArray(xavier(Float32, dims...))
    binit(dims...) = KnetArray(zeros(Float32, dims...))
    r, wr = rnninit(INPUTSIZE, HIDDENSIZE, rnnType=RNNTYPE, numLayers=NUMLAYERS, dropout=DROPOUT)
    wx = winit(INPUTSIZE, VOCABSIZE)   # input embedding matrix
    wy = winit(VOCABSIZE, HIDDENSIZE)  # output projection matrix
    by = binit(VOCABSIZE, 1)           # output bias
    return r, wr, wx, wy, by
end;
49 |
50 | # Given the current character, predict the next character
# Given current characters xs=(B,T) and RNN states, return next-character
# scores of shape (V,B*T) along with the updated hidden/cell states.
function predict(ws, xs, hx, cx; pdrop=0)
    r, wr, wx, wy, by = ws
    # Embed the (B,T) character ids into (X,B,T) input vectors.
    emb = wx[:, xs]
    emb = dropout(emb, pdrop)
    # Run the RNN, also requesting the final hidden/cell states.
    y, hy, cy = rnnforw(r, wr, emb, hx, cx, hy=true, cy=true)  # y=(H,B,T)
    y = dropout(y, pdrop)
    # Collapse the time dimension into the batch dimension: (H,B*T).
    flat = reshape(y, size(y,1), size(y,2)*size(y,3))
    return wy*flat .+ by, hy, cy
end
60 |
61 | # Define loss and its gradient
# Negative log-likelihood of targets y for one minibatch; updates h in place
# with the final states so they carry over to the next minibatch.
function loss(w, x, y, h; o...)
    scores, hy, cy = predict(w, x, h...; o...)
    # getval detaches the states so gradients stop at minibatch boundaries.
    h[1], h[2] = getval(hy), getval(cy)
    return nll(scores, y)
end
67 |
68 | lossgradient = gradloss(loss);
69 |
# One training epoch: take an optimizer step on every minibatch and
# return the average loss.
function train(model, data, optim)
    hiddens = Any[nothing, nothing]
    total = 0
    nbatches = 0
    for (x, y) in data
        grads, batchloss = lossgradient(model, x, y, hiddens; pdrop=DROPOUT)
        update!(model, grads, optim)
        total += batchloss
        nbatches += 1
    end
    return total / nbatches
end;
80 |
# Average loss over a dataset without updating the model.
function test(model, data)
    hiddens = Any[nothing, nothing]
    total = 0
    nbatches = 0
    for (x, y) in data
        total += loss(model, x, y, hiddens)
        nbatches += 1
    end
    return total / nbatches
end;
89 |
90 | # Train model or load from file if exists
91 | using JLD
92 | model=optim=nothing; knetgc()
93 | if !isfile("shakespeare.jld")
94 | model = initmodel()
95 | optim = optimizers(model, Adam; lr=LR, beta1=BETA_1, beta2=BETA_2, eps=EPS); info("Training...")
96 | @time for epoch in 1:EPOCHS
97 | @time trnloss = train(model,dtrn,optim) # ~18 seconds
98 | @time tstloss = test(model,dtst) # ~0.5 seconds
99 | println((:epoch, epoch, :trnppl, exp(trnloss), :tstppl, exp(tstloss)))
100 | end
101 | save("shakespeare.jld","model",model)
102 | else
103 | model = load("shakespeare.jld","model")
104 | end
105 | summary(model)
106 |
107 | # Sample from trained model
# Sample and print n characters from the trained language model.
function generate(model,n)
    # Draw an index from the categorical distribution given by scores y.
    function sample(y)
        p,r=Array(exp.(y-logsumexp(y))),rand()
        for j=1:length(p); (r -= p[j]) < 0 && return j; end
        # Fallback: floating-point rounding can leave r slightly positive
        # after the loop, which previously returned `nothing` and crashed
        # the chars[x] lookup below. Return the last index instead.
        return length(p)
    end
    h,c = nothing,nothing
    x = findfirst(chars,'\n')  # seed generation with a newline character
    for i=1:n
        y,h,c = predict(model,[x],h,c)
        x = sample(y)
        print(chars[x])
    end
    println()
end;
122 |
123 | generate(model,1000)
124 |
--------------------------------------------------------------------------------
/.nbexports/more-advanced-materials/ML-demos/knet-nlp/imdb/imdbdemo.jl:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # # IMDB Movie Review Sentiment Analysis Demo
3 | # ------------------------------------------------------------------------------------------
4 |
5 | # ------------------------------------------------------------------------------------------
6 | # ## Setup
7 | # 1-Adds required packages to Julia.
8 | # 2-Loads the data and a pretrained model.
9 | # ------------------------------------------------------------------------------------------
10 |
11 | include("imdb.jl")
12 |
13 | # ------------------------------------------------------------------------------------------
14 | # ## Sample Data
15 | # The model was trained using 25000 movie reviews such as the following (shift-ENTER to see
16 | # random example)
17 | # Each review was tokenized, lowercased, truncated to at most 150 words, and encoded
18 | # with a 30,000-word vocabulary.
19 | # ------------------------------------------------------------------------------------------
20 |
21 | r = rand(1:length(xtrn))
22 | println(reviewstring(xtrn[r],ytrn[r]))
23 | flush(STDOUT)
24 |
25 | # ------------------------------------------------------------------------------------------
26 | # ## Test
27 | # We test the model on 25000 never before seen reviews on the test set (shift-ENTER to see
28 | # random example)
29 | # The test accuracy is around 86%
30 | # ------------------------------------------------------------------------------------------
31 |
32 | r = rand(1:length(xtst))
33 | println(reviewstring(xtst[r],ytst[r]))
34 | println("\nModel prediction: "*predictstring(xtst[r]))
35 | flush(STDOUT)
36 |
37 | # ------------------------------------------------------------------------------------------
38 | # ## User Input
39 | # In this cell you can enter your own review and let the model guess the sentiment
40 | # ------------------------------------------------------------------------------------------
41 |
42 | userinput = readline(STDIN)
43 | words = split(lowercase(userinput))
44 | ex = [get!(imdbdict,wr,UNK) for wr in words]
45 | ex[ex.>MAXFEATURES]=UNK
46 | println("\nModel prediction: "*predictstring(ex))
47 |
48 |
49 |
--------------------------------------------------------------------------------
/.nbexports/more-advanced-materials/ML-demos/knet-nlp/imdb/imdbtrain.jl:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # # IMDB Training Notebook
3 | # ------------------------------------------------------------------------------------------
4 |
5 | using Knet
6 | # Hyperparams LSTM
7 | EPOCHS=3
8 | BATCHSIZE=64
9 | EMBEDSIZE=125
10 | NUMHIDDEN=100
11 | LR=0.0001
12 | BETA_1=0.9
13 | BETA_2=0.999
14 | EPS=1e-08
15 | MAXLEN=150 #maximum size of the word sequence
16 | MAXFEATURES=30000 #vocabulary size
17 | DROPOUT=0.35
18 | SEED=1311194
19 | gpu(0)
20 | atype = gpu()<0 ? Array{Float32}:KnetArray{Float32}
21 |
22 | # define model
# Build the model: LSTM spec+weights, word embedding matrix, output projection.
function initmodel()
    rnnSpec, rnnWeights = rnninit(EMBEDSIZE, NUMHIDDEN; rnnType=:lstm)
    # (EMBEDSIZE, MAXFEATURES) word embeddings.
    inputMatrix = atype(xavier(Float32, EMBEDSIZE, MAXFEATURES))
    # (2, NUMHIDDEN) projection to the two sentiment classes.
    outputMatrix = atype(xavier(Float32, 2, NUMHIDDEN))
    return rnnSpec, (rnnWeights, inputMatrix, outputMatrix)
end
29 |
# Persist the trained weights together with the RNN spec to a JLD file.
function savemodel(weights,rnnSpec;localfile="model_imdb.jld")
    save(localfile,"weights",weights,"rnnSpec",rnnSpec)
end
33 |
34 | # define loss and its gradient
# Forward pass: map a batch of token-id sequences to (2,B) class scores.
# Dropout is applied only when train=true.
function predict(weights, inputs, rnnSpec; train=false)
    rnnWeights, inputMatrix, outputMatrix = weights  # (1,1,W), (X,V), (2,H)
    # Stack the sequences into a (B,T) index matrix.
    indices = hcat(inputs...)'
    # Embedding lookup: (X,B,T).
    rnnInput = inputMatrix[:, indices]
    train && (rnnInput = dropout(rnnInput, DROPOUT))
    # Hidden states for every time step: (H,B,T).
    rnnOutput = rnnforw(rnnSpec, rnnWeights, rnnInput)[1]
    train && (rnnOutput = dropout(rnnOutput, DROPOUT))
    # Classify from the last time step only: (2,H) * (H,B) = (2,B).
    return outputMatrix * rnnOutput[:,:,end]
end
48 |
# Negative log-likelihood of the gold labels under the model's predictions.
loss(w,x,y,r;train=false)=nll(predict(w,x,r;train=train),y)
50 | lossgradient = grad(loss);
51 |
52 | # load data
53 | include("imdb.jl")
54 | @time (xtrn,ytrn,xtst,ytst,imdbdict)=imdb(maxlen=MAXLEN,maxval=MAXFEATURES,seed=SEED)
55 | for d in (xtrn,ytrn,xtst,ytst); println(summary(d)); end
56 | imdbarray = Array{String}(88584)
57 | for (k,v) in imdbdict; imdbarray[v]=k; end
58 |
59 | rnd = rand(1:length(xtrn))
60 | println("Sample review:\n",join(imdbarray[xtrn[rnd]]," "),"\n")
61 | println("Classification: ",join(ytrn[rnd]))
62 |
63 | # prepare for training
64 | weights = nothing; knetgc(); # Reclaim memory from previous run
65 | rnnSpec,weights = initmodel()
66 | optim = optimizers(weights, Adam; lr=LR, beta1=BETA_1, beta2=BETA_2, eps=EPS);
67 |
68 | # 29s
69 | info("Training...")
70 | @time for epoch in 1:EPOCHS
71 | @time for (x,y) in minibatch(xtrn,ytrn,BATCHSIZE;shuffle=true)
72 | grads = lossgradient(weights,x,y,rnnSpec;train=true)
73 | update!(weights, grads, optim)
74 | end
75 | end
76 |
77 | info("Testing...")
78 | @time accuracy(weights, minibatch(xtst,ytst,BATCHSIZE), (w,x)->predict(w,x,rnnSpec))
79 |
80 | savemodel(weights,rnnSpec)
81 |
82 |
83 |
--------------------------------------------------------------------------------
/.nbexports/more-advanced-materials/ML-demos/knet-nlp/macnet/visualize.jl:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # # Visual Q&A Demo
3 | # ------------------------------------------------------------------------------------------
4 |
5 | # ------------------------------------------------------------------------------------------
6 | # ## Setup
7 | # 1-Adds required packages to Julia.
8 | # 2-Downloads sample data and a pretrained model.
9 | # ------------------------------------------------------------------------------------------
10 |
11 | include("demosetup.jl")
12 |
13 | # ------------------------------------------------------------------------------------------
14 | # ## Initialization
15 | # 1-Loads the sample demo data (image features,questions,vocabulary).
16 | # 2-Loads the pretrained model.
17 | # ------------------------------------------------------------------------------------------
18 |
19 | include("src/newmacnetwork.jl")
20 | feats,qstsns,(w2i,a2i,i2w,i2a) = loadDemoData("data/demo/");
21 | _,wrun,r,_,p = loadmodel("models/macnet.jld";onlywrun=true);
22 | if !(typeof(first(wrun)) <: atype)
23 | wrun = map(atype,wrun);
24 | end;
25 |
26 | # ------------------------------------------------------------------------------------------
27 | # ## Sample Data
28 | # 1-Randomly selects (question,image) pair from the sample data
29 | # 2-Make predictions for the question and checks whether the prediction is correct
30 | # ------------------------------------------------------------------------------------------
31 |
32 | rnd = rand(1:length(qstsns))
33 | inst = qstsns[rnd]
34 | feat = feats[:,:,:,rnd:rnd]
35 | question = Array{Int}(inst[2])
36 | answer = inst[3];
37 | family = inst[4];
38 | results,prediction = singlerun(wrun,r,feat,question;p=p);
39 | answer==prediction
40 |
41 | img = load("data/demo/CLEVR_v1.0/images/val/$(inst[1])")
42 |
43 | textq = i2w[question];
44 | println("Question: ",join(textq," "))
45 | texta = i2a[answer];
46 | println("Answer: $(texta)\nPrediction: $(i2a[prediction]) ")
47 |
48 | # ------------------------------------------------------------------------------------------
49 | # ## User Data
50 | # You can enter your own question about the image and test whether the prediction is correct
51 | # ------------------------------------------------------------------------------------------
52 |
53 | userinput = readline(STDIN)
54 | words = split(userinput) # tokenize(userinput)
55 | question = [get!(w2i,wr,1) for wr in words]
56 | results,prediction = singlerun(wrun,r,feat,question;p=p);
57 | println("Question: $(join(i2w[question]," "))")
58 | println("Prediction: $(i2a[prediction])")
59 |
60 | # ------------------------------------------------------------------------------------------
61 | # ## Visualize
62 | # `visualize` function visualizes attention maps for each time step of the mac network
63 | # ------------------------------------------------------------------------------------------
64 |
65 | visualize(img,results;p=p)
66 |
67 |
68 |
--------------------------------------------------------------------------------
/.nbexports/more-advanced-materials/ML-demos/knet-tutorial/15.quickstart.jl:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # # Quick start
3 | # (c) Deniz Yuret, 2019
4 | #
5 | # This notebook is for the impatient reader who wants to get a flavor of Julia/Knet possibly
6 | # to compare it with other deep learning frameworks. In 15 lines of code and 30 seconds of
7 | # GPU time we define, train, and evaluate the LeNet convolutional neural network model from
8 | # scratch without any predefined layers.
9 | # ------------------------------------------------------------------------------------------
10 |
11 | using Knet
12 |
13 | # Define convolutional layer:
# Convolutional layer: filter weights, bias, and elementwise activation.
struct Conv; w; b; f; end

# Forward pass: convolve, add bias, activate elementwise, then max-pool.
function (c::Conv)(x)
    return c.f.(pool(conv4(c.w, x) .+ c.b))
end

# Convenience constructor: random (w1,w2,cx,cy) filters and a zero bias.
Conv(w1, w2, cx, cy, f=relu) = Conv(param(w1, w2, cx, cy), param0(1, 1, cy, 1), f);
17 |
18 | # Define dense layer:
# Fully-connected layer: weight matrix, bias vector, and activation.
struct Dense; w; b; f; end

# Forward pass: flatten the input with mat, apply the affine map, activate.
function (d::Dense)(x)
    return d.f.(d.w * mat(x) .+ d.b)
end

# Convenience constructor from input/output sizes.
Dense(i::Int, o::Int, f=relu) = Dense(param(o, i), param0(o), f);
22 |
23 | # Define a chain of layers:
# A sequential container: applies its layers left to right.
struct Chain; layers; Chain(args...)=new(args); end

# Forward pass: thread x through every layer in order.
function (c::Chain)(x)
    for layer in c.layers
        x = layer(x)
    end
    return x
end

# With a label argument, return the negative log-likelihood loss instead.
(c::Chain)(x, y) = nll(c(x), y)
27 |
28 | # Load MNIST data
29 | include(Knet.dir("data","mnist.jl"))
30 | dtrn, dtst = mnistdata();
31 |
32 | # Train and test LeNet (about 30 secs on a gpu to reach 99% accuracy)
33 | LeNet = Chain(Conv(5,5,1,20), Conv(5,5,20,50), Dense(800,500), Dense(500,10,identity))
34 | progress!(adam(LeNet, repeat(dtrn,10)))
35 | accuracy(LeNet, dtst)
36 |
--------------------------------------------------------------------------------
/.nbexports/more-advanced-materials/ML-demos/knet-tutorial/20.mnist.jl:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # # Load and minibatch MNIST data
3 | # (c) Deniz Yuret, 2019
4 | # * Objective: Load the [MNIST](http://yann.lecun.com/exdb/mnist) dataset, convert into
5 | # Julia arrays, split into minibatches using Knet's
6 | # [minibatch](http://denizyuret.github.io/Knet.jl/latest/reference/#Knet.minibatch) function
7 | # and [Data](https://github.com/denizyuret/Knet.jl/blob/master/src/data.jl) iterator type.
8 | # * Prerequisites: [Julia arrays](https://docs.julialang.org/en/v1/manual/arrays)
9 | # * New functions: [dir](http://denizyuret.github.io/Knet.jl/latest/reference/#Knet.dir),
10 | # [minibatch, Data](http://denizyuret.github.io/Knet.jl/latest/reference/#Knet.minibatch),
11 | # [mnist, mnistview](https://github.com/denizyuret/Knet.jl/blob/master/data/mnist.jl)
12 | #
13 | # In the next few notebooks, we build classification models for the MNIST handwritten digit
14 | # recognition dataset. MNIST has 60000 training and 10000 test examples. Each input x
15 | # consists of 784 pixels representing a 28x28 image. The corresponding output indicates the
16 | # identity of the digit 0..9.
17 | #
18 | # 
19 | #
20 | # [image source](http://yann.lecun.com/exdb/lenet)
21 | # ------------------------------------------------------------------------------------------
22 |
23 | # Load packages, import symbols
24 | using Pkg; for p in ("Knet","Images","ImageMagick"); haskey(Pkg.installed(),p) || Pkg.add(p); end
25 | using Knet: Knet, dir, minibatch, Data
26 |
27 | # This loads the MNIST handwritten digit recognition dataset:
28 | include(Knet.dir("data","mnist.jl")) # Knet.dir constructs a path relative to Knet root
29 | xtrn,ytrn,xtst,ytst = mnist() # mnist() loads MNIST data and converts into Julia arrays
30 | println.(summary.((xtrn,ytrn,xtst,ytst)));
31 |
32 | # mnistview(x,i) converts the i'th instance in x into an image
33 | # Here is the first five images from the test set:
34 | using Images
35 | hcat([mnistview(xtst,i) for i=1:5]...)
36 |
37 | # Here are their labels (10 is used to represent 0)
38 | println(Int.(ytst[1:5]));
39 |
40 | # `minibatch` splits the data tensors to small chunks called minibatches.
41 | # It returns a Knet.Data struct: an iterator of (x,y) pairs.
42 | dtrn = minibatch(xtrn,ytrn,100)
43 | dtst = minibatch(xtst,ytst,100)
44 |
45 | # Each minibatch is an (x,y) pair where x is 100 (28x28x1) images and y are the corresponding 100 labels.
46 | # Here is the first minibatch in the test set:
47 | (x,y) = first(dtst)
48 | println.(summary.((x,y)));
49 |
50 | # Iterators can be used in for loops, e.g. `for (x,y) in dtrn`
51 | # dtrn generates 600 minibatches of 100 images (total 60000)
52 | # dtst generates 100 minibatches of 100 images (total 10000)
53 | n = 0
54 | for (x,y) in dtrn
55 | n += 1
56 | end
57 | n
58 |
--------------------------------------------------------------------------------
/.nbexports/more-advanced-materials/ML-demos/knet-tutorial/colab_install_julia.txt:
--------------------------------------------------------------------------------
1 | # To run julia notebooks in colab, run this installation script first, should take 10-15 minutes.
2 | # From: @jekbradbury https://discourse.julialang.org/t/julia-on-google-colab-free-gpu-accelerated-shareable-notebooks/15319
3 | !wget https://developer.nvidia.com/compute/cuda/9.0/Prod/local_installers/cuda-repo-ubuntu1604-9-0-local_9.0.176-1_amd64-deb
4 | !dpkg -i cuda-repo-ubuntu1604-9-0-local_9.0.176-1_amd64-deb
5 | !apt-key add /var/cuda-repo-9-0-local/7fa2af80.pub
6 | !apt update -q
7 | !apt install cuda gcc-6 g++-6 -y -q
8 | !ln -s /usr/bin/gcc-6 /usr/local/cuda/bin/gcc
9 | !ln -s /usr/bin/g++-6 /usr/local/cuda/bin/g++
10 |
11 | !curl -sSL "https://julialang-s3.julialang.org/bin/linux/x64/1.0/julia-1.0.0-linux-x86_64.tar.gz" -o julia.tar.gz
12 | !tar -xzf julia.tar.gz -C /usr --strip-components 1
13 | !rm -rf julia.tar.gz*
14 | !julia -e 'using Pkg; pkg"add IJulia; add Knet; precompile"'
15 |
16 |
17 |
--------------------------------------------------------------------------------
/.nbexports/more-advanced-materials/parallelism-demos/01. Distributed Arrays.jl:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # # Distributed Arrays
3 | # ------------------------------------------------------------------------------------------
4 |
5 | using JuliaRunClient
6 |
7 | initializeCluster(12)
8 |
9 | @everywhere using DistributedArrays
10 |
11 | a=dzeros(12,12)
12 |
13 | a=dfill(5.0, 12, 12)
14 |
15 | b=drand(1000,1000)
16 |
17 | procs(b)
18 |
19 | @time sum(b)
20 |
21 | convert(Array{Float64}, a)
22 |
23 | ?DArray
24 |
@everywhere function par(I)
    # I is a tuple of index ranges describing this worker's patch of
    # the DArray; build a local array of that shape filled with the
    # owning worker's process id.
    patchsize = (size(I[1], 1), size(I[2], 1))
    return fill(myid(), patchsize)
end
35 |
36 | m = DArray(par, (800, 800))
37 |
38 | m.indexes
39 |
40 | rank(m)
41 |
42 | mm = @spawnat 2 rank(localpart(m))
43 | fetch(mm)
44 |
45 | # ------------------------------------------------------------------------------------------
46 | # #### Credits
47 | # * http://www.csd.uwo.ca/~moreno/cs2101a_moreno/Parallel_computing_with_Julia.pdf
48 | # *
49 | # ------------------------------------------------------------------------------------------
50 |
51 | releaseCluster()
52 |
--------------------------------------------------------------------------------
/.nbexports/more-advanced-materials/parallelism-demos/02. Parallelism Basics.jl:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------
2 | # # Julia Parallelism Basics
3 | # ------------------------------------------------------------------------------------------
4 |
5 | addprocs(2)
6 |
7 | nworkers()
8 |
9 | workers()
10 |
11 | r = remotecall(rand, 2, 2, 2)
12 |
13 | fetch(r)
14 |
15 | s = @spawnat 3 1+fetch(r)
16 |
17 | fetch(s)
18 |
19 | remotecall_fetch(getindex, 2, r, 1, 1)
20 |
21 | r = @spawn rand(2,2)
22 |
23 | s = @spawn 1+fetch(r)
24 |
# Like rand, but uniform over [0, 2) instead of [0, 1).
rand2(dims...) = 2 * rand(dims...)
28 |
29 | rand2(2,2)
30 |
31 | r2 = @spawn rand2(2,2)
32 |
33 | fetch(r2)
34 |
# Redefine rand2 on every process so remote @spawn calls can find it.
@everywhere rand2(dims...) = 2 * rand(dims...)
38 |
39 | r2 = @spawn rand2(2,2)
40 | fetch(r2)
41 |
42 | # ------------------------------------------------------------------------------------------
43 | # ## Data Movement
44 | # ------------------------------------------------------------------------------------------
45 |
46 | @time begin
47 | A = rand(1000,1000)
48 | Bref = @spawn A^2
49 | fetch(Bref)
50 | end
51 |
52 | @time begin
53 | Bref = @spawn rand(1000,1000)^2
54 | fetch(Bref)
55 | end
56 |
57 | # ------------------------------------------------------------------------------------------
58 | # ## Shared Arrays
59 | #
60 | # Shared Arrays are created by mapping the same region in memory to different processes.
61 | # ------------------------------------------------------------------------------------------
62 |
63 | s = SharedArray{Float64}(100,100)  # one 100x100 array in shared memory, visible to all local processes
64 |
65 | localindexes(s)  # index range owned by this (master) process -- NOTE(review): renamed `localindices` in Julia 1.0
66 |
67 | fetch(@spawnat 2 localindexes(s))  # worker 2's slice of the array
68 |
69 | fetch(@spawnat 3 localindexes(s))  # worker 3's slice (disjoint from worker 2's)
70 |
71 | for i in workers()  # each worker stamps its own process id into its chunk
72 | @spawnat i s[localindexes(s)] = myid()
73 | end
74 |
75 | s
76 |
77 | fetch(@spawnat 2 s[100,100])  # every process reads the same underlying memory
78 |
79 | fetch(@spawnat 3 s[1,1])
80 |
--------------------------------------------------------------------------------
/.nbexports/more-advanced-materials/parallelism-demos/03. JuliaRun-parallel.jl:
--------------------------------------------------------------------------------
1 | using JuliaRunClient
2 |
3 | initializeCluster(2);
4 |
5 | function estimate_pi(N, loops)  # Monte Carlo pi: `loops` batches of N darts, batches distributed over workers by pmap
6 | n = sum(pmap((x)->darts_in_circle(N), 1:loops))
7 | 4 * n / (loops * N)  # fraction inside the quarter circle is pi/4
8 | end
9 |
10 | @everywhere function darts_in_circle(N)  # count of N random points in the unit square that land inside the unit quarter circle
11 | n = 0
12 | for i in 1:N
13 | if rand()^2 + rand()^2 < 1
14 | n += 1
15 | end
16 | end
17 | n
18 | end
19 |
20 | estimate_pi(10, 2) #compile the function on all nodes
21 |
22 | @time estimate_pi(1_000_000, 50)  # timed run: 50 batches of 1e6 darts
23 |
24 | releaseCluster();  # tear the worker cluster down
25 |
26 | ## Ignore if you see a message as below
27 | ## ERROR (unhandled task failure): EOFError: read end of file or ERROR (unhandled task failure): read: connection reset by peer (ECONNRESET)
28 |
29 | sleep(30)  # give the cluster time to shut down before checking
30 |
31 | nworkers()  # should be back to 1 once the workers are gone
32 |
33 |
34 |
--------------------------------------------------------------------------------
/.nbexports/more-advanced-materials/parallelism-demos/04. TracyWidom.jl:
--------------------------------------------------------------------------------
1 | using JuliaRunClient
2 | ctx = Context()  # handle to the JuliaRun cluster API
3 | nb = self()  # this notebook's own job handle
4 |
5 | initParallel()
6 |
7 | NWRKRS = 2  # target worker count
8 | println("scale up to $NWRKRS")
9 |
10 | @result setJobScale(ctx, nb, NWRKRS)  # ask JuliaRun to scale this job up
11 | waitForWorkers(NWRKRS)  # block until the workers have joined
12 |
13 | using StatsBase
14 |
15 | @everywhere using StatsBase  # plain `using` above loads it on the master only; workers need it too
16 |
17 | @everywhere function montecarlo(howmany, data_generator, bins)  # histogram `howmany` samples of data_generator() over edges `bins`; returns the per-bin counts
18 | h = Histogram(bins)
19 | for i=1:howmany
20 | push!(h, data_generator() )
21 | end
22 | return h.weights  # one count per bin (length(bins)-1 entries)
23 | end
24 |
25 | w = @parallel (+) for i=1:nworkers()  # parallel reduction: each worker histograms 100k normal samples, (+) sums the counts
26 | montecarlo(100000, randn, -3:.1:3)
27 | end;
28 |
29 | using Plots
30 |
31 | @everywhere function tracywidom_sample(β=2,n=200)  # one random sample of the Tracy-Widom(β) largest-eigenvalue distribution via a random tridiagonal discretization
32 | h=n^(-1/3)  # grid spacing
33 | x=[0:h:10;]
34 | N=length(x)
35 | d=(-2/h^2 .-x) + 2/sqrt(h*β)*randn(N) # diagonal
36 | e=ones(N-1)/h^2 # subdiagonal
37 | eigvals(SymTridiagonal(d,e))[N]  # eigvals returns ascending order, so [N] is the largest eigenvalue
38 | end
39 |
40 | plot()  # start an empty figure to accumulate one curve per β
41 | for β = [1,2,4]
42 | bins = -4:.05:0.95  # left bin edges used for the x axis (the histogram uses edges -4:.05:1)
43 | w=
44 | @parallel (+) for i=1:nworkers()  # sum the per-worker histograms of 10k samples each
45 | montecarlo(10000,()->tracywidom_sample(β), -4:.05:1)
46 | end;
47 | plot!(bins, w/sum(w)*bins.step.hi)  # NOTE(review): a density would be w/sum(w)/binwidth -- confirm the intended scaling
48 | end
49 |
50 | plot!()
51 |
52 | # Scale down
53 | @result setJobScale(ctx, self(), 0)  # release all workers back to the cluster
54 |
55 | nworkers()
56 |
57 |
58 |
--------------------------------------------------------------------------------
/.nbexports/more-advanced-materials/parallelism-demos/06. JuliaRun-http-service.jl:
--------------------------------------------------------------------------------
1 | using JuliaRunClient
2 | ctx = Context()  # handle to the JuliaRun cluster API
3 |
4 | job = JuliaBatch("mywebserver")  # named batch job submitted below
5 |
6 | # code for a simple webserver
7 | webserver_script = """
8 | using HttpServer
9 |
10 | http = HttpHandler() do req::Request, res::Response
11 | Response("JuliaRun says hello from " * gethostname())
12 | end
13 |
14 | server = Server(http)
15 | run(server, 8000)
16 | """
17 |
18 | open("/mnt/juliabox/helloweb.jl", "w") do f  # write the script to the shared volume the job will mount
19 | println(f, webserver_script)
20 | end
21 |
22 | # start a webserver job, specifying port 8000 to be opened up
23 | @result submitJob(ctx, job; start_script="/mnt/juliabox/helloweb.jl", run_volume="juliabox", image="juliabox", cpu="1", memory="1Gi", shell="/juliabox/scripts/master.sh", ports="8000:8000")
24 |
25 | # check that the job has started
26 | @result getJobStatus(ctx, job)
27 |
28 | # Get the IP and port that was assigned for the webserver to listen on.
29 | # The webserver may actually be running any of the physical nodes of the cluster.
30 | # And it is running in an isolated container, with its own virtual IP address
31 | # This IP address is accessible only to the user who started the webserver.
32 | ip, portspec = @result getJobEndpoint(ctx, job)
33 |
34 | # We can connect to it from here (or anything else run by this user)
35 | using Requests
36 | url = "http://$(ip):$(first(values(portspec)))/"
37 |
38 | String(Requests.bytes(get(url)))  # GET the service and decode the response body as a String
39 |
40 | # We stop the webserver now
41 | @result deleteJob(ctx, job; force=true)
42 |
43 |
44 |
--------------------------------------------------------------------------------
/.nbexports/zh-cn/intro-to-julia-ZH/00.上手Jupyter_notebook.jl:
--------------------------------------------------------------------------------
1 |
2 | # ------------------------------------------------------------------------------------------
3 | # ## 上手 Jupyter notebook
4 | #
5 | # ### 目录
6 | # - [运行代码块(cell)](#运行代码块(cell))
7 | # - [查看 Julia 的帮助文档](#查看-Julia-的帮助文档)
8 | # - [使用 shell 命令](#使用-shell-命令)
9 | # ------------------------------------------------------------------------------------------
10 |
11 | # ------------------------------------------------------------------------------------------
12 | # ### 运行代码块(cell)
13 | # 选中一个代码块之后(选中的代码块会被绿色矩形框起来),有以下几种方法可以运行它:
14 | # 1. 按下组合键 `Shift` + `Enter`
15 | # 2. 点击上方工具栏中的 **运行(Run)** 按钮
16 | # 3. 按下组合键 `Ctrl` + `Enter`
17 | # ------------------------------------------------------------------------------------------
18 |
19 | 1 + 1
20 | 2 + 2
21 |
22 | # ------------------------------------------------------------------------------------------
23 | # 新手注意!执行一个代码块时默认打印最后一行。想要什么也不输出,在最后一行的行尾加上分号 `;` 就行了。如下所示
24 | # ------------------------------------------------------------------------------------------
25 |
26 | 1 + 1
27 | 2 + 2;
28 |
29 | # ------------------------------------------------------------------------------------------
30 | # ### 查看 Julia 的帮助文档
31 | #
32 | # 遇到不熟悉的 Julia 函数或语法,在函数名或表达式前加个问号 `?` 就可以查询对应的帮助文档。(在Julia的REPL中也好使哦!)
33 | # ------------------------------------------------------------------------------------------
34 |
35 | ?println
36 |
37 | # ------------------------------------------------------------------------------------------
38 | # ### 使用 shell 命令
39 | #
40 | # 在 shell 命令前加上分号 `;` 即可,像这样:
41 | #
42 | # > 【译注】以下命令仅在 macOS 和 *nix 系统下可用
43 | # ------------------------------------------------------------------------------------------
44 |
45 | ;ls
46 |
47 | ;pwd
48 |
49 | # ------------------------------------------------------------------------------------------
50 | # shell 命令在 Julia 的 REPL 中也好使哦!
51 | #
52 | # > 【译注】对于 Windows 系统,你需要使用 Windows cmd 命令,像这样:
53 | # ------------------------------------------------------------------------------------------
54 |
55 | ;cd
56 |
57 | # ------------------------------------------------------------------------------------------
58 | # 但并不是所有的 cmd 命令都可以使用。如 `dir` 命令不可用,会报错
59 | # ------------------------------------------------------------------------------------------
60 |
61 | ;dir
62 |
63 | # ------------------------------------------------------------------------------------------
64 | # 中文社区的讨论贴
65 | # - [Windows 中 Julia 的 shell 模式与 `;dir` 报错 - 综合讨论区 / 心得体会 -
66 | # Julia中文社区](https://discourse.juliacn.com/t/topic/2890)
67 | # ------------------------------------------------------------------------------------------
68 |
--------------------------------------------------------------------------------
/.nbexports/zh-cn/intro-to-julia-ZH/04.循环.jl:
--------------------------------------------------------------------------------
1 |
2 | # ------------------------------------------------------------------------------------------
3 | # # 循环
4 | #
5 | # 话题:
6 | # 1. `while`循环
7 | # 2. `for`循环
8 | #
9 | #
10 | # ## `while` 循环
11 | #
12 | # `while` 循环语法:
13 | #
14 | # ```julia
15 | # while
16 | #
17 | # end
18 | # ```
19 | #
20 | # 举个例子,可以使用 `while` 来数数或者遍历一个数组。
21 | # ------------------------------------------------------------------------------------------
22 |
23 | n = 0  # count from 1 to 10 with a while loop
24 | while n < 10
25 | n += 1
26 | println(n)
27 | end
28 | n
29 |
30 | myfriends = ["Ted", "Robyn", "Barney", "Lily", "Marshall"]
31 |
32 | i = 1  # index-based traversal of the array with while
33 | while i <= length(myfriends)
34 | friend = myfriends[i]
35 | println("Hi $friend, it's great to see you!")
36 | i += 1
37 | end
38 |
39 | # ------------------------------------------------------------------------------------------
40 | # ## `for` 循环
41 | #
42 | # `for` 循环语法:
43 | # ```julia
44 | # for in
45 | #
46 | # end
47 | # ```
48 | #
49 | # 上面两个例子都可以用 `for` 循环改写:
50 | # ------------------------------------------------------------------------------------------
51 |
52 | for n in 1:10  # the same counting loop, rewritten with for
53 | println(n)
54 | end
55 |
56 | myfriends = ["Ted", "Robyn", "Barney", "Lily", "Marshall"]
57 |
58 | for friend in myfriends  # iterate over elements directly -- no manual index needed
59 | println("Hi $friend, it's great to see you!")
60 | end
61 |
62 | # ------------------------------------------------------------------------------------------
63 | # 让我们来试试用 `for` 循环创建加和表,表的每个元素为其行索引和列索引的和。
64 | #
65 | # 首先,来初始化一个全零的数组(矩阵)。
66 | # ------------------------------------------------------------------------------------------
67 |
68 | m, n = 5, 5  # dimensions of the addition table
69 | A = fill(0, (m, n))  # zero-initialized m×n Int matrix
70 |
71 | for i in 1:m
72 | for j in 1:n
73 | A[i, j] = i + j  # each entry is the sum of its row and column index
74 | end
75 | end
76 | A
77 |
78 | # ------------------------------------------------------------------------------------------
79 | # 趁机介绍一下像上面这种嵌套 `for` 循环的语法糖
80 | # ------------------------------------------------------------------------------------------
81 |
82 | B = fill(0, (m, n))
83 |
84 | for i in 1:m, j in 1:n  # nested loops fused into one for with comma syntax
85 | B[i, j] = i + j
86 | end
87 | B
88 |
89 | # ------------------------------------------------------------------------------------------
90 | # 创建同样的表但更具有“Julia”风格的方法是使用**数组解析式(array comprehension)**。
91 | # ------------------------------------------------------------------------------------------
92 |
93 | C = [i + j for i in 1:m, j in 1:n]  # array comprehension: the same m×n table in one expression
94 |
95 | # ------------------------------------------------------------------------------------------
96 | # ## 练习
97 | #
98 | # ### 4.1
99 | # 循环遍历 1 到 100 的整数并打印它们的平方。
100 | # ------------------------------------------------------------------------------------------
101 |
102 | # 在此作答
103 |
104 |
105 | # ------------------------------------------------------------------------------------------
106 | # ### 4.2
107 | # 给上一题的代码添一点以创建一个字典,使字典存着每个整数和它的平方的键值对,就像这样
108 | #
109 | # ```julia
110 | # squares[10] == 100
111 | # ```
112 | # ------------------------------------------------------------------------------------------
113 |
114 | # 在此作答
115 |
116 |
117 | @assert squares[10] == 100
118 | @assert squares[11] == 121
119 |
120 | # ------------------------------------------------------------------------------------------
121 | # ### 4.3
122 | # 用数组解析式来创建一个数组 `squares_arr`,让它存着从1到100的整数的平方。
123 | # ------------------------------------------------------------------------------------------
124 |
125 | squares_arr = [i^2 for i in 1:100]  # squares of 1..100; sum is 338350, matching the @assert checks below
126 |
127 | @assert length(squares_arr) == 100
128 | @assert sum(squares_arr) == 338350
129 |
130 | # ------------------------------------------------------------------------------------------
131 | # 请在完成练习后运行 `@assert` 块以检测结果是否正确,若无报错即为正确。
132 | # ------------------------------------------------------------------------------------------
133 |
--------------------------------------------------------------------------------
/.nbexports/zh-cn/intro-to-julia-ZH/07.包(Packages).jl:
--------------------------------------------------------------------------------
1 |
2 | # ------------------------------------------------------------------------------------------
3 | # # 软件包(Packages)
4 | #
5 | # Julia 有超过 2000 个登记在案的包,这使得第三方的软件包成为 Julia 语言生态的重要组成部分。
6 | # 尽管如此,软件包生态依然有很大的成长空间。
7 | #
8 | # 值得一提的是,在 Julia 中调用其它语言的函数也是一等公民。Julia 提供了很棒的外部函数接口(FFI)。
9 | # 我们可以很容易地通过 `PyCall` 或 `Rcall` 调用 Python 或 R。
10 | # 这意味着你不必等到 Julia 的生态完全成熟就能迁移现有项目到 Julia,
11 | # 而且迁移到 Julia 并不意味着放弃你在其他语言中惯用的包或者库!
12 | #
13 | # 查看全部可获得的包,请看
14 | # https://pkg.julialang.org/
15 | # 或者
16 | # https://juliaobserver.com/
17 | #
18 | # 现在我们先学习一下该如何使用包。
19 | # ------------------------------------------------------------------------------------------
20 |
21 | # ------------------------------------------------------------------------------------------
22 | # 在安装好的 Julia 中第一次要使用一个包的时候,你需要用包管理器(package manager)明确添加(add)它:
23 | # ------------------------------------------------------------------------------------------
24 |
25 | using Pkg
26 | Pkg.add("Example")
27 |
28 | # ------------------------------------------------------------------------------------------
29 | # 之后每次使用 Julia 时(比如打开一个新的 REPL 会话或者第一次打开一个 notebook),
30 | # 你需要通过 `using` 关键字载入要用的包
31 | # ------------------------------------------------------------------------------------------
32 |
33 | using Example
34 |
35 | # ------------------------------------------------------------------------------------------
36 | # 在 `Example.jl` 的源码
37 | # https://github.com/JuliaLang/Example.jl/blob/master/src/Example.jl
38 | # 中我们可以看到声明了下面这个函数
39 | #
40 | # ```julia
41 | # hello(who::String) = "Hello, $who"
42 | # ```
43 | #
44 | # 载入了 `Example` 我们现在应该可以调用 `hello` 函数了
45 | # ------------------------------------------------------------------------------------------
46 |
47 | hello("it's me. I was wondering if after all these years you'd like to meet.")
48 |
49 | # ------------------------------------------------------------------------------------------
50 | # 现在让我们来玩 `Colors` 包吧
51 | # ------------------------------------------------------------------------------------------
52 |
53 | Pkg.add("Colors")
54 |
55 | using Colors
56 |
57 | # ------------------------------------------------------------------------------------------
58 | # 我们来创建一个 100 个不同颜色的调色板
59 | # ------------------------------------------------------------------------------------------
60 |
61 | palette = distinguishable_colors(100)
62 |
63 | # ------------------------------------------------------------------------------------------
64 | # 然后通过 `rand` 函数来创建随机矩阵
65 | # ------------------------------------------------------------------------------------------
66 |
67 | rand(palette, 3, 3)
68 |
69 | # ------------------------------------------------------------------------------------------
70 | # 在下一个 notebook 中,我们将使用一个新的包来绘制数据集。
71 | # ------------------------------------------------------------------------------------------
72 |
73 | # ------------------------------------------------------------------------------------------
74 | # ### 练习
75 | #
76 | # #### 7.1
77 | # 加载 `Primes` 包(源码在https://github.com/JuliaMath/Primes.jl )
78 | # ------------------------------------------------------------------------------------------
79 |
80 | # 在此作答。做点什么以安装要求的包。
81 |
82 | using Primes
83 |
84 | @assert @isdefined Primes
85 |
86 | # ------------------------------------------------------------------------------------------
87 | # #### 7.2
88 | # 使用 `primes` 函数获得所有小于 1,000,000 的质数,并把它存入变量 `primes_list`
89 | # ------------------------------------------------------------------------------------------
90 |
91 | # 在此作答
92 | primes_list = [];
93 |
94 | @assert primes_list == primes(1000000)
95 |
96 | # ------------------------------------------------------------------------------------------
97 | # 请在完成练习后运行 `@assert` 块以检测结果是否正确,若无报错即为正确。
98 | # ------------------------------------------------------------------------------------------
99 |
--------------------------------------------------------------------------------
/.nbexports/zh-cn/intro-to-julia-ZH/简短版/00.上手Jupyter_notebook.jl:
--------------------------------------------------------------------------------
1 |
2 | # ------------------------------------------------------------------------------------------
3 | # ## 上手 Jupyter notebook
4 | #
5 | # ### 目录
6 | # - [运行代码块(cell)](#运行代码块(cell))
7 | # - [查看 Julia 的帮助文档](#查看-Julia-的帮助文档)
8 | # - [使用 shell 命令](#使用-shell-命令)
9 | # ------------------------------------------------------------------------------------------
10 |
11 | # ------------------------------------------------------------------------------------------
12 | # ### 运行代码块(cell)
13 | # 选中一个代码块之后(选中的代码块会被绿色矩形框起来),有以下几种方法可以运行它:
14 | # 1. 按下组合键 `Shift` + `Enter`
15 | # 2. 点击上方工具栏中的 **运行(Run)** 按钮
16 | # 3. 按下组合键 `Ctrl` + `Enter`
17 | # ------------------------------------------------------------------------------------------
18 |
19 | 1 + 1
20 | 2 + 2
21 |
22 | # ------------------------------------------------------------------------------------------
23 | # 新手注意!执行一个代码块时默认打印最后一行。想要什么也不输出,在最后一行的行尾加上分号 `;` 就行了。如下所示
24 | # ------------------------------------------------------------------------------------------
25 |
26 | 1 + 1
27 | 2 + 2;
28 |
29 | # ------------------------------------------------------------------------------------------
30 | # ### 查看 Julia 的帮助文档
31 | #
32 | # 遇到不熟悉的 Julia 函数或语法,在函数名或表达式前加个问号 `?` 就可以查询对应的帮助文档。(在Julia的REPL中也好使哦!)
33 | # ------------------------------------------------------------------------------------------
34 |
35 | ?println
36 |
37 | # ------------------------------------------------------------------------------------------
38 | # ### 使用 shell 命令
39 | #
40 | # 在 shell 命令前加上分号 `;` 即可,像这样:
41 | #
42 | # > 【译注】以下命令仅在 macOS 和 *nix 系统下可用
43 | # ------------------------------------------------------------------------------------------
44 |
45 | ;ls
46 |
47 | ;pwd
48 |
49 | # ------------------------------------------------------------------------------------------
50 | # shell 命令在 Julia 的 REPL 中也好使哦!
51 | #
52 | # > 【译注】对于 Windows 系统,你需要使用 Windows cmd 命令,像这样:
53 | # ------------------------------------------------------------------------------------------
54 |
55 | ;cd
56 |
57 | # ------------------------------------------------------------------------------------------
58 | # 但并不是所有的 cmd 命令都可以使用。如 `dir` 命令不可用,会报错
59 | # ------------------------------------------------------------------------------------------
60 |
61 | ;dir
62 |
63 | # ------------------------------------------------------------------------------------------
64 | # 中文社区的讨论贴
65 | # - [Windows 中 Julia 的 shell 模式与 `;dir` 报错 - 综合讨论区 / 心得体会 -
66 | # Julia中文社区](https://discourse.juliacn.com/t/topic/2890)
67 | # ------------------------------------------------------------------------------------------
68 |
--------------------------------------------------------------------------------
/.nbexports/zh-cn/intro-to-julia-ZH/简短版/03.包的使用.jl:
--------------------------------------------------------------------------------
1 |
2 | # ------------------------------------------------------------------------------------------
3 | # # 软件包
4 | #
5 | # Julia 有超过 2000 个登记在案的包,这使得第三方的软件包成为 Julia 语言生态的重要组成部分。
6 | # 尽管如此,软件包生态依然有很大的成长空间。
7 | #
8 | # 值得一提的是,在 Julia 中调用其它语言的函数也是一等公民。Julia 提供了很棒的外部函数接口(FFI)。
9 | # 我们可以很容易地通过 `PyCall` 或 `Rcall` 调用 Python 或 R。
10 | # 这意味着你不必等到 Julia 的生态完全成熟就能迁移现有项目到 Julia,
11 | # 而且迁移到 Julia 并不意味着放弃你在其他语言中惯用的包或者库!
12 | #
13 | # 查看全部可获得的包,请看
14 | # https://pkg.julialang.org/
15 | # 或者
16 | # https://juliaobserver.com/
17 | #
18 | # 现在我们先学习一下该如何使用包。
19 | # ------------------------------------------------------------------------------------------
20 |
21 | # ------------------------------------------------------------------------------------------
22 | # 在安装好的 Julia 中第一次要使用一个包的时候,你需要用包管理器(package manager)明确添加(add)它:
23 | # ------------------------------------------------------------------------------------------
24 |
25 | using Pkg
26 | Pkg.add("Example")
27 |
28 | # ------------------------------------------------------------------------------------------
29 | # 之后每次使用 Julia 时(比如打开一个新的 REPL 会话或者第一次打开一个 notebook),
30 | # 你需要通过 `using` 关键字载入要用的包
31 | # ------------------------------------------------------------------------------------------
32 |
33 | using Example
34 |
35 | # ------------------------------------------------------------------------------------------
36 | # 在 `Example.jl` 的源码
37 | # https://github.com/JuliaLang/Example.jl/blob/master/src/Example.jl
38 | # 中我们可以看到声明了下面这个函数
39 | #
40 | # ```julia
41 | # hello(who::String) = "Hello, $who"
42 | # ```
43 | #
44 | # 载入了 `Example` 我们现在应该可以调用 `hello` 函数了
45 | # ------------------------------------------------------------------------------------------
46 |
47 | hello("it's me. I was wondering if after all these years you'd like to meet.")
48 |
49 | # ------------------------------------------------------------------------------------------
50 | # 现在让我们来玩 `Colors` 包吧
51 | # ------------------------------------------------------------------------------------------
52 |
53 | Pkg.add("Colors")
54 |
55 | using Colors
56 |
57 | # ------------------------------------------------------------------------------------------
58 | # 首先创建一个 100 个不同颜色的调色板
59 | # ------------------------------------------------------------------------------------------
60 |
61 | palette = distinguishable_colors(100)
62 |
63 | # ------------------------------------------------------------------------------------------
64 | # 然后通过 `rand` 函数来创建随机矩阵
65 | # ------------------------------------------------------------------------------------------
66 |
67 | rand(3, 3)
68 |
69 | rand(1:10, 3, 3)
70 |
71 | rand(palette, 3, 3)
72 |
--------------------------------------------------------------------------------
/.nbexports/zh-cn/intro-to-julia-ZH/简短版/04.Julia绘图简介.jl:
--------------------------------------------------------------------------------
1 |
2 | # ------------------------------------------------------------------------------------------
3 | # # Julia 绘图简介
4 | #
5 | # Julia 有包括调用 `PyPlot` 在内的、多种不同的绘图方式。
6 | #
7 | # 这里将介绍如何使用 `Plots.jl`。
8 | # ------------------------------------------------------------------------------------------
9 |
10 | using Pkg; Pkg.add("Plots")
11 | using Plots
12 |
13 | # ------------------------------------------------------------------------------------------
14 | # `Plots.jl` 的优势之一是可以无缝地切换后端(backends)。
15 | # 在这个 notebook 中,我们将尝试 `gr()` 和 `plotlyjs()` 后端。
16 | #
17 | # 以科学调查之名,我们用这个 notebook 调查一下大概1860到2000年的全球温度和海盗数量的关系。
18 | # ------------------------------------------------------------------------------------------
19 |
20 | globaltemperatures = [14.4, 14.5, 14.8, 15.2, 15.5, 15.8]
21 | numpirates = [45000, 20000, 15000, 5000, 400, 17]
22 |
23 | # ------------------------------------------------------------------------------------------
24 | # **要绘制出图线,先载入GR后端**
25 | # ------------------------------------------------------------------------------------------
26 |
27 | gr()
28 |
29 | # ------------------------------------------------------------------------------------------
30 | # 现在我们可以调用如 `plot` 和 `scatter` 的函数来绘制图像。
31 | # ------------------------------------------------------------------------------------------
32 |
33 | plot(numpirates, globaltemperatures, label="line")
34 | scatter!(numpirates, globaltemperatures, label="points")
35 |
36 | # ------------------------------------------------------------------------------------------
37 | # `scatter!` 函数名后面的 `!` 意味着它是一个原地修改传入变量的函数,表示散点图会被添加到已存在的图像上。
38 | #
39 | # 与此对应的,你可以试试换成 `scatter` 看看会发生什么。
40 | #
41 | # 接着,我们通过 `xlabel!`,`ylabel!` 和 `title!` 函数来给图像加上更多信息。
42 | # ------------------------------------------------------------------------------------------
43 |
44 | xlabel!("Number of Pirates [Approximate]")
45 | ylabel!("Global Temperature (C)")
46 | title!("Influence of pirate population on global warming")
47 |
48 | # ------------------------------------------------------------------------------------------
49 | # 这看上去仍不对劲。自1860以来海盗数量是减少的,而从左往右看其实时间上是倒序的。
50 | # 我们来把 X 轴反过来,可以更清楚地看到时间顺序下海盗人口是如何导致全球温度的变化!
51 | # ------------------------------------------------------------------------------------------
52 |
53 | xflip!()
54 |
55 | # ------------------------------------------------------------------------------------------
56 | # 绘图就是这样简单!
57 | #
58 | # 注意:这是一个关于人们是如何经常结合相关性和因果性的笑话。
59 | #
60 | # **不需要修改语法,我们可以在 `unicodeplots()` 后端中绘制同样的图像**
61 | # ------------------------------------------------------------------------------------------
62 |
63 | using Pkg; Pkg.add("UnicodePlots")
64 | unicodeplots()
65 |
66 | plot(numpirates, globaltemperatures, label="line")
67 | scatter!(numpirates, globaltemperatures, label="points")
68 | xlabel!("Number of Pirates [Approximate]")
69 | ylabel!("Global Temperature (C)")
70 | title!("Influence of pirate population on global warming")
71 |
72 | # ------------------------------------------------------------------------------------------
73 | # 注意到第二幅图和第一幅的区别!
74 | # ------------------------------------------------------------------------------------------
75 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing
2 |
3 | Changes to Jupyter Notebooks are notoriously difficult to review in git /
4 | GitHub.
5 |
6 | In order to make it easier to review changes to notebook files, we use custom
7 | Jupyter configuration and template files to automatically generate an output
8 | "script" file with the contents of the notebook exported to a plain text file.
9 | For julia notebooks, these produce `.jl` files.
10 |
11 | In order to make sure this configuration applies to your changes, **please
12 | ensure that you launch Jupyter from the top of this repo directory!** You can
13 | do that via one of these approaches:
14 | - Launch Jupyter via the commandline from this repo:
15 | ```bash
16 | $ cd JuliaBoxTutorials
17 | $ jupyter notebook
18 | ```
19 | - Launch Jupyter via IJulia from this repo:
20 | ```bash
21 | $ cd JuliaBoxTutorials
22 | $ julia
23 | julia> IJulia.notebook(dir=".") # This `dir="."` is required!
24 | ```
25 |
26 | This will ensure that every time you save an .ipynb file, it will export a .jl script in our `.nbexports` directory! :)
27 |
28 | ## Jupyter configuration details
29 |
30 | The jupyter configuration is managed via two files:
31 | - `jupyter_notebook_config.py`: Configure jupyter to export a script whenever a
32 | notebook is saved.
33 | - `jupyter_script_export_template.tpl`: A template file specifying how to
34 | export the script. This is needed because by default, jupyter doesn't output
35 | the Markdown cells in Julia notebooks into comment blocks. (It only does that
36 | for python notebooks.)
37 |
38 |
39 | NOTE: Since these are only generated when saving a notebook file, if you delete
40 | or rename an ipynb file, you'll need to manually delete the outdated .nbexport
41 | file. See below.
42 |
43 | ## Manual nbconvert
44 |
45 | To manually trigger exporting script files, you can use nbconvert, via the following command:
46 | ```bash
47 | $ jupyter nbconvert --to script "/path/to/nb.ipynb" --template=./jupyter_script_export_template.tpl
48 | ```
49 |
50 | To re-generate all script files, you can run:
51 | ```bash
52 | $ rm .nbexports/*
53 | $ julia
54 | julia> let nbexports = "$(pwd())/.nbexports", tmpl = "$(pwd())/jupyter_script_export_template.tpl"
55 | for (root, dirs, files) in walkdir(".")
56 | for file in files
57 | if endswith(file, ".ipynb")
58 | outdir = joinpath(nbexports, root)
59 | run(`jupyter nbconvert --to script $root/$file --template=$tmpl --output-dir=$outdir`)
60 | end
61 | end
62 | end
63 | end
64 | ```
65 |
--------------------------------------------------------------------------------
/LICENSE.md:
--------------------------------------------------------------------------------
1 | Copyright 2019-21 Julia Computing and the individual notebook authors.
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
4 |
5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
6 |
7 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
8 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # JuliaBoxTutorials
2 |
3 | This repository contains introductory tutorials for the [Julia language](http://julialang.org/) in the form of [Jupyter Notebooks](https://jupyter.org/about). You can run the notebooks locally by [installing nteract](https://nteract.io).
4 |
5 | ## Running Julia on your Computer
6 |
7 | You can also do these tutorials by installing `julia` on your computer, setting up Jupyter, and downloading this tutorial repository to your computer.
8 | If you're new to Julia, you can do that by following these steps:
9 |
10 | 1. Download julia from https://julialang.org/downloads/ (download the latest "stable" version).
11 | - Follow the instructions to install it on your computer (e.g. On macOS, drag it to Applications. On Windows, run the installer.)
12 | 2. Install julia's Jupyter Notebooks integration: IJulia.jl
13 | - Open the installed julia application, and you are presented with a "REPL" prompt. This is the main Julia interface. There, type this closing bracket
14 | character: ] to open the package manager. Then type `add IJulia` to install the jupyter notebook interface for julia.
15 | - Then exit the package manager by pressing delete (as if you're deleting the `]` you typed to enter package mode)
16 | - Now you can open the jupyter notebooks by entering `using IJulia`, then once that loads, entering `IJulia.notebook()`, which should
17 | open a Jupyter tab in your browser.
18 | 3. Last, download the tutorials from this repository, via the github Clone/Download button above, or clicking this link:
19 | - https://github.com/JuliaAcademy/JuliaTutorials/archive/refs/heads/main.zip
20 | - (If you've never used GitHub before, it's a place to collaborate on open source software. Julia itself is also [developed on github!](https://github.com/JuliaLang/julia))
21 |
22 | And now from the Jupyter tab in your browser, you can navigate to the folder where you downloaded the tutorials, and then click
23 | on the name of one of them to get started! Enjoy!
24 |
--------------------------------------------------------------------------------
/README_DATALOSS_WARNING.md:
--------------------------------------------------------------------------------
1 | WARNING: THIS IS AN EPHEMERAL DISK.
2 |
3 | Any data stored in this folder is SUBJECT TO LOSS and THERE IS NO WAY TO
4 | RECOVER IT.
5 |
6 | Please do not use this folder for storing any data.
7 |
8 |
--------------------------------------------------------------------------------
/es-es/intro-to-julia-ES/README.md:
--------------------------------------------------------------------------------
1 | These materials are based on those in the "intro-to-julia" directory and were translated by Miguel Raz Guzmán Macedo.
2 |
3 |
4 |
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-julia-DataFrames/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) 2017-18 Bogumił Kamiński
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-julia-for-data-science/LICENSE.md:
--------------------------------------------------------------------------------
1 | The tutorials in this folder are licensed under the MIT "Expat" License:
2 |
3 | Copyright (c) 2018: Huda Nassar
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
6 |
7 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
8 |
9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
10 |
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-julia-for-data-science/Project.toml:
--------------------------------------------------------------------------------
1 | name = "Julia for Data Science"
2 | authors = ["Huda Nassar "]
3 |
4 | [deps]
5 | BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
6 | CSV = "336ed68f-0bac-5ca0-87d4-7b16caf5d00b"
7 | Clustering = "aaaa29a8-35af-508c-8bc3-b662a17a0fe5"
8 | Conda = "8f4d0f93-b110-5947-807f-2305c1781a2d"
9 | DataArrays = "0fe7c1db-08c2-54a3-a222-6d1d3b7a471a"
10 | DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
11 | FileIO = "5789e2e9-d7fb-5bc7-8068-2c6fae9b9549"
12 | Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
13 | IJulia = "7073ff75-c697-5162-941a-fcdaad2a7d2a"
14 | Images = "916415d5-f1e6-5110-898d-aaa5f9f070e0"
15 | LaTeXStrings = "b964fa9f-0449-5b57-a5c2-d3ea65f4040f"
16 | MAT = "23992714-dd62-5051-b70f-ba57cb901cac"
17 | MultivariateStats = "6f286f6a-111f-5878-ab1e-185364afe411"
18 | NearestNeighbors = "b8a86587-4115-5ab1-83bc-aa920d37bbce"
19 | Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80"
20 | PyCall = "438e738f-606a-5dbb-bf0a-cddfbfd45ab0"
21 | PyPlot = "d330b81b-6aea-500a-939a-2ce795aea3ee"
22 | RDatasets = "ce6b1742-4840-55fa-b093-852dadbb1d8b"
23 | StatsPlots = "f3b207a7-027a-5e70-b257-86293d7955fd"
24 |
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-julia-for-data-science/README.md:
--------------------------------------------------------------------------------
1 | These tutorials were created by Huda Nassar.
2 |
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-juliadb/LICENSE.md:
--------------------------------------------------------------------------------
1 | The contents of this directory are under The MIT License.
2 |
3 | Copyright (c) 2018: Julia Computing, Inc.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
6 |
7 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
8 |
9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
10 |
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-juliadb/README.md:
--------------------------------------------------------------------------------
1 | These tutorials were prepared by Josh Day and Shashi Gowda.
2 |
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-juliadb/stocks:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-juliadb/stocks
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-juliadb/stocks.jdb:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-juliadb/stocks.jdb
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-ml/07. ML - Model complexity.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "source": [
6 | "## Model complexity\n\nIn the last notebook, we saw that we could customize a model by adding a parameter. Doing so, we were able to fit that model to a data point. This fit was perfect, insofar as numerics would allow.\n\nIn the next notebook, we'll see that as we add more data to our data set, fitting a model to our data usually becomes more challenging and the result will be less perfect.\n\nFor one thing, we will find that we can add complexity to our model to capture added complexity in the data. We can do this by adding more parameters to our model. We'll see that for a data set with two data points, we can again get a \"perfect\" fit to our model by adding a second parameter to our model.\n\nHowever, we can't simply add a parameter to our model every time we add a data point to our data set, since this will lead to a phenomenon called **overfitting**.\n\nIn the image below, we depict a data set that is close to linear, and models that exhibit underfitting, fitting well, and overfitting, from left to right:\n\n
\n\n\nIn the first image, the model accounts for the slope along which the data falls, but not the offset. \n\nIn the second image, the model accounts for both the slope and offset of the data. Adding this second parameter (the offset) to the model creates a much better fit.\n\nHowever, we can imagine that a model can have too many parameters, where we begin to fit not only the high level features of the data, but also the noise. This overfitting is depicted in the third image."
7 | ],
8 | "metadata": {}
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "source": [
13 | "Our aim will be to fit the data well, but avoiding *over*fitting the data!"
14 | ],
15 | "metadata": {}
16 | }
17 | ],
18 | "nbformat_minor": 2,
19 | "metadata": {
20 | "language_info": {
21 | "file_extension": ".jl",
22 | "mimetype": "application/julia",
23 | "name": "julia",
24 | "version": "0.6.2"
25 | },
26 | "kernelspec": {
27 | "name": "julia-0.6",
28 | "display_name": "Julia 0.6.2",
29 | "language": "julia"
30 | }
31 | },
32 | "nbformat": 4
33 | }
34 |
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-ml/17. ML - Introduction to deep learning.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "source": [
6 | "## Going deep: Deep neural networks\n\nSo far, we've learned that if we want to classify more than two fruits, we'll need to go beyond using a single neuron and use *multiple* neurons to get multiple outputs. We can think of stacking these multiple neurons together in a single neural layer.\n\nEven so, we found that using a single neural layer was not enough to fully distinguish between bananas, grapes, **and** apples. To do this properly, we'll need to add more complexity to our model. We need not just a neural network, but a *deep neural network*. \n\nThere is one step remaining to build a deep neural network. We have been saying that a neural network takes in data and then spits out `0` or `1` predictions that together declare what kind of fruit the picture is. However, what if we instead put the output of one neural network layer into another neural network layer?\n\nThis gets pictured like this below:\n\n
\n\nOn the left we have 3 data points in blue. Those 3 data points each get fed into 4 neurons in purple. Each of those 4 neurons produces a single output, but those outputs are each fed into three neurons (the second layer of purple). Each of those 3 neurons spits out a single value, and those values are fed as inputs into the last layer of 6 neurons. The 6 values that those final neurons produce are the output of the neural network. This is a deep neural network."
7 | ],
8 | "metadata": {}
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "source": [
13 | "### Why would a deep neural network be better?\n\nThis is a little perplexing when you first see it. We used neurons to train the model before: why would sticking the output from neurons into other neurons help us fit the data better? The answer can be understood by drawing pictures. Geometrically, the matrix multiplication inside of a layer of neurons is stretching and rotating the axis that we can vary:\n\n[Show linear transformation of axis, with data]\n\nA nonlinear transformation, such as the sigmoid function, then adds a bump to the line:\n\n[Show the linear transformed axis with data, and then a bumped version that fits the data better]\n\nNow let's repeat this process. When we send the data through another layer of neurons, we get another rotation and another bump:\n\n[Show another rotation, then another bump]\n\nVisually, we see that if we keep doing this process we can make the axis line up with any data. What this means is that **if we have enough layers, then our neural network can approximate any model**. \n\nThe trade-off is that with more layers we have more parameters, so it may be harder (i.e. computationally intensive) to train the neural network. But we have the guarantee that the model has enough freedom such that there are parameters that will give the correct output. \n\nBecause this model is so flexible, the problem is reduced to that of learning: do the same gradient descent method on this much larger model (but more efficiently!) and we can make it classify our data correctly. This is the power of deep learning."
14 | ],
15 | "metadata": {}
16 | }
17 | ],
18 | "nbformat_minor": 2,
19 | "metadata": {
20 | "language_info": {
21 | "file_extension": ".jl",
22 | "mimetype": "application/julia",
23 | "name": "julia",
24 | "version": "0.6.2"
25 | },
26 | "kernelspec": {
27 | "name": "julia-0.6",
28 | "display_name": "Julia 0.6.2",
29 | "language": "julia"
30 | }
31 | },
32 | "nbformat": 4
33 | }
34 |
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-ml/data/104_100.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-ml/data/104_100.jpg
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-ml/data/107_100.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-ml/data/107_100.jpg
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-ml/data/10_100.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-ml/data/10_100.jpg
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-ml/data/8_100.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-ml/data/8_100.jpg
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-ml/data/Celeste.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-ml/data/Celeste.png
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-ml/data/array2d.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-ml/data/array2d.png
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-ml/data/array_cartoon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-ml/data/array_cartoon.png
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-ml/data/array_comprehension.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-ml/data/array_comprehension.png
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-ml/data/data_flow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-ml/data/data_flow.png
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-ml/data/deep-neural-net.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-ml/data/deep-neural-net.png
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-ml/data/fruit-salad.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-ml/data/fruit-salad.png
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-ml/data/model_fitting.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-ml/data/model_fitting.png
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-ml/data/philip.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-ml/data/philip.jpg
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-ml/data/single-layer.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-ml/data/single-layer.png
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-ml/data/single-neuron.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-ml/data/single-neuron.png
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-ml/data/what_is_model.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-ml/data/what_is_model.png
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-ml/data/without_arrays.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-ml/data/without_arrays.png
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-ml/draw_neural_net.jl:
--------------------------------------------------------------------------------
1 | using Plots; gr()
2 |
"""
Draw a single neuron as a filled circle of radius `r` centred at `(x, y)`,
added to the current plot. The keyword `c` sets the fill colour.
"""
function draw_neuron(x, y, r; c=:blue)

    angles = 0:0.1:2pi
    circle_xs = x .+ r .* cos.(angles)
    circle_ys = y .+ r .* sin.(angles)

    plot!(circle_xs, circle_ys, seriestype=:shape, c=c, alpha=0.5,
          aspect_ratio=1, leg=false)

end
13 |
14 |
15 | #neuron_coords(x, N, spacing) = range(-(N - 1)/2 * spacing, spacing, N)
16 |
"""
Vertical position of the `j`-th of `N` neurons in a layer, with vertical
spacing `y_spacing` between adjacent neurons.
"""
neuron_coords(j, N, y_spacing) = y_spacing * (j - (N - 1)/2)
21 |
"""
Draw the `N` neurons of a layer located at horizontal position `x`,
vertically spaced by `spacing`, each as a circle of radius `r` in colour `c`.
"""
function draw_neurons(x, N, spacing, r; c=:blue)

    # Bug fix: the neuron index (1:N) determines each neuron's vertical
    # position, not the layer's x-coordinate. Passing `x` produced a single
    # scalar, so only one neuron was drawn instead of N.
    ys = neuron_coords.(1:N, N, spacing)

    draw_neuron.(x, ys, r; c=c)

end
29 |
30 |
"""
Draw two adjacent layers of `N1` (blue) and `N2` (red) neurons at horizontal
positions `x` and `x + 1`, vertically spaced by `spacing`, with radius `r`,
plus all pairwise links between them, trimmed so they stop at circle edges.
"""
function draw_layer(x, spacing, N1, N2, r)

    plot!(framestyle=:none, grid=:none)

    first_x = x
    second_x = x + 1

    # Bug fix: compute one y-coordinate per neuron by broadcasting over the
    # neuron indices 1:N. The original passed the layer's x-coordinate as the
    # neuron index, which produced a scalar and made `first[i]` fail for i > 1.
    first_ys = neuron_coords.(1:N1, N1, spacing)
    second_ys = neuron_coords.(1:N2, N2, spacing)

    # Bug fix: honour the requested vertical `spacing` (was hard-coded to 1,
    # which disagreed with the link endpoints computed above).
    draw_neurons(first_x, N1, spacing, r; c=:blue)
    draw_neurons(second_x, N2, spacing, r; c=:red)

    for i in 1:N1
        for j in 1:N2

            vec = [second_x - first_x, second_ys[j] - first_ys[i]]
            normalize!(vec)  # NOTE(review): needs `using LinearAlgebra` on Julia >= 0.7

            # Trim 1.2*r off each end so the link stops outside the circles.
            start = [first_x, first_ys[i]] + 1.2*r*vec
            finish = [second_x, second_ys[j]] - 1.2*r*vec

            plot!([start[1], finish[1]], [start[2], finish[2]], c=:black, alpha=0.5)
        end
    end

end
59 |
60 | #draw_layer(1, 1, 3, 4, 0.2)
61 |
"""
Draw a straight link between the neurons centred at `(x1, y1)` and
`(x2, y2)`, shortened by `1.2 * r` at each end so the line stops just
outside each neuron's circle of radius `r`.
"""
function draw_link(x1, y1, x2, y2, r)
    direction = [x2 - x1, y2 - y1]
    normalize!(direction)

    trim = 1.2 * r * direction
    link_start = [x1, y1] + trim
    link_end = [x2, y2] - trim

    plot!([link_start[1], link_end[1]], [link_start[2], link_end[2]],
          c=:black, alpha=0.5)
end
71 |
"""
Draw a feed-forward network diagram given a vector of neuron counts per
layer, e.g. `[3, 2, 2]`. The first layer is green, the last red, and any
hidden layers blue; input and output links are drawn as horizontal stubs.
"""
function draw_network(neurons_per_layer)

    x_spacing = 1
    y_spacing = 1
    r = 0.2

    num_layers = length(neurons_per_layer)

    plot(framestyle=:none, grid=:none)

    # Horizontal input stubs feeding the first layer.
    N_first = neurons_per_layer[1]
    for j in 1:N_first
        y = neuron_coords(j, N_first, y_spacing)
        draw_link(0, y, 1, y, r)
    end

    # Neurons: green input layer, red output layer, blue in between.
    for (layer, N) in enumerate(neurons_per_layer)

        c = layer == 1 ? :green : (layer == num_layers ? :red : :blue)

        for j in 1:N
            draw_neuron(layer, neuron_coords(j, N, y_spacing), r, c=c)
        end
    end

    # All pairwise links between consecutive layers.
    for layer in 1:num_layers-1
        N1 = neurons_per_layer[layer]
        N2 = neurons_per_layer[layer+1]

        for j1 in 1:N1, j2 in 1:N2
            draw_link(layer, neuron_coords(j1, N1, y_spacing), layer+1,
                      neuron_coords(j2, N2, y_spacing), r)
        end
    end

    # Horizontal output stubs leaving the last layer.
    N_last = neurons_per_layer[end]
    for j in 1:N_last
        y = neuron_coords(j, N_last, y_spacing)
        draw_link(num_layers, y, num_layers+1, y, r)
    end

    plot!()

end
138 |
139 | draw_network([3, 2, 2])
140 |
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-solving-diffeq-in-julia/License.md:
--------------------------------------------------------------------------------
1 | The tutorials in this folder are licensed under the MIT "Expat" License:
2 |
3 | Copyright (c) 2018: ChrisRackauckas.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
6 |
7 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
8 |
9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
10 |
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-solving-diffeq-in-julia/README.md:
--------------------------------------------------------------------------------
1 | These materials were created by Chris Rackauckas.
2 |
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/README.md:
--------------------------------------------------------------------------------
1 | These tutorials were created by David Anthoff.
2 |
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/data/cars.dta:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/data/cars.dta
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/data/cars.feather:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/data/cars.feather
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/data/cars.sas7bdat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/data/cars.sas7bdat
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/data/cars.sav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/data/cars.sav
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/data/cars.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/data/cars.xlsx
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/screenshots/voyager1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/screenshots/voyager1.png
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/screenshots/voyager2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/screenshots/voyager2.png
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/screenshots/voyager3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/screenshots/voyager3.png
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide1.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide1.PNG
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide10.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide10.PNG
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide11.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide11.PNG
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide12.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide12.PNG
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide13.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide13.PNG
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide14.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide14.PNG
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide15.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide15.PNG
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide16.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide16.PNG
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide17.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide17.PNG
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide18.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide18.PNG
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide19.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide19.PNG
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide2.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide2.PNG
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide20.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide20.PNG
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide21.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide21.PNG
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide22.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide22.PNG
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide23.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide23.PNG
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide24.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide24.PNG
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide25.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide25.PNG
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide26.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide26.PNG
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide27.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide27.PNG
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide28.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide28.PNG
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide29.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide29.PNG
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide3.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide3.PNG
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide30.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide30.PNG
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide31.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide31.PNG
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide32.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide32.PNG
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide33.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide33.PNG
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide4.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide4.PNG
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide5.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide5.PNG
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide6.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide6.PNG
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide7.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide7.PNG
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide8.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide8.PNG
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide9.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/intro-to-the-queryverse/slides/Slide9.PNG
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/introduction-to-dynamicalsystems.jl/KS150_tr40000_D2_tau1_B5_k1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/introduction-to-dynamicalsystems.jl/KS150_tr40000_D2_tau1_B5_k1.png
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/introduction-to-dynamicalsystems.jl/KS150_tr40000_D2_τ1_B5_k1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/introduction-to-dynamicalsystems.jl/KS150_tr40000_D2_τ1_B5_k1.png
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/introduction-to-dynamicalsystems.jl/KS_NRMSE_L6_Q64_D1_tau1_B5_k1_nn4_nw3PndWDWSt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/introduction-to-dynamicalsystems.jl/KS_NRMSE_L6_Q64_D1_tau1_B5_k1_nn4_nw3PndWDWSt.png
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/introduction-to-dynamicalsystems.jl/KS_NRMSE_L6_Q64_D1_τ1_B5_k1_nn4_nw3PndWDWSt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/introduction-to-dynamicalsystems.jl/KS_NRMSE_L6_Q64_D1_τ1_B5_k1_nn4_nw3PndWDWSt.png
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/introduction-to-dynamicalsystems.jl/README.md:
--------------------------------------------------------------------------------
1 | # tutorials
2 | Jupyter notebooks and markdown files that serve as tutorials for the packages of JuliaDynamics
3 |
4 | These materials were created by George Datseris.
5 |
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/introduction-to-dynamicalsystems.jl/barkley_crossprediction.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/introduction-to-dynamicalsystems.jl/barkley_crossprediction.gif
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/introduction-to-dynamicalsystems.jl/barkley_stts_prediction.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/introduction-to-dynamicalsystems.jl/barkley_stts_prediction.gif
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/introduction-to-dynamicalsystems.jl/lyapunov.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/introduction-to-dynamicalsystems.jl/lyapunov.png
--------------------------------------------------------------------------------
/introductory-tutorials/broader-topics-and-ecosystem/introduction-to-dynamicalsystems.jl/standardmap_fp.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/broader-topics-and-ecosystem/introduction-to-dynamicalsystems.jl/standardmap_fp.png
--------------------------------------------------------------------------------
/introductory-tutorials/intro-to-julia/00. Jupyter_notebooks.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "## Getting started with Jupyter notebooks\n",
8 | "\n",
9 | "### Running a cell\n",
10 | "To execute code within a cell, select that cell and either (1) hit `Shift` and `Enter` or (2) hit the run button (the right pointing arrow) above."
11 | ]
12 | },
13 | {
14 | "cell_type": "code",
15 | "execution_count": null,
16 | "metadata": {},
17 | "outputs": [],
18 | "source": [
19 | "1 + 1\n",
20 | "2 + 2"
21 | ]
22 | },
23 | {
24 | "cell_type": "markdown",
25 | "metadata": {},
26 | "source": [
27 | "If you're new to Jupyter notebooks, note that only the last line of a cell prints by default when you execute that cell and that you can suppress this output with a semicolon"
28 | ]
29 | },
30 | {
31 | "cell_type": "code",
32 | "execution_count": null,
33 | "metadata": {},
34 | "outputs": [],
35 | "source": [
36 | "1 + 1\n",
37 | "2 + 2;"
38 | ]
39 | },
40 | {
41 | "cell_type": "markdown",
42 | "metadata": {},
43 | "source": [
44 | "### How to get docs for Julia functions\n",
45 | "\n",
46 | "To get docs for a function you're not familiar with, precede it with a question mark. (This works at the REPL too!)"
47 | ]
48 | },
49 | {
50 | "cell_type": "code",
51 | "execution_count": null,
52 | "metadata": {},
53 | "outputs": [],
54 | "source": [
55 | "?println"
56 | ]
57 | },
58 | {
59 | "cell_type": "markdown",
60 | "metadata": {},
61 | "source": [
62 | "### How to use shell commands\n",
63 | "\n",
64 | "Type `;` and then you can use shell commands. For example,"
65 | ]
66 | },
67 | {
68 | "cell_type": "code",
69 | "execution_count": null,
70 | "metadata": {},
71 | "outputs": [],
72 | "source": [
73 | ";ls"
74 | ]
75 | },
76 | {
77 | "cell_type": "code",
78 | "execution_count": null,
79 | "metadata": {},
80 | "outputs": [],
81 | "source": [
82 | ";pwd"
83 | ]
84 | },
85 | {
86 | "cell_type": "markdown",
87 | "metadata": {
88 | "collapsed": true
89 | },
90 | "source": [
91 | "Shell commands also work at the REPL!"
92 | ]
93 | },
94 | {
95 | "cell_type": "code",
96 | "execution_count": null,
97 | "metadata": {},
98 | "outputs": [],
99 | "source": []
100 | }
101 | ],
102 | "metadata": {
103 | "kernelspec": {
104 | "display_name": "Julia 1.0.0",
105 | "language": "julia",
106 | "name": "julia-1.0"
107 | },
108 | "language_info": {
109 | "file_extension": ".jl",
110 | "mimetype": "application/julia",
111 | "name": "julia",
112 | "version": "1.0.1"
113 | }
114 | },
115 | "nbformat": 4,
116 | "nbformat_minor": 2
117 | }
118 |
--------------------------------------------------------------------------------
/introductory-tutorials/intro-to-julia/LICENSE.md:
--------------------------------------------------------------------------------
1 | The contents of this directory are under The MIT License.
2 |
3 | Copyright (c) 2018: Julia Computing, Inc.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
6 |
7 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
8 |
9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
10 |
--------------------------------------------------------------------------------
/introductory-tutorials/intro-to-julia/Local_installations.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "## Get Julia running locally\n",
8 | "\n",
9 | "\n",
10 | "## Local package installations\n",
11 | "\n",
12 | "If you'd like to run these tutorial notebooks locally, you'll want to install all the packages used in them. Since installation can take some time, you may want to run this notebook before getting started with the tutorial, rather than trying to install everything as you go.\n",
13 | "\n",
14 | "#### Installations"
15 | ]
16 | },
17 | {
18 | "cell_type": "code",
19 | "execution_count": null,
20 | "metadata": {},
21 | "outputs": [],
22 | "source": [
23 | "using Pkg\n",
24 | "Pkg.add([\"Example\", \"Colors\", \"Primes\", \"Plots\", \"BenchmarkTools\"])"
25 | ]
26 | },
27 | {
28 | "cell_type": "markdown",
29 | "metadata": {},
30 | "source": [
31 | "#### Loading all packages"
32 | ]
33 | },
34 | {
35 | "cell_type": "code",
36 | "execution_count": null,
37 | "metadata": {},
38 | "outputs": [],
39 | "source": [
40 | "using Example, Colors, Plots, BenchmarkTools, Primes"
41 | ]
42 | },
43 | {
44 | "cell_type": "markdown",
45 | "metadata": {},
46 | "source": [
47 | "#### Tests\n",
48 | "\n",
49 | "`plot` should generate a plot,"
50 | ]
51 | },
52 | {
53 | "cell_type": "code",
54 | "execution_count": null,
55 | "metadata": {},
56 | "outputs": [],
57 | "source": [
58 | "plot(x -> x^2, -10:10)"
59 | ]
60 | },
61 | {
62 | "cell_type": "markdown",
63 | "metadata": {},
64 | "source": [
65 | "`RGB(0, 0, 0)` should return a black square,"
66 | ]
67 | },
68 | {
69 | "cell_type": "code",
70 | "execution_count": null,
71 | "metadata": {},
72 | "outputs": [],
73 | "source": [
74 | "RGB(0, 0, 0)"
75 | ]
76 | },
77 | {
78 | "cell_type": "markdown",
79 | "metadata": {},
80 | "source": [
81 | "and `@btime primes(1000000);` should report an execution time in ms and memory used. For example, on one computer, this yielded \"2.654 ms (5 allocations: 876.14 KiB)\"."
82 | ]
83 | },
84 | {
85 | "cell_type": "code",
86 | "execution_count": null,
87 | "metadata": {},
88 | "outputs": [],
89 | "source": [
90 | "@btime primes(1000000);"
91 | ]
92 | }
93 | ],
94 | "metadata": {
95 | "kernelspec": {
96 | "display_name": "Julia 1.0.0",
97 | "language": "julia",
98 | "name": "julia-1.0"
99 | },
100 | "language_info": {
101 | "file_extension": ".jl",
102 | "mimetype": "application/julia",
103 | "name": "julia",
104 | "version": "1.0.0"
105 | }
106 | },
107 | "nbformat": 4,
108 | "nbformat_minor": 2
109 | }
110 |
--------------------------------------------------------------------------------
/introductory-tutorials/intro-to-julia/README.md:
--------------------------------------------------------------------------------
1 | Contributors to these tutorials include Jane Herriman, Andreas Noack, Sacha Verweij, and Alan Edelman
2 |
--------------------------------------------------------------------------------
/introductory-tutorials/intro-to-julia/calculate_pi.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "## How can we calculate $\\pi$?\n",
8 | "\n",
9 | "Given a square of length $2r$, the square's area is\n",
10 | "\n",
11 | "$$A_{square} = (2r)^2 = 4r^2$$\n",
12 | "\n",
13 | "whereas the area of a circle with radius $r$ is\n",
14 | "$$A_{circle} = \\pi r^2$$\n",
15 | "\n",
16 | "<img src=\"images/area_ratio.png\" alt=\"Circle inscribed in a square, illustrating the ratio of their areas\" width=\"300\" />\n",
17 | "\n",
18 | "Therefore the ratio of the area of the circle to that of the square above is\n",
19 | "\n",
20 | "$$\\frac{A_{circle}}{A_{square}} = \\frac{\\pi r^2}{4r^2} = \\frac{\\pi}{4}$$\n",
21 | "\n",
22 | "and we can define $\\pi$ as\n",
23 | "\n",
24 | "$$\\pi = 4\\frac{A_{circle}}{A_{square}}$$\n",
25 | "\n",
26 | "This suggests a way to calculate $\\pi$: if we have a square and the largest circle that fits inside that square, we can determine the ratio of areas of a circle and a square. We can calculate this ratio using a Monte Carlo simulation. We select random points inside a square, and we keep track of how often those points also fall inside the circle that fits perfectly inside that square.\n",
27 | "\n",
28 | "Given a large enough number of sampling points, $\\frac{A_{circle}}{A_{square}}$ will be equal to the fraction of randomly chosen points inside the square that also fall inside the circle. Then we can figure out $\\pi$!\n",
29 | "\n",
30 | "#### Pseudo-code\n",
31 | "\n",
32 | "Given the above, our algorithm for determining $\\pi$ looks like this:\n",
33 | "\n",
34 | "1. For each of $N$ iterations,\n",
35 | " 1. Select a random point inside a square of area $4r^2$ as Cartesian, $(x, y)$, coordinates.\n",
36 | " 1. Determine if the point also falls inside the circle embedded within this square of area $\\pi r^2$.\n",
37 | " 1. Keep track of whether or not this point fell inside the circle. At the end of $N$ iterations, you want to know $M$ -- the number of the $N$ random points that fell inside the circle!\n",
38 | "1. Calculate $\\pi$ as $4\\frac{M}{N}$\n",
39 | "\n",
40 | "#### Exercise\n",
41 | "\n",
42 | "Write a function that calculates $\\pi$ using Julia.\n",
43 | "\n",
44 | "The algorithm above should work for any value of $r$ that you choose to use. Make sure you make $N$ big enough that the value of $\\pi$ is correct to at least a couple of digits after the decimal point!\n",
45 | "\n",
46 | "*Hint*:\n",
47 | "\n",
48 | "This will probably be easier if you center your circle and square at the coordinate (0, 0) and use a radius of 1. For example, to choose random coordinates within your square at position (x, y), you may want to choose x and y so that they are each a value between -1 and +1. Then any point within a distance of 1 from (0, 0) will fall inside the circle!\n",
49 | "\n",
50 | "<img src=\"images/hint.png\" alt=\"Circle of radius 1 centered at (0, 0) inscribed in a square spanning -1 to 1\" width=\"300\" />\n",
51 | "\n"
52 | ]
53 | },
54 | {
55 | "cell_type": "code",
56 | "execution_count": null,
57 | "metadata": {
58 | "collapsed": true
59 | },
60 | "outputs": [],
61 | "source": []
62 | }
63 | ],
64 | "metadata": {
65 | "kernelspec": {
66 | "display_name": "Julia 1.0.0",
67 | "language": "julia",
68 | "name": "julia-1.0"
69 | },
70 | "language_info": {
71 | "file_extension": ".jl",
72 | "mimetype": "application/julia",
73 | "name": "julia",
74 | "version": "1.0.0"
75 | }
76 | },
77 | "nbformat": 4,
78 | "nbformat_minor": 2
79 | }
80 |
--------------------------------------------------------------------------------
/introductory-tutorials/intro-to-julia/derfunc.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/intro-to-julia/derfunc.gif
--------------------------------------------------------------------------------
/introductory-tutorials/intro-to-julia/images/104_100.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/intro-to-julia/images/104_100.jpg
--------------------------------------------------------------------------------
/introductory-tutorials/intro-to-julia/images/area_ratio.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/intro-to-julia/images/area_ratio.png
--------------------------------------------------------------------------------
/introductory-tutorials/intro-to-julia/images/banana_10svals.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/intro-to-julia/images/banana_10svals.png
--------------------------------------------------------------------------------
/introductory-tutorials/intro-to-julia/images/banana_30svals.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/intro-to-julia/images/banana_30svals.png
--------------------------------------------------------------------------------
/introductory-tutorials/intro-to-julia/images/banana_3svals.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/intro-to-julia/images/banana_3svals.png
--------------------------------------------------------------------------------
/introductory-tutorials/intro-to-julia/images/banana_5svals.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/intro-to-julia/images/banana_5svals.png
--------------------------------------------------------------------------------
/introductory-tutorials/intro-to-julia/images/hint.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/intro-to-julia/images/hint.png
--------------------------------------------------------------------------------
/introductory-tutorials/intro-to-julia/long-version/000 Jupyter Notebooks.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "## Getting started with Jupyter notebooks\n",
8 | "\n",
9 | "Jupyter notebooks are a convenient way to run, display, and present interactive code. The main concept is a cell — a single chunk of text. Cells may be markdown (like this one) or code (like the next).\n",
10 | "\n",
11 | "### Running a cell\n",
12 | "To execute code within a cell, select that cell and either (1) hit `Shift` and `Enter` or (2) hit the run button (the right pointing arrow) above."
13 | ]
14 | },
15 | {
16 | "cell_type": "code",
17 | "execution_count": null,
18 | "metadata": {},
19 | "outputs": [],
20 | "source": [
21 | "1 + 1\n",
22 | "2 + 2"
23 | ]
24 | },
25 | {
26 | "cell_type": "markdown",
27 | "metadata": {},
28 | "source": [
29 | "If you're new to Jupyter notebooks, note that only the last line of a cell prints by default when you execute that cell and that you can suppress this output with a semicolon"
30 | ]
31 | },
32 | {
33 | "cell_type": "code",
34 | "execution_count": null,
35 | "metadata": {},
36 | "outputs": [],
37 | "source": [
38 | "1 + 1\n",
39 | "2 + 2;"
40 | ]
41 | },
42 | {
43 | "cell_type": "markdown",
44 | "metadata": {},
45 | "source": [
46 | "## Changing modes and inserting new cells\n",
47 | "\n",
48 | "You can use the menu above or key combinations — ESC will drop out of editing a cell and into a command mode where there are special keyboard shortcuts to insert/cut/modify cells themselves. Try `ESC` and then `a` or `b` for \"above\" and \"below\".\n",
49 | "\n",
50 |     "See the Help menu for all available shortcuts."
51 | ]
52 | },
53 | {
54 | "cell_type": "markdown",
55 | "metadata": {},
56 | "source": [
57 | "## Special modes for cells\n",
58 | "\n",
59 |     "Cells can change their behavior depending on their very first character."
60 | ]
61 | },
62 | {
63 | "cell_type": "markdown",
64 | "metadata": {},
65 | "source": [
66 | "### How to get docs for Julia functions\n",
67 | "\n",
68 | "To get docs for a function you're not familiar with, precede it with a question mark. (This works at the REPL too!)"
69 | ]
70 | },
71 | {
72 | "cell_type": "code",
73 | "execution_count": null,
74 | "metadata": {},
75 | "outputs": [],
76 | "source": [
77 | "?println"
78 | ]
79 | },
80 | {
81 | "cell_type": "markdown",
82 | "metadata": {},
83 | "source": [
84 | "### How to use shell commands\n",
85 | "\n",
86 | "Type `;` and then you can use shell commands. For example,"
87 | ]
88 | },
89 | {
90 | "cell_type": "code",
91 | "execution_count": null,
92 | "metadata": {},
93 | "outputs": [],
94 | "source": [
95 | ";ls"
96 | ]
97 | },
98 | {
99 | "cell_type": "code",
100 | "execution_count": null,
101 | "metadata": {},
102 | "outputs": [],
103 | "source": [
104 | ";pwd"
105 | ]
106 | },
107 | {
108 | "cell_type": "markdown",
109 | "metadata": {},
110 | "source": [
111 | "### Interacting with the package manager\n",
112 | "\n",
113 | "Julia's package manager has a special \"command\" syntax mode — you can enter it with a `]` character."
114 | ]
115 | },
116 | {
117 | "cell_type": "code",
118 | "execution_count": null,
119 | "metadata": {},
120 | "outputs": [],
121 | "source": [
122 | "]status"
123 | ]
124 | }
125 | ],
126 | "metadata": {
127 | "kernelspec": {
128 | "display_name": "Julia 1.3.1",
129 | "language": "julia",
130 | "name": "julia-1.3"
131 | },
132 | "language_info": {
133 | "file_extension": ".jl",
134 | "mimetype": "application/julia",
135 | "name": "julia",
136 | "version": "1.3.1"
137 | }
138 | },
139 | "nbformat": 4,
140 | "nbformat_minor": 2
141 | }
142 |
--------------------------------------------------------------------------------
/introductory-tutorials/intro-to-julia/long-version/130 OneHot Vector.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "Often used in machine learning, a \"one hot\" vector is a vector of all zeros, except for a single `1` entry.\n",
8 | "Representing it as a standard vector is memory-inefficient, so it cries out for a special implementation."
9 | ]
10 | },
11 | {
12 | "cell_type": "code",
13 | "execution_count": null,
14 | "metadata": {},
15 | "outputs": [],
16 | "source": [
17 | "struct OneHotVector <: AbstractVector{Int}\n",
18 | " idx::Int\n",
19 | " len::Int\n",
20 | "end"
21 | ]
22 | },
23 | {
24 | "cell_type": "code",
25 | "execution_count": null,
26 | "metadata": {},
27 | "outputs": [],
28 | "source": [
29 | "Base.size(v::OneHotVector) = (v.len,)"
30 | ]
31 | },
32 | {
33 | "cell_type": "code",
34 | "execution_count": null,
35 | "metadata": {},
36 | "outputs": [],
37 | "source": [
38 | "Base.getindex(v::OneHotVector, i::Integer) = Int(i == v.idx)"
39 | ]
40 | },
41 | {
42 | "cell_type": "code",
43 | "execution_count": null,
44 | "metadata": {},
45 | "outputs": [],
46 | "source": [
47 | "OneHotVector(3, 10)"
48 | ]
49 | },
50 | {
51 | "cell_type": "code",
52 | "execution_count": null,
53 | "metadata": {},
54 | "outputs": [],
55 | "source": [
56 | "A = rand(5,5)"
57 | ]
58 | },
59 | {
60 | "cell_type": "code",
61 | "execution_count": null,
62 | "metadata": {},
63 | "outputs": [],
64 | "source": [
65 | "A * OneHotVector(3, 5)"
66 | ]
67 | },
68 | {
69 | "cell_type": "code",
70 | "execution_count": null,
71 | "metadata": {},
72 | "outputs": [],
73 | "source": [
74 | "Vector(OneHotVector(3,5))"
75 | ]
76 | },
77 | {
78 | "cell_type": "markdown",
79 | "metadata": {},
80 | "source": [
81 | "## Exercise\n",
82 | "\n",
83 | "Generalize it to any element type."
84 | ]
85 | },
86 | {
87 | "cell_type": "code",
88 | "execution_count": null,
89 | "metadata": {},
90 | "outputs": [],
91 | "source": []
92 | }
93 | ],
94 | "metadata": {
95 | "kernelspec": {
96 | "display_name": "Julia 1.3.1",
97 | "language": "julia",
98 | "name": "julia-1.3"
99 | },
100 | "language_info": {
101 | "file_extension": ".jl",
102 | "mimetype": "application/julia",
103 | "name": "julia",
104 | "version": "1.3.1"
105 | }
106 | },
107 | "nbformat": 4,
108 | "nbformat_minor": 2
109 | }
110 |
--------------------------------------------------------------------------------
/introductory-tutorials/intro-to-julia/long-version/images/104_100.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/intro-to-julia/long-version/images/104_100.jpg
--------------------------------------------------------------------------------
/introductory-tutorials/intro-to-julia/long-version/images/area_ratio.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/intro-to-julia/long-version/images/area_ratio.png
--------------------------------------------------------------------------------
/introductory-tutorials/intro-to-julia/long-version/images/banana_10svals.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/intro-to-julia/long-version/images/banana_10svals.png
--------------------------------------------------------------------------------
/introductory-tutorials/intro-to-julia/long-version/images/banana_30svals.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/intro-to-julia/long-version/images/banana_30svals.png
--------------------------------------------------------------------------------
/introductory-tutorials/intro-to-julia/long-version/images/banana_3svals.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/intro-to-julia/long-version/images/banana_3svals.png
--------------------------------------------------------------------------------
/introductory-tutorials/intro-to-julia/long-version/images/banana_5svals.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/intro-to-julia/long-version/images/banana_5svals.png
--------------------------------------------------------------------------------
/introductory-tutorials/intro-to-julia/long-version/images/hint.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/introductory-tutorials/intro-to-julia/long-version/images/hint.png
--------------------------------------------------------------------------------
/introductory-tutorials/intro-to-julia/short-version/00.Jupyter_notebooks.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "## Getting started with Jupyter notebooks\n",
8 | "\n",
9 | "### Running a cell\n",
10 | "To execute code within a cell, select that cell and either (1) hit `Shift` and `Enter` or (2) hit the run button (the right pointing arrow) above."
11 | ]
12 | },
13 | {
14 | "cell_type": "code",
15 | "execution_count": null,
16 | "metadata": {},
17 | "outputs": [],
18 | "source": [
19 | "1 + 1\n",
20 | "2 + 2"
21 | ]
22 | },
23 | {
24 | "cell_type": "markdown",
25 | "metadata": {},
26 | "source": [
27 |     "If you're new to Jupyter notebooks, note that only the last line of a cell prints by default when you execute that cell, and that you can suppress this output with a semicolon."
28 | ]
29 | },
30 | {
31 | "cell_type": "code",
32 | "execution_count": null,
33 | "metadata": {},
34 | "outputs": [],
35 | "source": [
36 | "1 + 1\n",
37 | "2 + 2;"
38 | ]
39 | },
40 | {
41 | "cell_type": "markdown",
42 | "metadata": {},
43 | "source": [
44 | "### How to get docs for Julia functions\n",
45 | "\n",
46 | "To get docs for a function you're not familiar with, precede it with a question mark. (This works in the terminal too!)"
47 | ]
48 | },
49 | {
50 | "cell_type": "code",
51 | "execution_count": null,
52 | "metadata": {},
53 | "outputs": [],
54 | "source": [
55 | "?println"
56 | ]
57 | },
58 | {
59 | "cell_type": "markdown",
60 | "metadata": {},
61 | "source": [
62 | "### How to use shell commands\n",
63 | "\n",
64 | "Type `;` and then you can use shell commands. For example,"
65 | ]
66 | },
67 | {
68 | "cell_type": "code",
69 | "execution_count": null,
70 | "metadata": {},
71 | "outputs": [],
72 | "source": [
73 | ";ls"
74 | ]
75 | },
76 | {
77 | "cell_type": "code",
78 | "execution_count": null,
79 | "metadata": {},
80 | "outputs": [],
81 | "source": [
82 | ";pwd"
83 | ]
84 | }
85 | ],
86 | "metadata": {
87 | "kernelspec": {
88 | "display_name": "Julia 1.0.0",
89 | "language": "julia",
90 | "name": "julia-1.0"
91 | },
92 | "language_info": {
93 | "file_extension": ".jl",
94 | "mimetype": "application/julia",
95 | "name": "julia",
96 | "version": "1.0.0"
97 | }
98 | },
99 | "nbformat": 4,
100 | "nbformat_minor": 2
101 | }
102 |
--------------------------------------------------------------------------------
/jupyter_notebook_config.py:
--------------------------------------------------------------------------------
1 | import io
2 | import os
3 |
4 | dir_path = os.path.dirname(os.path.realpath(__file__))
5 | nbexports_path = os.path.join(dir_path, ".nbexports")
6 |
7 | from notebook.utils import to_api_path
8 |
9 | _script_exporter = None
10 |
11 | def script_post_save(model, os_path, contents_manager, **kwargs):
12 | if model['type'] != 'notebook':
13 | return
14 |
15 | from nbconvert.exporters.script import ScriptExporter
16 | from nbconvert.exporters.html import HTMLExporter
17 |
18 | global _script_exporter
19 | if _script_exporter is None:
20 | _script_exporter = ScriptExporter(parent=contents_manager)
21 | _script_exporter.template_file = os.path.join(dir_path, 'jupyter_script_export_template.tpl')
22 |
23 | export_script(_script_exporter, model, os_path, contents_manager, **kwargs)
24 |
25 | def export_script(exporter, model, os_path, contents_manager, **kwargs):
26 | """convert notebooks to Python script after save with nbconvert
27 | replaces `ipython notebook --script`
28 | """
29 | base, ext = os.path.splitext(os_path)
30 | script, resources = exporter.from_filename(os_path)
31 | script_fname = base + resources.get('output_extension', '.txt')
32 | script_repopath = to_api_path(script_fname, contents_manager.root_dir)
33 | log = contents_manager.log
34 | script_fullpath = os.path.join(dir_path, ".nbexports", script_repopath)
35 | os.makedirs(os.path.dirname(script_fullpath), exist_ok=True)
36 | log.info("Saving script /%s", script_fullpath)
37 | with io.open(script_fullpath, 'w', encoding='utf-8', newline='\n') as f:
38 | f.write(script)
39 |
40 |
41 | c.FileContentsManager.post_save_hook = script_post_save
42 |
43 |
--------------------------------------------------------------------------------
/jupyter_script_export_template.tpl:
--------------------------------------------------------------------------------
1 | {# Lines inside these brackets are comments #}
2 | {#- Brackets with a `-` mean to skip whitespace before or after the block. -#}
3 |
4 | {#-
5 | # This file defines a Jinja Template for converting .ipynb files into scripts
6 | # for either delve or julia. We need this because the default script exporter
7 | # doesn't render Markdown for any languages except Python.
8 | # Exporting the markdown makes github reviews of .ipynb files easier.
9 | # This template is invoked by our custom `jupyter_notebook_config.py`. You can
10 | # read more about the Jinja template specification here:
11 | # http://jinja.pocoo.org/docs/2.10/templates/#comments
12 | # and here:
13 | # https://nbconvert.readthedocs.io/en/latest/customizing.html
14 | # And the filter functions used in this file are defined here:
15 | # https://github.com/pallets/jinja/blob/master/jinja2/filters.py
16 | # and here:
17 | # https://github.com/jupyter/nbconvert/blob/master/nbconvert/filters/strings.py
18 | -#}
19 |
20 | {#- ---------
21 | # Lines up here, before the `extends` section, go at the top of the file, before any other
22 | # content from the notebook itself.
23 | #----------- -#}
24 |
25 | {%- if 'name' in nb.metadata.get('kernelspec', {}) and
26 | nb.metadata.kernelspec.name == 'julia' -%}
27 | # This file was generated from a Julia language jupyter notebook.
28 | {% endif -%}
29 |
30 | {% extends 'script.tpl'%}
31 |
32 | {% block markdowncell %}
33 | {#- Turn the contents of the markdown cell into a wrapped comment block, and trim empty lines. -#}
34 |
35 | {#-
36 | # NOTE: We used `kernelspec.name` not `language_info.name`, for reasons specific to
37 | # our custom jupyter kernel. I think `language_info.name` might be more robust?
38 | -#}
39 | {%- if 'name' in nb.metadata.get('kernelspec', {}) and
40 | nb.metadata.kernelspec.name == 'julia' -%}
41 | {%- set commentprefix = '# ' -%}
42 | {#-
43 | # Add other languages here as if-else block, e.g. C++ would use '// '
44 | -#}
45 | {%- else -%}
46 | {#- Assume python by default -#}
47 | {%- set commentprefix = '# ' -%}
48 | {%- endif -%}
49 |
50 | {%- set commentlen = 92-(commentprefix|length) -%}
51 | {{- '\n' -}}
52 | {{- commentprefix ~ '-' * commentlen -}}
53 | {{- '\n' -}}
54 |
55 | {#- Turn the contents of the markdown cell into a wrapped comment block, and trim empty lines. -#}
56 | {#- Note: `comment_lines` and `wrap_text` are defined in nbconvert/filters/strings.py -#}
57 | {{- cell.source | wrap_text(width=commentlen) | comment_lines(prefix=commentprefix) | replace(commentprefix~"\n", commentprefix|trim ~ "\n") -}}
58 |
59 | {{- '\n' -}}
60 | {{- commentprefix ~ '-' * commentlen -}}
61 | {{- '\n' -}}
62 |
63 | {% endblock markdowncell %}
64 |
--------------------------------------------------------------------------------
/more-advanced-materials/ML-demos/TextAnalysis.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {
7 | "collapsed": false
8 | },
9 | "outputs": [],
10 | "source": [
11 | "using TextAnalysis"
12 | ]
13 | },
14 | {
15 | "cell_type": "code",
16 | "execution_count": 2,
17 | "metadata": {
18 | "collapsed": false
19 | },
20 | "outputs": [
21 | {
22 | "data": {
23 | "text/plain": [
24 | "Sentiment Analysis Model Trained on IMDB with a 88587 word corpus"
25 | ]
26 | },
27 | "execution_count": 2,
28 | "metadata": {},
29 | "output_type": "execute_result"
30 | }
31 | ],
32 | "source": [
33 | "model = TextAnalysis.SentimentAnalyzer()"
34 | ]
35 | },
36 | {
37 | "cell_type": "code",
38 | "execution_count": 3,
39 | "metadata": {
40 | "collapsed": false
41 | },
42 | "outputs": [
43 | {
44 | "data": {
45 | "text/plain": [
46 | "0.46740916f0"
47 | ]
48 | },
49 | "execution_count": 3,
50 | "metadata": {},
51 | "output_type": "execute_result"
52 | }
53 | ],
54 | "source": [
55 | "model(StringDocument(\"hello world\"))"
56 | ]
57 | },
58 | {
59 | "cell_type": "code",
60 | "execution_count": 4,
61 | "metadata": {
62 | "collapsed": false,
63 | "scrolled": true
64 | },
65 | "outputs": [
66 | {
67 | "data": {
68 | "text/plain": [
69 | "0.56199634f0"
70 | ]
71 | },
72 | "execution_count": 4,
73 | "metadata": {},
74 | "output_type": "execute_result"
75 | }
76 | ],
77 | "source": [
78 | "model(StringDocument(\"an incredibly boring film\"))"
79 | ]
80 | },
81 | {
82 | "cell_type": "code",
83 | "execution_count": 5,
84 | "metadata": {
85 | "collapsed": false
86 | },
87 | "outputs": [
88 | {
89 | "data": {
90 | "text/plain": [
91 | "0.47946903f0"
92 | ]
93 | },
94 | "execution_count": 5,
95 | "metadata": {},
96 | "output_type": "execute_result"
97 | }
98 | ],
99 | "source": [
100 | "model(StringDocument(\"a highly enjoyable ride\"))"
101 | ]
102 | },
103 | {
104 | "cell_type": "code",
105 | "execution_count": 6,
106 | "metadata": {
107 | "collapsed": false
108 | },
109 | "outputs": [
110 | {
111 | "data": {
112 | "text/plain": [
113 | "0.4626028f0"
114 | ]
115 | },
116 | "execution_count": 6,
117 | "metadata": {},
118 | "output_type": "execute_result"
119 | }
120 | ],
121 | "source": [
122 | "model(StringDocument(\"a great film\"))"
123 | ]
124 | },
125 | {
126 | "cell_type": "code",
127 | "execution_count": null,
128 | "metadata": {
129 | "collapsed": true
130 | },
131 | "outputs": [],
132 | "source": []
133 | }
134 | ],
135 | "metadata": {
136 | "kernelspec": {
137 | "display_name": "Julia 0.6.4-pre",
138 | "language": "julia",
139 | "name": "julia-0.6"
140 | },
141 | "language_info": {
142 | "file_extension": ".jl",
143 | "mimetype": "application/julia",
144 | "name": "julia",
145 | "version": "0.6.4"
146 | }
147 | },
148 | "nbformat": 4,
149 | "nbformat_minor": 2
150 | }
151 |
--------------------------------------------------------------------------------
/more-advanced-materials/ML-demos/knet-nlp/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2018 KnetML
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/more-advanced-materials/ML-demos/knet-nlp/README.md:
--------------------------------------------------------------------------------
1 | # NLP Demos with Knet
2 |
3 | ## `charlm/charlm.ipynb`
4 |
5 | A character-based RNN language model is trained on 'The Complete Works of William Shakespeare' and the Julia code base. The trained models can generate Shakespeare-like writing and random Julia code.
6 |
7 | [Reference](http://karpathy.github.io/2015/05/21/rnn-effectiveness)
8 |
9 | ## `imdb/imdbdemo.ipynb`
10 |
11 | The IMDB model is trained on 25,000 movie reviews and can label unseen reviews as negative or positive.
12 |
13 | [Reference](https://github.com/fchollet/keras/raw/master/keras/datasets/imdb.py)
14 |
15 | ## `macnet/visualize.ipynb`
16 |
17 | Macnet is a visual question answering model trained on (image, question) pairs from the [CLEVR](https://cs.stanford.edu/people/jcjohns/clevr/) dataset. It can predict an answer to a given question about an image.
18 |
19 | [Reference](https://arxiv.org/abs/1803.03067)
20 |
--------------------------------------------------------------------------------
/more-advanced-materials/ML-demos/knet-nlp/charlm/charlm.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Character based RNN language model\n",
8 | "Based on http://karpathy.github.io/2015/05/21/rnn-effectiveness"
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "metadata": {},
14 | "source": [
15 | "## Setup\n",
16 |     "1. Adds the required packages to Julia. \n",
17 |     "2. Loads the data and a pretrained model."
18 | ]
19 | },
20 | {
21 | "cell_type": "code",
22 | "execution_count": null,
23 | "metadata": {},
24 | "outputs": [],
25 | "source": [
26 | "include(\"charlm.jl\")"
27 | ]
28 | },
29 | {
30 | "cell_type": "markdown",
31 | "metadata": {},
32 | "source": [
33 | "## Sample Data-1\n",
34 | "A random subset of the Shakespeare training data"
35 | ]
36 | },
37 | {
38 | "cell_type": "code",
39 | "execution_count": null,
40 | "metadata": {},
41 | "outputs": [],
42 | "source": [
43 | "LEN = 500\n",
44 | "r = rand(1:length(shake_text)-LEN)\n",
45 | "println(shake_text[r:r+LEN])\n",
46 | "flush(STDOUT)"
47 | ]
48 | },
49 | {
50 | "cell_type": "markdown",
51 | "metadata": {},
52 | "source": [
53 | "## Sample Generation-1\n",
54 | "Random Shakespeare style text generated by the model"
55 | ]
56 | },
57 | {
58 | "cell_type": "code",
59 | "execution_count": null,
60 | "metadata": {},
61 | "outputs": [],
62 | "source": [
63 | "generate(shake_model, LEN)"
64 | ]
65 | },
66 | {
67 | "cell_type": "markdown",
68 | "metadata": {},
69 | "source": [
70 | "## Sample Data-2\n",
71 | "A random subset of the code in Julia base"
72 | ]
73 | },
74 | {
75 | "cell_type": "code",
76 | "execution_count": null,
77 | "metadata": {},
78 | "outputs": [],
79 | "source": [
80 | "r = rand(1:length(julia_text)-LEN)\n",
81 | "println(julia_text[r:r+LEN])\n",
82 | "flush(STDOUT)"
83 | ]
84 | },
85 | {
86 | "cell_type": "markdown",
87 | "metadata": {},
88 | "source": [
89 | "## Sample Generation-2\n",
90 | "Random Julia code generated by the model"
91 | ]
92 | },
93 | {
94 | "cell_type": "code",
95 | "execution_count": null,
96 | "metadata": {},
97 | "outputs": [],
98 | "source": [
99 | "generate(julia_model, LEN)"
100 | ]
101 | }
102 | ],
103 | "metadata": {
104 | "kernelspec": {
105 | "display_name": "Julia 0.6.4-pre",
106 | "language": "julia",
107 | "name": "julia-0.6"
108 | },
109 | "language_info": {
110 | "file_extension": ".jl",
111 | "mimetype": "application/julia",
112 | "name": "julia",
113 | "version": "0.6.4"
114 | }
115 | },
116 | "nbformat": 4,
117 | "nbformat_minor": 2
118 | }
119 |
--------------------------------------------------------------------------------
/more-advanced-materials/ML-demos/knet-nlp/charlm/charlm.jl:
--------------------------------------------------------------------------------
1 | if ENV["HOME"] == "/mnt/juliabox"
2 | Pkg.dir(path...)=joinpath("/home/jrun/.julia/v0.6",path...)
3 | else
4 | for p in ("JLD","Knet")
5 | Pkg.installed(p) == nothing && Pkg.add(p)
6 | end
7 | end
8 |
9 | using JLD,Knet
10 |
11 | info("Loading Shakespeare data")
12 | include(Knet.dir("data","gutenberg.jl"))
13 | trn,tst,chars = shakespeare()
14 | shake_text = String(chars[vcat(trn,tst)])
15 |
16 | info("Loading Shakespeare model")
17 | isfile("shakespeare.jld") || download("http://people.csail.mit.edu/deniz/models/nlp-demos/shakespeare.jld","shakespeare.jld")
18 | shake_model = load("shakespeare.jld","model")
19 |
20 | info("Reading Julia files")
21 | base = joinpath(Base.JULIA_HOME, Base.DATAROOTDIR, "julia", "base")
22 | julia_text = ""
23 | for (root,dirs,files) in walkdir(base)
24 | for f in files
25 | f[end-2:end] == ".jl" || continue
26 | julia_text *= readstring(joinpath(root,f))
27 | end
28 | # println((root,length(files),all(f->contains(f,".jl"),files)))
29 | end
30 |
31 | info("Loading Julia model")
32 | isfile("juliacharlm.jld") || download("http://people.csail.mit.edu/deniz/models/nlp-demos/juliacharlm.jld","juliacharlm.jld")
33 | julia_model = load("juliacharlm.jld","model")
34 |
35 | # Given the current character, predict the next character
36 | function predict(ws,xs,hx,cx;pdrop=0)
37 | r,wr,wx,wy,by = ws
38 | x = wx[:,xs] # xs=(B,T) x=(X,B,T)
39 | x = dropout(x,pdrop)
40 | y,hy,cy = rnnforw(r,wr,x,hx,cx,hy=true,cy=true) # y=(H,B,T) hy=cy=(H,B,L)
41 | y = dropout(y,pdrop)
42 | y2 = reshape(y,size(y,1),size(y,2)*size(y,3)) # y2=(H,B*T)
43 | return wy*y2.+by, hy, cy
44 | end
45 |
46 | # Sample from trained model
47 | function generate(model,n)
48 | function sample(y)
49 | p,r=Array(exp.(y-logsumexp(y))),rand()
50 | for j=1:length(p); (r -= p[j]) < 0 && return j; end
51 | end
52 | h,c = nothing,nothing
53 | chars = model[end]
54 | x = findfirst(chars,'\n')
55 | for i=1:n
56 | y,h,c = predict(model,[x],h,c)
57 | x = sample(y)
58 | print(chars[x])
59 | end
60 | println()
61 | end
62 |
63 | nothing
64 |
--------------------------------------------------------------------------------
/more-advanced-materials/ML-demos/knet-nlp/imdb/imdbdemo.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# IMDB Movie Review Sentiment Analysis Demo"
8 | ]
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "metadata": {},
13 | "source": [
14 | "## Setup\n",
15 |     "1. Adds the required packages to Julia. \n",
16 |     "2. Loads the data and a pretrained model."
17 | ]
18 | },
19 | {
20 | "cell_type": "code",
21 | "execution_count": null,
22 | "metadata": {},
23 | "outputs": [],
24 | "source": [
25 | "include(\"imdb.jl\")"
26 | ]
27 | },
28 | {
29 | "cell_type": "markdown",
30 | "metadata": {},
31 | "source": [
32 | "## Sample Data\n",
33 |     "The model was trained using 25,000 movie reviews such as the following (shift-ENTER to see a random example). \n",
34 |     "Each review was tokenized, lowercased, truncated to a maximum of 150 words, and restricted to a 30,000-word vocabulary. "
35 | ]
36 | },
37 | {
38 | "cell_type": "code",
39 | "execution_count": null,
40 | "metadata": {
41 | "scrolled": true
42 | },
43 | "outputs": [],
44 | "source": [
45 | "r = rand(1:length(xtrn))\n",
46 | "println(reviewstring(xtrn[r],ytrn[r]))\n",
47 | "flush(STDOUT)"
48 | ]
49 | },
50 | {
51 | "cell_type": "markdown",
52 | "metadata": {},
53 | "source": [
54 | "## Test\n",
55 |     "We test the model on 25,000 never-before-seen reviews from the test set (shift-ENTER to see a random example). \n",
56 |     "The test accuracy is around 86%. "
57 | ]
58 | },
59 | {
60 | "cell_type": "code",
61 | "execution_count": null,
62 | "metadata": {},
63 | "outputs": [],
64 | "source": [
65 | "r = rand(1:length(xtst))\n",
66 | "println(reviewstring(xtst[r],ytst[r]))\n",
67 | "println(\"\\nModel prediction: \"*predictstring(xtst[r]))\n",
68 | "flush(STDOUT)"
69 | ]
70 | },
71 | {
72 | "cell_type": "markdown",
73 | "metadata": {},
74 | "source": [
75 | "## User Input\n",
76 | "In this cell you can enter your own review and let the model guess the sentiment"
77 | ]
78 | },
79 | {
80 | "cell_type": "code",
81 | "execution_count": null,
82 | "metadata": {},
83 | "outputs": [],
84 | "source": [
85 | "userinput = readline(STDIN)\n",
86 | "words = split(lowercase(userinput))\n",
87 | "ex = [get!(imdbdict,wr,UNK) for wr in words]\n",
88 | "ex[ex.>MAXFEATURES]=UNK\n",
89 | "println(\"\\nModel prediction: \"*predictstring(ex))"
90 | ]
91 | },
92 | {
93 | "cell_type": "code",
94 | "execution_count": null,
95 | "metadata": {},
96 | "outputs": [],
97 | "source": []
98 | }
99 | ],
100 | "metadata": {
101 | "kernelspec": {
102 | "display_name": "Julia 0.6.4-pre",
103 | "language": "julia",
104 | "name": "julia-0.6"
105 | },
106 | "language_info": {
107 | "file_extension": ".jl",
108 | "mimetype": "application/julia",
109 | "name": "julia",
110 | "version": "0.6.4"
111 | }
112 | },
113 | "nbformat": 4,
114 | "nbformat_minor": 2
115 | }
116 |
--------------------------------------------------------------------------------
/more-advanced-materials/ML-demos/knet-nlp/macnet/data/.gitignore:
--------------------------------------------------------------------------------
1 | *.jld
2 | *.json
--------------------------------------------------------------------------------
/more-advanced-materials/ML-demos/knet-nlp/macnet/demosetup.jl:
--------------------------------------------------------------------------------
1 | if ENV["HOME"] == "/mnt/juliabox"
2 | Pkg.dir(path...)=joinpath("/home/jrun/.julia/v0.6",path...)
3 | else
4 | for p in ("Knet","JLD","JSON","Images") # ,"WordTokenizers")
5 | Pkg.installed(p) == nothing && Pkg.add(p)
6 | end
7 | end
8 | using Images,JLD,Knet # ,WordTokenizers
9 | global atype = gpu()<0 ? Array{Float32}:KnetArray{Float32}
10 |
11 | server="people.csail.mit.edu/deniz/"
12 | if !isdir("data/demo")
13 | info("Downloading sample questions and images from CLEVR dataset...")
14 | download(server*"data/mac-network/demo.tar.gz","demo.tar.gz")
15 | run(`tar -xzf demo.tar.gz`)
16 | rm("demo.tar.gz")
17 | end
18 | if !isfile("models/macnet.jld")
19 | info("Downloading pre-trained model from our servers...")
20 | download(server*"models/mac-network/demo_model.jld","models/macnet.jld")
21 | end
22 |
--------------------------------------------------------------------------------
/more-advanced-materials/ML-demos/knet-nlp/macnet/models/.gitignore:
--------------------------------------------------------------------------------
1 | *.jld
--------------------------------------------------------------------------------
/more-advanced-materials/ML-demos/knet-tutorial/15.quickstart.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Quick start\n",
8 | "(c) Deniz Yuret, 2019\n",
9 | "\n",
10 | "This notebook is for the impatient reader who wants to get a flavor of Julia/Knet possibly to compare it with other deep learning frameworks. In 15 lines of code and 30 seconds of GPU time we define, train, and evaluate the LeNet convolutional neural network model from scratch without any predefined layers."
11 | ]
12 | },
13 | {
14 | "cell_type": "code",
15 | "execution_count": 1,
16 | "metadata": {},
17 | "outputs": [],
18 | "source": [
19 | "using Knet"
20 | ]
21 | },
22 | {
23 | "cell_type": "code",
24 | "execution_count": 2,
25 | "metadata": {},
26 | "outputs": [],
27 | "source": [
28 | "# Define convolutional layer:\n",
29 | "struct Conv; w; b; f; end\n",
30 | "(c::Conv)(x) = c.f.(pool(conv4(c.w, x) .+ c.b))\n",
31 | "Conv(w1,w2,cx,cy,f=relu) = Conv(param(w1,w2,cx,cy), param0(1,1,cy,1), f);"
32 | ]
33 | },
34 | {
35 | "cell_type": "code",
36 | "execution_count": 3,
37 | "metadata": {},
38 | "outputs": [],
39 | "source": [
40 | "# Define dense layer:\n",
41 | "struct Dense; w; b; f; end\n",
42 | "(d::Dense)(x) = d.f.(d.w * mat(x) .+ d.b)\n",
43 | "Dense(i::Int,o::Int,f=relu) = Dense(param(o,i), param0(o), f);"
44 | ]
45 | },
46 | {
47 | "cell_type": "code",
48 | "execution_count": 4,
49 | "metadata": {},
50 | "outputs": [],
51 | "source": [
52 | "# Define a chain of layers:\n",
53 | "struct Chain; layers; Chain(args...)=new(args); end\n",
54 | "(c::Chain)(x) = (for l in c.layers; x = l(x); end; x)\n",
55 | "(c::Chain)(x,y) = nll(c(x),y)"
56 | ]
57 | },
58 | {
59 | "cell_type": "code",
60 | "execution_count": 5,
61 | "metadata": {},
62 | "outputs": [
63 | {
64 | "name": "stderr",
65 | "output_type": "stream",
66 | "text": [
67 | "┌ Info: Loading MNIST...\n",
68 | "└ @ Main /home/deniz/.julia/dev/Knet/data/mnist.jl:33\n"
69 | ]
70 | }
71 | ],
72 | "source": [
73 | "# Load MNIST data\n",
74 | "include(Knet.dir(\"data\",\"mnist.jl\"))\n",
75 | "dtrn, dtst = mnistdata();"
76 | ]
77 | },
78 | {
79 | "cell_type": "code",
80 | "execution_count": 6,
81 | "metadata": {},
82 | "outputs": [
83 | {
84 | "name": "stdout",
85 | "output_type": "stream",
86 | "text": [
87 | "3.40e-02 100.00%┣████████████████████████████████████████████████████████████┫ 6000/6000 [00:25/00:25, 238.78i/s]\n"
88 | ]
89 | },
90 | {
91 | "data": {
92 | "text/plain": [
93 | "0.9921"
94 | ]
95 | },
96 | "execution_count": 6,
97 | "metadata": {},
98 | "output_type": "execute_result"
99 | }
100 | ],
101 | "source": [
102 | "# Train and test LeNet (about 30 secs on a gpu to reach 99% accuracy)\n",
103 | "LeNet = Chain(Conv(5,5,1,20), Conv(5,5,20,50), Dense(800,500), Dense(500,10,identity))\n",
104 | "progress!(adam(LeNet, repeat(dtrn,10)))\n",
105 | "accuracy(LeNet, dtst)"
106 | ]
107 | }
108 | ],
109 | "metadata": {
110 | "kernelspec": {
111 | "display_name": "Julia 1.0.3",
112 | "language": "julia",
113 | "name": "julia-1.0"
114 | },
115 | "language_info": {
116 | "file_extension": ".jl",
117 | "mimetype": "application/julia",
118 | "name": "julia",
119 | "version": "1.0.3"
120 | }
121 | },
122 | "nbformat": 4,
123 | "nbformat_minor": 2
124 | }
125 |
--------------------------------------------------------------------------------
/more-advanced-materials/ML-demos/knet-tutorial/README.md:
--------------------------------------------------------------------------------
1 | # Knet Tutorial
2 |
3 | This tutorial introduces the programming language Julia and the Knet deep learning
4 | framework. By the end, the reader should be able to define, train, evaluate, and visualize
5 | basic MLP, CNN, and RNN models. Each notebook is written to work stand-alone but they rely
6 | on concepts introduced in earlier notebooks, so I recommend reading them in order. Every
7 | Knet function outside of the standard Julia library is defined or explained before use.
8 |
9 | To run the notebooks on your computer, install and run IJulia by typing the following at the
10 | `julia>` prompt (see [IJulia.jl](https://github.com/JuliaLang/IJulia.jl) for more
11 | information):
12 |
13 | ```julia-repl
14 | julia> using Pkg; Pkg.add("IJulia"); Pkg.add("Knet")
15 | julia> using IJulia, Knet
16 | julia> notebook(dir=Knet.dir("tutorial"))
17 | ```
18 |
19 | To run the notebooks in the cloud you can use [JuliaBox](https://juliabox.com), [Google
20 | Colab](https://colab.research.google.com/notebooks/welcome.ipynb), or services like
21 | [AWS](http://aws.amazon.com). To run on JuliaBox, click the Git button in the Dashboard and
22 | clone `https://github.com/denizyuret/Knet.jl.git`. The tutorial should be available under
23 | `Knet/tutorial` on the Jupyter screen. To run on Colab add Julia support first using the
24 | [colab_install_julia](colab_install_julia.ipynb) notebook, then open the notebooks in
25 | [Google
26 | Drive](https://drive.google.com/drive/folders/19D-R31unxZV_PUYYYpCfd-gnbdUiZfNb?usp=sharing).
27 | To run on AWS follow the instructions in the [Knet Installation
28 | Section](http://denizyuret.github.io/Knet.jl/latest/install.html#Using-Amazon-AWS-1).
29 |
30 | **Contents:**
31 | * [Julia is fast:](https://github.com/denizyuret/Knet.jl/blob/master/tutorial/00.Julia_is_fast.ipynb)
32 | comparison of Julia's speed to C, Python and numpy.
33 | * [Getting to know Julia:](https://github.com/denizyuret/Knet.jl/blob/master/tutorial/10.Getting_to_know_Julia.ipynb)
34 | basic Julia tutorial from [JuliaBox](http://juliabox.com).
35 | * [Quick start:](https://github.com/denizyuret/Knet.jl/blob/master/tutorial/15.quickstart.ipynb)
36 | if you are familiar with other deep learning frameworks and want to see a quick Julia example.
37 | * [The MNIST dataset:](https://github.com/denizyuret/Knet.jl/blob/master/tutorial/20.mnist.ipynb)
38 | introduction to the MNIST handwritten digit recognition dataset.
39 | * [Julia iterators:](https://github.com/denizyuret/Knet.jl/blob/master/tutorial/25.iterators.ipynb)
40 | iterators are useful for generating and training with data.
41 | * [Creating a model:](https://github.com/denizyuret/Knet.jl/blob/master/tutorial/30.lin.ipynb)
42 | define, train, visualize simple linear models, introduce gradients, SGD, using the GPU.
43 | * [Multilayer perceptrons:](https://github.com/denizyuret/Knet.jl/blob/master/tutorial/40.mlp.ipynb)
44 | multi layer perceptrons, nonlinearities, model capacity, overfitting, regularization, dropout.
45 | * [Convolutional networks:](https://github.com/denizyuret/Knet.jl/blob/master/tutorial/50.cnn.ipynb)
46 | convolutional neural networks, sparse and shared weights using conv4 and pool operations.
47 | * [Recurrent networks:](https://github.com/denizyuret/Knet.jl/blob/master/tutorial/60.rnn.ipynb)
48 | introduction to recurrent neural networks.
49 | * [IMDB sentiment analysis:](https://github.com/denizyuret/Knet.jl/blob/master/tutorial/70.imdb.ipynb)
50 | a simple RNN sequence classification model for sentiment analysis of IMDB movie reviews.
51 | * [Language modeling:](https://github.com/denizyuret/Knet.jl/blob/master/tutorial/80.charlm.ipynb)
52 | a character based RNN language model that can write Shakespeare sonnets and Julia programs.
53 | * [Sequence to sequence:](https://github.com/denizyuret/Knet.jl/blob/master/tutorial/90.s2s.ipynb)
54 | a sequence to sequence RNN model typically used for machine translation.
55 |
--------------------------------------------------------------------------------
/more-advanced-materials/ML-demos/knet-tutorial/colab_install_julia.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "nbformat": 4,
3 | "nbformat_minor": 0,
4 | "metadata": {
5 | "colab": {
6 | "name": "colab-install-julia.ipynb",
7 | "version": "0.3.2",
8 | "provenance": [],
9 | "collapsed_sections": []
10 | },
11 | "kernelspec": {
12 | "name": "python3",
13 | "display_name": "Python 3"
14 | },
15 | "accelerator": "GPU"
16 | },
17 | "cells": [
18 | {
19 | "metadata": {
20 | "id": "cEOANYIVIdR5",
21 | "colab_type": "code",
22 | "colab": {}
23 | },
24 | "cell_type": "code",
25 | "source": [
26 | "# To run julia notebooks in colab, run this installation script first, should take 10-15 minutes.\n",
27 | "# From: @jekbradbury https://discourse.julialang.org/t/julia-on-google-colab-free-gpu-accelerated-shareable-notebooks/15319\n",
28 | "!wget https://developer.nvidia.com/compute/cuda/9.0/Prod/local_installers/cuda-repo-ubuntu1604-9-0-local_9.0.176-1_amd64-deb\n",
29 | "!dpkg -i cuda-repo-ubuntu1604-9-0-local_9.0.176-1_amd64-deb\n",
30 | "!apt-key add /var/cuda-repo-9-0-local/7fa2af80.pub\n",
31 | "!apt update -q\n",
32 | "!apt install cuda gcc-6 g++-6 -y -q\n",
33 | "!ln -s /usr/bin/gcc-6 /usr/local/cuda/bin/gcc\n",
34 | "!ln -s /usr/bin/g++-6 /usr/local/cuda/bin/g++\n",
35 | "\n",
36 | "!curl -sSL \"https://julialang-s3.julialang.org/bin/linux/x64/1.0/julia-1.0.0-linux-x86_64.tar.gz\" -o julia.tar.gz\n",
37 | "!tar -xzf julia.tar.gz -C /usr --strip-components 1\n",
38 | "!rm -rf julia.tar.gz*\n",
39 | "!julia -e 'using Pkg; pkg\"add IJulia; add Knet; precompile\"'"
40 | ],
41 | "execution_count": 0,
42 | "outputs": []
43 | },
44 | {
45 | "metadata": {
46 | "id": "bqp7S32ULebR",
47 | "colab_type": "code",
48 | "colab": {}
49 | },
50 | "cell_type": "code",
51 | "source": [
52 | ""
53 | ],
54 | "execution_count": 0,
55 | "outputs": []
56 | }
57 | ]
58 | }
--------------------------------------------------------------------------------
/more-advanced-materials/ML-demos/knet-tutorial/images/LSTM3-chain.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/more-advanced-materials/ML-demos/knet-tutorial/images/LSTM3-chain.png
--------------------------------------------------------------------------------
/more-advanced-materials/ML-demos/knet-tutorial/images/LSTM3-var-GRU.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/more-advanced-materials/ML-demos/knet-tutorial/images/LSTM3-var-GRU.png
--------------------------------------------------------------------------------
/more-advanced-materials/ML-demos/knet-tutorial/images/diags.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/more-advanced-materials/ML-demos/knet-tutorial/images/diags.png
--------------------------------------------------------------------------------
/more-advanced-materials/ML-demos/knet-tutorial/images/rnn-vs-mlp.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/more-advanced-materials/ML-demos/knet-tutorial/images/rnn-vs-mlp.png
--------------------------------------------------------------------------------
/more-advanced-materials/ML-demos/knet-tutorial/images/s2s-dims.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/more-advanced-materials/ML-demos/knet-tutorial/images/s2s-dims.png
--------------------------------------------------------------------------------
/more-advanced-materials/ML-demos/knet-tutorial/images/seq2seq.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JuliaAcademy/JuliaTutorials/724e15a350d150a9773afe51a3830709dbed422f/more-advanced-materials/ML-demos/knet-tutorial/images/seq2seq.png
--------------------------------------------------------------------------------
/more-advanced-materials/metaprogramming/LICENSE.md:
--------------------------------------------------------------------------------
1 | The contents of this directory are under The MIT License.
2 |
3 | Copyright (c) 2018: Julia Computing, Inc.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
6 |
7 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
8 |
9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
10 |
--------------------------------------------------------------------------------
/more-advanced-materials/parallelism-demos/03. JuliaRun-parallel.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "using JuliaRunClient"
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": null,
15 | "metadata": {},
16 | "outputs": [],
17 | "source": [
18 | "initializeCluster(2);"
19 | ]
20 | },
21 | {
22 | "cell_type": "code",
23 | "execution_count": null,
24 | "metadata": {},
25 | "outputs": [],
26 | "source": [
27 | "function estimate_pi(N, loops) \n",
28 | " n = sum(pmap((x)->darts_in_circle(N), 1:loops)) \n",
29 | " 4 * n / (loops * N) \n",
30 | "end"
31 | ]
32 | },
33 | {
34 | "cell_type": "code",
35 | "execution_count": null,
36 | "metadata": {},
37 | "outputs": [],
38 | "source": [
39 | "@everywhere function darts_in_circle(N) \n",
40 | " n = 0 \n",
41 | " for i in 1:N \n",
42 | " if rand()^2 + rand()^2 < 1 \n",
43 | " n += 1 \n",
44 | " end \n",
45 | " end \n",
46 | " n \n",
47 | "end"
48 | ]
49 | },
50 | {
51 | "cell_type": "code",
52 | "execution_count": null,
53 | "metadata": {},
54 | "outputs": [],
55 | "source": [
56 | "estimate_pi(10, 2) #compile the function on all nodes\n",
57 | "\n",
58 | "@time estimate_pi(1_000_000, 50)"
59 | ]
60 | },
61 | {
62 | "cell_type": "code",
63 | "execution_count": null,
64 | "metadata": {},
65 | "outputs": [],
66 | "source": [
67 | "releaseCluster();\n",
68 | "\n",
69 | "## Ignore if you see a message as below\n",
70 | "## ERROR (unhandled task failure): EOFError: read end of file or ERROR (unhandled task failure): read: connection reset by peer (ECONNRESET)"
71 | ]
72 | },
73 | {
74 | "cell_type": "code",
75 | "execution_count": null,
76 | "metadata": {},
77 | "outputs": [],
78 | "source": [
79 | "sleep(30)"
80 | ]
81 | },
82 | {
83 | "cell_type": "code",
84 | "execution_count": null,
85 | "metadata": {},
86 | "outputs": [],
87 | "source": [
88 | "nworkers()"
89 | ]
90 | },
91 | {
92 | "cell_type": "code",
93 | "execution_count": null,
94 | "metadata": {},
95 | "outputs": [],
96 | "source": []
97 | }
98 | ],
99 | "metadata": {
100 | "kernelspec": {
101 | "display_name": "Julia 0.6.0",
102 | "language": "julia",
103 | "name": "julia-0.6"
104 | },
105 | "language_info": {
106 | "file_extension": ".jl",
107 | "mimetype": "application/julia",
108 | "name": "julia",
109 | "version": "0.6.0"
110 | }
111 | },
112 | "nbformat": 4,
113 | "nbformat_minor": 2
114 | }
115 |
--------------------------------------------------------------------------------
/more-advanced-materials/parallelism-demos/04. TracyWidom.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "using JuliaRunClient\n",
10 | "ctx = Context()\n",
11 | "nb = self()"
12 | ]
13 | },
14 | {
15 | "cell_type": "code",
16 | "execution_count": null,
17 | "metadata": {},
18 | "outputs": [],
19 | "source": [
20 | "initParallel()\n",
21 | "\n",
22 | "NWRKRS = 2\n",
23 | "println(\"scale up to $NWRKRS\")\n",
24 | "\n",
25 | "@result setJobScale(ctx, nb, NWRKRS)\n",
26 | "waitForWorkers(NWRKRS)"
27 | ]
28 | },
29 | {
30 | "cell_type": "code",
31 | "execution_count": null,
32 | "metadata": {},
33 | "outputs": [],
34 | "source": [
35 | "using StatsBase"
36 | ]
37 | },
38 | {
39 | "cell_type": "code",
40 | "execution_count": null,
41 | "metadata": {},
42 | "outputs": [],
43 | "source": [
44 | "@everywhere using StatsBase"
45 | ]
46 | },
47 | {
48 | "cell_type": "code",
49 | "execution_count": null,
50 | "metadata": {},
51 | "outputs": [],
52 | "source": [
53 | "@everywhere function montecarlo(howmany, data_generator, bins)\n",
54 | " h = Histogram(bins)\n",
55 | " for i=1:howmany\n",
56 | " push!(h, data_generator() )\n",
57 | " end\n",
58 | " return h.weights\n",
59 | "end"
60 | ]
61 | },
62 | {
63 | "cell_type": "code",
64 | "execution_count": null,
65 | "metadata": {},
66 | "outputs": [],
67 | "source": [
68 | "w = @parallel (+) for i=1:nworkers()\n",
69 | " montecarlo(100000, randn, -3:.1:3)\n",
70 | "end;"
71 | ]
72 | },
73 | {
74 | "cell_type": "code",
75 | "execution_count": null,
76 | "metadata": {},
77 | "outputs": [],
78 | "source": [
79 | "using Plots"
80 | ]
81 | },
82 | {
83 | "cell_type": "code",
84 | "execution_count": null,
85 | "metadata": {},
86 | "outputs": [],
87 | "source": [
88 | "@everywhere function tracywidom_sample(β=2,n=200)\n",
89 | " h=n^(-1/3)\n",
90 | " x=[0:h:10;]\n",
91 | " N=length(x)\n",
92 | " d=(-2/h^2 .-x) + 2/sqrt(h*β)*randn(N) # diagonal\n",
93 | " e=ones(N-1)/h^2 # subdiagonal\n",
94 | " eigvals(SymTridiagonal(d,e))[N]\n",
95 | "end"
96 | ]
97 | },
98 | {
99 | "cell_type": "code",
100 | "execution_count": null,
101 | "metadata": {
102 | "scrolled": true
103 | },
104 | "outputs": [],
105 | "source": [
106 | "plot()\n",
107 | "for β = [1,2,4]\n",
108 | " bins = -4:.05:0.95\n",
109 | " w=\n",
110 | " @parallel (+) for i=1:nworkers()\n",
111 | " montecarlo(10000,()->tracywidom_sample(β), -4:.05:1)\n",
112 | " end;\n",
113 | "plot!(bins, w/sum(w)*bins.step.hi)\n",
114 | "end"
115 | ]
116 | },
117 | {
118 | "cell_type": "code",
119 | "execution_count": null,
120 | "metadata": {},
121 | "outputs": [],
122 | "source": [
123 | "plot!()"
124 | ]
125 | },
126 | {
127 | "cell_type": "code",
128 | "execution_count": null,
129 | "metadata": {},
130 | "outputs": [],
131 | "source": [
132 | "# Scale down\n",
133 | "@result setJobScale(ctx, self(), 0)"
134 | ]
135 | },
136 | {
137 | "cell_type": "code",
138 | "execution_count": null,
139 | "metadata": {},
140 | "outputs": [],
141 | "source": [
142 | "nworkers()"
143 | ]
144 | },
145 | {
146 | "cell_type": "code",
147 | "execution_count": null,
148 | "metadata": {},
149 | "outputs": [],
150 | "source": []
151 | }
152 | ],
153 | "metadata": {
154 | "kernelspec": {
155 | "display_name": "Julia 0.6.0",
156 | "language": "julia",
157 | "name": "julia-0.6"
158 | },
159 | "language_info": {
160 | "file_extension": ".jl",
161 | "mimetype": "application/julia",
162 | "name": "julia",
163 | "version": "0.6.0"
164 | }
165 | },
166 | "nbformat": 4,
167 | "nbformat_minor": 2
168 | }
169 |
--------------------------------------------------------------------------------
/more-advanced-materials/parallelism-demos/LICENSE.md:
--------------------------------------------------------------------------------
1 | The contents of this directory are under The MIT License.
2 |
3 | Copyright (c) 2018: Julia Computing, Inc.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
6 |
7 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
8 |
9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
10 |
--------------------------------------------------------------------------------
/zh-cn/README.md:
--------------------------------------------------------------------------------
1 | # 中文版 JuliaBox 教程
2 |
3 | 翻译了基础部分
4 |
5 | **更新建议**
6 | + 如果只是同步英文翻译或者更新内容,建议先编辑[简短版][简短版],然后同步到[完整版][完整版]
7 | + 要增加新的内容,则直接在[完整版][完整版]中增加。视内容重要度,有选择地同步到简短版中。
8 | + 如果更新幅度较大,让[简短版][简短版]与[完整版][完整版]的差别变大,则记得更新以下的说明语句。
9 | 删除“与简短版重复”或“略有区别”等语句。
10 |
11 | **已完成**
12 | - introductory-tutorials
13 | - [intro-to-julia-ZH][完整版]
14 | - [简短版][简短版]
15 | - [00.上手 Jupyter notebook][short-00.上手Jupyter_notebook]
16 | - [01.了解 Julia][short-01.了解_Julia]
17 | - [02.Julia 中的线性代数][short-02.Julia_中的线性代数]
18 | - [03.包的使用][short-03.包的使用]
19 | - [04.Julia 绘图简介][short-04.Julia_绘图简介]
20 | - [05.Julia 很快][short-05.Julia很快]
21 | - [06.多重派发][short-06.多重派发]
22 | - [00.上手 Jupyter notebook](./intro-to-julia-ZH/00.上手Jupyter_notebook.ipynb)
23 | (与[简短版 00.上手Jupyter notebook][short-00.上手Jupyter_notebook]重复)
24 | - [01.新手入门](./intro-to-julia-ZH/01.新手入门.ipynb)
25 | - [02.字符串](./intro-to-julia-ZH/02.字符串.ipynb)
26 | - [03.数据结构](./intro-to-julia-ZH/03.数据结构.ipynb)
27 | - [04.循环](./intro-to-julia-ZH/04.循环.ipynb)
28 | - [05.条件判断](./intro-to-julia-ZH/05.条件判断.ipynb)
29 | - [06.函数](./intro-to-julia-ZH/06.函数.ipynb)
30 | (与[简短版 01.了解 Julia 中函数一节][short-01.了解_Julia]大纲一致,举例略有区别,另外附加了练习。)
31 | - [07.包(Packages)](./intro-to-julia-ZH/07.包(Packages).ipynb)
32 | (比[简短版 03.包的使用][short-03.包的使用]多了练习)
33 | - [08.绘图](./intro-to-julia-ZH/08.绘图.ipynb)
34 | (比[简短版 04.Julia 绘图简介][short-04.Julia_绘图简介]多了练习)
35 | - [09.Julia 很快](./intro-to-julia-ZH/09.Julia很快.ipynb)
36 | (与[简短版 05.Julia很快][short-05.Julia很快]重复)
37 | - [10.多重派发](./intro-to-julia-ZH/10.多重派发.ipynb)
38 | (与[简短版 06.多重派发][short-06.多重派发]最后一部分的例子略有区别,多了练习。)
39 | - [10.1多重派发](./intro-to-julia-ZH/10.1多重派发.ipynb)
40 | (多重派发-第二部分)
41 | - [11.基本线性代数](./intro-to-julia-ZH/11.基本线性代数.ipynb)
42 | (与[简短版 02.Julia 中的线性代数][short-02.Julia_中的线性代数]前半部分一致,后面增加了对线性方程组解的讨论,并带有练习。)
43 |
44 |
45 | [完整版]: ./intro-to-julia-ZH/
46 | [简短版]: ./intro-to-julia-ZH/简短版/
47 | [short-00.上手Jupyter_notebook]: ./intro-to-julia-ZH/简短版/00.上手Jupyter_notebook.ipynb
48 | [short-01.了解_Julia]: ./intro-to-julia-ZH/简短版/01.了解Julia.ipynb
49 | [short-02.Julia_中的线性代数]: ./intro-to-julia-ZH/简短版/02.Julia中的线性代数.ipynb
50 | [short-03.包的使用]: ./intro-to-julia-ZH/简短版/03.包的使用.ipynb
51 | [short-04.Julia_绘图简介]: ./intro-to-julia-ZH/简短版/04.Julia绘图简介.ipynb
52 | [short-05.Julia很快]: ./intro-to-julia-ZH/简短版/05.Julia很快.ipynb
53 | [short-06.多重派发]: ./intro-to-julia-ZH/简短版/06.多重派发.ipynb
54 |
--------------------------------------------------------------------------------
/zh-cn/intro-to-julia-ZH/00.上手Jupyter_notebook.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "## 上手 Jupyter notebook\n",
8 | "\n",
9 | "### 目录\n",
10 | "- [运行代码块(cell)](#运行代码块(cell))\n",
11 | "- [查看 Julia 的帮助文档](#查看-Julia-的帮助文档)\n",
12 | "- [使用 shell 命令](#使用-shell-命令)"
13 | ]
14 | },
15 | {
16 | "cell_type": "markdown",
17 | "metadata": {},
18 | "source": [
19 | "### 运行代码块(cell)\n",
20 | "选中一个代码块之后(选中的代码块会被绿色矩形框起来),有以下几种方法可以运行它:\n",
21 | "1. 按下组合键 `Shift` + `Enter`\n",
22 | "2. 点击上方工具栏中的 **运行(Run)** 按钮\n",
23 | "3. 按下组合键 `Ctrl` + `Enter`"
24 | ]
25 | },
26 | {
27 | "cell_type": "code",
28 | "execution_count": null,
29 | "metadata": {},
30 | "outputs": [],
31 | "source": [
32 | "1 + 1\n",
33 | "2 + 2"
34 | ]
35 | },
36 | {
37 | "cell_type": "markdown",
38 | "metadata": {},
39 | "source": [
40 | "新手注意!执行一个代码块时默认打印最后一行。想要什么也不输出,在最后一行的行尾加上分号 `;` 就行了。如下所示"
41 | ]
42 | },
43 | {
44 | "cell_type": "code",
45 | "execution_count": null,
46 | "metadata": {},
47 | "outputs": [],
48 | "source": [
49 | "1 + 1\n",
50 | "2 + 2;"
51 | ]
52 | },
53 | {
54 | "cell_type": "markdown",
55 | "metadata": {},
56 | "source": [
57 | "### 查看 Julia 的帮助文档\n",
58 | "\n",
59 | "遇到不熟悉的 Julia 函数或语法,在函数名或表达式前加个问号 `?` 就可以查询对应的帮助文档。(在Julia的REPL中也好使哦!)"
60 | ]
61 | },
62 | {
63 | "cell_type": "code",
64 | "execution_count": null,
65 | "metadata": {},
66 | "outputs": [],
67 | "source": [
68 | "?println"
69 | ]
70 | },
71 | {
72 | "cell_type": "markdown",
73 | "metadata": {},
74 | "source": [
75 | "### 使用 shell 命令\n",
76 | "\n",
77 | "在 shell 命令前加上分号 `;` 即可,像这样:\n",
78 | "\n",
79 | "> 【译注】以下命令仅在 macOS 和 *nix 系统下可用"
80 | ]
81 | },
82 | {
83 | "cell_type": "code",
84 | "execution_count": null,
85 | "metadata": {},
86 | "outputs": [],
87 | "source": [
88 | ";ls"
89 | ]
90 | },
91 | {
92 | "cell_type": "code",
93 | "execution_count": null,
94 | "metadata": {},
95 | "outputs": [],
96 | "source": [
97 | ";pwd"
98 | ]
99 | },
100 | {
101 | "cell_type": "markdown",
102 | "metadata": {},
103 | "source": [
104 | "shell 命令在 Julia 的 REPL 中也好使哦!\n",
105 | "\n",
106 | "> 【译注】对于 Windows 系统,你需要使用 Windows cmd 命令,像这样:"
107 | ]
108 | },
109 | {
110 | "cell_type": "code",
111 | "execution_count": null,
112 | "metadata": {},
113 | "outputs": [],
114 | "source": [
115 | ";cd"
116 | ]
117 | },
118 | {
119 | "cell_type": "markdown",
120 | "metadata": {},
121 | "source": [
122 | "但并不是所有的 cmd 命令都可以使用。如 `dir` 命令不可用,会报错"
123 | ]
124 | },
125 | {
126 | "cell_type": "code",
127 | "execution_count": null,
128 | "metadata": {},
129 | "outputs": [],
130 | "source": [
131 | ";dir"
132 | ]
133 | },
134 | {
135 | "cell_type": "markdown",
136 | "metadata": {},
137 | "source": [
138 | "中文社区的讨论贴\n",
139 | "- [Windows 中 Julia 的 shell 模式与 `;dir` 报错 - 综合讨论区 / 心得体会 - Julia中文社区](https://discourse.juliacn.com/t/topic/2890)"
140 | ]
141 | }
142 | ],
143 | "metadata": {
144 | "kernelspec": {
145 | "display_name": "Julia 1.0.5",
146 | "language": "julia",
147 | "name": "julia-1.0"
148 | },
149 | "language_info": {
150 | "file_extension": ".jl",
151 | "mimetype": "application/julia",
152 | "name": "julia",
153 | "version": "1.0.5"
154 | }
155 | },
156 | "nbformat": 4,
157 | "nbformat_minor": 2
158 | }
159 |
--------------------------------------------------------------------------------
/zh-cn/intro-to-julia-ZH/简短版/00.上手Jupyter_notebook.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "## 上手 Jupyter notebook\n",
8 | "\n",
9 | "### 目录\n",
10 | "- [运行代码块(cell)](#运行代码块(cell))\n",
11 | "- [查看 Julia 的帮助文档](#查看-Julia-的帮助文档)\n",
12 | "- [使用 shell 命令](#使用-shell-命令)"
13 | ]
14 | },
15 | {
16 | "cell_type": "markdown",
17 | "metadata": {},
18 | "source": [
19 | "### 运行代码块(cell)\n",
20 | "选中一个代码块之后(选中的代码块会被绿色矩形框起来),有以下几种方法可以运行它:\n",
21 | "1. 按下组合键 `Shift` + `Enter`\n",
22 | "2. 点击上方工具栏中的 **运行(Run)** 按钮\n",
23 | "3. 按下组合键 `Ctrl` + `Enter`"
24 | ]
25 | },
26 | {
27 | "cell_type": "code",
28 | "execution_count": null,
29 | "metadata": {},
30 | "outputs": [],
31 | "source": [
32 | "1 + 1\n",
33 | "2 + 2"
34 | ]
35 | },
36 | {
37 | "cell_type": "markdown",
38 | "metadata": {},
39 | "source": [
40 | "新手注意!执行一个代码块时默认打印最后一行。想要什么也不输出,在最后一行的行尾加上分号 `;` 就行了。如下所示"
41 | ]
42 | },
43 | {
44 | "cell_type": "code",
45 | "execution_count": null,
46 | "metadata": {},
47 | "outputs": [],
48 | "source": [
49 | "1 + 1\n",
50 | "2 + 2;"
51 | ]
52 | },
53 | {
54 | "cell_type": "markdown",
55 | "metadata": {},
56 | "source": [
57 | "### 查看 Julia 的帮助文档\n",
58 | "\n",
59 | "遇到不熟悉的 Julia 函数或语法,在函数名或表达式前加个问号 `?` 就可以查询对应的帮助文档。(在Julia的REPL中也好使哦!)"
60 | ]
61 | },
62 | {
63 | "cell_type": "code",
64 | "execution_count": null,
65 | "metadata": {},
66 | "outputs": [],
67 | "source": [
68 | "?println"
69 | ]
70 | },
71 | {
72 | "cell_type": "markdown",
73 | "metadata": {},
74 | "source": [
75 | "### 使用 shell 命令\n",
76 | "\n",
77 | "在 shell 命令前加上分号 `;` 即可,像这样:\n",
78 | "\n",
79 | "> 【译注】以下命令仅在 macOS 和 *nix 系统下可用"
80 | ]
81 | },
82 | {
83 | "cell_type": "code",
84 | "execution_count": null,
85 | "metadata": {},
86 | "outputs": [],
87 | "source": [
88 | ";ls"
89 | ]
90 | },
91 | {
92 | "cell_type": "code",
93 | "execution_count": null,
94 | "metadata": {},
95 | "outputs": [],
96 | "source": [
97 | ";pwd"
98 | ]
99 | },
100 | {
101 | "cell_type": "markdown",
102 | "metadata": {},
103 | "source": [
104 | "shell 命令在 Julia 的 REPL 中也好使哦!\n",
105 | "\n",
106 | "> 【译注】对于 Windows 系统,你需要使用 Windows cmd 命令,像这样:"
107 | ]
108 | },
109 | {
110 | "cell_type": "code",
111 | "execution_count": null,
112 | "metadata": {},
113 | "outputs": [],
114 | "source": [
115 | ";cd"
116 | ]
117 | },
118 | {
119 | "cell_type": "markdown",
120 | "metadata": {},
121 | "source": [
122 | "但并不是所有的 cmd 命令都可以使用。如 `dir` 命令不可用,会报错"
123 | ]
124 | },
125 | {
126 | "cell_type": "code",
127 | "execution_count": null,
128 | "metadata": {},
129 | "outputs": [],
130 | "source": [
131 | ";dir"
132 | ]
133 | },
134 | {
135 | "cell_type": "markdown",
136 | "metadata": {},
137 | "source": [
138 | "中文社区的讨论贴\n",
139 | "- [Windows 中 Julia 的 shell 模式与 `;dir` 报错 - 综合讨论区 / 心得体会 - Julia中文社区](https://discourse.juliacn.com/t/topic/2890)"
140 | ]
141 | }
142 | ],
143 | "metadata": {
144 | "kernelspec": {
145 | "display_name": "Julia 1.0.5",
146 | "language": "julia",
147 | "name": "julia-1.0"
148 | },
149 | "language_info": {
150 | "file_extension": ".jl",
151 | "mimetype": "application/julia",
152 | "name": "julia",
153 | "version": "1.0.5"
154 | }
155 | },
156 | "nbformat": 4,
157 | "nbformat_minor": 2
158 | }
159 |
--------------------------------------------------------------------------------