├── .github └── workflows │ ├── cache.yml │ ├── ci.yml │ ├── coverage.yml │ ├── pdf.yml │ └── preview.yml ├── .gitignore ├── LICENSE ├── Makefile ├── README.md ├── conf.py ├── environment.yml ├── requirements.txt ├── scripts ├── build-website.sh ├── execution-test.sh ├── install_latex.sh ├── linkchecker-test.sh └── texlive.profile ├── sitemap.xml ├── source ├── _static │ ├── includes │ │ ├── header.raw │ │ └── lecture_howto_py.raw │ ├── lecture_specific │ │ ├── aiyagari │ │ │ └── aiyagari_obit.pdf │ │ ├── arellano │ │ │ ├── arellano_bond_prices.png │ │ │ ├── arellano_bond_prices_2.png │ │ │ ├── arellano_default_probs.png │ │ │ ├── arellano_time_series.png │ │ │ └── arellano_value_funcs.png │ │ ├── cake_eating_numerical │ │ │ └── analytical.py │ │ ├── career │ │ │ └── career_solutions_ex1_py.png │ │ ├── coleman_policy_iter │ │ │ └── solve_time_iter.py │ │ ├── finite_markov │ │ │ ├── hamilton_graph.png │ │ │ ├── mc_aperiodicity1.gv │ │ │ ├── mc_aperiodicity1.png │ │ │ ├── mc_aperiodicity2.gv │ │ │ ├── mc_aperiodicity2.png │ │ │ ├── mc_ex1_plot.png │ │ │ ├── mc_irreducibility1.gv │ │ │ ├── mc_irreducibility1.png │ │ │ ├── mc_irreducibility2.gv │ │ │ ├── mc_irreducibility2.png │ │ │ ├── web_graph.png │ │ │ └── web_graph_data.txt │ │ ├── heavy_tails │ │ │ ├── light_heavy_fig1.png │ │ │ └── rank_size_fig1.png │ │ ├── ifp │ │ │ ├── ifp_agg_savings.png │ │ │ ├── ifp_histogram.png │ │ │ ├── ifp_policies.png │ │ │ └── pi2.pdf │ │ ├── kalman │ │ │ ├── kalman_ex3.png │ │ │ ├── kl_ex1_fig.png │ │ │ └── kl_ex2_fig.png │ │ ├── lake_model │ │ │ └── lake_distribution_wages.png │ │ ├── linear_algebra │ │ │ └── course_notes.pdf │ │ ├── linear_models │ │ │ ├── covariance_stationary.png │ │ │ ├── ensemble_mean.png │ │ │ ├── iteration_notes.pdf │ │ │ ├── paths_and_stationarity.png │ │ │ ├── solution_lss_ex1.png │ │ │ ├── solution_lss_ex2.png │ │ │ ├── tsh.png │ │ │ ├── tsh0.png │ │ │ └── tsh_hg.png │ │ ├── lqcontrol │ │ │ ├── solution_lqc_ex1.png │ │ │ ├── solution_lqc_ex2.png │ │ │ ├── 
solution_lqc_ex3_g1.png │ │ │ ├── solution_lqc_ex3_g10.png │ │ │ └── solution_lqc_ex3_g50.png │ │ ├── markov_perf │ │ │ ├── duopoly_mpe.py │ │ │ ├── judd_fig1.png │ │ │ ├── judd_fig2.png │ │ │ └── mpe_vs_monopolist.png │ │ ├── mccall │ │ │ ├── mccall_resw_alpha.py │ │ │ ├── mccall_resw_beta.py │ │ │ ├── mccall_resw_c.py │ │ │ ├── mccall_resw_gamma.py │ │ │ └── mccall_vf_plot1.py │ │ ├── mccall_model_with_separation │ │ │ ├── mccall_resw_alpha.png │ │ │ ├── mccall_resw_beta.png │ │ │ └── mccall_resw_c.png │ │ ├── mle │ │ │ └── fp.dta │ │ ├── odu │ │ │ └── odu.py │ │ ├── ols │ │ │ ├── maketable1.dta │ │ │ ├── maketable2.dta │ │ │ └── maketable4.dta │ │ ├── optgrowth │ │ │ ├── 3ndp.pdf │ │ │ ├── bellman_operator.py │ │ │ ├── cd_analytical.py │ │ │ ├── solution_og_ex2.png │ │ │ └── solve_model.py │ │ ├── optgrowth_fast │ │ │ ├── ogm.py │ │ │ └── ogm_crra.py │ │ ├── pandas_panel │ │ │ ├── countries.csv │ │ │ ├── employ.csv │ │ │ ├── realwage.csv │ │ │ └── venn_diag.png │ │ ├── perm_income │ │ │ └── perm_inc_ir.py │ │ ├── schelling │ │ │ ├── schelling_fig1.png │ │ │ ├── schelling_fig2.png │ │ │ ├── schelling_fig3.png │ │ │ └── schelling_fig4.png │ │ ├── short_path │ │ │ ├── Graph-networkx2.ipynb │ │ │ ├── graph.png │ │ │ ├── graph2.png │ │ │ ├── graph3.png │ │ │ └── graph4.png │ │ ├── troubleshooting │ │ │ └── launch.png │ │ ├── uncertainty_traps │ │ │ ├── uncertainty_traps_45.png │ │ │ ├── uncertainty_traps_mu.png │ │ │ └── uncertainty_traps_sim.png │ │ ├── wald_friedman │ │ │ ├── wald_class.py │ │ │ ├── wald_dec_rule.png │ │ │ ├── wald_dec_rule.tex │ │ │ └── wf_first_pass.py │ │ └── wealth_dynamics │ │ │ └── htop_again.png │ ├── qe-logo-large.png │ └── quant-econ.bib └── rst │ ├── .ipynb_checkpoints │ ├── Untitled-checkpoint.ipynb │ └── changethis-checkpoint.ipynb │ ├── 404.rst │ ├── _static │ ├── about_lectures.rst │ ├── aiyagari.rst │ ├── ar1_processes.rst │ ├── cake_eating_numerical.rst │ ├── cake_eating_problem.rst │ ├── career.rst │ ├── cass_koopmans_1.rst │ ├── 
cass_koopmans_2.rst │ ├── coleman_policy_iter.rst │ ├── complex_and_trig.rst │ ├── egm_policy_iter.rst │ ├── exchangeable.rst │ ├── finite_markov.rst │ ├── geom_series.rst │ ├── harrison_kreps.rst │ ├── heavy_tails.rst │ ├── ifp.rst │ ├── ifp_advanced.rst │ ├── index.rst │ ├── index_asset_pricing.rst │ ├── index_data_and_empirics.rst │ ├── index_information.rst │ ├── index_intro_dynam.rst │ ├── index_lq_control.rst │ ├── index_multi_agent_models.rst │ ├── index_savings_growth.rst │ ├── index_search.rst │ ├── index_toc.rst │ ├── index_tools_and_techniques.rst │ ├── inventory_dynamics.rst │ ├── jv.rst │ ├── kalman.rst │ ├── kesten_processes.rst │ ├── lake_model.rst │ ├── likelihood_bayes.rst │ ├── likelihood_ratio_process.rst │ ├── linear_algebra.rst │ ├── linear_models.rst │ ├── lln_clt.rst │ ├── lq_inventories.rst │ ├── lqcontrol.rst │ ├── markov_asset.rst │ ├── markov_perf.rst │ ├── mccall_correlated.rst │ ├── mccall_fitted_vfi.rst │ ├── mccall_model.rst │ ├── mccall_model_with_separation.rst │ ├── mle.rst │ ├── multi_hyper.rst │ ├── multivariate_normal.rst │ ├── navy_captain.rst │ ├── odu.rst │ ├── ols.rst │ ├── optgrowth.rst │ ├── optgrowth_fast.rst │ ├── pandas_panel.rst │ ├── perm_income.rst │ ├── perm_income_cons.rst │ ├── rational_expectations.rst │ ├── re_with_feedback.rst │ ├── samuelson.rst │ ├── scalar_dynam.rst │ ├── schelling.rst │ ├── search.rst │ ├── short_path.rst │ ├── sir_model.rst │ ├── status.rst │ ├── time_series_with_matrices.rst │ ├── troubleshooting.rst │ ├── uncertainty_traps.rst │ ├── wald_friedman.rst │ ├── wealth_dynamics.rst │ └── zreferences.rst └── theme └── minimal ├── static ├── css │ ├── base.css │ └── qe.python.css ├── img │ ├── code-block-fade.png │ ├── powered-by-NumFOCUS-orange.svg │ ├── py-logo.png │ ├── qe-logo.png │ ├── search-icon.png │ └── sloan_logo.png ├── js │ └── base.js └── sloan_logo.png └── templates ├── error_report_template.html ├── html.tpl ├── latex.tpl └── latex_book.tpl /.github/workflows/cache.yml: 
-------------------------------------------------------------------------------- 1 | # !!! 2 | # Once https://github.com/actions/cache/issues/63 is merged 3 | # this can be enabled for daily cache for full HTML previews 4 | # !!! 5 | # name: Build Website Cache (Nightly) 6 | # on: 7 | # schedule: 8 | # - cron: '1 0 * * *' 9 | # jobs: 10 | # build-cache: 11 | # name: Build Website 12 | # runs-on: ubuntu-latest 13 | # steps: 14 | # - name: Checkout 15 | # uses: actions/checkout@v2 16 | # - name: Setup Anaconda 17 | # uses: goanpeca/setup-miniconda@v1 18 | # with: 19 | # auto-update-conda: true 20 | # auto-activate-base: true 21 | # miniconda-version: 'latest' 22 | # python-version: 3.7 23 | # environment-file: environment.yml 24 | # activate-environment: qe-lectures 25 | # - name: Checkout QuantEcon theme 26 | # uses: actions/checkout@v2 27 | # with: 28 | # repository: QuantEcon/lecture-python.theme 29 | # token: ${{ secrets.ACTIONS_PAT }} 30 | # path: theme/lecture-python.theme 31 | # - name: Get current date 32 | # id: date 33 | # run: echo "::set-output name=date::$(date +'%Y-%m-%d')" 34 | # - name: Cache Website Build Folder 35 | # id: cache 36 | # uses: actions/cache@v1 37 | # with: 38 | # path: _build 39 | # key: cache-sphinx-${{ steps.date.outputs.date }} 40 | # - name: Build Website files 41 | # shell: bash -l {0} 42 | # run: | 43 | # make website THEMEPATH=theme/lecture-python.theme 44 | # ls _build/website/jupyter_html/* 45 | name: Build Website Cache 46 | on: 47 | push: 48 | branches: 49 | - master 50 | jobs: 51 | build-cache: 52 | name: Build Website 53 | runs-on: ubuntu-latest 54 | steps: 55 | - name: Checkout 56 | uses: actions/checkout@v2 57 | - name: Setup Anaconda 58 | uses: conda-incubator/setup-miniconda@v2 59 | with: 60 | auto-update-conda: true 61 | auto-activate-base: true 62 | miniconda-version: 'latest' 63 | python-version: 3.8 64 | environment-file: environment.yml 65 | activate-environment: lecture-python 66 | - name: Checkout QuantEcon 
theme 67 | uses: actions/checkout@v2 68 | with: 69 | repository: QuantEcon/lecture-python.theme 70 | token: ${{ secrets.ACTIONS_PAT }} 71 | path: theme/lecture-python.theme 72 | - name: Cache Website Build Folder 73 | id: cache 74 | uses: actions/cache@v1 75 | with: 76 | path: _build 77 | key: cache-sphinx 78 | - name: Build Website files 79 | shell: bash -l {0} 80 | run: | 81 | ls theme/lecture-python.theme 82 | make website THEMEPATH=theme/lecture-python.theme 83 | ls _build/website/jupyter_html/* -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: Execution and Link Checks 2 | on: [pull_request] 3 | jobs: 4 | tests: 5 | runs-on: ubuntu-latest 6 | steps: 7 | - name: Checkout 8 | uses: actions/checkout@v2 9 | - name: Setup Anaconda 10 | uses: conda-incubator/setup-miniconda@v2 11 | with: 12 | auto-update-conda: true 13 | auto-activate-base: true 14 | miniconda-version: 'latest' 15 | python-version: 3.8 16 | environment-file: environment.yml 17 | activate-environment: lecture-python 18 | - name: Display Conda Environment Versions 19 | shell: bash -l {0} 20 | run: conda list 21 | - name: Display Pip Versions 22 | shell: bash -l {0} 23 | run: pip list 24 | - name: Get Changed Files 25 | id: files 26 | uses: jitterbit/get-changed-files@v1 27 | - name: Run Execution Tests 28 | shell: bash -l {0} 29 | run: bash scripts/execution-test.sh "${{ steps.files.outputs.added_modified }}" 30 | - name: Run Linkchecker 31 | shell: bash -l {0} 32 | run: bash scripts/linkchecker-test.sh "${{ steps.files.outputs.added_modified }}" -------------------------------------------------------------------------------- /.github/workflows/coverage.yml: -------------------------------------------------------------------------------- 1 | name: Execution and Link Testing (Nightly) 2 | on: 3 | schedule: 4 | - cron: '0 17 * * *' 5 | jobs: 6 | coverage: 7 
| name: Run Coverage 8 | runs-on: ubuntu-latest 9 | steps: 10 | - name: Checkout 11 | uses: actions/checkout@v2 12 | - name: Setup Anaconda 13 | uses: conda-incubator/setup-miniconda@v2 14 | with: 15 | auto-update-conda: true 16 | auto-activate-base: true 17 | miniconda-version: 'latest' 18 | python-version: 3.8 19 | environment-file: environment.yml 20 | activate-environment: lecture-python 21 | - name: Run Execution Tests 22 | shell: bash -l {0} 23 | run: make coverage 24 | linkchecker: 25 | name: Run linkchecker 26 | runs-on: ubuntu-latest 27 | steps: 28 | - name: Checkout 29 | uses: actions/checkout@v2 30 | - name: Setup Anaconda 31 | uses: conda-incubator/setup-miniconda@v2 32 | with: 33 | auto-update-conda: true 34 | auto-activate-base: true 35 | miniconda-version: 'latest' 36 | python-version: 3.8 37 | environment-file: environment.yml 38 | activate-environment: lecture-python 39 | - name: Run Linkchecker 40 | shell: bash -l {0} 41 | run: make linkcheck -------------------------------------------------------------------------------- /.github/workflows/pdf.yml: -------------------------------------------------------------------------------- 1 | name: Build PDF 2 | on: 3 | push: 4 | branches: 5 | - master 6 | jobs: 7 | pdf: 8 | name: Build PDF 9 | runs-on: ubuntu-latest 10 | steps: 11 | - name: Checkout 12 | uses: actions/checkout@v2 13 | - name: Install Fonts 14 | run: | 15 | sudo apt-get install -y fonts-liberation 16 | sudo apt-get install -y fonts-cmu 17 | - name: TexLive Cache 18 | id: cache 19 | uses: actions/cache@v1 20 | with: 21 | path: /tmp/texlive 22 | key: cache-texlive 23 | - name: Install & Update TexLive 24 | shell: bash -l {0} 25 | run: | 26 | bash scripts/install_latex.sh 27 | echo 'export PATH=/tmp/texlive/bin/x86_64-linux:$PATH' >> ~/.bash_profile 28 | source ~/.bash_profile 29 | xelatex --version 30 | - name: Setup Anaconda 31 | uses: conda-incubator/setup-miniconda@v2 32 | with: 33 | auto-update-conda: true 34 | auto-activate-base: true 35 |
miniconda-version: 'latest' 36 | python-version: 3.8 37 | environment-file: environment.yml 38 | activate-environment: lecture-python 39 | - name: Checkout QuantEcon theme 40 | uses: actions/checkout@v2 41 | with: 42 | repository: QuantEcon/lecture-python.theme 43 | token: ${{ secrets.ACTIONS_PAT }} 44 | path: theme/lecture-python.theme 45 | - name: Build PDF 46 | shell: bash -l {0} 47 | run: | 48 | more ~/.bash_profile 49 | echo 'export PATH=/tmp/texlive/bin/x86_64-linux:$PATH' >> ~/.bash_profile 50 | source ~/.bash_profile 51 | more ~/.bash_profile 52 | ls theme/lecture-python.theme 53 | make pdf 54 | - uses: actions/upload-artifact@v2 55 | with: 56 | name: pdf 57 | path: _build/jupyterpdf/texbook/quantitative_economics_with_python.pdf -------------------------------------------------------------------------------- /.github/workflows/preview.yml: -------------------------------------------------------------------------------- 1 | name: 'Netlify Preview Deploy' 2 | on: 3 | pull_request: 4 | types: ['opened', 'edited', 'synchronize'] 5 | jobs: 6 | deploy-preview: 7 | name: 'Deploy' 8 | runs-on: ubuntu-latest 9 | steps: 10 | - name: Checkout 11 | uses: actions/checkout@v2 12 | - name: Setup Anaconda 13 | uses: conda-incubator/setup-miniconda@v2 14 | with: 15 | auto-update-conda: true 16 | auto-activate-base: true 17 | miniconda-version: 'latest' 18 | python-version: 3.8 19 | environment-file: environment.yml 20 | activate-environment: lecture-python 21 | - name: Get Changed Files 22 | id: files 23 | uses: jitterbit/get-changed-files@v1 24 | - name: Checkout QuantEcon theme 25 | if: github.event.pull_request.head.repo.full_name == github.repository 26 | uses: actions/checkout@v2 27 | with: 28 | repository: QuantEcon/lecture-python.theme 29 | token: ${{ secrets.ACTIONS_PAT }} 30 | path: theme/lecture-python.theme 31 | # - name: Get current date 32 | # id: date 33 | # run: echo "::set-output name=date::$(date +'%Y-%m-%d')" 34 | - name: Check Sphinx Cache 35 | id: cache 
36 | uses: actions/cache@v1 37 | with: 38 | path: _build 39 | key: cache-sphinx 40 | # key: cache-sphinx-${{ steps.date.outputs.date }} 41 | - name: Build website files 42 | shell: bash -l {0} 43 | run: | 44 | bash scripts/build-website.sh "${{ steps.files.outputs.added_modified }}" "${{ github.event.pull_request.head.repo.full_name == github.repository }}" 45 | - name: Preview Deploy to Netlify 46 | uses: nwtgck/actions-netlify@v1.1 47 | if: env.BUILD_NETLIFY == 'true' && github.event.pull_request.head.repo.full_name == github.repository 48 | with: 49 | publish-dir: './_build/website/jupyter_html' 50 | production-branch: master 51 | github-token: ${{ secrets.GITHUB_TOKEN }} 52 | deploy-message: "Preview Deploy from GitHub Actions" 53 | env: 54 | NETLIFY_AUTH_TOKEN: ${{ secrets.NETLIFY_AUTH_TOKEN }} 55 | NETLIFY_SITE_ID: ${{ secrets.NETLIFY_SITE_ID }} 56 | - name: Save Build as Artifact (Forks) 57 | uses: actions/upload-artifact@v1 58 | if: github.event.pull_request.head.repo.full_name != github.repository 59 | with: 60 | name: website 61 | path: _build/website/jupyter_html -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | _build/ 2 | dask-worker-space/ 3 | [._]*.s[a-v][a-z] 4 | [._]*.sw[a-p] 5 | [._]s[a-v][a-z] 6 | [._]sw[a-p] 7 | .DS_Store 8 | .DS_Store? 9 | **/.DS_Store 10 | venv/ 11 | theme/lecture-python.theme 12 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2020, QuantEcon 4 | All rights reserved. 
5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | * Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | * Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | * Neither the name of the copyright holder nor the names of its 17 | contributors may be used to endorse or promote products derived from 18 | this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | SHELL := bash 2 | # 3 | # Makefile for Sphinx Extension Test Cases 4 | # 5 | 6 | # You can set these variables from the command line. 
7 | SPHINXOPTS = -c "./" 8 | SPHINXBUILD = python -msphinx 9 | SPHINXPROJ = lecture-python 10 | SOURCEDIR = source/rst 11 | BUILDDIR = _build 12 | BUILDWEBSITE = _build/website 13 | BUILDCOVERAGE = _build/coverage 14 | BUILDPDF = _build/pdf 15 | PORT = 8890 16 | FILES = 17 | THEMEPATH = theme/minimal 18 | TEMPLATEPATH = $(THEMEPATH)/templates 19 | 20 | # Put it first so that "make" without argument is like "make help". 21 | help: 22 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(FILES) $(SPHINXOPTS) $(O) 23 | 24 | .PHONY: help Makefile 25 | 26 | # Install requiremenets for building lectures. 27 | setup: 28 | pip install -r requirements.txt 29 | 30 | preview: 31 | ifeq (,$(filter $(target),website Website)) 32 | cd $(BUILDWEBSITE)/jupyter_html && python -m http.server $(PORT) 33 | else 34 | ifdef lecture 35 | cd $(BUILDDIR)/jupyter/ && jupyter notebook --port $(PORT) --port-retries=0 $(basename $(lecture)).ipynb 36 | else 37 | cd $(BUILDDIR)/jupyter/ && jupyter notebook --port $(PORT) --port-retries=0 38 | endif 39 | endif 40 | 41 | clean-coverage: 42 | rm -rf $(BUILDCOVERAGE) 43 | 44 | clean-website: 45 | rm -rf $(BUILDWEBSITE) 46 | 47 | clean-pdf: 48 | rm -rf $(BUILDDIR)/jupyterpdf 49 | 50 | coverage: 51 | ifneq ($(strip $(parallel)),) 52 | @$(SPHINXBUILD) -M jupyter "$(SOURCEDIR)" "$(BUILDCOVERAGE)" $(FILES) $(SPHINXOPTS) $(O) -D jupyter_make_coverage=1 -D jupyter_execute_notebooks=1 -D jupyter_ignore_skip_test=0 -D jupyter_theme_path="$(THEMEPATH)" -D jupyter_template_path="$(TEMPLATEPATH)" -D jupyter_template_coverage_file_path="error_report_template.html" -D jupyter_number_workers=$(parallel) 53 | else 54 | @$(SPHINXBUILD) -M jupyter "$(SOURCEDIR)" "$(BUILDCOVERAGE)" $(FILES) $(SPHINXOPTS) $(O) -D jupyter_make_coverage=1 -D jupyter_execute_notebooks=1 -D jupyter_ignore_skip_test=0 -D jupyter_theme_path="$(THEMEPATH)" -D jupyter_template_path="$(TEMPLATEPATH)" -D jupyter_template_coverage_file_path="error_report_template.html" 55 | endif 56 | 57 | 
website: 58 | echo "Theme: $(THEMEPATH)" 59 | ifneq ($(strip $(parallel)),) 60 | @$(SPHINXBUILD) -M jupyter "$(SOURCEDIR)" "$(BUILDWEBSITE)" $(FILES) $(SPHINXOPTS) $(O) -D jupyter_make_site=1 -D jupyter_generate_html=1 -D jupyter_download_nb=1 -D jupyter_execute_notebooks=1 -D jupyter_target_html=1 -D jupyter_download_nb_image_urlpath="https://s3-ap-southeast-2.amazonaws.com/python.quantecon.org/_static/" -D jupyter_images_markdown=0 -D jupyter_theme_path="$(THEMEPATH)" -D jupyter_template_path="$(TEMPLATEPATH)" -D jupyter_html_template="html.tpl" -D jupyter_download_nb_urlpath="https://python-programming.quantecon.org/" -D jupyter_coverage_dir=$(BUILDCOVERAGE) -D jupyter_number_workers=$(parallel) 61 | 62 | else 63 | @$(SPHINXBUILD) -M jupyter "$(SOURCEDIR)" "$(BUILDWEBSITE)" $(FILES) $(SPHINXOPTS) $(O) -D jupyter_make_site=1 -D jupyter_generate_html=1 -D jupyter_download_nb=1 -D jupyter_execute_notebooks=1 -D jupyter_target_html=1 -D jupyter_download_nb_image_urlpath="https://s3-ap-southeast-2.amazonaws.com/python.quantecon.org/_static/" -D jupyter_images_markdown=0 -D jupyter_theme_path="$(THEMEPATH)" -D jupyter_template_path="$(TEMPLATEPATH)" -D jupyter_html_template="html.tpl" -D jupyter_download_nb_urlpath="https://python-programming.quantecon.org/" -D jupyter_coverage_dir=$(BUILDCOVERAGE) 64 | endif 65 | 66 | pdf: 67 | ifneq ($(strip $(parallel)),) 68 | @$(SPHINXBUILD) -M jupyterpdf "$(SOURCEDIR)" "$(BUILDDIR)" $(FILES) $(SPHINXOPTS) $(O) -D jupyter_latex_template="latex.tpl" -D jupyter_theme_path="$(THEMEPATH)" -D jupyter_template_path="$(TEMPLATEPATH)" -D jupyter_latex_template_book="latex_book.tpl" -D jupyter_images_markdown=1 -D jupyter_execute_notebooks=1 -D jupyter_pdf_book=1 -D jupyter_target_pdf=1 -D jupyter_number_workers=$(parallel) 69 | 70 | else 71 | @$(SPHINXBUILD) -M jupyterpdf "$(SOURCEDIR)" "$(BUILDDIR)" $(FILES) $(SPHINXOPTS) $(O) -D jupyter_theme_path="$(THEMEPATH)" -D jupyter_template_path="$(TEMPLATEPATH)" -D 
jupyter_latex_template="latex.tpl" -D jupyter_latex_template_book="latex_book.tpl" -D jupyter_images_markdown=1 -D jupyter_execute_notebooks=1 -D jupyter_pdf_book=1 -D jupyter_target_pdf=1 72 | endif 73 | 74 | constructor-pdf: 75 | ifneq ($(strip $(parallel)),) 76 | @$(SPHINXBUILD) -M jupyter "$(SOURCEDIR)" "$(BUILDPDF)" $(FILES) $(SPHINXOPTS) $(O) -D jupyter_images_markdown=1 -D jupyter_execute_notebooks=1 -D jupyter_number_workers=$(parallel) 77 | 78 | else 79 | @$(SPHINXBUILD) -M jupyter "$(SOURCEDIR)" "$(BUILDPDF)" $(FILES) $(SPHINXOPTS) $(O) -D jupyter_images_markdown=1 -D jupyter_execute_notebooks=1 80 | endif 81 | 82 | notebooks: 83 | make jupyter 84 | 85 | # Catch-all target: route all unknown targets to Sphinx using the new 86 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 87 | %: Makefile 88 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(FILES) $(SPHINXOPTS) $(O) -D jupyter_allow_html_only=1 89 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # lecture-python 2 | 3 | Source files for https://python.quantecon.org 4 | 5 | For a guide on contributing to this repository click [here](https://quantecon.org/contribute-lectures/) 6 | -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | 2 | name: lecture-python 3 | channels: 4 | - default 5 | dependencies: 6 | - python=3.8 7 | - anaconda=2020.07 8 | - pip 9 | - pip: 10 | - quantecon 11 | - interpolation 12 | - sphinxcontrib-jupyter 13 | - sphinxcontrib-bibtex==1.0 14 | - joblib 15 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | sphinxcontrib-bibtex 2 | sphinxcontrib-jupyter 3 | quantecon 
-------------------------------------------------------------------------------- /scripts/build-website.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | MODIFIED_FILES="$1" 4 | PRIVATE_THEME=$2 5 | 6 | # Find List of RST Files 7 | RST_FILES="" 8 | for F in $MODIFIED_FILES 9 | do 10 | if [[ $F == *.rst ]] 11 | then 12 | RST_FILES="$RST_FILES $F" 13 | fi 14 | done 15 | echo "List of Changed RST Files: $RST_FILES" 16 | echo "Building with Private theme: $PRIVATE_THEME" 17 | if [ -z "$RST_FILES" ]; then 18 | echo "BUILD_NETLIFY=false" >> $GITHUB_ENV 19 | echo "No RST Files have changed -- nothing to do in this PR" 20 | else 21 | echo "BUILD_NETLIFY=true" >> $GITHUB_ENV 22 | RST_FILES="$RST_FILES source/rst/index_toc.rst" 23 | if [ "$PRIVATE_THEME" = true ]; then 24 | make website THEMEPATH=theme/lecture-python.theme FILES="$RST_FILES" 25 | else 26 | make website FILES="$RST_FILES" 27 | fi 28 | ls _build/website/jupyter_html/* #Ensure build files are created 29 | fi -------------------------------------------------------------------------------- /scripts/execution-test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | CLEAN_BUILD=false 4 | MODIFIED_FILES="$1" 5 | 6 | RST_FILES="" 7 | for F in $MODIFIED_FILES 8 | do 9 | if [[ $F == environment.yml ]] 10 | then 11 | CLEAN_BUILD=true 12 | break 13 | fi 14 | #Extract List of RST Files 15 | if [[ $F == *.rst ]] 16 | then 17 | RST_FILES="$RST_FILES $F" 18 | fi 19 | done 20 | 21 | echo "List of Changed RST Files: $RST_FILES" 22 | echo "Clean Build Requested: $CLEAN_BUILD" 23 | 24 | if [ "$CLEAN_BUILD" = true ] 25 | then 26 | echo "Running Clean Build" 27 | make coverage 28 | elif [ -z "$RST_FILES" ] 29 | then 30 | echo "No RST Files have changed -- nothing to do in this PR" 31 | else 32 | RST_FILES="$RST_FILES source/rst/index_toc.rst" 33 | echo "Running Selecting Build with: $RST_FILES" 34 | make coverage 
FILES="$RST_FILES" 35 | fi -------------------------------------------------------------------------------- /scripts/install_latex.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # setup script to install texlive and add to path for travis 4 | # original source: https://shankarkulumani.com/2018/10/travis-and-latex.html 5 | sudo apt-get -qq update 6 | export PATH=/tmp/texlive/bin/x86_64-linux:$PATH 7 | if ! command -v pdflatex > /dev/null; then 8 | echo "Texlive not installed" 9 | echo "Downloading texlive and installing" 10 | wget http://mirror.ctan.org/systems/texlive/tlnet/install-tl-unx.tar.gz 11 | tar -xzf install-tl-unx.tar.gz 12 | ./install-tl-*/install-tl --profile=./scripts/texlive.profile 13 | echo "Finished install TexLive" 14 | fi 15 | echo "Now updating TexLive" 16 | # update texlive 17 | tlmgr option -- autobackup 0 18 | tlmgr update --self --all --no-auto-install 19 | echo "Finished updating TexLive" 20 | -------------------------------------------------------------------------------- /scripts/linkchecker-test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | MODIFIED_FILES="$1" 4 | 5 | RST_FILES="" 6 | for F in $MODIFIED_FILES 7 | do 8 | if [[ $F == *.rst ]] 9 | then 10 | RST_FILES="$RST_FILES $F" 11 | fi 12 | done 13 | echo "List of Changed RST Files: $RST_FILES" 14 | if [ -z "$RST_FILES" ]; then 15 | echo "No RST Files have changed -- nothing to do in this PR" 16 | else 17 | RST_FILES="$RST_FILES source/rst/index_toc.rst" 18 | make linkcheck FILES="$RST_FILES" 19 | fi -------------------------------------------------------------------------------- /scripts/texlive.profile: -------------------------------------------------------------------------------- 1 | selected_scheme scheme-full 2 | TEXDIR /tmp/texlive 3 | TEXMFCONFIG ~/.texlive/texmf-config 4 | TEXMFHOME ~/texmf 5 | TEXMFLOCAL /tmp/texlive/texmf-local 6 | TEXMFSYSCONFIG 
/tmp/texlive/texmf-config 7 | TEXMFSYSVAR /tmp/texlive/texmf-var 8 | TEXMFVAR ~/.texlive/texmf-var 9 | option_doc 0 10 | option_src 0 11 | -------------------------------------------------------------------------------- /source/_static/includes/header.raw: -------------------------------------------------------------------------------- 1 | .. raw:: html 2 | 3 |
4 | 5 | QuantEcon 6 | 7 |
8 | -------------------------------------------------------------------------------- /source/_static/includes/lecture_howto_py.raw: -------------------------------------------------------------------------------- 1 | .. raw:: html 2 | 3 |
4 | 5 | QuantEcon 6 | 7 |
8 | -------------------------------------------------------------------------------- /source/_static/lecture_specific/aiyagari/aiyagari_obit.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/lecture_specific/aiyagari/aiyagari_obit.pdf -------------------------------------------------------------------------------- /source/_static/lecture_specific/arellano/arellano_bond_prices.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/lecture_specific/arellano/arellano_bond_prices.png -------------------------------------------------------------------------------- /source/_static/lecture_specific/arellano/arellano_bond_prices_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/lecture_specific/arellano/arellano_bond_prices_2.png -------------------------------------------------------------------------------- /source/_static/lecture_specific/arellano/arellano_default_probs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/lecture_specific/arellano/arellano_default_probs.png -------------------------------------------------------------------------------- /source/_static/lecture_specific/arellano/arellano_time_series.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/lecture_specific/arellano/arellano_time_series.png 
def solve_model_time_iter(model,    # Class with model information
                          σ,        # Initial condition
                          tol=1e-4,
                          max_iter=1000,
                          verbose=True,
                          print_skip=25):
    """
    Iterate with the time-iteration operator ``K`` until successive
    policy guesses are within ``tol`` of each other (sup norm).

    NOTE: ``K`` (the time-iteration operator) and ``np`` (NumPy) are
    expected to be in the calling namespace — this snippet does not
    import them itself.

    Parameters
    ----------
    model : object
        Model primitives consumed by the operator ``K``.
    σ : array_like
        Initial policy guess.
    tol : float, optional
        Convergence tolerance on the distance between iterates.
    max_iter : int, optional
        Maximum number of iterations before giving up.
    verbose : bool, optional
        If True, report the error every ``print_skip`` iterations and
        announce convergence.
    print_skip : int, optional
        How often (in iterations) to print progress.

    Returns
    -------
    The final policy iterate.
    """
    # Set up loop
    i = 0
    error = tol + 1
    σ_new = σ  # Defined even if the loop body never runs (avoids NameError)

    while i < max_iter and error > tol:
        σ_new = K(σ, model)
        error = np.max(np.abs(σ - σ_new))
        i += 1
        if verbose and i % print_skip == 0:
            print(f"Error at iteration {i} is {error}.")
        σ = σ_new

    # Judge success by the error actually achieved, not the iteration
    # count: converging on exactly the max_iter-th pass is still success.
    # (Previously that case printed "Failed to converge!" and suppressed
    # the convergence message.)
    if error > tol:
        print("Failed to converge!")
    elif verbose:
        print(f"\nConverged in {i} iterations.")

    return σ_new
https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/lecture_specific/finite_markov/mc_aperiodicity2.png -------------------------------------------------------------------------------- /source/_static/lecture_specific/finite_markov/mc_ex1_plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/lecture_specific/finite_markov/mc_ex1_plot.png -------------------------------------------------------------------------------- /source/_static/lecture_specific/finite_markov/mc_irreducibility1.gv: -------------------------------------------------------------------------------- 1 | digraph G{ 2 | rankdir=LR; 3 | "poor" -> "poor" [label = "0.9"]; 4 | "poor" -> "middle class" [label = "0.1"]; 5 | "middle class" -> "poor" [label = "0.4"]; 6 | "middle class" -> "middle class" [label = "0.4"]; 7 | "middle class" -> "rich" [label = "0.2"]; 8 | "rich" -> "poor" [label = "0.1"]; 9 | "rich" -> "middle class" [label = "0.1"]; 10 | "rich" -> "rich" [label = "0.8"]; 11 | } -------------------------------------------------------------------------------- /source/_static/lecture_specific/finite_markov/mc_irreducibility1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/lecture_specific/finite_markov/mc_irreducibility1.png -------------------------------------------------------------------------------- /source/_static/lecture_specific/finite_markov/mc_irreducibility2.gv: -------------------------------------------------------------------------------- 1 | digraph G{ 2 | rankdir=LR; 3 | "poor" -> "poor" [label = "1.0"]; 4 | "middle class" -> "poor" [label = "0.1"]; 5 | "middle class" -> "middle class" [label = "0.8"]; 6 | "middle class" 
-> "rich" [label = "0.1"]; 7 | "rich" -> "middle class" [label = "0.2"]; 8 | "rich" -> "rich" [label = "0.8"]; 9 | } 10 | 11 | -------------------------------------------------------------------------------- /source/_static/lecture_specific/finite_markov/mc_irreducibility2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/lecture_specific/finite_markov/mc_irreducibility2.png -------------------------------------------------------------------------------- /source/_static/lecture_specific/finite_markov/web_graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/lecture_specific/finite_markov/web_graph.png -------------------------------------------------------------------------------- /source/_static/lecture_specific/finite_markov/web_graph_data.txt: -------------------------------------------------------------------------------- 1 | a -> d; 2 | a -> f; 3 | b -> j; 4 | b -> k; 5 | b -> m; 6 | c -> c; 7 | c -> g; 8 | c -> j; 9 | c -> m; 10 | d -> f; 11 | d -> h; 12 | d -> k; 13 | e -> d; 14 | e -> h; 15 | e -> l; 16 | f -> a; 17 | f -> b; 18 | f -> j; 19 | f -> l; 20 | g -> b; 21 | g -> j; 22 | h -> d; 23 | h -> g; 24 | h -> l; 25 | h -> m; 26 | i -> g; 27 | i -> h; 28 | i -> n; 29 | j -> e; 30 | j -> i; 31 | j -> k; 32 | k -> n; 33 | l -> m; 34 | m -> g; 35 | n -> c; 36 | n -> j; 37 | n -> m; 38 | -------------------------------------------------------------------------------- /source/_static/lecture_specific/heavy_tails/light_heavy_fig1.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/lecture_specific/heavy_tails/light_heavy_fig1.png -------------------------------------------------------------------------------- /source/_static/lecture_specific/heavy_tails/rank_size_fig1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/lecture_specific/heavy_tails/rank_size_fig1.png -------------------------------------------------------------------------------- /source/_static/lecture_specific/ifp/ifp_agg_savings.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/lecture_specific/ifp/ifp_agg_savings.png -------------------------------------------------------------------------------- /source/_static/lecture_specific/ifp/ifp_histogram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/lecture_specific/ifp/ifp_histogram.png -------------------------------------------------------------------------------- /source/_static/lecture_specific/ifp/ifp_policies.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/lecture_specific/ifp/ifp_policies.png -------------------------------------------------------------------------------- /source/_static/lecture_specific/ifp/pi2.pdf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/lecture_specific/ifp/pi2.pdf -------------------------------------------------------------------------------- /source/_static/lecture_specific/kalman/kalman_ex3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/lecture_specific/kalman/kalman_ex3.png -------------------------------------------------------------------------------- /source/_static/lecture_specific/kalman/kl_ex1_fig.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/lecture_specific/kalman/kl_ex1_fig.png -------------------------------------------------------------------------------- /source/_static/lecture_specific/kalman/kl_ex2_fig.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/lecture_specific/kalman/kl_ex2_fig.png -------------------------------------------------------------------------------- /source/_static/lecture_specific/lake_model/lake_distribution_wages.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/lecture_specific/lake_model/lake_distribution_wages.png -------------------------------------------------------------------------------- /source/_static/lecture_specific/linear_algebra/course_notes.pdf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/lecture_specific/linear_algebra/course_notes.pdf -------------------------------------------------------------------------------- /source/_static/lecture_specific/linear_models/covariance_stationary.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/lecture_specific/linear_models/covariance_stationary.png -------------------------------------------------------------------------------- /source/_static/lecture_specific/linear_models/ensemble_mean.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/lecture_specific/linear_models/ensemble_mean.png -------------------------------------------------------------------------------- /source/_static/lecture_specific/linear_models/iteration_notes.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/lecture_specific/linear_models/iteration_notes.pdf -------------------------------------------------------------------------------- /source/_static/lecture_specific/linear_models/paths_and_stationarity.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/lecture_specific/linear_models/paths_and_stationarity.png -------------------------------------------------------------------------------- /source/_static/lecture_specific/linear_models/solution_lss_ex1.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/lecture_specific/linear_models/solution_lss_ex1.png -------------------------------------------------------------------------------- /source/_static/lecture_specific/linear_models/solution_lss_ex2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/lecture_specific/linear_models/solution_lss_ex2.png -------------------------------------------------------------------------------- /source/_static/lecture_specific/linear_models/tsh.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/lecture_specific/linear_models/tsh.png -------------------------------------------------------------------------------- /source/_static/lecture_specific/linear_models/tsh0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/lecture_specific/linear_models/tsh0.png -------------------------------------------------------------------------------- /source/_static/lecture_specific/linear_models/tsh_hg.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/lecture_specific/linear_models/tsh_hg.png -------------------------------------------------------------------------------- /source/_static/lecture_specific/lqcontrol/solution_lqc_ex1.png: -------------------------------------------------------------------------------- 
import numpy as np
import quantecon as qe

# Parameters
# a0/a1 presumably parameterize the (inverse) demand curve and γ the
# quadratic adjustment cost, per the accompanying lecture — confirm there.
a0 = 10.0      # Demand intercept
a1 = 2.0       # Demand slope
β = 0.96       # Discount factor
γ = 12.0       # Adjustment cost parameter

# In LQ form
# State is 3-dimensional; each firm controls one scalar instrument.
A = np.eye(3)
B1 = np.array([[0.], [1.], [0.]])
B2 = np.array([[0.], [0.], [1.]])


# Payoff matrices for firms 1 and 2 (mirror images of each other)
R1 = [[ 0.,      -a0 / 2,  0.],
      [-a0 / 2.,  a1,      a1 / 2.],
      [ 0,        a1 / 2., 0.]]

R2 = [[ 0.,       0.,     -a0 / 2],
      [ 0.,       0.,      a1 / 2.],
      [-a0 / 2,   a1 / 2., a1]]

Q1 = Q2 = γ
# No cross terms in this specification
S1 = S2 = W1 = W2 = M1 = M2 = 0.0

# Solve using QE's nnash function
# Returns the Markov-perfect equilibrium feedback rules F1, F2 and the
# associated value-function matrices P1, P2.  Argument order matters;
# see the quantecon `nnash` documentation.
F1, F2, P1, P2 = qe.nnash(A, B1, B2, R1, R2, Q1,
                          Q2, S1, S2, W1, W2, M1,
                          M2, beta=β)

# Display policies
print("Computed policies for firm 1 and firm 2:\n")
print(f"F1 = {F1}")
print(f"F2 = {F2}")
print("\n")
# Plot the reservation wage as a function of the job separation rate α.
# NOTE: McCallModel and compute_reservation_wage are defined earlier in
# the lecture; this snippet assumes they are already in the namespace.
import matplotlib.pyplot as plt
import numpy as np  # Fix: `np` was used below without being imported

grid_size = 25
α_vals = np.linspace(0.05, 0.5, grid_size)   # Separation rates to sweep
w_bar_vals = np.empty_like(α_vals)           # Reservation wage at each α

mcm = McCallModel()

fig, ax = plt.subplots(figsize=(10, 6))

# Recompute the reservation wage for each separation rate
for i, α in enumerate(α_vals):
    mcm.α = α
    w_bar_vals[i] = compute_reservation_wage(mcm)

ax.set_xlabel('job separation rate')
ax.set_ylabel('reservation wage')
ax.set_xlim(α_vals.min(), α_vals.max())
txt = r'$\bar w$ as a function of $\alpha$'
ax.plot(α_vals, w_bar_vals, 'b-', lw=2, alpha=0.7, label=txt)
ax.legend(loc='upper right')
ax.grid()

plt.show()
# Plot the reservation wage as a function of the job offer rate γ.
# NOTE: McCallModel and compute_reservation_wage are defined earlier in
# the lecture; this snippet assumes they are already in the namespace.
import matplotlib.pyplot as plt
import numpy as np  # Fix: `np` was used below without being imported

grid_size = 25
γ_vals = np.linspace(0.05, 0.95, grid_size)  # Offer rates to sweep
w_bar_vals = np.empty_like(γ_vals)           # Reservation wage at each γ

mcm = McCallModel()

fig, ax = plt.subplots(figsize=(10, 6))

# Recompute the reservation wage for each offer rate
for i, γ in enumerate(γ_vals):
    mcm.γ = γ
    w_bar_vals[i] = compute_reservation_wage(mcm)

ax.set_xlabel('job offer rate')
ax.set_ylabel('reservation wage')
ax.set_xlim(γ_vals.min(), γ_vals.max())
txt = r'$\bar w$ as a function of $\gamma$'
ax.plot(γ_vals, w_bar_vals, 'b-', lw=2, alpha=0.7, label=txt)
ax.legend(loc='upper left')
ax.grid()

plt.show()
from scipy.interpolate import LinearNDInterpolator
from scipy.integrate import fixed_quad
from numpy import maximum as npmax

# NOTE(review): `np` (NumPy) and `beta` (presumably scipy.stats.beta) are
# used below but not imported here — they are assumed to be in the
# caller's namespace; confirm against the lecture setup.


class SearchProblem:
    """
    A class to store a given parameterization of the "offer distribution
    unknown" model.

    Parameters
    ----------
    β : scalar(float), optional(default=0.95)
        The discount parameter
    c : scalar(float), optional(default=0.6)
        The unemployment compensation
    F_a : scalar(float), optional(default=1)
        First parameter of β distribution on F
    F_b : scalar(float), optional(default=1)
        Second parameter of β distribution on F
    G_a : scalar(float), optional(default=3)
        First parameter of β distribution on G
    G_b : scalar(float), optional(default=1.2)
        Second parameter of β distribution on G
    w_max : scalar(float), optional(default=2)
        Maximum wage possible
    w_grid_size : scalar(int), optional(default=40)
        Size of the grid on wages
    π_grid_size : scalar(int), optional(default=40)
        Size of the grid on probabilities

    Attributes
    ----------
    β, c, w_max : see Parameters
    w_grid : np.ndarray
        Grid points over wages, ndim=1
    π_grid : np.ndarray
        Grid points over π, ndim=1
    grid_points : np.ndarray
        Combined grid points, ndim=2
    F : scipy.stats._distn_infrastructure.rv_frozen
        Beta distribution with params (F_a, F_b), scaled by w_max
    G : scipy.stats._distn_infrastructure.rv_frozen
        Beta distribution with params (G_a, G_b), scaled by w_max
    f : function
        Density of F
    g : function
        Density of G
    π_min : scalar(float)
        Minimum of grid over π
    π_max : scalar(float)
        Maximum of grid over π
    """

    def __init__(self, β=0.95, c=0.6, F_a=1, F_b=1, G_a=3, G_b=1.2,
                 w_max=2, w_grid_size=40, π_grid_size=40):

        self.β, self.c, self.w_max = β, c, w_max
        self.F = beta(F_a, F_b, scale=w_max)
        self.G = beta(G_a, G_b, scale=w_max)
        self.f, self.g = self.F.pdf, self.G.pdf    # Density functions
        self.π_min, self.π_max = 1e-3, 1 - 1e-3    # Avoids instability
        self.w_grid = np.linspace(0, w_max, w_grid_size)
        self.π_grid = np.linspace(self.π_min, self.π_max, π_grid_size)
        x, y = np.meshgrid(self.w_grid, self.π_grid)
        # Flatten the (w, π) mesh into an N x 2 array of grid points;
        # order='F' pairs the two raveled coordinates consistently.
        self.grid_points = np.column_stack((x.ravel(order='F'), y.ravel(order='F')))


    def q(self, w, π):
        """
        Updates π using Bayes' rule and the current wage observation w.

        Returns
        -------

        new_π : scalar(float)
            The updated probability

        """

        new_π = 1.0 / (1 + ((1 - π) * self.g(w)) / (π * self.f(w)))

        # Return new_π when in [π_min, π_max] and else end points
        # (clamping keeps the belief strictly inside (0, 1))
        new_π = np.maximum(np.minimum(new_π, self.π_max), self.π_min)

        return new_π

    def bellman_operator(self, v):
        """

        The Bellman operator.  Including for comparison. Value function
        iteration is not recommended for this problem.  See the
        reservation wage operator below.

        Parameters
        ----------
        v : array_like(float, ndim=1, length=len(π_grid))
            An approximate value function represented as a
            one-dimensional array.

        Returns
        -------
        new_v : array_like(float, ndim=1, length=len(π_grid))
            The updated value function

        """
        # == Simplify names == #
        f, g, β, c, q = self.f, self.g, self.β, self.c, self.q

        # Interpolate v over the 2-D (w, π) grid so it can be evaluated
        # at arbitrary points inside the quadrature below
        vf = LinearNDInterpolator(self.grid_points, v)
        N = len(v)
        new_v = np.empty(N)

        for i in range(N):
            w, π = self.grid_points[i, :]
            # Value of accepting: the wage forever, discounted
            v1 = w / (1 - β)
            # Value of rejecting: compensation plus discounted expected
            # continuation value under the current belief π
            integrand = lambda m: vf(m, q(m, π)) * (π * f(m) +
                                                    (1 - π) * g(m))
            integral, error = fixed_quad(integrand, 0, self.w_max)
            v2 = c + β * integral
            new_v[i] = max(v1, v2)

        return new_v

    def get_greedy(self, v):
        """
        Compute optimal actions taking v as the value function.

        Parameters
        ----------
        v : array_like(float, ndim=1, length=len(π_grid))
            An approximate value function represented as a
            one-dimensional array.

        Returns
        -------
        policy : array_like(float, ndim=1, length=len(π_grid))
            The decision to accept or reject an offer where 1 indicates
            accept and 0 indicates reject

        """
        # == Simplify names == #
        f, g, β, c, q = self.f, self.g, self.β, self.c, self.q

        vf = LinearNDInterpolator(self.grid_points, v)
        N = len(v)
        policy = np.zeros(N, dtype=int)

        for i in range(N):
            w, π = self.grid_points[i, :]
            # Same accept/reject comparison as in bellman_operator, but
            # recording the argmax instead of the max
            v1 = w / (1 - β)
            integrand = lambda m: vf(m, q(m, π)) * (π * f(m) +
                                                    (1 - π) * g(m))
            integral, error = fixed_quad(integrand, 0, self.w_max)
            v2 = c + β * integral
            policy[i] = v1 > v2  # Evaluates to 1 or 0

        return policy

    def res_wage_operator(self, ϕ):
        """

        Updates the reservation wage function guess ϕ via the operator
        Q.

        Parameters
        ----------
        ϕ : array_like(float, ndim=1, length=len(π_grid))
            This is reservation wage guess

        Returns
        -------
        new_ϕ : array_like(float, ndim=1, length=len(π_grid))
            The updated reservation wage guess.

        """
        # == Simplify names == #
        β, c, f, g, q = self.β, self.c, self.f, self.g, self.q
        # == Turn ϕ into a function == #
        ϕ_f = lambda p: np.interp(p, self.π_grid, ϕ)

        new_ϕ = np.empty(len(ϕ))
        for i, π in enumerate(self.π_grid):
            def integrand(x):
                "Integral expression on right-hand side of operator"
                return npmax(x, ϕ_f(q(x, π))) * (π * f(x) + (1 - π) * g(x))
            integral, error = fixed_quad(integrand, 0, self.w_max)
            new_ϕ[i] = (1 - β) * c + β * integral

        return new_ϕ
import numpy as np
from interpolation import interp
from numba import njit, prange
from quantecon.optimize.scalar_maximization import brent_max


def operator_factory(og, parallel_flag=True):
    """
    A function factory for building the Bellman operator, as well as
    a function that computes greedy policies.

    Here og is an instance of OptimalGrowthModel.

    Parameters
    ----------
    og : OptimalGrowthModel
        Supplies the primitives f, u, β and the grid/shock arrays.
    parallel_flag : bool, optional(default=True)
        Passed to numba's @njit(parallel=...); when True the Bellman
        operator's loop runs across grid points in parallel via prange.

    Returns
    -------
    (T, get_greedy) : tuple of jitted functions
        T maps a value array to the updated value array; get_greedy maps
        a value array to the corresponding greedy consumption policy.
    """

    # Unpack primitives once so the jitted closures capture plain
    # arrays/functions rather than the og instance itself
    f, u, β = og.f, og.u, og.β
    grid, shocks = og.grid, og.shocks

    @njit
    def objective(c, v, y):
        """
        The right-hand side of the Bellman equation
        """
        # First turn v into a function via interpolation
        v_func = lambda x: interp(grid, v, x)
        # Expectation over shocks approximated by the Monte Carlo mean
        return u(c) + β * np.mean(v_func(f(y - c) * shocks))

    @njit(parallel=parallel_flag)
    def T(v):
        """
        The Bellman operator
        """
        v_new = np.empty_like(v)
        for i in prange(len(grid)):
            y = grid[i]
            # Solve for optimal v at y
            # brent_max returns (maximizer, maximum, info); [1] is the max
            v_max = brent_max(objective, 1e-10, y, args=(v, y))[1]
            v_new[i] = v_max
        return v_new

    @njit
    def get_greedy(v):
        """
        Computes the v-greedy policy of a given function v
        """
        σ = np.empty_like(v)
        for i in range(len(grid)):
            y = grid[i]
            # Solve for optimal c at y
            # [0] is the maximizer (optimal consumption), not the maximum
            c_max = brent_max(objective, 1e-10, y, args=(v, y))[0]
            σ[i] = c_max
        return σ

    return T, get_greedy
-------------------------------------------------------------------------------- 1 | def v_star(y, α, β, μ): 2 | """ 3 | True value function 4 | """ 5 | c1 = np.log(1 - α * β) / (1 - β) 6 | c2 = (μ + α * np.log(α * β)) / (1 - α) 7 | c3 = 1 / (1 - β) 8 | c4 = 1 / (1 - α * β) 9 | return c1 + c2 * (c3 - c4) + c4 * np.log(y) 10 | 11 | def σ_star(y, α, β): 12 | """ 13 | True optimal policy 14 | """ 15 | return (1 - α * β) * y -------------------------------------------------------------------------------- /source/_static/lecture_specific/optgrowth/solution_og_ex2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/lecture_specific/optgrowth/solution_og_ex2.png -------------------------------------------------------------------------------- /source/_static/lecture_specific/optgrowth/solve_model.py: -------------------------------------------------------------------------------- 1 | def solve_model(og, 2 | tol=1e-4, 3 | max_iter=1000, 4 | verbose=True, 5 | print_skip=25): 6 | """ 7 | Solve model by iterating with the Bellman operator. 
8 | 9 | """ 10 | 11 | # Set up loop 12 | v = og.u(og.grid) # Initial condition 13 | i = 0 14 | error = tol + 1 15 | 16 | while i < max_iter and error > tol: 17 | v_greedy, v_new = T(v, og) 18 | error = np.max(np.abs(v - v_new)) 19 | i += 1 20 | if verbose and i % print_skip == 0: 21 | print(f"Error at iteration {i} is {error}.") 22 | v = v_new 23 | 24 | if i == max_iter: 25 | print("Failed to converge!") 26 | 27 | if verbose and i < max_iter: 28 | print(f"\nConverged in {i} iterations.") 29 | 30 | return v_greedy, v_new 31 | -------------------------------------------------------------------------------- /source/_static/lecture_specific/optgrowth_fast/ogm.py: -------------------------------------------------------------------------------- 1 | opt_growth_data = [ 2 | ('α', float64), # Production parameter 3 | ('β', float64), # Discount factor 4 | ('μ', float64), # Shock location parameter 5 | ('s', float64), # Shock scale parameter 6 | ('grid', float64[:]), # Grid (array) 7 | ('shocks', float64[:]) # Shock draws (array) 8 | ] 9 | 10 | @jitclass(opt_growth_data) 11 | class OptimalGrowthModel: 12 | 13 | def __init__(self, 14 | α=0.4, 15 | β=0.96, 16 | μ=0, 17 | s=0.1, 18 | grid_max=4, 19 | grid_size=120, 20 | shock_size=250, 21 | seed=1234): 22 | 23 | self.α, self.β, self.μ, self.s = α, β, μ, s 24 | 25 | # Set up grid 26 | self.grid = np.linspace(1e-5, grid_max, grid_size) 27 | 28 | # Store shocks (with a seed, so results are reproducible) 29 | np.random.seed(seed) 30 | self.shocks = np.exp(μ + s * np.random.randn(shock_size)) 31 | 32 | 33 | def f(self, k): 34 | "The production function" 35 | return k**self.α 36 | 37 | 38 | def u(self, c): 39 | "The utility function" 40 | return np.log(c) 41 | 42 | def f_prime(self, k): 43 | "Derivative of f" 44 | return self.α * (k**(self.α - 1)) 45 | 46 | 47 | def u_prime(self, c): 48 | "Derivative of u" 49 | return 1/c 50 | 51 | def u_prime_inv(self, c): 52 | "Inverse of u'" 53 | return 1/c 
-------------------------------------------------------------------------------- /source/_static/lecture_specific/optgrowth_fast/ogm_crra.py: -------------------------------------------------------------------------------- 1 | opt_growth_data = [ 2 | ('α', float64), # Production parameter 3 | ('β', float64), # Discount factor 4 | ('μ', float64), # Shock location parameter 5 | ('γ', float64), # Preference parameter 6 | ('s', float64), # Shock scale parameter 7 | ('grid', float64[:]), # Grid (array) 8 | ('shocks', float64[:]) # Shock draws (array) 9 | ] 10 | 11 | @jitclass(opt_growth_data) 12 | class OptimalGrowthModel_CRRA: 13 | 14 | def __init__(self, 15 | α=0.4, 16 | β=0.96, 17 | μ=0, 18 | s=0.1, 19 | γ=1.5, 20 | grid_max=4, 21 | grid_size=120, 22 | shock_size=250, 23 | seed=1234): 24 | 25 | self.α, self.β, self.γ, self.μ, self.s = α, β, γ, μ, s 26 | 27 | # Set up grid 28 | self.grid = np.linspace(1e-5, grid_max, grid_size) 29 | 30 | # Store shocks (with a seed, so results are reproducible) 31 | np.random.seed(seed) 32 | self.shocks = np.exp(μ + s * np.random.randn(shock_size)) 33 | 34 | 35 | def f(self, k): 36 | "The production function." 37 | return k**self.α 38 | 39 | def u(self, c): 40 | "The utility function." 41 | return c**(1 - self.γ) / (1 - self.γ) 42 | 43 | def f_prime(self, k): 44 | "Derivative of f." 45 | return self.α * (k**(self.α - 1)) 46 | 47 | def u_prime(self, c): 48 | "Derivative of u." 
49 | return c**(-self.γ) 50 | 51 | def u_prime_inv(c): 52 | return c**(-1 / self.γ) -------------------------------------------------------------------------------- /source/_static/lecture_specific/pandas_panel/venn_diag.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/lecture_specific/pandas_panel/venn_diag.png -------------------------------------------------------------------------------- /source/_static/lecture_specific/perm_income/perm_inc_ir.py: -------------------------------------------------------------------------------- 1 | r = 0.05 2 | β = 1 / (1 + r) 3 | T = 20 # Time horizon 4 | S = 5 # Impulse date 5 | σ1 = σ2 = 0.15 6 | 7 | 8 | def time_path(permanent=False): 9 | "Time path of consumption and debt given shock sequence" 10 | w1 = np.zeros(T+1) 11 | w2 = np.zeros(T+1) 12 | b = np.zeros(T+1) 13 | c = np.zeros(T+1) 14 | if permanent: 15 | w1[S+1] = 1.0 16 | else: 17 | w2[S+1] = 1.0 18 | for t in range(1, T): 19 | b[t+1] = b[t] - σ2 * w2[t] 20 | c[t+1] = c[t] + σ1 * w1[t+1] + (1 - β) * σ2 * w2[t+1] 21 | return b, c 22 | 23 | 24 | fig, axes = plt.subplots(2, 1, figsize=(10, 8)) 25 | p_args = {'lw': 2, 'alpha': 0.7} 26 | titles = ['transitory', 'permanent'] 27 | 28 | L = 0.175 29 | 30 | for ax, truefalse, title in zip(axes, (True, False), titles): 31 | b, c = time_path(permanent=truefalse) 32 | ax.set_title(f'Impulse reponse: {title} income shock') 33 | ax.plot(list(range(T+1)), c, 'g-', label="consumption", **p_args) 34 | ax.plot(list(range(T+1)), b, 'b-', label="debt", **p_args) 35 | ax.plot((S, S), (-L, L), 'k-', lw=0.5) 36 | ax.grid(alpha=0.5) 37 | ax.set(xlabel=r'Time', ylim=(-L, L)) 38 | 39 | axes[0].legend(loc='lower right') 40 | 41 | plt.tight_layout() 42 | plt.show() -------------------------------------------------------------------------------- 
/source/_static/lecture_specific/schelling/schelling_fig1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/lecture_specific/schelling/schelling_fig1.png -------------------------------------------------------------------------------- /source/_static/lecture_specific/schelling/schelling_fig2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/lecture_specific/schelling/schelling_fig2.png -------------------------------------------------------------------------------- /source/_static/lecture_specific/schelling/schelling_fig3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/lecture_specific/schelling/schelling_fig3.png -------------------------------------------------------------------------------- /source/_static/lecture_specific/schelling/schelling_fig4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/lecture_specific/schelling/schelling_fig4.png -------------------------------------------------------------------------------- /source/_static/lecture_specific/short_path/graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/lecture_specific/short_path/graph.png -------------------------------------------------------------------------------- /source/_static/lecture_specific/short_path/graph2.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/lecture_specific/short_path/graph2.png -------------------------------------------------------------------------------- /source/_static/lecture_specific/short_path/graph3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/lecture_specific/short_path/graph3.png -------------------------------------------------------------------------------- /source/_static/lecture_specific/short_path/graph4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/lecture_specific/short_path/graph4.png -------------------------------------------------------------------------------- /source/_static/lecture_specific/troubleshooting/launch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/lecture_specific/troubleshooting/launch.png -------------------------------------------------------------------------------- /source/_static/lecture_specific/uncertainty_traps/uncertainty_traps_45.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/lecture_specific/uncertainty_traps/uncertainty_traps_45.png -------------------------------------------------------------------------------- /source/_static/lecture_specific/uncertainty_traps/uncertainty_traps_mu.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/lecture_specific/uncertainty_traps/uncertainty_traps_mu.png -------------------------------------------------------------------------------- /source/_static/lecture_specific/uncertainty_traps/uncertainty_traps_sim.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/lecture_specific/uncertainty_traps/uncertainty_traps_sim.png -------------------------------------------------------------------------------- /source/_static/lecture_specific/wald_friedman/wald_class.py: -------------------------------------------------------------------------------- 1 | class WaldFriedman: 2 | """ 3 | Insert relevant docstrings here 4 | """ 5 | def __init__(self, c, L0, L1, f0, f1, m=25): 6 | self.c = c 7 | self.L0, self.L1 = L0, L1 8 | self.m = m 9 | self.pgrid = np.linspace(0.0, 1.0, m) 10 | 11 | # Renormalize distributions so nothing is "too" small 12 | f0 = np.clip(f0, 1e-8, 1-1e-8) 13 | f1 = np.clip(f1, 1e-8, 1-1e-8) 14 | self.f0 = f0 / np.sum(f0) 15 | self.f1 = f1 / np.sum(f1) 16 | self.J = np.zeros(m) 17 | 18 | def current_distribution(self, p): 19 | """ 20 | This function takes a value for the probability with which 21 | the correct model is model 0 and returns the mixed 22 | distribution that corresponds with that belief. 23 | """ 24 | return p*self.f0 + (1-p)*self.f1 25 | 26 | def bayes_update_k(self, p, k): 27 | """ 28 | This function takes a value for p, and a realization of the 29 | random variable and calculates the value for p tomorrow. 
30 | """ 31 | f0_k = self.f0[k] 32 | f1_k = self.f1[k] 33 | 34 | p_tp1 = p * f0_k / (p * f0_k + (1 - p) * f1_k) 35 | 36 | return np.clip(p_tp1, 0, 1) 37 | 38 | def bayes_update_all(self, p): 39 | """ 40 | This is similar to `bayes_update_k` except it returns a 41 | new value for p for each realization of the random variable 42 | """ 43 | return np.clip(p * self.f0 / (p * self.f0 + (1 - p) * self.f1), 0, 1) 44 | 45 | def payoff_choose_f0(self, p): 46 | "For a given probability specify the cost of accepting model 0" 47 | return (1 - p) * self.L0 48 | 49 | def payoff_choose_f1(self, p): 50 | "For a given probability specify the cost of accepting model 1" 51 | return p * self.L1 52 | 53 | def EJ(self, p, J): 54 | """ 55 | This function evaluates the expectation of the value function 56 | at period t+1. It does so by taking the current probability 57 | distribution over outcomes: 58 | 59 | p(z_{k+1}) = p_k f_0(z_{k+1}) + (1-p_k) f_1(z_{k+1}) 60 | 61 | and evaluating the value function at the possible states 62 | tomorrow J(p_{t+1}) where 63 | 64 | p_{t+1} = p f0 / ( p f0 + (1-p) f1) 65 | 66 | Parameters 67 | ---------- 68 | p : Scalar(Float64) 69 | The current believed probability that model 0 is the true 70 | model. 
71 | J : Function 72 | The current value function for a decision to continue 73 | 74 | Returns 75 | ------- 76 | EJ : Scalar(Float64) 77 | The expected value of the value function tomorrow 78 | """ 79 | # Pull out information 80 | f0, f1 = self.f0, self.f1 81 | 82 | # Get the current believed distribution and tomorrows possible dists 83 | # Need to clip to make sure things don't blow up (go to infinity) 84 | curr_dist = self.current_distribution(p) 85 | tp1_dist = self.bayes_update_all(p) 86 | 87 | # Evaluate the expectation 88 | EJ = curr_dist @ J(tp1_dist) 89 | 90 | return EJ 91 | 92 | def payoff_continue(self, p, J): 93 | """ 94 | For a given probability distribution and value function give 95 | cost of continuing the search for correct model 96 | """ 97 | return self.c + self.EJ(p, J) 98 | 99 | def bellman_operator(self, J): 100 | """ 101 | Evaluates the value function for a given continuation value 102 | function; that is, evaluates 103 | 104 | J(p) = min(pL0, (1-p)L1, c + E[J(p')]) 105 | 106 | Uses linear interpolation between points 107 | """ 108 | payoff_choose_f0 = self.payoff_choose_f0 109 | payoff_choose_f1 = self.payoff_choose_f1 110 | payoff_continue = self.payoff_continue 111 | c, L0, L1, f0, f1 = self.c, self.L0, self.L1, self.f0, self.f1 112 | m, pgrid = self.m, self.pgrid 113 | 114 | J_out = np.empty(m) 115 | J_interp = interp.UnivariateSpline(pgrid, J, k=1, ext=0) 116 | 117 | for (p_ind, p) in enumerate(pgrid): 118 | # Payoff of choosing model 0 119 | p_c_0 = payoff_choose_f0(p) 120 | p_c_1 = payoff_choose_f1(p) 121 | p_con = payoff_continue(p, J_interp) 122 | 123 | J_out[p_ind] = min(p_c_0, p_c_1, p_con) 124 | 125 | return J_out 126 | 127 | def solve_model(self): 128 | J = qe.compute_fixed_point(self.bellman_operator, np.zeros(self.m), 129 | error_tol=1e-7, verbose=False) 130 | 131 | self.J = J 132 | return J 133 | 134 | def find_cutoff_rule(self, J): 135 | """ 136 | This function takes a value function and returns the corresponding 137 | cutoffs 
of where you transition between continue and choosing a 138 | specific model 139 | """ 140 | payoff_choose_f0 = self.payoff_choose_f0 141 | payoff_choose_f1 = self.payoff_choose_f1 142 | m, pgrid = self.m, self.pgrid 143 | 144 | # Evaluate cost at all points on grid for choosing a model 145 | p_c_0 = payoff_choose_f0(pgrid) 146 | p_c_1 = payoff_choose_f1(pgrid) 147 | 148 | # The cutoff points can be found by differencing these costs with 149 | # the Bellman equation (J is always less than or equal to p_c_i) 150 | lb = pgrid[np.searchsorted(p_c_1 - J, 1e-10) - 1] 151 | ub = pgrid[np.searchsorted(J - p_c_0, -1e-10)] 152 | 153 | return (lb, ub) 154 | 155 | def simulate(self, f, p0=0.5): 156 | """ 157 | This function takes an initial condition and simulates until it 158 | stops (when a decision is made). 159 | """ 160 | # Check whether vf is computed 161 | if np.sum(self.J) < 1e-8: 162 | self.solve_model() 163 | 164 | # Unpack useful info 165 | lb, ub = self.find_cutoff_rule(self.J) 166 | update_p = self.bayes_update_k 167 | curr_dist = self.current_distribution 168 | 169 | # Initialize a couple useful variables 170 | decision_made = False 171 | p = p0 172 | t = 0 173 | 174 | while decision_made is False: 175 | # Maybe should specify which distribution is correct one so that 176 | # the draws come from the "right" distribution 177 | k = int(qe.random.draw(np.cumsum(f))) 178 | t = t+1 179 | p = update_p(p, k) 180 | if p < lb: 181 | decision_made = True 182 | decision = 1 183 | elif p > ub: 184 | decision_made = True 185 | decision = 0 186 | 187 | return decision, p, t 188 | 189 | def simulate_tdgp_f0(self, p0=0.5): 190 | """ 191 | Uses the distribution f0 as the true data generating 192 | process 193 | """ 194 | decision, p, t = self.simulate(self.f0, p0) 195 | 196 | if decision == 0: 197 | correct = True 198 | else: 199 | correct = False 200 | 201 | return correct, p, t 202 | 203 | def simulate_tdgp_f1(self, p0=0.5): 204 | """ 205 | Uses the distribution f1 as the true 
data generating 206 | process 207 | """ 208 | decision, p, t = self.simulate(self.f1, p0) 209 | 210 | if decision == 1: 211 | correct = True 212 | else: 213 | correct = False 214 | 215 | return correct, p, t 216 | 217 | def stopping_dist(self, ndraws=250, tdgp="f0"): 218 | """ 219 | Simulates repeatedly to get distributions of time needed to make a 220 | decision and how often they are correct. 221 | """ 222 | if tdgp == "f0": 223 | simfunc = self.simulate_tdgp_f0 224 | else: 225 | simfunc = self.simulate_tdgp_f1 226 | 227 | # Allocate space 228 | tdist = np.empty(ndraws, int) 229 | cdist = np.empty(ndraws, bool) 230 | 231 | for i in range(ndraws): 232 | correct, p, t = simfunc() 233 | tdist[i] = t 234 | cdist[i] = correct 235 | 236 | return cdist, tdist 237 | -------------------------------------------------------------------------------- /source/_static/lecture_specific/wald_friedman/wald_dec_rule.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/lecture_specific/wald_friedman/wald_dec_rule.png -------------------------------------------------------------------------------- /source/_static/lecture_specific/wald_friedman/wald_dec_rule.tex: -------------------------------------------------------------------------------- 1 | \documentclass[convert={density=300,size=1080x800,outext=.png}]{standalone} 2 | \usepackage{tikz} 3 | \usetikzlibrary{decorations.pathreplacing} 4 | \begin{document} 5 | 6 | %.. 
tikz:: 7 | \begin{tikzpicture} 8 | [scale=5, every node/.style={color=black}, decoration={brace,amplitude=7pt}] \coordinate (a0) at (0, 0.0); 9 | \coordinate (a1) at (1, 0.0); 10 | \coordinate (a2) at (2, 0.0); 11 | \coordinate (a3) at (3, 0.0); 12 | \coordinate (s0) at (0, 0.1); 13 | \coordinate (s1) at (1, 0.1); 14 | \coordinate (s2) at (2, 0.1); 15 | \coordinate (s3) at (3, 0.1); 16 | % axis 17 | \draw[thick] (0, 0) -- (3, 0) node[below] {}; 18 | %curly bracket 19 | \draw [decorate, very thick] (s0) -- (s1) 20 | node [midway, anchor=south, outer sep=10pt]{accept $f_1$}; 21 | \draw [decorate, very thick] (s1) -- (s2) 22 | node [midway, anchor=south, outer sep=10pt]{draw again}; 23 | \draw [decorate, very thick] (s2) -- (s3) 24 | node [midway, anchor=south, outer sep=10pt]{accept $f_0$}; 25 | \node[circle, draw, thin, blue, fill=white!10, scale=0.45] at (a0){}; 26 | \node[below, outer sep=5pt] at (a0){$0$}; 27 | \node[circle, draw, thin, blue, fill=white!10, scale=0.45] at (a1){}; 28 | \node[below, outer sep=5pt] at (a1){$\beta$}; 29 | \node[circle, draw, thin, blue, fill=white!10, scale=0.45] at (a2){}; 30 | \node[below, outer sep=5pt] at (a2){$\alpha$}; 31 | \node[circle, draw, thin, blue, fill=white!10, scale=0.45] at (a3){}; 32 | \node[below, outer sep=5pt] at (a3){$1$}; 33 | \node[below, outer sep=25pt] at (1.5, 0){values of $\pi$}; 34 | \end{tikzpicture} 35 | 36 | \end{document} -------------------------------------------------------------------------------- /source/_static/lecture_specific/wald_friedman/wf_first_pass.py: -------------------------------------------------------------------------------- 1 | import scipy.interpolate as interp 2 | import quantecon as qe 3 | 4 | def expect_loss_choose_0(p, L0): 5 | "For a given probability return expected loss of choosing model 0" 6 | return (1 - p) * L0 7 | 8 | def expect_loss_choose_1(p, L1): 9 | "For a given probability return expected loss of choosing model 1" 10 | return p * L1 11 | 12 | def EJ(p, f0, f1, 
J): 13 | """ 14 | Evaluates the expectation of our value function J. To do this, we 15 | need the current probability that model 0 is correct (p), the 16 | distributions (f0, f1), and the function J. 17 | """ 18 | # Get the current distribution we believe (p*f0 + (1-p)*f1) 19 | curr_dist = p * f0 + (1 - p) * f1 20 | 21 | # Get tomorrow's expected distribution through Bayes law 22 | tp1_dist = np.clip((p * f0) / (p * f0 + (1 - p) * f1), 0, 1) 23 | 24 | # Evaluate the expectation 25 | EJ = curr_dist @ J(tp1_dist) 26 | 27 | return EJ 28 | 29 | def expect_loss_cont(p, c, f0, f1, J): 30 | return c + EJ(p, f0, f1, J) 31 | 32 | 33 | def bellman_operator(pgrid, c, f0, f1, L0, L1, J): 34 | """ 35 | Evaluates the value function for a given continuation value 36 | function; that is, evaluates 37 | 38 | J(p) = min((1 - p) L0, p L1, c + E J(p')) 39 | 40 | Uses linear interpolation between points. 41 | """ 42 | m = np.size(pgrid) 43 | assert m == np.size(J) 44 | 45 | J_out = np.zeros(m) 46 | J_interp = interp.UnivariateSpline(pgrid, J, k=1, ext=0) 47 | 48 | for (p_ind, p) in enumerate(pgrid): 49 | # Payoff of choosing model 0 50 | p_c_0 = expect_loss_choose_0(p, L0) 51 | p_c_1 = expect_loss_choose_1(p, L1) 52 | p_con = expect_loss_cont(p, c, f0, f1, J_interp) 53 | 54 | J_out[p_ind] = min(p_c_0, p_c_1, p_con) 55 | 56 | return J_out 57 | 58 | 59 | # == Now run at given parameters == # 60 | 61 | # First set up distributions 62 | p_m1 = np.linspace(0, 1, 50) 63 | f0 = np.clip(st.beta.pdf(p_m1, a=1, b=1), 1e-8, np.inf) 64 | f0 = f0 / np.sum(f0) 65 | f1 = np.clip(st.beta.pdf(p_m1, a=9, b=9), 1e-8, np.inf) 66 | f1 = f1 / np.sum(f1) 67 | 68 | # Build a grid 69 | pg = np.linspace(0, 1, 251) 70 | # Turn the Bellman operator into a function with one argument 71 | bell_op = lambda vf: bellman_operator(pg, 0.5, f0, f1, 5.0, 5.0, vf) 72 | # Pass it to qe's built in iteration routine 73 | J = qe.compute_fixed_point(bell_op, 74 | np.zeros(pg.size), # Initial guess 75 | error_tol=1e-6, 76 | 
verbose=True, 77 | print_skip=5) 78 | 79 | -------------------------------------------------------------------------------- /source/_static/lecture_specific/wealth_dynamics/htop_again.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/lecture_specific/wealth_dynamics/htop_again.png -------------------------------------------------------------------------------- /source/_static/qe-logo-large.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/source/_static/qe-logo-large.png -------------------------------------------------------------------------------- /source/rst/.ipynb_checkpoints/Untitled-checkpoint.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "import glob\n", 10 | "import re\n", 11 | "import os\n", 12 | "import errno\n", 13 | "path1 = '/home/anju/Documents/lecture-source-py/source/rst/*.rst'\n", 14 | "path2 = '*.tex'\n", 15 | "path = '*.rst'\n", 16 | "files = glob.glob(path)" 17 | ] 18 | }, 19 | { 20 | "cell_type": "code", 21 | "execution_count": null, 22 | "metadata": {}, 23 | "outputs": [], 24 | "source": [ 25 | "def func_replace(a, b, filename):\n", 26 | " with open(filename,'r+', encoding=\"utf8\") as f:\n", 27 | " #convert to string:\n", 28 | " data = f.read()\n", 29 | " f.seek(0)\n", 30 | " f.write(data.replace(a, b))\n", 31 | " f.truncate()" 32 | ] 33 | }, 34 | { 35 | "cell_type": "code", 36 | "execution_count": null, 37 | "metadata": {}, 38 | "outputs": [], 39 | "source": [ 40 | "def replace(srr, name):\n", 41 | " \n", 42 | " func_replace(\"_static/figures/\", 
\"_static/lecture_specific/\"+srr+\"/\", name)\n", 43 | " \n", 44 | " func_replace(\"_static/code/\", \"_static/lecture_specific/\", name)\n", 45 | " \n", 46 | " func_replace(\"_static/pdfs/\", \"_static/lecture_specific/\"+srr+\"/\", name)" 47 | ] 48 | }, 49 | { 50 | "cell_type": "code", 51 | "execution_count": null, 52 | "metadata": {}, 53 | "outputs": [], 54 | "source": [ 55 | "count=0\n", 56 | "for name in files:\n", 57 | " #name = \"von_neumann_model.tex\"\n", 58 | " srr =\"\"\n", 59 | " srr += name[0:len(name)-4]\n", 60 | " print(srr+\"\\n\")\n", 61 | " count+=1\n", 62 | " replace(srr,name)\n", 63 | " #make_changes(name)\n", 64 | " #sections_name(name)\n", 65 | " #func_replace(name[0:len(name)-4]+\"}\"+\"\\n\"+\" \\\\end{itemize}\"+\"\\n\"+\"\\\\end{itemize}\", name[0:len(name)-4]+\"}\"+\"\\n\"+\" \\\\end{itemize}\",name)\n", 66 | "\n", 67 | "print(count)" 68 | ] 69 | } 70 | ], 71 | "metadata": { 72 | "kernelspec": { 73 | "display_name": "Python 3", 74 | "language": "python", 75 | "name": "python3" 76 | }, 77 | "language_info": { 78 | "codemirror_mode": { 79 | "name": "ipython", 80 | "version": 3 81 | }, 82 | "file_extension": ".py", 83 | "mimetype": "text/x-python", 84 | "name": "python", 85 | "nbconvert_exporter": "python", 86 | "pygments_lexer": "ipython3", 87 | "version": "3.7.3" 88 | } 89 | }, 90 | "nbformat": 4, 91 | "nbformat_minor": 2 92 | } 93 | -------------------------------------------------------------------------------- /source/rst/.ipynb_checkpoints/changethis-checkpoint.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "import glob\n", 10 | "import re\n", 11 | "import os\n", 12 | "import errno\n", 13 | "path1 = '/home/anju/Documents/lecture-source-py/source/rst/*.rst'\n", 14 | "path2 = '*.tex'\n", 15 | "path = '*.rst'\n", 16 | "files = glob.glob(path)" 17 | ] 18 | }, 
19 | { 20 | "cell_type": "code", 21 | "execution_count": 6, 22 | "metadata": {}, 23 | "outputs": [], 24 | "source": [ 25 | "def func_replace(a, b, filename):\n", 26 | " with open(filename,'r+', encoding=\"utf8\") as f:\n", 27 | " #convert to string:\n", 28 | " data = f.read()\n", 29 | " f.seek(0)\n", 30 | " f.write(data.replace(a, b))\n", 31 | " f.truncate()" 32 | ] 33 | }, 34 | { 35 | "cell_type": "code", 36 | "execution_count": 7, 37 | "metadata": {}, 38 | "outputs": [], 39 | "source": [ 40 | "def replace(srr, name):\n", 41 | " \n", 42 | " func_replace(\"_static/figures/\", \"_static/lecture_specific/\"+srr+\"/\", name)\n", 43 | " \n", 44 | " func_replace(\"_static/code/\", \"_static/lecture_specific/\", name)\n", 45 | " \n", 46 | " func_replace(\"_static/pdfs/\", \"_static/lecture_specific/\"+srr+\"/\", name)" 47 | ] 48 | }, 49 | { 50 | "cell_type": "code", 51 | "execution_count": 8, 52 | "metadata": {}, 53 | "outputs": [ 54 | { 55 | "name": "stdout", 56 | "output_type": "stream", 57 | "text": [ 58 | "classical_filtering\n", 59 | "\n", 60 | "coase\n", 61 | "\n", 62 | "coleman_policy_iter\n", 63 | "\n", 64 | "amss\n", 65 | "\n", 66 | "amss2\n", 67 | "\n", 68 | "amss3\n", 69 | "\n", 70 | "complex_and_trig\n", 71 | "\n", 72 | "arma\n", 73 | "\n", 74 | "cass_koopmans\n", 75 | "\n", 76 | "arellano\n", 77 | "\n", 78 | "career\n", 79 | "\n", 80 | "chang_ramsey\n", 81 | "\n", 82 | "chang_credible\n", 83 | "\n", 84 | "cattle_cycles\n", 85 | "\n", 86 | "calvo\n", 87 | "\n", 88 | "black_litterman\n", 89 | "\n", 90 | "16\n" 91 | ] 92 | } 93 | ], 94 | "source": [ 95 | "count=0\n", 96 | "for name in files:\n", 97 | " #name = \"von_neumann_model.tex\"\n", 98 | " srr =\"\"\n", 99 | " srr += name[0:len(name)-4]\n", 100 | " print(srr+\"\\n\")\n", 101 | " count+=1\n", 102 | " replace(srr,name)\n", 103 | " #make_changes(name)\n", 104 | " #sections_name(name)\n", 105 | " #func_replace(name[0:len(name)-4]+\"}\"+\"\\n\"+\" \\\\end{itemize}\"+\"\\n\"+\"\\\\end{itemize}\", 
name[0:len(name)-4]+\"}\"+\"\\n\"+\" \\\\end{itemize}\",name)\n", 106 | "\n", 107 | "print(count)" 108 | ] 109 | } 110 | ], 111 | "metadata": { 112 | "kernelspec": { 113 | "display_name": "Python 3", 114 | "language": "python", 115 | "name": "python3" 116 | }, 117 | "language_info": { 118 | "codemirror_mode": { 119 | "name": "ipython", 120 | "version": 3 121 | }, 122 | "file_extension": ".py", 123 | "mimetype": "text/x-python", 124 | "name": "python", 125 | "nbconvert_exporter": "python", 126 | "pygments_lexer": "ipython3", 127 | "version": "3.7.3" 128 | } 129 | }, 130 | "nbformat": 4, 131 | "nbformat_minor": 2 132 | } 133 | -------------------------------------------------------------------------------- /source/rst/404.rst: -------------------------------------------------------------------------------- 1 | .. _404: 2 | 3 | *************** 4 | Page Not Found 5 | *************** 6 | 7 | .. raw:: html 8 | 9 | 10 | 11 | 12 | We couldn’t find the page you were looking for. 13 | 14 | Please check the URL or try a link below: 15 | 16 | * `Home `_ 17 | * `QuantEcon `_ 18 | * `Quantitative Economics with Python `_ 19 | * `Quantitative Economics with Julia `_ 20 | * `QuantEcon DataScience `_ 21 | * `Forum `_ 22 | * `Contact us `_ -------------------------------------------------------------------------------- /source/rst/_static: -------------------------------------------------------------------------------- 1 | ../_static -------------------------------------------------------------------------------- /source/rst/about_lectures.rst: -------------------------------------------------------------------------------- 1 | .. _about_lectures: 2 | 3 | .. include:: /_static/includes/header.raw 4 | 5 | .. highlight:: python3 6 | 7 | ********************* 8 | About these Lectures 9 | ********************* 10 | 11 | This is one of a series of online texts on modern quantitative 12 | economics and programming with Python. 
This is the second text 13 | in the series, which focuses on introductory material. 14 | 15 | For an overview of the series, see `this page `__ -------------------------------------------------------------------------------- /source/rst/egm_policy_iter.rst: -------------------------------------------------------------------------------- 1 | .. include:: /_static/includes/header.raw 2 | 3 | .. highlight:: python3 4 | 5 | ******************************************************** 6 | :index:`Optimal Growth IV: The Endogenous Grid Method` 7 | ******************************************************** 8 | 9 | .. contents:: :depth: 2 10 | 11 | In addition to what's in Anaconda, this lecture will need the following libraries: 12 | 13 | .. code-block:: ipython 14 | :class: hide-output 15 | 16 | !pip install quantecon 17 | !pip install interpolation 18 | 19 | Overview 20 | ============ 21 | 22 | Previously, we solved the stochastic optimal growth model using 23 | 24 | #. :doc:`value function iteration ` 25 | #. :doc:`Euler equation based time iteration ` 26 | 27 | We found time iteration to be significantly more accurate and efficient. 28 | 29 | In this lecture, we'll look at a clever twist on time iteration called the **endogenous grid method** (EGM). 30 | 31 | EGM is a numerical method for implementing policy iteration invented by `Chris Carroll `__. 32 | 33 | The original reference is :cite:`Carroll2006`. 34 | 35 | Let's start with some standard imports: 36 | 37 | .. code-block:: ipython 38 | 39 | import numpy as np 40 | import quantecon as qe 41 | from interpolation import interp 42 | from numba import njit, float64 43 | from numba.experimental import jitclass 44 | from quantecon.optimize import brentq 45 | import matplotlib.pyplot as plt 46 | %matplotlib inline 47 | 48 | 49 | Key Idea 50 | ======== 51 | 52 | Let's start by reminding ourselves of the theory and then see how the numerics fit in. 
53 | 54 | 55 | 56 | Theory 57 | ------ 58 | 59 | Take the model set out in :doc:`the time iteration lecture `, following the same terminology and notation. 60 | 61 | The Euler equation is 62 | 63 | .. math:: 64 | :label: egm_euler 65 | 66 | (u'\circ \sigma^*)(y) 67 | = \beta \int (u'\circ \sigma^*)(f(y - \sigma^*(y)) z) f'(y - \sigma^*(y)) z \phi(dz) 68 | 69 | 70 | As we saw, the Coleman-Reffett operator is a nonlinear operator :math:`K` engineered so that :math:`\sigma^*` is a fixed point of :math:`K`. 71 | 72 | It takes as its argument a continuous strictly increasing consumption policy :math:`\sigma \in \Sigma`. 73 | 74 | It returns a new function :math:`K \sigma`, where :math:`(K \sigma)(y)` is the :math:`c \in (0, \infty)` that solves 75 | 76 | .. math:: 77 | :label: egm_coledef 78 | 79 | u'(c) 80 | = \beta \int (u' \circ \sigma) (f(y - c) z ) f'(y - c) z \phi(dz) 81 | 82 | 83 | 84 | Exogenous Grid 85 | ------------------- 86 | 87 | As discussed in :doc:`the lecture on time iteration `, to implement the method on a computer, we need a numerical approximation. 88 | 89 | In particular, we represent a policy function by a set of values on a finite grid. 90 | 91 | The function itself is reconstructed from this representation when necessary, using interpolation or some other method. 92 | 93 | :doc:`Previously `, to obtain a finite representation of an updated consumption policy, we 94 | 95 | * fixed a grid of income points :math:`\{y_i\}` 96 | 97 | * calculated the consumption value :math:`c_i` corresponding to each 98 | :math:`y_i` using :eq:`egm_coledef` and a root-finding routine 99 | 100 | Each :math:`c_i` is then interpreted as the value of the function :math:`K \sigma` at :math:`y_i`. 101 | 102 | Thus, with the points :math:`\{y_i, c_i\}` in hand, we can reconstruct :math:`K \sigma` via approximation. 103 | 104 | Iteration then continues... 
105 | 106 | 107 | 108 | 109 | Endogenous Grid 110 | -------------------- 111 | 112 | The method discussed above requires a root-finding routine to find the 113 | :math:`c_i` corresponding to a given income value :math:`y_i`. 114 | 115 | Root-finding is costly because it typically involves a significant number of 116 | function evaluations. 117 | 118 | As pointed out by Carroll :cite:`Carroll2006`, we can avoid this if 119 | :math:`y_i` is chosen endogenously. 120 | 121 | The only assumption required is that :math:`u'` is invertible on :math:`(0, \infty)`. 122 | 123 | Let :math:`(u')^{-1}` be the inverse function of :math:`u'`. 124 | 125 | The idea is this: 126 | 127 | * First, we fix an *exogenous* grid :math:`\{k_i\}` for capital (:math:`k = y - c`). 128 | 129 | * Then we obtain :math:`c_i` via 130 | 131 | .. math:: 132 | :label: egm_getc 133 | 134 | c_i = 135 | (u')^{-1} 136 | \left\{ 137 | \beta \int (u' \circ \sigma) (f(k_i) z ) \, f'(k_i) \, z \, \phi(dz) 138 | \right\} 139 | 140 | 141 | * Finally, for each :math:`c_i` we set :math:`y_i = c_i + k_i`. 142 | 143 | It is clear that each :math:`(y_i, c_i)` pair constructed in this manner satisfies :eq:`egm_coledef`. 144 | 145 | With the points :math:`\{y_i, c_i\}` in hand, we can reconstruct :math:`K \sigma` via approximation as before. 146 | 147 | The name EGM comes from the fact that the grid :math:`\{y_i\}` is determined **endogenously**. 148 | 149 | 150 | Implementation 151 | ================ 152 | 153 | As :doc:`before `, we will start with a simple setting 154 | where 155 | 156 | * :math:`u(c) = \ln c`, 157 | 158 | * production is Cobb-Douglas, and 159 | 160 | * the shocks are lognormal. 161 | 162 | This will allow us to make comparisons with the analytical solutions 163 | 164 | .. literalinclude:: /_static/lecture_specific/optgrowth/cd_analytical.py 165 | 166 | We reuse the ``OptimalGrowthModel`` class 167 | 168 | .. 
literalinclude:: /_static/lecture_specific/optgrowth_fast/ogm.py 169 | 170 | 171 | 172 | The Operator 173 | ---------------- 174 | 175 | 176 | Here's an implementation of :math:`K` using EGM as described above. 177 | 178 | .. code-block:: python3 179 | 180 | @njit 181 | def K(σ_array, og): 182 | """ 183 | The Coleman-Reffett operator using EGM 184 | 185 | """ 186 | 187 | # Simplify names 188 | f, β = og.f, og.β 189 | f_prime, u_prime = og.f_prime, og.u_prime 190 | u_prime_inv = og.u_prime_inv 191 | grid, shocks = og.grid, og.shocks 192 | 193 | # Determine endogenous grid 194 | y = grid + σ_array # y_i = k_i + c_i 195 | 196 | # Linear interpolation of policy using endogenous grid 197 | σ = lambda x: interp(y, σ_array, x) 198 | 199 | # Allocate memory for new consumption array 200 | c = np.empty_like(grid) 201 | 202 | # Solve for updated consumption value 203 | for i, k in enumerate(grid): 204 | vals = u_prime(σ(f(k) * shocks)) * f_prime(k) * shocks 205 | c[i] = u_prime_inv(β * np.mean(vals)) 206 | 207 | return c 208 | 209 | 210 | 211 | Note the lack of any root-finding algorithm. 212 | 213 | Testing 214 | ------- 215 | 216 | First we create an instance. 217 | 218 | .. code-block:: python3 219 | 220 | og = OptimalGrowthModel() 221 | grid = og.grid 222 | 223 | Here's our solver routine: 224 | 225 | .. literalinclude:: /_static/lecture_specific/coleman_policy_iter/solve_time_iter.py 226 | 227 | Let's call it: 228 | 229 | .. code-block:: python3 230 | 231 | σ_init = np.copy(grid) 232 | σ = solve_model_time_iter(og, σ_init) 233 | 234 | Here is a plot of the resulting policy, compared with the true policy: 235 | 236 | .. 
code-block:: python3 237 | 238 | y = grid + σ # y_i = k_i + c_i 239 | 240 | fig, ax = plt.subplots() 241 | 242 | ax.plot(y, σ, lw=2, 243 | alpha=0.8, label='approximate policy function') 244 | 245 | ax.plot(y, σ_star(y, og.α, og.β), 'k--', 246 | lw=2, alpha=0.8, label='true policy function') 247 | 248 | ax.legend() 249 | plt.show() 250 | 251 | The maximal absolute deviation between the two policies is 252 | 253 | .. code-block:: python3 254 | 255 | np.max(np.abs(σ - σ_star(y, og.α, og.β))) 256 | 257 | 258 | How long does it take to converge? 259 | 260 | .. code-block:: python3 261 | 262 | %%timeit -n 3 -r 1 263 | σ = solve_model_time_iter(og, σ_init, verbose=False) 264 | 265 | 266 | Relative to time iteration, which as already found to be highly efficient, EGM 267 | has managed to shave off still more run time without compromising accuracy. 268 | 269 | This is due to the lack of a numerical root-finding step. 270 | 271 | We can now solve the optimal growth model at given parameters extremely fast. 272 | -------------------------------------------------------------------------------- /source/rst/index.rst: -------------------------------------------------------------------------------- 1 | .. _index: 2 | 3 | *********************************************** 4 | Quantitative Economics with Python 5 | *********************************************** 6 | 7 | .. toctree:: 8 | :hidden: 9 | 10 | index_toc 11 | 12 | 13 | .. raw:: html 14 | 15 |
16 |

Quantitative Economics with Python

17 |
18 |
19 |
20 |

This website presents a set of lectures on quantitative economic modeling, designed and written by Thomas J. Sargent and John Stachurski.

21 |

Last compiled:
22 | View source | 23 | View commits | See all contributors

24 |
25 | 32 |
33 | 37 |
38 | 57 | 58 | 59 | -------------------------------------------------------------------------------- /source/rst/index_asset_pricing.rst: -------------------------------------------------------------------------------- 1 | .. _asset_pricing: 2 | 3 | .. include:: /_static/includes/header.raw 4 | 5 | ********************************************** 6 | Asset Pricing and Finance 7 | ********************************************** 8 | 9 | 10 | .. only:: html 11 | 12 | Lectures 13 | ******** 14 | 15 | 16 | 17 | .. toctree:: 18 | :maxdepth: 2 19 | 20 | markov_asset 21 | harrison_kreps 22 | -------------------------------------------------------------------------------- /source/rst/index_data_and_empirics.rst: -------------------------------------------------------------------------------- 1 | .. include:: /_static/includes/header.raw 2 | 3 | ********************************* 4 | Data and Empirics 5 | ********************************* 6 | 7 | This part of the course provides a set of lectures focused on Data and 8 | Empirics using Python 9 | 10 | 11 | .. only:: html 12 | 13 | Lectures 14 | ******** 15 | 16 | 17 | .. toctree:: 18 | :maxdepth: 2 19 | 20 | pandas_panel 21 | ols 22 | mle 23 | -------------------------------------------------------------------------------- /source/rst/index_information.rst: -------------------------------------------------------------------------------- 1 | .. include:: /_static/includes/header.raw 2 | 3 | *********** 4 | Information 5 | *********** 6 | 7 | This section of the course contains foundational models for dynamic economic 8 | modeling. Most are single agent problems that take the activities of other 9 | agents as given. Later we will look at full equilibrium problems. 10 | 11 | 12 | .. only:: html 13 | 14 | Lectures 15 | ******** 16 | 17 | 18 | 19 | .. 
toctree:: 20 | :maxdepth: 2 21 | 22 | odu 23 | likelihood_ratio_process 24 | wald_friedman 25 | exchangeable 26 | likelihood_bayes 27 | navy_captain 28 | 29 | -------------------------------------------------------------------------------- /source/rst/index_intro_dynam.rst: -------------------------------------------------------------------------------- 1 | .. include:: /_static/includes/header.raw 2 | 3 | *************************************** 4 | Introduction to Dynamics 5 | *************************************** 6 | 7 | This section of the course contains foundational models for dynamic economic 8 | modeling. Most are single agent problems that take the activities of other 9 | agents as given. Later we will look at full equilibrium problems. 10 | 11 | 12 | .. only:: html 13 | 14 | Lectures 15 | ******** 16 | 17 | 18 | 19 | .. toctree:: 20 | :maxdepth: 2 21 | 22 | scalar_dynam 23 | ar1_processes 24 | finite_markov 25 | inventory_dynamics 26 | linear_models 27 | samuelson 28 | kesten_processes 29 | wealth_dynamics 30 | kalman 31 | short_path 32 | cass_koopmans_1 33 | cass_koopmans_2 34 | -------------------------------------------------------------------------------- /source/rst/index_lq_control.rst: -------------------------------------------------------------------------------- 1 | .. _lq_dynamic_programming: 2 | 3 | .. include:: /_static/includes/header.raw 4 | 5 | ********** 6 | LQ Control 7 | ********** 8 | 9 | 10 | .. only:: html 11 | 12 | Lectures 13 | ******** 14 | 15 | 16 | 17 | .. toctree:: 18 | :maxdepth: 2 19 | 20 | lqcontrol 21 | perm_income 22 | perm_income_cons 23 | lq_inventories 24 | 25 | -------------------------------------------------------------------------------- /source/rst/index_multi_agent_models.rst: -------------------------------------------------------------------------------- 1 | .. _multi_agent_models: 2 | 3 | .. 
include:: /_static/includes/header.raw 4 | 5 | *************************************** 6 | Multiple Agent Models 7 | *************************************** 8 | 9 | These lectures look at important economic models that also illustrate common 10 | equilibrium concepts. 11 | 12 | .. only:: html 13 | 14 | Lectures 15 | ******** 16 | 17 | .. toctree:: 18 | :maxdepth: 2 19 | 20 | schelling 21 | lake_model 22 | rational_expectations 23 | re_with_feedback 24 | markov_perf 25 | uncertainty_traps 26 | aiyagari 27 | -------------------------------------------------------------------------------- /source/rst/index_savings_growth.rst: -------------------------------------------------------------------------------- 1 | .. include:: /_static/includes/header.raw 2 | 3 | ******************************* 4 | Consumption, Savings and Growth 5 | ******************************* 6 | 7 | This section of the course contains foundational models for dynamic economic 8 | modeling. Most are single agent problems that take the activities of other 9 | agents as given. Later we will look at full equilibrium problems. 10 | 11 | 12 | .. only:: html 13 | 14 | Lectures 15 | ******** 16 | 17 | 18 | 19 | .. toctree:: 20 | :maxdepth: 2 21 | 22 | cake_eating_problem.rst 23 | cake_eating_numerical.rst 24 | optgrowth 25 | optgrowth_fast 26 | coleman_policy_iter 27 | egm_policy_iter 28 | ifp 29 | ifp_advanced 30 | -------------------------------------------------------------------------------- /source/rst/index_search.rst: -------------------------------------------------------------------------------- 1 | .. include:: /_static/includes/header.raw 2 | 3 | ****** 4 | Search 5 | ****** 6 | 7 | This section of the course contains foundational models for dynamic economic 8 | modeling. Most are single agent problems that take the activities of other 9 | agents as given. Later we will look at full equilibrium problems. 10 | 11 | 12 | .. only:: html 13 | 14 | Lectures 15 | ******** 16 | 17 | 18 | 19 | .. 
toctree:: 20 | :maxdepth: 2 21 | 22 | mccall_model 23 | mccall_model_with_separation 24 | mccall_fitted_vfi 25 | mccall_correlated 26 | career 27 | jv 28 | 29 | -------------------------------------------------------------------------------- /source/rst/index_toc.rst: -------------------------------------------------------------------------------- 1 | .. _toc: 2 | 3 | .. raw:: html 4 | 5 |

Powered by NumFOCUS logo

6 | 7 | .. only:: html 8 | 9 | Table of Contents 10 | ***************** 11 | 12 | 13 | .. toctree:: 14 | :maxdepth: 2 15 | :titlesonly: 16 | 17 | about_lectures 18 | index_tools_and_techniques 19 | index_intro_dynam 20 | index_search 21 | index_savings_growth 22 | index_information 23 | index_lq_control 24 | index_multi_agent_models 25 | index_asset_pricing 26 | index_data_and_empirics 27 | zreferences 28 | 29 | 30 | .. toctree:: 31 | :hidden: 32 | 33 | 404 34 | search 35 | status 36 | troubleshooting 37 | 38 | | 39 | 40 | .. image:: http://assets.quantecon.org/img/banner.png 41 | :scale: 30% 42 | :align: center 43 | 44 | .. only:: latex 45 | 46 | Acknowledgements: These lectures have benefitted greatly from comments and 47 | suggestion from our colleagues, students and friends. Special thanks go to 48 | Anmol Bhandari, Long Bui, Jeong-Hun Choi, Chase Coleman, David Evans, Shunsuke Hori, 49 | Chenghan Hou, Doc-Jin Jang, Spencer Lyon, Qingyin Ma, Akira Matsushita, 50 | Matthew McKay, Tomohito Okabe, Alex Olssen, Nathan Palmer and Yixiao Zhou. 51 | -------------------------------------------------------------------------------- /source/rst/index_tools_and_techniques.rst: -------------------------------------------------------------------------------- 1 | .. _tools_and_techniques: 2 | 3 | .. include:: /_static/includes/header.raw 4 | 5 | *************************************** 6 | Tools and Techniques 7 | *************************************** 8 | 9 | This section of the course contains foundational mathematical and statistical 10 | tools and techniques 11 | 12 | .. only:: html 13 | 14 | Lectures 15 | ******** 16 | 17 | .. 
toctree:: 18 | :maxdepth: 2 19 | 20 | geom_series 21 | multi_hyper 22 | sir_model 23 | linear_algebra 24 | complex_and_trig 25 | lln_clt 26 | heavy_tails 27 | multivariate_normal 28 | time_series_with_matrices 29 | -------------------------------------------------------------------------------- /source/rst/inventory_dynamics.rst: -------------------------------------------------------------------------------- 1 | 2 | .. include:: /_static/includes/header.raw 3 | 4 | .. highlight:: python3 5 | 6 | ****************** 7 | Inventory Dynamics 8 | ****************** 9 | 10 | .. index:: 11 | single: Markov process, inventory 12 | 13 | .. contents:: :depth: 2 14 | 15 | Overview 16 | ======== 17 | 18 | In this lecture we will study the time path of inventories for firms that 19 | follow so-called s-S inventory dynamics. 20 | 21 | Such firms 22 | 23 | 1. wait until inventory falls below some level :math:`s` and then 24 | 2. order sufficient quantities to bring their inventory back up to capacity :math:`S`. 25 | 26 | These kinds of policies are common in practice and also optimal in certain circumstances. 27 | 28 | A review of early literature and some macroeconomic implications can be found in :cite:`caplin1985variability`. 29 | 30 | Here our main aim is to learn more about simulation, time series and Markov dynamics. 31 | 32 | While our Markov environment and many of the concepts we consider are related to those found in our :doc:`lecture on finite Markov chains `, the state space is a continuum in the current application. 33 | 34 | Let's start with some imports 35 | 36 | .. code:: ipython3 37 | 38 | import numpy as np 39 | import matplotlib.pyplot as plt 40 | %matplotlib inline 41 | 42 | from numba import njit, float64, prange 43 | from numba.experimental import jitclass 44 | 45 | 46 | Sample Paths 47 | ============ 48 | 49 | Consider a firm with inventory :math:`X_t`. 50 | 51 | The firm waits until :math:`X_t \leq s` and then restocks up to :math:`S` units. 
52 | 53 | It faces stochastic demand :math:`\{ D_t \}`, which we assume is IID. 54 | 55 | With notation :math:`a^+ := \max\{a, 0\}`, inventory dynamics can be written 56 | as 57 | 58 | .. math:: 59 | 60 | X_{t+1} = 61 | \begin{cases} 62 | ( S - D_{t+1})^+ & \quad \text{if } X_t \leq s \\ 63 | ( X_t - D_{t+1} )^+ & \quad \text{if } X_t > s 64 | \end{cases} 65 | 66 | 67 | In what follows, we will assume that each :math:`D_t` is lognormal, so that 68 | 69 | .. math:: 70 | 71 | D_t = \exp(\mu + \sigma Z_t) 72 | 73 | where :math:`\mu` and :math:`\sigma` are parameters and :math:`\{Z_t\}` is IID 74 | and standard normal. 75 | 76 | Here's a class that stores parameters and generates time paths for inventory. 77 | 78 | 79 | .. code-block:: python3 80 | 81 | firm_data = [ 82 | ('s', float64), # restock trigger level 83 | ('S', float64), # capacity 84 | ('mu', float64), # shock location parameter 85 | ('sigma', float64) # shock scale parameter 86 | ] 87 | 88 | 89 | @jitclass(firm_data) 90 | class Firm: 91 | 92 | def __init__(self, s=10, S=100, mu=1.0, sigma=0.5): 93 | 94 | self.s, self.S, self.mu, self.sigma = s, S, mu, sigma 95 | 96 | def update(self, x): 97 | "Update the state from t to t+1 given current state x." 98 | 99 | Z = np.random.randn() 100 | D = np.exp(self.mu + self.sigma * Z) 101 | if x <= self.s: 102 | return max(self.S - D, 0) 103 | else: 104 | return max(x - D, 0) 105 | 106 | def sim_inventory_path(self, x_init, sim_length): 107 | 108 | X = np.empty(sim_length) 109 | X[0] = x_init 110 | 111 | for t in range(sim_length-1): 112 | X[t+1] = self.update(X[t]) 113 | return X 114 | 115 | 116 | Let's run a first simulation, of a single path: 117 | 118 | .. 
code:: ipython3 119 | 120 | firm = Firm() 121 | 122 | s, S = firm.s, firm.S 123 | sim_length = 100 124 | x_init = 50 125 | 126 | X = firm.sim_inventory_path(x_init, sim_length) 127 | 128 | fig, ax = plt.subplots() 129 | bbox = (0., 1.02, 1., .102) 130 | legend_args = {'ncol': 3, 131 | 'bbox_to_anchor': bbox, 132 | 'loc': 3, 133 | 'mode': 'expand'} 134 | 135 | ax.plot(X, label="inventory") 136 | ax.plot(s * np.ones(sim_length), 'k--', label="$s$") 137 | ax.plot(S * np.ones(sim_length), 'k-', label="$S$") 138 | ax.set_ylim(0, S+10) 139 | ax.set_xlabel("time") 140 | ax.legend(**legend_args) 141 | 142 | plt.show() 143 | 144 | 145 | Now let's simulate multiple paths in order to build a more complete picture of 146 | the probabilities of different outcomes: 147 | 148 | .. code:: ipython3 149 | 150 | sim_length=200 151 | fig, ax = plt.subplots() 152 | 153 | ax.plot(s * np.ones(sim_length), 'k--', label="$s$") 154 | ax.plot(S * np.ones(sim_length), 'k-', label="$S$") 155 | ax.set_ylim(0, S+10) 156 | ax.legend(**legend_args) 157 | 158 | for i in range(400): 159 | X = firm.sim_inventory_path(x_init, sim_length) 160 | ax.plot(X, 'b', alpha=0.2, lw=0.5) 161 | 162 | plt.show() 163 | 164 | 165 | Marginal Distributions 166 | ====================== 167 | 168 | Now let’s look at the marginal distribution :math:`\psi_T` of :math:`X_T` for some 169 | fixed :math:`T`. 170 | 171 | We will do this by generating many draws of :math:`X_T` given initial 172 | condition :math:`X_0`. 173 | 174 | With these draws of :math:`X_T` we can build up a picture of its distribution :math:`\psi_T`. 175 | 176 | Here's one visualization, with :math:`T=50`. 177 | 178 | .. 
code:: ipython3 179 | 180 | T = 50 181 | M = 200 # Number of draws 182 | 183 | ymin, ymax = 0, S + 10 184 | 185 | fig, axes = plt.subplots(1, 2, figsize=(11, 6)) 186 | 187 | for ax in axes: 188 | ax.grid(alpha=0.4) 189 | 190 | ax = axes[0] 191 | 192 | ax.set_ylim(ymin, ymax) 193 | ax.set_ylabel('$X_t$', fontsize=16) 194 | ax.vlines((T,), -1.5, 1.5) 195 | 196 | ax.set_xticks((T,)) 197 | ax.set_xticklabels((r'$T$',)) 198 | 199 | sample = np.empty(M) 200 | for m in range(M): 201 | X = firm.sim_inventory_path(x_init, 2 * T) 202 | ax.plot(X, 'b-', lw=1, alpha=0.5) 203 | ax.plot((T,), (X[T+1],), 'ko', alpha=0.5) 204 | sample[m] = X[T+1] 205 | 206 | axes[1].set_ylim(ymin, ymax) 207 | 208 | axes[1].hist(sample, 209 | bins=16, 210 | density=True, 211 | orientation='horizontal', 212 | histtype='bar', 213 | alpha=0.5) 214 | 215 | plt.show() 216 | 217 | We can build up a clearer picture by drawing more samples 218 | 219 | .. code:: ipython3 220 | 221 | T = 50 222 | M = 50_000 223 | 224 | fig, ax = plt.subplots() 225 | 226 | sample = np.empty(M) 227 | for m in range(M): 228 | X = firm.sim_inventory_path(x_init, T+1) 229 | sample[m] = X[T] 230 | 231 | ax.hist(sample, 232 | bins=36, 233 | density=True, 234 | histtype='bar', 235 | alpha=0.75) 236 | 237 | plt.show() 238 | 239 | Note that the distribution is bimodal 240 | 241 | * Most firms have restocked twice but a few have restocked only once (see figure with paths above). 242 | 243 | * Firms in the second category have lower inventory. 244 | 245 | We can also approximate the distribution using a `kernel density estimator 246 | `__. 247 | 248 | Kernel density estimators can be thought of as smoothed histograms. 249 | 250 | They are preferable to histograms when the distribution being estimated is likely to be smooth. 251 | 252 | We will use a kernel density estimator from `scikit-learn `__ 253 | 254 | .. 
code:: ipython3 255 | 256 | from sklearn.neighbors import KernelDensity 257 | 258 | def plot_kde(sample, ax, label=''): 259 | 260 | xmin, xmax = 0.9 * min(sample), 1.1 * max(sample) 261 | xgrid = np.linspace(xmin, xmax, 200) 262 | kde = KernelDensity(kernel='gaussian').fit(sample[:, None]) 263 | log_dens = kde.score_samples(xgrid[:, None]) 264 | 265 | ax.plot(xgrid, np.exp(log_dens), label=label) 266 | 267 | .. code:: ipython3 268 | 269 | fig, ax = plt.subplots() 270 | plot_kde(sample, ax) 271 | plt.show() 272 | 273 | The allocation of probability mass is similar to what was shown by the 274 | histogram just above. 275 | 276 | Exercises 277 | ========= 278 | 279 | Exercise 1 280 | ---------- 281 | 282 | This model is asymptotically stationary, with a unique stationary 283 | distribution. 284 | 285 | (See the discussion of stationarity in :doc:`our lecture on AR(1) processes ` for background --- the fundamental concepts are the same.) 286 | 287 | In particular, the sequence of marginal distributions :math:`\{\psi_t\}` 288 | is converging to a unique limiting distribution that does not depend on 289 | initial conditions. 290 | 291 | Although we will not prove this here, we can investigate it using simulation. 292 | 293 | Your task is to generate and plot the sequence :math:`\{\psi_t\}` at times 294 | :math:`t = 10, 50, 250, 500, 750` based on the discussion above. 295 | 296 | (The kernel density estimator is probably the best way to present each 297 | distribution.) 298 | 299 | You should see convergence, in the sense that differences between successive distributions are getting smaller. 300 | 301 | Try different initial conditions to verify that, in the long run, the distribution is invariant across initial conditions. 302 | 303 | 304 | Exercise 2 305 | ---------- 306 | 307 | Using simulation, calculate the probability that firms that start with 308 | :math:`X_0 = 70` need to order twice or more in the first 50 periods. 
309 | 310 | You will need a large sample size to get an accurate reading. 311 | 312 | 313 | Solutions 314 | ========= 315 | 316 | Exercise 1 317 | ---------- 318 | 319 | Below is one possible solution: 320 | 321 | The computations involve a lot of CPU cycles so we have tried to write the 322 | code efficiently. 323 | 324 | This meant writing a specialized function rather than using the class above. 325 | 326 | .. code:: ipython3 327 | 328 | s, S, mu, sigma = firm.s, firm.S, firm.mu, firm.sigma 329 | 330 | @njit(parallel=True) 331 | def shift_firms_forward(current_inventory_levels, num_periods): 332 | 333 | num_firms = len(current_inventory_levels) 334 | new_inventory_levels = np.empty(num_firms) 335 | 336 | for f in prange(num_firms): 337 | x = current_inventory_levels[f] 338 | for t in range(num_periods): 339 | Z = np.random.randn() 340 | D = np.exp(mu + sigma * Z) 341 | if x <= s: 342 | x = max(S - D, 0) 343 | else: 344 | x = max(x - D, 0) 345 | new_inventory_levels[f] = x 346 | 347 | return new_inventory_levels 348 | 349 | 350 | .. code:: ipython3 351 | 352 | x_init = 50 353 | num_firms = 50_000 354 | 355 | sample_dates = 0, 10, 50, 250, 500, 750 356 | 357 | first_diffs = np.diff(sample_dates) 358 | 359 | fig, ax = plt.subplots() 360 | 361 | X = np.ones(num_firms) * x_init 362 | 363 | current_date = 0 364 | for d in first_diffs: 365 | X = shift_firms_forward(X, d) 366 | current_date += d 367 | plot_kde(X, ax, label=f't = {current_date}') 368 | 369 | ax.set_xlabel('inventory') 370 | ax.set_ylabel('probability') 371 | ax.legend() 372 | plt.show() 373 | 374 | Notice that by :math:`t=500` or :math:`t=750` the densities are barely 375 | changing. 376 | 377 | We have reached a reasonable approximation of the stationary density. 378 | 379 | You can convince yourself that initial conditions don’t matter by 380 | testing a few of them. 381 | 382 | For example, try rerunning the code above will all firms starting at 383 | :math:`X_0 = 20` or :math:`X_0 = 80`. 
384 | 385 | 386 | 387 | Exercise 2 388 | ---------- 389 | 390 | Here is one solution. 391 | 392 | Again, the computations are relatively intensive so we have written a a 393 | specialized function rather than using the class above. 394 | 395 | We will also use parallelization across firms. 396 | 397 | .. code:: ipython3 398 | 399 | @njit(parallel=True) 400 | def compute_freq(sim_length=50, x_init=70, num_firms=1_000_000): 401 | 402 | firm_counter = 0 # Records number of firms that restock 2x or more 403 | for m in prange(num_firms): 404 | x = x_init 405 | restock_counter = 0 # Will record number of restocks for firm m 406 | 407 | for t in range(sim_length): 408 | Z = np.random.randn() 409 | D = np.exp(mu + sigma * Z) 410 | if x <= s: 411 | x = max(S - D, 0) 412 | restock_counter += 1 413 | else: 414 | x = max(x - D, 0) 415 | 416 | if restock_counter > 1: 417 | firm_counter += 1 418 | 419 | return firm_counter / num_firms 420 | 421 | Note the time the routine takes to run, as well as the output. 422 | 423 | .. code:: ipython3 424 | 425 | %%time 426 | 427 | freq = compute_freq() 428 | print(f"Frequency of at least two stock outs = {freq}") 429 | 430 | Try switching the ``parallel`` flag to ``False`` in the jitted function 431 | above. 432 | 433 | Depending on your system, the difference can be substantial. 434 | 435 | (On our desktop machine, the speed up is by a factor of 5.) 436 | 437 | 438 | -------------------------------------------------------------------------------- /source/rst/likelihood_bayes.rst: -------------------------------------------------------------------------------- 1 | .. _likelihood_ratio_process: 2 | 3 | .. include:: /_static/includes/header.raw 4 | 5 | .. highlight:: python3 6 | 7 | ************************************************* 8 | Likelihood Ratio Processes and Bayesian Learning 9 | ************************************************* 10 | 11 | .. contents:: :depth: 2 12 | 13 | 14 | .. 
code-block:: ipython 15 | 16 | import numpy as np 17 | import matplotlib.pyplot as plt 18 | from numba import vectorize, njit 19 | from math import gamma 20 | %matplotlib inline 21 | 22 | 23 | Overview 24 | ========= 25 | This lecture describes the role that **likelihood ratio processes** play in **Bayesian learning**. 26 | 27 | As in :doc:`this lecture `, we'll use a simple statistical setting from :doc:`this lecture `. 28 | 29 | We'll focus on how a likelihood ratio process and a **prior** probability determine a **posterior** probability. 30 | 31 | We'll derive a convenient recursion for today's posterior as a function of yesterday's posterior and 32 | today's multiplicative increment to a likelihood process. 33 | 34 | We'll also present a useful generalization of that formula that represents today's posterior in terms of an initial prior and 35 | today's realization of the likelihood ratio process. 36 | 37 | We'll study how, at least in our setting, a Bayesian eventually learns the probability distribution that generates the data, an outcome that 38 | rests on the asymptotic behavior of likelihood ratio processes studied in :doc:`this lecture `. 39 | 40 | This lecture provides technical results that underly outcomes to be studied in :doc:`this lecture ` 41 | and :doc:`this lecture ` and :doc:`this lecture `. 42 | 43 | 44 | The Setting 45 | ======================== 46 | 47 | We begin by reviewing the setting in :doc:`this lecture `, which we adopt here too. 48 | 49 | 50 | A nonnegative random variable :math:`W` has one of two probability density functions, either 51 | :math:`f` or :math:`g`. 52 | 53 | Before the beginning of time, nature once and for all decides whether she will draw a sequence of IID draws from :math:`f` or from :math:`g`. 54 | 55 | We will sometimes let :math:`q` be the density that nature chose once and for all, so 56 | that :math:`q` is either :math:`f` or :math:`g`, permanently. 
57 | 58 | Nature knows which density it permanently draws from, but we the observers do not. 59 | 60 | We do know both :math:`f` and :math:`g`, but we don’t know which density nature 61 | chose. 62 | 63 | But we want to know. 64 | 65 | To do that, we use observations. 66 | 67 | We observe a sequence :math:`\{w_t\}_{t=1}^T` of :math:`T` IID draws 68 | from either :math:`f` or :math:`g`. 69 | 70 | We want to use these observations to infer whether nature chose :math:`f` or 71 | :math:`g`. 72 | 73 | A **likelihood ratio process** is a useful tool for this task. 74 | 75 | To begin, we define the key component of a likelihood ratio process, namely, the time :math:`t` likelihood ratio as the random variable 76 | 77 | .. math:: 78 | 79 | \ell (w_t)=\frac{f\left(w_t\right)}{g\left(w_t\right)},\quad t\geq1. 80 | 81 | 82 | We assume that :math:`f` and :math:`g` both put positive probabilities on the 83 | same intervals of possible realizations of the random variable :math:`W`. 84 | 85 | That means that under the :math:`g` density, :math:`\ell (w_t)= 86 | \frac{f\left(w_{t}\right)}{g\left(w_{t}\right)}` 87 | is evidently a nonnegative random variable with mean :math:`1`. 88 | 89 | 90 | A **likelihood ratio process** for sequence 91 | :math:`\left\{ w_{t}\right\} _{t=1}^{\infty}` is defined as 92 | 93 | .. math:: 94 | 95 | 96 | L\left(w^{t}\right)=\prod_{i=1}^{t} \ell (w_i), 97 | 98 | where :math:`w^t=\{ w_1,\dots,w_t\}` is a history of 99 | observations up to and including time :math:`t`. 100 | 101 | Sometimes for shorthand we'll write :math:`L_t = L(w^t)`. 102 | 103 | Notice that the likelihood process satisfies the *recursion* or 104 | *multiplicative decomposition* 105 | 106 | .. math:: 107 | 108 | L(w^t) = \ell (w_t) L (w^{t-1}) . 109 | 110 | The likelihood ratio and its logarithm are key tools for making 111 | inferences using a classic frequentist approach due to Neyman and 112 | Pearson :cite:`Neyman_Pearson`. 
113 | 114 | We'll again deploy the following Python code from :doc:`this lecture ` that 115 | evaluates :math:`f` and :math:`g` as two different 116 | beta distributions, then computes and simulates an associated likelihood 117 | ratio process by generating a sequence :math:`w^t` from *some* 118 | probability distribution, for example, a sequence of IID draws from :math:`g`. 119 | 120 | .. code-block:: python3 121 | 122 | # Parameters in the two beta distributions. 123 | F_a, F_b = 1, 1 124 | G_a, G_b = 3, 1.2 125 | 126 | @vectorize 127 | def p(x, a, b): 128 | r = gamma(a + b) / (gamma(a) * gamma(b)) 129 | return r * x** (a-1) * (1 - x) ** (b-1) 130 | 131 | # The two density functions. 132 | f = njit(lambda x: p(x, F_a, F_b)) 133 | g = njit(lambda x: p(x, G_a, G_b)) 134 | 135 | .. code-block:: python3 136 | 137 | @njit 138 | def simulate(a, b, T=50, N=500): 139 | ''' 140 | Generate N sets of T observations of the likelihood ratio, 141 | return as N x T matrix. 142 | 143 | ''' 144 | 145 | l_arr = np.empty((N, T)) 146 | 147 | for i in range(N): 148 | 149 | for j in range(T): 150 | w = np.random.beta(a, b) 151 | l_arr[i, j] = f(w) / g(w) 152 | 153 | return l_arr 154 | 155 | 156 | We'll also use the following Python code to prepare some informative simulations 157 | 158 | 159 | .. code-block:: python3 160 | 161 | l_arr_g = simulate(G_a, G_b, N=50000) 162 | l_seq_g = np.cumprod(l_arr_g, axis=1) 163 | 164 | 165 | 166 | .. code-block:: python3 167 | 168 | l_arr_f = simulate(F_a, F_b, N=50000) 169 | l_seq_f = np.cumprod(l_arr_f, axis=1) 170 | 171 | 172 | 173 | Likelihood Ratio Process and Bayes’ Law 174 | ========================================== 175 | 176 | 177 | Let :math:`\pi_t` be a Bayesian posterior defined as 178 | 179 | .. math:: \pi_t = {\rm Prob}(q=f|w^t) 180 | 181 | The likelihood ratio process is a principal actor in the formula that governs the evolution 182 | of the posterior probability :math:`\pi_t`, an instance of **Bayes' Law**. 
183 | 184 | Bayes’ law implies that :math:`\{\pi_t\}` obeys the recursion 185 | 186 | .. math:: 187 | :label: eq_recur1 188 | 189 | \pi_t=\frac{\pi_{t-1} l_t(w_t)}{\pi_{t-1} l_t(w_t)+1-\pi_{t-1}} 190 | 191 | with :math:`\pi_{0}` being a Bayesian prior probability that :math:`q = f`, 192 | i.e., a personal or subjective belief about :math:`q` based on our having seen no data. 193 | 194 | Below we define a Python function that updates belief :math:`\pi` using 195 | likelihood ratio :math:`\ell` according to recursion :eq:`eq_recur1` 196 | 197 | .. code-block:: python3 198 | 199 | @njit 200 | def update(π, l): 201 | "Update π using likelihood l" 202 | 203 | # Update belief 204 | π = π * l / (π * l + 1 - π) 205 | 206 | return π 207 | 208 | Formula :eq:`eq_recur1` can be generalized by iterating on it and thereby deriving an 209 | expression for the time :math:`t` posterior :math:`\pi_{t+1}` as a function 210 | of the time :math:`0` prior :math:`\pi_0` and the likelihood ratio process 211 | :math:`L(w^{t+1})` at time :math:`t`. 212 | 213 | To begin, notice that the updating rule 214 | 215 | .. math:: 216 | 217 | \pi_{t+1} 218 | =\frac{\pi_{t}\ell \left(w_{t+1}\right)} 219 | {\pi_{t}\ell \left(w_{t+1}\right)+\left(1-\pi_{t}\right)} 220 | 221 | implies 222 | 223 | .. math:: 224 | 225 | 226 | \begin{aligned} 227 | \frac{1}{\pi_{t+1}} 228 | &=\frac{\pi_{t}\ell \left(w_{t+1}\right) 229 | +\left(1-\pi_{t}\right)}{\pi_{t}\ell \left(w_{t+1}\right)} \\ 230 | &=1-\frac{1}{\ell \left(w_{t+1}\right)} 231 | +\frac{1}{\ell \left(w_{t+1}\right)}\frac{1}{\pi_{t}}. 232 | \end{aligned} 233 | 234 | .. math:: 235 | 236 | \Rightarrow 237 | \frac{1}{\pi_{t+1}}-1 238 | =\frac{1}{\ell \left(w_{t+1}\right)}\left(\frac{1}{\pi_{t}}-1\right). 239 | 240 | Therefore 241 | 242 | .. 
math:: 243 | 244 | 245 | \begin{aligned} 246 | \frac{1}{\pi_{t+1}}-1 247 | =\frac{1}{\prod_{i=1}^{t+1}\ell \left(w_{i}\right)} 248 | \left(\frac{1}{\pi_{0}}-1\right) 249 | =\frac{1}{L\left(w^{t+1}\right)}\left(\frac{1}{\pi_{0}}-1\right). 250 | \end{aligned} 251 | 252 | Since :math:`\pi_{0}\in\left(0,1\right)` and 253 | :math:`L\left(w^{t+1}\right)>0`, we can verify that 254 | :math:`\pi_{t+1}\in\left(0,1\right)`. 255 | 256 | After rearranging the preceding equation, we can express :math:`\pi_{t+1}` as a 257 | function of :math:`L\left(w^{t+1}\right)`, the likelihood ratio process at :math:`t+1`, 258 | and the initial prior :math:`\pi_{0}` 259 | 260 | .. math:: 261 | :label: eq_Bayeslaw103 262 | 263 | \pi_{t+1}=\frac{\pi_{0}L\left(w^{t+1}\right)}{\pi_{0}L\left(w^{t+1}\right)+1-\pi_{0}} . 264 | 265 | Formula :eq:`eq_Bayeslaw103` generalizes formula :eq:`eq_recur1`. 266 | 267 | Formula :eq:`eq_Bayeslaw103` can be regarded as a one step revision of prior probability :math:`\pi_0` after seeing 268 | the batch of data :math:`\left\{ w_{i}\right\} _{i=1}^{t+1}`. 269 | 270 | Formula :eq:`eq_Bayeslaw103` shows the key role that the likelihood ratio process :math:`L\left(w^{t+1}\right)` plays in determining 271 | the posterior probability :math:`\pi_{t+1}`. 272 | 273 | Formula :eq:`eq_Bayeslaw103` is the foundation for the insight that, because of how the likelihood ratio process behaves 274 | as :math:`t \rightarrow + \infty`, the likelihood ratio process dominates the initial prior :math:`\pi_0` in determining the 275 | limiting behavior of :math:`\pi_t`. 276 | 277 | To illustrate this insight, below we will plot graphs showing **one** simulated 278 | path of the likelihood ratio process :math:`L_t` along with two paths of 279 | :math:`\pi_t` that are associated with the *same* realization of the likelihood ratio process but *different* initial prior probabilities :math:`\pi_{0}`. 280 | 281 | First, we tell Python two values of :math:`\pi_0`. 282 | 283 | .. 
code-block:: python3 284 | 285 | π1, π2 = 0.2, 0.8 286 | 287 | Next we generate paths of the likelihood ratio process :math:`L_t` and the posterior :math:`\pi_t` for a 288 | history of IID draws from density :math:`f`. 289 | 290 | .. code-block:: python3 291 | 292 | T = l_arr_f.shape[1] 293 | π_seq_f = np.empty((2, T+1)) 294 | π_seq_f[:, 0] = π1, π2 295 | 296 | for t in range(T): 297 | for i in range(2): 298 | π_seq_f[i, t+1] = update(π_seq_f[i, t], l_arr_f[0, t]) 299 | 300 | .. code-block:: python3 301 | 302 | fig, ax1 = plt.subplots() 303 | 304 | for i in range(2): 305 | ax1.plot(range(T+1), π_seq_f[i, :], label=f"$\pi_0$={π_seq_f[i, 0]}") 306 | 307 | ax1.set_ylabel("$\pi_t$") 308 | ax1.set_xlabel("t") 309 | ax1.legend() 310 | ax1.set_title("when f governs data") 311 | 312 | ax2 = ax1.twinx() 313 | ax2.plot(range(1, T+1), np.log(l_seq_f[0, :]), '--', color='b') 314 | ax2.set_ylabel("$log(L(w^{t}))$") 315 | 316 | plt.show() 317 | 318 | 319 | The dotted line in the graph above records the logarithm of the likelihood ratio process :math:`\log L(w^t)`. 320 | 321 | 322 | Please note that there are two different scales on the :math:`y` axis. 323 | 324 | Now let's study what happens when the history consists of IID draws from density :math:`g` 325 | 326 | 327 | .. code-block:: python3 328 | 329 | T = l_arr_g.shape[1] 330 | π_seq_g = np.empty((2, T+1)) 331 | π_seq_g[:, 0] = π1, π2 332 | 333 | for t in range(T): 334 | for i in range(2): 335 | π_seq_g[i, t+1] = update(π_seq_g[i, t], l_arr_g[0, t]) 336 | 337 | .. 
code-block:: python3 338 | 339 | fig, ax1 = plt.subplots() 340 | 341 | for i in range(2): 342 | ax1.plot(range(T+1), π_seq_g[i, :], label=f"$\pi_0$={π_seq_g[i, 0]}") 343 | 344 | ax1.set_ylabel("$\pi_t$") 345 | ax1.set_xlabel("t") 346 | ax1.legend() 347 | ax1.set_title("when g governs data") 348 | 349 | ax2 = ax1.twinx() 350 | ax2.plot(range(1, T+1), np.log(l_seq_g[0, :]), '--', color='b') 351 | ax2.set_ylabel("$log(L(w^{t}))$") 352 | 353 | plt.show() 354 | 355 | 356 | Below we offer Python code that verifies that nature chose permanently to draw from density :math:`f`. 357 | 358 | .. code-block:: python3 359 | 360 | π_seq = np.empty((2, T+1)) 361 | π_seq[:, 0] = π1, π2 362 | 363 | for i in range(2): 364 | πL = π_seq[i, 0] * l_seq_f[0, :] 365 | π_seq[i, 1:] = πL / (πL + 1 - π_seq[i, 0]) 366 | 367 | .. code-block:: python3 368 | 369 | np.abs(π_seq - π_seq_f).max() < 1e-10 370 | 371 | We thus conclude that the likelihood ratio process is a key ingredient of the formula :eq:`eq_Bayeslaw103` for 372 | a Bayesian's posterior probability that nature has drawn history :math:`w^t` as repeated draws from density 373 | :math:`g`. 374 | 375 | Sequels 376 | ======== 377 | 378 | This lecture has been devoted to building some useful infrastructure. 379 | 380 | We'll build on results highlighted in this lecture to understand inferences that are the foundations of 381 | results described in :doc:`this lecture ` and :doc:`this lecture ` and :doc:`this lecture `. 382 | -------------------------------------------------------------------------------- /source/rst/mccall_fitted_vfi.rst: -------------------------------------------------------------------------------- 1 | 2 | .. include:: /_static/includes/header.raw 3 | 4 | .. highlight:: python3 5 | 6 | *********************************************** 7 | Job Search III: Fitted Value Function Iteration 8 | *********************************************** 9 | 10 | .. 
contents:: :depth: 2 11 | 12 | 13 | In addition to what's in Anaconda, this lecture will need the following libraries: 14 | 15 | .. code-block:: ipython 16 | :class: hide-output 17 | 18 | !pip install quantecon 19 | !pip install interpolation 20 | 21 | 22 | Overview 23 | ======== 24 | 25 | In this lecture we again study the :doc:`McCall job search model with separation `, but now with a continuous wage distribution. 26 | 27 | While we already considered continuous wage distributions briefly in the 28 | exercises of the :doc:`first job search lecture `, 29 | the change was relatively trivial in that case. 30 | 31 | This is because we were able to reduce the problem to solving for a single 32 | scalar value (the continuation value). 33 | 34 | Here, with separation, the change is less trivial, since a continuous wage distribution leads to an uncountably infinite state space. 35 | 36 | The infinite state space leads to additional challenges, particularly when it 37 | comes to applying value function iteration (VFI). 38 | 39 | These challenges will lead us to modify VFI by adding an interpolation step. 40 | 41 | The combination of VFI and this interpolation step is called **fitted value function iteration** (fitted VFI). 42 | 43 | Fitted VFI is very common in practice, so we will take some time to work through the details. 44 | 45 | We will use the following imports: 46 | 47 | .. code:: ipython3 48 | 49 | import numpy as np 50 | import matplotlib.pyplot as plt 51 | %matplotlib inline 52 | 53 | import quantecon as qe 54 | from interpolation import interp 55 | from numpy.random import randn 56 | from numba import njit, prange, float64, int32 57 | from numba.experimental import jitclass 58 | 59 | 60 | 61 | 62 | The Algorithm 63 | ============= 64 | 65 | The model is the same as the McCall model with job separation we :doc:`studied before `, except that the wage offer distribution is continuous. 
66 | 67 | We are going to start with the two Bellman equations we obtained for the model with job separation after :ref:`a simplifying transformation `. 68 | 69 | Modified to accommodate continuous wage draws, they take the following form: 70 | 71 | .. math:: 72 | :label: bell1mcmc 73 | 74 | d = \int \max \left\{ v(w'), \, u(c) + \beta d \right\} q(w') d w' 75 | 76 | and 77 | 78 | .. math:: 79 | :label: bell2mcmc 80 | 81 | v(w) = u(w) + \beta 82 | \left[ 83 | (1-\alpha)v(w) + \alpha d 84 | \right] 85 | 86 | The unknowns here are the function :math:`v` and the scalar :math:`d`. 87 | 88 | The difference between these and the pair of Bellman equations we previously worked on are 89 | 90 | #. in :eq:`bell1mcmc`, what used to be a sum over a finite number of wage values is an integral over an infinite set. 91 | 92 | #. The function :math:`v` in :eq:`bell2mcmc` is defined over all :math:`w \in \mathbb R_+`. 93 | 94 | The function :math:`q` in :eq:`bell1mcmc` is the density of the wage offer distribution. 95 | 96 | Its support is taken as equal to :math:`\mathbb R_+`. 97 | 98 | 99 | 100 | Value Function Iteration 101 | ------------------------ 102 | 103 | In theory, we should now proceed as follows: 104 | 105 | #. Begin with a guess :math:`v, d` for the solutions to :eq:`bell1mcmc`--:eq:`bell2mcmc`. 106 | 107 | #. Plug :math:`v, d` into the right hand side of :eq:`bell1mcmc`--:eq:`bell2mcmc` and 108 | compute the left hand side to obtain updates :math:`v', d'` 109 | 110 | #. Unless some stopping condition is satisfied, set :math:`(v, d) = (v', d')` 111 | and go to step 2. 112 | 113 | However, there is a problem we must confront before we implement this procedure: 114 | The iterates of the value function can neither be calculated exactly nor stored on a computer. 115 | 116 | To see the issue, consider :eq:`bell2mcmc`. 
117 | 118 | Even if :math:`v` is a known function, the only way to store its update :math:`v'` 119 | is to record its value :math:`v'(w)` for every :math:`w \in \mathbb R_+`. 120 | 121 | Clearly, this is impossible. 122 | 123 | 124 | Fitted Value Function Iteration 125 | ------------------------------- 126 | 127 | What we will do instead is use **fitted value function iteration**. 128 | 129 | The procedure is as follows: 130 | 131 | Let a current guess :math:`v` be given. 132 | 133 | Now we record the value of the function :math:`v'` at only 134 | finitely many "grid" points :math:`w_1 < w_2 < \cdots < w_I` and then reconstruct :math:`v'` from this information when required. 135 | 136 | More precisely, the algorithm will be 137 | 138 | .. _fvi_alg: 139 | 140 | #. Begin with an array :math:`\mathbf v` representing the values of an initial guess of the value function on some grid points :math:`\{w_i\}`. 141 | 142 | #. Build a function :math:`v` on the state space :math:`\mathbb R_+` by interpolation or approximation, based on :math:`\mathbf v` and :math:`\{ w_i\}`. 143 | 144 | #. Obtain and record the samples of the updated function :math:`v'(w_i)` on each grid point :math:`w_i`. 145 | 146 | #. Unless some stopping condition is satisfied, take this as the new array and go to step 1. 147 | 148 | How should we go about step 2? 149 | 150 | This is a problem of function approximation, and there are many ways to approach it. 151 | 152 | What's important here is that the function approximation scheme must not only 153 | produce a good approximation to each :math:`v`, but also that it combines well with the broader iteration algorithm described above. 154 | 155 | 156 | One good choice in both respects is continuous piecewise linear interpolation. 157 | 158 | This method 159 | 160 | 1. combines well with value function iteration (see, e.g., 161 | :cite:`gordon1995stable` or :cite:`stachurski2008continuous`) and 162 | 163 | 2. 
preserves useful shape properties such as monotonicity and concavity/convexity. 164 | 165 | Linear interpolation will be implemented using a JIT-aware Python interpolation library called `interpolation.py `__. 166 | 167 | The next figure illustrates piecewise linear interpolation of an arbitrary 168 | function on grid points :math:`0, 0.2, 0.4, 0.6, 0.8, 1`. 169 | 170 | 171 | .. code-block:: python3 172 | 173 | 174 | def f(x): 175 | y1 = 2 * np.cos(6 * x) + np.sin(14 * x) 176 | return y1 + 2.5 177 | 178 | c_grid = np.linspace(0, 1, 6) 179 | f_grid = np.linspace(0, 1, 150) 180 | 181 | def Af(x): 182 | return interp(c_grid, f(c_grid), x) 183 | 184 | fig, ax = plt.subplots() 185 | 186 | ax.plot(f_grid, f(f_grid), 'b-', label='true function') 187 | ax.plot(f_grid, Af(f_grid), 'g-', label='linear approximation') 188 | ax.vlines(c_grid, c_grid * 0, f(c_grid), linestyle='dashed', alpha=0.5) 189 | 190 | ax.legend(loc="upper center") 191 | 192 | ax.set(xlim=(0, 1), ylim=(0, 6)) 193 | plt.show() 194 | 195 | 196 | 197 | 198 | Implementation 199 | ============== 200 | 201 | The first step is to build a jitted class for the McCall model with separation and 202 | a continuous wage offer distribution. 203 | 204 | We will take the utility function to be the log function for this application, with :math:`u(c) = \ln c`. 205 | 206 | We will adopt the lognormal distribution for wages, with :math:`w = \exp(\mu + \sigma z)` 207 | when :math:`z` is standard normal and :math:`\mu, \sigma` are parameters. 208 | 209 | 210 | .. code-block:: python3 211 | 212 | @njit 213 | def lognormal_draws(n=1000, μ=2.5, σ=0.5, seed=1234): 214 | np.random.seed(seed) 215 | z = np.random.randn(n) 216 | w_draws = np.exp(μ + σ * z) 217 | return w_draws 218 | 219 | 220 | Here's our class. 221 | 222 | .. 
code-block:: python3 223 | 224 | mccall_data_continuous = [ 225 | ('c', float64), # unemployment compensation 226 | ('α', float64), # job separation rate 227 | ('β', float64), # discount factor 228 | ('σ', float64), # scale parameter in lognormal distribution 229 | ('μ', float64), # location parameter in lognormal distribution 230 | ('w_grid', float64[:]), # grid of points for fitted VFI 231 | ('w_draws', float64[:]) # draws of wages for Monte Carlo 232 | ] 233 | 234 | @jitclass(mccall_data_continuous) 235 | class McCallModelContinuous: 236 | 237 | def __init__(self, 238 | c=1, 239 | α=0.1, 240 | β=0.96, 241 | grid_min=1e-10, 242 | grid_max=5, 243 | grid_size=100, 244 | w_draws=lognormal_draws()): 245 | 246 | self.c, self.α, self.β = c, α, β 247 | 248 | self.w_grid = np.linspace(grid_min, grid_max, grid_size) 249 | self.w_draws = w_draws 250 | 251 | def update(self, v, d): 252 | 253 | # Simplify names 254 | c, α, β, σ, μ = self.c, self.α, self.β, self.σ, self.μ 255 | w = self.w_grid 256 | u = lambda x: np.log(x) 257 | 258 | # Interpolate array represented value function 259 | vf = lambda x: interp(w, v, x) 260 | 261 | # Update d using Monte Carlo to evaluate integral 262 | d_new = np.mean(np.maximum(vf(self.w_draws), u(c) + β * d)) 263 | 264 | # Update v 265 | v_new = u(w) + β * ((1 - α) * v + α * d) 266 | 267 | return v_new, d_new 268 | 269 | 270 | We then return the current iterate as an approximate solution. 271 | 272 | .. 
code-block:: python3 273 | 274 | @njit 275 | def solve_model(mcm, tol=1e-5, max_iter=2000): 276 | """ 277 | Iterates to convergence on the Bellman equations 278 | 279 | * mcm is an instance of McCallModel 280 | """ 281 | 282 | v = np.ones_like(mcm.w_grid) # Initial guess of v 283 | d = 1 # Initial guess of d 284 | i = 0 285 | error = tol + 1 286 | 287 | while error > tol and i < max_iter: 288 | v_new, d_new = mcm.update(v, d) 289 | error_1 = np.max(np.abs(v_new - v)) 290 | error_2 = np.abs(d_new - d) 291 | error = max(error_1, error_2) 292 | v = v_new 293 | d = d_new 294 | i += 1 295 | 296 | return v, d 297 | 298 | 299 | Here's a function ``compute_reservation_wage`` that takes an instance of ``McCallModelContinuous`` 300 | and returns the associated reservation wage. 301 | 302 | If :math:`v(w) < h` for all :math:`w`, then the function returns `np.inf` 303 | 304 | 305 | 306 | .. code-block:: python3 307 | 308 | @njit 309 | def compute_reservation_wage(mcm): 310 | """ 311 | Computes the reservation wage of an instance of the McCall model 312 | by finding the smallest w such that v(w) >= h. 313 | 314 | If no such w exists, then w_bar is set to np.inf. 315 | """ 316 | u = lambda x: np.log(x) 317 | 318 | v, d = solve_model(mcm) 319 | h = u(mcm.c) + mcm.β * d 320 | 321 | w_bar = np.inf 322 | for i, wage in enumerate(mcm.w_grid): 323 | if v[i] > h: 324 | w_bar = wage 325 | break 326 | 327 | return w_bar 328 | 329 | 330 | 331 | The exercises ask you to explore the solution and how it changes with parameters. 332 | 333 | 334 | Exercises 335 | ========= 336 | 337 | 338 | Exercise 1 339 | ---------- 340 | 341 | Use the code above to explore what happens to the reservation wage when the wage parameter :math:`\mu` 342 | changes. 343 | 344 | Use the default parameters and :math:`\mu` in ``mu_vals = np.linspace(0.0, 2.0, 15)``. 345 | 346 | Is the impact on the reservation wage as you expected? 
347 | 348 | Exercise 2 349 | ---------- 350 | 351 | Let us now consider how the agent responds to an increase in volatility. 352 | 353 | To try to understand this, compute the reservation wage when the wage offer 354 | distribution is uniform on :math:`(m - s, m + s)` and :math:`s` varies. 355 | 356 | The idea here is that we are holding the mean constant and spreading the 357 | support. 358 | 359 | (This is a form of *mean-preserving spread*.) 360 | 361 | 362 | Use ``s_vals = np.linspace(1.0, 2.0, 15)`` and ``m = 2.0``. 363 | 364 | State how you expect the reservation wage to vary with :math:`s`. 365 | 366 | Now compute it. Is this as you expected? 367 | 368 | Solutions 369 | ========= 370 | 371 | 372 | Exercise 1 373 | ---------- 374 | 375 | Here is one solution. 376 | 377 | 378 | .. code-block:: python3 379 | 380 | mcm = McCallModelContinuous() 381 | mu_vals = np.linspace(0.0, 2.0, 15) 382 | w_bar_vals = np.empty_like(mu_vals) 383 | 384 | fig, ax = plt.subplots() 385 | 386 | for i, m in enumerate(mu_vals): 387 | mcm.w_draws = lognormal_draws(μ=m) 388 | w_bar = compute_reservation_wage(mcm) 389 | w_bar_vals[i] = w_bar 390 | 391 | ax.set(xlabel='mean', ylabel='reservation wage') 392 | ax.plot(mu_vals, w_bar_vals, label=r'$\bar w$ as a function of $\mu$') 393 | ax.legend() 394 | 395 | plt.show() 396 | 397 | Not surprisingly, the agent is more inclined to wait when the distribution of 398 | offers shifts to the right. 399 | 400 | Exercise 2 401 | ---------- 402 | 403 | Here is one solution. 404 | 405 | .. 
code-block:: python3 406 | 407 | mcm = McCallModelContinuous() 408 | s_vals = np.linspace(1.0, 2.0, 15) 409 | m = 2.0 410 | w_bar_vals = np.empty_like(s_vals) 411 | 412 | fig, ax = plt.subplots() 413 | 414 | for i, s in enumerate(s_vals): 415 | a, b = m - s, m + s 416 | mcm.w_draws = np.random.uniform(low=a, high=b, size=10_000) 417 | w_bar = compute_reservation_wage(mcm) 418 | w_bar_vals[i] = w_bar 419 | 420 | ax.set(xlabel='volatility', ylabel='reservation wage') 421 | ax.plot(s_vals, w_bar_vals, label=r'$\bar w$ as a function of wage volatility') 422 | ax.legend() 423 | 424 | plt.show() 425 | 426 | The reservation wage increases with volatility. 427 | 428 | One might think that higher volatility would make the agent more inclined to 429 | take a given offer, since doing so represents certainty and waiting represents 430 | risk. 431 | 432 | But job search is like holding an option: the worker is only exposed to upside risk (since, in a free market, no one can force them to take a bad offer). 433 | 434 | More volatility means higher upside potential, which encourages the agent to wait. 435 | -------------------------------------------------------------------------------- /source/rst/optgrowth_fast.rst: -------------------------------------------------------------------------------- 1 | 2 | .. _optgrowth: 3 | 4 | .. include:: /_static/includes/header.raw 5 | 6 | .. highlight:: python3 7 | 8 | ************************************************************** 9 | :index:`Optimal Growth II: Accelerating the Code with Numba` 10 | ************************************************************** 11 | 12 | .. contents:: :depth: 2 13 | 14 | In addition to what's in Anaconda, this lecture will need the following libraries: 15 | 16 | .. 
code-block:: ipython 17 | :class: hide-output 18 | 19 | !pip install quantecon 20 | !pip install interpolation 21 | 22 | Overview 23 | ======== 24 | 25 | :doc:`Previously `, we studied a stochastic optimal 26 | growth model with one representative agent. 27 | 28 | We solved the model using dynamic programming. 29 | 30 | In writing our code, we focused on clarity and flexibility. 31 | 32 | These are important, but there's often a trade-off between flexibility and 33 | speed. 34 | 35 | The reason is that, when code is less flexible, we can exploit structure more 36 | easily. 37 | 38 | (This is true about algorithms and mathematical problems more generally: 39 | more specific problems have more structure, which, with some thought, can be 40 | exploited for better results.) 41 | 42 | So, in this lecture, we are going to accept less flexibility while gaining 43 | speed, using just-in-time (JIT) compilation to 44 | accelerate our code. 45 | 46 | Let's start with some imports: 47 | 48 | .. code-block:: ipython 49 | 50 | import numpy as np 51 | import matplotlib.pyplot as plt 52 | from interpolation import interp 53 | from numba import jit, njit, prange, float64, int32 54 | from numba.experimental import jitclass 55 | from quantecon.optimize.scalar_maximization import brent_max 56 | 57 | %matplotlib inline 58 | 59 | 60 | We are using an interpolation function from 61 | `interpolation.py `__ because it 62 | helps us JIT-compile our code. 63 | 64 | The function ``brent_max`` is also designed for embedding in JIT-compiled code. 65 | 66 | These are alternatives to similar functions in SciPy (which, unfortunately, are not JIT-aware). 67 | 68 | 69 | 70 | The Model 71 | ========= 72 | 73 | .. index:: 74 | single: Optimal Growth; Model 75 | 76 | 77 | The model is the same as discussed in our :doc:`previous lecture ` 78 | on optimal growth. 79 | 80 | We will start with log utility: 81 | 82 | .. 
math:: 83 | u(c) = \ln(c) 84 | 85 | We continue to assume that 86 | 87 | * :math:`f(k) = k^{\alpha}` 88 | 89 | * :math:`\phi` is the distribution of :math:`\xi := \exp(\mu + s \zeta)` when :math:`\zeta` is standard normal 90 | 91 | We will once again use value function iteration to solve the model. 92 | 93 | In particular, the algorithm is unchanged, and the only difference is in the implementation itself. 94 | 95 | As before, we will be able to compare with the true solutions 96 | 97 | .. literalinclude:: /_static/lecture_specific/optgrowth/cd_analytical.py 98 | 99 | 100 | 101 | Computation 102 | =========== 103 | 104 | .. index:: 105 | single: Dynamic Programming; Computation 106 | 107 | 108 | We will again store the primitives of the optimal growth model in a class. 109 | 110 | But now we are going to use `Numba's `__ ``@jitclass`` decorator to target our class for JIT compilation. 111 | 112 | Because we are going to use Numba to compile our class, we need to specify the data types. 113 | 114 | You will see this as a list called ``opt_growth_data`` above our class. 115 | 116 | Unlike in the :doc:`previous lecture `, we 117 | hardwire the production and utility specifications into the 118 | class. 119 | 120 | This is where we sacrifice flexibility in order to gain more speed. 121 | 122 | 123 | .. literalinclude:: /_static/lecture_specific/optgrowth_fast/ogm.py 124 | 125 | 126 | The class includes some methods such as ``u_prime`` that we do not need now 127 | but will use in later lectures. 128 | 129 | 130 | The Bellman Operator 131 | -------------------- 132 | 133 | We will use JIT compilation to accelerate the Bellman operator. 134 | 135 | First, here's a function that returns the value of a particular consumption choice ``c``, given state ``y``, as per the Bellman equation :eq:`fpb30`. 136 | 137 | .. code-block:: python3 138 | 139 | @njit 140 | def state_action_value(c, y, v_array, og): 141 | """ 142 | Right hand side of the Bellman equation. 
143 | 144 | * c is consumption 145 | * y is income 146 | * og is an instance of OptimalGrowthModel 147 | * v_array represents a guess of the value function on the grid 148 | 149 | """ 150 | 151 | u, f, β, shocks = og.u, og.f, og.β, og.shocks 152 | 153 | v = lambda x: interp(og.grid, v_array, x) 154 | 155 | return u(c) + β * np.mean(v(f(y - c) * shocks)) 156 | 157 | Now we can implement the Bellman operator, which maximizes the right hand side 158 | of the Bellman equation: 159 | 160 | .. code-block:: python3 161 | 162 | @jit(nopython=True) 163 | def T(v, og): 164 | """ 165 | The Bellman operator. 166 | 167 | * og is an instance of OptimalGrowthModel 168 | * v is an array representing a guess of the value function 169 | 170 | """ 171 | 172 | v_new = np.empty_like(v) 173 | v_greedy = np.empty_like(v) 174 | 175 | for i in range(len(og.grid)): 176 | y = og.grid[i] 177 | 178 | # Maximize RHS of Bellman equation at state y 179 | result = brent_max(state_action_value, 1e-10, y, args=(y, v, og)) 180 | v_greedy[i], v_new[i] = result[0], result[1] 181 | 182 | return v_greedy, v_new 183 | 184 | We use the ``solve_model`` function to perform iteration until convergence. 185 | 186 | .. literalinclude:: /_static/lecture_specific/optgrowth/solve_model.py 187 | 188 | Let's compute the approximate solution at the default parameters. 189 | 190 | First we create an instance: 191 | 192 | .. code-block:: python3 193 | 194 | og = OptimalGrowthModel() 195 | 196 | Now we call ``solve_model``, using the ``%%time`` magic to check how long it 197 | takes. 198 | 199 | .. code-block:: python3 200 | 201 | %%time 202 | v_greedy, v_solution = solve_model(og) 203 | 204 | You will notice that this is *much* faster than our :doc:`original implementation `. 205 | 206 | Here is a plot of the resulting policy, compared with the true policy: 207 | 208 | .. 
code-block:: python3 209 | 210 | fig, ax = plt.subplots() 211 | 212 | ax.plot(og.grid, v_greedy, lw=2, 213 | alpha=0.8, label='approximate policy function') 214 | 215 | ax.plot(og.grid, σ_star(og.grid, og.α, og.β), 'k--', 216 | lw=2, alpha=0.8, label='true policy function') 217 | 218 | ax.legend() 219 | plt.show() 220 | 221 | 222 | Again, the fit is excellent --- this is as expected since we have not changed 223 | the algorithm. 224 | 225 | The maximal absolute deviation between the two policies is 226 | 227 | .. code-block:: python3 228 | 229 | np.max(np.abs(v_greedy - σ_star(og.grid, og.α, og.β))) 230 | 231 | 232 | Exercises 233 | ========= 234 | 235 | .. _ogfast_ex1: 236 | 237 | 238 | 239 | Exercise 1 240 | ---------- 241 | 242 | Time how long it takes to iterate with the Bellman operator 243 | 20 times, starting from initial condition :math:`v(y) = u(y)`. 244 | 245 | Use the default parameterization. 246 | 247 | 248 | Exercise 2 249 | ---------- 250 | 251 | Modify the optimal growth model to use the CRRA utility specification. 252 | 253 | .. math:: 254 | u(c) = \frac{c^{1 - \gamma} } {1 - \gamma} 255 | 256 | Set ``γ = 1.5`` as the default value, maintaining other specifications. 257 | 258 | (Note that ``jitclass`` currently does not support inheritance, so you will 259 | have to copy the class and change the relevant parameters and methods.) 260 | 261 | Compute an estimate of the optimal policy, plot it and compare visually with 262 | the same plot from the :ref:`analogous exercise ` in the first optimal 263 | growth lecture. 264 | 265 | Compare execution time as well. 266 | 267 | 268 | .. _ogfast_ex3: 269 | 270 | Exercise 3 271 | ---------- 272 | 273 | In this exercise we return to the original log utility specification. 274 | 275 | Once an optimal consumption policy :math:`\sigma` is given, income follows 276 | 277 | .. 
math:: 278 | 279 | y_{t+1} = f(y_t - \sigma(y_t)) \xi_{t+1} 280 | 281 | 282 | The next figure shows a simulation of 100 elements of this sequence for three 283 | different discount factors (and hence three different policies). 284 | 285 | .. figure:: /_static/lecture_specific/optgrowth/solution_og_ex2.png 286 | 287 | In each sequence, the initial condition is :math:`y_0 = 0.1`. 288 | 289 | The discount factors are ``discount_factors = (0.8, 0.9, 0.98)``. 290 | 291 | We have also dialed down the shocks a bit with ``s = 0.05``. 292 | 293 | Otherwise, the parameters and primitives are the same as the log-linear model discussed earlier in the lecture. 294 | 295 | Notice that more patient agents typically have higher wealth. 296 | 297 | Replicate the figure modulo randomness. 298 | 299 | 300 | 301 | Solutions 302 | ========= 303 | 304 | 305 | 306 | Exercise 1 307 | ---------- 308 | 309 | Let's set up the initial condition. 310 | 311 | .. code-block:: ipython3 312 | 313 | v = og.u(og.grid) 314 | 315 | Here's the timing: 316 | 317 | .. code-block:: ipython3 318 | 319 | %%time 320 | 321 | for i in range(20): 322 | v_greedy, v_new = T(v, og) 323 | v = v_new 324 | 325 | 326 | Compared with our :ref:`timing ` for the non-compiled version of 327 | value function iteration, the JIT-compiled code is usually an order of magnitude faster. 328 | 329 | 330 | Exercise 2 331 | ---------- 332 | 333 | Here's our CRRA version of ``OptimalGrowthModel``: 334 | 335 | 336 | .. literalinclude:: /_static/lecture_specific/optgrowth_fast/ogm_crra.py 337 | 338 | Let's create an instance: 339 | 340 | .. code-block:: python3 341 | 342 | og_crra = OptimalGrowthModel_CRRA() 343 | 344 | Now we call ``solve_model``, using the ``%%time`` magic to check how long it 345 | takes. 346 | 347 | .. code-block:: python3 348 | 349 | %%time 350 | v_greedy, v_solution = solve_model(og_crra) 351 | 352 | Here is a plot of the resulting policy: 353 | 354 | .. 
code-block:: python3 355 | 356 | fig, ax = plt.subplots() 357 | 358 | ax.plot(og.grid, v_greedy, lw=2, 359 | alpha=0.6, label='Approximate value function') 360 | 361 | ax.legend(loc='lower right') 362 | plt.show() 363 | 364 | 365 | This matches the solution that we obtained in our non-jitted code, :ref:`in 366 | the exercises `. 367 | 368 | Execution time is an order of magnitude faster. 369 | 370 | 371 | 372 | Exercise 3 373 | ---------- 374 | 375 | Here's one solution: 376 | 377 | .. code-block:: python3 378 | 379 | def simulate_og(σ_func, og, y0=0.1, ts_length=100): 380 | ''' 381 | Compute a time series given consumption policy σ. 382 | ''' 383 | y = np.empty(ts_length) 384 | ξ = np.random.randn(ts_length-1) 385 | y[0] = y0 386 | for t in range(ts_length-1): 387 | y[t+1] = (y[t] - σ_func(y[t]))**og.α * np.exp(og.μ + og.s * ξ[t]) 388 | return y 389 | 390 | .. code-block:: python3 391 | 392 | fig, ax = plt.subplots() 393 | 394 | for β in (0.8, 0.9, 0.98): 395 | 396 | og = OptimalGrowthModel(β=β, s=0.05) 397 | 398 | v_greedy, v_solution = solve_model(og, verbose=False) 399 | 400 | # Define an optimal policy function 401 | σ_func = lambda x: interp(og.grid, v_greedy, x) 402 | y = simulate_og(σ_func, og) 403 | ax.plot(y, lw=2, alpha=0.6, label=rf'$\beta = {β}$') 404 | 405 | ax.legend(loc='lower right') 406 | plt.show() 407 | 408 | -------------------------------------------------------------------------------- /source/rst/schelling.rst: -------------------------------------------------------------------------------- 1 | .. _schelling: 2 | 3 | .. include:: /_static/includes/header.raw 4 | 5 | .. highlight:: python3 6 | 7 | ***************************** 8 | Schelling's Segregation Model 9 | ***************************** 10 | 11 | .. index:: 12 | single: Schelling Segregation Model 13 | 14 | .. index:: 15 | single: Models; Schelling's Segregation Model 16 | 17 | .. contents:: :depth: 2 18 | 19 | Outline 20 | ======= 21 | 22 | 23 | In 1969, Thomas C. 
Schelling developed a simple but striking model of racial segregation :cite:`Schelling1969`. 24 | 25 | His model studies the dynamics of racially mixed neighborhoods. 26 | 27 | Like much of Schelling's work, the model shows how local interactions can lead to surprising aggregate structure. 28 | 29 | In particular, it shows that relatively mild preference for neighbors of similar race can lead in aggregate to the collapse of mixed neighborhoods, and high levels of segregation. 30 | 31 | In recognition of this and other research, Schelling was awarded the 2005 Nobel Prize in Economic Sciences (joint with Robert Aumann). 32 | 33 | In this lecture, we (in fact you) will build and run a version of Schelling's model. 34 | 35 | Let's start with some imports: 36 | 37 | .. code-block:: ipython 38 | 39 | from random import uniform, seed 40 | from math import sqrt 41 | import matplotlib.pyplot as plt 42 | %matplotlib inline 43 | 44 | 45 | The Model 46 | ========= 47 | 48 | We will cover a variation of Schelling's model that is easy to program and captures the main idea. 49 | 50 | Set-Up 51 | ------ 52 | 53 | Suppose we have two types of people: orange people and green people. 54 | 55 | For the purpose of this lecture, we will assume there are 250 of each type. 56 | 57 | These agents all live on a single unit square. 58 | 59 | The location of an agent is just a point :math:`(x, y)`, where :math:`0 < x, y < 1`. 60 | 61 | 62 | 63 | Preferences 64 | ----------- 65 | 66 | We will say that an agent is *happy* if half or more of her 10 nearest neighbors are of the same type. 67 | 68 | Here 'nearest' is in terms of `Euclidean distance `_. 69 | 70 | An agent who is not happy is called *unhappy*. 71 | 72 | An important point here is that agents are not averse to living in mixed areas. 73 | 74 | They are perfectly happy if half their neighbors are of the other color. 75 | 76 | 77 | Behavior 78 | -------- 79 | 80 | Initially, agents are mixed together (integrated). 
81 | 82 | In particular, the initial location of each agent is an independent draw from a bivariate uniform distribution on :math:`S = (0, 1)^2`. 83 | 84 | Now, cycling through the set of all agents, each agent is now given the chance to stay or move. 85 | 86 | We assume that each agent will stay put if they are happy and move if unhappy. 87 | 88 | The algorithm for moving is as follows 89 | 90 | #. Draw a random location in :math:`S` 91 | 92 | #. If happy at new location, move there 93 | 94 | #. Else, go to step 1 95 | 96 | In this way, we cycle continuously through the agents, moving as required. 97 | 98 | We continue to cycle until no one wishes to move. 99 | 100 | 101 | 102 | Results 103 | ======= 104 | 105 | Let's have a look at the results we got when we coded and ran this model. 106 | 107 | As discussed above, agents are initially mixed randomly together. 108 | 109 | 110 | .. figure:: /_static/lecture_specific/schelling/schelling_fig1.png 111 | 112 | But after several cycles, they become segregated into distinct regions. 113 | 114 | .. figure:: /_static/lecture_specific/schelling/schelling_fig2.png 115 | 116 | .. figure:: /_static/lecture_specific/schelling/schelling_fig3.png 117 | 118 | .. figure:: /_static/lecture_specific/schelling/schelling_fig4.png 119 | 120 | In this instance, the program terminated after 4 cycles through the set of 121 | agents, indicating that all agents had reached a state of happiness. 122 | 123 | What is striking about the pictures is how rapidly racial integration breaks down. 124 | 125 | This is despite the fact that people in the model don't actually mind living mixed with the other type. 126 | 127 | Even with these preferences, the outcome is a high degree of segregation. 128 | 129 | 130 | Exercises 131 | ========= 132 | 133 | 134 | 135 | .. _schelling_ex1: 136 | 137 | Exercise 1 138 | ---------- 139 | 140 | Implement and run this simulation for yourself. 
141 | 142 | 143 | 144 | Consider the following structure for your program. 145 | 146 | Agents can be modeled as `objects `__. 147 | 148 | Here's an indication of how they might look 149 | 150 | .. code-block:: none 151 | 152 | * Data: 153 | 154 | * type (green or orange) 155 | * location 156 | 157 | * Methods: 158 | 159 | * determine whether happy or not given locations of other agents 160 | 161 | * If not happy, move 162 | 163 | * find a new location where happy 164 | 165 | 166 | And here's some pseudocode for the main loop 167 | 168 | 169 | .. code-block:: none 170 | 171 | while agents are still moving 172 | for agent in agents 173 | give agent the opportunity to move 174 | 175 | 176 | 177 | Use 250 agents of each type. 178 | 179 | 180 | 181 | Solutions 182 | ========= 183 | 184 | 185 | 186 | 187 | 188 | Exercise 1 189 | ---------- 190 | 191 | Here's one solution that does the job we want. 192 | 193 | If you feel like a further exercise, you can probably speed up some of the computations and 194 | then increase the number of agents. 195 | 196 | .. code-block:: python3 197 | 198 | seed(10) # For reproducible random numbers 199 | 200 | class Agent: 201 | 202 | def __init__(self, type): 203 | self.type = type 204 | self.draw_location() 205 | 206 | def draw_location(self): 207 | self.location = uniform(0, 1), uniform(0, 1) 208 | 209 | def get_distance(self, other): 210 | "Computes the euclidean distance between self and other agent." 211 | a = (self.location[0] - other.location[0])**2 212 | b = (self.location[1] - other.location[1])**2 213 | return sqrt(a + b) 214 | 215 | def happy(self, agents): 216 | "True if sufficient number of nearest neighbors are of the same type." 
217 | distances = [] 218 | # distances is a list of pairs (d, agent), where d is distance from 219 | # agent to self 220 | for agent in agents: 221 | if self != agent: 222 | distance = self.get_distance(agent) 223 | distances.append((distance, agent)) 224 | # == Sort from smallest to largest, according to distance == # 225 | distances.sort() 226 | # == Extract the neighboring agents == # 227 | neighbors = [agent for d, agent in distances[:num_neighbors]] 228 | # == Count how many neighbors have the same type as self == # 229 | num_same_type = sum(self.type == agent.type for agent in neighbors) 230 | return num_same_type >= require_same_type 231 | 232 | def update(self, agents): 233 | "If not happy, then randomly choose new locations until happy." 234 | while not self.happy(agents): 235 | self.draw_location() 236 | 237 | 238 | def plot_distribution(agents, cycle_num): 239 | "Plot the distribution of agents after cycle_num rounds of the loop." 240 | x_values_0, y_values_0 = [], [] 241 | x_values_1, y_values_1 = [], [] 242 | # == Obtain locations of each type == # 243 | for agent in agents: 244 | x, y = agent.location 245 | if agent.type == 0: 246 | x_values_0.append(x) 247 | y_values_0.append(y) 248 | else: 249 | x_values_1.append(x) 250 | y_values_1.append(y) 251 | fig, ax = plt.subplots(figsize=(8, 8)) 252 | plot_args = {'markersize': 8, 'alpha': 0.6} 253 | ax.set_facecolor('azure') 254 | ax.plot(x_values_0, y_values_0, 'o', markerfacecolor='orange', **plot_args) 255 | ax.plot(x_values_1, y_values_1, 'o', markerfacecolor='green', **plot_args) 256 | ax.set_title(f'Cycle {cycle_num-1}') 257 | plt.show() 258 | 259 | # == Main == # 260 | 261 | num_of_type_0 = 250 262 | num_of_type_1 = 250 263 | num_neighbors = 10 # Number of agents regarded as neighbors 264 | require_same_type = 5 # Want at least this many neighbors to be same type 265 | 266 | # == Create a list of agents == # 267 | agents = [Agent(0) for i in range(num_of_type_0)] 268 | agents.extend(Agent(1) for i in 
range(num_of_type_1)) 269 | 270 | 271 | count = 1 272 | # == Loop until none wishes to move == # 273 | while True: 274 | print('Entering loop ', count) 275 | plot_distribution(agents, count) 276 | count += 1 277 | no_one_moved = True 278 | for agent in agents: 279 | old_location = agent.location 280 | agent.update(agents) 281 | if agent.location != old_location: 282 | no_one_moved = False 283 | if no_one_moved: 284 | break 285 | 286 | print('Converged, terminating.') 287 | -------------------------------------------------------------------------------- /source/rst/search.rst: -------------------------------------------------------------------------------- 1 | .. _search: 2 | 3 | ********** 4 | Search 5 | ********** 6 | 7 | .. raw:: html 8 | 9 | 10 | 11 |
-------------------------------------------------------------------------------- /source/rst/sir_model.rst: -------------------------------------------------------------------------------- 1 | .. include:: /_static/includes/header.raw 2 | 3 | .. highlight:: python3 4 | 5 | ************************************ 6 | :index:`Modeling COVID 19` 7 | ************************************ 8 | 9 | .. contents:: :depth: 2 10 | 11 | 12 | 13 | Overview 14 | ============= 15 | 16 | This is a Python version of the code for analyzing the COVID-19 pandemic 17 | provided by `Andrew Atkeson `__. 18 | 19 | See, in particular 20 | 21 | * `NBER Working Paper No. 26867 `__ 22 | * `COVID-19 Working papers and code `__ 23 | 24 | 25 | The purpose of his notes is to introduce economists to quantitative modeling 26 | of infectious disease dynamics. 27 | 28 | Dynamics are modeled using a standard SIR (Susceptible-Infected-Removed) model 29 | of disease spread. 30 | 31 | The model dynamics are represented by a system of ordinary differential 32 | equations. 33 | 34 | The main objective is to study the impact of suppression through social 35 | distancing on the spread of the infection. 36 | 37 | The focus is on US outcomes but the parameters can be adjusted to study 38 | other countries. 39 | 40 | We will use the following standard imports: 41 | 42 | .. code-block:: ipython3 43 | 44 | import numpy as np 45 | from numpy import exp 46 | 47 | import matplotlib.pyplot as plt 48 | 49 | We will also use SciPy's numerical routine `odeint` for solving differential 50 | equations. 51 | 52 | .. code-block:: ipython3 53 | 54 | from scipy.integrate import odeint 55 | 56 | This routine calls into compiled code from the FORTRAN library odepack. 57 | 58 | 59 | 60 | 61 | 62 | 63 | The SIR Model 64 | ============= 65 | 66 | In the version of the SIR model we will analyze there are four states. 67 | 68 | All individuals in the population are assumed to be in one of these four states. 
69 | 70 | The states are: susceptible (S), exposed (E), infected (I) and removed (R). 71 | 72 | Comments: 73 | 74 | * Those in state R have been infected and either recovered or died. 75 | 76 | * Those who have recovered are assumed to have acquired immunity. 77 | 78 | * Those in the exposed group are not yet infectious. 79 | 80 | Time Path 81 | ---------- 82 | 83 | The flow across states follows the path :math:`S \to E \to I \to R`. 84 | 85 | 86 | All individuals in the population are eventually infected when 87 | the transmission rate is positive and :math:`i(0) > 0`. 88 | 89 | The interest is primarily in 90 | 91 | * the number of infections at a given time (which determines whether or not the health care system is overwhelmed) and 92 | * how long the caseload can be deferred (hopefully until a vaccine arrives) 93 | 94 | Using lower case letters for the fraction of the population in each state, the 95 | dynamics are 96 | 97 | .. math:: 98 | \begin{aligned} 99 | \dot s(t) & = - \beta(t) \, s(t) \, i(t) 100 | \\ 101 | \dot e(t) & = \beta(t) \, s(t) \, i(t) - σ e(t) 102 | \\ 103 | \dot i(t) & = σ e(t) - γ i(t) 104 | \end{aligned} 105 | :label: sir_system 106 | 107 | In these equations, 108 | 109 | * :math:`\beta(t)` is called the *transmission rate* (the rate at which individuals bump into others and expose them to the virus). 110 | * :math:`\sigma` is called the *infection rate* (the rate at which those who are exposed become infected) 111 | * :math:`\gamma` is called the *recovery rate* (the rate at which infected people recover or die). 112 | * the dot symbol :math:`\dot y` represents the time derivative :math:`dy/dt`. 113 | 114 | 115 | We do not need to model the fraction :math:`r` of the population in state :math:`R` separately because the states form a partition. 116 | 117 | In particular, the "removed" fraction of the population is :math:`r = 1 - s - e - i`. 
118 | 119 | We will also track :math:`c = i + r`, which is the cumulative caseload 120 | (i.e., all those who have or have had the infection). 121 | 122 | The system :eq:`sir_system` can be written in vector form as 123 | 124 | .. math:: 125 | \dot x = F(x, t), \qquad x := (s, e, i) 126 | :label: dfcv 127 | 128 | for suitable definition of :math:`F` (see the code below). 129 | 130 | 131 | Parameters 132 | ---------- 133 | 134 | Both :math:`\sigma` and :math:`\gamma` are thought of as fixed, biologically determined parameters. 135 | 136 | As in Atkeson's note, we set 137 | 138 | * :math:`\sigma = 1/5.2` to reflect an average incubation period of 5.2 days. 139 | * :math:`\gamma = 1/18` to match an average illness duration of 18 days. 140 | 141 | The transmission rate is modeled as 142 | 143 | * :math:`\beta(t) := R(t) \gamma` where :math:`R(t)` is the *effective reproduction number* at time :math:`t`. 144 | 145 | (The notation is slightly confusing, since :math:`R(t)` is different to 146 | :math:`R`, the symbol that represents the removed state.) 147 | 148 | 149 | Implementation 150 | ============== 151 | 152 | First we set the population size to match the US. 153 | 154 | .. code-block:: ipython3 155 | 156 | pop_size = 3.3e8 157 | 158 | Next we fix parameters as described above. 159 | 160 | .. code-block:: ipython3 161 | 162 | γ = 1 / 18 163 | σ = 1 / 5.2 164 | 165 | Now we construct a function that represents :math:`F` in :eq:`dfcv` 166 | 167 | .. code-block:: ipython3 168 | 169 | def F(x, t, R0=1.6): 170 | """ 171 | Time derivative of the state vector. 
172 | 173 | * x is the state vector (array_like) 174 | * t is time (scalar) 175 | * R0 is the effective transmission rate, defaulting to a constant 176 | 177 | """ 178 | s, e, i = x 179 | 180 | # New exposure of susceptibles 181 | β = R0(t) * γ if callable(R0) else R0 * γ 182 | ne = β * s * i 183 | 184 | # Time derivatives 185 | ds = - ne 186 | de = ne - σ * e 187 | di = σ * e - γ * i 188 | 189 | return ds, de, di 190 | 191 | Note that ``R0`` can be either constant or a given function of time. 192 | 193 | The initial conditions are set to 194 | 195 | .. code-block:: ipython3 196 | 197 | # initial conditions of s, e, i 198 | i_0 = 1e-7 199 | e_0 = 4 * i_0 200 | s_0 = 1 - i_0 - e_0 201 | 202 | In vector form the initial condition is 203 | 204 | .. code-block:: ipython3 205 | 206 | x_0 = s_0, e_0, i_0 207 | 208 | We solve for the time path numerically using `odeint`, at a sequence of dates 209 | ``t_vec``. 210 | 211 | .. code-block:: ipython3 212 | 213 | def solve_path(R0, t_vec, x_init=x_0): 214 | """ 215 | Solve for i(t) and c(t) via numerical integration, 216 | given the time path for R0. 217 | 218 | """ 219 | G = lambda x, t: F(x, t, R0) 220 | s_path, e_path, i_path = odeint(G, x_init, t_vec).transpose() 221 | 222 | c_path = 1 - s_path - e_path # cumulative cases 223 | return i_path, c_path 224 | 225 | 226 | 227 | Experiments 228 | =========== 229 | 230 | Let's run some experiments using this code. 231 | 232 | The time period we investigate will be 550 days, or around 18 months: 233 | 234 | .. code-block:: ipython3 235 | 236 | t_length = 550 237 | grid_size = 1000 238 | t_vec = np.linspace(0, t_length, grid_size) 239 | 240 | 241 | 242 | Experiment 1: Constant R0 Case 243 | ------------------------------ 244 | 245 | 246 | Let's start with the case where ``R0`` is constant. 247 | 248 | We calculate the time path of infected people under different assumptions for ``R0``: 249 | 250 | .. 
code-block:: ipython3 251 | 252 | R0_vals = np.linspace(1.6, 3.0, 6) 253 | labels = [f'$R0 = {r:.2f}$' for r in R0_vals] 254 | i_paths, c_paths = [], [] 255 | 256 | for r in R0_vals: 257 | i_path, c_path = solve_path(r, t_vec) 258 | i_paths.append(i_path) 259 | c_paths.append(c_path) 260 | 261 | Here's some code to plot the time paths. 262 | 263 | .. code-block:: ipython3 264 | 265 | def plot_paths(paths, labels, times=t_vec): 266 | 267 | fig, ax = plt.subplots() 268 | 269 | for path, label in zip(paths, labels): 270 | ax.plot(times, path, label=label) 271 | 272 | ax.legend(loc='upper left') 273 | 274 | plt.show() 275 | 276 | Let's plot current cases as a fraction of the population. 277 | 278 | .. code-block:: ipython3 279 | 280 | plot_paths(i_paths, labels) 281 | 282 | As expected, lower effective transmission rates defer the peak of infections. 283 | 284 | They also lead to a lower peak in current cases. 285 | 286 | Here are cumulative cases, as a fraction of population: 287 | 288 | .. code-block:: ipython3 289 | 290 | plot_paths(c_paths, labels) 291 | 292 | 293 | 294 | Experiment 2: Changing Mitigation 295 | --------------------------------- 296 | 297 | Let's look at a scenario where mitigation (e.g., social distancing) is 298 | successively imposed. 299 | 300 | Here's a specification for ``R0`` as a function of time. 301 | 302 | .. code-block:: ipython3 303 | 304 | def R0_mitigating(t, r0=3, η=1, r_bar=1.6): 305 | R0 = r0 * exp(- η * t) + (1 - exp(- η * t)) * r_bar 306 | return R0 307 | 308 | The idea is that ``R0`` starts off at 3 and falls to 1.6. 309 | 310 | This is due to progressive adoption of stricter mitigation measures. 311 | 312 | The parameter ``η`` controls the rate, or the speed at which restrictions are 313 | imposed. 314 | 315 | We consider several different rates: 316 | 317 | .. 
code-block:: ipython3 318 | 319 | η_vals = 1/5, 1/10, 1/20, 1/50, 1/100 320 | labels = [fr'$\eta = {η:.2f}$' for η in η_vals] 321 | 322 | This is what the time path of ``R0`` looks like at these alternative rates: 323 | 324 | .. code-block:: ipython3 325 | 326 | fig, ax = plt.subplots() 327 | 328 | for η, label in zip(η_vals, labels): 329 | ax.plot(t_vec, R0_mitigating(t_vec, η=η), label=label) 330 | 331 | ax.legend() 332 | plt.show() 333 | 334 | Let's calculate the time path of infected people: 335 | 336 | .. code-block:: ipython3 337 | 338 | i_paths, c_paths = [], [] 339 | 340 | for η in η_vals: 341 | R0 = lambda t: R0_mitigating(t, η=η) 342 | i_path, c_path = solve_path(R0, t_vec) 343 | i_paths.append(i_path) 344 | c_paths.append(c_path) 345 | 346 | 347 | These are current cases under the different scenarios: 348 | 349 | .. code-block:: ipython3 350 | 351 | plot_paths(i_paths, labels) 352 | 353 | Here are cumulative cases, as a fraction of population: 354 | 355 | .. code-block:: ipython3 356 | 357 | plot_paths(c_paths, labels) 358 | 359 | 360 | 361 | Ending Lockdown 362 | =============== 363 | 364 | 365 | The following replicates `additional results `__ by Andrew Atkeson on the timing of lifting lockdown. 366 | 367 | Consider these two mitigation scenarios: 368 | 369 | 1. :math:`R_t = 0.5` for 30 days and then :math:`R_t = 2` for the remaining 17 months. This corresponds to lifting lockdown in 30 days. 370 | 371 | 2. :math:`R_t = 0.5` for 120 days and then :math:`R_t = 2` for the remaining 14 months. This corresponds to lifting lockdown in 4 months. 372 | 373 | The parameters considered here start the model with 25,000 active infections 374 | and 75,000 agents already exposed to the virus and thus soon to be contagious. 375 | 376 | .. code-block:: ipython3 377 | 378 | # initial conditions 379 | i_0 = 25_000 / pop_size 380 | e_0 = 75_000 / pop_size 381 | s_0 = 1 - i_0 - e_0 382 | x_0 = s_0, e_0, i_0 383 | 384 | Let's calculate the paths: 385 | 386 | .. 
code-block:: ipython3 387 | 388 | R0_paths = (lambda t: 0.5 if t < 30 else 2, 389 | lambda t: 0.5 if t < 120 else 2) 390 | 391 | labels = [f'scenario {i}' for i in (1, 2)] 392 | 393 | i_paths, c_paths = [], [] 394 | 395 | for R0 in R0_paths: 396 | i_path, c_path = solve_path(R0, t_vec, x_init=x_0) 397 | i_paths.append(i_path) 398 | c_paths.append(c_path) 399 | 400 | 401 | Here is the number of active infections: 402 | 403 | .. code-block:: ipython3 404 | 405 | plot_paths(i_paths, labels) 406 | 407 | What kind of mortality can we expect under these scenarios? 408 | 409 | Suppose that 1\% of cases result in death 410 | 411 | .. code-block:: ipython3 412 | 413 | ν = 0.01 414 | 415 | This is the cumulative number of deaths: 416 | 417 | .. code-block:: ipython3 418 | 419 | paths = [path * ν * pop_size for path in c_paths] 420 | plot_paths(paths, labels) 421 | 422 | This is the daily death rate: 423 | 424 | .. code-block:: ipython3 425 | 426 | paths = [path * ν * γ * pop_size for path in i_paths] 427 | plot_paths(paths, labels) 428 | 429 | Pushing the peak of curve further into the future may reduce cumulative deaths 430 | if a vaccine is found. 431 | 432 | 433 | -------------------------------------------------------------------------------- /source/rst/status.rst: -------------------------------------------------------------------------------- 1 | .. _status: 2 | 3 | ************** 4 | Lecture Status 5 | ************** 6 | 7 | .. raw:: html 8 | 9 | 10 |

The badges below show which lectures are currently passing their execution test (i.e., executing without errors).

11 |

The lecture code checker was last run: N/A

12 |
13 |
14 |
15 |

The code checker is run on a t2.small Amazon EC2 instance, which has a single CPU and 2 GiB of memory.

16 |

You should achieve faster run times on many common laptops and desktops.

-------------------------------------------------------------------------------- /source/rst/time_series_with_matrices.rst: -------------------------------------------------------------------------------- 1 | .. _time_series_with_matrices: 2 | 3 | .. include:: /_static/includes/header.raw 4 | 5 | .. highlight:: python3 6 | 7 | ******************************************** 8 | Univariate Time Series with Matrix Algebra 9 | ******************************************** 10 | 11 | .. contents:: :depth: 2 12 | 13 | 14 | 15 | Overview 16 | ======== 17 | 18 | This lecture uses matrices to solve some linear difference equations. 19 | 20 | As a running example, we’ll study a **second-order linear difference 21 | equation** that was the key technical tool in Paul Samuelson’s 1939 22 | article :cite:`Samuelson1939` that introduced the **multiplier-accelerator** model. 23 | 24 | This model became the workhorse that powered early econometric versions of 25 | Keynesian macroeconomic models in the United States. 26 | 27 | You can read about the details of that model in :doc:`this` 28 | QuantEcon lecture. 29 | 30 | (That lecture also describes some technicalities about second-order linear difference equations.) 31 | 32 | We'll also study a "perfect foresight" model of stock prices that involves solving 33 | a "forward-looking" linear difference equation. 34 | 35 | We will use the following imports: 36 | 37 | .. code-block:: ipython 38 | 39 | import numpy as np 40 | import matplotlib.pyplot as plt 41 | %matplotlib inline 42 | 43 | Samuelson's model 44 | ================== 45 | 46 | 47 | Let :math:`t = 0, \pm 1, \pm 2, \ldots` index time. 48 | 49 | For :math:`t = 1, 2, 3, \ldots, T` suppose that 50 | 51 | .. math:: 52 | :label: tswm_1 53 | 54 | 55 | y_{t} = \alpha_{0} + \alpha_{1} y_{t-1} + \alpha_{2} y_{t-2} 56 | 57 | 58 | where we assume that :math:`y_0` and :math:`y_{-1}` are given numbers 59 | that we take as **initial conditions**. 
60 | 61 | In Samuelson's model, :math:`y_t` stood for **national income** or perhaps a different 62 | measure of aggregate activity called **gross domestic product** (GDP) at time :math:`t`. 63 | 64 | Equation :eq:`tswm_1` is called a **second-order linear difference equation**. 65 | 66 | But actually, it is a collection of :math:`T` simultaneous linear 67 | equations in the :math:`T` variables :math:`y_1, y_2, \ldots, y_T`. 68 | 69 | **Note:** To be able to solve a second-order linear difference 70 | equation, we require two **boundary conditions** that can take the form 71 | either of two **initial conditions** or two **terminal conditions** or 72 | possibly one of each. 73 | 74 | Let’s write our equations as a stacked system 75 | 76 | .. math:: 77 | 78 | 79 | \underset{\equiv A}{\underbrace{\left[\begin{array}{cccccccc} 80 | 1 & 0 & 0 & 0 & \cdots & 0 & 0 & 0\\ 81 | -\alpha_{1} & 1 & 0 & 0 & \cdots & 0 & 0 & 0\\ 82 | -\alpha_{2} & -\alpha_{1} & 1 & 0 & \cdots & 0 & 0 & 0\\ 83 | 0 & -\alpha_{2} & -\alpha_{1} & 1 & \cdots & 0 & 0 & 0\\ 84 | \vdots & \vdots & \vdots & \vdots & \cdots & \vdots & \vdots & \vdots\\ 85 | 0 & 0 & 0 & 0 & \cdots & -\alpha_{2} & -\alpha_{1} & 1 86 | \end{array}\right]}}\left[\begin{array}{c} 87 | y_{1}\\ 88 | y_{2}\\ 89 | y_{3}\\ 90 | y_{4}\\ 91 | \vdots\\ 92 | y_{T} 93 | \end{array}\right]=\underset{\equiv b}{\underbrace{\left[\begin{array}{c} 94 | \alpha_{0}+\alpha_{1}y_{0}+\alpha_{2}y_{-1}\\ 95 | \alpha_{0}+\alpha_{2}y_{0}\\ 96 | \alpha_{0}\\ 97 | \alpha_{0}\\ 98 | \vdots\\ 99 | \alpha_{0} 100 | \end{array}\right]}} 101 | 102 | or 103 | 104 | .. math:: A y = b 105 | 106 | where 107 | 108 | .. math:: y = \begin{bmatrix} y_1 \cr y_2 \cr \cdots \cr y_T \end{bmatrix} 109 | 110 | Evidently :math:`y` can be computed from 111 | 112 | .. math:: 113 | 114 | 115 | y = A^{-1} b 116 | 117 | The vector :math:`y` is a complete time path :math:`\{y_t\}_{t=1}^T`. 
118 | 119 | Let’s put Python to work on an example that captures the flavor of 120 | Samuelson’s multiplier-accelerator model. 121 | 122 | We'll set parameters equal to the same values we used in :doc:`this QuantEcon lecture`. 123 | 124 | .. code-block:: python3 125 | 126 | T = 80 127 | 128 | # parameters 129 | 𝛼0 = 10.0 130 | 𝛼1 = 1.53 131 | 𝛼2 = -.9 132 | 133 | y_1 = 28. # y_{-1} 134 | y0 = 24. 135 | 136 | .. code-block:: python3 137 | 138 | # construct A and b 139 | A = np.zeros((T, T)) 140 | 141 | for i in range(T): 142 | A[i, i] = 1 143 | 144 | if i-1 >= 0: 145 | A[i, i-1] = -𝛼1 146 | 147 | if i-2 >= 0: 148 | A[i, i-2] = -𝛼2 149 | 150 | b = np.ones(T) * 𝛼0 151 | b[0] = 𝛼0 + 𝛼1 * y0 + 𝛼2 * y_1 152 | b[1] = 𝛼0 + 𝛼2 * y0 153 | 154 | Let’s look at the matrix :math:`A` and the vector :math:`b` for our 155 | example. 156 | 157 | .. code-block:: python3 158 | 159 | A, b 160 | 161 | Now let’s solve for the path of :math:`y`. 162 | 163 | If :math:`y_t` is GNP at time :math:`t`, then we have a version of 164 | Samuelson’s model of the dynamics for GNP. 165 | 166 | .. code-block:: python3 167 | 168 | A_inv = np.linalg.inv(A) 169 | 170 | y = A_inv @ b 171 | 172 | .. code-block:: python3 173 | 174 | plt.plot(np.arange(T)+1, y) 175 | plt.xlabel('t') 176 | plt.ylabel('y') 177 | 178 | plt.show() 179 | 180 | If we set both initial values at the **steady state** value of :math:`y_t`, namely, 181 | 182 | .. math:: 183 | 184 | 185 | y_{0} = y_{-1} = \frac{\alpha_{0}}{1 - \alpha_{1} - \alpha_{2}} 186 | 187 | then :math:`y_{t}` will be constant 188 | 189 | .. code-block:: python3 190 | 191 | y_1_steady = 𝛼0 / (1 - 𝛼1 - 𝛼2) # y_{-1} 192 | y0_steady = 𝛼0 / (1 - 𝛼1 - 𝛼2) 193 | 194 | b_steady = np.ones(T) * 𝛼0 195 | b_steady[0] = 𝛼0 + 𝛼1 * y0_steady + 𝛼2 * y_1_steady 196 | b_steady[1] = 𝛼0 + 𝛼2 * y0_steady 197 | 198 | .. code-block:: python3 199 | 200 | y_steady = A_inv @ b_steady 201 | 202 | .. 
code-block:: python3 203 | 204 | plt.plot(np.arange(T)+1, y_steady) 205 | plt.xlabel('t') 206 | plt.ylabel('y') 207 | 208 | plt.show() 209 | 210 | Adding a random term 211 | ===================== 212 | 213 | To generate some excitement, we'll follow in the spirit of the great economists 214 | Eugen Slutsky and Ragnar Frisch and replace our original second-order difference 215 | equation with the following **second-order stochastic linear difference 216 | equation**: 217 | 218 | .. math:: 219 | :label: tswm_2 220 | 221 | 222 | y_{t} = \alpha_{0} + \alpha_{1} y_{t-1} + \alpha_{2} y_{t-2} + u_t 223 | 224 | 225 | where :math:`u_{t} \sim N\left(0, \sigma_{u}^{2}\right)` and is IID, 226 | meaning **independent** and **identically** distributed. 227 | 228 | We’ll stack these :math:`T` equations into a system cast in terms of 229 | matrix algebra. 230 | 231 | Let’s define the random vector 232 | 233 | .. math:: 234 | 235 | 236 | u=\left[\begin{array}{c} 237 | u_{1}\\ 238 | u_{2}\\ 239 | \vdots\\ 240 | u_{T} 241 | \end{array}\right] 242 | 243 | Where :math:`A, b, y` are defined as above, now assume that :math:`y` is 244 | governed by the system 245 | 246 | .. math:: 247 | 248 | 249 | A y = b + u 250 | 251 | The solution for :math:`y` becomes 252 | 253 | .. math:: 254 | 255 | 256 | y = A^{-1} \left(b + u\right) 257 | 258 | Let’s try it out in Python. 259 | 260 | .. code-block:: python3 261 | 262 | 𝜎u = 2. 263 | 264 | .. code-block:: python3 265 | 266 | u = np.random.normal(0, 𝜎u, size=T) 267 | y = A_inv @ (b + u) 268 | 269 | .. code-block:: python3 270 | 271 | plt.plot(np.arange(T)+1, y) 272 | plt.xlabel('t') 273 | plt.ylabel('y') 274 | 275 | plt.show() 276 | 277 | The above time series looks a lot like (detrended) GDP series for a 278 | number of advanced countries in recent decades. 279 | 280 | We can simulate :math:`N` paths. 281 | 282 | .. 
code-block:: python3 283 | 284 | N = 100 285 | 286 | for i in range(N): 287 | u = np.random.normal(0, 𝜎u, size=T) 288 | y = A_inv @ (b + u) 289 | plt.plot(np.arange(T)+1, y, lw=0.5) 290 | 291 | plt.xlabel('t') 292 | plt.ylabel('y') 293 | 294 | plt.show() 295 | 296 | Also consider the case when :math:`y_{0}` and :math:`y_{-1}` are at 297 | steady state. 298 | 299 | .. code-block:: python3 300 | 301 | N = 100 302 | 303 | for i in range(N): 304 | u = np.random.normal(0, 𝜎u, size=T) 305 | y_steady = A_inv @ (b_steady + u) 306 | plt.plot(np.arange(T)+1, y_steady, lw=0.5) 307 | 308 | plt.xlabel('t') 309 | plt.ylabel('y') 310 | 311 | plt.show() 312 | 313 | A forward looking model 314 | ======================= 315 | 316 | Samuelson’s model is **backwards looking** in the sense that we give it **initial conditions** and let it 317 | run. 318 | 319 | Let’s now turn to model that is **forward looking**. 320 | 321 | We apply similar linear algebra machinery to study a **perfect 322 | foresight** model widely used as a benchmark in macroeconomics and 323 | finance. 324 | 325 | As an example, we suppose that :math:`p_t` is the price of a stock and 326 | that :math:`y_t` is its dividend. 327 | 328 | We assume that :math:`y_t` is determined by second-order difference 329 | equation that we analyzed just above, so that 330 | 331 | .. math:: 332 | 333 | 334 | y = A^{-1} \left(b + u\right) 335 | 336 | Our **perfect foresight** model of stock prices is 337 | 338 | .. math:: 339 | 340 | 341 | p_{t} = \sum_{j=0}^{T-t} \beta^{j} y_{t+j}, \quad \beta \in (0,1) 342 | 343 | where :math:`\beta` is a discount factor. 344 | 345 | The model asserts that the price of the stock at :math:`t` equals the 346 | discounted present values of the (perfectly foreseen) future dividends. 347 | 348 | Form 349 | 350 | .. 
math:: 351 | 352 | 353 | \underset{\equiv p}{\underbrace{\left[\begin{array}{c} 354 | p_{1}\\ 355 | p_{2}\\ 356 | p_{3}\\ 357 | \vdots\\ 358 | p_{T} 359 | \end{array}\right]}}=\underset{\equiv B}{\underbrace{\left[\begin{array}{ccccc} 360 | 1 & \beta & \beta^{2} & \cdots & \beta^{T-1}\\ 361 | 0 & 1 & \beta & \cdots & \beta^{T-2}\\ 362 | 0 & 0 & 1 & \cdots & \beta^{T-3}\\ 363 | \vdots & \vdots & \vdots & \vdots & \vdots\\ 364 | 0 & 0 & 0 & \cdots & 1 365 | \end{array}\right]}}\left[\begin{array}{c} 366 | y_{1}\\ 367 | y_{2}\\ 368 | y_{3}\\ 369 | \vdots\\ 370 | y_{T} 371 | \end{array}\right] 372 | 373 | .. code-block:: python3 374 | 375 | 𝛽 = .96 376 | 377 | 378 | .. code-block:: python3 379 | 380 | # construct B 381 | B = np.zeros((T, T)) 382 | 383 | for i in range(T): 384 | B[i, i:] = 𝛽 ** np.arange(0, T-i) 385 | 386 | .. code-block:: python3 387 | 388 | B 389 | 390 | .. code-block:: python3 391 | 392 | 𝜎u = 0. 393 | u = np.random.normal(0, 𝜎u, size=T) 394 | y = A_inv @ (b + u) 395 | y_steady = A_inv @ (b_steady + u) 396 | 397 | .. code-block:: python3 398 | 399 | p = B @ y 400 | 401 | .. code-block:: python3 402 | 403 | plt.plot(np.arange(0, T)+1, y, label='y') 404 | plt.plot(np.arange(0, T)+1, p, label='p') 405 | plt.xlabel('t') 406 | plt.ylabel('y/p') 407 | plt.legend() 408 | 409 | plt.show() 410 | 411 | Can you explain why the trend of the price is downward over time? 412 | 413 | Also consider the case when :math:`y_{0}` and :math:`y_{-1}` are at the 414 | steady state. 415 | 416 | .. code-block:: python3 417 | 418 | p_steady = B @ y_steady 419 | 420 | plt.plot(np.arange(0, T)+1, y_steady, label='y') 421 | plt.plot(np.arange(0, T)+1, p_steady, label='p') 422 | plt.xlabel('t') 423 | plt.ylabel('y/p') 424 | plt.legend() 425 | 426 | plt.show() 427 | 428 | -------------------------------------------------------------------------------- /source/rst/troubleshooting.rst: -------------------------------------------------------------------------------- 1 | .. 
_troubleshooting: 2 | 3 | .. include:: /_static/includes/header.raw 4 | 5 | .. highlight:: python3 6 | 7 | *************** 8 | Troubleshooting 9 | *************** 10 | 11 | .. contents:: :depth: 2 12 | 13 | This page is for readers experiencing errors when running the code from the lectures. 14 | 15 | Fixing Your Local Environment 16 | ============================== 17 | 18 | The basic assumption of the lectures is that code in a lecture should execute whenever 19 | 20 | #. it is executed in a Jupyter notebook and 21 | 22 | #. the notebook is running on a machine with the latest version of Anaconda Python. 23 | 24 | You have installed Anaconda, haven't you, following the instructions in `this lecture `__? 25 | 26 | Assuming that you have, the most common source of problems for our readers is that their Anaconda distribution is not up to date. 27 | 28 | `Here's a useful article `__ 29 | on how to update Anaconda. 30 | 31 | Another option is to simply remove Anaconda and reinstall. 32 | 33 | You also need to keep the external code libraries, such as `QuantEcon.py 34 | `__ up to date. 35 | 36 | For this task you can either 37 | 38 | * use `conda install -y quantecon` on the command line, or 39 | 40 | * execute `!pip install quantecon` within a Jupyter notebook. 41 | 42 | If your local environment is still not working you can do two things. 43 | 44 | First, you can use a remote machine instead, by clicking on the `Launch Notebook` icon available for each lecture 45 | 46 | .. image:: _static/lecture_specific/troubleshooting/launch.png 47 | 48 | Second, you can report an issue, so we can try to fix your local set up. 49 | 50 | We like getting feedback on the lectures so please don't hesitate to get in 51 | touch. 52 | 53 | Reporting an Issue 54 | =================== 55 | 56 | One way to give feedback is to raise an issue through our `issue tracker 57 | `__. 58 | 59 | Please be as specific as possible. 
Tell us where the problem is and as much 60 | detail about your local set up as you can provide. 61 | 62 | Another feedback option is to use our `discourse forum `__. 63 | 64 | Finally, you can provide direct feedback to contact@quantecon.org 65 | 66 | -------------------------------------------------------------------------------- /source/rst/zreferences.rst: -------------------------------------------------------------------------------- 1 | .. _references: 2 | 3 | ********** 4 | References 5 | ********** 6 | 7 | .. bibliography:: /_static/quant-econ.bib 8 | :cited: 9 | -------------------------------------------------------------------------------- /theme/minimal/static/css/qe.python.css: -------------------------------------------------------------------------------- 1 | /* Homepage */ 2 | .home-intro { 3 | display: flex; 4 | align-content: center; 5 | } 6 | .home-blurb { 7 | font-size: 1.1rem; 8 | line-height: 1.5; 9 | } 10 | .home-intro .sponsor { 11 | list-style: none; 12 | padding:0; 13 | flex-shrink: 0; 14 | margin:0 60px 0 4rem; 15 | text-align: center; 16 | } 17 | .home-intro .sponsor li { 18 | display: block; 19 | margin:1rem 0; 20 | padding:0; 21 | } 22 | .web-version { 23 | display:inline-block; 24 | padding: 2rem 0rem; 25 | } 26 | .web-version a { 27 | display: block; 28 | padding:1rem 40px 1rem 80px; 29 | position: relative; 30 | } 31 | .web-version a .thumb { 32 | position: absolute; 33 | left:0px; 34 | top:1rem; 35 | } 36 | .web-version a .thumb img { 37 | width:50px; 38 | } 39 | .web-version a h2 { 40 | line-height: 1; 41 | margin:0; 42 | font-size: 1.4rem; 43 | } 44 | .web-version a p { 45 | margin:10px 0 0 0; 46 | } 47 | .home-alternatives { 48 | padding: 1rem 0rem; 49 | } 50 | .home-alternatives ul { 51 | list-style: none; 52 | padding:0; 53 | margin:0 0; 54 | } 55 | .home-alternatives li { 56 | padding:0; 57 | margin:1rem 1rem; 58 | } 59 | .home-alternatives li a { 60 | display: block; 61 | } 62 | .home-alternatives li a h3 { 63 | 
line-height: 1; 64 | margin:0; 65 | font-size: 1.2rem; 66 | } 67 | .home-alternatives li a p { 68 | margin:10px 0 0 0; 69 | } 70 | @media only screen and (max-width: 768px) { 71 | .home-intro { 72 | display: block; 73 | } 74 | .home-intro .sponsor { 75 | margin:0 auto; 76 | } 77 | } 78 | 79 | /* Other */ 80 | #qe-notebook-header { 81 | display: none; 82 | } -------------------------------------------------------------------------------- /theme/minimal/static/img/code-block-fade.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/theme/minimal/static/img/code-block-fade.png -------------------------------------------------------------------------------- /theme/minimal/static/img/powered-by-NumFOCUS-orange.svg: -------------------------------------------------------------------------------- 1 | powered bypowered byNumFOCUSNumFOCUS -------------------------------------------------------------------------------- /theme/minimal/static/img/py-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/theme/minimal/static/img/py-logo.png -------------------------------------------------------------------------------- /theme/minimal/static/img/qe-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/theme/minimal/static/img/qe-logo.png -------------------------------------------------------------------------------- /theme/minimal/static/img/search-icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/theme/minimal/static/img/search-icon.png 
-------------------------------------------------------------------------------- /theme/minimal/static/img/sloan_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/theme/minimal/static/img/sloan_logo.png -------------------------------------------------------------------------------- /theme/minimal/static/js/base.js: -------------------------------------------------------------------------------- 1 | // base.js v1.0 2 | 3 | 4 | // Declare MathJax Macros for the Appropriate Macros 5 | MathJax.Hub.Config({ 6 | TeX: { 7 | Macros: { 8 | Var: "\\mathop{\\mathrm{Var}}", 9 | trace: "\\mathop{\\mathrm{trace}}", 10 | argmax: "\\mathop{\\mathrm{arg\\,max}}", 11 | argmin: "\\mathop{\\mathrm{arg\\,min}}", 12 | proj: "\\mathop{\\mathrm{proj}}", 13 | col: "\\mathop{\\mathrm{col}}", 14 | Span: "\\mathop{\\mathrm{span}}", 15 | epsilon: "\\varepsilon", 16 | EE: "\\mathbb{E}", 17 | PP: "\\mathbb{P}", 18 | RR: "\\mathbb{R}", 19 | NN: "\\mathbb{N}", 20 | ZZ: "\\mathbb{Z}", 21 | aA: "\\mathcal{A}", 22 | bB: "\\mathcal{B}", 23 | cC: "\\mathcal{C}", 24 | dD: "\\mathcal{D}", 25 | eE: "\\mathcal{E}", 26 | fF: "\\mathcal{F}", 27 | gG: "\\mathcal{G}", 28 | hH: "\\mathcal{H}", 29 | } 30 | } 31 | }); 32 | MathJax.Hub.Config({ 33 | tex2jax: { 34 | inlineMath: [ ['$','$'], ['\\(','\\)'] ], 35 | processEscapes: true 36 | } 37 | }); 38 | 39 | 40 | /* Collapsed code block */ 41 | 42 | const collapsableCodeBlocks = document.querySelectorAll("div[class^='collapse'] .highlight"); 43 | for (var i = 0; i < collapsableCodeBlocks.length; i++) { 44 | const toggleContainer = document.createElement('div'); 45 | toggleContainer.innerHTML = 'Show more...'; 46 | collapsableCodeBlocks[i].parentNode.insertBefore(toggleContainer, collapsableCodeBlocks[i].nextSibling); 47 | } 48 | 49 | const collapsableCodeToggles = document.querySelectorAll("div[class^='collapse'] .toggle"); 
// Wire up each collapse toggle: clicking it expands/collapses the nearest
// ancestor div whose class starts with "collapse".  (The toggle elements
// themselves are created by the loop just above this one.)
for (var i = 0; i < collapsableCodeToggles.length; i++) {
    collapsableCodeToggles[i].addEventListener('click', function(e) {
        e.preventDefault();
        // Nearest collapsible ancestor of the clicked toggle.
        var codeBlock = this.closest('div[class^="collapse"]');
        if ( codeBlock.classList.contains('expanded') ) {
            // Collapse: hide this toggle and show its sibling toggle.
            codeBlock.classList.remove('expanded');
            this.style.display = 'none';
            this.nextSibling.style.display = 'block';
        } else {
            // Expand: hide this toggle and show its sibling toggle.
            codeBlock.classList.add('expanded');
            this.style.display = 'none';
            this.previousSibling.style.display = 'block';
        }
    });
}


/* Wrap container around all tables allowing horizontal scroll */

const contentTables = document.querySelectorAll(".content table");
for (var i = 0; i < contentTables.length; i++) {
    // Insert the wrapper where the table currently sits, then move the
    // table inside it.
    var wrapper = document.createElement('div');
    wrapper.classList.add('table-container');
    contentTables[i].parentNode.insertBefore(wrapper, contentTables[i]);
    wrapper.appendChild(contentTables[i]);
}


// Populate status page from code execution results JSON

// Fetch the code-execution results JSON and pass the raw response text to
// `callback` on success.  Asynchronous; the function itself returns nothing.
// NOTE(review): the URL is resolved relative to the current page, so this
// presumably only works for pages served from the site root — confirm.
function loadCodeExecutionJSON(callback) {
    var xobj = new XMLHttpRequest();
    xobj.overrideMimeType("application/json");
    xobj.open('GET', '_static/code-execution-results.json', true);
    xobj.onreadystatechange = function () {
        // readyState 4 === DONE; status is compared loosely to the string "200".
        if (xobj.readyState == 4 && xobj.status == "200") {
            // Anonymous callback required: .open in async mode returns undefined.
            callback(xobj.responseText);
        }
    };
    xobj.send(null);
}

// Build the status table (#status_table), one row per lecture with name,
// language, runtime and a pass/fail badge.  Runs only on pages that have
// the table element.
if ( document.getElementById('status_table') ) {

    loadCodeExecutionJSON(function(response) {
        // Parsing JSON string into object
        var data = JSON.parse(response);
        var status_data = [];
        var last_test_time = data.run_time;
        document.getElementById('last_test_time').textContent = last_test_time;
        // Flatten the per-lecture results into an array of plain records.
        for (var key in data.results)
        {
            var new_record = {};
            new_record['name'] = data.results[key].filename;
            new_record['runtime'] = data.results[key].runtime;
            new_record['extension'] = data.results[key].extension;
            new_record['result'] = data.results[key].num_errors; // 0 pass, 1 fail, -1 n/a
            new_record['language'] = data.results[key].language;

            status_data.push(new_record);
        }

        // empty the table
        var table = document.getElementById("status_table");
        while (table.firstChild)
            table.removeChild(table.firstChild);
        // NOTE(review): this header string appears to have lost its markup
        // tags in this copy of the file (presumably <th> cells around each
        // column title) — confirm against the upstream file.
        var rawHTML = "Lecture FileLanguageRunning Time";
        table.innerHTML = rawHTML;
        // add the data
        for (var i = 0; i < status_data.length; i ++)
        {
            // NOTE(review): redundant lookup — `table` is already bound above.
            var table = document.getElementById("status_table");
            var row = table.insertRow(-1);
            row.setAttribute("id", status_data[i]['name'], 0);

            // One cell each for lecture name, language, runtime, status badge.
            var lectureCell = row.insertCell(0);
            var langCell = row.insertCell(1);
            var runtimeCell = row.insertCell(2);
            var statusCell = row.insertCell(3);
            var badge, status, color, lang, link;

            // Map the numeric result code onto a badge label and colour.
            if (status_data[i]['result'] === 0)
            {
                status = "Passing";
                color = "brightgreen";
            }
            else if (status_data[i]['result'] === 1)
            {
                status = "Failing";
                color = "red";
            }
            else if (status_data[i]['result'] === -1) {
                status = "Not available";
                color = "lightgrey";
            }

            link = '/' + status_data[i]['name'] + '.html';

            // NOTE(review): the badge markup appears to have lost its tags in
            // this copy (likely an <a>/<img> shields.io fragment built from
            // `link`, `status` and `color`) — confirm against upstream.
            badge = '';

            // Add some text to the new cells:
            lectureCell.innerHTML = status_data[i]['name'];
            langCell.innerHTML = status_data[i]['language'];
            runtimeCell.innerHTML = status_data[i]['runtime'];
            statusCell.innerHTML = badge;


        }
    })
}


// Show executability status badge in header

// Result codes shared by the per-page badge logic below.
const LECTURE_OK = 0;
const LECTURE_FAILED = 1;
const LECTURE_ERROR = -1;

// Render the pass/fail badge for the current page into
// #executability_status_badge.  `page_status` must be one of the LECTURE_*
// codes above.  Returns nothing.
function update_page_badge(page_status)
{
    var badge = document.getElementById("executability_status_badge");
    var status, color;

    if (page_status === LECTURE_OK)
    {
        status = "Passing";
        color = "brightgreen";
    }
    else if (page_status == LECTURE_FAILED)
    {
        status = "Failing";
        color = "red";
    }
    else if (page_status == LECTURE_ERROR)
    {
        status = "Not available";
        color = "lightgrey";
    }
    else
    {
        // Unknown code: log and fall through with `status`/`color` undefined.
        console.log("Panic! Invalid parameter passed to update_page_badge().");
    }

    // NOTE(review): the badge markup appears to have lost its tags in this
    // copy of the file (likely a shields.io <img> built from `status` and
    // `color`) — confirm against the upstream file.
    badge.innerHTML = '';

    //badge.style.display="block";

    return;
}

// Find the current page's entry in `status_data` (matching on the lecture
// file name taken from window.location, case-insensitively) and return the
// corresponding LECTURE_* code; LECTURE_ERROR when the page is not listed.
function determine_page_status(status_data)
{
    var path = window.location.pathname;
    var filename_parts = path.split("/");
    var filename = filename_parts.pop();

    // Strip the extension, e.g. "/foo/bar.html" -> "bar".
    var lecture_name = filename.split(".")[0].toLowerCase();

    var res = LECTURE_ERROR;

    for (var i = 0; i < status_data.length; i ++)
    {
        if (status_data[i]['name'].split('/').pop() === lecture_name)
        {
            if (status_data[i]['result'] === 0)
            {
                res = LECTURE_OK;
            }
            else
            {
                res = LECTURE_FAILED;
            }
        }
    }
    return res;
}

// Fetch the execution-results JSON and update this page's header badge.
function load_this_page_badge()
{
    loadCodeExecutionJSON(function(response) {
        // Parsing JSON string into object
        var data = JSON.parse(response);
        // FIXME(review): missing `var`/`let` — `status_data` leaks into the
        // global scope here.
        status_data = [];
        for (var key in data.results)
        {
            var new_record = {};
            new_record['name'] = data.results[key].filename;
            new_record['runtime'] = data.results[key].runtime;
            new_record['extension'] = data.results[key].extension;
            new_record['result'] = data.results[key].num_errors;
            new_record['language'] = data.results[key].language;
            status_data.push(new_record);
        }
        var page_status = determine_page_status(status_data);
        update_page_badge(page_status);
    });
}

// Return a shields.io badge-image URL for a total-coverage percentage,
// or a grey "not available" badge when `percentage` is -1 (or lower).
function get_badge(percentage)
{
    var color, badge;

    if (percentage > -1)
    {
        if ( percentage < 50 ) {
            color = 'red';
        } else {
            color = 'brightgreen';
        }
        // %25 is the URL-encoded "%" of the percentage label.
        badge = 'https://img.shields.io/badge/Total%20coverage-' + percentage + '%25-' + color + '.svg';
    } else {
        // FIXME(review): stray '>' at the end of this URL — ".svg>" looks
        // like a typo for ".svg" and would yield a broken image link.
        badge = 'https://img.shields.io/badge/Total%20coverage-not%20available-lightgrey.svg>';
    }
    return badge;
}

// Compute per-extension and overall pass percentages from the
// execution-results JSON and render the overall coverage badge.
function load_percentages()
{
    var number_of_lectures = {};   // total lectures seen, keyed by extension
    var number_which_passed = {};  // passing lectures, keyed by extension
    var keys_list = [];            // extensions in first-seen order
    var combined_percentage;

    loadCodeExecutionJSON(function(response) {
        // Parsing JSON string into object
        var data = JSON.parse(response);
        // Tally totals and passes per file extension.
        for (var key in data.results)
        {
            if (data.results[key].num_errors === 0)
            {
                if (!(data.results[key].extension in number_which_passed))
                {
                    number_which_passed[data.results[key].extension] = 0;
                    keys_list.push(data.results[key].extension);
                }
                number_which_passed[data.results[key].extension] += 1;
            }

            if (!(data.results[key].extension in number_of_lectures))
            {
                number_of_lectures[data.results[key].extension] = 0;
            }
            number_of_lectures[data.results[key].extension] += 1;
        }

        var percentages = {};
        var total_lectures = 0;
        var total_passing = 0;
        for (var k in keys_list)
        {
            // NOTE(review): `key` from the loop above is reused here without
            // redeclaration.
            key = keys_list[k];

            percentages[key] = 0;
            if (number_of_lectures[key] === 0)
            {
                // An appropriate value for this is yet to be determined.
                percentages[key] = 100;
            }
            else
            {
                percentages[key] = Math.floor(100 * number_which_passed[key] / number_of_lectures[key]);
            }

            // Sensible boundary checking.
316 | if (percentages[key] < 0 || percentages[key] > 100) 317 | { 318 | percentages[key] = -1; 319 | } 320 | 321 | total_lectures += number_of_lectures[key]; 322 | total_passing += number_which_passed[key]; 323 | } 324 | 325 | if (total_lectures === 0) 326 | { 327 | combined_percentage = 0; 328 | } 329 | else 330 | { 331 | combined_percentage = Math.floor(100 * total_passing / total_lectures); 332 | } 333 | 334 | var badge = document.getElementById("coverage_badge"); 335 | badge.innerHTML = ''; 336 | 337 | }); 338 | 339 | } 340 | 341 | if ( document.getElementById('executability_status_badge') ) { 342 | load_this_page_badge(); 343 | } 344 | 345 | if ( document.getElementById('coverage_badge') ) { 346 | load_percentages(); 347 | } -------------------------------------------------------------------------------- /theme/minimal/static/sloan_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantEcon/lecture-python/5f8bee9be8fa5e2089186fb5eb142f75056bbfb8/theme/minimal/static/sloan_logo.png -------------------------------------------------------------------------------- /theme/minimal/templates/error_report_template.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | Jupyter Script Test Execution - Error Report 4 | 5 | 6 | 7 | 8 | 9 | 10 |

Error Report - Generated {DATETIME}

11 | 12 |

13 | Overview 14 |

15 | 16 | {ERROR_SUMMARY} 17 | 18 | {NOTEBOOK_LOOP} 19 | 20 | 21 | -------------------------------------------------------------------------------- /theme/minimal/templates/html.tpl: -------------------------------------------------------------------------------- 1 | {%- extends 'display_priority.tpl' -%} 2 | 3 | {% set site_title = 'Lectures' %} 4 | {% set nb_title = nb.metadata.get('title', '') %} 5 | {% set nb_filename = nb.metadata.get('filename', '') %} 6 | {% set nb_filename_with_path = nb.metadata.get('filename_with_path','') %} 7 | {% set indexPage = nb_filename.startswith('index') %} 8 | {% set download_nb = nb.metadata.get('download_nb','') %} 9 | {% set download_nb_path = nb.metadata.get('download_nb_path','') %} 10 | {% if nb_filename.endswith('.rst') %} 11 | {% set nb_filename = nb_filename[:-4] %} 12 | {% endif %} 13 | 14 | {%- block header %} 15 | 16 | 17 | 18 | 19 | {% if nb_filename == 'index' %} 20 | {{ site_title }} 21 | {% else %} 22 | {{nb_title}} – {{ site_title }} 23 | {% endif %} 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 |
37 | 38 |
39 | 40 |
41 | 42 |

{{ site_title }}

43 | 44 |

Skip to content

45 | 46 |
47 | 48 |
49 | 50 | {% if indexPage or nb_filename == 'status' %} 51 |
52 | {% else %} 53 |
54 | {% endif %} 55 | 56 |
57 | 58 |
59 | 60 |
61 | 62 |
63 | 64 | 65 | 66 |
67 | 68 | {%- endblock header-%} 69 | 70 | {% block codecell %} 71 | {% set html_class = cell['metadata'].get('html-class', {}) %} 72 |
73 | {{ super() }} 74 |
75 | {%- endblock codecell %} 76 | 77 | {% block input_group -%} 78 |
79 | {{ super() }} 80 |
81 | {% endblock input_group %} 82 | 83 | {% block output_group %} 84 |
85 |
86 | {{ super() }} 87 |
88 |
89 | {% endblock output_group %} 90 | 91 | {% block in_prompt -%} 92 |
93 | {%- if cell.execution_count is defined -%} 94 | In [{{ cell.execution_count|replace(None, " ") }}]: 95 | {%- else -%} 96 | In [ ]: 97 | {%- endif -%} 98 |
99 | {%- endblock in_prompt %} 100 | 101 | {% block empty_in_prompt -%} 102 |
103 |
104 | {%- endblock empty_in_prompt %} 105 | 106 | {# 107 | output_prompt doesn't do anything in HTML, 108 | because there is a prompt div in each output area (see output block) 109 | #} 110 | {% block output_prompt %} 111 | {% endblock output_prompt %} 112 | 113 | {% block input %} 114 |
115 |
116 | {{ cell.source | highlight_code(metadata=cell.metadata) }} 117 |
118 |
119 | {%- endblock input %} 120 | 121 | {% block output_area_prompt %} 122 | {%- if output.output_type == 'execute_result' -%} 123 |
124 | {%- if cell.execution_count is defined -%} 125 | Out[{{ cell.execution_count|replace(None, " ") }}]: 126 | {%- else -%} 127 | Out[ ]: 128 | {%- endif -%} 129 | {%- else -%} 130 |
131 | {%- endif -%} 132 |
133 | {% endblock output_area_prompt %} 134 | 135 | {% block output %} 136 |
137 | {% if resources.global_content_filter.include_output_prompt %} 138 | {{ self.output_area_prompt() }} 139 | {% endif %} 140 | {{ super() }} 141 |
142 | {% endblock output %} 143 | 144 | {% block markdowncell scoped %} 145 | {% set html_class = cell['metadata'].get('html-class', {}) %} 146 |
147 | {%- if resources.global_content_filter.include_input_prompt-%} 148 | {{ self.empty_in_prompt() }} 149 | {%- endif -%} 150 |
151 |
152 | {{ cell.source | markdown2html | strip_files_prefix }} 153 |
154 |
155 |
156 | {%- endblock markdowncell %} 157 | 158 | {% block unknowncell scoped %} 159 | unknown type {{ cell.type }} 160 | {% endblock unknowncell %} 161 | 162 | {% block execute_result -%} 163 | {%- set extra_class="output_execute_result" -%} 164 | {% block data_priority scoped %} 165 | {{ super() }} 166 | {% endblock data_priority %} 167 | {%- set extra_class="" -%} 168 | {%- endblock execute_result %} 169 | 170 | {% block stream_stdout -%} 171 |
172 |
173 | {{- output.text | ansi2html -}}
174 | 
175 |
176 | {%- endblock stream_stdout %} 177 | 178 | {% block stream_stderr -%} 179 |
180 |
181 | {{- output.text | ansi2html -}}
182 | 
183 |
184 | {%- endblock stream_stderr %} 185 | 186 | {% block data_svg scoped -%} 187 |
188 | {%- if output.svg_filename %} 189 | 194 | {%- endblock data_svg %} 195 | 196 | {% block data_html scoped -%} 197 |
198 | {{ output.data['text/html'] }} 199 |
200 | {%- endblock data_html %} 201 | 202 | {% block data_markdown scoped -%} 203 |
204 | {{ output.data['text/markdown'] | markdown2html }} 205 |
206 | {%- endblock data_markdown %} 207 | 208 | {% block data_png scoped %} 209 |
210 | {%- if 'image/png' in output.metadata.get('filenames', {}) %} 211 | 227 |
228 | {%- endblock data_png %} 229 | 230 | {% block data_jpg scoped %} 231 |
232 | {%- if 'image/jpeg' in output.metadata.get('filenames', {}) %} 233 | 249 |
250 | {%- endblock data_jpg %} 251 | 252 | {% block data_latex scoped %} 253 |
254 | {{ output.data['text/latex'] }} 255 |
256 | {%- endblock data_latex %} 257 | 258 | {% block error -%} 259 |
260 |
261 | {{- super() -}}
262 | 
263 |
264 | {%- endblock error %} 265 | 266 | {%- block traceback_line %} 267 | {{ line | ansi2html }} 268 | {%- endblock traceback_line %} 269 | 270 | {%- block data_text scoped %} 271 |
272 |
273 | {{- output.data['text/plain'] | ansi2html -}}
274 | 
275 |
276 | {%- endblock -%} 277 | 278 | {%- block data_javascript scoped %} 279 | {% set div_id = uuid4() %} 280 |
281 |
282 | 286 |
287 | {%- endblock -%} 288 | 289 | {%- block data_widget_state scoped %} 290 | {% set div_id = uuid4() %} 291 | {% set datatype_list = output.data | filter_data_type %} 292 | {% set datatype = datatype_list[0]%} 293 |
294 |
295 | 298 | 301 |
302 | {%- endblock data_widget_state -%} 303 | 304 | {%- block data_widget_view scoped %} 305 | {% set div_id = uuid4() %} 306 | {% set datatype_list = output.data | filter_data_type %} 307 | {% set datatype = datatype_list[0]%} 308 |
309 |
310 | 313 | 316 |
317 | {%- endblock data_widget_view -%} 318 | 319 | {%- block footer %} 320 | {% set mimetype = 'application/vnd.jupyter.widget-state+json'%} 321 | {% if mimetype in nb.metadata.get("widgets",{})%} 322 | 325 | {% endif %} 326 | {{ super() }} 327 | 328 | 329 |
330 | 331 |
332 | 333 |
334 | 335 |
336 | 337 |

© Copyright XXXX, Occaecat ipsum culpa nulla in Lorem dolor exercitation adipisicing in qui pariatur.

338 | 339 |
340 | 341 |
342 | 343 | 344 | 345 | 346 | 347 | 348 | 349 | 350 | {%- endblock footer-%} 351 | --------------------------------------------------------------------------------