├── examples ├── temp.py ├── tests │ ├── __init__.py │ └── test_directory_pyfiles.py ├── gd.xls ├── pylab_eg.py ├── pylab_eg2.py ├── plot_example_1.py ├── white_noise_plot.py ├── us_cities.txt ├── sine2.py ├── plot_example_2.py ├── plot_example_3.py ├── sine3.py ├── sine4.py ├── dice.py ├── test_program_1.py ├── test_program_2.py ├── sine5.py ├── test_program_3.py ├── us_cities.py ├── test_program_6.py ├── qm_plot.py ├── test_program_5.py ├── nds.py ├── plot_example_4.py ├── boxplot_example.py ├── test_program_4.py ├── odu_plot_densities.py ├── test_program_5_short.py ├── binom_df.py ├── duopoly_mpe_dynamics.py ├── plot_example_5.py ├── six_hists.py ├── bisection.py ├── 3dplot.py ├── beta-binomial.py ├── quadmap_class.py ├── linapprox.py ├── subplots.py ├── ar1_acov.py ├── vecs.py ├── ar1_sd.py ├── lucas_tree_price1.py ├── cauchy_samples.py ├── nx_demo.py ├── jv_test.py ├── ifp_savings_plots.py ├── tsh_hg.py ├── lqramsey_ar1.py ├── vecs2.py ├── mc_convergence_plot.py ├── career_vf_plot.py ├── paths_and_stationarity.py ├── duopoly_mpe.py ├── paths_and_hist.py ├── lqramsey_discrete.py ├── ar1_cycles.py ├── evans_sargent_plot1.py ├── wb_download.py ├── illustrates_clt.py ├── duopoly_lqnash.py ├── eigenvec.py ├── preim1.py ├── web_network.py ├── lin_interp_3d_plot.py ├── qs.py ├── perm_inc_ir.py ├── 3dvec.py ├── perm_inc_figs.py ├── evans_sargent_plot2.py ├── lq_permanent_1.py ├── stochasticgrowth.py ├── illustrates_lln.py ├── odu_vfi_plots.py ├── optgrowth_v0.py ├── clt3d.py └── gaussian_contours.py ├── quantecon ├── tests │ ├── tests_models │ │ ├── __init__.py │ │ ├── tests_solow │ │ │ ├── __init__.py │ │ │ ├── test_ces.py │ │ │ └── test_cobb_douglas.py │ │ ├── test_career.py │ │ ├── test_asset_pricing.py │ │ ├── test_lucastree.py │ │ ├── test_optgrowth.py │ │ └── test_jv.py │ ├── matplotlibrc │ ├── data │ │ └── matlab_quad.mat │ ├── __init__.py │ ├── test_timing.py │ ├── test_lyapunov.py │ ├── test_ecdf.py │ ├── test_matrix_eqn.py │ ├── test_arma.py │ ├── test_lae.py │ ├── test_quadsum.py │ ├── test_lss.py │ ├── test_tauchen.py │ ├── test_discrete_rv.py │ ├── test_rank_nullspace.py │ ├── test_ricatti.py │ ├── test_compute_fp.py │ ├── test_lqcontrol.py │ ├── test_kalman.py │ ├── test_robustlq.py │ ├── test_lqnash.py │ └── test_estspec.py ├── version.py ├── build.sh ├── bld.bat ├── models │ ├── solow │ │ ├── __init__.py │ │ └── cobb_douglas.py │ ├── __init__.py │ └── optgrowth.py ├── __init__.py ├── ecdf.py ├── meta.yaml ├── compute_fp.py ├── discrete_rv.py ├── timing.py ├── tauchen.py ├── lae.py ├── ce_util.py ├── cartesian.py ├── quadsums.py ├── rank_nullspace.py ├── gth_solve.py └── distributions.py ├── solutions ├── numbers.txt ├── web_graph_data.txt ├── scipy_solutions.ipynb ├── asset_solutions.ipynb └── oop_solutions.ipynb ├── setup.cfg ├── data ├── gd.xls ├── us_cities.txt ├── graph_out.txt ├── web_graph_data.txt └── test_pwt.csv ├── .coveralls.yml ├── docs ├── source │ ├── models │ │ ├── jv.rst │ │ ├── ifp.rst │ │ ├── odu.rst │ │ ├── career.rst │ │ ├── lucastree.rst │ │ ├── optgrowth.rst │ │ └── asset_pricing.rst │ ├── tools │ │ ├── arma.rst │ │ ├── ecdf.rst │ │ ├── ivp.rst │ │ ├── lae.rst │ │ ├── lss.rst │ │ ├── quad.rst │ │ ├── kalman.rst │ │ ├── lqnash.rst │ │ ├── ce_util.rst │ │ ├── estspec.rst │ │ ├── tauchen.rst │ │ ├── version.rst │ │ ├── cartesian.rst │ │ ├── lqcontrol.rst │ │ ├── mc_tools.rst │ │ ├── quadsums.rst │ │ ├── robustlq.rst │ │ ├── compute_fp.rst │ │ ├── matrix_eqn.rst │ │ ├── discrete_rv.rst │ │ ├── distributions.rst │ │ └── rank_nullspace.rst │ ├── models.rst │ 
├── index.rst │ └── tools.rst ├── README.md └── sphinxext │ └── only_directives.py ├── rtd-pip-requirements.txt ├── .coveragerc ├── .gitignore ├── pip-requirements.txt ├── LIB_README.md ├── .travis.yml ├── LICENSE.txt ├── MANIFEST └── README.md /examples/temp.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /examples/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /quantecon/tests/tests_models/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /quantecon/tests/tests_models/tests_solow/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /solutions/numbers.txt: -------------------------------------------------------------------------------- 1 | prices 2 | 3 3 | 8 4 | 5 | 7 6 | 21 -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | description-file = LIB_README.md 3 | 4 | -------------------------------------------------------------------------------- /data/gd.xls: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/davidrpugh/quant-econ/master/data/gd.xls -------------------------------------------------------------------------------- /.coveralls.yml: -------------------------------------------------------------------------------- 1 | service_name: travis-ci 2 | repo_token: 7f8JDNk6BsPMESy0URL3W00zlazzRcuzR 3 | -------------------------------------------------------------------------------- /examples/gd.xls: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/davidrpugh/quant-econ/master/examples/gd.xls -------------------------------------------------------------------------------- /quantecon/tests/matplotlibrc: -------------------------------------------------------------------------------- 1 | # necessary in order to get tests working on Travis CI 2 | backend : Agg -------------------------------------------------------------------------------- /quantecon/version.py: -------------------------------------------------------------------------------- 1 | """ 2 | This is a VERSION file and should NOT be manually altered 3 | """ 4 | version = '0.1.7-2' -------------------------------------------------------------------------------- /quantecon/tests/data/matlab_quad.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/davidrpugh/quant-econ/master/quantecon/tests/data/matlab_quad.mat -------------------------------------------------------------------------------- /examples/pylab_eg.py: -------------------------------------------------------------------------------- 1 | from pylab import * # Depreciated 2 | x = linspace(0, 10, 200) 3 | y = sin(x) 4 | plot(x, y, 'b-', linewidth=2) 5 | show() 6 | -------------------------------------------------------------------------------- /docs/source/models/jv.rst: 
-------------------------------------------------------------------------------- 1 | jv 2 | == 3 | 4 | .. automodule:: quantecon.models.jv 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/tools/arma.rst: -------------------------------------------------------------------------------- 1 | arma 2 | ==== 3 | 4 | .. automodule:: quantecon.arma 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/tools/ecdf.rst: -------------------------------------------------------------------------------- 1 | ecdf 2 | ==== 3 | 4 | .. automodule:: quantecon.ecdf 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/tools/ivp.rst: -------------------------------------------------------------------------------- 1 | ivp 2 | === 3 | 4 | .. automodule:: quantecon.ivp 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/tools/lae.rst: -------------------------------------------------------------------------------- 1 | lae 2 | === 3 | 4 | .. automodule:: quantecon.lae 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/tools/lss.rst: -------------------------------------------------------------------------------- 1 | lss 2 | === 3 | 4 | .. automodule:: quantecon.lss 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/tools/quad.rst: -------------------------------------------------------------------------------- 1 | quad 2 | ==== 3 | 4 | .. automodule:: quantecon.quad 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/models/ifp.rst: -------------------------------------------------------------------------------- 1 | ifp 2 | === 3 | 4 | .. automodule:: quantecon.models.ifp 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/models/odu.rst: -------------------------------------------------------------------------------- 1 | odu 2 | === 3 | 4 | .. automodule:: quantecon.models.odu 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/tools/kalman.rst: -------------------------------------------------------------------------------- 1 | kalman 2 | ====== 3 | 4 | .. automodule:: quantecon.kalman 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/tools/lqnash.rst: -------------------------------------------------------------------------------- 1 | lqnash 2 | ====== 3 | 4 | .. automodule:: quantecon.lqnash 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/tools/ce_util.rst: -------------------------------------------------------------------------------- 1 | ce_util 2 | ======= 3 | 4 | .. 
automodule:: quantecon.ce_util 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/tools/estspec.rst: -------------------------------------------------------------------------------- 1 | estspec 2 | ======= 3 | 4 | .. automodule:: quantecon.estspec 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/tools/tauchen.rst: -------------------------------------------------------------------------------- 1 | tauchen 2 | ======= 3 | 4 | .. automodule:: quantecon.tauchen 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/tools/version.rst: -------------------------------------------------------------------------------- 1 | version 2 | ======= 3 | 4 | .. automodule:: quantecon.version 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/models/career.rst: -------------------------------------------------------------------------------- 1 | career 2 | ====== 3 | 4 | .. automodule:: quantecon.models.career 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/tools/cartesian.rst: -------------------------------------------------------------------------------- 1 | cartesian 2 | ========= 3 | 4 | .. automodule:: quantecon.cartesian 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/tools/lqcontrol.rst: -------------------------------------------------------------------------------- 1 | lqcontrol 2 | ========= 3 | 4 | .. automodule:: quantecon.lqcontrol 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/tools/mc_tools.rst: -------------------------------------------------------------------------------- 1 | mc_tools 2 | ======== 3 | 4 | .. automodule:: quantecon.mc_tools 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/tools/quadsums.rst: -------------------------------------------------------------------------------- 1 | quadsums 2 | ======== 3 | 4 | .. automodule:: quantecon.quadsums 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/tools/robustlq.rst: -------------------------------------------------------------------------------- 1 | robustlq 2 | ======== 3 | 4 | .. automodule:: quantecon.robustlq 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/tools/compute_fp.rst: -------------------------------------------------------------------------------- 1 | compute_fp 2 | ========== 3 | 4 | .. 
automodule:: quantecon.compute_fp 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/tools/matrix_eqn.rst: -------------------------------------------------------------------------------- 1 | matrix_eqn 2 | ========== 3 | 4 | .. automodule:: quantecon.matrix_eqn 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/models/lucastree.rst: -------------------------------------------------------------------------------- 1 | lucastree 2 | ========= 3 | 4 | .. automodule:: quantecon.models.lucastree 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/models/optgrowth.rst: -------------------------------------------------------------------------------- 1 | optgrowth 2 | ========= 3 | 4 | .. automodule:: quantecon.models.optgrowth 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/tools/discrete_rv.rst: -------------------------------------------------------------------------------- 1 | discrete_rv 2 | =========== 3 | 4 | .. automodule:: quantecon.discrete_rv 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /examples/pylab_eg2.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | x = np.linspace(0, 10, 200) 4 | y = np.sin(x) 5 | plt.plot(x, y, 'b-', linewidth=2) 6 | plt.show() 7 | -------------------------------------------------------------------------------- /docs/source/tools/distributions.rst: -------------------------------------------------------------------------------- 1 | distributions 2 | ============= 3 | 4 | .. automodule:: quantecon.distributions 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/tools/rank_nullspace.rst: -------------------------------------------------------------------------------- 1 | rank_nullspace 2 | ============== 3 | 4 | .. automodule:: quantecon.rank_nullspace 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/models/asset_pricing.rst: -------------------------------------------------------------------------------- 1 | asset_pricing 2 | ============= 3 | 4 | .. 
automodule:: quantecon.models.asset_pricing 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /rtd-pip-requirements.txt: -------------------------------------------------------------------------------- 1 | # These are the required packages for the quant-econ package to build 2 | 3 | Sphinx 4 | 5 | ipython 6 | 7 | numpy 8 | 9 | numpydoc 10 | 11 | scipy 12 | 13 | matplotlib 14 | -------------------------------------------------------------------------------- /examples/plot_example_1.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | fig, ax = plt.subplots() 4 | x = np.linspace(0, 10, 200) 5 | y = np.sin(x) 6 | ax.plot(x, y, 'b-', linewidth=2) 7 | plt.show() 8 | -------------------------------------------------------------------------------- /examples/white_noise_plot.py: -------------------------------------------------------------------------------- 1 | from pylab import plot, show, legend 2 | from random import normalvariate 3 | 4 | x = [normalvariate(0, 1) for i in range(100)] 5 | plot(x, 'b-', label="white noise") 6 | legend() 7 | show() 8 | -------------------------------------------------------------------------------- /.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | source = quantecon 3 | omit = 4 | */python?.?/* 5 | */lib-python/?.?/*.py 6 | */lib_pypy/_*.py 7 | */site-packages/ordereddict.py 8 | */site-packages/nose/* 9 | */unittest2/* 10 | -------------------------------------------------------------------------------- /data/us_cities.txt: -------------------------------------------------------------------------------- 1 | new york: 8244910 2 | los angeles: 3819702 3 | chicago: 2707120 4 | houston: 2145146 5 | philadelphia: 1536471 6 | phoenix: 1469471 7 | san antonio: 1359758 8 | san diego: 1326179 9 | dallas: 1223229 10 | -------------------------------------------------------------------------------- /examples/us_cities.txt: -------------------------------------------------------------------------------- 1 | new york: 8244910 2 | los angeles: 3819702 3 | chicago: 2707120 4 | houston: 2145146 5 | philadelphia: 1536471 6 | phoenix: 1469471 7 | san antonio: 1359758 8 | san diego: 1326179 9 | dallas: 1223229 10 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | *.ipynb_checkpoints* 3 | 4 | build/ 5 | /dist/ 6 | /*.egg_info 7 | *.DS_Store 8 | *.TODO 9 | *.noseids 10 | *.coverage 11 | *.h5 12 | examples/solow_model/depreciation_rates.dta 13 | examples/solow_model/pwt80.dta 14 | -------------------------------------------------------------------------------- /docs/source/models.rst: -------------------------------------------------------------------------------- 1 | Models 2 | ====== 3 | 4 | .. 
toctree:: 5 | :maxdepth: 2 6 | 7 | models/asset_pricing 8 | models/career 9 | models/ifp 10 | models/jv 11 | models/lucastree 12 | models/odu 13 | models/optgrowth 14 | -------------------------------------------------------------------------------- /examples/sine2.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | fig, ax = plt.subplots() 4 | x = np.linspace(0, 10, 200) 5 | y = np.sin(x) 6 | ax.plot(x, y, 'r-', linewidth=2, label='sine function', alpha=0.6) 7 | ax.legend() 8 | plt.show() 9 | -------------------------------------------------------------------------------- /examples/plot_example_2.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | fig, ax = plt.subplots() 4 | x = np.linspace(0, 10, 200) 5 | y = np.sin(x) 6 | ax.plot(x, y, 'r-', lw=2, label='sine function', alpha=0.6) 7 | ax.legend(loc='upper center') 8 | plt.show() 9 | -------------------------------------------------------------------------------- /examples/plot_example_3.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | fig, ax = plt.subplots() 4 | x = np.linspace(0, 10, 200) 5 | y = np.sin(x) 6 | ax.plot(x, y, 'r-', lw=2, label=r'$y=\sin(x)$', alpha=0.6) 7 | ax.legend(loc='upper center') 8 | plt.show() 9 | -------------------------------------------------------------------------------- /examples/sine3.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | fig, ax = plt.subplots() 4 | x = np.linspace(0, 10, 200) 5 | y = np.sin(x) 6 | ax.plot(x, y, 'r-', linewidth=2, label='sine function', alpha=0.6) 7 | ax.legend(loc='upper center') 8 | plt.show() 9 | -------------------------------------------------------------------------------- /examples/sine4.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | fig, ax = plt.subplots() 4 | x = np.linspace(0, 10, 200) 5 | y = np.sin(x) 6 | ax.plot(x, y, 'r-', linewidth=2, label=r'$y=\sin(x)$', alpha=0.6) 7 | ax.legend(loc='upper center') 8 | plt.show() 9 | -------------------------------------------------------------------------------- /quantecon/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | $PYTHON setup.py install 4 | 5 | # Add more build steps here, if they are necessary. 6 | 7 | # See 8 | # http://docs.continuum.io/conda/build.html 9 | # for a list of environment variables that are set during the build process. 10 | -------------------------------------------------------------------------------- /pip-requirements.txt: -------------------------------------------------------------------------------- 1 | # These are the required packages for the quant-econ package to build 2 | 3 | Sphinx 4 | 5 | ipython 6 | 7 | numpy 8 | 9 | numpydoc 10 | 11 | scipy 12 | 13 | matplotlib 14 | 15 | pandas 16 | 17 | statsmodels 18 | 19 | sympy 20 | 21 | numba 22 | -------------------------------------------------------------------------------- /quantecon/bld.bat: -------------------------------------------------------------------------------- 1 | "%PYTHON%" setup.py install 2 | if errorlevel 1 exit 1 3 | 4 | :: Add more build steps here, if they are necessary. 
5 | 6 | :: See 7 | :: http://docs.continuum.io/conda/build.html 8 | :: for a list of environment variables that are set during the build process. 9 | -------------------------------------------------------------------------------- /quantecon/tests/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | namespace for quantecon.tests 3 | 4 | @author : Spencer Lyon 5 | @date : 2014-08-01 13:13:59 6 | 7 | """ 8 | from . util import (capture, get_data_dir, get_h5_data_file, write_array, 9 | max_abs_diff, get_h5_data_group) 10 | -------------------------------------------------------------------------------- /examples/dice.py: -------------------------------------------------------------------------------- 1 | """ 2 | Filename: dice.py 3 | """ 4 | 5 | import random 6 | 7 | 8 | class Dice: 9 | 10 | faces = (1, 2, 3, 4, 5, 6) 11 | 12 | def __init__(self): 13 | self.current_face = 1 14 | 15 | def roll(self): 16 | self.current_face = random.choice(Dice.faces) 17 | -------------------------------------------------------------------------------- /examples/test_program_1.py: -------------------------------------------------------------------------------- 1 | from random import normalvariate 2 | import matplotlib.pyplot as plt 3 | ts_length = 100 4 | epsilon_values = [] # An empty list 5 | for i in range(ts_length): 6 | e = normalvariate(0, 1) 7 | epsilon_values.append(e) 8 | plt.plot(epsilon_values, 'b-') 9 | plt.show() 10 | -------------------------------------------------------------------------------- /examples/test_program_2.py: -------------------------------------------------------------------------------- 1 | from random import normalvariate 2 | import matplotlib.pyplot as plt 3 | ts_length = 100 4 | epsilon_values = [] 5 | i = 0 6 | while i < ts_length: 7 | e = normalvariate(0, 1) 8 | epsilon_values.append(e) 9 | i = i + 1 10 | plt.plot(epsilon_values, 'b-') 11 | plt.show() 12 | -------------------------------------------------------------------------------- /data/graph_out.txt: -------------------------------------------------------------------------------- 1 | node0 2 | node8 3 | node11 4 | node18 5 | node23 6 | node33 7 | node41 8 | node53 9 | node56 10 | node57 11 | node60 12 | node67 13 | node70 14 | node73 15 | node76 16 | node85 17 | node87 18 | node88 19 | node93 20 | node94 21 | node96 22 | node97 23 | node98 24 | node99 25 | 26 | Cost: 160.55 27 | -------------------------------------------------------------------------------- /examples/sine5.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | fig, ax = plt.subplots() 4 | x = np.linspace(0, 10, 200) 5 | y = np.sin(x) 6 | ax.plot(x, y, 'r-', linewidth=2, label=r'$y=\sin(x)$', alpha=0.6) 7 | ax.legend(loc='upper center') 8 | ax.set_yticks([-1, 0, 1]) 9 | ax.set_title('Test plot') 10 | plt.show() 11 | -------------------------------------------------------------------------------- /examples/test_program_3.py: -------------------------------------------------------------------------------- 1 | from random import normalvariate 2 | import matplotlib.pyplot as plt 3 | 4 | 5 | def generate_data(n): 6 | epsilon_values = [] 7 | for i in range(n): 8 | e = normalvariate(0, 1) 9 | epsilon_values.append(e) 10 | return epsilon_values 11 | 12 | data = generate_data(100) 13 | plt.plot(data, 'b-') 14 | plt.show() 15 | -------------------------------------------------------------------------------- 
/examples/us_cities.py: -------------------------------------------------------------------------------- 1 | data_file = open('us_cities.txt', 'r') 2 | for line in data_file: 3 | city, population = line.split(':') # Tuple unpacking 4 | city = city.title() # Capitalize city names 5 | population = '{0:,}'.format(int(population)) # Add commas to numbers 6 | print(city.ljust(15) + population) 7 | data_file.close() 8 | -------------------------------------------------------------------------------- /examples/test_program_6.py: -------------------------------------------------------------------------------- 1 | from random import uniform 2 | import matplotlib.pyplot as plt 3 | 4 | 5 | def generate_data(n, generator_type): 6 | epsilon_values = [] 7 | for i in range(n): 8 | e = generator_type(0, 1) 9 | epsilon_values.append(e) 10 | return epsilon_values 11 | 12 | data = generate_data(100, uniform) 13 | plt.plot(data, 'b-') 14 | plt.show() 15 | -------------------------------------------------------------------------------- /examples/qm_plot.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | 4 | 5 | def qm(x0, n): 6 | x = np.empty(n+1) 7 | x[0] = x0 8 | for t in range(n): 9 | x[t+1] = 4 * x[t] * (1 - x[t]) 10 | return x 11 | 12 | x = qm(0.1, 250) 13 | fig, ax = plt.subplots(figsize=(10, 6.5)) 14 | ax.plot(x, 'b-', lw=2, alpha=0.8) 15 | ax.set_xlabel('time', fontsize=16) 16 | plt.show() 17 | -------------------------------------------------------------------------------- /quantecon/models/solow/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | models directory imports 3 | 4 | objects imported here will live in the `quantecon.models.solow` namespace 5 | 6 | """ 7 | __all__ = ['Model', 'CobbDouglasModel', 'CESModel'] 8 | 9 | from . model import Model 10 | from . import model 11 | from . cobb_douglas import CobbDouglasModel 12 | from . import cobb_douglas 13 | from . ces import CESModel 14 | from . import ces 15 | -------------------------------------------------------------------------------- /LIB_README.md: -------------------------------------------------------------------------------- 1 | 2 | ## Quantitative Economics 3 | 4 | This repository collects code for [Quantitative Economics](http://quant-econ.net), an on-line course on quantitative economic modeling authored by [Thomas J. Sargent](https://files.nyu.edu/ts43/public/) and [John Stachurski](http://johnstachurski.net). 5 | 6 | Visit the [lecture series main page](http://quant-econ.net) to find detailed information on working with this code repository. 
7 | 8 | 9 | -------------------------------------------------------------------------------- /examples/test_program_5.py: -------------------------------------------------------------------------------- 1 | from random import normalvariate, uniform 2 | import matplotlib.pyplot as plt 3 | 4 | 5 | def generate_data(n, generator_type): 6 | epsilon_values = [] 7 | for i in range(n): 8 | e = uniform(0, 1) if generator_type == 'U' else normalvariate(0, 1) 9 | epsilon_values.append(e) 10 | return epsilon_values 11 | 12 | data = generate_data(100, 'U') 13 | plt.plot(data, 'b-') 14 | plt.show() 15 | -------------------------------------------------------------------------------- /examples/nds.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | from scipy.stats import norm 4 | from random import uniform 5 | 6 | fig, ax = plt.subplots() 7 | x = np.linspace(-4, 4, 150) 8 | for i in range(3): 9 | m, s = uniform(-1, 1), uniform(1, 2) 10 | y = norm.pdf(x, loc=m, scale=s) 11 | current_label = r'$\mu = {0:.2f}$'.format(m) 12 | ax.plot(x, y, linewidth=2, alpha=0.6, label=current_label) 13 | ax.legend() 14 | plt.show() 15 | -------------------------------------------------------------------------------- /examples/plot_example_4.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | from scipy.stats import norm 4 | from random import uniform 5 | fig, ax = plt.subplots() 6 | x = np.linspace(-4, 4, 150) 7 | for i in range(3): 8 | m, s = uniform(-1, 1), uniform(1, 2) 9 | y = norm.pdf(x, loc=m, scale=s) 10 | current_label = r'$\mu = {0:.2f}$'.format(m) 11 | ax.plot(x, y, lw=2, alpha=0.6, label=current_label) 12 | ax.legend() 13 | plt.show() 14 | -------------------------------------------------------------------------------- /data/web_graph_data.txt: -------------------------------------------------------------------------------- 1 | a -> d; 2 | a -> f; 3 | b -> j; 4 | b -> k; 5 | b -> m; 6 | c -> c; 7 | c -> g; 8 | c -> j; 9 | c -> m; 10 | d -> f; 11 | d -> h; 12 | d -> k; 13 | e -> d; 14 | e -> h; 15 | e -> l; 16 | f -> a; 17 | f -> b; 18 | f -> j; 19 | f -> l; 20 | g -> b; 21 | g -> j; 22 | h -> d; 23 | h -> g; 24 | h -> l; 25 | h -> m; 26 | i -> g; 27 | i -> h; 28 | i -> n; 29 | j -> e; 30 | j -> i; 31 | j -> k; 32 | k -> n; 33 | l -> m; 34 | m -> g; 35 | n -> c; 36 | n -> j; 37 | n -> m; 38 | -------------------------------------------------------------------------------- /solutions/web_graph_data.txt: -------------------------------------------------------------------------------- 1 | a -> d; 2 | a -> f; 3 | b -> j; 4 | b -> k; 5 | b -> m; 6 | c -> c; 7 | c -> g; 8 | c -> j; 9 | c -> m; 10 | d -> f; 11 | d -> h; 12 | d -> k; 13 | e -> d; 14 | e -> h; 15 | e -> l; 16 | f -> a; 17 | f -> b; 18 | f -> j; 19 | f -> l; 20 | g -> b; 21 | g -> j; 22 | h -> d; 23 | h -> g; 24 | h -> l; 25 | h -> m; 26 | i -> g; 27 | i -> h; 28 | i -> n; 29 | j -> e; 30 | j -> i; 31 | j -> k; 32 | k -> n; 33 | l -> m; 34 | m -> g; 35 | n -> c; 36 | n -> j; 37 | n -> m; -------------------------------------------------------------------------------- /examples/boxplot_example.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | 4 | n = 500 5 | x = np.random.randn(n) # N(0, 1) 6 | x = np.exp(x) # Map x to lognormal 7 | y = np.random.randn(n) + 2.0 # N(2, 1) 
8 | z = np.random.randn(n) + 4.0 # N(4, 1) 9 | 10 | fig, ax = plt.subplots(figsize=(10, 6.6)) 11 | ax.boxplot([x, y, z]) 12 | ax.set_xticks((1, 2, 3)) 13 | ax.set_ylim(-2, 14) 14 | ax.set_xticklabels((r'$X$', r'$Y$', r'$Z$'), fontsize=16) 15 | plt.show() 16 | -------------------------------------------------------------------------------- /examples/test_program_4.py: -------------------------------------------------------------------------------- 1 | from random import normalvariate, uniform 2 | import matplotlib.pyplot as plt 3 | 4 | 5 | def generate_data(n, generator_type): 6 | epsilon_values = [] 7 | for i in range(n): 8 | if generator_type == 'U': 9 | e = uniform(0, 1) 10 | else: 11 | e = normalvariate(0, 1) 12 | epsilon_values.append(e) 13 | return epsilon_values 14 | 15 | data = generate_data(100, 'U') 16 | plt.plot(data, 'b-') 17 | plt.show() 18 | -------------------------------------------------------------------------------- /examples/odu_plot_densities.py: -------------------------------------------------------------------------------- 1 | """ 2 | Filename: odu_plot_densities.py 3 | Authors: John Stachurski, Thomas J. Sargent 4 | 5 | """ 6 | import numpy as np 7 | import matplotlib.pyplot as plt 8 | from quantecon.models import SearchProblem 9 | 10 | sp = SearchProblem(F_a=1, F_b=1, G_a=3, G_b=1.2) 11 | grid = np.linspace(0, 2, 150) 12 | fig, ax = plt.subplots() 13 | ax.plot(grid, sp.f(grid), label=r'$f$', lw=2) 14 | ax.plot(grid, sp.g(grid), label=r'$g$', lw=2) 15 | ax.legend(loc=0) 16 | plt.show() 17 | -------------------------------------------------------------------------------- /examples/test_program_5_short.py: -------------------------------------------------------------------------------- 1 | import pylab 2 | from random import normalvariate, uniform 3 | 4 | 5 | def generate_data(n, generator_type): 6 | epsilon_values = [] 7 | for i in range(n): 8 | if generator_type == "U": 9 | e = uniform(0, 1) 10 | else: 11 | e = normalvariate(0, 1) 12 | 13 | epsilon_values.append(e) 14 | return epsilon_values 15 | 16 | ts_length = 100 17 | data = generate_data(ts_length, 'U') 18 | pylab.plot(data, 'b-') 19 | pylab.show() 20 | -------------------------------------------------------------------------------- /examples/binom_df.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | from scipy.stats import binom 3 | 4 | fig, axes = plt.subplots(2, 2) 5 | plt.subplots_adjust(hspace=0.4) 6 | axes = axes.flatten() 7 | ns = [1, 2, 4, 8] 8 | dom = list(range(9)) 9 | 10 | for ax, n in zip(axes, ns): 11 | b = binom(n, 0.5) 12 | ax.bar(dom, b.pmf(dom), alpha=0.6, align='center') 13 | ax.set_xlim(-0.5, 8.5) 14 | ax.set_ylim(0, 0.55) 15 | ax.set_xticks(list(range(9))) 16 | ax.set_yticks((0, 0.2, 0.4)) 17 | ax.set_title(r'$n = {}$'.format(n)) 18 | 19 | fig.show() 20 | -------------------------------------------------------------------------------- /docs/source/index.rst: -------------------------------------------------------------------------------- 1 | ======================= 2 | QuantEcon documentation 3 | ======================= 4 | 5 | The `quantecon` Python library is composed of two main sections: models 6 | and tools. The models section contains implementations of standard 7 | models, many of which are discussed in lectures on the website `quant- 8 | econ.net <http://quant-econ.net>`_. 9 | 10 | .. 
toctree:: 11 | :maxdepth: 2 12 | 13 | models 14 | tools 15 | 16 | Indices and tables 17 | ================== 18 | 19 | * :ref:`genindex` 20 | * :ref:`modindex` 21 | * :ref:`search` 22 | -------------------------------------------------------------------------------- /quantecon/tests/test_timing.py: -------------------------------------------------------------------------------- 1 | """ 2 | Filename: test_timing.py 3 | Authors: Pablo Winant 4 | Tests for timing.py 5 | """ 6 | 7 | def test_tic_tac_toc(): 8 | 9 | from ..timing import tic, tac, toc 10 | import time 11 | 12 | h = 0.1 13 | 14 | tic() 15 | 16 | time.sleep(h) 17 | el1 = tac() 18 | 19 | time.sleep(h) 20 | el2 = tac() 21 | 22 | time.sleep(h) 23 | el3 = toc() 24 | 25 | assert(abs(el1-h)<0.01) 26 | assert(abs(el2-h)<0.01) 27 | assert(abs(el3-h*3)<0.01) 28 | 29 | 30 | if __name__ == "__main__": 31 | 32 | test_tic_tac_toc() 33 | -------------------------------------------------------------------------------- /docs/source/tools.rst: -------------------------------------------------------------------------------- 1 | Tools 2 | ===== 3 | 4 | .. toctree:: 5 | :maxdepth: 2 6 | 7 | tools/arma 8 | tools/cartesian 9 | tools/ce_util 10 | tools/compute_fp 11 | tools/discrete_rv 12 | tools/distributions 13 | tools/ecdf 14 | tools/estspec 15 | tools/graph_tools 16 | tools/gth_solve 17 | tools/ivp 18 | tools/kalman 19 | tools/lae 20 | tools/lqcontrol 21 | tools/lqnash 22 | tools/lss 23 | tools/matrix_eqn 24 | tools/mc_tools 25 | tools/quad 26 | tools/quadsums 27 | tools/rank_nullspace 28 | tools/robustlq 29 | tools/tauchen 30 | tools/version 31 | -------------------------------------------------------------------------------- /quantecon/models/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | models directory imports 3 | 4 | objects imported here will live in the `quantecon.models` namespace 5 | 6 | """ 7 | 8 | __all__ = ["AssetPrices", "CareerWorkerProblem", "ConsumerProblem", 9 | "JvWorker", "LucasTree", "SearchProblem", "GrowthModel", 10 | "solow"] 11 | 12 | #from . 
import solow as solow 13 | from .asset_pricing import AssetPrices 14 | from .career import CareerWorkerProblem 15 | from .ifp import ConsumerProblem 16 | from .jv import JvWorker 17 | from .lucastree import LucasTree 18 | from .odu import SearchProblem 19 | from .optgrowth import GrowthModel 20 | -------------------------------------------------------------------------------- /examples/duopoly_mpe_dynamics.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | from duopoly_mpe import * 3 | 4 | AF = A - B1.dot(F1) - B2.dot(F2) 5 | n = 20 6 | x = np.empty((3, n)) 7 | x[:, 0] = 1, 1, 1 8 | for t in range(n-1): 9 | x[:, t+1] = np.dot(AF, x[:, t]) 10 | q1 = x[1, :] 11 | q2 = x[2, :] 12 | q = q1 + q2 # Total output, MPE 13 | p = a0 - a1 * q # Price, MPE 14 | 15 | fig, ax = plt.subplots(figsize=(9, 5.8)) 16 | ax.plot(q, 'b-', lw=2, alpha=0.75, label='total output') 17 | ax.plot(p, 'g-', lw=2, alpha=0.75, label='price') 18 | ax.set_title('Output and prices, duopoly MPE') 19 | ax.legend(frameon=False) 20 | plt.show() 21 | -------------------------------------------------------------------------------- /examples/plot_example_5.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | from scipy.stats import norm 3 | from random import uniform 4 | num_rows, num_cols = 2, 3 5 | fig, axes = plt.subplots(num_rows, num_cols, figsize=(12, 8)) 6 | for i in range(num_rows): 7 | for j in range(num_cols): 8 | m, s = uniform(-1, 1), uniform(1, 2) 9 | x = norm.rvs(loc=m, scale=s, size=100) 10 | axes[i, j].hist(x, alpha=0.6, bins=20) 11 | t = r'$\mu = {0:.1f},\; \sigma = {1:.1f}$'.format(m, s) 12 | axes[i, j].set_title(t) 13 | axes[i, j].set_xticks([-4, 0, 4]) 14 | axes[i, j].set_yticks([]) 15 | plt.show() 16 | -------------------------------------------------------------------------------- /examples/six_hists.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | from scipy.stats import norm 3 | from random import uniform 4 | num_rows, num_cols = 3, 2 5 | fig, axes = plt.subplots(num_rows, num_cols, figsize=(8, 12)) 6 | for i in range(num_rows): 7 | for j in range(num_cols): 8 | m, s = uniform(-1, 1), uniform(1, 2) 9 | x = norm.rvs(loc=m, scale=s, size=100) 10 | axes[i, j].hist(x, alpha=0.6, bins=20) 11 | t = r'$\mu = {0:.1f}, \quad \sigma = {1:.1f}$'.format(m, s) 12 | axes[i, j].set_title(t) 13 | axes[i, j].set_xticks([-4, 0, 4]) 14 | axes[i, j].set_yticks([]) 15 | plt.show() 16 | -------------------------------------------------------------------------------- /examples/bisection.py: -------------------------------------------------------------------------------- 1 | 2 | def bisect(f, a, b, tol=10e-5): 3 | """ 4 | Implements the bisection root finding algorithm, assuming that f is a 5 | real-valued function on [a, b] satisfying f(a) < 0 < f(b). 
6 | """ 7 | lower, upper = a, b 8 | 9 | while upper - lower > tol: 10 | middle = 0.5 * (upper + lower) 11 | # === if root is between lower and middle === # 12 | if f(middle) > 0: 13 | lower, upper = lower, middle 14 | # === if root is between middle and upper === # 15 | else: 16 | lower, upper = middle, upper 17 | 18 | return 0.5 * (upper + lower) 19 | 20 | 21 | -------------------------------------------------------------------------------- /examples/3dplot.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | from mpl_toolkits.mplot3d.axes3d import Axes3D 3 | import numpy as np 4 | from matplotlib import cm 5 | 6 | 7 | def f(x, y): 8 | return np.cos(x**2 + y**2) / (1 + x**2 + y**2) 9 | 10 | xgrid = np.linspace(-3, 3, 50) 11 | ygrid = xgrid 12 | x, y = np.meshgrid(xgrid, ygrid) 13 | 14 | fig = plt.figure(figsize=(8, 6)) 15 | ax = fig.add_subplot(111, projection='3d') 16 | ax.plot_surface(x, 17 | y, 18 | f(x, y), 19 | rstride=2, cstride=2, 20 | cmap=cm.jet, 21 | alpha=0.7, 22 | linewidth=0.25) 23 | ax.set_zlim(-0.5, 1.0) 24 | plt.show() 25 | -------------------------------------------------------------------------------- /quantecon/tests/test_lyapunov.py: -------------------------------------------------------------------------------- 1 | """ 2 | Filename: test_lyapunov.py 3 | Authors: Chase Coleman, John Stachurski 4 | Date: 07/22/2014 5 | 6 | Tests for solve_discrete_lyapunov in matrix_eqn.py 7 | 8 | """ 9 | import numpy as np 10 | from numpy.testing import assert_allclose 11 | from quantecon.matrix_eqn import solve_discrete_lyapunov 12 | 13 | 14 | def test_dlyap_simple_ones(): 15 | A = np.zeros((4, 4)) 16 | B = np.ones((4, 4)) 17 | 18 | sol = solve_discrete_lyapunov(A, B) 19 | 20 | assert_allclose(sol, np.ones((4, 4))) 21 | 22 | 23 | def test_dlyap_scalar(): 24 | a = .5 25 | b = .75 26 | 27 | sol = solve_discrete_lyapunov(a, b) 28 | 29 | assert_allclose(sol, np.ones((1, 1))) 30 | -------------------------------------------------------------------------------- /examples/beta-binomial.py: -------------------------------------------------------------------------------- 1 | """ 2 | Filename: beta-binomial.py 3 | Authors: John Stachurski, Thomas J. Sargent 4 | 5 | """ 6 | from scipy.special import binom, beta 7 | import matplotlib.pyplot as plt 8 | import numpy as np 9 | 10 | 11 | def gen_probs(n, a, b): 12 | probs = np.zeros(n+1) 13 | for k in range(n+1): 14 | probs[k] = binom(n, k) * beta(k + a, n - k + b) / beta(a, b) 15 | return probs 16 | 17 | n = 50 18 | a_vals = [0.5, 1, 100] 19 | b_vals = [0.5, 1, 100] 20 | fig, ax = plt.subplots() 21 | for a, b in zip(a_vals, b_vals): 22 | ab_label = r'$a = %.1f$, $b = %.1f$' % (a, b) 23 | ax.plot(list(range(0, n+1)), gen_probs(n, a, b), '-o', label=ab_label) 24 | ax.legend() 25 | plt.show() 26 | -------------------------------------------------------------------------------- /examples/quadmap_class.py: -------------------------------------------------------------------------------- 1 | """ 2 | Filename: quadmap_class.py 3 | Authors: John Stachurski, Thomas J. Sargent 4 | 5 | """ 6 | 7 | 8 | class QuadMap(object): 9 | 10 | def __init__(self, initial_state): 11 | self.x = initial_state 12 | 13 | def update(self): 14 | "Apply the quadratic map to update the state." 15 | self.x = 4 * self.x * (1 - self.x) 16 | 17 | def generate_series(self, n): 18 | """ 19 | Generate and return a trajectory of length n, starting at the 20 | current state. 
21 | """ 22 | trajectory = [] 23 | for i in range(n): 24 | trajectory.append(self.x) 25 | self.update() 26 | return trajectory 27 | -------------------------------------------------------------------------------- /examples/linapprox.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import scipy as sp 3 | import matplotlib.pyplot as plt 4 | 5 | 6 | def f(x): 7 | y1 = 2 * np.cos(6 * x) + np.sin(14 * x) 8 | return y1 + 2.5 9 | 10 | c_grid = np.linspace(0, 1, 6) 11 | 12 | 13 | def Af(x): 14 | return sp.interp(x, c_grid, f(c_grid)) 15 | 16 | f_grid = np.linspace(0, 1, 150) 17 | 18 | fig, ax = plt.subplots() 19 | ax.set_xlim(0, 1) 20 | 21 | ax.plot(f_grid, f(f_grid), 'b-', lw=2, alpha=0.8, label='true function') 22 | ax.plot(f_grid, Af(f_grid), 'g-', lw=2, alpha=0.8, 23 | label='linear approximation') 24 | 25 | ax.vlines(c_grid, c_grid * 0, f(c_grid), linestyle='dashed', alpha=0.5) 26 | ax.legend(loc='upper center') 27 | 28 | plt.show() 29 | -------------------------------------------------------------------------------- /examples/subplots.py: -------------------------------------------------------------------------------- 1 | 2 | import matplotlib.pyplot as plt 3 | import numpy as np 4 | 5 | 6 | def subplots(): 7 | "Custom subplots with axes through the origin" 8 | fig, ax = plt.subplots() 9 | 10 | # Set the axes through the origin 11 | for spine in ['left', 'bottom']: 12 | ax.spines[spine].set_position('zero') 13 | for spine in ['right', 'top']: 14 | ax.spines[spine].set_color('none') 15 | 16 | ax.grid() 17 | return fig, ax 18 | 19 | 20 | fig, ax = subplots() # Call the local version, not plt.subplots() 21 | x = np.linspace(-2, 10, 200) 22 | y = np.sin(x) 23 | ax.plot(x, y, 'r-', linewidth=2, label='sine function', alpha=0.6) 24 | ax.legend(loc='lower right') 25 | plt.show() 26 | -------------------------------------------------------------------------------- /examples/ar1_acov.py: -------------------------------------------------------------------------------- 1 | """ 2 | Plots autocovariance function for AR(1) X' = phi X + epsilon 3 | """ 4 | import numpy as np 5 | import matplotlib.pyplot as plt 6 | 7 | num_rows, num_cols = 2, 1 8 | fig, axes = plt.subplots(num_rows, num_cols, figsize=(10, 8)) 9 | plt.subplots_adjust(hspace=0.4) 10 | 11 | # Autocovariance when phi = 0.8 12 | temp = r'autocovariance, $\phi = {0:.2}$' 13 | for i, phi in enumerate((0.8, -0.8)): 14 | ax = axes[i] 15 | times = list(range(16)) 16 | acov = [phi**k / (1 - phi**2) for k in times] 17 | ax.plot(times, acov, 'bo-', alpha=0.6, label=temp.format(phi)) 18 | ax.legend(loc='upper right') 19 | ax.set_xlabel('time') 20 | ax.set_xlim((0, 15)) 21 | ax.hlines(0, 0, 15, linestyle='--', alpha=0.5) 22 | plt.show() 23 | -------------------------------------------------------------------------------- /examples/vecs.py: -------------------------------------------------------------------------------- 1 | """ 2 | QE by Tom Sargent and John Stachurski. 3 | Illustrates vectors in the plane. 
4 | """ 5 | import matplotlib.pyplot as plt 6 | 7 | fig, ax = plt.subplots() 8 | # Set the axes through the origin 9 | for spine in ['left', 'bottom']: 10 | ax.spines[spine].set_position('zero') 11 | for spine in ['right', 'top']: 12 | ax.spines[spine].set_color('none') 13 | 14 | 15 | ax.set_xlim(-5, 5) 16 | ax.set_ylim(-5, 5) 17 | ax.grid() 18 | vecs = ((2, 4), (-3, 3), (-4, -3.5)) 19 | for v in vecs: 20 | ax.annotate('', xy=v, xytext=(0, 0), 21 | arrowprops=dict(facecolor='blue', 22 | shrink=0, 23 | alpha=0.7, 24 | width=0.5)) 25 | ax.text(1.1 * v[0], 1.1 * v[1], str(v)) 26 | plt.show() 27 | -------------------------------------------------------------------------------- /examples/ar1_sd.py: -------------------------------------------------------------------------------- 1 | """ 2 | Plots spectral density for AR(1) X' = phi X + epsilon 3 | """ 4 | import numpy as np 5 | import matplotlib.pyplot as plt 6 | 7 | 8 | def ar1_sd(phi, omega): 9 | return 1 / (1 - 2 * phi * np.cos(omega) + phi**2) 10 | 11 | omegas = np.linspace(0, np.pi, 180) 12 | num_rows, num_cols = 2, 1 13 | fig, axes = plt.subplots(num_rows, num_cols, figsize=(10, 8)) 14 | plt.subplots_adjust(hspace=0.4) 15 | 16 | # Spectral density when phi = 0.8 17 | temp = r'spectral density, $\phi = {0:.2}$' 18 | for i, phi in enumerate((0.8, -0.8)): 19 | ax = axes[i] 20 | sd = ar1_sd(phi, omegas) 21 | ax.plot(omegas, sd, 'b-', alpha=0.6, lw=2, label=temp.format(phi)) 22 | ax.legend(loc='upper center') 23 | ax.set_xlabel('frequency') 24 | ax.set_xlim((0, np.pi)) 25 | plt.show() 26 | -------------------------------------------------------------------------------- /examples/lucas_tree_price1.py: -------------------------------------------------------------------------------- 1 | 2 | from __future__ import division # Omit for Python 3.x 3 | import matplotlib.pyplot as plt 4 | from quantecon.models import LucasTree 5 | 6 | fig, ax = plt.subplots() 7 | 8 | tree = LucasTree(gamma=2, beta=0.95, alpha=0.90, sigma=0.1) 9 | grid, price_vals = tree.grid, tree.compute_lt_price() 10 | ax.plot(grid, price_vals, lw=2, alpha=0.7, label=r'$p^*(y)$') 11 | ax.set_xlim(min(grid), max(grid)) 12 | 13 | # tree = LucasTree(gamma=3, beta=0.95, alpha=0.90, sigma=0.1) 14 | # grid, price_vals = tree.grid, tree.compute_lt_price() 15 | # ax.plot(grid, price_vals, lw=2, alpha=0.7, label='more patient') 16 | # ax.set_xlim(min(grid), max(grid)) 17 | 18 | ax.set_xlabel(r'$y$', fontsize=16) 19 | ax.set_ylabel(r'price', fontsize=16) 20 | ax.legend(loc='upper left') 21 | 22 | plt.show() 23 | -------------------------------------------------------------------------------- /examples/tests/test_directory_pyfiles.py: -------------------------------------------------------------------------------- 1 | """ 2 | Simple Test Script which can be used to run a directory of py files 3 | 4 | Just run this file using `python $filename` 5 | 6 | """ 7 | #-Subprocess Recipe-# 8 | from subprocess import call 9 | import glob 10 | files = glob.glob("*.py") 11 | for fl in files: 12 | print "Testing File: %s" % fl 13 | call(["python", fl]) 14 | print "------------ END (%s) -----------------" % fl 15 | 16 | #-IPYTHON NOTEBOOK Recipe-# 17 | #-Instructions-# 18 | #--------------# 19 | #-1. Open an IPython Notebook in quantecon.py/examples/ folder 20 | #-2. 
Copy the following code recipe into the notebook and run 21 | import glob 22 | files = glob.glob("*.py") 23 | %pylab inline 24 | for fl in files: 25 | print "----RUNNING (%s)----"%fl 26 | %run $fl 27 | print "----END (%s)-----"%fl -------------------------------------------------------------------------------- /data/test_pwt.csv: -------------------------------------------------------------------------------- 1 | "country","country isocode","year","POP","XRAT","tcgdp","cc","cg" 2 | "Argentina","ARG","2000","37335.653","0.9995","295072.21869","75.716805379","5.5788042896" 3 | "Australia","AUS","2000","19053.186","1.72483","541804.6521","67.759025993","6.7200975332" 4 | "India","IND","2000","1006300.297","44.9416","1728144.3748","64.575551328","14.072205773" 5 | "Israel","ISR","2000","6114.57","4.07733","129253.89423","64.436450847","10.266688415" 6 | "Malawi","MWI","2000","11801.505","59.543808333","5026.2217836","74.707624181","11.658954494" 7 | "South Africa","ZAF","2000","45064.098","6.93983","227242.36949","72.718710427","5.7265463933" 8 | "United States","USA","2000","282171.957","1","9898700","72.347054303","6.0324539789" 9 | "Uruguay","URY","2000","3219.793","12.099591667","25255.961693","78.978740282","5.108067988" 10 | -------------------------------------------------------------------------------- /examples/cauchy_samples.py: -------------------------------------------------------------------------------- 1 | 2 | import numpy as np 3 | from scipy.stats import cauchy 4 | import matplotlib.pyplot as plt 5 | 6 | n = 1000 7 | distribution = cauchy() 8 | 9 | fig, ax = plt.subplots() 10 | data = distribution.rvs(n) 11 | 12 | if 0: 13 | ax.plot(list(range(n)), data, 'bo', alpha=0.5) 14 | ax.vlines(list(range(n)), 0, data, lw=0.2) 15 | ax.set_title("{} observations from the Cauchy distribution".format(n)) 16 | 17 | if 1: 18 | # == Compute sample mean at each n == # 19 | sample_mean = np.empty(n) 20 | for i in range(n): 21 | sample_mean[i] = np.mean(data[:i]) 22 | 23 | # == Plot == # 24 | ax.plot(list(range(n)), sample_mean, 'r-', lw=3, alpha=0.6, 25 | label=r'$\bar X_n$') 26 | ax.plot(list(range(n)), [0] * n, 'k--', lw=0.5) 27 | ax.legend() 28 | 29 | fig.show() 30 | -------------------------------------------------------------------------------- /examples/nx_demo.py: -------------------------------------------------------------------------------- 1 | """ 2 | Filename: nx_demo.py 3 | Authors: John Stachurski and Thomas J. Sargent 4 | """ 5 | 6 | import networkx as nx 7 | import matplotlib.pyplot as plt 8 | import numpy as np 9 | 10 | G = nx.random_geometric_graph(200, 0.12) # Generate random graph 11 | pos = nx.get_node_attributes(G, 'pos') # Get positions of nodes 12 | # find node nearest the center point (0.5,0.5) 13 | dists = [(x - 0.5)**2 + (y - 0.5)**2 for x, y in list(pos.values())] 14 | ncenter = np.argmin(dists) 15 | # Plot graph, coloring by path length from central node 16 | p = nx.single_source_shortest_path_length(G, ncenter) 17 | plt.figure() 18 | nx.draw_networkx_edges(G, pos, alpha=0.4) 19 | nx.draw_networkx_nodes(G, pos, nodelist=list(p.keys()), 20 | node_size=120, alpha=0.5, 21 | node_color=list(p.values()), cmap=plt.cm.jet_r) 22 | plt.show() 23 | -------------------------------------------------------------------------------- /examples/jv_test.py: -------------------------------------------------------------------------------- 1 | """ 2 | Origin: QE by John Stachurski and Thomas J. 
Sargent 3 | Filename: jv_test.py 4 | Authors: John Stachurski and Thomas Sargent 5 | LastModified: 11/08/2013 6 | 7 | Tests jv.py with a particular parameterization. 8 | 9 | """ 10 | import matplotlib.pyplot as plt 11 | from quantecon import compute_fixed_point 12 | from quantecon.models import JvWorker 13 | 14 | # === solve for optimal policy === # 15 | wp = JvWorker(grid_size=25) 16 | v_init = wp.x_grid * 0.5 17 | V = compute_fixed_point(wp.bellman_operator, v_init, max_iter=40) 18 | s_policy, phi_policy = wp.bellman_operator(V, return_policies=True) 19 | 20 | # === plot policies === # 21 | fig, ax = plt.subplots() 22 | ax.set_xlim(0, max(wp.x_grid)) 23 | ax.set_ylim(-0.1, 1.1) 24 | ax.plot(wp.x_grid, phi_policy, 'b-', label='phi') 25 | ax.plot(wp.x_grid, s_policy, 'g-', label='s') 26 | ax.set_xlabel("x") 27 | ax.legend() 28 | plt.show() 29 | -------------------------------------------------------------------------------- /examples/ifp_savings_plots.py: -------------------------------------------------------------------------------- 1 | """ 2 | Origin: QE by John Stachurski and Thomas J. Sargent 3 | Filename: ifp_savings_plots.py 4 | Authors: John Stachurski, Thomas J. Sargent 5 | LastModified: 11/08/2013 6 | 7 | """ 8 | 9 | from matplotlib import pyplot as plt 10 | from quantecon import compute_fixed_point 11 | from quantecon.models import ConsumerProblem 12 | 13 | # === solve for optimal consumption === # 14 | m = ConsumerProblem(r=0.03, grid_max=4) 15 | v_init, c_init = m.initialize() 16 | 17 | # Coleman Operator takes in (c)? 18 | c = compute_fixed_point(m.coleman_operator, c_init) 19 | a = m.asset_grid 20 | R, z_vals = m.R, m.z_vals 21 | 22 | # === generate savings plot === # 23 | fig, ax = plt.subplots() 24 | ax.plot(a, R * a + z_vals[0] - c[:, 0], label='low income') 25 | ax.plot(a, R * a + z_vals[1] - c[:, 1], label='high income') 26 | ax.plot(a, a, 'k--') 27 | ax.set_xlabel('current assets') 28 | ax.set_ylabel('next period assets') 29 | ax.legend(loc='upper left') 30 | plt.show() 31 | -------------------------------------------------------------------------------- /examples/tsh_hg.py: -------------------------------------------------------------------------------- 1 | 2 | import numpy as np 3 | import matplotlib.pyplot as plt 4 | from scipy.stats import norm 5 | from quantecon import LSS 6 | 7 | phi_1, phi_2, phi_3, phi_4 = 0.5, -0.2, 0, 0.5 8 | sigma = 0.1 9 | 10 | A = [[phi_1, phi_2, phi_3, phi_4], 11 | [1, 0, 0, 0], 12 | [0, 1, 0, 0], 13 | [0, 0, 1, 0]] 14 | C = [sigma, 0, 0, 0] 15 | G = [1, 0, 0, 0] 16 | 17 | T = 30 18 | ar = LSS(A, C, G) 19 | 20 | ymin, ymax = -0.8, 1.25 21 | 22 | fig, ax = plt.subplots(figsize=(8, 4)) 23 | 24 | ax.set_xlim(ymin, ymax) 25 | ax.set_xlabel(r'$y_t$', fontsize=16) 26 | 27 | x, y = ar.replicate(T=T, num_reps=100000) 28 | mu_x, mu_y, Sigma_x, Sigma_y = ar.stationary_distributions() 29 | f_y = norm(loc=float(mu_y), scale=float(np.sqrt(Sigma_y))) 30 | 31 | y = y.flatten() 32 | ax.hist(y, bins=50, normed=True, alpha=0.4) 33 | 34 | ygrid = np.linspace(ymin, ymax, 150) 35 | ax.plot(ygrid, f_y.pdf(ygrid), 'k-', lw=2, alpha=0.8, label='true density') 36 | ax.legend() 37 | plt.show() 38 | -------------------------------------------------------------------------------- /examples/lqramsey_ar1.py: -------------------------------------------------------------------------------- 1 | """ 2 | Filename: lqramsey_ar1.py 3 | Authors: Thomas Sargent, Doc-Jin Jang, Jeong-hun Choi, John Stachurski 4 | 5 | Example 1: Govt spending is AR(1) and state is (g, 1). 
6 | 7 | """ 8 | 9 | import numpy as np 10 | from numpy import array 11 | import lqramsey 12 | 13 | # == Parameters == # 14 | beta = 1 / 1.05 15 | rho, mg = .7, .35 16 | A = np.identity(2) 17 | A[0, :] = rho, mg * (1-rho) 18 | C = np.zeros((2, 1)) 19 | C[0, 0] = np.sqrt(1 - rho**2) * mg / 10 20 | Sg = array((1, 0)).reshape(1, 2) 21 | Sd = array((0, 0)).reshape(1, 2) 22 | Sb = array((0, 2.135)).reshape(1, 2) 23 | Ss = array((0, 0)).reshape(1, 2) 24 | 25 | economy = lqramsey.Economy(beta=beta, 26 | Sg=Sg, 27 | Sd=Sd, 28 | Sb=Sb, 29 | Ss=Ss, 30 | discrete=False, 31 | proc=(A, C)) 32 | 33 | T = 50 34 | path = lqramsey.compute_paths(T, economy) 35 | lqramsey.gen_fig_1(path) 36 | -------------------------------------------------------------------------------- /quantecon/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Import the main names to top level. 3 | """ 4 | 5 | from . import models as models 6 | from .compute_fp import compute_fixed_point 7 | from .cartesian import cartesian, mlinspace 8 | from .discrete_rv import DiscreteRV 9 | from .ecdf import ECDF 10 | from .estspec import smooth, periodogram, ar_periodogram 11 | from .graph_tools import DiGraph 12 | from .gth_solve import gth_solve 13 | from .kalman import Kalman 14 | from .lae import LAE 15 | from .arma import ARMA 16 | from .lqcontrol import LQ 17 | from .lqnash import nnash 18 | from .lss import LSS 19 | from .matrix_eqn import solve_discrete_lyapunov, solve_discrete_riccati 20 | from .mc_tools import MarkovChain, mc_compute_stationary, mc_sample_path 21 | from .quadsums import var_quadratic_sum, m_quadratic_sum 22 | from .rank_nullspace import rank_est, nullspace 23 | from .robustlq import RBLQ 24 | from .tauchen import approx_markov 25 | from . import quad as quad 26 | 27 | #Add Version Attribute 28 | from .version import version as __version__ 29 | -------------------------------------------------------------------------------- /examples/vecs2.py: -------------------------------------------------------------------------------- 1 | """ 2 | QE by Tom Sargent and John Stachurski. 3 | Illustrates scalar multiplication. 
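Multiplying a vector x by a scalar s yields s * x, which lies on the same line
through the origin as x; |s| > 1 stretches the vector and s < 0 reverses its
direction, as the red arrows drawn below for s = -2 and s = 2 show.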
4 | """ 5 | import matplotlib.pyplot as plt 6 | import numpy as np 7 | 8 | fig, ax = plt.subplots() 9 | # Set the axes through the origin 10 | for spine in ['left', 'bottom']: 11 | ax.spines[spine].set_position('zero') 12 | for spine in ['right', 'top']: 13 | ax.spines[spine].set_color('none') 14 | 15 | ax.set_xlim(-5, 5) 16 | ax.set_ylim(-5, 5) 17 | 18 | x = (2, 2) 19 | ax.annotate('', xy=x, xytext=(0, 0), 20 | arrowprops=dict(facecolor='blue', 21 | shrink=0, 22 | alpha=1, 23 | width=0.5)) 24 | ax.text(x[0] + 0.4, x[1] - 0.2, r'$x$', fontsize='16') 25 | 26 | 27 | scalars = (-2, 2) 28 | x = np.array(x) 29 | 30 | for s in scalars: 31 | v = s * x 32 | ax.annotate('', xy=v, xytext=(0, 0), 33 | arrowprops=dict(facecolor='red', 34 | shrink=0, 35 | alpha=0.5, 36 | width=0.5)) 37 | ax.text(v[0] + 0.4, v[1] - 0.2, r'${} x$'.format(s), fontsize='16') 38 | plt.show() 39 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | # Thank you for the writeup given by dan-blanchard at https://gist.github.com/dan-blanchard/7045057 2 | language: python 3 | python: 4 | - 2.7 5 | - 3.3 6 | 7 | notifications: 8 | email: false 9 | 10 | branches: 11 | only: 12 | - master 13 | 14 | before_install: 15 | - wget http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh -O miniconda.sh 16 | - chmod +x miniconda.sh 17 | - ./miniconda.sh -b 18 | - export PATH=/home/travis/miniconda/bin:$PATH 19 | - conda update --yes conda 20 | - sudo rm -rf /dev/shm 21 | - sudo ln -s /run/shm /dev/shm 22 | install: 23 | - conda install --yes python=$TRAVIS_PYTHON_VERSION ipython numpy scipy matplotlib nose pandas pip sympy pytables statsmodels numba 24 | # Get packages that we can't get directly from conda 25 | - pip install coveralls coverage 26 | # - conda install --yes -c dan_blanchard python-coveralls nose-cov 27 | - python setup.py install 28 | - cp quantecon/tests/matplotlibrc . 29 | 30 | script: 31 | - nosetests --with-coverage --cover-package=quantecon 32 | 33 | after_success: 34 | - coveralls 35 | -------------------------------------------------------------------------------- /examples/mc_convergence_plot.py: -------------------------------------------------------------------------------- 1 | """ 2 | Filename: mc_convergence_plot.py 3 | Authors: John Stachurski, Thomas J. 
Sargent 4 | 5 | """ 6 | import numpy as np 7 | from mpl_toolkits.mplot3d import Axes3D 8 | import matplotlib.pyplot as plt 9 | from quantecon import mc_compute_stationary 10 | 11 | P = ((0.971, 0.029, 0.000), 12 | (0.145, 0.778, 0.077), 13 | (0.000, 0.508, 0.492)) 14 | P = np.array(P) 15 | 16 | psi = (0.0, 0.2, 0.8) # Initial condition 17 | 18 | fig = plt.figure() 19 | ax = fig.add_subplot(111, projection='3d') 20 | 21 | ax.set_xlim(0, 1) 22 | ax.set_ylim(0, 1) 23 | ax.set_zlim(0, 1) 24 | ax.set_xticks((0.25, 0.5, 0.75)) 25 | ax.set_yticks((0.25, 0.5, 0.75)) 26 | ax.set_zticks((0.25, 0.5, 0.75)) 27 | 28 | x_vals, y_vals, z_vals = [], [], [] 29 | for t in range(20): 30 | x_vals.append(psi[0]) 31 | y_vals.append(psi[1]) 32 | z_vals.append(psi[2]) 33 | psi = np.dot(psi, P) 34 | 35 | ax.scatter(x_vals, y_vals, z_vals, c='r', s=60) 36 | 37 | psi_star = mc_compute_stationary(P)[0] 38 | ax.scatter(psi_star[0], psi_star[1], psi_star[2], c='k', s=60) 39 | 40 | plt.show() 41 | -------------------------------------------------------------------------------- /quantecon/tests/test_ecdf.py: -------------------------------------------------------------------------------- 1 | """ 2 | tests for quantecon.ecdf 3 | 4 | @author : Spencer Lyon 5 | @date : 2014-07-31 6 | 7 | """ 8 | from __future__ import division 9 | import unittest 10 | import numpy as np 11 | from quantecon import ECDF 12 | 13 | 14 | class TestECDF(unittest.TestCase): 15 | 16 | @classmethod 17 | def setUpClass(cls): 18 | cls.obs = np.random.rand(40) # observations defining dist 19 | cls.ecdf = ECDF(cls.obs) 20 | 21 | def test_call_high(self): 22 | "ecdf: x above all obs give 1.0" 23 | # all of self.obs <= 1 so ecdf(1.1) should be 1 24 | self.assertAlmostEqual(self.ecdf(1.1), 1.0) 25 | 26 | def test_call_low(self): 27 | "ecdf: x below all obs give 0.0" 28 | # all of self.obs <= 1 so ecdf(1.1) should be 1 29 | self.assertAlmostEqual(self.ecdf(-0.1), 0.0) 30 | 31 | def test_ascending(self): 32 | "ecdf: larger values should return F(x) at least as big" 33 | x = np.random.rand() 34 | F_1 = self.ecdf(x) 35 | F_2 = self.ecdf(1.1 * x) 36 | self.assertGreaterEqual(F_2, F_1) 37 | -------------------------------------------------------------------------------- /examples/career_vf_plot.py: -------------------------------------------------------------------------------- 1 | """ 2 | Origin: QE by John Stachurski and Thomas J. 
Sargent 3 | Filename: career_vf_plot.py 4 | Authors: John Stachurski and Thomas Sargent 5 | LastModified: 11/08/2013 6 | 7 | """ 8 | 9 | import matplotlib.pyplot as plt 10 | from mpl_toolkits.mplot3d.axes3d import Axes3D 11 | import numpy as np 12 | from matplotlib import cm 13 | import quantecon as qe 14 | from quantecon.models import CareerWorkerProblem 15 | 16 | # === solve for the value function === # 17 | wp = CareerWorkerProblem() 18 | v_init = np.ones((wp.N, wp.N))*100 19 | v = qe.compute_fixed_point(wp.bellman_operator, v_init) 20 | 21 | # === plot value function === # 22 | fig = plt.figure(figsize=(8, 6)) 23 | ax = fig.add_subplot(111, projection='3d') 24 | tg, eg = np.meshgrid(wp.theta, wp.epsilon) 25 | ax.plot_surface(tg, 26 | eg, 27 | v.T, 28 | rstride=2, cstride=2, 29 | cmap=cm.jet, 30 | alpha=0.5, 31 | linewidth=0.25) 32 | ax.set_zlim(150, 200) 33 | ax.set_xlabel('theta', fontsize=14) 34 | ax.set_ylabel('epsilon', fontsize=14) 35 | plt.show() 36 | -------------------------------------------------------------------------------- /examples/paths_and_stationarity.py: -------------------------------------------------------------------------------- 1 | 2 | import numpy as np 3 | import matplotlib.pyplot as plt 4 | from quantecon import LSS 5 | import random 6 | 7 | phi_1, phi_2, phi_3, phi_4 = 0.5, -0.2, 0, 0.5 8 | sigma = 0.1 9 | 10 | A = [[phi_1, phi_2, phi_3, phi_4], 11 | [1, 0, 0, 0], 12 | [0, 1, 0, 0], 13 | [0, 0, 1, 0]] 14 | C = [sigma, 0, 0, 0] 15 | G = [1, 0, 0, 0] 16 | 17 | T0 = 10 18 | T1 = 50 19 | T2 = 75 20 | T4 = 100 21 | 22 | ar = LSS(A, C, G, mu_0=np.ones(4)) 23 | ymin, ymax = -0.8, 1.25 24 | 25 | fig, ax = plt.subplots(figsize=(8, 5)) 26 | 27 | ax.grid(alpha=0.4) 28 | ax.set_ylim(ymin, ymax) 29 | ax.set_ylabel(r'$y_t$', fontsize=16) 30 | ax.vlines((T0, T1, T2), -1.5, 1.5) 31 | 32 | ax.set_xticks((T0, T1, T2)) 33 | ax.set_xticklabels((r"$T$", r"$T'$", r"$T''$"), fontsize=14) 34 | 35 | sample = [] 36 | for i in range(80): 37 | rcolor = random.choice(('c', 'g', 'b')) 38 | x, y = ar.simulate(ts_length=T4) 39 | y = y.flatten() 40 | ax.plot(y, color=rcolor, lw=0.8, alpha=0.5) 41 | ax.plot((T0, T1, T2), (y[T0], y[T1], y[T2],), 'ko', alpha=0.5) 42 | 43 | plt.show() 44 | -------------------------------------------------------------------------------- /examples/duopoly_mpe.py: -------------------------------------------------------------------------------- 1 | """ 2 | @authors: Chase Coleman, Thomas Sargent, John Stachurski 3 | 4 | Markov Perfect Equilibrium for the simple duopoly example. 5 | 6 | See the lecture at http://quant-econ.net/py/markov_perfect.html for a 7 | description of the model. 
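In the LQ formulation below, the state is taken to be x_t = (1, q_{1t}, q_{2t})
and firm i's control is the output change u_{it} = q_{i,t+1} - q_{it}, mirroring
the comments in duopoly_lqnash.py.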
8 | """ 9 | 10 | from __future__ import division 11 | import numpy as np 12 | import quantecon as qe 13 | 14 | # == Parameters == # 15 | a0 = 10.0 16 | a1 = 2.0 17 | beta = 0.96 18 | gamma = 12.0 19 | 20 | # == In LQ form == # 21 | 22 | A = np.eye(3) 23 | 24 | B1 = np.array([[0.], [1.], [0.]]) 25 | B2 = np.array([[0.], [0.], [1.]]) 26 | 27 | 28 | R1 = [[0., -a0/2, 0.], 29 | [-a0/2., a1, a1/2.], 30 | [0, a1/2., 0.]] 31 | 32 | R2 = [[0., 0., -a0/2], 33 | [0., 0., a1/2.], 34 | [-a0/2, a1/2., a1]] 35 | 36 | Q1 = Q2 = gamma 37 | 38 | S1 = S2 = W1 = W2 = M1 = M2 = 0.0 39 | 40 | # == Solve using QE's nnash function == # 41 | F1, F2, P1, P2 = qe.nnash(A, B1, B2, R1, R2, Q1, Q2, S1, S2, W1, W2, M1, M2, 42 | beta=beta) 43 | 44 | # == Display policies == # 45 | print("Computed policies for firm 1 and firm 2:\n") 46 | print("F1 = {}".format(F1)) 47 | print("F2 = {}".format(F2)) 48 | print("\n") 49 | -------------------------------------------------------------------------------- /examples/paths_and_hist.py: -------------------------------------------------------------------------------- 1 | 2 | import numpy as np 3 | import matplotlib.pyplot as plt 4 | from quantecon import LSS 5 | import random 6 | 7 | phi_1, phi_2, phi_3, phi_4 = 0.5, -0.2, 0, 0.5 8 | sigma = 0.1 9 | 10 | A = [[phi_1, phi_2, phi_3, phi_4], 11 | [1, 0, 0, 0], 12 | [0, 1, 0, 0], 13 | [0, 0, 1, 0]] 14 | C = [sigma, 0, 0, 0] 15 | G = [1, 0, 0, 0] 16 | 17 | T = 30 18 | ar = LSS(A, C, G, mu_0=np.ones(4)) 19 | 20 | ymin, ymax = -0.8, 1.25 21 | 22 | fig, axes = plt.subplots(1, 2, figsize=(8, 3)) 23 | 24 | for ax in axes: 25 | ax.grid(alpha=0.4) 26 | 27 | ax = axes[0] 28 | 29 | ax.set_ylim(ymin, ymax) 30 | ax.set_ylabel(r'$y_t$', fontsize=16) 31 | ax.vlines((T,), -1.5, 1.5) 32 | 33 | ax.set_xticks((T,)) 34 | ax.set_xticklabels((r'$T$',)) 35 | 36 | sample = [] 37 | for i in range(20): 38 | rcolor = random.choice(('c', 'g', 'b', 'k')) 39 | x, y = ar.simulate(ts_length=T+15) 40 | y = y.flatten() 41 | ax.plot(y, color=rcolor, lw=1, alpha=0.5) 42 | ax.plot((T,), (y[T],), 'ko', alpha=0.5) 43 | sample.append(y[T]) 44 | 45 | y = y.flatten() 46 | axes[1].set_ylim(ymin, ymax) 47 | axes[1].hist(sample, bins=16, normed=True, orientation='horizontal', alpha=0.5) 48 | 49 | plt.show() 50 | -------------------------------------------------------------------------------- /quantecon/tests/test_matrix_eqn.py: -------------------------------------------------------------------------------- 1 | """ 2 | tests for quantecon.util 3 | 4 | """ 5 | from __future__ import division 6 | from collections import Counter 7 | import unittest 8 | import numpy as np 9 | from numpy.testing import assert_allclose 10 | from nose.plugins.attrib import attr 11 | import pandas as pd 12 | from quantecon import matrix_eqn as qme 13 | 14 | 15 | def test_solve_discrete_lyapunov_zero(): 16 | 'Simple test where X is all zeros' 17 | A = np.eye(4) * .95 18 | B = np.zeros((4, 4)) 19 | 20 | X = qme.solve_discrete_lyapunov(A, B) 21 | 22 | assert_allclose(X, np.zeros((4, 4))) 23 | 24 | 25 | def test_solve_discrete_lyapunov_B(): 26 | 'Simple test where X is same as B' 27 | A = np.ones((2, 2)) * .5 28 | B = np.array([[.5, -.5], [-.5, .5]]) 29 | 30 | X = qme.solve_discrete_lyapunov(A, B) 31 | 32 | assert_allclose(B, X) 33 | 34 | def test_solve_discrete_lyapunov_complex(): 35 | 'Complex test, A is companion matrix' 36 | A = np.array([[0.5 + 0.3j, 0.1 + 0.1j], 37 | [ 1, 0]]) 38 | B = np.eye(2) 39 | 40 | X = qme.solve_discrete_lyapunov(A, B) 41 | 42 | assert_allclose(np.dot(np.dot(A, X), 
A.conj().transpose()) - X, -B, 43 | atol=1e-15) 44 | 45 | -------------------------------------------------------------------------------- /examples/lqramsey_discrete.py: -------------------------------------------------------------------------------- 1 | """ 2 | Filename: lqramsey_discrete.py 3 | Authors: Thomas Sargent, Doc-Jin Jang, Jeong-hun Choi, John Stachurski 4 | 5 | LQ Ramsey model with discrete exogenous process. 6 | 7 | """ 8 | from numpy import array 9 | import lqramsey 10 | 11 | # == Parameters == # 12 | beta = 1 / 1.05 13 | P = array([[0.8, 0.2, 0.0], 14 | [0.0, 0.5, 0.5], 15 | [0.0, 0.0, 1.0]]) 16 | # == Possible states of the world == # 17 | # Each column is a state of the world. The rows are [g d b s 1] 18 | x_vals = array([[0.5, 0.5, 0.25], 19 | [0.0, 0.0, 0.0], 20 | [2.2, 2.2, 2.2], 21 | [0.0, 0.0, 0.0], 22 | [1.0, 1.0, 1.0]]) 23 | Sg = array((1, 0, 0, 0, 0)).reshape(1, 5) 24 | Sd = array((0, 1, 0, 0, 0)).reshape(1, 5) 25 | Sb = array((0, 0, 1, 0, 0)).reshape(1, 5) 26 | Ss = array((0, 0, 0, 1, 0)).reshape(1, 5) 27 | 28 | economy = lqramsey.Economy(beta=beta, 29 | Sg=Sg, 30 | Sd=Sd, 31 | Sb=Sb, 32 | Ss=Ss, 33 | discrete=True, 34 | proc=(P, x_vals)) 35 | 36 | T = 15 37 | path = lqramsey.compute_paths(T, economy) 38 | lqramsey.gen_fig_1(path) 39 | -------------------------------------------------------------------------------- /examples/ar1_cycles.py: -------------------------------------------------------------------------------- 1 | """ 2 | Helps to illustrate the spectral density for AR(1) X' = phi X + epsilon 3 | """ 4 | import numpy as np 5 | import matplotlib.pyplot as plt 6 | 7 | phi = -0.8 8 | times = list(range(16)) 9 | y1 = [phi**k / (1 - phi**2) for k in times] 10 | y2 = [np.cos(np.pi * k) for k in times] 11 | y3 = [a * b for a, b in zip(y1, y2)] 12 | 13 | num_rows, num_cols = 3, 1 14 | fig, axes = plt.subplots(num_rows, num_cols, figsize=(10, 8)) 15 | plt.subplots_adjust(hspace=0.25) 16 | 17 | # Autocovariance when phi = -0.8 18 | ax = axes[0] 19 | ax.plot(times, y1, 'bo-', alpha=0.6, label=r'$\gamma(k)$') 20 | ax.legend(loc='upper right') 21 | ax.set_xlim(0, 15) 22 | ax.set_yticks((-2, 0, 2)) 23 | ax.hlines(0, 0, 15, linestyle='--', alpha=0.5) 24 | 25 | # Cycles at frequence pi 26 | ax = axes[1] 27 | ax.plot(times, y2, 'bo-', alpha=0.6, label=r'$\cos(\pi k)$') 28 | ax.legend(loc='upper right') 29 | ax.set_xlim(0, 15) 30 | ax.set_yticks((-1, 0, 1)) 31 | ax.hlines(0, 0, 15, linestyle='--', alpha=0.5) 32 | 33 | # Product 34 | ax = axes[2] 35 | ax.stem(times, y3, label=r'$\gamma(k) \cos(\pi k)$') 36 | ax.legend(loc='upper right') 37 | ax.set_xlim((0, 15)) 38 | ax.set_ylim(-3, 3) 39 | ax.set_yticks((-1, 0, 1, 2, 3)) 40 | ax.hlines(0, 0, 15, linestyle='--', alpha=0.5) 41 | 42 | plt.show() 43 | -------------------------------------------------------------------------------- /examples/evans_sargent_plot1.py: -------------------------------------------------------------------------------- 1 | """ 2 | Plot 1 from the Evans Sargent model. 3 | 4 | @author: David Evans 5 | Edited by: John Stachurski 6 | 7 | """ 8 | import numpy as np 9 | import matplotlib.pyplot as plt 10 | from evans_sargent import T, y 11 | 12 | tt = np.arange(T) # tt is used to make the plot time index correct. 
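# y comes from evans_sargent and is taken here to be an array whose rows 1, 2
# and 3 hold the paths of output Q, the tax rate tau and the first difference
# in output u that are plotted below.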
13 | 14 | n_rows = 3 15 | fig, axes = plt.subplots(n_rows, 1, figsize=(10, 12)) 16 | 17 | plt.subplots_adjust(hspace=0.5) 18 | for ax in axes: 19 | ax.grid() 20 | ax.set_xlim(0, 15) 21 | 22 | bbox = (0., 1.02, 1., .102) 23 | legend_args = {'bbox_to_anchor': bbox, 'loc': 3, 'mode': 'expand'} 24 | p_args = {'lw': 2, 'alpha': 0.7} 25 | 26 | ax = axes[0] 27 | ax.plot(tt, y[1, :], 'b-', label="output", **p_args) 28 | ax.set_ylabel(r"$Q$", fontsize=16) 29 | ax.legend(ncol=1, **legend_args) 30 | 31 | ax = axes[1] 32 | ax.plot(tt, y[2, :], 'b-', label="tax rate", **p_args) 33 | ax.set_ylabel(r"$\tau$", fontsize=16) 34 | ax.set_yticks((0.0, 0.2, 0.4, 0.6, 0.8)) 35 | ax.legend(ncol=1, **legend_args) 36 | 37 | ax = axes[2] 38 | ax.plot(tt, y[3, :], 'b-', label="first difference in output", **p_args) 39 | ax.set_ylabel(r"$u$", fontsize=16) 40 | ax.set_yticks((0, 100, 200, 300, 400)) 41 | ax.legend(ncol=1, **legend_args) 42 | ax.set_xlabel(r'time', fontsize=16) 43 | 44 | plt.show() 45 | -------------------------------------------------------------------------------- /quantecon/tests/test_arma.py: -------------------------------------------------------------------------------- 1 | """ 2 | Filename: test_arma.py 3 | Authors: Chase Coleman 4 | Date: 07/24/2014 5 | 6 | Tests for arma.py file. Most of this testing can be considered 7 | covered by the numpy tests since we rely on much of their code. 8 | 9 | """ 10 | import sys 11 | import os 12 | import unittest 13 | import numpy as np 14 | from numpy.testing import assert_allclose 15 | from quantecon.arma import ARMA 16 | 17 | 18 | class TestARMA(unittest.TestCase): 19 | 20 | def setUp(self): 21 | # Initial Values 22 | phi = np.array([.95, -.4, -.4]) 23 | theta = np.zeros(3) 24 | sigma = .15 25 | 26 | 27 | self.lp = ARMA(phi, theta, sigma) 28 | 29 | 30 | def tearDown(self): 31 | del self.lp 32 | 33 | def test_simulate(self): 34 | lp = self.lp 35 | 36 | sim = lp.simulation(ts_length=250) 37 | 38 | self.assertTrue(sim.size==250) 39 | 40 | def test_impulse_response(self): 41 | lp = self.lp 42 | 43 | imp_resp = lp.impulse_response(impulse_length=75) 44 | 45 | self.assertTrue(imp_resp.size==75) 46 | 47 | if __name__ == '__main__': 48 | suite = unittest.TestLoader().loadTestsFromTestCase(TestARMA) 49 | unittest.TextTestRunner(verbosity=2, stream=sys.stderr).run(suite) 50 | 51 | -------------------------------------------------------------------------------- /examples/wb_download.py: -------------------------------------------------------------------------------- 1 | """ 2 | Origin: QE by John Stachurski and Thomas J. Sargent 3 | Filename: wb_download.py 4 | Authors: John Stachurski, Tomohito Okabe 5 | LastModified: 29/08/2013 6 | 7 | Dowloads data from the World Bank site on GDP per capita and plots result for 8 | a subset of countries. 9 | 10 | NOTE: This is not dually compatible with Python 3. Python 2 and Python 11 | 3 call the urllib package differently. 
12 | """ 13 | import sys 14 | import matplotlib.pyplot as plt 15 | from pandas.io.excel import ExcelFile 16 | 17 | if sys.version_info[0] == 2: 18 | from urllib import urlretrieve 19 | elif sys.version_info[0] == 3: 20 | from urllib.request import urlretrieve 21 | 22 | # == Get data and read into file gd.xls == # 23 | wb_data_file_dir = "http://api.worldbank.org/datafiles/" 24 | file_name = "GC.DOD.TOTL.GD.ZS_Indicator_MetaData_en_EXCEL.xls" 25 | url = wb_data_file_dir + file_name 26 | urlretrieve(url, "gd.xls") 27 | 28 | # == Parse data into a DataFrame == # 29 | gov_debt_xls = ExcelFile('gd.xls') 30 | govt_debt = gov_debt_xls.parse('Sheet1', index_col=1, na_values=['NA']) 31 | 32 | # == Take desired values and plot == # 33 | govt_debt = govt_debt.transpose() 34 | govt_debt = govt_debt[['AUS', 'DEU', 'FRA', 'USA']] 35 | govt_debt = govt_debt[36:] 36 | govt_debt.plot(lw=2) 37 | plt.show() 38 | -------------------------------------------------------------------------------- /quantecon/ecdf.py: -------------------------------------------------------------------------------- 1 | """ 2 | Filename: ecdf.py 3 | 4 | Authors: Thomas Sargent, John Stachurski 5 | 6 | Implements the empirical cumulative distribution function given an array 7 | of observations. 8 | 9 | """ 10 | 11 | import numpy as np 12 | 13 | 14 | class ECDF(object): 15 | """ 16 | One-dimensional empirical distribution function given a vector of 17 | observations. 18 | 19 | Parameters 20 | ---------- 21 | observations : array_like 22 | An array of observations 23 | 24 | Attributes 25 | ---------- 26 | observations : see Parameters 27 | 28 | """ 29 | 30 | def __init__(self, observations): 31 | self.observations = np.asarray(observations) 32 | 33 | def __repr__(self): 34 | return self.__str__() 35 | 36 | def __str__(self): 37 | m = "Empirical CDF:\n - number of observations: {n}" 38 | return m.format(n=self.observations.size) 39 | 40 | def __call__(self, x): 41 | """ 42 | Evaluates the ecdf at x 43 | 44 | Parameters 45 | ---------- 46 | x : scalar(float) 47 | The x at which the ecdf is evaluated 48 | 49 | Returns 50 | ------- 51 | scalar(float) 52 | Fraction of the sample less than x 53 | 54 | """ 55 | return np.mean(self.observations <= x) 56 | -------------------------------------------------------------------------------- /quantecon/tests/test_lae.py: -------------------------------------------------------------------------------- 1 | """ 2 | Tests for lae.py 3 | 4 | @author : Spencer Lyon 5 | @date : 2014-08-02 6 | 7 | TODO: write (economically) meaningful tests for this module 8 | 9 | """ 10 | from __future__ import division 11 | from nose.tools import assert_equal 12 | import numpy as np 13 | from scipy.stats import lognorm 14 | from quantecon import LAE 15 | 16 | # copied from the lae lecture 17 | s = 0.2 18 | delta = 0.1 19 | a_sigma = 0.4 # A = exp(B) where B ~ N(0, a_sigma) 20 | alpha = 0.4 # We set f(k) = k**alpha 21 | phi = lognorm(a_sigma) 22 | 23 | 24 | def p(x, y): 25 | d = s * x**alpha 26 | return phi.pdf((y - (1 - delta) * x) / d) / d 27 | 28 | # other data 29 | n_a, n_b, n_y = 50, (5, 5), 20 30 | a = np.random.rand(n_a) + 0.01 31 | b = np.random.rand(*n_b) + 0.01 32 | 33 | y = np.linspace(0, 10, 20) 34 | 35 | lae_a = LAE(p, a) 36 | lae_b = LAE(p, b) 37 | 38 | 39 | def test_x_flattened(): 40 | "lae: is x flattened and reshaped" 41 | # should have a trailing singleton dimension 42 | assert_equal(lae_b.X.shape[-1], 1) 43 | assert_equal(lae_a.X.shape[-1], 1) 44 | 45 | 46 | def test_x_2d(): 47 | "lae: is x 2d" 48 | 
assert_equal(lae_a.X.ndim, 2) 49 | assert_equal(lae_b.X.ndim, 2) 50 | 51 | 52 | def test_call_shapes(): 53 | "lae: shape of call to lae" 54 | assert_equal(lae_a(y).shape, (n_y,)) 55 | assert_equal(lae_b(y).shape, (n_y,)) 56 | -------------------------------------------------------------------------------- /examples/illustrates_clt.py: -------------------------------------------------------------------------------- 1 | """ 2 | Filename: illustrates_clt.py 3 | Authors: John Stachurski and Thomas J. Sargent 4 | 5 | Visual illustration of the central limit theorem. Histograms draws of 6 | 7 | Y_n := \sqrt{n} (\bar X_n - \mu) 8 | 9 | for a given distribution of X_i, and a given choice of n. 10 | """ 11 | import numpy as np 12 | from scipy.stats import expon, norm 13 | import matplotlib.pyplot as plt 14 | from matplotlib import rc 15 | 16 | # == Specifying font, needs LaTeX integration == # 17 | rc('font', **{'family': 'serif', 'serif': ['Palatino']}) 18 | rc('text', usetex=True) 19 | 20 | # == Set parameters == # 21 | n = 250 # Choice of n 22 | k = 100000 # Number of draws of Y_n 23 | distribution = expon(2) # Exponential distribution, lambda = 1/2 24 | mu, s = distribution.mean(), distribution.std() 25 | 26 | # == Draw underlying RVs. Each row contains a draw of X_1,..,X_n == # 27 | data = distribution.rvs((k, n)) 28 | # == Compute mean of each row, producing k draws of \bar X_n == # 29 | sample_means = data.mean(axis=1) 30 | # == Generate observations of Y_n == # 31 | Y = np.sqrt(n) * (sample_means - mu) 32 | 33 | # == Plot == # 34 | fig, ax = plt.subplots() 35 | xmin, xmax = -3 * s, 3 * s 36 | ax.set_xlim(xmin, xmax) 37 | ax.hist(Y, bins=60, alpha=0.5, normed=True) 38 | xgrid = np.linspace(xmin, xmax, 200) 39 | ax.plot(xgrid, norm.pdf(xgrid, scale=s), 'k-', lw=2, label=r'$N(0, \sigma^2)$') 40 | ax.legend() 41 | 42 | plt.show() 43 | -------------------------------------------------------------------------------- /quantecon/tests/test_quadsum.py: -------------------------------------------------------------------------------- 1 | """ 2 | Filename: test_tauchen.py 3 | Authors: Chase Coleman 4 | Date: 07/24/2014 5 | 6 | Tests for quadsums.py file 7 | 8 | """ 9 | import sys 10 | import os 11 | import unittest 12 | import numpy as np 13 | from numpy.testing import assert_allclose 14 | from quantecon.quadsums import var_quadratic_sum, m_quadratic_sum 15 | 16 | 17 | def test_var_simplesum(): 18 | beta = .95 19 | A = 1. 20 | C = 0. 21 | H = 1. 22 | x0 = 1. 
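# With A = 1 and C = 0 the state stays at x0 = 1, so the sum being tested
# reduces to the geometric series sum_t beta**t = 1 / (1 - 0.95) = 20,
# which is the value asserted below.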
23 | 24 | val = var_quadratic_sum(A, C, H, beta, x0) 25 | 26 | assert abs(val-20) < 1e-10 27 | 28 | 29 | def test_var_identitysum(): 30 | beta = .95 31 | A = np.eye(3) 32 | C = np.zeros((3, 3)) 33 | H = np.eye(3) 34 | x0 = np.ones(3) 35 | 36 | val = var_quadratic_sum(A, C, H, beta, x0) 37 | 38 | assert(abs(val-60) < 1e-10) 39 | 40 | 41 | def test_m_simplesum(): 42 | a = np.sqrt(.95) 43 | b = 1 44 | 45 | retval = m_quadratic_sum(a, b) 46 | 47 | assert(abs(retval - 20) < 1e-8) 48 | 49 | 50 | def test_m_matsum(): 51 | 52 | a = np.eye(3) * .99 53 | b = np.eye(3) 54 | 55 | retval = m_quadratic_sum(a, b) 56 | 57 | summedval = np.zeros_like(a) 58 | 59 | for i in range(5000): 60 | summedval = summedval + a**i * b * a.T**i 61 | 62 | assert_allclose(retval, summedval, atol=1e-5, rtol=0) 63 | 64 | 65 | 66 | if __name__ == '__main__': 67 | test_simplesum() 68 | test_identitysum() 69 | test_m_simplesum() 70 | test_m_identitysum -------------------------------------------------------------------------------- /examples/duopoly_lqnash.py: -------------------------------------------------------------------------------- 1 | """ 2 | Filename: lqnash.py 3 | Authors: Chase Coleman, Thomas Sargent 4 | 5 | This file provides an example of a Markov Perfect Equilibrium for a 6 | simple duopoly example. 7 | 8 | See the lecture at http://quant-econ.net/markov_perfect.html for a 9 | description of the model. 10 | 11 | """ 12 | from __future__ import division 13 | from numpy import array, eye 14 | from quantecon.lqnash import nnash 15 | 16 | 17 | # ---------------------------------------------------------------------# 18 | # Set up parameter values and LQ matrices 19 | # Remember state is x_t = [1, y_{1, t}, y_{2, t}] and 20 | # control is u_{i, t} = [y_{i, t+1} - y_{i, t}] 21 | # ---------------------------------------------------------------------# 22 | a0 = 10. 23 | a1 = 1. 24 | beta = 1. 25 | d = .5 26 | 27 | a = eye(3) 28 | b1 = array([[0.], [1.], [0.]]) 29 | b2 = array([[0.], [0.], [1.]]) 30 | 31 | r1 = array([[a0, 0., 0.], 32 | [0., -a1, -a1/2.], 33 | [0, -a1/2., 0.]]) 34 | 35 | r2 = array([[a0, 0., 0.], 36 | [0., 0., -a1/2.], 37 | [0, -a1/2., -a1]]) 38 | 39 | q1 = array([[-.5*d]]) 40 | q2 = array([[-.5*d]]) 41 | 42 | 43 | # ---------------------------------------------------------------------# 44 | # Solve using QE's nnash function 45 | # ---------------------------------------------------------------------# 46 | 47 | f1, f2, p1, p2 = nnash(a, b1, b2, r1, r2, q1, q2, 0., 0., 0., 0., 0., 0., 48 | tol=1e-8, max_iter=1000) 49 | -------------------------------------------------------------------------------- /examples/eigenvec.py: -------------------------------------------------------------------------------- 1 | """ 2 | Filename: eigenvec.py 3 | Authors: Tom Sargent and John Stachurski. 4 | 5 | Illustrates eigenvectors. 
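For the matrix A = [[1, 2], [2, 1]] used below, the eigenvalues are 3 and -1 with
eigenvectors proportional to (1, 1) and (1, -1), so each image A v (red arrow) lies
on the same line through the origin as the corresponding eigenvector v (blue arrow).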
6 | """ 7 | 8 | import matplotlib.pyplot as plt 9 | import numpy as np 10 | from scipy.linalg import eig 11 | 12 | A = ((1, 2), 13 | (2, 1)) 14 | A = np.array(A) 15 | evals, evecs = eig(A) 16 | evecs = evecs[:, 0], evecs[:, 1] 17 | 18 | fig, ax = plt.subplots() 19 | # Set the axes through the origin 20 | for spine in ['left', 'bottom']: 21 | ax.spines[spine].set_position('zero') 22 | for spine in ['right', 'top']: 23 | ax.spines[spine].set_color('none') 24 | ax.grid(alpha=0.4) 25 | 26 | xmin, xmax = -3, 3 27 | ymin, ymax = -3, 3 28 | ax.set_xlim(xmin, xmax) 29 | ax.set_ylim(ymin, ymax) 30 | # ax.set_xticks(()) 31 | # ax.set_yticks(()) 32 | 33 | # Plot each eigenvector 34 | for v in evecs: 35 | ax.annotate('', xy=v, xytext=(0, 0), 36 | arrowprops=dict(facecolor='blue', 37 | shrink=0, 38 | alpha=0.6, 39 | width=0.5)) 40 | 41 | # Plot the image of each eigenvector 42 | for v in evecs: 43 | v = np.dot(A, v) 44 | ax.annotate('', xy=v, xytext=(0, 0), 45 | arrowprops=dict(facecolor='red', 46 | shrink=0, 47 | alpha=0.6, 48 | width=0.5)) 49 | 50 | # Plot the lines they run through 51 | x = np.linspace(xmin, xmax, 3) 52 | for v in evecs: 53 | a = v[1] / v[0] 54 | ax.plot(x, a * x, 'b-', lw=0.4) 55 | 56 | 57 | plt.show() 58 | -------------------------------------------------------------------------------- /quantecon/tests/test_lss.py: -------------------------------------------------------------------------------- 1 | """ 2 | Filename: test_lss.py 3 | Authors: Chase Coleman 4 | Date: 07/24/2014 5 | 6 | Tests for lss.py file 7 | 8 | """ 9 | import sys 10 | import os 11 | import unittest 12 | import numpy as np 13 | from numpy.testing import assert_allclose 14 | from quantecon.lss import LSS 15 | 16 | 17 | class TestLinearStateSpace(unittest.TestCase): 18 | 19 | def setUp(self): 20 | # Initial Values 21 | A = .95 22 | C = .05 23 | G = 1. 24 | mu_0 = .75 25 | 26 | self.ss = LSS(A, C, G, mu_0) 27 | 28 | def tearDown(self): 29 | del self.ss 30 | 31 | def test_stationarity(self): 32 | vals = self.ss.stationary_distributions(max_iter=1000, tol=1e-9) 33 | ssmux, ssmuy, sssigx, sssigy = vals 34 | 35 | self.assertTrue(abs(ssmux - ssmuy) < 2e-8) 36 | self.assertTrue(abs(sssigx - sssigy) < 2e-8) 37 | self.assertTrue(abs(ssmux) < 2e-8) 38 | self.assertTrue(abs(sssigx - self.ss.C**2/(1 - self.ss.A**2)) < 2e-8) 39 | 40 | def test_replicate(self): 41 | xval, yval = self.ss.replicate(T=100, num_reps=5000) 42 | 43 | assert_allclose(xval, yval) 44 | self.assertEqual(xval.size, 5000) 45 | self.assertLessEqual(abs(np.mean(xval)), .05) 46 | 47 | # def test_ 48 | 49 | 50 | if __name__ == '__main__': 51 | suite = unittest.TestLoader().loadTestsFromTestCase(TestLinearStateSpace) 52 | unittest.TextTestRunner(verbosity=2, stream=sys.stderr).run(suite) 53 | 54 | -------------------------------------------------------------------------------- /examples/preim1.py: -------------------------------------------------------------------------------- 1 | """ 2 | QE by Tom Sargent and John Stachurski. 
3 | Illustrates preimages of functions 4 | """ 5 | import matplotlib.pyplot as plt 6 | import numpy as np 7 | 8 | 9 | def f(x): 10 | return 0.6 * np.cos(4 * x) + 1.4 11 | 12 | 13 | xmin, xmax = -1, 1 14 | x = np.linspace(xmin, xmax, 160) 15 | y = f(x) 16 | ya, yb = np.min(y), np.max(y) 17 | 18 | fig, axes = plt.subplots(2, 1, figsize=(8, 8)) 19 | 20 | for ax in axes: 21 | # Set the axes through the origin 22 | for spine in ['left', 'bottom']: 23 | ax.spines[spine].set_position('zero') 24 | for spine in ['right', 'top']: 25 | ax.spines[spine].set_color('none') 26 | 27 | ax.set_ylim(-0.6, 3.2) 28 | ax.set_xlim(xmin, xmax) 29 | ax.set_yticks(()) 30 | ax.set_xticks(()) 31 | 32 | ax.plot(x, y, 'k-', lw=2, label=r'$f$') 33 | ax.fill_between(x, ya, yb, facecolor='blue', alpha=0.05) 34 | ax.vlines([0], ya, yb, lw=3, color='blue', label=r'range of $f$') 35 | ax.text(0.04, -0.3, '$0$', fontsize=16) 36 | 37 | ax = axes[0] 38 | 39 | ax.legend(loc='upper right', frameon=False) 40 | ybar = 1.5 41 | ax.plot(x, x * 0 + ybar, 'k--', alpha=0.5) 42 | ax.text(0.05, 0.8 * ybar, r'$y$', fontsize=16) 43 | for i, z in enumerate((-0.35, 0.35)): 44 | ax.vlines(z, 0, f(z), linestyle='--', alpha=0.5) 45 | ax.text(z, -0.2, r'$x_{}$'.format(i), fontsize=16) 46 | 47 | ax = axes[1] 48 | 49 | ybar = 2.6 50 | ax.plot(x, x * 0 + ybar, 'k--', alpha=0.5) 51 | ax.text(0.04, 0.91 * ybar, r'$y$', fontsize=16) 52 | 53 | plt.show() 54 | -------------------------------------------------------------------------------- /examples/web_network.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import re 3 | 4 | alphabet = 'abcdefghijklmnopqrstuvwxyz' 5 | 6 | 7 | def gen_rw_mat(n): 8 | "Generate an n x n matrix of zeros and ones." 9 | Q = np.random.randn(n, n) - 0.8 10 | Q = np.where(Q > 0, 1, 0) 11 | # Make sure that no row contains only zeros 12 | for i in range(n): 13 | if Q[i, :].sum() == 0: 14 | Q[i, np.random.randint(0, n, 1)] = 1 15 | return Q 16 | 17 | 18 | def adj_matrix_to_dot(Q, outfile='/tmp/foo_out.dot'): 19 | """ 20 | Convert an adjacency matrix to a dot file. 
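Each nonzero entry Q[i, j] becomes an edge line of the form 'a -> b;', with nodes
labelled by consecutive letters of the alphabet, and the edge list is wrapped in a
'digraph { ... }' block.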
21 | """ 22 | n = Q.shape[0] 23 | f = open(outfile, 'w') 24 | f.write('digraph {\n') 25 | for i in range(n): 26 | for j in range(n): 27 | if Q[i, j]: 28 | f.write(' {0} -> {1};\n'.format(alphabet[i], alphabet[j])) 29 | f.write('}\n') 30 | f.close() 31 | 32 | 33 | def dot_to_adj_matrix(node_num, infile='/tmp/foo_out.dot'): 34 | Q = np.zeros((node_num, node_num), dtype=int) 35 | f = open(infile, 'r') 36 | lines = f.readlines() 37 | f.close() 38 | edges = lines[1:-1] # Drop first and last lines 39 | for edge in edges: 40 | from_node, to_node = re.findall('\w', edge) 41 | i, j = alphabet.index(from_node), alphabet.index(to_node) 42 | Q[i, j] = 1 43 | return Q 44 | 45 | 46 | def adj_matrix_to_markov(Q): 47 | n = Q.shape[0] 48 | P = np.empty((n, n)) 49 | for i in range(n): 50 | P[i, :] = Q[i, :] / float(Q[i, :].sum()) 51 | return P 52 | -------------------------------------------------------------------------------- /quantecon/tests/test_tauchen.py: -------------------------------------------------------------------------------- 1 | """ 2 | Filename: test_tauchen.py 3 | Authors: Chase Coleman 4 | Date: 07/18/2014 5 | 6 | Tests for tauchen.py file 7 | 8 | """ 9 | import sys 10 | import os 11 | import unittest 12 | import numpy as np 13 | from numpy.testing import assert_allclose 14 | from quantecon.tauchen import approx_markov 15 | 16 | 17 | class TestApproxMarkov(unittest.TestCase): 18 | 19 | def setUp(self): 20 | self.rho, self.sigma_u = np.random.rand(2) 21 | self.n = np.random.random_integers(3, 25) 22 | self.m = np.random.random_integers(4) 23 | self.tol = 1e-12 24 | 25 | self.x, self.P = approx_markov(self.rho, self.sigma_u, self.m, self.n) 26 | 27 | def tearDown(self): 28 | del self.x 29 | del self.P 30 | 31 | def testShape(self): 32 | i, j = self.P.shape 33 | 34 | self.assertTrue(i == j) 35 | 36 | def testDim(self): 37 | dim_x = self.x.ndim 38 | dim_P = self.P.ndim 39 | 40 | self.assertTrue(dim_x == 1 and dim_P == 2) 41 | 42 | def test_transition_mat_row_sum_1(self): 43 | self.assertTrue(np.allclose(np.sum(self.P, axis=1), 1, atol=self.tol)) 44 | 45 | def test_positive_probs(self): 46 | self.assertTrue(np.all(self.P) > -self.tol) 47 | 48 | def test_states_sum_0(self): 49 | self.assertTrue(abs(np.sum(self.x)) < self.tol) 50 | 51 | 52 | 53 | if __name__ == '__main__': 54 | suite = unittest.TestLoader().loadTestsFromTestCase(TestApproxMarkov) 55 | unittest.TextTestRunner(verbosity=2, stream=sys.stderr).run(suite) 56 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | 2 | Copyright © 2013, 2014 Thomas J. Sargent and John Stachurski: BSD-3 3 | All rights reserved. 4 | 5 | Redistribution and use in source and binary forms, with or without 6 | modification, are permitted provided that the following conditions are met: 7 | 8 | 1. Redistributions of source code must retain the above copyright notice, this 9 | list of conditions and the following disclaimer. 10 | 11 | 2. Redistributions in binary form must reproduce the above copyright 12 | notice, this list of conditions and the following disclaimer in the 13 | documentation and/or other materials provided with the distribution. 14 | 15 | 3. Neither the name of the copyright holder nor the names of its 16 | contributors may be used to endorse or promote products derived from 17 | this software without specific prior written permission. 
18 | 19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 22 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 23 | HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 24 | INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 25 | BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS 26 | OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 27 | AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 | LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY 29 | WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 | POSSIBILITY OF SUCH DAMAGE. 31 | -------------------------------------------------------------------------------- /examples/lin_interp_3d_plot.py: -------------------------------------------------------------------------------- 1 | """ 2 | Origin: QE by John Stachurski and Thomas J. Sargent 3 | Filename: lin_inter_3d_plot.py 4 | Authors: John Stachurski, Thomas J. Sargent 5 | LastModified: 21/08/2013 6 | """ 7 | from scipy.interpolate import LinearNDInterpolator 8 | import matplotlib.pyplot as plt 9 | from mpl_toolkits.mplot3d.axes3d import Axes3D 10 | import numpy as np 11 | 12 | alpha = 0.7 13 | phi_ext = 2 * 3.14 * 0.5 14 | 15 | 16 | def f(a, b): 17 | # return 2 + alpha - 2 * np.cos(b)*np.cos(a) - alpha*np.cos(phi_ext - 2*b) 18 | return a + np.sqrt(b) 19 | 20 | x_max = 3 21 | y_max = 2.5 22 | 23 | # === the approximation grid === # 24 | Nx0, Ny0 = 25, 25 25 | x0 = np.linspace(0, x_max, Nx0) 26 | y0 = np.linspace(0, y_max, Ny0) 27 | X0, Y0 = np.meshgrid(x0, y0) 28 | points = np.column_stack((X0.ravel(1), Y0.ravel(1))) 29 | 30 | # === generate the function values on the grid === # 31 | Z0 = np.empty(Nx0 * Ny0) 32 | for i in range(len(Z0)): 33 | a, b = points[i, :] 34 | Z0[i] = f(a, b) 35 | 36 | g = LinearNDInterpolator(points, Z0) 37 | 38 | # === a grid for plotting === # 39 | Nx1, Ny1 = 100, 100 40 | x1 = np.linspace(0, x_max, Nx1) 41 | y1 = np.linspace(0, y_max, Ny1) 42 | X1, Y1 = np.meshgrid(x1, y1) 43 | 44 | # === the approximating function, as a matrix, for plotting === # 45 | # ZA = np.empty((Ny1, Nx1)) 46 | # for i in range(Ny1): 47 | # for j in range(Nx1): 48 | # ZA[i, j] = g(x1[j], y1[i]) 49 | ZA = g(X1, Y1) 50 | ZF = f(X1, Y1) 51 | 52 | # === plot === # 53 | fig = plt.figure(figsize=(8, 6)) 54 | ax = fig.add_subplot(1, 1, 1, projection='3d') 55 | p = ax.plot_wireframe(X1, Y1, ZF, rstride=4, cstride=4) 56 | plt.show() 57 | -------------------------------------------------------------------------------- /examples/qs.py: -------------------------------------------------------------------------------- 1 | 2 | import matplotlib.pyplot as plt 3 | import numpy as np 4 | from scipy.stats import norm 5 | from matplotlib import cm 6 | 7 | xmin, xmax = -4, 12 8 | x = 10 9 | alpha = 0.5 10 | 11 | m, v = x, 10 12 | 13 | xgrid = np.linspace(xmin, xmax, 200) 14 | 15 | fig, ax = plt.subplots() 16 | 17 | ax.spines['right'].set_color('none') 18 | ax.spines['top'].set_color('none') 19 | ax.spines['left'].set_color('none') 20 | ax.xaxis.set_ticks_position('bottom') 21 | ax.spines['bottom'].set_position(('data', 0)) 22 | 23 | ax.set_ylim(-0.05, 0.5) 24 | ax.set_xticks((x,)) 25 | ax.set_xticklabels((r'$x$',), fontsize=18) 26 | ax.set_yticks(()) 27 | 28 | K = 3 29 | for i in range(K): 30 | m = alpha * m 31 | v = alpha * 
alpha * v + 1 32 | f = norm(loc=m, scale=np.sqrt(v)) 33 | k = (i + 0.5) / K 34 | ax.plot(xgrid, f.pdf(xgrid), lw=1, color='black', alpha=0.4) 35 | ax.fill_between(xgrid, 0 * xgrid, f.pdf(xgrid), color=cm.jet(k), alpha=0.4) 36 | 37 | 38 | ax.annotate(r'$Q(x,\cdot)$', xy=(6.6, 0.2), xycoords='data', 39 | xytext=(20, 90), textcoords='offset points', fontsize=16, 40 | arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=-0.2")) 41 | ax.annotate(r'$Q^2(x,\cdot)$', xy=(3.6, 0.24), xycoords='data', 42 | xytext=(20, 90), textcoords='offset points', fontsize=16, 43 | arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=-0.2")) 44 | ax.annotate(r'$Q^3(x,\cdot)$', xy=(-0.2, 0.28), xycoords='data', 45 | xytext=(-90, 90), textcoords='offset points', fontsize=16, 46 | arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=0.2")) 47 | fig.show() 48 | -------------------------------------------------------------------------------- /quantecon/tests/test_discrete_rv.py: -------------------------------------------------------------------------------- 1 | """ 2 | tests for quantecon.discrete_rv 3 | 4 | @author : Spencer Lyon 5 | @date : 2014-07-31 6 | 7 | """ 8 | from __future__ import division 9 | from collections import Counter 10 | import unittest 11 | import numpy as np 12 | from numpy.testing import assert_allclose 13 | from nose.plugins.attrib import attr 14 | import pandas as pd 15 | from quantecon import DiscreteRV 16 | 17 | 18 | class TestDiscreteRV(unittest.TestCase): 19 | 20 | @classmethod 21 | def setUpClass(cls): 22 | x = np.random.rand(10) 23 | x /= x.sum() 24 | # make sure it sums to 1 25 | cls.x = x 26 | cls.drv = DiscreteRV(cls.x) 27 | 28 | def test_Q_updates(self): 29 | "discrete_rv: Q attributes updates on q change?" 30 | Q_init = np.copy(self.drv.Q) 31 | 32 | # change q, see if Q updates 33 | x = np.random.rand(10) 34 | x /= x.sum() 35 | self.drv.q = x 36 | Q_after = self.drv.Q 37 | 38 | # should be different 39 | self.assertFalse(np.allclose(Q_init, Q_after)) 40 | 41 | # clean up: reset values 42 | self.drv.q = self.x 43 | 44 | # now we should have our original Q back 45 | assert_allclose(Q_init, self.drv.Q) 46 | 47 | def test_Q_end_1(self): 48 | "discrete_rv: Q sums to 1" 49 | assert (self.drv.Q[-1] - 1.0 < 1e-10) 50 | 51 | @attr("slow") 52 | def test_draw_lln(self): 53 | "discrete_rv: lln satisfied?" 54 | draws = self.drv.draw(1000000) 55 | counts = pd.Series(Counter(draws)) 56 | counts = (counts / counts.sum()).values 57 | assert max(np.abs(counts - self.drv.q)) < 1e-2 58 | 59 | -------------------------------------------------------------------------------- /examples/perm_inc_ir.py: -------------------------------------------------------------------------------- 1 | """ 2 | Impulse response functions for the LQ permanent income model permanent and 3 | transitory shocks. 
4 | """ 5 | 6 | 7 | import numpy as np 8 | import matplotlib.pyplot as plt 9 | 10 | r = 0.05 11 | beta = 1 / (1 + r) 12 | T = 20 # Time horizon 13 | S = 5 # Impulse date 14 | sigma1 = sigma2 = 0.15 15 | 16 | 17 | def time_path(permanent=False): 18 | "Time path of consumption and debt given shock sequence" 19 | w1 = np.zeros(T+1) 20 | w2 = np.zeros(T+1) 21 | b = np.zeros(T+1) 22 | c = np.zeros(T+1) 23 | if permanent: 24 | w1[S+1] = 1.0 25 | else: 26 | w2[S+1] = 1.0 27 | for t in range(1, T): 28 | b[t+1] = b[t] - sigma2 * w2[t] 29 | c[t+1] = c[t] + sigma1 * w1[t+1] + (1 - beta) * sigma2 * w2[t+1] 30 | return b, c 31 | 32 | 33 | fig, axes = plt.subplots(2, 1) 34 | plt.subplots_adjust(hspace=0.5) 35 | p_args = {'lw': 2, 'alpha': 0.7} 36 | 37 | L = 0.175 38 | 39 | for ax in axes: 40 | ax.grid(alpha=0.5) 41 | ax.set_xlabel(r'Time') 42 | ax.set_ylim(-L, L) 43 | ax.plot((S, S), (-L, L), 'k-', lw=0.5) 44 | 45 | ax = axes[0] 46 | b, c = time_path(permanent=0) 47 | ax.set_title('impulse-response, transitory income shock') 48 | ax.plot(list(range(T+1)), c, 'g-', label="consumption", **p_args) 49 | ax.plot(list(range(T+1)), b, 'b-', label="debt", **p_args) 50 | ax.legend(loc='upper right') 51 | 52 | ax = axes[1] 53 | b, c = time_path(permanent=1) 54 | ax.set_title('impulse-response, permanent income shock') 55 | ax.plot(list(range(T+1)), c, 'g-', label="consumption", **p_args) 56 | ax.plot(list(range(T+1)), b, 'b-', label="debt", **p_args) 57 | ax.legend(loc='lower right') 58 | plt.show() 59 | -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | # QuantEcon documentation 2 | 3 | This is the main directory for the documentation for the `quantecon` python library. 4 | 5 | ## Dependencies 6 | 7 | The documentation requires a few dependencies beyond those necessary for the quantecon library. These dependencies are (warning, this may be an incomplete list): 8 | 9 | * sphinx 10 | * numpydoc 11 | * sphinx_rtd_theme 12 | 13 | You can install these by executing 14 | 15 | ``` 16 | pip install sphinx numpydoc sphinx_rtd_theme 17 | ``` 18 | 19 | ## Building the docs 20 | 21 | In order to generate the documentation, follow these steps: 22 | 23 | 1. Install the `quantecon` python library locally. Do to this enter the commands below: 24 | ``` 25 | cd .. 26 | python setup.py install 27 | cd docs 28 | ``` 29 | 2. From this directory, execute the local file `qe_apidoc.py` (for an explanation of what the file does, see the module level docstring in the file) 30 | ``` 31 | python qe_apidoc.py 32 | ``` 33 | 3. Run sphinx using the Makefile (this is the command for unix based system -- sorry windows users, you will have to google how to do this) 34 | ``` 35 | make html 36 | ``` 37 | 4. Open the file `build/html/index.html`. 38 | 39 | I have added a couple utility commands to the make file: 40 | 41 | ``` 42 | srcclean: 43 | rm -rf source/modules* 44 | rm -rf source/models* 45 | rm -rf source/tools* 46 | rm -f source/index.rst 47 | rm -f source/models.rst 48 | rm -f source/tools.rst 49 | 50 | myhtml: 51 | make srcclean 52 | cd .. 
&& python setup.py install && cd docs 53 | python qe_apidoc.py 54 | make html 55 | ``` 56 | 57 | Notice that we can automate steps 1-3 (and make sure we get a clean build) above by simply running `make myhtml` 58 | -------------------------------------------------------------------------------- /quantecon/tests/tests_models/test_career.py: -------------------------------------------------------------------------------- 1 | """ 2 | Tests for quantecon.carrer module 3 | 4 | @author : Spencer Lyon 5 | @date : 2014-07-31 6 | 7 | """ 8 | from __future__ import division 9 | import unittest 10 | import numpy as np 11 | from quantecon.models import CareerWorkerProblem 12 | 13 | 14 | class TestCareerWorkerProblem(unittest.TestCase): 15 | 16 | @classmethod 17 | def setUpClass(cls): 18 | cls.cp = CareerWorkerProblem() 19 | cls.v_init = np.random.rand(cls.cp.N, cls.cp.N) 20 | cls.v_prime = cls.cp.bellman_operator(cls.v_init) 21 | cls.greedy = cls.cp.get_greedy(cls.v_init) 22 | 23 | def test_bellman_shape(self): 24 | "career: bellman shape" 25 | assert self.v_init.shape == self.v_prime.shape 26 | 27 | def test_greedy_shape(self): 28 | "career: greedy shape" 29 | assert self.v_init.shape == self.greedy.shape 30 | 31 | def test_greedy_new_life(self): 32 | "career: want new life with worst job/career?" 33 | if (self.greedy == 3).any(): 34 | # if we ever want a new life, it will be with worst possible 35 | # theta and worst epsilon 36 | assert self.greedy[0, 0] == 3 37 | 38 | def test_greedy_new_job(self): 39 | "career: want new job with best carrer/worst job?" 40 | # we should want a new job with best career and worst job 41 | assert self.greedy[-1, 0] == 2 42 | 43 | def test_greedy_stay_put(self): 44 | "career: want to stayw with best career/job?" 45 | if (self.greedy == 1).any(): 46 | # if we ever want to stay put, it will be with best possible 47 | # theta and best epsilon 48 | assert self.greedy[-1, -1] == 1 49 | -------------------------------------------------------------------------------- /quantecon/tests/test_rank_nullspace.py: -------------------------------------------------------------------------------- 1 | """ 2 | Filename: test_tauchen.py 3 | Authors: Chase Coleman 4 | Date: 07/21/2014 5 | 6 | Tests for rank_nullspace.py file 7 | 8 | """ 9 | import sys 10 | import os 11 | import unittest 12 | import numpy as np 13 | from numpy.linalg import matrix_rank as np_rank 14 | from numpy.testing import assert_allclose 15 | from quantecon.rank_nullspace import rank_est, nullspace 16 | 17 | 18 | class TestRankNullspace(unittest.TestCase): 19 | 20 | def setUp(self): 21 | self.A1 = np.eye(6) 22 | self.A2 = np.array([[1., 0, 0], [0., 1., 0], [1., 1., 0.]]) 23 | self.A3 = np.zeros((3, 3)) 24 | 25 | def tearDown(self): 26 | del self.A1 27 | del self.A2 28 | del self.A3 29 | 30 | def testRankwithNumpy(self): 31 | A1, A2, A3 = self.A1, self.A2, self.A3 32 | qe_A1 = rank_est(A1) 33 | qe_A2 = rank_est(A2) 34 | qe_A3 = rank_est(A3) 35 | 36 | np_A1 = np_rank(A1) 37 | np_A2 = np_rank(A2) 38 | np_A3 = np_rank(A3) 39 | 40 | 41 | self.assertTrue(qe_A1 == np_A1 and qe_A2 == np_A2 and qe_A3 == np_A3) 42 | 43 | 44 | def testNullspacewithPaper(self): 45 | A1, A2, A3 = self.A1, self.A2, self.A3 46 | ns_A1 = nullspace(A1).squeeze() 47 | ns_A2 = nullspace(A2).squeeze() 48 | ns_A3 = nullspace(A3).squeeze() 49 | 50 | self.assertTrue(np.allclose(ns_A1, np.array([])) and 51 | np.allclose(ns_A2, np.array([0, 0, 1])) and 52 | np.allclose(ns_A3, np.eye(3))) 53 | 54 | 55 | if __name__ == '__main__': 56 | suite = 
unittest.TestLoader().loadTestsFromTestCase(TestRankNullspace) 57 | unittest.TextTestRunner(verbosity=2, stream=sys.stderr).run(suite) 58 | 59 | 60 | 61 | -------------------------------------------------------------------------------- /examples/3dvec.py: -------------------------------------------------------------------------------- 1 | """ 2 | QE by Tom Sargent and John Stachurski. 3 | Illustrates the span of two vectors in R^3. 4 | """ 5 | import numpy as np 6 | import matplotlib.pyplot as plt 7 | from matplotlib import cm 8 | from mpl_toolkits.mplot3d import Axes3D 9 | from scipy.interpolate import interp2d 10 | 11 | fig = plt.figure() 12 | ax = fig.gca(projection='3d') 13 | 14 | x_min, x_max = -5, 5 15 | y_min, y_max = -5, 5 16 | 17 | alpha, beta = 0.2, 0.1 18 | 19 | ax.set_xlim((x_min, x_max)) 20 | ax.set_ylim((x_min, x_max)) 21 | ax.set_zlim((x_min, x_max)) 22 | 23 | # Axes 24 | ax.set_xticks((0,)) 25 | ax.set_yticks((0,)) 26 | ax.set_zticks((0,)) 27 | gs = 3 28 | z = np.linspace(x_min, x_max, gs) 29 | x = np.zeros(gs) 30 | y = np.zeros(gs) 31 | ax.plot(x, y, z, 'k-', lw=2, alpha=0.5) 32 | ax.plot(z, x, y, 'k-', lw=2, alpha=0.5) 33 | ax.plot(y, z, x, 'k-', lw=2, alpha=0.5) 34 | 35 | 36 | # Fixed linear function, to generate a plane 37 | def f(x, y): 38 | return alpha * x + beta * y 39 | 40 | # Vector locations, by coordinate 41 | x_coords = np.array((3, 3)) 42 | y_coords = np.array((4, -4)) 43 | z = f(x_coords, y_coords) 44 | for i in (0, 1): 45 | ax.text(x_coords[i], y_coords[i], z[i], r'$a_{}$'.format(i+1), fontsize=14) 46 | 47 | # Lines to vectors 48 | for i in (0, 1): 49 | x = (0, x_coords[i]) 50 | y = (0, y_coords[i]) 51 | z = (0, f(x_coords[i], y_coords[i])) 52 | ax.plot(x, y, z, 'b-', lw=1.5, alpha=0.6) 53 | 54 | 55 | # Draw the plane 56 | grid_size = 20 57 | xr2 = np.linspace(x_min, x_max, grid_size) 58 | yr2 = np.linspace(y_min, y_max, grid_size) 59 | x2, y2 = np.meshgrid(xr2, yr2) 60 | z2 = f(x2, y2) 61 | ax.plot_surface(x2, y2, z2, rstride=1, cstride=1, cmap=cm.jet, 62 | linewidth=0, antialiased=True, alpha=0.2) 63 | plt.show() 64 | -------------------------------------------------------------------------------- /quantecon/meta.yaml: -------------------------------------------------------------------------------- 1 | package: 2 | name: quantecon 3 | version: !!str 0.1.2 4 | 5 | source: 6 | fn: quantecon-0.1.2.tar.gz 7 | url: https://pypi.python.org/packages/source/q/quantecon/quantecon-0.1.2.tar.gz 8 | md5: 245bcef5f18d1afe858cf95453c28934 9 | # patches: 10 | # List any patch files here 11 | # - fix.patch 12 | 13 | # build: 14 | #preserve_egg_dir: True 15 | #entry_points: 16 | # Put any entry points (scripts to be generated automatically) here. The 17 | # syntax is module:function. For example 18 | # 19 | # - quantecon = quantecon:main 20 | # 21 | # Would create an entry point called quantecon that calls quantecon.main() 22 | 23 | 24 | # If this is a new build for the same version, increment the build 25 | # number. If you do not include this key, it defaults to 0. 26 | # number: 1 27 | 28 | requirements: 29 | build: 30 | - python 31 | - setuptools 32 | # - numpy 33 | # - scipy 34 | # - pandas 35 | # - matplotlib 36 | 37 | run: 38 | - python 39 | - numpy 40 | - scipy 41 | - pandas 42 | - matplotlib 43 | 44 | test: 45 | # Python imports 46 | imports: 47 | - quantecon 48 | 49 | #commands: 50 | # You can put test commands to be run here. Use this to test that the 51 | # entry points work. 
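# e.g. (illustrative only):
# commands:
#   - python -c "import quantecon; print(quantecon.__version__)"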
52 | 53 | 54 | # You can also put a file called run_test.py in the recipe that will be run 55 | # at test time. 56 | 57 | # requires: 58 | # Put any additional test requirements here. For example 59 | # - nose 60 | 61 | about: 62 | home: https://github.com/jstac/quant-econ 63 | license: BSD 64 | summary: 'Code for quant-econ.net' 65 | 66 | # See 67 | # http://docs.continuum.io/conda/build.html for 68 | # more information about meta.yaml 69 | -------------------------------------------------------------------------------- /examples/perm_inc_figs.py: -------------------------------------------------------------------------------- 1 | """ 2 | Plots consumption, income and debt for the simple infinite horizon LQ 3 | permanent income model with Gaussian iid income. 4 | """ 5 | 6 | 7 | import random 8 | import numpy as np 9 | import matplotlib.pyplot as plt 10 | 11 | r = 0.05 12 | beta = 1 / (1 + r) 13 | T = 60 14 | sigma = 0.15 15 | mu = 1 16 | 17 | 18 | def time_path(): 19 | w = np.random.randn(T+1) # w_0, w_1, ..., w_T 20 | w[0] = 0 21 | b = np.zeros(T+1) 22 | for t in range(1, T+1): 23 | b[t] = w[1:t].sum() 24 | b = - sigma * b 25 | c = mu + (1 - beta) * (sigma * w - b) 26 | return w, b, c 27 | 28 | 29 | # == Figure showing a typical realization == # 30 | 31 | if 1: 32 | fig, ax = plt.subplots() 33 | 34 | p_args = {'lw': 2, 'alpha': 0.7} 35 | ax.grid() 36 | ax.set_xlabel(r'Time') 37 | bbox = (0., 1.02, 1., .102) 38 | legend_args = {'bbox_to_anchor': bbox, 'loc': 'upper left', 39 | 'mode': 'expand'} 40 | 41 | w, b, c = time_path() 42 | ax.plot(list(range(T+1)), mu + sigma * w, 'g-', 43 | label="non-financial income", **p_args) 44 | ax.plot(list(range(T+1)), c, 'k-', label="consumption", **p_args) 45 | ax.plot(list(range(T+1)), b, 'b-', label="debt", **p_args) 46 | ax.legend(ncol=3, **legend_args) 47 | 48 | plt.show() 49 | 50 | # == Figure showing multiple consumption paths == # 51 | 52 | if 0: 53 | fig, ax = plt.subplots() 54 | 55 | p_args = {'lw': 0.8, 'alpha': 0.7} 56 | ax.grid() 57 | ax.set_xlabel(r'Time') 58 | ax.set_ylabel(r'Consumption') 59 | b_sum = np.zeros(T+1) 60 | for i in range(250): 61 | rcolor = random.choice(('c', 'g', 'b', 'k')) 62 | w, b, c = time_path() 63 | ax.plot(list(range(T+1)), c, color=rcolor, **p_args) 64 | 65 | plt.show() 66 | -------------------------------------------------------------------------------- /examples/evans_sargent_plot2.py: -------------------------------------------------------------------------------- 1 | """ 2 | Plot 2 from the Evans Sargent model. 3 | 4 | @author: David Evans 5 | Edited by: John Stachurski 6 | 7 | """ 8 | import numpy as np 9 | import matplotlib.pyplot as plt 10 | from evans_sargent import T, uhatdif, tauhatdif, mu, G 11 | 12 | tt = np.arange(T) # tt is used to make the plot time index correct. 
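# tauhatdif appears to have T - 1 entries, so it is plotted against tt2 (defined
# next), while uhatdif, mu and G are plotted against tt.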
13 | tt2 = np.arange(T-1) 14 | 15 | n_rows = 4 16 | fig, axes = plt.subplots(n_rows, 1, figsize=(10, 16)) 17 | 18 | plt.subplots_adjust(hspace=0.5) 19 | for ax in axes: 20 | ax.grid(alpha=.5) 21 | ax.set_xlim(-0.5, 15) 22 | 23 | bbox = (0., 1.02, 1., .102) 24 | legend_args = {'bbox_to_anchor': bbox, 'loc': 3, 'mode': 'expand'} 25 | p_args = {'lw': 2, 'alpha': 0.7} 26 | 27 | ax = axes[0] 28 | ax.plot(tt2, tauhatdif, label=r'time inconsistency differential for tax rate', 29 | **p_args) 30 | ax.set_ylabel(r"$\Delta\tau$", fontsize=16) 31 | ax.set_ylim(-0.1, 1.4) 32 | ax.set_yticks((0.0, 0.4, 0.8, 1.2)) 33 | ax.legend(ncol=1, **legend_args) 34 | 35 | ax = axes[1] 36 | ax.plot(tt, uhatdif, label=r'time inconsistency differential for $u$', 37 | **p_args) 38 | ax.set_ylabel(r"$\Delta u$", fontsize=16) 39 | ax.set_ylim(-3, .1) 40 | ax.set_yticks((-3.0, -2.0, -1.0, 0.0)) 41 | ax.legend(ncol=1, **legend_args) 42 | 43 | ax = axes[2] 44 | ax.plot(tt, mu, label='Lagrange multiplier', **p_args) 45 | ax.set_ylabel(r"$\mu$", fontsize=16) 46 | ax.set_ylim(2.34e-3, 2.52e-3) 47 | ax.set_yticks((2.34e-3, 2.43e-3, 2.52e-3)) 48 | ax.legend(ncol=1, **legend_args) 49 | 50 | ax = axes[3] 51 | ax.plot(tt, G, label='government revenue', **p_args) 52 | ax.set_ylabel(r"$G$", fontsize=16) 53 | ax.set_ylim(9100, 9800) 54 | ax.set_yticks((9200, 9400, 9600, 9800)) 55 | ax.legend(ncol=1, **legend_args) 56 | 57 | ax.set_xlabel(r'time', fontsize=16) 58 | 59 | plt.show() 60 | # lines = plt.plot(tt, GPay, "o") 61 | -------------------------------------------------------------------------------- /quantecon/compute_fp.py: -------------------------------------------------------------------------------- 1 | """ 2 | Filename: compute_fp.py 3 | Authors: Thomas Sargent, John Stachurski 4 | 5 | Compute the fixed point of a given operator T, starting from 6 | specified initial condition v. 7 | 8 | """ 9 | 10 | import numpy as np 11 | 12 | 13 | def compute_fixed_point(T, v, error_tol=1e-3, max_iter=50, verbose=1, *args, 14 | **kwargs): 15 | """ 16 | Computes and returns :math:`T^k v`, an approximate fixed point. 17 | 18 | Here T is an operator, v is an initial condition and k is the number 19 | of iterates. Provided that T is a contraction mapping or similar, 20 | :math:`T^k v` will be an approximation to the fixed point. 21 | 22 | Parameters 23 | ---------- 24 | T : callable 25 | A callable object (e.g., function) that acts on v 26 | v : object 27 | An object such that T(v) is defined 28 | error_tol : scalar(float), optional(default=1e-3) 29 | Error tolerance 30 | max_iter : scalar(int), optional(default=50) 31 | Maximum number of iterations 32 | verbose : bool, optional(default=True) 33 | If True then print current error at each iterate. 
34 | args, kwargs : 35 | Other arguments and keyword arguments that are passed directly 36 | to the function T each time it is called 37 | 38 | Returns 39 | ------- 40 | v : object 41 | The approximate fixed point 42 | 43 | """ 44 | iterate = 0 45 | error = error_tol + 1 46 | while iterate < max_iter and error > error_tol: 47 | new_v = T(v, *args, **kwargs) 48 | iterate += 1 49 | error = np.max(np.abs(new_v - v)) 50 | if verbose: 51 | print("Computed iterate %d with error %f" % (iterate, error)) 52 | try: 53 | v[:] = new_v 54 | except TypeError: 55 | v = new_v 56 | return v 57 | -------------------------------------------------------------------------------- /quantecon/discrete_rv.py: -------------------------------------------------------------------------------- 1 | """ 2 | Filename: discrete_rv.py 3 | 4 | Authors: Thomas Sargent, John Stachurski 5 | 6 | Generates an array of draws from a discrete random variable with a 7 | specified vector of probabilities. 8 | 9 | """ 10 | 11 | from numpy import cumsum 12 | from numpy.random import uniform 13 | 14 | 15 | class DiscreteRV(object): 16 | """ 17 | Generates an array of draws from a discrete random variable with 18 | vector of probabilities given by q. 19 | 20 | Parameters 21 | ---------- 22 | q : array_like(float) 23 | Nonnegative numbers that sum to 1 24 | 25 | Attributes 26 | ---------- 27 | q : see Parameters 28 | Q : array_like(float) 29 | The cumulative sum of q 30 | 31 | """ 32 | 33 | def __init__(self, q): 34 | self._q = q 35 | self.Q = cumsum(q) 36 | 37 | def __repr__(self): 38 | return "DiscreteRV with {n} elements".format(n=self._q.size) 39 | 40 | def __str__(self): 41 | return self.__repr__() 42 | 43 | @property 44 | def q(self): 45 | """ 46 | Getter method for q. 47 | 48 | """ 49 | return self._q 50 | 51 | @q.setter 52 | def q(self, val): 53 | """ 54 | Setter method for q. 55 | 56 | """ 57 | self._q = val 58 | self.Q = cumsum(val) 59 | 60 | def draw(self, k=1): 61 | """ 62 | Returns k draws from q. 63 | 64 | For each such draw, the value i is returned with probability 65 | q[i]. 
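As an illustration (the weights here are hypothetical), if q = [0.2, 0.3, 0.5] then Q = cumsum(q) = [0.2, 0.5, 1.0], and each draw is Q.searchsorted(u) for a uniform u on (0, 1); for instance u = 0.35 lies between Q[0] = 0.2 and Q[1] = 0.5, so that draw returns index 1.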
66 | 67 | Parameters 68 | ----------- 69 | k : scalar(int), optional 70 | Number of draws to be returned 71 | 72 | Returns 73 | ------- 74 | array_like(int) 75 | An array of k independent draws from q 76 | 77 | """ 78 | return self.Q.searchsorted(uniform(0, 1, size=k)) 79 | -------------------------------------------------------------------------------- /MANIFEST: -------------------------------------------------------------------------------- 1 | # file GENERATED by distutils, do NOT edit 2 | setup.cfg 3 | setup.py 4 | quantecon/__init__.py 5 | quantecon/arma.py 6 | quantecon/cartesian.py 7 | quantecon/ce_util.py 8 | quantecon/compute_fp.py 9 | quantecon/discrete_rv.py 10 | quantecon/distributions.py 11 | quantecon/ecdf.py 12 | quantecon/estspec.py 13 | quantecon/graph_tools.py 14 | quantecon/gth_solve.py 15 | quantecon/ivp.py 16 | quantecon/kalman.py 17 | quantecon/lae.py 18 | quantecon/lqcontrol.py 19 | quantecon/lqnash.py 20 | quantecon/lss.py 21 | quantecon/matrix_eqn.py 22 | quantecon/mc_tools.py 23 | quantecon/quad.py 24 | quantecon/quadsums.py 25 | quantecon/rank_nullspace.py 26 | quantecon/robustlq.py 27 | quantecon/tauchen.py 28 | quantecon/timing.py 29 | quantecon/version.py 30 | quantecon/models/__init__.py 31 | quantecon/models/asset_pricing.py 32 | quantecon/models/career.py 33 | quantecon/models/ifp.py 34 | quantecon/models/jv.py 35 | quantecon/models/lucastree.py 36 | quantecon/models/odu.py 37 | quantecon/models/optgrowth.py 38 | quantecon/tests/__init__.py 39 | quantecon/tests/test_arma.py 40 | quantecon/tests/test_cartesian.py 41 | quantecon/tests/test_compute_fp.py 42 | quantecon/tests/test_discrete_rv.py 43 | quantecon/tests/test_ecdf.py 44 | quantecon/tests/test_estspec.py 45 | quantecon/tests/test_graph_tools.py 46 | quantecon/tests/test_gth_solve.py 47 | quantecon/tests/test_ivp.py 48 | quantecon/tests/test_kalman.py 49 | quantecon/tests/test_lae.py 50 | quantecon/tests/test_lqcontrol.py 51 | quantecon/tests/test_lqnash.py 52 | quantecon/tests/test_lss.py 53 | quantecon/tests/test_lyapunov.py 54 | quantecon/tests/test_matrix_eqn.py 55 | quantecon/tests/test_mc_tools.py 56 | quantecon/tests/test_quad.py 57 | quantecon/tests/test_quadsum.py 58 | quantecon/tests/test_rank_nullspace.py 59 | quantecon/tests/test_ricatti.py 60 | quantecon/tests/test_robustlq.py 61 | quantecon/tests/test_tauchen.py 62 | quantecon/tests/test_timing.py 63 | quantecon/tests/util.py 64 | -------------------------------------------------------------------------------- /examples/lq_permanent_1.py: -------------------------------------------------------------------------------- 1 | """ 2 | Filename: lq_permanent_1.py 3 | Authors: John Stachurski and Thomas J. 
Sargent 4 | 5 | A permanent income / life-cycle model with iid income 6 | """ 7 | 8 | import numpy as np 9 | import matplotlib.pyplot as plt 10 | from quantecon import LQ 11 | 12 | # == Model parameters == # 13 | r = 0.05 14 | beta = 1 / (1 + r) 15 | T = 45 16 | c_bar = 2 17 | sigma = 0.25 18 | mu = 1 19 | q = 1e6 20 | 21 | # == Formulate as an LQ problem == # 22 | Q = 1 23 | R = np.zeros((2, 2)) 24 | Rf = np.zeros((2, 2)) 25 | Rf[0, 0] = q 26 | A = [[1 + r, -c_bar + mu], 27 | [0, 1]] 28 | B = [[-1], 29 | [0]] 30 | C = [[sigma], 31 | [0]] 32 | 33 | # == Compute solutions and simulate == # 34 | lq = LQ(Q, R, A, B, C, beta=beta, T=T, Rf=Rf) 35 | x0 = (0, 1) 36 | xp, up, wp = lq.compute_sequence(x0) 37 | 38 | # == Convert back to assets, consumption and income == # 39 | assets = xp[0, :] # a_t 40 | c = up.flatten() + c_bar # c_t 41 | income = wp[0, 1:] + mu # y_t 42 | 43 | # == Plot results == # 44 | n_rows = 2 45 | fig, axes = plt.subplots(n_rows, 1, figsize=(12, 10)) 46 | 47 | plt.subplots_adjust(hspace=0.5) 48 | for i in range(n_rows): 49 | axes[i].grid() 50 | axes[i].set_xlabel(r'Time') 51 | bbox = (0., 1.02, 1., .102) 52 | legend_args = {'bbox_to_anchor': bbox, 'loc': 3, 'mode': 'expand'} 53 | p_args = {'lw': 2, 'alpha': 0.7} 54 | 55 | axes[0].plot(list(range(1, T+1)), income, 'g-', label="non-financial income", 56 | **p_args) 57 | axes[0].plot(list(range(T)), c, 'k-', label="consumption", **p_args) 58 | axes[0].legend(ncol=2, **legend_args) 59 | 60 | axes[1].plot(list(range(1, T+1)), np.cumsum(income - mu), 'r-', 61 | label="cumulative unanticipated income", **p_args) 62 | axes[1].plot(list(range(T+1)), assets, 'b-', label="assets", **p_args) 63 | axes[1].plot(list(range(T)), np.zeros(T), 'k-') 64 | axes[1].legend(ncol=2, **legend_args) 65 | 66 | plt.show() 67 | -------------------------------------------------------------------------------- /quantecon/tests/test_ricatti.py: -------------------------------------------------------------------------------- 1 | """ 2 | Filename: test_ricatti.py 3 | Authors: Chase Coleman, John Stachurski 4 | Date: 07/22/2014 5 | 6 | Tests for solve_discrete_riccati in matrix_eqn.py 7 | 8 | """ 9 | import numpy as np 10 | from numpy.testing import assert_allclose 11 | from quantecon.matrix_eqn import solve_discrete_riccati 12 | 13 | def dare_test_golden_num_float(): 14 | val = solve_discrete_riccati(1.0, 1.0, 1.0, 1.0) 15 | gold_ratio = (1 + np.sqrt(5)) / 2. 16 | assert_allclose(val, gold_ratio) 17 | 18 | def dare_test_golden_num_2d(): 19 | A, B, R, Q = np.eye(2), np.eye(2), np.eye(2), np.eye(2) 20 | gold_diag = np.eye(2) * (1 + np.sqrt(5)) / 2.
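# With A = B = Q = R = I, the matrix Riccati equation decouples into the scalar
# equation x = 1 + x - x**2 / (x + 1), i.e. x**2 - x - 1 = 0, whose positive root
# is the golden ratio used in gold_diag above.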
21 | val = solve_discrete_riccati(A, B, R, Q) 22 | assert_allclose(val, gold_diag) 23 | 24 | def dare_test_tjm_1(): 25 | A = [[0.0, 0.1, 0.0], 26 | [0.0, 0.0, 0.1], 27 | [0.0, 0.0, 0.0]] 28 | B = [[1.0, 0.0], 29 | [0.0, 0.0], 30 | [0.0, 1.0]] 31 | Q = [[10**5, 0.0, 0.0], 32 | [0.0, 10**3, 0.0], 33 | [0.0, 0.0, -10.0]] 34 | R = [[0.0, 0.0], 35 | [0.0, 1.0]] 36 | X = solve_discrete_riccati(A, B, Q, R) 37 | Y = np.diag((1e5, 1e3, 0.0)) 38 | assert_allclose(X, Y, atol=1e-07) 39 | 40 | 41 | def dare_test_tjm_2(): 42 | A = [[0, -1], 43 | [0, 2]] 44 | B = [[1, 0], 45 | [1, 1]] 46 | Q = [[1, 0], 47 | [0, 0]] 48 | R = [[4, 2], 49 | [2, 1]] 50 | X = solve_discrete_riccati(A, B, Q, R) 51 | Y = np.zeros((2, 2)) 52 | Y[0, 0] = 1 53 | assert_allclose(X, Y, atol=1e-07) 54 | 55 | 56 | def dare_test_tjm_3(): 57 | r = 0.5 58 | I = np.identity(2) 59 | A = [[2 + r**2, 0], 60 | [0, 0]] 61 | A = np.array(A) 62 | B = I 63 | R = [[1, r], 64 | [r, r*r]] 65 | Q = I - np.dot(A.T, A) + np.dot(A.T, np.linalg.solve(R + I, A)) 66 | X = solve_discrete_riccati(A, B, Q, R) 67 | Y = np.identity(2) 68 | assert_allclose(X, Y, atol=1e-07) 69 | -------------------------------------------------------------------------------- /examples/stochasticgrowth.py: -------------------------------------------------------------------------------- 1 | """ 2 | Neoclassical growth model with constant savings rate, where the dynamics are 3 | given by 4 | 5 | k_{t+1} = s A_{t+1} f(k_t) + (1 - delta) k_t 6 | 7 | Marginal densities are computed using the look-ahead estimator. Thus, the 8 | estimate of the density psi_t of k_t is 9 | 10 | (1/n) sum_{i=0}^n p(k_{t-1}^i, y) 11 | 12 | This is a density in y. 13 | """ 14 | import numpy as np 15 | import matplotlib.pyplot as plt 16 | from scipy.stats import lognorm, beta 17 | from quantecon import LAE 18 | 19 | # == Define parameters == # 20 | s = 0.2 21 | delta = 0.1 22 | a_sigma = 0.4 # A = exp(B) where B ~ N(0, a_sigma) 23 | alpha = 0.4 # We set f(k) = k**alpha 24 | psi_0 = beta(5, 5, scale=0.5) # Initial distribution 25 | phi = lognorm(a_sigma) 26 | 27 | 28 | def p(x, y): 29 | """ 30 | Stochastic kernel for the growth model with Cobb-Douglas production. 31 | Both x and y must be strictly positive. 32 | """ 33 | d = s * x**alpha 34 | return phi.pdf((y - (1 - delta) * x) / d) / d 35 | 36 | n = 10000 # Number of observations at each date t 37 | T = 30 # Compute density of k_t at 1,...,T+1 38 | 39 | # == Generate matrix s.t. t-th column is n observations of k_t == # 40 | k = np.empty((n, T)) 41 | A = phi.rvs((n, T)) 42 | k[:, 0] = psi_0.rvs(n) # Draw first column from initial distribution 43 | for t in range(T-1): 44 | k[:, t+1] = s * A[:, t] * k[:, t]**alpha + (1 - delta) * k[:, t] 45 | 46 | # == Generate T instances of LAE using this data, one for each date t == # 47 | laes = [LAE(p, k[:, t]) for t in range(T)] 48 | 49 | # == Plot == # 50 | fig, ax = plt.subplots() 51 | ygrid = np.linspace(0.01, 4.0, 200) 52 | greys = [str(g) for g in np.linspace(0.0, 0.8, T)] 53 | greys.reverse() 54 | for psi, g in zip(laes, greys): 55 | ax.plot(ygrid, psi(ygrid), color=g, lw=2, alpha=0.6) 56 | ax.set_xlabel('capital') 57 | title = r'Density of $k_1$ (lighter) to $k_T$ (darker) for $T={}$' 58 | ax.set_title(title.format(T)) 59 | plt.show() 60 | -------------------------------------------------------------------------------- /examples/illustrates_lln.py: -------------------------------------------------------------------------------- 1 | """ 2 | Filename: illustrates_lln.py 3 | Authors: John Stachurski and Thomas J. 
Sargent 4 | 5 | Visual illustration of the law of large numbers. 6 | """ 7 | 8 | import random 9 | import numpy as np 10 | from scipy.stats import t, beta, lognorm, expon, gamma, poisson 11 | import matplotlib.pyplot as plt 12 | 13 | n = 100 14 | 15 | # == Arbitrary collection of distributions == # 16 | distributions = {"student's t with 10 degrees of freedom": t(10), 17 | "beta(2, 2)": beta(2, 2), 18 | "lognormal LN(0, 1/2)": lognorm(0.5), 19 | "gamma(5, 1/2)": gamma(5, scale=2), 20 | "poisson(4)": poisson(4), 21 | "exponential with lambda = 1": expon(1)} 22 | 23 | # == Create a figure and some axes == # 24 | num_plots = 3 25 | fig, axes = plt.subplots(num_plots, 1, figsize=(10, 10)) 26 | 27 | # == Set some plotting parameters to improve layout == # 28 | bbox = (0., 1.02, 1., .102) 29 | legend_args = {'ncol': 2, 30 | 'bbox_to_anchor': bbox, 31 | 'loc': 3, 32 | 'mode': 'expand'} 33 | plt.subplots_adjust(hspace=0.5) 34 | 35 | for ax in axes: 36 | # == Choose a randomly selected distribution == # 37 | name = random.choice(list(distributions.keys())) 38 | distribution = distributions.pop(name) 39 | 40 | # == Generate n draws from the distribution == # 41 | data = distribution.rvs(n) 42 | 43 | # == Compute sample mean at each n == # 44 | sample_mean = np.empty(n) 45 | for i in range(n): 46 | sample_mean[i] = np.mean(data[:i+1]) 47 | 48 | # == Plot == # 49 | ax.plot(list(range(n)), data, 'o', color='grey', alpha=0.5) 50 | axlabel = r'$\bar X_n$' + ' for ' + r'$X_i \sim$' + ' ' + name 51 | ax.plot(list(range(n)), sample_mean, 'g-', lw=3, alpha=0.6, label=axlabel) 52 | m = distribution.mean() 53 | ax.plot(list(range(n)), [m] * n, 'k--', lw=1.5, label=r'$\mu$') 54 | ax.vlines(list(range(n)), m, data, lw=0.2) 55 | ax.legend(**legend_args) 56 | 57 | plt.show() 58 | -------------------------------------------------------------------------------- /quantecon/timing.py: -------------------------------------------------------------------------------- 1 | """ 2 | Filename: timing.py 3 | Authors: Pablo Winant 4 | Date: 10/16/14 5 | Provides Matlab-like tic, tac and toc functions. 6 | """ 7 | 8 | 9 | class __Timer__: 10 | '''Computes elapsed time, between tic, tac, and toc. 11 | 12 | Methods 13 | ------- 14 | tic : 15 | Resets timer. 16 | toc : 17 | Returns and prints time elapsed since last tic(). 18 | tac : 19 | Returns and prints time elapsed since last 20 | tic(), tac() or toc() whichever occured last. 
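A minimal usage sketch (work_stage_one and work_stage_two stand in for arbitrary user code):

    from quantecon.timing import tic, tac, toc
    tic()              # reset the timer
    work_stage_one()   # ... some code ...
    tac()              # prints time elapsed since tic()
    work_stage_two()   # ... more code ...
    toc()              # prints total time elapsed since tic()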
21 | ''' 22 | 23 | start = None 24 | last = None 25 | 26 | def tic(self): 27 | """Resets timer.""" 28 | 29 | import time 30 | 31 | t = time.time() 32 | self.start = t 33 | self.last = t 34 | 35 | def tac(self): 36 | """Returns and prints time elapsed since last tic(), tac() or toc(), whichever occurred last.""" 37 | 38 | import time 39 | 40 | if self.start is None: 41 | raise Exception("tac() without tic()") 42 | 43 | t = time.time() 44 | elapsed = t-self.last 45 | self.last = t 46 | 47 | print("TAC: Elapsed: {} seconds.".format(elapsed)) 48 | return elapsed 49 | 50 | def toc(self): 51 | """Returns and prints total time elapsed since the 52 | last tic().""" 53 | 54 | import time 55 | 56 | if self.start is None: 57 | raise Exception("toc() without tic()") 58 | 59 | t = time.time() 60 | self.last = t 61 | elapsed = t-self.start 62 | 63 | print("TOC: Elapsed: {} seconds.".format(elapsed)) 64 | return elapsed 65 | 66 | __timer__ = __Timer__() 67 | 68 | 69 | def tic(): 70 | """Saves time for future use with tac or toc.""" 71 | return __timer__.tic() 72 | 73 | 74 | def tac(): 75 | """Prints and returns elapsed time since last tic, tac or toc.""" 76 | return __timer__.tac() 77 | 78 | 79 | def toc(): 80 | """Prints and returns total elapsed time since the last tic.""" 81 | return __timer__.toc() 82 | -------------------------------------------------------------------------------- /quantecon/tauchen.py: -------------------------------------------------------------------------------- 1 | """ 2 | Filename: tauchen.py 3 | 4 | Authors: Thomas Sargent, John Stachurski 5 | 6 | Discretizes Gaussian linear AR(1) processes via Tauchen's method 7 | 8 | """ 9 | 10 | import numpy as np 11 | from scipy.stats import norm 12 | 13 | 14 | def approx_markov(rho, sigma_u, m=3, n=7): 15 | """ 16 | Computes the Markov matrix associated with a discretized version of 17 | the linear Gaussian AR(1) process 18 | 19 | y_{t+1} = rho * y_t + u_{t+1} 20 | 21 | according to Tauchen's method. Here {u_t} is an iid Gaussian 22 | process with zero mean.
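As a sketch of typical use (the parameter values here are arbitrary),

    x, P = approx_markov(rho=0.9, sigma_u=0.1)

returns, with the defaults m=3 and n=7, a grid x of 7 points spanning three unconditional standard deviations either side of zero, together with a 7 x 7 transition matrix P whose rows each sum to one.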
23 | 24 | Parameters 25 | ---------- 26 | rho : scalar(float) 27 | The autocorrelation coefficient 28 | sigma_u : scalar(float) 29 | The standard deviation of the random process 30 | m : scalar(int), optional(default=3) 31 | The number of standard deviations to approximate out to 32 | n : scalar(int), optional(default=7) 33 | The number of states to use in the approximation 34 | 35 | Returns 36 | ------- 37 | 38 | x : array_like(float, ndim=1) 39 | The state space of the discretized process 40 | P : array_like(float, ndim=2) 41 | The Markov transition matrix where P[i, j] is the probability 42 | of transitioning from x[i] to x[j] 43 | 44 | """ 45 | F = norm(loc=0, scale=sigma_u).cdf 46 | 47 | # standard deviation of y_t 48 | std_y = np.sqrt(sigma_u**2 / (1-rho**2)) 49 | 50 | # top of discrete state space 51 | x_max = m * std_y 52 | 53 | # bottom of discrete state space 54 | x_min = - x_max 55 | 56 | # discretized state space 57 | x = np.linspace(x_min, x_max, n) 58 | 59 | step = (x_max - x_min) / (n - 1) 60 | half_step = 0.5 * step 61 | P = np.empty((n, n)) 62 | 63 | for i in range(n): 64 | P[i, 0] = F(x[0]-rho * x[i] + half_step) 65 | P[i, n-1] = 1 - F(x[n-1] - rho * x[i] - half_step) 66 | for j in range(1, n-1): 67 | z = x[j] - rho * x[i] 68 | P[i, j] = F(z + half_step) - F(z - half_step) 69 | 70 | return x, P 71 | -------------------------------------------------------------------------------- /examples/odu_vfi_plots.py: -------------------------------------------------------------------------------- 1 | """ 2 | Filename: odu_vfi_plots.py 3 | Authors: John Stachurski and Thomas Sargent 4 | """ 5 | 6 | import matplotlib.pyplot as plt 7 | from mpl_toolkits.mplot3d.axes3d import Axes3D 8 | from matplotlib import cm 9 | from scipy.interpolate import LinearNDInterpolator 10 | import numpy as np 11 | from quantecon import compute_fixed_point 12 | from quantecon.models import SearchProblem 13 | 14 | 15 | sp = SearchProblem(w_grid_size=100, pi_grid_size=100) 16 | v_init = np.zeros(len(sp.grid_points)) + sp.c / (1 - sp.beta) 17 | v = compute_fixed_point(sp.bellman_operator, v_init) 18 | policy = sp.get_greedy(v) 19 | 20 | # Make functions from these arrays by interpolation 21 | vf = LinearNDInterpolator(sp.grid_points, v) 22 | pf = LinearNDInterpolator(sp.grid_points, policy) 23 | 24 | pi_plot_grid_size, w_plot_grid_size = 100, 100 25 | pi_plot_grid = np.linspace(0.001, 0.99, pi_plot_grid_size) 26 | w_plot_grid = np.linspace(0, sp.w_max, w_plot_grid_size) 27 | 28 | # plot_choice = 'value_function' 29 | plot_choice = 'policy_function' 30 | 31 | if plot_choice == 'value_function': 32 | Z = np.empty((w_plot_grid_size, pi_plot_grid_size)) 33 | for i in range(w_plot_grid_size): 34 | for j in range(pi_plot_grid_size): 35 | Z[i, j] = vf(w_plot_grid[i], pi_plot_grid[j]) 36 | fig, ax = plt.subplots() 37 | ax.contourf(pi_plot_grid, w_plot_grid, Z, 12, alpha=0.6, cmap=cm.jet) 38 | cs = ax.contour(pi_plot_grid, w_plot_grid, Z, 12, colors="black") 39 | ax.clabel(cs, inline=1, fontsize=10) 40 | ax.set_xlabel('pi', fontsize=14) 41 | ax.set_ylabel('wage', fontsize=14) 42 | else: 43 | Z = np.empty((w_plot_grid_size, pi_plot_grid_size)) 44 | for i in range(w_plot_grid_size): 45 | for j in range(pi_plot_grid_size): 46 | Z[i, j] = pf(w_plot_grid[i], pi_plot_grid[j]) 47 | fig, ax = plt.subplots() 48 | ax.contourf(pi_plot_grid, w_plot_grid, Z, 1, alpha=0.6, cmap=cm.jet) 49 | ax.contour(pi_plot_grid, w_plot_grid, Z, 1, colors="black") 50 | ax.set_xlabel('pi', fontsize=14) 51 | ax.set_ylabel('wage', fontsize=14) 52 | 
ax.text(0.4, 1.0, 'reject') 53 | ax.text(0.7, 1.8, 'accept') 54 | 55 | plt.show() 56 | -------------------------------------------------------------------------------- /examples/optgrowth_v0.py: -------------------------------------------------------------------------------- 1 | """ 2 | Filename: optgrowth_v0.py 3 | Authors: John Stachurski and Thomas Sargent 4 | 5 | A first pass at solving the optimal growth problem via value function 6 | iteration. A more general version is provided in optgrowth.py. 7 | 8 | """ 9 | from __future__ import division # Omit for Python 3.x 10 | import matplotlib.pyplot as plt 11 | import numpy as np 12 | from numpy import log 13 | from scipy.optimize import fminbound 14 | from scipy import interp 15 | 16 | # Primitives and grid 17 | alpha = 0.65 18 | beta = 0.95 19 | grid_max = 2 20 | grid_size = 150 21 | grid = np.linspace(1e-6, grid_max, grid_size) 22 | # Exact solution 23 | ab = alpha * beta 24 | c1 = (log(1 - ab) + log(ab) * ab / (1 - ab)) / (1 - beta) 25 | c2 = alpha / (1 - ab) 26 | 27 | 28 | def v_star(k): 29 | return c1 + c2 * log(k) 30 | 31 | 32 | def bellman_operator(w): 33 | """ 34 | The approximate Bellman operator, which computes and returns the updated 35 | value function Tw on the grid points. 36 | 37 | * w is a flat NumPy array with len(w) = len(grid) 38 | 39 | The vector w represents the value of the input function on the grid 40 | points. 41 | """ 42 | # === Apply linear interpolation to w === # 43 | Aw = lambda x: interp(x, grid, w) 44 | 45 | # === set Tw[i] equal to max_c { log(c) + beta w(f(k_i) - c)} === # 46 | Tw = np.empty(grid_size) 47 | for i, k in enumerate(grid): 48 | objective = lambda c: - log(c) - beta * Aw(k**alpha - c) 49 | c_star = fminbound(objective, 1e-6, k**alpha) 50 | Tw[i] = - objective(c_star) 51 | 52 | return Tw 53 | 54 | # === If file is run directly, not imported, produce figure === # 55 | if __name__ == '__main__': 56 | 57 | w = 5 * log(grid) - 25 # An initial condition -- fairly arbitrary 58 | n = 35 59 | fig, ax = plt.subplots() 60 | ax.set_ylim(-40, -20) 61 | ax.set_xlim(np.min(grid), np.max(grid)) 62 | lb = 'initial condition' 63 | ax.plot(grid, w, color=plt.cm.jet(0), lw=2, alpha=0.6, label=lb) 64 | for i in range(n): 65 | w = bellman_operator(w) 66 | ax.plot(grid, w, color=plt.cm.jet(i / n), lw=2, alpha=0.6) 67 | lb = 'true value function' 68 | ax.plot(grid, v_star(grid), 'k-', lw=2, alpha=0.8, label=lb) 69 | ax.legend(loc='upper left') 70 | 71 | plt.show() 72 | -------------------------------------------------------------------------------- /quantecon/lae.py: -------------------------------------------------------------------------------- 1 | """ 2 | Filename: lae.py 3 | 4 | Authors: Thomas J. Sargent, John Stachurski, 5 | 6 | Computes a sequence of marginal densities for a continuous state space 7 | Markov chain :math:`X_t` where the transition probabilities can be represented 8 | as densities. The estimate of the marginal density of :math:`X_t` is 9 | 10 | .. math:: 11 | 12 | \frac{1}{n} \sum_{i=0}^n p(X_{t-1}^i, y) 13 | 14 | This is a density in y. 15 | 16 | References 17 | ---------- 18 | 19 | http://quant-econ.net/py/stationary_densities.html 20 | 21 | """ 22 | from textwrap import dedent 23 | import numpy as np 24 | 25 | 26 | class LAE(object): 27 | """ 28 | An instance is a representation of a look ahead estimator associated 29 | with a given stochastic kernel p and a vector of observations X. 30 | 31 | Parameters 32 | ---------- 33 | p : function 34 | The stochastic kernel. 
A function p(x, y) that is vectorized in 35 | both x and y 36 | X : array_like(float) 37 | A vector containing observations 38 | 39 | Attributes 40 | ---------- 41 | p, X : see Parameters 42 | 43 | Examples 44 | -------- 45 | >>> psi = LAE(p, X) 46 | >>> y = np.linspace(0, 1, 100) 47 | >>> psi(y) # Evaluate look ahead estimate at grid of points y 48 | 49 | """ 50 | 51 | def __init__(self, p, X): 52 | X = X.flatten() # So we know what we're dealing with 53 | n = len(X) 54 | self.p, self.X = p, X.reshape((n, 1)) 55 | 56 | def __repr__(self): 57 | return self.__str__() 58 | 59 | def __str__(self): 60 | m = """\ 61 | Look ahead estimator 62 | - number of observations : {n} 63 | """ 64 | return dedent(m.format(n=self.X.size)) 65 | 66 | def __call__(self, y): 67 | """ 68 | A vectorized function that returns the value of the look ahead 69 | estimate at the values in the array y. 70 | 71 | Parameters 72 | ---------- 73 | y : array_like(float) 74 | A vector of points at which we wish to evaluate the look- 75 | ahead estimator 76 | 77 | Returns 78 | ------- 79 | psi_vals : array_like(float) 80 | The values of the density estimate at the points in y 81 | 82 | """ 83 | k = len(y) 84 | v = self.p(self.X, y.reshape((1, k))) 85 | psi_vals = np.mean(v, axis=0) # Take mean along each row 86 | 87 | return psi_vals.flatten() 88 | -------------------------------------------------------------------------------- /examples/clt3d.py: -------------------------------------------------------------------------------- 1 | """ 2 | Origin: QE by John Stachurski and Thomas J. Sargent 3 | Filename: clt3d.py 4 | 5 | Visual illustration of the central limit theorem. Produces a 3D figure 6 | showing the density of the scaled sample mean \sqrt{n} \bar X_n plotted 7 | against n. 8 | """ 9 | 10 | import numpy as np 11 | from scipy.stats import beta, gaussian_kde 12 | from mpl_toolkits.mplot3d import Axes3D 13 | from matplotlib.collections import PolyCollection 14 | import matplotlib.pyplot as plt 15 | 16 | beta_dist = beta(2, 2) 17 | 18 | 19 | def gen_x_draws(k): 20 | """ 21 | Returns a flat array containing k independent draws from the 22 | distribution of X, the underlying random variable. This distribution is 23 | itself a convex combination of three beta distributions. 
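Concretely, each draw picks one of the three shifted beta(2, 2) variates with equal probability, and the pooled sample is then standardized to have zero mean and unit standard deviation.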
24 | """ 25 | bdraws = beta_dist.rvs((3, k)) 26 | # == Transform rows, so each represents a different distribution == # 27 | bdraws[0, :] -= 0.5 28 | bdraws[1, :] += 0.6 29 | bdraws[2, :] -= 1.1 30 | # == Set X[i] = bdraws[j, i], where j is a random draw from {0, 1, 2} == # 31 | js = np.random.random_integers(0, 2, size=k) 32 | X = bdraws[js, np.arange(k)] 33 | # == Rescale, so that the random variable is zero mean == # 34 | m, sigma = X.mean(), X.std() 35 | return (X - m) / sigma 36 | 37 | nmax = 5 38 | reps = 100000 39 | ns = list(range(1, nmax + 1)) 40 | 41 | # == Form a matrix Z such that each column is reps independent draws of X == # 42 | Z = np.empty((reps, nmax)) 43 | for i in range(nmax): 44 | Z[:, i] = gen_x_draws(reps) 45 | # == Take cumulative sum across columns 46 | S = Z.cumsum(axis=1) 47 | # == Multiply j-th column by sqrt j == # 48 | Y = (1 / np.sqrt(ns)) * S 49 | 50 | # == Plot == # 51 | 52 | fig = plt.figure() 53 | ax = fig.gca(projection='3d') 54 | 55 | a, b = -3, 3 56 | gs = 100 57 | xs = np.linspace(a, b, gs) 58 | 59 | # == Build verts == # 60 | greys = np.linspace(0.3, 0.7, nmax) 61 | verts = [] 62 | for n in ns: 63 | density = gaussian_kde(Y[:, n-1]) 64 | ys = density(xs) 65 | verts.append(list(zip(xs, ys))) 66 | 67 | poly = PolyCollection(verts, facecolors=[str(g) for g in greys]) 68 | poly.set_alpha(0.85) 69 | ax.add_collection3d(poly, zs=ns, zdir='x') 70 | 71 | # ax.text(np.mean(rhos), a-1.4, -0.02, r'$\beta$', fontsize=16) 72 | # ax.text(np.max(rhos)+0.016, (a+b)/2, -0.02, r'$\log(y)$', fontsize=16) 73 | ax.set_xlim3d(1, nmax) 74 | ax.set_xticks(ns) 75 | ax.set_xlabel("n") 76 | ax.set_yticks((-3, 0, 3)) 77 | ax.set_ylim3d(a, b) 78 | ax.set_zlim3d(0, 0.4) 79 | ax.set_zticks((0.2, 0.4)) 80 | plt.show() 81 | -------------------------------------------------------------------------------- /quantecon/tests/test_compute_fp.py: -------------------------------------------------------------------------------- 1 | """ 2 | tests for quantecon.compute_fp module 3 | 4 | @author : Spencer Lyon 5 | @date : 2014-07-31 6 | 7 | References 8 | ---------- 9 | 10 | https://www.math.ucdavis.edu/~hunter/book/ch3.pdf 11 | 12 | TODO: add multivariate case 13 | 14 | """ 15 | from __future__ import division 16 | import unittest 17 | from quantecon import compute_fixed_point 18 | 19 | 20 | class TestFPLogisticEquation(unittest.TestCase): 21 | 22 | @classmethod 23 | def setUpClass(cls): 24 | cls.mu_1 = 0.2 # 0 is unique fixed point forall x_0 \in [0, 1] 25 | 26 | # (4mu - 1)/(4mu) is a fixed point forall x_0 \in [0, 1] 27 | cls.mu_2 = 0.3 28 | 29 | # starting points on (0, 1) 30 | cls.unit_inverval = [0.1, 0.3, 0.6, 0.9] 31 | 32 | # arguments for compute_fixed_point 33 | cls.kwargs = {"error_tol": 1e-5, "max_iter": 200, "verbose": 0} 34 | 35 | def T(self, x, mu): 36 | return 4.0 * mu * x * (1.0 - x) 37 | 38 | def test_contraction_1(self): 39 | "compute_fp: convergence inside interval of convergence" 40 | f = lambda x: self.T(x, self.mu_1) 41 | for i in self.unit_inverval: 42 | # should have fixed point of 0.0 43 | self.assertTrue(abs(compute_fixed_point(f, i, **self.kwargs)) 44 | < 1e-4) 45 | 46 | def test_not_contraction_2(self): 47 | "compute_fp: no convergence outside interval of convergence" 48 | f = lambda x: self.T(x, self.mu_2) 49 | for i in self.unit_inverval: 50 | # This shouldn't converge to 0.0 51 | self.assertFalse(abs(compute_fixed_point(f, i, **self.kwargs)) 52 | < 1e-4) 53 | 54 | def test_contraction_2(self): 55 | "compute_fp: convergence inside interval of convergence" 56 | f 
= lambda x: self.T(x, self.mu_2) 57 | fp = (4 * self.mu_2 - 1) / (4 * self.mu_2) 58 | for i in self.unit_inverval: 59 | # This should converge to fp 60 | self.assertTrue(abs(compute_fixed_point(f, i, **self.kwargs)-fp) 61 | < 1e-4) 62 | 63 | def test_not_contraction_1(self): 64 | "compute_fp: no convergence outside interval of convergence" 65 | f = lambda x: self.T(x, self.mu_1) 66 | fp = (4 * self.mu_1 - 1) / (4 * self.mu_1) 67 | for i in self.unit_inverval: 68 | # This should not converge (b/c unique fp is 0.0) 69 | self.assertFalse(abs(compute_fixed_point(f, i, **self.kwargs)-fp) 70 | < 1e-4) 71 | -------------------------------------------------------------------------------- /quantecon/tests/test_lqcontrol.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author: Chase Coleman 3 | Filename: test_lqcontrol 4 | 5 | Tests for lqcontrol.py file 6 | 7 | """ 8 | import sys 9 | import os 10 | import unittest 11 | import numpy as np 12 | from scipy.linalg import LinAlgError 13 | from numpy.testing import assert_allclose 14 | from quantecon.lqcontrol import LQ 15 | 16 | 17 | class TestLQControl(unittest.TestCase): 18 | 19 | def setUp(self): 20 | # Initial Values 21 | q = 1. 22 | r = 1. 23 | rf = 1. 24 | a = .95 25 | b = -1. 26 | c = .05 27 | beta = .95 28 | T = 1 29 | 30 | self.lq_scalar = LQ(q, r, a, b, C=c, beta=beta, T=T, Rf=rf) 31 | 32 | 33 | Q = np.array([[0., 0.], [0., 1]]) 34 | R = np.array([[1., 0.], [0., 0]]) 35 | RF = np.eye(2) * 100 36 | A = np.ones((2, 2)) * .95 37 | B = np.ones((2, 2)) * -1 38 | 39 | self.lq_mat = LQ(Q, R, A, B, beta=beta, T=T, Rf=RF) 40 | 41 | 42 | def tearDown(self): 43 | del self.lq_scalar 44 | del self.lq_mat 45 | 46 | 47 | def test_scalar_sequences(self): 48 | 49 | lq_scalar = self.lq_scalar 50 | x0 = 2 51 | 52 | x_seq, u_seq, w_seq = lq_scalar.compute_sequence(x0) 53 | 54 | # Solution found by hand 55 | u_0 = (-2*lq_scalar.A*lq_scalar.B*lq_scalar.beta*lq_scalar.Rf) / \ 56 | (2*lq_scalar.Q+lq_scalar.beta*lq_scalar.Rf*2*lq_scalar.B**2) \ 57 | * x0 58 | x_1 = lq_scalar.A * x0 + lq_scalar.B * u_0 + w_seq[0, -1] 59 | 60 | assert_allclose(u_0, u_seq, rtol=1e-4) 61 | assert_allclose(x_1, x_seq[0, -1], rtol=1e-4) 62 | 63 | 64 | def test_mat_sequences(self): 65 | 66 | lq_mat = self.lq_mat 67 | x0 = np.random.randn(2) * 25 68 | 69 | x_seq, u_seq, w_seq = lq_mat.compute_sequence(x0) 70 | 71 | assert_allclose(np.sum(u_seq), .95 * np.sum(x0), atol=1e-3) 72 | assert_allclose(x_seq[:, -1], np.zeros_like(x0), atol=1e-3) 73 | 74 | 75 | def test_stationary_mat(self): 76 | x0 = np.random.randn(2) * 25 77 | lq_mat = self.lq_mat 78 | 79 | P, F, d = lq_mat.stationary_values() 80 | f_answer = np.array([[-.95, -.95], [0., 0.]]) 81 | p_answer = np.array([[1., 0], [0., 0.]]) 82 | 83 | val_func_lq = np.dot(x0, P).dot(x0) 84 | val_func_answer = x0[0]**2 85 | 86 | assert_allclose(f_answer, F, atol=1e-3) 87 | assert_allclose(val_func_lq, val_func_answer, atol=1e-3) 88 | 89 | 90 | 91 | 92 | if __name__ == '__main__': 93 | suite = unittest.TestLoader().loadTestsFromTestCase(TestLQControl) 94 | unittest.TextTestRunner(verbosity=2, stream=sys.stderr).run(suite) 95 | -------------------------------------------------------------------------------- /quantecon/ce_util.py: -------------------------------------------------------------------------------- 1 | """ 2 | Filename: ce_util.py 3 | Authors: Chase Coleman, Spencer Lyon, John Stachurski, and Thomas Sargent 4 | Date: 2014-07-01 5 | 6 | Utility functions used in CompEcon 7 | 8 | Based routines 
found in the CompEcon toolbox by Miranda and Fackler. 9 | 10 | References 11 | ---------- 12 | Miranda, Mario J, and Paul L Fackler. Applied Computational Economics 13 | and Finance, MIT Press, 2002. 14 | 15 | """ 16 | from functools import reduce 17 | import numpy as np 18 | 19 | 20 | def ckron(*arrays): 21 | """ 22 | Repeatedly applies the np.kron function to an arbitrary number of 23 | input arrays 24 | 25 | Parameters 26 | ---------- 27 | *arrays : tuple/list of np.ndarray 28 | 29 | Returns 30 | ------- 31 | out : np.ndarray 32 | The result of repeated kronecker products 33 | 34 | Notes 35 | ----- 36 | Based of original function `ckron` in CompEcon toolbox by Miranda 37 | and Fackler 38 | 39 | References 40 | ---------- 41 | Miranda, Mario J, and Paul L Fackler. Applied Computational 42 | Economics and Finance, MIT Press, 2002. 43 | 44 | """ 45 | return reduce(np.kron, arrays) 46 | 47 | 48 | def gridmake(*arrays): 49 | """ 50 | TODO: finish this docstring 51 | 52 | Notes 53 | ----- 54 | Based of original function ``gridmake`` in CompEcon toolbox by 55 | Miranda and Fackler 56 | 57 | References 58 | ---------- 59 | Miranda, Mario J, and Paul L Fackler. Applied Computational Economics 60 | and Finance, MIT Press, 2002. 61 | 62 | """ 63 | if all([i.ndim == 1 for i in arrays]): 64 | d = len(arrays) 65 | if d == 2: 66 | out = _gridmake2(*arrays) 67 | else: 68 | out = _gridmake2(arrays[0], arrays[1]) 69 | for arr in arrays[2:]: 70 | out = _gridmake2(out, arr) 71 | 72 | return out 73 | else: 74 | raise NotImplementedError("Come back here") 75 | 76 | 77 | def _gridmake2(x1, x2): 78 | """ 79 | TODO: finish this docstring 80 | 81 | Notes 82 | ----- 83 | Based of original function ``gridmake2`` in CompEcon toolbox by 84 | Miranda and Fackler 85 | 86 | References 87 | ---------- 88 | Miranda, Mario J, and Paul L Fackler. Applied Computational Economics 89 | and Finance, MIT Press, 2002. 90 | 91 | """ 92 | if x1.ndim == 1 and x2.ndim == 1: 93 | return np.column_stack([np.tile(x1, x2.shape[0]), 94 | np.repeat(x2, x1.shape[0])]) 95 | elif x1.ndim > 1 and x2.ndim == 1: 96 | first = np.tile(x1, (x2.shape[0], 1)) 97 | second = np.repeat(x2, x1.shape[0]) 98 | return np.column_stack([first, second]) 99 | else: 100 | raise NotImplementedError("Come back here") 101 | -------------------------------------------------------------------------------- /quantecon/tests/test_kalman.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author: Chase Coleman 3 | Date: 08/04/2014 4 | 5 | Contains test for the kalman.py file. 
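The stationarity test below verifies, by direct substitution, that the pair (sig_inf, kal_gain) returned by Kalman.stationary_values satisfies

    kal_gain = A sig_inf G' (G sig_inf G' + R)^{-1}
    sig_inf  = A sig_inf A' - kal_gain G sig_inf A' + Q

i.e. that sig_inf is a fixed point of the Riccati-style covariance recursion.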
6 | 7 | """ 8 | import sys 9 | import os 10 | import unittest 11 | import numpy as np 12 | from numpy.testing import assert_allclose 13 | from quantecon.kalman import Kalman 14 | 15 | 16 | class TestKalman(unittest.TestCase): 17 | 18 | def setUp(self): 19 | # Initial Values 20 | self.A = np.array([[.95, 0], [0., .95]]) 21 | self.Q = np.eye(2) * 0.5 22 | self.G = np.eye(2) * .5 23 | self.R = np.eye(2) * 0.2 24 | 25 | self.kf = Kalman(self.A, self.G, self.Q, self.R) 26 | 27 | 28 | 29 | def tearDown(self): 30 | del self.kf 31 | 32 | 33 | def test_stationarity(self): 34 | A, Q, G, R = self.A, self.Q, self.G, self.R 35 | kf = self.kf 36 | 37 | sig_inf, kal_gain = kf.stationary_values() 38 | 39 | mat_inv = np.linalg.inv(G.dot(sig_inf).dot(G.T) + R) 40 | 41 | # Compute the kalmain gain and sigma infinity according to the 42 | # recursive equations and compare 43 | kal_recursion = np.dot(A, sig_inf).dot(G.T).dot(mat_inv) 44 | sig_recursion = (A.dot(sig_inf).dot(A.T) - 45 | kal_recursion.dot(G).dot(sig_inf).dot(A.T) + Q) 46 | 47 | assert_allclose(kal_gain, kal_recursion, rtol=1e-4, atol=1e-2) 48 | assert_allclose(sig_inf, sig_recursion, rtol=1e-4, atol=1e-2) 49 | 50 | 51 | def test_update_using_stationary(self): 52 | A, Q, G, R = self.A, self.Q, self.G, self.R 53 | kf = self.kf 54 | 55 | sig_inf, kal_gain = kf.stationary_values() 56 | 57 | kf.set_state(np.zeros((2, 1)), sig_inf) 58 | 59 | kf.update(np.zeros((2, 1))) 60 | 61 | assert_allclose(kf.current_Sigma, sig_inf, rtol=1e-4, atol=1e-2) 62 | assert_allclose(kf.current_x_hat.squeeze(), np.zeros(2), rtol=1e-4, atol=1e-2) 63 | 64 | 65 | def test_update_nonstationary(self): 66 | A, Q, G, R = self.A, self.Q, self.G, self.R 67 | kf = self.kf 68 | 69 | curr_x, curr_sigma = np.ones((2, 1)), np.eye(2) * .75 70 | y_observed = np.ones((2, 1)) * .75 71 | 72 | kf.set_state(curr_x, curr_sigma) 73 | kf.update(y_observed) 74 | 75 | mat_inv = np.linalg.inv(G.dot(curr_sigma).dot(G.T) + R) 76 | curr_k = np.dot(A, curr_sigma).dot(G.T).dot(mat_inv) 77 | new_sigma = (A.dot(curr_sigma).dot(A.T) - 78 | curr_k.dot(G).dot(curr_sigma).dot(A.T) + Q) 79 | 80 | new_xhat = A.dot(curr_x) + curr_k.dot(y_observed - G.dot(curr_x)) 81 | 82 | assert_allclose(kf.current_Sigma, new_sigma, rtol=1e-4, atol=1e-2) 83 | assert_allclose(kf.current_x_hat, new_xhat, rtol=1e-4, atol=1e-2) 84 | 85 | if __name__ == '__main__': 86 | suite = unittest.TestLoader().loadTestsFromTestCase(TestKalman) 87 | unittest.TextTestRunner(verbosity=2, stream=sys.stderr).run(suite) 88 | -------------------------------------------------------------------------------- /quantecon/tests/tests_models/tests_solow/test_ces.py: -------------------------------------------------------------------------------- 1 | """ 2 | Test suite for ces.py module. 3 | 4 | @author : David R. Pugh 5 | @date : 2014-12-08 6 | 7 | """ 8 | import nose 9 | 10 | import numpy as np 11 | 12 | from .... 
models.solow import ces 13 | 14 | params = {'A0': 1.0, 'g': 0.02, 'L0': 1.0, 'n': 0.02, 's': 0.15, 15 | 'alpha': 0.33, 'sigma': 1.1, 'delta': 0.05} 16 | model = ces.CESModel(params) 17 | 18 | 19 | def test_steady_state(): 20 | """Compare analytic steady state with numerical steady state.""" 21 | eps = 1e-1 22 | for g in np.linspace(eps, 0.05, 4): 23 | for n in np.linspace(eps, 0.05, 4): 24 | for s in np.linspace(eps, 1-eps, 4): 25 | for alpha in np.linspace(eps, 1-eps, 4): 26 | for delta in np.linspace(eps, 1-eps, 4): 27 | for sigma in np.linspace(eps, 2.0, 4): 28 | 29 | tmp_params = {'A0': 1.0, 'g': g, 'L0': 1.0, 'n': n, 30 | 's': s, 'alpha': alpha, 'delta': delta, 31 | 'sigma': sigma} 32 | try: 33 | model.params = tmp_params 34 | 35 | # use root finder to compute the steady state 36 | actual_ss = model.steady_state 37 | expected_ss = model.find_steady_state(1e-12, 1e9) 38 | 39 | # conduct the test (numerical precision limits!) 40 | nose.tools.assert_almost_equals(actual_ss, 41 | expected_ss, 42 | places=6) 43 | 44 | # handles params with non-finite steady state 45 | except AttributeError: 46 | continue 47 | 48 | 49 | def test_validate_params(): 50 | """Testing validation of params attribute.""" 51 | invalid_params_0 = {'A0': 1.0, 'g': 0.02, 'L0': 1.0, 'n': 0.02, 's': 0.15, 52 | 'alpha': 1.33, 'delta': 0.03, 'sigma': 1.2} 53 | invalid_params_1 = {'A0': 1.0, 'g': 0.02, 'L0': 1.0, 'n': 0.02, 's': 0.15, 54 | 'alpha': 0.33, 'delta': 0.03, 'sigma': 0.0} 55 | invalid_params_2 = {'A0': 1.0, 'g': 0.01, 'L0': 1.0, 'n': 0.01, 's': 0.12, 56 | 'alpha': 0.75, 'delta': 0.01, 'sigma': 2.0} 57 | 58 | # alpha must be in (0, 1) 59 | with nose.tools.assert_raises(AttributeError): 60 | ces.CESModel(invalid_params_0) 61 | 62 | # sigma must be strictly positive 63 | with nose.tools.assert_raises(AttributeError): 64 | ces.CESModel(invalid_params_1) 65 | 66 | # parameters inconsistent with finite steady state 67 | with nose.tools.assert_raises(AttributeError): 68 | ces.CESModel(invalid_params_2) 69 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | ## Quantitative Economics (Python) 3 | 4 | This repository collects code for quantitative economic modeling in Python 5 | 6 | Library Website: [http://quantecon.org](http://quantecon.org) 7 | 8 | ### Downloading the Repository 9 | 10 | Either 11 | 12 | * Click the 'Download ZIP' button on the right, or 13 | * Use [Git](https://help.github.com) to clone the repository 14 | 15 | ### The quantecon Package 16 | 17 | This repository includes the Python package `quantecon` 18 | 19 | Assuming you have [pip](https://pypi.python.org/pypi/pip) on your computer --- as will be the case if you've [installed Anaconda](http://quant-econ.net/getting_started.html#installing-anaconda) --- you can install the latest stable release of `quantecon` by typing 20 | 21 | pip install quantecon 22 | 23 | at a terminal prompt 24 | 25 | #### Current Build and Coverage Status: 26 | [![Build Status](https://travis-ci.org/QuantEcon/QuantEcon.py.svg?branch=master)](https://travis-ci.org/QuantEcon/QuantEcon.py) 27 | [![Coverage Status](https://coveralls.io/repos/QuantEcon/QuantEcon.py/badge.png)](https://coveralls.io/r/QuantEcon/QuantEcon.py) 28 | 29 | ## Additional Links 30 | 31 | 1. [Project Coordinators](http://quantecon.org/about/#people) 32 | 2. [Lead Developers](http://quantecon.org/about/#people) 33 | 3.
[QuantEcon Course Website](http://quant-econ.net) 34 | 35 | ### License 36 | 37 | Copyright © 2013, 2014 Thomas J. Sargent and John Stachurski: BSD-3 38 | All rights reserved. 39 | 40 | Redistribution and use in source and binary forms, with or without 41 | modification, are permitted provided that the following conditions are met: 42 | 43 | 1. Redistributions of source code must retain the above copyright notice, this 44 | list of conditions and the following disclaimer. 45 | 46 | 2. Redistributions in binary form must reproduce the above copyright 47 | notice, this list of conditions and the following disclaimer in the 48 | documentation and/or other materials provided with the distribution. 49 | 50 | 3. Neither the name of the copyright holder nor the names of its 51 | contributors may be used to endorse or promote products derived from 52 | this software without specific prior written permission. 53 | 54 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 55 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 56 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 57 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 58 | HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 59 | INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 60 | BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS 61 | OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 62 | AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 63 | LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY 64 | WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 65 | POSSIBILITY OF SUCH DAMAGE. 66 | -------------------------------------------------------------------------------- /quantecon/tests/test_robustlq.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author: Chase Coleman 3 | Filename: test_robustlq 4 | 5 | Tests for robustlq.py file 6 | 7 | """ 8 | import sys 9 | import os 10 | import unittest 11 | import numpy as np 12 | from scipy.linalg import LinAlgError 13 | from numpy.testing import assert_allclose 14 | from quantecon.lqcontrol import LQ 15 | from quantecon.robustlq import RBLQ 16 | 17 | 18 | class TestRBLQControl(unittest.TestCase): 19 | 20 | def setUp(self): 21 | # Initial Values 22 | a_0 = 100 23 | a_1 = 0.5 24 | rho = 0.9 25 | sigma_d = 0.05 26 | beta = 0.95 27 | c = 2 28 | gamma = 50.0 29 | theta = 0.002 30 | ac = (a_0 - c) / 2.0 31 | 32 | R = np.array([[0, ac, 0], 33 | [ac, -a_1, 0.5], 34 | [0., 0.5, 0]]) 35 | 36 | R = -R 37 | Q = gamma / 2 38 | 39 | A = np.array([[1., 0., 0.], 40 | [0., 1., 0.], 41 | [0., 0., rho]]) 42 | B = np.array([[0.], 43 | [1.], 44 | [0.]]) 45 | C = np.array([[0.], 46 | [0.], 47 | [sigma_d]]) 48 | 49 | 50 | self.rblq_test = RBLQ(Q, R, A, B, C, beta, theta) 51 | self.lq_test = LQ(Q, R, A, B, C, beta) 52 | 53 | self.Fr, self.Kr, self.Pr = self.rblq_test.robust_rule() 54 | 55 | def tearDown(self): 56 | del self.rblq_test 57 | 58 | def test_robust_rule_vs_simple(self): 59 | rblq = self.rblq_test 60 | Fr, Kr, Pr = self.Fr, self.Kr, self.Pr 61 | 62 | Fs, Ks, Ps = rblq.robust_rule_simple(P_init=Pr, tol=1e-12) 63 | 64 | assert_allclose(Fr, Fs, rtol=1e-4) 65 | assert_allclose(Kr, Ks, rtol=1e-4) 66 | assert_allclose(Pr, Ps, rtol=1e-4) 67 | 68 | def test_f2k_and_k2f(self): 69 | rblq = self.rblq_test 70 | Fr, Kr, Pr = self.Fr, self.Kr, self.Pr 71 | 72 | K_f2k, P_f2k = rblq.F_to_K(Fr) 73 | F_k2f,
P_k2f = rblq.K_to_F(Kr) 74 | 75 | assert_allclose(K_f2k, Kr, rtol=1e-4) 76 | assert_allclose(F_k2f, Fr, rtol=1e-4) 77 | assert_allclose(P_f2k, P_k2f, rtol=1e-4) 78 | 79 | def test_evaluate_F(self): 80 | rblq = self.rblq_test 81 | Fr, Kr, Pr = self.Fr, self.Kr, self.Pr 82 | 83 | Kf, Pf, df, Of, of = rblq.evaluate_F(Fr) 84 | 85 | # In the future if we wanted, we could check more things, but I 86 | # think the other pieces are basically just plugging these into 87 | # equations so if these hold then the others should be correct 88 | # as well. 89 | assert_allclose(Pf, Pr) 90 | assert_allclose(Kf, Kr) 91 | 92 | 93 | 94 | 95 | 96 | 97 | 98 | 99 | if __name__ == '__main__': 100 | suite = unittest.TestLoader().loadTestsFromTestCase(TestRBLQControl) 101 | unittest.TextTestRunner(verbosity=2, stream=sys.stderr).run(suite) 102 | -------------------------------------------------------------------------------- /quantecon/tests/tests_models/test_asset_pricing.py: -------------------------------------------------------------------------------- 1 | """ 2 | Filename: test_asset_pricing.py 3 | Authors: Spencer Lyon 4 | Date: 2014-07-30 5 | 6 | Tests for quantecon.asset_pricing module 7 | 8 | TODO: come up with some simple examples we can check by hand for price 9 | methods. 10 | 11 | """ 12 | from __future__ import division 13 | import unittest 14 | import numpy as np 15 | from numpy.testing import assert_allclose 16 | from quantecon.models import AssetPrices 17 | 18 | # parameters for object 19 | n = 5 20 | P = 0.0125 * np.ones((n, n)) 21 | P += np.diag(0.95 - 0.0125 * np.ones(5)) 22 | s = np.array([1.05, 1.025, 1.0, 0.975, 0.95]) # state values 23 | gamma = 2.0 24 | bet = 0.94 25 | zeta = 1.0 26 | p_s = 150.0 27 | 28 | 29 | class TestAssetPrices(unittest.TestCase): 30 | 31 | @classmethod 32 | def setUpClass(cls): 33 | cls.ap = AssetPrices(bet, P, s, gamma) 34 | 35 | def test_P_shape(self): 36 | "asset_pricing: is P square" 37 | shp = self.ap.P.shape 38 | assert shp[0] == shp[1] 39 | 40 | def test_n(self): 41 | "asset_pricing: n computed correctly" 42 | assert self.ap.n == self.ap.P.shape[0] 43 | 44 | def test_P_tilde(self): 45 | "asset_pricing: test P_tilde by hand using nested loops" 46 | # unpack variables and allocate memory for new P_tilde 47 | n, s, P, gam = (self.ap.n, self.ap.s, self.ap.P, self.ap.gamma) 48 | p_tilde_2 = np.empty_like(self.ap.P) 49 | 50 | # fill in new p_tilde by hand 51 | for i in range(n): 52 | for k in range(n): 53 | p_tilde_2[i, k] = P[i, k] * s[k] ** (1.0 - gam) 54 | 55 | assert_allclose(self.ap.P_tilde, p_tilde_2) 56 | 57 | def test_P_check(self): 58 | "asset_pricing: test P_check by hand using nested loops" 59 | # unpack variables and allocate memory for new P_tilde 60 | n, s, P, gam = (self.ap.n, self.ap.s, self.ap.P, self.ap.gamma) 61 | p_check_2 = np.empty_like(self.ap.P) 62 | 63 | # fill in new p_check by hand 64 | for i in range(n): 65 | for k in range(n): 66 | p_check_2[i, k] = P[i, k] * s[k] ** (-gam) 67 | 68 | assert_allclose(self.ap.P_check, p_check_2) 69 | 70 | def test_tree_price_size(self): 71 | "asset_pricing: test lucas_tree price size" 72 | assert self.ap.tree_price().size == self.ap.n 73 | 74 | def test_consol_price_size(self): 75 | "asset_pricing: test consol_price price size" 76 | assert self.ap.consol_price(zeta).size == self.ap.n 77 | 78 | def test_call_option_size(self): 79 | "asset_pricing: test call_option price size" 80 | assert self.ap.call_option(zeta, p_s)[0].size == self.ap.n 81 | 82 | def test_tree_price(self): 83 | pass 84 | 85 | def 
test_consol_price(self): 86 | pass 87 | 88 | def test_call_option_price(self): 89 | pass 90 | 91 | def test_multiple_periods_call_option(self): 92 | "asset_pricing: T option works to return multiple periods" 93 | w_bars = self.ap.call_option(zeta, p_s, T=[5, 7])[1] 94 | self.assertEqual(len(w_bars), 2) 95 | -------------------------------------------------------------------------------- /quantecon/cartesian.py: -------------------------------------------------------------------------------- 1 | """ 2 | Filename: cartesian.py 3 | 4 | Authors: Pablo Winant 5 | 6 | Implements cartesian products and regular cartesian grids. 7 | """ 8 | 9 | import numpy 10 | from numba import njit 11 | 12 | 13 | def cartesian(nodes, order='C'): 14 | '''Cartesian product of a list of arrays 15 | 16 | Parameters: 17 | ----------- 18 | nodes: (list of 1d-arrays) 19 | order: ('C' or 'F') order in which the product is enumerated 20 | 21 | Returns: 22 | -------- 23 | out: (2d-array) each line corresponds to one point of the product space 24 | ''' 25 | 26 | nodes = [numpy.array(e) for e in nodes] 27 | shapes = [e.shape[0] for e in nodes] 28 | 29 | dtype = nodes[0].dtype 30 | 31 | n = len(nodes) 32 | l = numpy.prod(shapes) 33 | out = numpy.zeros((l, n), dtype=dtype) 34 | 35 | if order == 'C': 36 | repetitions = numpy.cumprod([1] + shapes[:-1]) 37 | else: 38 | shapes.reverse() 39 | sh = [1] + shapes[:-1] 40 | repetitions = numpy.cumprod(sh) 41 | repetitions = repetitions.tolist() 42 | repetitions.reverse() 43 | 44 | for i in range(n): 45 | _repeat_1d(nodes[i], repetitions[i], out[:, i]) 46 | 47 | return out 48 | 49 | 50 | def mlinspace(a, b, nums, order='C'): 51 | '''Constructs a regular cartesian grid 52 | 53 | Parameters: 54 | ----------- 55 | a: (1d-array) lower bounds in each dimension 56 | b: (1d-array) upper bounds in each dimension 57 | nums: (1d-array) number of nodes along each dimension 58 | order: ('C' or 'F') order in which the product is enumerated 59 | 60 | Returns: 61 | -------- 62 | out: (2d-array) each line corresponds to one point of the product space 63 | ''' 64 | 65 | a = numpy.array(a, dtype='float64') 66 | b = numpy.array(b, dtype='float64') 67 | nums = numpy.array(nums, dtype='int64') 68 | nodes = [numpy.linspace(a[i], b[i], nums[i]) for i in range(len(nums))] 69 | 70 | return cartesian(nodes, order=order) 71 | 72 | 73 | @njit 74 | def _repeat_1d(x, K, out): 75 | ''' 76 | Repeats each element of a vector many times and repeats the 77 | whole result many times 78 | 79 | Parameters 80 | ---------- 81 | 82 | x : array_like(Any, ndim=1) 83 | The vector to be repeated 84 | K : scalar(int) 85 | The number of times each element of x 86 | is repeated (inner iterations) 87 | out : array_like(Any, ndim=1) 88 | placeholder for the result 89 | 90 | Returns 91 | ------- 92 | None 93 | ''' 94 | 95 | N = x.shape[0] 96 | L = out.shape[0] // (K*N) # number of outer iterations 97 | # K # number of inner iterations 98 | 99 | # the result out should enumerate in C-order the elements 100 | # of a 3-dimensional array T of dimensions (K,N,L) 101 | # such that for all k,n,l, we have T[k,n,l] == x[n] 102 | 103 | for n in range(N): 104 | val = x[n] 105 | for k in range(K): 106 | for l in range(L): 107 | ind = k*N*L + n*L + l 108 | out[ind] = val 109 | -------------------------------------------------------------------------------- /quantecon/quadsums.py: -------------------------------------------------------------------------------- 1 | """ 2 | Filename: quadsums.py 3 | 4 | Authors: Thomas Sargent, John Stachurski 5 | 6 | 
This module provides functions to compute quadratic sums of the form described 7 | in the docstrings. 8 | 9 | """ 10 | 11 | 12 | import numpy as np 13 | from numpy import sqrt, dot 14 | import scipy.linalg 15 | from .matrix_eqn import solve_discrete_lyapunov 16 | 17 | 18 | def var_quadratic_sum(A, C, H, beta, x0): 19 | r""" 20 | Computes the expected discounted quadratic sum 21 | 22 | .. math:: 23 | 24 | q(x_0) = E \sum_{t=0}^{\infty} \beta^t x_t' H x_t 25 | 26 | Here {x_t} is the VAR process x_{t+1} = A x_t + C w_t with {w_t} 27 | standard normal and x_0 the initial condition. 28 | 29 | Parameters 30 | ---------- 31 | A : array_like(float, ndim=2) 32 | The matrix described above in description. Should be n x n 33 | C : array_like(float, ndim=2) 34 | The matrix described above in description. Should be n x n 35 | H : array_like(float, ndim=2) 36 | The matrix described above in description. Should be n x n 37 | beta: scalar(float) 38 | Should take a value in (0, 1) 39 | x_0: array_like(float, ndim=1) 40 | The initial condtion. A conformable array (of length n, or with 41 | n rows) 42 | 43 | Returns 44 | ------- 45 | q0: scalar(float) 46 | Represents the value q(x_0) 47 | 48 | Remarks: The formula for computing q(x_0) is q(x_0) = x_0' Q x_0 + v 49 | where 50 | 51 | Q is the solution to Q = H + beta A' Q A and 52 | v = \trace(C' Q C) \beta / (1 - \beta) 53 | 54 | """ 55 | # == Make sure that A, C, H and x0 are array_like == # 56 | 57 | A, C, H = list(map(np.atleast_2d, (A, C, H))) 58 | x0 = np.atleast_1d(x0) 59 | # == Start computations == # 60 | Q = scipy.linalg.solve_discrete_lyapunov(sqrt(beta) * A.T, H) 61 | cq = dot(dot(C.T, Q), C) 62 | v = np.trace(cq) * beta / (1 - beta) 63 | q0 = dot(dot(x0.T, Q), x0) + v 64 | 65 | return q0 66 | 67 | 68 | def m_quadratic_sum(A, B, max_it=50): 69 | r""" 70 | Computes the quadratic sum 71 | 72 | .. math:: 73 | 74 | V = \sum_{j=0}^{\infty} A^j B A^{j'} 75 | 76 | V is computed by solving the corresponding discrete lyapunov 77 | equation using the doubling algorithm. See the documentation of 78 | `util.solve_discrete_lyapunov` for more information. 79 | 80 | Parameters 81 | ---------- 82 | A : array_like(float, ndim=2) 83 | An n x n matrix as described above. We assume in order for 84 | convergence that the eigenvalues of A have moduli bounded by 85 | unity 86 | B : array_like(float, ndim=2) 87 | An n x n matrix as described above. 
We assume in order for 88 | convergence that the eigenvalues of A have moduli bounded by 89 | unity 90 | max_it : scalar(int), optional(default=50) 91 | The maximum number of iterations 92 | 93 | Returns 94 | ======== 95 | gamma1: array_like(float, ndim=2) 96 | Represents the value V 97 | 98 | """ 99 | 100 | gamma1 = solve_discrete_lyapunov(A, B, max_it) 101 | 102 | return gamma1 103 | -------------------------------------------------------------------------------- /quantecon/tests/tests_models/test_lucastree.py: -------------------------------------------------------------------------------- 1 | """ 2 | Tests for quantecon.models.lucastree 3 | 4 | @author : Spencer Lyon 5 | @date : 2014-08-05 09:15:45 6 | 7 | """ 8 | from __future__ import division 9 | from nose.tools import (assert_equal, assert_true, assert_less_equal) 10 | import numpy as np 11 | from quantecon.models import LucasTree 12 | from quantecon.tests import (get_h5_data_file, get_h5_data_group, write_array, 13 | max_abs_diff) 14 | 15 | # helper parameters 16 | _tol = 1e-6 17 | 18 | 19 | # helper functions 20 | def _new_solution(tree, f, grp): 21 | "gets a new set of prices and updates the file" 22 | prices = tree.compute_lt_price(error_tol=_tol, max_iter=5000) 23 | write_array(f, grp, prices, "prices") 24 | return prices 25 | 26 | 27 | def _get_price_data(tree, force_new=False): 28 | "get price data from file, or create if necessary" 29 | with get_h5_data_file() as f: 30 | existed, grp = get_h5_data_group("lucastree") 31 | 32 | if force_new or not existed: 33 | if existed: 34 | grp.prices._f_remove() 35 | prices = _new_solution(tree, f, grp) 36 | 37 | return prices 38 | 39 | # if we made it here, the group exists and we should try to read 40 | # existing solutions 41 | try: 42 | # Try reading vfi 43 | prices = grp.prices[:] 44 | 45 | except: 46 | # doesn't exist. Let's create it 47 | prices = _new_solution(tree, f, grp) 48 | 49 | return prices 50 | 51 | 52 | # model parameters 53 | gamma = 2.0 54 | beta = 0.95 55 | alpha = 0.90 56 | sigma = 0.1 57 | 58 | # model object 59 | tree = LucasTree(gamma, beta, alpha, sigma) 60 | grid = tree.grid 61 | prices = _get_price_data(tree) 62 | 63 | 64 | def test_h5_access(): 65 | "lucastree: test access to data file" 66 | assert_true(prices is not None) 67 | 68 | 69 | def test_prices_shape(): 70 | "lucastree: test access shape of computed prices" 71 | assert_equal(prices.shape, grid.shape) 72 | 73 | 74 | def test_integrate(): 75 | "lucastree: integrate function" 76 | # just have it be a 1. Then integrate should give cdf 77 | g = lambda x: x*0.0 + 1.0 78 | 79 | # estimate using integrate function 80 | est = tree.integrate(g) 81 | 82 | # compute exact solution 83 | exact = tree.phi.cdf(tree._int_max) - tree.phi.cdf(tree._int_min) 84 | 85 | assert_less_equal(est - exact, .1) 86 | 87 | 88 | def test_lucas_op_fixed_point(): 89 | "lucastree: are prices a fixed point of lucas_operator" 90 | # transform from p to f 91 | old_f = prices / (grid ** gamma) 92 | 93 | # compute new f 94 | new_f = tree.lucas_operator(old_f) 95 | 96 | # transform from f to p 97 | new_p = new_f * grid**gamma 98 | 99 | # test if close. 
Allow errors one order of magnitude larger than the tol used 100 | # to compute prices 101 | assert_less_equal(max_abs_diff(new_p, prices), _tol*10) 102 | 103 | 104 | def test_lucas_prices_increasing(): 105 | "lucastree: test prices are increasing in y" 106 | # sort the array and test that it is the same 107 | sorted_prices = np.sort(np.copy(prices)) 108 | np.testing.assert_array_equal(sorted_prices, prices) 109 | -------------------------------------------------------------------------------- /quantecon/rank_nullspace.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from numpy.linalg import svd 3 | 4 | 5 | def rank_est(A, atol=1e-13, rtol=0): 6 | """ 7 | Estimate the rank (i.e. the dimension of the column space) of a matrix. 8 | 9 | The algorithm used by this function is based on the singular value 10 | decomposition of `A`. 11 | 12 | Parameters 13 | ---------- 14 | A : array_like(float, ndim=1 or 2) 15 | A should be at most 2-D. A 1-D array with length n will be 16 | treated as a 2-D with shape (1, n) 17 | atol : scalar(float), optional(default=1e-13) 18 | The absolute tolerance for a zero singular value. Singular 19 | values smaller than `atol` are considered to be zero. 20 | rtol : scalar(float), optional(default=0) 21 | The relative tolerance. Singular values less than rtol*smax are 22 | considered to be zero, where smax is the largest singular value. 23 | 24 | Returns 25 | ------- 26 | r : scalar(int) 27 | The estimated rank of the matrix. 28 | 29 | Note: If both `atol` and `rtol` are positive, the combined tolerance 30 | is the maximum of the two; that is: 31 | 32 | tol = max(atol, rtol * smax) 33 | 34 | Note: Singular values smaller than `tol` are considered to be zero. 35 | 36 | See also 37 | -------- 38 | numpy.linalg.matrix_rank 39 | matrix_rank is basically the same as this function, but it does 40 | not provide the option of a relative tolerance. 41 | 42 | """ 43 | 44 | A = np.atleast_2d(A) 45 | s = svd(A, compute_uv=False) 46 | tol = max(atol, rtol * s[0]) 47 | rank = int((s >= tol).sum()) 48 | 49 | return rank 50 | 51 | 52 | def nullspace(A, atol=1e-13, rtol=0): 53 | """ 54 | Compute an approximate basis for the nullspace of A. 55 | 56 | The algorithm used by this function is based on the singular value 57 | decomposition of `A`. 58 | 59 | Parameters 60 | ---------- 61 | A : array_like(float, ndim=1 or 2) 62 | A should be at most 2-D. A 1-D array with length k will be 63 | treated as a 2-D with shape (1, k) 64 | atol : scalar(float), optional(default=1e-13) 65 | The absolute tolerance for a zero singular value. Singular 66 | values smaller than `atol` are considered to be zero. 67 | rtol : scalar(float), optional(default=0) 68 | The relative tolerance. Singular values less than rtol*smax are 69 | considered to be zero, where smax is the largest singular value. 70 | 71 | Returns 72 | ------- 73 | ns : array_like(float, ndim=2) 74 | If `A` is an array with shape (m, k), then `ns` will be an array 75 | with shape (k, n), where n is the estimated dimension of the 76 | nullspace of `A`. The columns of `ns` are a basis for the 77 | nullspace; each element in numpy.dot(A, ns) will be 78 | approximately zero. 79 | 80 | Note: If both `atol` and `rtol` are positive, the combined tolerance 81 | is the maximum of the two; that is: 82 | 83 | tol = max(atol, rtol * smax) 84 | 85 | Note: Singular values smaller than `tol` are considered to be zero.
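Examples
--------
A minimal illustrative check (the matrix below is arbitrary, chosen only
because it has rank one and hence a one-dimensional nullspace):

>>> import numpy as np
>>> A = np.array([[1.0, 2.0], [2.0, 4.0]])   # hypothetical rank-one matrix
>>> ns = nullspace(A)
>>> ns.shape
(2, 1)
>>> np.allclose(np.dot(A, ns), 0.0)
True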
86 | 87 | """ 88 | 89 | A = np.atleast_2d(A) 90 | u, s, vh = svd(A) 91 | tol = max(atol, rtol * s[0]) 92 | nnz = (s >= tol).sum() 93 | ns = vh[nnz:].conj().T 94 | 95 | return ns 96 | -------------------------------------------------------------------------------- /quantecon/gth_solve.py: -------------------------------------------------------------------------------- 1 | """ 2 | Filename: gth_solve.py 3 | 4 | Author: Daisuke Oyama 5 | 6 | Routine to compute the stationary distribution of an irreducible Markov 7 | chain by the Grassmann-Taksar-Heyman (GTH) algorithm. 8 | 9 | """ 10 | import numpy as np 11 | 12 | try: 13 | xrange 14 | except NameError: # python3 15 | xrange = range 16 | 17 | 18 | def gth_solve(A, overwrite=False): 19 | r""" 20 | This routine computes the stationary distribution of an irreducible 21 | Markov transition matrix (stochastic matrix) or transition rate 22 | matrix (generator matrix) `A`. 23 | 24 | More generally, given a Metzler matrix (square matrix whose 25 | off-diagonal entries are all nonnegative) `A`, this routine solves 26 | for a nonzero solution `x` to `x (A - D) = 0`, where `D` is the 27 | diagonal matrix for which the rows of `A - D` sum to zero (i.e., 28 | :math:`D_{ii} = \sum_j A_{ij}` for all :math:`i`). One (and only 29 | one, up to normalization) nonzero solution exists corresponding to 30 | each recurrent class of `A`, and in particular, if `A` is 31 | irreducible, there is a unique solution; when there is more than 32 | one solution, the routine returns the solution that contains in its 33 | support the first index `i` such that no path connects `i` to any 34 | index larger than `i`. The solution is normalized so that its 1-norm 35 | equals one. This routine implements the Grassmann-Taksar-Heyman 36 | (GTH) algorithm [1]_, a numerically stable variant of Gaussian 37 | elimination, where only the off-diagonal entries of `A` are used as 38 | the input data. For a nice exposition of the algorithm, see Stewart 39 | [2]_, Chapter 10. 40 | 41 | Parameters 42 | ---------- 43 | A : array_like(float, ndim=2) 44 | Stochastic matrix or generator matrix. Must be of shape n x n. 45 | overwrite : bool, optional(default=False) 46 | Whether to overwrite `A`; may improve performance. 47 | 48 | Returns 49 | ------- 50 | x : numpy.ndarray(float, ndim=1) 51 | Stationary distribution of `A`. 52 | 53 | References 54 | ---------- 55 | .. [1] W. K. Grassmann, M. I. Taksar and D. P. Heyman, "Regenerative 56 | Analysis and Steady State Distributions for Markov Chains," 57 | Operations Research (1985), 1107-1116. 58 | 59 | .. [2] W. J. Stewart, Probability, Markov Chains, Queues, and 60 | Simulation, Princeton University Press, 2009. 61 | 62 | """ 63 | A1 = np.array(A, dtype=float, copy=not overwrite) 64 | 65 | if len(A1.shape) != 2 or A1.shape[0] != A1.shape[1]: 66 | raise ValueError('matrix must be square') 67 | 68 | n = A1.shape[0] 69 | 70 | x = np.zeros(n) 71 | 72 | # === Reduction === # 73 | for i in xrange(n-1): 74 | scale = np.sum(A1[i, i+1:n]) 75 | if scale <= 0: 76 | # There is one (and only one) recurrent class contained in 77 | # {0, ..., i}; 78 | # compute the solution associated with that recurrent class.
79 | n = i+1 80 | break 81 | A1[i+1:n, i] /= scale 82 | 83 | A1[i+1:n, i+1:n] += np.dot(A1[i+1:n, i:i+1], A1[i:i+1, i+1:n]) 84 | 85 | # === Backward substitution === # 86 | x[n-1] = 1 87 | for i in xrange(n-2, -1, -1): 88 | x[i] = np.dot(x[i+1:n], A1[i+1:n, i]) 89 | 90 | # === Normalization === # 91 | x /= np.sum(x) 92 | 93 | return x 94 | -------------------------------------------------------------------------------- /solutions/scipy_solutions.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "metadata": { 3 | "name": "", 4 | "signature": "sha256:da5a4e2755161c4b9d309d2697cd1a0f4c41ef40220265dd07daac825891c8ff" 5 | }, 6 | "nbformat": 3, 7 | "nbformat_minor": 0, 8 | "worksheets": [ 9 | { 10 | "cells": [ 11 | { 12 | "cell_type": "heading", 13 | "level": 1, 14 | "metadata": {}, 15 | "source": [ 16 | "quant-econ Solutions: SciPy" 17 | ] 18 | }, 19 | { 20 | "cell_type": "markdown", 21 | "metadata": {}, 22 | "source": [ 23 | "Solutions for http://quant-econ.net/py/scipy.html" 24 | ] 25 | }, 26 | { 27 | "cell_type": "heading", 28 | "level": 2, 29 | "metadata": {}, 30 | "source": [ 31 | "Exercise 1" 32 | ] 33 | }, 34 | { 35 | "cell_type": "markdown", 36 | "metadata": {}, 37 | "source": [ 38 | "Here's a reasonable solution:" 39 | ] 40 | }, 41 | { 42 | "cell_type": "code", 43 | "collapsed": false, 44 | "input": [ 45 | "def bisect(f, a, b, tol=10e-5):\n", 46 | " \"\"\"\n", 47 | " Implements the bisection root finding algorithm, assuming that f is a\n", 48 | " real-valued function on [a, b] satisfying f(a) < 0 < f(b).\n", 49 | " \"\"\"\n", 50 | " lower, upper = a, b\n", 51 | " if upper - lower < tol:\n", 52 | " return 0.5 * (upper + lower)\n", 53 | " else:\n", 54 | " middle = 0.5 * (upper + lower)\n", 55 | " print('Current mid point = {}'.format(middle))\n", 56 | " if f(middle) > 0: # Implies root is between lower and middle\n", 57 | " bisect(f, lower, middle)\n", 58 | " else: # Implies root is between middle and upper\n", 59 | " bisect(f, middle, upper)\n" 60 | ], 61 | "language": "python", 62 | "metadata": {}, 63 | "outputs": [], 64 | "prompt_number": 1 65 | }, 66 | { 67 | "cell_type": "markdown", 68 | "metadata": {}, 69 | "source": [ 70 | "We can test it as follows" 71 | ] 72 | }, 73 | { 74 | "cell_type": "code", 75 | "collapsed": false, 76 | "input": [ 77 | "import numpy as np\n", 78 | "f = lambda x: np.sin(4 * (x - 0.25)) + x + x**20 - 1\n", 79 | "\n", 80 | "bisect(f, 0, 1)" 81 | ], 82 | "language": "python", 83 | "metadata": {}, 84 | "outputs": [ 85 | { 86 | "output_type": "stream", 87 | "stream": "stdout", 88 | "text": [ 89 | "Current mid point = 0.5\n", 90 | "Current mid point = 0.25\n", 91 | "Current mid point = 0.375\n", 92 | "Current mid point = 0.4375\n", 93 | "Current mid point = 0.40625\n", 94 | "Current mid point = 0.421875\n", 95 | "Current mid point = 0.4140625\n", 96 | "Current mid point = 0.41015625\n", 97 | "Current mid point = 0.408203125\n", 98 | "Current mid point = 0.4091796875\n", 99 | "Current mid point = 0.40869140625\n", 100 | "Current mid point = 0.408447265625\n", 101 | "Current mid point = 0.4083251953125\n", 102 | "Current mid point = 0.40826416015625\n" 103 | ] 104 | } 105 | ], 106 | "prompt_number": 2 107 | } 108 | ], 109 | "metadata": {} 110 | } 111 | ] 112 | } -------------------------------------------------------------------------------- /docs/sphinxext/only_directives.py: -------------------------------------------------------------------------------- 1 | # 2 | # A pair of directives for inserting content that 
will only appear in 3 | # either html or latex. 4 | # 5 | 6 | from docutils.nodes import Body, Element 7 | from docutils.writers.html4css1 import HTMLTranslator 8 | try: 9 | from sphinx.latexwriter import LaTeXTranslator 10 | except ImportError: 11 | from sphinx.writers.latex import LaTeXTranslator 12 | 13 | import warnings 14 | warnings.warn("The numpydoc.only_directives module is deprecated;" 15 | "please use the only:: directive available in Sphinx >= 0.6", 16 | DeprecationWarning, stacklevel=2) 17 | 18 | from docutils.parsers.rst import directives 19 | 20 | class html_only(Body, Element): 21 | pass 22 | 23 | class latex_only(Body, Element): 24 | pass 25 | 26 | def run(content, node_class, state, content_offset): 27 | text = '\n'.join(content) 28 | node = node_class(text) 29 | state.nested_parse(content, content_offset, node) 30 | return [node] 31 | 32 | try: 33 | from docutils.parsers.rst import Directive 34 | except ImportError: 35 | from docutils.parsers.rst.directives import _directives 36 | 37 | def html_only_directive(name, arguments, options, content, lineno, 38 | content_offset, block_text, state, state_machine): 39 | return run(content, html_only, state, content_offset) 40 | 41 | def latex_only_directive(name, arguments, options, content, lineno, 42 | content_offset, block_text, state, state_machine): 43 | return run(content, latex_only, state, content_offset) 44 | 45 | for func in (html_only_directive, latex_only_directive): 46 | func.content = 1 47 | func.options = {} 48 | func.arguments = None 49 | 50 | _directives['htmlonly'] = html_only_directive 51 | _directives['latexonly'] = latex_only_directive 52 | else: 53 | class OnlyDirective(Directive): 54 | has_content = True 55 | required_arguments = 0 56 | optional_arguments = 0 57 | final_argument_whitespace = True 58 | option_spec = {} 59 | 60 | def run(self): 61 | self.assert_has_content() 62 | return run(self.content, self.node_class, 63 | self.state, self.content_offset) 64 | 65 | class HtmlOnlyDirective(OnlyDirective): 66 | node_class = html_only 67 | 68 | class LatexOnlyDirective(OnlyDirective): 69 | node_class = latex_only 70 | 71 | directives.register_directive('htmlonly', HtmlOnlyDirective) 72 | directives.register_directive('latexonly', LatexOnlyDirective) 73 | 74 | def setup(app): 75 | app.add_node(html_only) 76 | app.add_node(latex_only) 77 | 78 | # Add visit/depart methods to HTML-Translator: 79 | def visit_perform(self, node): 80 | pass 81 | def depart_perform(self, node): 82 | pass 83 | def visit_ignore(self, node): 84 | node.children = [] 85 | def depart_ignore(self, node): 86 | node.children = [] 87 | 88 | HTMLTranslator.visit_html_only = visit_perform 89 | HTMLTranslator.depart_html_only = depart_perform 90 | HTMLTranslator.visit_latex_only = visit_ignore 91 | HTMLTranslator.depart_latex_only = depart_ignore 92 | 93 | LaTeXTranslator.visit_html_only = visit_ignore 94 | LaTeXTranslator.depart_html_only = depart_ignore 95 | LaTeXTranslator.visit_latex_only = visit_perform 96 | LaTeXTranslator.depart_latex_only = depart_perform 97 | -------------------------------------------------------------------------------- /solutions/asset_solutions.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "metadata": { 3 | "name": "", 4 | "signature": "sha256:f95d50b3abd5dc694309bb15b01ad17d5b2269d03da61295961b5e7daef40de5" 5 | }, 6 | "nbformat": 3, 7 | "nbformat_minor": 0, 8 | "worksheets": [ 9 | { 10 | "cells": [ 11 | { 12 | "cell_type": "heading", 13 | "level": 1, 14 | 
"metadata": {}, 15 | "source": [ 16 | "quant-econ Solutions: The Lucas Asset Pricing Model" 17 | ] 18 | }, 19 | { 20 | "cell_type": "markdown", 21 | "metadata": {}, 22 | "source": [ 23 | "Solutions for http://quant-econ.net/py/markov_asset.html" 24 | ] 25 | }, 26 | { 27 | "cell_type": "code", 28 | "collapsed": false, 29 | "input": [ 30 | "%matplotlib inline" 31 | ], 32 | "language": "python", 33 | "metadata": {}, 34 | "outputs": [], 35 | "prompt_number": 2 36 | }, 37 | { 38 | "cell_type": "code", 39 | "collapsed": false, 40 | "input": [ 41 | "from __future__ import division # Omit for Python 3.x\n", 42 | "import numpy as np\n", 43 | "import matplotlib.pyplot as plt\n", 44 | "from quantecon.models import AssetPrices" 45 | ], 46 | "language": "python", 47 | "metadata": {}, 48 | "outputs": [], 49 | "prompt_number": 3 50 | }, 51 | { 52 | "cell_type": "heading", 53 | "level": 2, 54 | "metadata": {}, 55 | "source": [ 56 | "Exercise 1" 57 | ] 58 | }, 59 | { 60 | "cell_type": "code", 61 | "collapsed": false, 62 | "input": [ 63 | "# == Define primitives == #\n", 64 | "n = 5\n", 65 | "P = 0.0125 * np.ones((n, n))\n", 66 | "P += np.diag(0.95 - 0.0125 * np.ones(5))\n", 67 | "s = np.array([1.05, 1.025, 1.0, 0.975, 0.95])\n", 68 | "gamma = 2.0\n", 69 | "beta = 0.94\n", 70 | "zeta = 1.0\n", 71 | "\n", 72 | "ap = AssetPrices(beta, P, s, gamma)\n", 73 | "\n", 74 | "v = ap.tree_price()\n", 75 | "print(\"Lucas Tree Prices: \", v)\n", 76 | "\n", 77 | "v_consol = ap.consol_price(zeta)\n", 78 | "print(\"Consol Bond Prices: \", v_consol)\n", 79 | "\n", 80 | "P_tilde = P * s**(1-gamma)\n", 81 | "temp = beta * P_tilde.dot(v) + beta * P_tilde.dot(np.ones(n))\n", 82 | "print(\"Should be 0: \", v - temp)\n", 83 | "\n", 84 | "p_s = 150.0\n", 85 | "w_bar, w_bars = ap.call_option(zeta, p_s, T = [10,20,30])\n" 86 | ], 87 | "language": "python", 88 | "metadata": {}, 89 | "outputs": [ 90 | { 91 | "output_type": "stream", 92 | "stream": "stdout", 93 | "text": [ 94 | "('Lucas Tree Prices: ', array([ 12.72221763, 14.72515002, 17.57142236, 21.93570661, 29.47401578]))\n", 95 | "('Consol Bond Prices: ', array([ 87.56860139, 109.25108965, 148.67554548, 242.55144082,\n", 96 | " 753.87100476]))\n", 97 | "('Should be 0: ', array([ -1.77635684e-15, -1.77635684e-15, 0.00000000e+00,\n", 98 | " 0.00000000e+00, 0.00000000e+00]))\n" 99 | ] 100 | } 101 | ], 102 | "prompt_number": 4 103 | }, 104 | { 105 | "cell_type": "code", 106 | "collapsed": false, 107 | "input": [], 108 | "language": "python", 109 | "metadata": {}, 110 | "outputs": [] 111 | } 112 | ], 113 | "metadata": {} 114 | } 115 | ] 116 | } -------------------------------------------------------------------------------- /quantecon/tests/tests_models/tests_solow/test_cobb_douglas.py: -------------------------------------------------------------------------------- 1 | """ 2 | Test suite for solow.cobb_douglas.py module. 3 | 4 | @author : David R. Pugh 5 | 6 | """ 7 | import nose 8 | 9 | import numpy as np 10 | 11 | from .... 
models.solow import cobb_douglas 12 | 13 | params = {'A0': 1.0, 'g': 0.02, 'L0': 1.0, 'n': 0.02, 's': 0.15, 14 | 'alpha': 0.33, 'delta': 0.05} 15 | model = cobb_douglas.CobbDouglasModel(params) 16 | 17 | 18 | def test_ivp_solve(): 19 | """Testing computation of solution to the initial value problem.""" 20 | eps = 1e-1 21 | for g in np.linspace(eps, 0.05, 4): 22 | for n in np.linspace(eps, 0.05, 4): 23 | for s in np.linspace(eps, 1-eps, 4): 24 | for alpha in np.linspace(eps, 1-eps, 4): 25 | for delta in np.linspace(eps, 1-eps, 4): 26 | 27 | tmp_params = {'A0': 1.0, 'g': g, 'L0': 1.0, 'n': n, 28 | 's': s, 'alpha': alpha, 'delta': delta} 29 | model.params = tmp_params 30 | 31 | # solve the initial value problem 32 | t0, k0 = 0, 0.5 * model.steady_state 33 | numeric_soln = model.ivp.solve(t0, k0, T=100) 34 | 35 | # compute the analytic solution 36 | tmp_ti = numeric_soln[:, 0] 37 | analytic_soln = model.analytic_solution(tmp_ti, k0) 38 | 39 | # conduct the test 40 | np.testing.assert_allclose(numeric_soln, analytic_soln) 41 | 42 | 43 | def test_root_finders(): 44 | """Testing conditional logic in find_steady_state.""" 45 | valid_methods = ['brenth', 'brentq', 'ridder', 'bisect'] 46 | for method in valid_methods: 47 | actual_ss = model.find_steady_state(1e-6, 1e6, method=method) 48 | expected_ss = model.steady_state 49 | nose.tools.assert_almost_equals(actual_ss, expected_ss) 50 | 51 | 52 | def test_steady_state(): 53 | """Compare analytic steady state with numerical steady state.""" 54 | eps = 1e-1 55 | for g in np.linspace(eps, 0.05, 4): 56 | for n in np.linspace(eps, 0.05, 4): 57 | for s in np.linspace(eps, 1-eps, 4): 58 | for alpha in np.linspace(eps, 1-eps, 4): 59 | for delta in np.linspace(eps, 1-eps, 4): 60 | 61 | tmp_params = {'A0': 1.0, 'g': g, 'L0': 1.0, 'n': n, 62 | 's': s, 'alpha': alpha, 'delta': delta} 63 | model.params = tmp_params 64 | 65 | # use root finder to compute the steady state 66 | actual_ss = model.steady_state 67 | expected_ss = model.find_steady_state(1e-12, 1e12) 68 | 69 | # conduct the test 70 | nose.tools.assert_almost_equals(actual_ss, expected_ss) 71 | 72 | 73 | def test_valid_methods(): 74 | """Testing invalid method passed to find_steady_state.""" 75 | with nose.tools.assert_raises(ValueError): 76 | model.find_steady_state(1e-12, 1e12, method='invalid_method') 77 | 78 | 79 | def test_valid_parameters(): 80 | """Testing invalid value for output elasticity.""" 81 | with nose.tools.assert_raises(AttributeError): 82 | invalid_params = {'A0': 1.0, 'g': 0.02, 'L0': 1.0, 'n': 0.02, 83 | 's': 0.15, 'alpha': 1.1, 'delta': 0.03} 84 | cobb_douglas.CobbDouglasModel(invalid_params) 85 | -------------------------------------------------------------------------------- /quantecon/tests/tests_models/test_optgrowth.py: -------------------------------------------------------------------------------- 1 | """ 2 | tests for quantecon.models.optgrowth 3 | 4 | @author : Spencer Lyon 5 | @date : 2014-08-05 10:20:53 6 | 7 | TODO: I'd really like to see why the solutions only match analytical 8 | counter part up to 1e-2. Seems like we should be able to do better 9 | than that. 
10 | """ 11 | from __future__ import division 12 | from math import log 13 | import numpy as np 14 | from nose.tools import (assert_equal, assert_true, assert_less_equal) 15 | from quantecon import compute_fixed_point 16 | from quantecon.models import GrowthModel 17 | from quantecon.tests import (get_h5_data_file, get_h5_data_group, write_array, 18 | max_abs_diff) 19 | 20 | 21 | # helper parameters 22 | _tol = 1e-6 23 | 24 | 25 | # helper functions 26 | def _new_solution(gm, f, grp): 27 | "gets a new set of solution objects and updates the data file" 28 | 29 | # compute value function and policy rule using vfi 30 | v_init = 5 * gm.u(gm.grid) - 25 31 | v = compute_fixed_point(gm.bellman_operator, v_init, error_tol=_tol, 32 | max_iter=5000) 33 | # sigma = gm.get_greedy(v) 34 | 35 | # write all arrays to file 36 | write_array(f, grp, v, "v") 37 | 38 | # return data 39 | return v 40 | 41 | 42 | def _get_data(gm, force_new=False): 43 | "get solution data from file, or create if necessary" 44 | with get_h5_data_file() as f: 45 | existed, grp = get_h5_data_group("optgrowth") 46 | 47 | if force_new or not existed: 48 | if existed: 49 | grp.w._f_remove() 50 | v = _new_solution(gm, f, grp) 51 | 52 | return v 53 | 54 | # if we made it here, the group exists and we should try to read 55 | # existing solutions 56 | try: 57 | # Try reading data 58 | v = grp.v[:] 59 | 60 | except: 61 | # doesn't exist. Let's create it 62 | v = _new_solution(gm, f, grp) 63 | 64 | return v 65 | 66 | # model parameters 67 | alpha = 0.65 68 | f = lambda k: k ** alpha 69 | beta = 0.95 70 | u = np.log 71 | grid_max = 2 72 | grid_size = 150 73 | 74 | gm = GrowthModel(f, beta, u, grid_max, grid_size) 75 | 76 | v = _get_data(gm) 77 | 78 | # compute analytical policy function 79 | true_sigma = (1 - alpha * beta) * gm.grid**alpha 80 | 81 | # compute analytical value function 82 | ab = alpha * beta 83 | c1 = (log(1 - ab) + log(ab) * ab / (1 - ab)) / (1 - beta) 84 | c2 = alpha / (1 - ab) 85 | def v_star(k): 86 | return c1 + c2 * np.log(k) 87 | 88 | 89 | def test_h5_access(): 90 | "optgrowth: test access to data file" 91 | assert_true(v is not None) 92 | 93 | 94 | def test_bellman_return_both(): 95 | "optgrowth: bellman_operator compute_policy option works" 96 | assert_equal(len(gm.bellman_operator(v, compute_policy=True)), 2) 97 | 98 | 99 | def test_analytical_policy(): 100 | "optgrowth: approx sigma matches analytical" 101 | sigma = gm.compute_greedy(v) 102 | assert_less_equal(max_abs_diff(sigma, true_sigma), 1e-2) 103 | 104 | 105 | def test_analytical_vf(): 106 | "optgrowth: approx v matches analytical" 107 | true_v = v_star(gm.grid) 108 | assert_less_equal(max_abs_diff(v[1:-1], true_v[1:-1]), 5e-2) 109 | 110 | 111 | def test_vf_fixed_point(): 112 | "optgrowth: solution is fixed point of bellman" 113 | new_v = gm.bellman_operator(v) 114 | assert_less_equal(max_abs_diff(v[1:-1], new_v[1:-1]), 5e-2) 115 | -------------------------------------------------------------------------------- /examples/gaussian_contours.py: -------------------------------------------------------------------------------- 1 | """ 2 | Filename: gaussian_contours.py 3 | Authors: John Stachurski and Thomas Sargent 4 | 5 | Plots of bivariate Gaussians to illustrate the Kalman filter. 
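The script defines four alternative figures (plot1, plot2, plot3 and plot4);
the call near the bottom of the file selects which one is drawn when the
script is run.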
6 | """ 7 | 8 | from scipy import linalg 9 | import numpy as np 10 | import matplotlib.cm as cm 11 | from matplotlib.mlab import bivariate_normal 12 | import matplotlib.pyplot as plt 13 | 14 | # == Set up the Gaussian prior density p == # 15 | Sigma = [[0.4, 0.3], [0.3, 0.45]] 16 | Sigma = np.matrix(Sigma) 17 | x_hat = np.matrix([0.2, -0.2]).T 18 | # == Define the matrices G and R from the equation y = G x + N(0, R) == # 19 | G = [[1, 0], [0, 1]] 20 | G = np.matrix(G) 21 | R = 0.5 * Sigma 22 | # == The matrices A and Q == # 23 | A = [[1.2, 0], [0, -0.2]] 24 | A = np.matrix(A) 25 | Q = 0.3 * Sigma 26 | # == The observed value of y == # 27 | y = np.matrix([2.3, -1.9]).T 28 | 29 | # == Set up grid for plotting == # 30 | x_grid = np.linspace(-1.5, 2.9, 100) 31 | y_grid = np.linspace(-3.1, 1.7, 100) 32 | X, Y = np.meshgrid(x_grid, y_grid) 33 | 34 | 35 | def gen_gaussian_plot_vals(mu, C): 36 | "Z values for plotting the bivariate Gaussian N(mu, C)" 37 | m_x, m_y = float(mu[0]), float(mu[1]) 38 | s_x, s_y = np.sqrt(C[0, 0]), np.sqrt(C[1, 1]) 39 | s_xy = C[0, 1] 40 | return bivariate_normal(X, Y, s_x, s_y, m_x, m_y, s_xy) 41 | 42 | fig, ax = plt.subplots() 43 | ax.xaxis.grid(True, zorder=0) 44 | ax.yaxis.grid(True, zorder=0) 45 | 46 | # == Code for the 4 plots, choose one below == # 47 | 48 | 49 | def plot1(): 50 | Z = gen_gaussian_plot_vals(x_hat, Sigma) 51 | ax.contourf(X, Y, Z, 6, alpha=0.6, cmap=cm.jet) 52 | cs = ax.contour(X, Y, Z, 6, colors="black") 53 | ax.clabel(cs, inline=1, fontsize=10) 54 | 55 | 56 | def plot2(): 57 | Z = gen_gaussian_plot_vals(x_hat, Sigma) 58 | ax.contourf(X, Y, Z, 6, alpha=0.6, cmap=cm.jet) 59 | cs = ax.contour(X, Y, Z, 6, colors="black") 60 | ax.clabel(cs, inline=1, fontsize=10) 61 | ax.text(float(y[0]), float(y[1]), r"$y$", fontsize=20, color="black") 62 | 63 | 64 | def plot3(): 65 | Z = gen_gaussian_plot_vals(x_hat, Sigma) 66 | cs1 = ax.contour(X, Y, Z, 6, colors="black") 67 | ax.clabel(cs1, inline=1, fontsize=10) 68 | M = Sigma * G.T * linalg.inv(G * Sigma * G.T + R) 69 | x_hat_F = x_hat + M * (y - G * x_hat) 70 | Sigma_F = Sigma - M * G * Sigma 71 | new_Z = gen_gaussian_plot_vals(x_hat_F, Sigma_F) 72 | cs2 = ax.contour(X, Y, new_Z, 6, colors="black") 73 | ax.clabel(cs2, inline=1, fontsize=10) 74 | ax.contourf(X, Y, new_Z, 6, alpha=0.6, cmap=cm.jet) 75 | ax.text(float(y[0]), float(y[1]), r"$y$", fontsize=20, color="black") 76 | 77 | 78 | def plot4(): 79 | # Density 1 80 | Z = gen_gaussian_plot_vals(x_hat, Sigma) 81 | cs1 = ax.contour(X, Y, Z, 6, colors="black") 82 | ax.clabel(cs1, inline=1, fontsize=10) 83 | # Density 2 84 | M = Sigma * G.T * linalg.inv(G * Sigma * G.T + R) 85 | x_hat_F = x_hat + M * (y - G * x_hat) 86 | Sigma_F = Sigma - M * G * Sigma 87 | Z_F = gen_gaussian_plot_vals(x_hat_F, Sigma_F) 88 | cs2 = ax.contour(X, Y, Z_F, 6, colors="black") 89 | ax.clabel(cs2, inline=1, fontsize=10) 90 | # Density 3 91 | new_x_hat = A * x_hat_F 92 | new_Sigma = A * Sigma_F * A.T + Q 93 | new_Z = gen_gaussian_plot_vals(new_x_hat, new_Sigma) 94 | cs3 = ax.contour(X, Y, new_Z, 6, colors="black") 95 | ax.clabel(cs3, inline=1, fontsize=10) 96 | ax.contourf(X, Y, new_Z, 6, alpha=0.6, cmap=cm.jet) 97 | ax.text(float(y[0]), float(y[1]), r"$y$", fontsize=20, color="black") 98 | 99 | # == Choose a plot to generate == # 100 | plot1() 101 | plt.show() 102 | -------------------------------------------------------------------------------- /quantecon/tests/tests_models/test_jv.py: -------------------------------------------------------------------------------- 1 | """ 2 | tests 
for quantecon.jv 3 | 4 | @author : Spencer Lyon 5 | @date : 2014-08-01 13:53:29 6 | 7 | """ 8 | from __future__ import division 9 | import sys 10 | import unittest 11 | from nose.plugins.skip import SkipTest 12 | from quantecon.models import JvWorker 13 | from quantecon import compute_fixed_point 14 | from quantecon.tests import get_h5_data_file, write_array, max_abs_diff 15 | 16 | # specify params -- use defaults 17 | A = 1.4 18 | alpha = 0.6 19 | beta = 0.96 20 | grid_size = 50 21 | 22 | if sys.version_info[0] == 2: 23 | v_nm = "V" 24 | else: # python 3 25 | raise SkipTest("Python 3 tests aren't ready.") 26 | v_nm = "V_py3" 27 | 28 | 29 | def _new_solution(jv, f, grp): 30 | "gets new solution and updates data file" 31 | V = _solve_via_vfi(jv) 32 | write_array(f, grp, V, v_nm) 33 | 34 | return V 35 | 36 | 37 | def _solve_via_vfi(jv): 38 | "compute policy rules via value function iteration" 39 | v_init = jv.x_grid * 0.6 40 | V = compute_fixed_point(jv.bellman_operator, v_init, 41 | max_iter=3000, 42 | error_tol=1e-5) 43 | return V 44 | 45 | 46 | def _get_vf_guess(jv, force_new=False): 47 | with get_h5_data_file() as f: 48 | 49 | # See if the jv group already exists 50 | group_existed = True 51 | try: 52 | jv_group = f.getNode("/jv") 53 | except: 54 | # doesn't exist 55 | group_existed = False 56 | jv_group = f.create_group("/", "jv", "data for jv.py tests") 57 | 58 | if force_new or not group_existed: 59 | # group doesn't exist, or forced to create new data. 60 | # This function updates f in place and returns v_vfi, c_vfi, c_pfi 61 | V = _new_solution(jv, f, jv_group) 62 | 63 | return V 64 | 65 | # if we made it here, the group exists and we should try to read 66 | # existing solutions 67 | try: 68 | # Try reading vfi 69 | if sys.version_info[0] == 2: 70 | V = jv_group.V[:] 71 | else: # python 3 72 | V = jv_group.V_py3[:] 73 | 74 | except: 75 | # doesn't exist. Let's create it 76 | V = _new_solution(jv, f, jv_group) 77 | 78 | return V 79 | 80 | 81 | class TestJvWorkder(unittest.TestCase): 82 | 83 | @classmethod 84 | def setUpClass(cls): 85 | jv = JvWorker(A=A, alpha=alpha, beta=beta, grid_size=grid_size) 86 | cls.jv = jv 87 | 88 | # compute solution 89 | v_init = _get_vf_guess(jv) 90 | cls.V = compute_fixed_point(jv.bellman_operator, v_init) 91 | cls.s_pol, cls.phi_pol = jv.bellman_operator(cls.V * 0.999, 92 | return_policies=True) 93 | 94 | def test_low_x_prefer_s(self): 95 | "jv: s preferred to phi with low x?" 96 | # low x is an early index 97 | self.assertGreaterEqual(self.s_pol[0], self.phi_pol[0]) 98 | 99 | def test_high_x_prefer_phi(self): 100 | "jv: phi preferred to s with high x?" 101 | # low x is an early index 102 | self.assertGreaterEqual(self.phi_pol[-1], self.s_pol[-1]) 103 | 104 | def test_policy_sizes(self): 105 | "jv: policies correct size" 106 | n = self.jv.x_grid.size 107 | self.assertEqual(self.s_pol.size, n) 108 | self.assertEqual(self.phi_pol.size, n) 109 | 110 | def test_bellman_sol_fixed_point(self): 111 | "jv: solution to bellman is fixed point" 112 | new_V = self.jv.bellman_operator(self.V) 113 | self.assertLessEqual(max_abs_diff(new_V, self.V), 1e-4) 114 | -------------------------------------------------------------------------------- /quantecon/distributions.py: -------------------------------------------------------------------------------- 1 | """ 2 | Filename: distributions.py 3 | 4 | Probability distributions useful in economics. 
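Examples
--------
A small usage sketch (the parameter values below are arbitrary and purely
illustrative):

>>> from quantecon.distributions import BetaBinomial
>>> d = BetaBinomial(n=10, a=2.0, b=3.0)
>>> d.mean                          # n * a / (a + b)
4.0
>>> probs = d.pdf()                 # probabilities over k = 0, ..., 10
>>> probs.shape
(11,)
>>> abs(probs.sum() - 1.0) < 1e-10
True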
5 | 6 | References 7 | ---------- 8 | 9 | http://en.wikipedia.org/wiki/Beta-binomial_distribution 10 | 11 | """ 12 | from math import sqrt 13 | import numpy as np 14 | from scipy.special import binom, beta 15 | 16 | 17 | class BetaBinomial(object): 18 | """ 19 | The Beta-Binomial distribution 20 | 21 | Parameters 22 | ---------- 23 | n : scalar(int) 24 | First parameter to the Beta-binomial distribution 25 | a : scalar(float) 26 | Second parameter to the Beta-binomial distribution 27 | b : scalar(float) 28 | Third parameter to the Beta-binomial distribution 29 | 30 | Attributes 31 | ---------- 32 | n, a, b : see Parameters 33 | 34 | """ 35 | 36 | def __init__(self, n, a, b): 37 | self.n, self.a, self.b = n, a, b 38 | 39 | @property 40 | def mean(self): 41 | "mean" 42 | n, a, b = self.n, self.a, self.b 43 | return n * a / (a + b) 44 | 45 | @property 46 | def std(self): 47 | "standard deviation" 48 | return sqrt(self.var) 49 | 50 | @property 51 | def var(self): 52 | "Variance" 53 | n, a, b = self.n, self.a, self.b 54 | top = n*a*b * (a + b + n) 55 | btm = (a+b)**2.0 * (a+b+1.0) 56 | return top / btm 57 | 58 | @property 59 | def skew(self): 60 | "skewness" 61 | n, a, b = self.n, self.a, self.b 62 | t1 = (a+b+2*n) * (b - a) / (a+b+2) 63 | t2 = sqrt((1+a+b) / (n*a*b * (n+a+b))) 64 | return t1 * t2 65 | 66 | def pdf(self): 67 | r""" 68 | Generate the vector of probabilities for the Beta-binomial 69 | (n, a, b) distribution. 70 | 71 | The Beta-binomial distribution takes the form 72 | 73 | .. math:: 74 | p(k \,|\, n, a, b) = 75 | {n \choose k} \frac{B(k + a, n - k + b)}{B(a, b)}, 76 | \qquad k = 0, \ldots, n, 77 | 78 | where :math:`B` is the beta function. 79 | 80 | Parameters 81 | ---------- 82 | n : scalar(int) 83 | First parameter to the Beta-binomial distribution 84 | a : scalar(float) 85 | Second parameter to the Beta-binomial distribution 86 | b : scalar(float) 87 | Third parameter to the Beta-binomial distribution 88 | 89 | Returns 90 | ------- 91 | probs: array_like(float) 92 | Vector of probabilities over k 93 | 94 | """ 95 | n, a, b = self.n, self.a, self.b 96 | k = np.arange(n + 1) 97 | probs = binom(n, k) * beta(k + a, n - k + b) / beta(a, b) 98 | return probs 99 | 100 | # def cdf(self): 101 | # r""" 102 | # Generate the vector of cumulative probabilities for the 103 | # Beta-binomial(n, a, b) distribution. 104 | 105 | # The cdf of the Beta-binomial distribution takes the form 106 | 107 | # .. math:: 108 | # P(k \,|\, n, a, b) = 1 - 109 | # \frac{B(b+n-k-1, a+k+1) {}_3F_2(a,b;k)}{B(a,b) B(n-k, k+2)}, 110 | # \qquad k = 0, \ldots, n 111 | 112 | # where :math:`B` is the beta function. 113 | 114 | # Parameters 115 | # ---------- 116 | # n : scalar(int) 117 | # First parameter to the Beta-binomial distribution 118 | # a : scalar(float) 119 | # Second parameter to the Beta-binomial distribution 120 | # b : scalar(float) 121 | # Third parameter to the Beta-binomial distribution 122 | 123 | # Returns 124 | # ------- 125 | # probs: array_like(float) 126 | # Vector of probabilities over k 127 | 128 | # """ 129 | -------------------------------------------------------------------------------- /quantecon/tests/test_lqnash.py: -------------------------------------------------------------------------------- 1 | """ 2 | Filename: test_arma.py 3 | Authors: Chase Coleman 4 | Date: 07/24/2014 5 | 6 | Tests for lqnash.py file. 
7 | 8 | """ 9 | from __future__ import division 10 | import sys 11 | import os 12 | import unittest 13 | import numpy as np 14 | from numpy.testing import assert_allclose 15 | from quantecon.lqnash import nnash 16 | from quantecon.lqcontrol import LQ 17 | 18 | 19 | def test_noninteractive(): 20 | "Test case for when agents don't interact with each other" 21 | # Copied these values from test_lqcontrol 22 | a = np.array([[.95, 0.], [0, .95]]) 23 | b1 = np.array([.95, 0.]) 24 | b2 = np.array([0., .95]) 25 | r1 = np.array([[-.25, 0.], [0., 0.]]) 26 | r2 = np.array([[0., 0.], [0., -.25]]) 27 | q1 = np.array([[-.15]]) 28 | q2 = np.array([[-.15]]) 29 | f1, f2, p1, p2 = nnash(a, b1, b2, r1, r2, q1, q2, 0, 0, 0, 0, 0, 0, 30 | tol=1e-8, max_iter=10000) 31 | 32 | alq = a[:1, :1] 33 | blq = b1[:1].reshape((1, 1)) 34 | rlq = r1[:1, :1] 35 | qlq = q1 36 | 37 | lq_obj = LQ(qlq, rlq, alq, blq, beta=1.) 38 | p, f, d = lq_obj.stationary_values() 39 | 40 | assert_allclose(f1, f2[:, ::-1]) 41 | assert_allclose(f1[0, 0], f[0]) 42 | assert_allclose(p1[0, 0], p2[1, 1]) 43 | assert_allclose(p1[0, 0], p[0, 0]) 44 | 45 | 46 | def test_nnash(): 47 | "Use judd test case for nnash. Follows judd.m" 48 | # Define Parameters 49 | delta = 0.02 50 | d = np.array([[-1, 0.5], [0.5, -1]]) 51 | B = np.array([25, 25]) 52 | c1 = np.array([1, -2, 1]) 53 | c2 = np.array([1, -2, 1]) 54 | e1 = np.array([10, 10, 3]) 55 | e2 = np.array([10, 10, 3]) 56 | delta_1 = 1 - delta 57 | 58 | ## Define matrices 59 | a = np.array([[delta_1, 0, -delta_1*B[0]], 60 | [0, delta_1, -delta_1*B[1]], 61 | [0, 0, 1]]) 62 | 63 | b1 = delta_1 * np.array([[1, -d[0, 0]], 64 | [0, -d[1, 0]], 65 | [0, 0]]) 66 | b2 = delta_1 * np.array([[0, -d[0, 1]], 67 | [1, -d[1, 1]], 68 | [0, 0]]) 69 | 70 | r1 = -np.array([[0.5*c1[2], 0, 0.5*c1[1]], 71 | [0, 0, 0], 72 | [0.5*c1[1], 0, c1[0]]]) 73 | r2 = -np.array([[0, 0, 0], 74 | [0, 0.5*c2[2], 0.5*c2[1]], 75 | [0, 0.5*c2[1], c2[0]]]) 76 | 77 | q1 = np.array([[-0.5*e1[2], 0], [0, d[0, 0]]]) 78 | q2 = np.array([[-0.5*e2[2], 0], [0, d[1, 1]]]) 79 | 80 | s1 = np.zeros((2, 2)) 81 | s2 = np.copy(s1) 82 | 83 | w1 = np.array([[0, 0], 84 | [0, 0], 85 | [-0.5*e1[1], B[0]/2.]]) 86 | w2 = np.array([[0, 0], 87 | [0, 0], 88 | [-0.5*e2[1], B[1]/2.]]) 89 | 90 | m1 = np.array([[0, 0], [0, d[0, 1] / 2.]]) 91 | m2 = np.copy(m1) 92 | 93 | # build model and solve it 94 | f1, f2, p1, p2 = nnash(a, b1, b2, r1, r2, q1, q2, s1, s2, w1, w2, m1, m2) 95 | 96 | aaa = a - b1.dot(f1) - b2.dot(f2) 97 | aa = aaa[:2, :2] 98 | tf = np.eye(2)-aa 99 | tfi = np.linalg.inv(tf) 100 | xbar = tfi.dot(aaa[:2, 2]) 101 | 102 | # Define answers from matlab. 
TODO: this is ghetto 103 | f1_ml = np.asarray(np.matrix("""\ 104 | 0.243666582208565, 0.027236062661951, -6.827882928738190; 105 | 0.392370733875639, 0.139696450885998, -37.734107291009138""")) 106 | 107 | f2_ml = np.asarray(np.matrix("""\ 108 | 0.027236062661951, 0.243666582208565, -6.827882928738186; 109 | 0.139696450885998, 0.392370733875639, -37.734107291009131""")) 110 | 111 | xbar_ml = np.array([1.246871007582702, 1.246871007582685]) 112 | 113 | assert_allclose(f1, f1_ml) 114 | assert_allclose(f2, f2_ml) 115 | assert_allclose(xbar, xbar_ml) 116 | -------------------------------------------------------------------------------- /quantecon/tests/test_estspec.py: -------------------------------------------------------------------------------- 1 | """ 2 | tests for quantecon.estspec 3 | 4 | @author : Spencer Lyon 5 | @date : 2014-08-01 6 | 7 | TODO: write tests that check accuracy of returns 8 | 9 | """ 10 | from __future__ import division 11 | import unittest 12 | import numpy as np 13 | from quantecon import smooth, periodogram, ar_periodogram 14 | from quantecon.tests.util import capture 15 | 16 | 17 | x_20 = np.random.rand(20) 18 | x_21 = np.random.rand(21) 19 | 20 | 21 | class PeriodogramBase(unittest.TestCase): 22 | 23 | @classmethod 24 | def setUpClass(cls): 25 | if cls is PeriodogramBase: 26 | raise unittest.SkipTest("Skip PeriodogramBase tests" + 27 | " it's a base class") 28 | 29 | def test_func_w_shape_even_x(self): 30 | self.assertEqual(self.w_20.size, x_20.size // 2 + 1) 31 | 32 | def test_func_w_shape_odd_x(self): 33 | self.assertEqual(self.w_21.size, x_21.size // 2 + 1) 34 | 35 | def test_func_Iw_shape_even_x(self): 36 | self.assertEqual(self.Iw_20.size, x_20.size // 2 + 1) 37 | 38 | def test_func_Iw_shape_odd_x(self): 39 | self.assertEqual(self.Iw_21.size, x_21.size // 2 + 1) 40 | 41 | def test_func_w_Iw_same_shape(self): 42 | self.assertEqual(self.w_20.shape, self.Iw_20.shape) 43 | self.assertEqual(self.w_21.shape, self.Iw_21.shape) 44 | 45 | def test_func_I(self): 46 | pass 47 | 48 | 49 | class TestPeriodogram(PeriodogramBase): 50 | 51 | @classmethod 52 | def setUpClass(cls): 53 | if cls is PeriodogramBase: 54 | raise unittest.SkipTest("Skip BaseTest tests, it's a base class") 55 | super(TestPeriodogram, cls).setUpClass() 56 | cls.window_length = 7 57 | cls.w_20, cls.Iw_20 = periodogram(x_20) 58 | cls.w_21, cls.Iw_21 = periodogram(x_21) 59 | cls.funcname = "periodogram" 60 | 61 | 62 | class TestArPeriodogram(PeriodogramBase): 63 | 64 | @classmethod 65 | def setUpClass(cls): 66 | if cls is PeriodogramBase: 67 | raise unittest.SkipTest("Skip BaseTest tests, it's a base class") 68 | super(TestArPeriodogram, cls).setUpClass() 69 | cls.window_length = 7 70 | cls.w_20, cls.Iw_20 = ar_periodogram(x_20) 71 | cls.w_21, cls.Iw_21 = ar_periodogram(x_21) 72 | cls.funcname = "ar_periodogram" 73 | 74 | # I need to over-ride these b/c this function always has 75 | # w.size == x.size //2 76 | def test_func_w_shape_even_x(self): 77 | self.assertEqual(self.w_20.size, x_20.size // 2) 78 | 79 | def test_func_Iw_shape_even_x(self): 80 | self.assertEqual(self.Iw_20.size, x_20.size // 2) 81 | 82 | 83 | class TestSmooth(unittest.TestCase): 84 | 85 | @classmethod 86 | def setUpClass(cls): 87 | cls.x_20 = np.random.rand(20) 88 | cls.x_21 = np.random.rand(21) 89 | cls.window_length = 7 90 | 91 | def test_smooth(self): # does smoothing smooth? 
92 | pass 93 | 94 | def test_smooth_raise_long_window(self): 95 | "estspec: raise error if smooth(*a, window_len) too large" 96 | self.assertRaises(ValueError, smooth, self.x_20, window_len=25) 97 | 98 | def test_smooth_short_window_err(self): 99 | "estspec: raise error in smooth(*a, window_len) if window_len too small" 100 | self.assertRaises(ValueError, smooth, self.x_20, window_len=2) 101 | 102 | def test_smooth_default_hanning(self): 103 | "estspec: smooth defaults to hanning on unrecognized window" 104 | with capture(smooth, x=self.x_20, window="foobar") as output: 105 | self.assertRegexpMatches(output, "Defaulting") 106 | 107 | def test_smooth_window_len_must_be_odd(self): 108 | "estspec: smooth changes even window_len to odd" 109 | with capture(smooth, x=self.x_20, window_len=4) as output: 110 | self.assertRegexpMatches(output, "reset") 111 | -------------------------------------------------------------------------------- /solutions/oop_solutions.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "metadata": { 3 | "name": "", 4 | "signature": "sha256:39099b5d6503621c4076c7a6bd2ec02bc59c2747a07ebb54a8b27f1175ed52d6" 5 | }, 6 | "nbformat": 3, 7 | "nbformat_minor": 0, 8 | "worksheets": [ 9 | { 10 | "cells": [ 11 | { 12 | "cell_type": "heading", 13 | "level": 1, 14 | "metadata": {}, 15 | "source": [ 16 | "quant-econ Solutions: Object Oriented Programming" 17 | ] 18 | }, 19 | { 20 | "cell_type": "markdown", 21 | "metadata": {}, 22 | "source": [ 23 | "Solutions for http://quant-econ.net/py/python_oop.html" 24 | ] 25 | }, 26 | { 27 | "cell_type": "heading", 28 | "level": 2, 29 | "metadata": {}, 30 | "source": [ 31 | "Exercise 1" 32 | ] 33 | }, 34 | { 35 | "cell_type": "code", 36 | "collapsed": false, 37 | "input": [ 38 | "class ECDF(object):\n", 39 | "\n", 40 | " def __init__(self, observations):\n", 41 | " self.observations = observations\n", 42 | "\n", 43 | " def __call__(self, x):\n", 44 | " counter = 0.0\n", 45 | " for obs in self.observations:\n", 46 | " if obs <= x:\n", 47 | " counter += 1\n", 48 | " return counter / len(self.observations)" 49 | ], 50 | "language": "python", 51 | "metadata": {}, 52 | "outputs": [], 53 | "prompt_number": 1 54 | }, 55 | { 56 | "cell_type": "code", 57 | "collapsed": false, 58 | "input": [ 59 | "# == test == #\n", 60 | "\n", 61 | "from random import uniform\n", 62 | "samples = [uniform(0, 1) for i in range(10)]\n", 63 | "F = ECDF(samples)\n", 64 | "\n", 65 | "print(F(0.5)) # Evaluate ecdf at x = 0.5\n", 66 | "\n", 67 | "F.observations = [uniform(0, 1) for i in range(1000)]\n", 68 | "\n", 69 | "print(F(0.5))" 70 | ], 71 | "language": "python", 72 | "metadata": {}, 73 | "outputs": [ 74 | { 75 | "output_type": "stream", 76 | "stream": "stdout", 77 | "text": [ 78 | "0.5\n", 79 | "0.486\n" 80 | ] 81 | } 82 | ], 83 | "prompt_number": 2 84 | }, 85 | { 86 | "cell_type": "heading", 87 | "level": 2, 88 | "metadata": {}, 89 | "source": [ 90 | "Exercise 2" 91 | ] 92 | }, 93 | { 94 | "cell_type": "code", 95 | "collapsed": false, 96 | "input": [ 97 | "class Polynomial(object):\n", 98 | "\n", 99 | " def __init__(self, coefficients):\n", 100 | " \"\"\"\n", 101 | " Creates an instance of the Polynomial class representing \n", 102 | "\n", 103 | " p(x) = a_0 x^0 + ... 
+ a_N x^N, \n", 104 | " \n", 105 | " where a_i = coefficients[i].\n", 106 | " \"\"\"\n", 107 | " self.coefficients = coefficients\n", 108 | "\n", 109 | " def __call__(self, x):\n", 110 | " \"Evaluate the polynomial at x.\"\n", 111 | " y = 0\n", 112 | " for i, a in enumerate(self.coefficients):\n", 113 | " y += a * x**i \n", 114 | " return y\n", 115 | "\n", 116 | " def differentiate(self):\n", 117 | " \"Reset self.coefficients to those of p' instead of p.\"\n", 118 | " new_coefficients = []\n", 119 | " for i, a in enumerate(self.coefficients):\n", 120 | " new_coefficients.append(i * a)\n", 121 | " # Remove the first element, which is zero\n", 122 | " del new_coefficients[0] \n", 123 | " # And reset coefficients data to new values\n", 124 | " self.coefficients = new_coefficients\n" 125 | ], 126 | "language": "python", 127 | "metadata": {}, 128 | "outputs": [], 129 | "prompt_number": 3 130 | } 131 | ], 132 | "metadata": {} 133 | } 134 | ] 135 | } -------------------------------------------------------------------------------- /quantecon/models/solow/cobb_douglas.py: -------------------------------------------------------------------------------- 1 | """ 2 | Solow growth model with Cobb-Douglas aggregate production. 3 | 4 | @author : David R. Pugh 5 | @date : 2014-11-27 6 | 7 | """ 8 | from __future__ import division 9 | from textwrap import dedent 10 | 11 | import numpy as np 12 | import sympy as sym 13 | 14 | from . import model 15 | 16 | # declare key variables for the model 17 | t, X = sym.symbols('t'), sym.DeferredVector('X') 18 | A, k, K, L = sym.symbols('A, k, K, L') 19 | 20 | # declare required model parameters 21 | g, n, s, alpha, delta = sym.symbols('g, n, s, alpha, delta') 22 | 23 | 24 | class CobbDouglasModel(model.Model): 25 | 26 | _required_params = ['g', 'n', 's', 'alpha', 'delta', 'A0', 'L0'] 27 | 28 | def __init__(self, params): 29 | """ 30 | Create an instance of the Solow growth model with Cobb-Douglas 31 | aggregate production. 32 | 33 | Parameters 34 | ---------- 35 | params : dict 36 | Dictionary of model parameters. 37 | 38 | """ 39 | cobb_douglas_output = K**alpha * (A * L)**(1 - alpha) 40 | super(CobbDouglasModel, self).__init__(cobb_douglas_output, params) 41 | 42 | def __str__(self): 43 | """Human readable summary of a CESModel instance.""" 44 | m = super(CobbDouglasModel, self).__str__() 45 | m += " - alpha (output elasticity) : {alpha:g}\n" 46 | formatted_str = dedent(m.format(alpha=self.params['alpha'])) 47 | return formatted_str 48 | 49 | @property 50 | def steady_state(self): 51 | r""" 52 | Steady state value of capital stock (per unit effective labor). 53 | 54 | :getter: Return the current steady state value. 55 | :type: float 56 | 57 | Notes 58 | ----- 59 | The steady state value of capital stock (per unit effective labor) 60 | with Cobb-Douglas production is defined as 61 | 62 | .. math:: 63 | 64 | k^* = \bigg(\frac{s}{g + n + \delta}\bigg)^\frac{1}{1-\alpha} 65 | 66 | where `s` is the savings rate, :math:`g + n + \delta` is the effective 67 | depreciation rate, and :math:`\alpha` is the elasticity of output with 68 | respect to capital (i.e., capital's share). 
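Examples
--------
A short numerical illustration of the formula above. The parameter values
mirror those used in the test suite, and the calculation assumes the
effective depreciation rate equals g + n + delta, as stated:

>>> s, alpha = 0.15, 0.33
>>> g, n, delta = 0.02, 0.02, 0.05    # effective depreciation rate 0.09
>>> round((s / (g + n + delta))**(1 / (1 - alpha)), 2)
2.14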
69 | 70 | """ 71 | s = self.params['s'] 72 | alpha = self.params['alpha'] 73 | return (s / self.effective_depreciation_rate)**(1 / (1 - alpha)) 74 | 75 | def _validate_params(self, params): 76 | """Validate the model parameters.""" 77 | params = super(CobbDouglasModel, self)._validate_params(params) 78 | if params['alpha'] <= 0.0 or params['alpha'] >= 1.0: 79 | raise AttributeError('Output elasticity must be in (0, 1).') 80 | else: 81 | return params 82 | 83 | def analytic_solution(self, t, k0): 84 | """ 85 | Compute the analytic solution for the Solow model with Cobb-Douglas 86 | production technology. 87 | 88 | Parameters 89 | ---------- 90 | t : numpy.ndarray (shape=(T,)) 91 | Array of points at which the solution is desired. 92 | k0 : (float) 93 | Initial condition for capital stock (per unit of effective labor) 94 | 95 | Returns 96 | ------- 97 | analytic_traj : ndarray (shape=t.size, 2) 98 | Array representing the analytic solution trajectory. 99 | 100 | """ 101 | s = self.params['s'] 102 | alpha = self.params['alpha'] 103 | 104 | # lambda governs the speed of convergence 105 | lmbda = self.effective_depreciation_rate * (1 - alpha) 106 | 107 | # analytic solution for Solow model at time t 108 | k_t = (((s / (self.effective_depreciation_rate)) * (1 - np.exp(-lmbda * t)) + 109 | k0**(1 - alpha) * np.exp(-lmbda * t))**(1 / (1 - alpha))) 110 | 111 | # combine into a (T, 2) array 112 | analytic_traj = np.hstack((t[:, np.newaxis], k_t[:, np.newaxis])) 113 | 114 | return analytic_traj 115 | -------------------------------------------------------------------------------- /quantecon/models/optgrowth.py: -------------------------------------------------------------------------------- 1 | """ 2 | Filename: optgrowth.py 3 | 4 | Authors: John Stachurski and Thomas Sargent 5 | 6 | Solving the optimal growth problem via value function iteration. 7 | 8 | """ 9 | from __future__ import division # Omit for Python 3.x 10 | from textwrap import dedent 11 | import numpy as np 12 | from scipy.optimize import fminbound 13 | from scipy import interp 14 | 15 | 16 | class GrowthModel(object): 17 | """ 18 | 19 | This class defines the primitives representing the growth model. 20 | 21 | Parameters 22 | ---------- 23 | f : function, optional(default=k**.65) 24 | The production function; the default is the Cobb-Douglas 25 | production function with power of .65 26 | beta : scalar(int), optional(default=.95) 27 | The utility discounting parameter 28 | u : function, optional(default=np.log) 29 | The utility function. Default is log utility 30 | grid_max : scalar(int), optional(default=2) 31 | The maximum grid value 32 | grid_size : scalar(int), optional(default=150) 33 | The size of grid to use. 34 | 35 | Attributes 36 | ---------- 37 | f, beta, u : see Parameters 38 | grid : array_like(float, ndim=1) 39 | The grid over savings. 
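Examples
--------
A minimal usage sketch with the default primitives; the initial guess below
mirrors the one used in the test suite:

>>> gm = GrowthModel()
>>> gm.grid.size
150
>>> w = gm.bellman_operator(5 * gm.u(gm.grid) - 25)
>>> w.shape
(150,)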
40 | 41 | """ 42 | def __init__(self, f=lambda k: k**0.65, beta=0.95, u=np.log, 43 | grid_max=2, grid_size=150): 44 | 45 | self.u, self.f, self.beta = u, f, beta 46 | self.grid = np.linspace(1e-6, grid_max, grid_size) 47 | 48 | def __repr__(self): 49 | m = "GrowthModel(beta={b}, grid_max={gm}, grid_size={gs})" 50 | return m.format(b=self.beta, gm=self.grid.max(), gs=self.grid.size) 51 | 52 | def __str__(self): 53 | m = """\ 54 | GrowthModel: 55 | - beta (discount factor) : {b} 56 | - u (utility function) : {u} 57 | - f (production function) : {f} 58 | - grid bounds (bounds for grid over savings values) : ({gl}, {gm}) 59 | - grid points (number of points in grid for savings) : {gs} 60 | """ 61 | return dedent(m.format(b=self.beta, u=self.u, f=self.f, 62 | gl=self.grid.min(), gm=self.grid.max(), 63 | gs=self.grid.size)) 64 | 65 | def bellman_operator(self, w, compute_policy=False): 66 | """ 67 | The approximate Bellman operator, which computes and returns the 68 | updated value function Tw on the grid points. 69 | 70 | Parameters 71 | ---------- 72 | w : array_like(float, ndim=1) 73 | The value of the input function on different grid points 74 | compute_policy : Boolean, optional(default=False) 75 | Whether or not to compute policy function 76 | 77 | """ 78 | # === Apply linear interpolation to w === # 79 | Aw = lambda x: interp(x, self.grid, w) 80 | 81 | if compute_policy: 82 | sigma = np.empty(len(w)) 83 | 84 | # == set Tw[i] equal to max_c { u(c) + beta w(f(k_i) - c)} == # 85 | Tw = np.empty(len(w)) 86 | for i, k in enumerate(self.grid): 87 | objective = lambda c: - self.u(c) - self.beta * Aw(self.f(k) - c) 88 | c_star = fminbound(objective, 1e-6, self.f(k)) 89 | if compute_policy: 90 | # sigma[i] = argmax_c { u(c) + beta w(f(k_i) - c)} 91 | sigma[i] = c_star 92 | Tw[i] = - objective(c_star) 93 | 94 | if compute_policy: 95 | return Tw, sigma 96 | else: 97 | return Tw 98 | 99 | def compute_greedy(self, w): 100 | """ 101 | Compute the w-greedy policy on the grid points. 102 | 103 | Parameters 104 | ---------- 105 | w : array_like(float, ndim=1) 106 | The value of the input function on different grid points 107 | 108 | """ 109 | Tw, sigma = self.bellman_operator(w, compute_policy=True) 110 | return sigma 111 | --------------------------------------------------------------------------------