├── .conda ├── bld.bat ├── build.sh └── meta.yaml ├── .github ├── CODEOWNERS ├── ISSUE_TEMPLATE │ ├── bug_report.md │ ├── enhancement.md │ └── feature_request.md ├── pull_request_template.md └── workflows │ └── continuous-integration-workflow.yml ├── .gitignore ├── .pre-commit-config.yaml ├── .readthedocs.yaml ├── LICENSE ├── MANIFEST.in ├── README.rst ├── codecov.yml ├── development ├── documentation │ └── scalability │ │ ├── run_single_scalability_exercise.py │ │ └── scalability_setup.py └── testing │ ├── __init__.py │ ├── conftest.py │ ├── notifications.py │ ├── regression.py │ ├── test_cli.py │ └── testing_pull_request.py ├── docs ├── Makefile ├── _static │ ├── cscubs-2019 │ │ ├── paper.pdf │ │ ├── presentation.pdf │ │ └── sources.zip │ ├── css │ │ └── custom.css │ ├── funding │ │ └── Becker_Sebastian_Arbeitsprogramm.pdf │ ├── images │ │ ├── Logo_DIW_Berlin.svg │ │ ├── Logo_TRA1.png │ │ ├── OSE_logo_RGB.svg │ │ ├── UNI_Bonn_Logo_Standard_RZ_RGB.svg │ │ ├── algorithm_value_function_iteration.svg │ │ ├── book.svg │ │ ├── books.svg │ │ ├── coding.svg │ │ ├── computer.svg │ │ ├── dividers.svg │ │ ├── edit.svg │ │ ├── event_tree.svg │ │ ├── gears.svg │ │ ├── graduate-cap.svg │ │ ├── light-bulb.svg │ │ ├── note.svg │ │ ├── number-blocks.svg │ │ ├── peoples.svg │ │ ├── research.svg │ │ ├── respy-logo.svg │ │ ├── road.svg │ │ ├── study.svg │ │ └── timing_events.svg │ ├── respy.mplstyle │ └── thesis_proposals │ │ ├── Salience_and_human_capital.pdf │ │ └── Sparsity_and_human_capital.pdf ├── _templates │ ├── custom-about-us.html │ └── custom-intro.html ├── about_us.rst ├── conf.py ├── development │ ├── contributing_to_respy.rst │ ├── index.rst │ ├── releases.rst │ ├── roadmap.rst │ └── template_for_tutorials.rst ├── explanations │ ├── bib.rst │ ├── calibration.rst │ ├── computational_implementation.rst │ ├── economic_model.rst │ ├── glossary.rst │ ├── implementation_kw94.rst │ ├── index.rst │ ├── introduction.rst │ ├── mathematical_framework.rst │ ├── notation.rst │ ├── parameterization.rst │ ├── recommended_reading.rst │ └── refs.bib ├── how_to_guides │ ├── how_to_covariates.ipynb │ ├── how_to_example_models.rst │ ├── how_to_finite_mixture.ipynb │ ├── how_to_hyperbolic_discounting.ipynb │ ├── how_to_initial_conditions.ipynb │ ├── how_to_likelihood.ipynb │ ├── how_to_msm.ipynb │ ├── how_to_msm_estimation_exercise.ipynb │ ├── how_to_numerical_integration.ipynb │ ├── how_to_simulation.ipynb │ ├── how_to_specify_model.ipynb │ └── index.rst ├── index.rst ├── projects │ ├── _numerical_integration.py │ ├── estimating-keane-and-wolpin-1997-msm.ipynb │ ├── index.rst │ ├── keane-and-wolpin-1994.ipynb │ ├── numerical_integration.ipynb │ ├── research_papers.rst │ └── theses.rst ├── reference_guides │ ├── index.rst │ ├── randomness_and_reproducibility.rst │ ├── scalability.ipynb │ └── state_space.rst ├── release_notes.rst ├── rtd_environment.yml └── tutorials │ ├── index.rst │ ├── installation.rst │ ├── robinson_crusoe.ipynb │ ├── tutorial_exogenous_processes.ipynb │ ├── tutorial_experience.ipynb │ ├── tutorial_impatient_robinson.ipynb │ ├── tutorial_observables.ipynb │ └── tutorial_params_options_simulate.ipynb ├── environment.yml ├── respy ├── __init__.py ├── _numba.py ├── conditional_draws.py ├── config.py ├── conftest.py ├── data.py ├── exogenous_processes.py ├── interface.py ├── interpolate.py ├── likelihood.py ├── method_of_simulated_moments.py ├── parallelization.py ├── pre_processing │ ├── __init__.py │ ├── base_params.csv │ ├── data_checking.py │ ├── lagged_choice_params.csv │ ├── model_checking.py 
│ ├── model_processing.py │ ├── process_covariates.py │ └── specification_helpers.py ├── shared.py ├── simulate.py ├── solve.py ├── state_space.py └── tests │ ├── __init__.py │ ├── _former_code.py │ ├── random_model.py │ ├── resources │ ├── conditional_draws_fixture.pickle │ ├── kw_2000.csv │ ├── kw_2000.yaml │ ├── kw_2000_table_1_whites_choice_probabilities.csv │ ├── kw_2000_table_2_blacks_choice_probabilities.csv │ ├── kw_2000_table_3_wage_fit_blacks.csv │ ├── kw_2000_table_3_wage_fit_whites.csv │ ├── kw_2000_table_5_school_attainment.csv │ ├── kw_2000_table_a3_type_probabilities.csv │ ├── kw_94_one.csv │ ├── kw_94_one.yaml │ ├── kw_94_table_6.csv │ ├── kw_94_three.csv │ ├── kw_94_three.yaml │ ├── kw_94_two.csv │ ├── kw_94_two.yaml │ ├── kw_94_wp_table_2_1.csv │ ├── kw_94_wp_table_2_2.csv │ ├── kw_94_wp_table_2_3.csv │ ├── kw_97_basic.csv │ ├── kw_97_basic.yaml │ ├── kw_97_basic_respy.csv │ ├── kw_97_basic_respy.yaml │ ├── kw_97_data.csv │ ├── kw_97_extended.csv │ ├── kw_97_extended.yaml │ ├── kw_97_extended_respy.csv │ ├── kw_97_extended_respy.yaml │ ├── regression_vault.pickle │ ├── robinson_crusoe_basic.csv │ ├── robinson_crusoe_basic.yaml │ ├── robinson_crusoe_extended.csv │ ├── robinson_crusoe_extended.yaml │ ├── robinson_crusoe_with_observed_characteristics.csv │ └── robinson_crusoe_with_observed_characteristics.yaml │ ├── test_conditional_draws.py │ ├── test_exogenous_processes.py │ ├── test_flexible_choices.py │ ├── test_integration.py │ ├── test_interface.py │ ├── test_interpolate.py │ ├── test_likelihood.py │ ├── test_method_of_simulated_moments.py │ ├── test_model_processing.py │ ├── test_parallelization.py │ ├── test_process_covariates.py │ ├── test_randomness.py │ ├── test_regression.py │ ├── test_replication_kw_94.py │ ├── test_replication_kw_97.py │ ├── test_simulate.py │ ├── test_simulate │ ├── test_apply_law_of_motion_1_in.csv │ ├── test_apply_law_of_motion_1_optim_paras.yaml │ ├── test_apply_law_of_motion_1_out.csv │ ├── test_apply_law_of_motion_2_in.csv │ ├── test_apply_law_of_motion_2_optim_paras.yaml │ └── test_apply_law_of_motion_2_out.csv │ ├── test_solve.py │ └── utils.py ├── setup.cfg ├── setup.py └── tox.ini /.conda/bld.bat: -------------------------------------------------------------------------------- 1 | "%PYTHON%" setup.py install 2 | if errorlevel 1 exit 1 3 | -------------------------------------------------------------------------------- /.conda/build.sh: -------------------------------------------------------------------------------- 1 | $PYTHON setup.py install # Python command to install the script. 2 | -------------------------------------------------------------------------------- /.conda/meta.yaml: -------------------------------------------------------------------------------- 1 | {% set data = load_setup_py_data() %} 2 | 3 | package: 4 | name: respy 5 | version: {{ data.get('version') }} 6 | 7 | source: 8 | # git_url is nice in that it won't capture devenv stuff. However, it only captures 9 | # committed code, so pay attention. 
10 | git_url: ../ 11 | 12 | build: 13 | number: 0 14 | noarch: python 15 | 16 | requirements: 17 | host: 18 | - python >=3.7 19 | run: 20 | - python >=3.7 21 | - click 22 | - estimagic >=0.1.2 23 | - hypothesis 24 | - joblib 25 | - fastparquet 26 | - mkl 27 | - numba >=0.42 28 | - numpy>=1.21.0 29 | - pandas >=0.24 30 | - pytest 31 | - python-snappy 32 | - pyyaml 33 | - scipy 34 | test: 35 | requires: 36 | - pytest 37 | - pytest-xdist 38 | commands: 39 | - pytest -n auto 40 | source_files: 41 | - respy 42 | - tox.ini 43 | 44 | about: 45 | home: {{ data.get('url') }} 46 | license: {{ data.get('license') }} 47 | license_file: LICENSE 48 | summary: {{ data.get('description') }} 49 | dev_url: https://github.com/OpenSourceEconomics/respy 50 | doc_url: {{ data.get('url') }} 51 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | # CODEOWNERS are explained in 2 | # https://help.github.com/en/github/creating-cloning-and-archiving-repositories/ 3 | # about-code-owners 4 | 5 | # These owners will be the default owners for everything in the repo. Unless a later 6 | # match takes precedence, global owners will be requested for review when someone opens 7 | # a pull request. 8 | * @mo2561057 9 | 10 | # Owners responsible for the documentation. 11 | /docs/ @amageh 12 | 13 | # Owners responsible for the method of simulated moments. 14 | respy/msm.py @mo2561057 @amageh 15 | 16 | # Owners responsible for the state space. 17 | respy/state_space.py @mo2561057 18 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve. 4 | title: '' 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | * respy version used, if any: 11 | * Python version, if any: 12 | * Operating System: 13 | 14 | ### Describe the bug 15 | 16 | A clear and concise description of what the bug is. 17 | 18 | ### To reproduce 19 | 20 | Steps to reproduce the behavior: 21 | 22 | 1. Go to '...' 23 | 2. Click on '....' 24 | 3. Scroll down to '....' 25 | 4. See error 26 | 27 | ### Expected behavior 28 | 29 | A clear and concise description of what you expected to happen. 30 | 31 | ### Screenshots 32 | 33 | If applicable, add screenshots to help explain your problem. 34 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/enhancement.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Enhancement 3 | about: Enhance an existing component. 4 | title: '' 5 | labels: enhancement 6 | assignees: '' 7 | 8 | --- 9 | 10 | * respy version used, if any: 11 | * Python version, if any: 12 | * Operating System: 13 | 14 | ### What would you like to enhance and why? Is it related to an issue/problem? 15 | 16 | A clear and concise description of the current implementation and its limitations. 17 | 18 | ### Describe the solution you'd like 19 | 20 | A clear and concise description of what you want to happen. 21 | 22 | ### Describe alternatives you've considered 23 | 24 | A clear and concise description of any alternative solutions or features you've 25 | considered and why you have discarded them. 
26 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project. 4 | title: '' 5 | labels: feature-request 6 | assignees: '' 7 | 8 | --- 9 | 10 | * respy version used, if any: 11 | * Python version, if any: 12 | * Operating System: 13 | 14 | ### Is your feature request related to a problem? Please describe. 15 | 16 | A clear and concise description of what the problem is. 17 | 18 | ### Describe the solution you'd like 19 | 20 | A clear and concise description of what you want to happen. 21 | 22 | ### Describe alternatives you've considered 23 | 24 | A clear and concise description of any alternative solutions or features you've 25 | considered and why you have discarded them. 26 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | ### Current behavior 2 | 3 | What is the current state? 4 | 5 | ### Desired behavior 6 | 7 | What do you want to achieve? 8 | 9 | ### Solution / Implementation 10 | 11 | What is your solution? 12 | 13 | ### Todo 14 | 15 | - [ ] Review whether the documentation needs to be updated. 16 | - [ ] Document PR in release_notes.rst. 17 | -------------------------------------------------------------------------------- /.github/workflows/continuous-integration-workflow.yml: -------------------------------------------------------------------------------- 1 | name: Continuous Integration Workflow 2 | on: 3 | push: 4 | branches: 5 | - main 6 | pull_request: 7 | branches: 8 | - '*' 9 | 10 | env: 11 | CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} 12 | 13 | jobs: 14 | 15 | run-tests: 16 | 17 | name: Run tests for ${{ matrix.os }} on ${{ matrix.python-version }} 18 | runs-on: ${{ matrix.os }} 19 | strategy: 20 | fail-fast: false 21 | matrix: 22 | os: ['ubuntu-latest', 'macos-latest', 'windows-latest'] 23 | python-version: ['3.7', '3.8'] 24 | 25 | steps: 26 | - uses: actions/checkout@v2 27 | - uses: conda-incubator/setup-miniconda@v2 28 | with: 29 | auto-update-conda: true 30 | python-version: ${{ matrix.python-version }} 31 | 32 | - name: Install core dependencies. 33 | shell: bash -l {0} 34 | run: conda install -c conda-forge tox-conda 35 | 36 | ############################################################################### 37 | # tox-conda fixes on Windows (https://github.com/tox-dev/tox-conda/issues/37) # 38 | ############################################################################### 39 | 40 | - name: Install fixes for Python 3+ on Windows 41 | if: runner.os == 'Windows' 42 | shell: bash -l {0} 43 | run: conda install -c conda-forge -c opensourceeconomics conda-build estimagic matplotlib python-snappy 44 | 45 | ############### 46 | # Test Matrix # 47 | ############### 48 | 49 | # Unit tests. 50 | 51 | - name: Run unit tests and doctests. 52 | shell: bash -l {0} 53 | run: tox -e pytest -- -m "not slow and not integration and not end_to_end" --cov=./ --cov-report=xml -n auto 54 | 55 | - name: Upload coverage report 56 | if: runner.os == 'Linux' && matrix.python-version == '3.8' 57 | shell: bash -l {0} 58 | run: bash <(curl -s https://codecov.io/bash) -F unit -c 59 | 60 | # Integration tests. 61 | 62 | - name: Run integration tests. 
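# The "shell" and "run" keys below select only tests marked as "integration" (and not
# "slow") via the pytest -m marker expression, write an XML coverage report, and spread
# the tests over all available cores with "-n auto" (pytest-xdist).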
63 | shell: bash -l {0} 64 | run: tox -e pytest -- -m "not slow and integration" --cov=./ --cov-report=xml -n auto 65 | 66 | - name: Upload coverage report 67 | if: runner.os == 'Linux' && matrix.python-version == '3.8' 68 | shell: bash -l {0} 69 | run: bash <(curl -s https://codecov.io/bash) -F integration -c 70 | 71 | # End-to-end tests. 72 | 73 | - name: Run end_to_end tests. 74 | shell: bash -l {0} 75 | run: tox -e pytest -- -m "not slow and end_to_end" --cov=./ --cov-report=xml -n auto 76 | 77 | - name: Upload coverage report 78 | if: runner.os == 'Linux' && matrix.python-version == '3.8' 79 | shell: bash -l {0} 80 | run: bash <(curl -s https://codecov.io/bash) -F end_to_end -c 81 | 82 | ################################## 83 | # Validate codecov configuration # 84 | ################################## 85 | 86 | - name: Validate codecov.yml 87 | if: runner.os == 'Linux' && matrix.python-version == '3.8' 88 | shell: bash -l {0} 89 | run: cat codecov.yml | curl --data-binary @- https://codecov.io/validate 90 | 91 | 92 | pre-commit: 93 | 94 | name: Run pre-commit. 95 | runs-on: ubuntu-latest 96 | 97 | steps: 98 | - uses: actions/checkout@v2 99 | 100 | - name: Set up Python 3.8 101 | uses: actions/setup-python@v1 102 | with: 103 | python-version: 3.8 104 | 105 | - name: Install dependencies 106 | run: pip install tox 107 | 108 | - name: Run pre-commit 109 | run: tox -e pre-commit 110 | 111 | 112 | docs: 113 | 114 | name: Run documentation. 115 | runs-on: ubuntu-latest 116 | 117 | steps: 118 | - uses: actions/checkout@v2 119 | - uses: conda-incubator/setup-miniconda@v2 120 | with: 121 | auto-update-conda: true 122 | python-version: 3.8 123 | 124 | - name: Install core dependencies. 125 | shell: bash -l {0} 126 | run: conda install -c conda-forge tox-conda 127 | 128 | - name: Build docs 129 | shell: bash -l {0} 130 | run: tox -e sphinx 131 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Ignore folders. 2 | .cache/ 3 | .hypothesis/ 4 | .idea/ 5 | .ipynb_checkpoints/ 6 | .tox/ 7 | .vscode/ 8 | __pycache__/ 9 | __tutorial__/ 10 | dist/ 11 | docs/_build/ 12 | docs/_generated/ 13 | .respy/ 14 | 15 | # Ignore files. 
16 | .coverage 17 | pytestdebug.log 18 | *.sublime-project 19 | *.sublime-workspace 20 | *.egg-info 21 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/pre-commit/pre-commit-hooks 3 | rev: v4.0.1 4 | hooks: 5 | - id: check-added-large-files 6 | args: ['--maxkb=100'] 7 | - id: check-merge-conflict 8 | - id: check-yaml 9 | exclude: meta.yaml 10 | - id: debug-statements 11 | - id: end-of-file-fixer 12 | - repo: https://github.com/asottile/pyupgrade 13 | rev: v2.26.0 14 | hooks: 15 | - id: pyupgrade 16 | args: [--py36-plus] 17 | - repo: https://github.com/asottile/reorder_python_imports 18 | rev: v2.6.0 19 | hooks: 20 | - id: reorder-python-imports 21 | - repo: https://github.com/psf/black 22 | rev: 22.3.0 23 | hooks: 24 | - id: black 25 | - repo: https://github.com/asottile/blacken-docs 26 | rev: v1.12.1 27 | hooks: 28 | - id: blacken-docs 29 | additional_dependencies: [black] 30 | - repo: https://github.com/PyCQA/flake8 31 | rev: 3.9.2 32 | hooks: 33 | - id: flake8 34 | additional_dependencies: [ 35 | flake8-alfred, 36 | flake8-bugbear, 37 | flake8-builtins, 38 | flake8-comprehensions, 39 | flake8-docstrings, 40 | flake8-eradicate, 41 | flake8-print, 42 | flake8-pytest-style, 43 | flake8-todo, 44 | flake8-unused-arguments, 45 | pep8-naming, 46 | pydocstyle, 47 | Pygments, 48 | ] 49 | - repo: https://github.com/PyCQA/doc8 50 | rev: 0.9.0 51 | hooks: 52 | - id: doc8 53 | # - repo: https://github.com/codespell-project/codespell 54 | # rev: v1.17.1 55 | # hooks: 56 | # - id: codespell 57 | # - repo: https://github.com/mgedmin/check-manifest 58 | # rev: '0.41' 59 | # hooks: 60 | # - id: check-manifest 61 | - repo: meta 62 | hooks: 63 | - id: check-hooks-apply 64 | - id: check-useless-excludes 65 | # - id: identity # Prints all files passed to pre-commits. Debugging. 66 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | build: 4 | image: latest 5 | 6 | python: 7 | version: 3.8 8 | 9 | conda: 10 | environment: docs/rtd_environment.yml 11 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2015-2020 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of this 6 | software and associated documentation files (the "Software"), to deal in the Software 7 | without restriction, including without limitation the rights to use, copy, modify, 8 | merge, publish, distribute, sublicense, and/or sell copies of the Software, and to 9 | permit persons to whom the Software is furnished to do so, subject to the following 10 | conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all copies or 13 | substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, 16 | INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR 17 | PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE 18 | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT 19 | OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 | OTHER DEALINGS IN THE SOFTWARE. 21 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | # The MANIFEST.in specifies which files are copied over from a temporary directory to 2 | # site-packages after ``pip install``. Examples can be found here: 3 | # https://www.reddit.com/r/Python/comments/40s8qw/simplify_your_manifestin_commands/ and 4 | # https://blog.ionelmc.ro/presentations/packaging. 5 | 6 | # Test what is included in the package by running ``python setup.py sdist`` and inspect 7 | # the tarball. 8 | 9 | include CHANGES.rst 10 | include CITATION 11 | include LICENSE 12 | include README.rst 13 | include tox.ini 14 | include *.sh 15 | include *.yaml 16 | include *.yml 17 | 18 | graft .conda 19 | graft respy 20 | 21 | prune development 22 | prune docs 23 | 24 | global-exclude __pycache__ 25 | global-exclude .ipynb_checkpoints 26 | global-exclude *.py[co] 27 | global-exclude *-checkpoint.ipynb 28 | -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | codecov: 2 | branch: main 3 | 4 | coverage: 5 | precision: 2 6 | round: down 7 | range: 80...100 8 | status: 9 | project: 10 | default: 11 | threshold: 1% 12 | patch: 13 | default: 14 | threshold: 5% 15 | unit: 16 | flags: 17 | - unit 18 | integration: 19 | flags: 20 | - integration 21 | end_to_end: 22 | flags: 23 | - end_to_end 24 | 25 | ignore: 26 | - ".tox/**/*" 27 | - "setup.py" 28 | - "respy/conftest.py" 29 | - "respy/tests/**/*" 30 | - "development/**/*" 31 | - "docs/**/*" 32 | -------------------------------------------------------------------------------- /development/documentation/scalability/run_single_scalability_exercise.py: -------------------------------------------------------------------------------- 1 | import datetime as dt 2 | import json 3 | import os 4 | import sys 5 | 6 | 7 | def main(): 8 | """Evaluate the criterion function multiple times for a scalability report. 9 | 10 | The criterion function is evaluated ``maxfun``-times. The number of threads used is 11 | limited by environment variables. **respy** has to be imported after the environment 12 | variables are set as Numpy, Numba and others load them at import time. 13 | 14 | """ 15 | model = sys.argv[1] 16 | maxfun = int(sys.argv[2]) 17 | n_threads = int(sys.argv[3]) 18 | 19 | # Validate input. 20 | assert maxfun >= 0, "Maximum number of function evaluations cannot be negative." 21 | assert n_threads >= 1 or n_threads == -1, ( 22 | "Use -1 to impose no restrictions on maximum number of threads or choose a " 23 | "number higher than zero." 24 | ) 25 | 26 | # Set number of threads 27 | os.environ["NUMBA_NUM_THREADS"] = f"{n_threads}" 28 | os.environ["MKL_NUM_THREADS"] = f"{n_threads}" 29 | os.environ["OMP_NUM_THREADS"] = f"{n_threads}" 30 | os.environ["NUMEXPR_NUM_THREADS"] = f"{n_threads}" 31 | 32 | # Late import of respy to ensure that environment variables are read by Numpy, etc.. 
33 | import respy as rp 34 | 35 | # Get model 36 | params, options = rp.get_example_model(model, with_data=False) 37 | 38 | # Simulate the data 39 | simulate = rp.get_simulate_func(params, options) 40 | df = simulate(params) 41 | 42 | # Get the criterion function and the parameter vector. 43 | crit_func = rp.get_log_like_func(params, options, df) 44 | 45 | # Run the estimation 46 | start = dt.datetime.now() 47 | 48 | for _ in range(maxfun): 49 | crit_func(params) 50 | 51 | end = dt.datetime.now() 52 | 53 | # Aggregate information 54 | output = { 55 | "model": model, 56 | "maxfun": maxfun, 57 | "n_threads": n_threads, 58 | "start": str(start), 59 | "end": str(end), 60 | "duration": str(end - start), 61 | } 62 | 63 | # Save time to file 64 | with open("scalability_results.txt", "a+") as file: 65 | file.write(json.dumps(output)) 66 | file.write("\n") 67 | 68 | 69 | if __name__ == "__main__": 70 | main() 71 | -------------------------------------------------------------------------------- /development/documentation/scalability/scalability_setup.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | from pathlib import Path 3 | 4 | 5 | def main(): 6 | """Run the scalability exercise. 7 | 8 | Define the model, a list with different number of threads and a maximum number of 9 | function evaluations. 10 | 11 | """ 12 | model = "kw_97_basic" 13 | maxfun = 3 14 | 15 | filepath = Path(__file__).resolve().parent / "run_single_scalability_exercise.py" 16 | 17 | # Run Python 18 | for n_threads in [2, 4, 6, 8, 10, 12, 14]: 19 | subprocess.check_call( 20 | ["python", str(filepath), model, str(maxfun), str(n_threads)] 21 | ) 22 | 23 | 24 | if __name__ == "__main__": 25 | main() 26 | -------------------------------------------------------------------------------- /development/testing/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenSourceEconomics/respy/91e8a193fb7d5cf7212c15e50ae43f810bb9b1eb/development/testing/__init__.py -------------------------------------------------------------------------------- /development/testing/conftest.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import pytest 4 | 5 | 6 | @pytest.fixture(autouse=True) 7 | def _fresh_directory(tmp_path): 8 | """Each test is executed in a fresh directory.""" 9 | os.chdir(tmp_path) 10 | -------------------------------------------------------------------------------- /development/testing/notifications.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import socket 4 | import sys 5 | import warnings 6 | from pathlib import Path 7 | 8 | import apprise 9 | 10 | RECIPIENTS = {"socrates": "janos.gabler@gmail.com", "abacus": "traabe92@gmail.com"} 11 | 12 | 13 | def send_notification(title, body): 14 | """Send notification.""" 15 | hostname = socket.gethostname() 16 | 17 | recipient = RECIPIENTS.get(hostname, "eisenhauer@policy-lab.org") 18 | 19 | # This allows to run the scripts even when no notification can be send. 
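    # "HOME" is defined on Linux and macOS, "HOMEPATH" on Windows, so the
    # ".credentials" file is looked up in the user's home directory on every platform.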
20 | home = Path(os.environ.get("HOME") or os.environ.get("HOMEPATH")) 21 | credentials = home / ".credentials" 22 | 23 | if not credentials.exists(): 24 | warnings.warn("No configuration file for notifications available.") 25 | sys.exit(0) 26 | 27 | credentials = json.loads(credentials.read_text()) 28 | message_header = { 29 | "domain": "gmail.com", 30 | "to": recipient, 31 | "name": "respy", 32 | **credentials, 33 | } 34 | service = "mailto://{username}:{password}@{domain}?to={to}&name={name}" 35 | 36 | apobj = apprise.Apprise() 37 | apobj.add(service.format(**message_header)) 38 | apobj.notify(title=title, body=body) 39 | -------------------------------------------------------------------------------- /development/testing/regression.py: -------------------------------------------------------------------------------- 1 | """Create, run or investigate regression checks.""" 2 | import pickle 3 | import socket 4 | 5 | import click 6 | import numpy as np 7 | 8 | from respy.config import TEST_RESOURCES_DIR 9 | from respy.config import TOL_REGRESSION_TESTS 10 | from respy.tests.random_model import generate_random_model 11 | from respy.tests.test_regression import compute_log_likelihood 12 | from respy.tests.test_regression import load_regression_tests 13 | 14 | CONTEXT_SETTINGS = {"help_option_names": ["-h", "--help"]} 15 | 16 | 17 | def _prepare_message(idx_failures): 18 | hostname = socket.gethostname() 19 | subject = " respy: Regression Testing" 20 | if idx_failures: 21 | message = ( 22 | f"Failure during regression testing @{hostname} for test(s): " 23 | f"{idx_failures}." 24 | ) 25 | else: 26 | message = f"Regression testing is completed on @{hostname}." 27 | 28 | return subject, message 29 | 30 | 31 | def run_regression_tests(n_tests, strict, notification): 32 | """Run regression tests. 33 | 34 | Parameters 35 | ---------- 36 | n_tests : int 37 | Number of tests to run. If None, all are run. 38 | strict : bool, default False 39 | Early failure on error. 40 | notification : bool, default True 41 | Send notification with test report. 42 | 43 | """ 44 | tests = load_regression_tests() 45 | tests = tests[: n_tests + 1] 46 | 47 | results = [_check_single(test, strict) for test in tests] 48 | idx_failures = [i for i, x in enumerate(results) if not x] 49 | 50 | if idx_failures: 51 | click.secho(f"Failures: {idx_failures}", fg="red") 52 | else: 53 | click.secho("Tests succeeded.", fg="green") 54 | 55 | subject, message = _prepare_message(idx_failures) 56 | 57 | if notification: 58 | from development.testing.notifications import send_notification 59 | 60 | send_notification(subject, message) 61 | 62 | 63 | def create_regression_tests(n_tests, save): 64 | """Create a regression vault. 65 | 66 | Parameters 67 | ---------- 68 | n_tests : int 69 | How many tests are in the vault. 70 | save : bool, default True 71 | Flag for saving new tests to disk. 
72 | 73 | """ 74 | tests = [_create_single(i) for i in range(n_tests)] 75 | 76 | if save: 77 | with open(TEST_RESOURCES_DIR / "regression_vault.pickle", "wb") as p: 78 | pickle.dump(tests, p) 79 | 80 | 81 | def investigate_regression_test(idx): 82 | """Investigate regression tests.""" 83 | tests = load_regression_tests() 84 | params, options, exp_val = tests[idx] 85 | 86 | crit_val = compute_log_likelihood(params, options) 87 | 88 | assert np.isclose( 89 | crit_val, exp_val, rtol=TOL_REGRESSION_TESTS, atol=TOL_REGRESSION_TESTS 90 | ) 91 | 92 | 93 | def _check_single(test, strict): 94 | """Check a single test.""" 95 | params, options, exp_val = test 96 | 97 | try: 98 | crit_val = compute_log_likelihood(params, options) 99 | is_success = np.isclose( 100 | crit_val, exp_val, rtol=TOL_REGRESSION_TESTS, atol=TOL_REGRESSION_TESTS 101 | ) 102 | except Exception: 103 | is_success = False 104 | 105 | if strict is True: 106 | assert is_success, "Failed regression test." 107 | 108 | return is_success 109 | 110 | 111 | def _create_single(idx): 112 | """Create a single test.""" 113 | np.random.seed(idx) 114 | 115 | params, options = generate_random_model() 116 | 117 | crit_val = compute_log_likelihood(params, options) 118 | 119 | if not isinstance(crit_val, float): 120 | raise AssertionError(" ... value of criterion function too large.") 121 | 122 | return params, options, crit_val 123 | 124 | 125 | @click.group(context_settings=CONTEXT_SETTINGS) 126 | def cli(): 127 | """CLI manager for regression tests.""" 128 | pass 129 | 130 | 131 | @cli.command() 132 | @click.argument("number_of_tests", type=int) 133 | @click.option("--strict", is_flag=True, help="Immediate termination on failure.") 134 | @click.option("--notification/--no-notification", default=True, help="Send report.") 135 | def run(number_of_tests, strict, notification): 136 | """Run a number of regression tests.""" 137 | run_regression_tests( 138 | n_tests=number_of_tests, strict=strict, notification=notification 139 | ) 140 | 141 | 142 | @cli.command() 143 | @click.argument("number_of_test", type=int) 144 | def investigate(number_of_test): 145 | """Investigate a single regression test.""" 146 | investigate_regression_test(number_of_test) 147 | 148 | 149 | @cli.command() 150 | @click.argument("number_of_tests", type=int) 151 | @click.option("--save/--no-save", default=True, help="Saves new tests on disk.") 152 | def create(number_of_tests, save): 153 | """Create a new collection of regression tests.""" 154 | create_regression_tests(n_tests=number_of_tests, save=save) 155 | 156 | 157 | if __name__ == "__main__": 158 | cli() 159 | -------------------------------------------------------------------------------- /development/testing/test_cli.py: -------------------------------------------------------------------------------- 1 | from click.testing import CliRunner 2 | from testing.regression import create 3 | from testing.regression import investigate 4 | from testing.regression import run 5 | 6 | 7 | def test_investigate(): 8 | runner = CliRunner() 9 | result = runner.invoke(investigate, ["0"]) 10 | 11 | assert result.exit_code == 0 12 | 13 | 14 | def test_run(): 15 | runner = CliRunner() 16 | result = runner.invoke(run, ["0", "--no-notification"]) 17 | 18 | assert result.exit_code == 0 19 | 20 | 21 | def test_create(): 22 | runner = CliRunner() 23 | result = runner.invoke(create, ["1", "--no-save"]) 24 | 25 | assert result.exit_code == 0 26 | -------------------------------------------------------------------------------- 
/development/testing/testing_pull_request.py: -------------------------------------------------------------------------------- 1 | """Run a series of tests that are required for any pull request to be merged.""" 2 | import socket 3 | 4 | import click 5 | from click.testing import CliRunner 6 | from testing.regression import run 7 | 8 | import respy as rp 9 | 10 | 11 | def run_pull_request_tests(): 12 | is_short_run = socket.gethostname() in ["abacus", "socrates"] 13 | 14 | click.secho("Starting pytest", fg="green") 15 | rp.test() 16 | click.secho("Stopping pytest", fg="green") 17 | 18 | n_tests = 50 if is_short_run else 1000 19 | 20 | runner = CliRunner() 21 | 22 | click.secho("Starting regression test.", fg="green") 23 | runner.invoke(run, [str(n_tests), "--strict"]) 24 | click.secho("Stopping regression test.", fg="green") 25 | 26 | 27 | def main(): 28 | """Run tests for validating pull request.""" 29 | run_pull_request_tests() 30 | 31 | 32 | if __name__ == "__main__": 33 | main() 34 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # You can set these variables from the command line. 2 | SPHINXOPTS = -a 3 | SPHINXBUILD = sphinx-build 4 | SOURCEDIR = . 5 | BUILDDIR = _build 6 | 7 | # Put it first so that "make" without argument is like "make help". 8 | help: 9 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 10 | 11 | .PHONY: help Makefile 12 | 13 | # Catch-all target: route all unknown targets to Sphinx using the new 14 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 15 | %: Makefile 16 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 17 | livehtml: 18 | sphinx-autobuild -b html -i "*.respy.*" -i "**.ipynb_checkpoints" $(SPHINXOPTS) $(SOURCEDIR) $(BUILDDIR)/html 19 | -------------------------------------------------------------------------------- /docs/_static/cscubs-2019/paper.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenSourceEconomics/respy/91e8a193fb7d5cf7212c15e50ae43f810bb9b1eb/docs/_static/cscubs-2019/paper.pdf -------------------------------------------------------------------------------- /docs/_static/cscubs-2019/presentation.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenSourceEconomics/respy/91e8a193fb7d5cf7212c15e50ae43f810bb9b1eb/docs/_static/cscubs-2019/presentation.pdf -------------------------------------------------------------------------------- /docs/_static/cscubs-2019/sources.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenSourceEconomics/respy/91e8a193fb7d5cf7212c15e50ae43f810bb9b1eb/docs/_static/cscubs-2019/sources.zip -------------------------------------------------------------------------------- /docs/_static/css/custom.css: -------------------------------------------------------------------------------- 1 | div.prompt { 2 | display: none; 3 | } 4 | 5 | /* General */ 6 | 7 | h1, h2, h3, h4, h5 { 8 | color: #004385 !important; 9 | } 10 | 11 | h1, h2 { 12 | text-align: center; 13 | } 14 | 15 | /*a { 16 | color: #0C619F !important; 17 | }*/ 18 | 19 | a:hover { 20 | color: #187FB9 !important; 21 | } 22 | 23 | a:active { 24 | color: #187FB9 !important; 25 | } 26 | 27 | a.headerlink { 28 | color: #004385 !important; 29 | } 30 | 31 | 
a.headerlink:hover { 32 | color: #fff !important; 33 | background-color: #187FB9 !important; 34 | } 35 | 36 | /* color for naigation bar when item has been selected */ 37 | .navbar-light .navbar-nav .active>.nav-link { 38 | color: #187FB9 !important; 39 | } 40 | 41 | /* color for sidebar item when it has been selected */ 42 | 43 | .bd-sidebar .nav .active > a { 44 | color: #187FB9 !important; 45 | } 46 | 47 | /* toc on right of page, color when item is selected */ 48 | .toc-entry > .nav-link.active { 49 | color: #187FB9; 50 | border-left: 2px solid #187FB9; 51 | } 52 | 53 | /* Getting started index page */ 54 | 55 | .intro-card { 56 | padding: 40px 10px 20px 10px; 57 | margin: 10px 0px; 58 | max-height: 85%; 59 | 60 | } 61 | 62 | .intro-card .card-text { 63 | margin: 20px 0px; 64 | 65 | } 66 | 67 | .card-img-top + .card-body { 68 | padding-top: 0; 69 | } 70 | 71 | div.index-container { 72 | padding-bottom: 20px; 73 | } 74 | 75 | a.index-link { 76 | color: #333; 77 | text-decoration: none; 78 | } 79 | 80 | /* reference to user guide */ 81 | .gs-torefguide { 82 | align-items: center; 83 | font-size: 0.9rem; 84 | } 85 | 86 | .gs-torefguide .badge { 87 | background-color: #004C8D; 88 | margin: 10px 10px 10px 0px; 89 | padding: 5px; 90 | } 91 | 92 | .gs-torefguide a { 93 | margin-left: 5px; 94 | color: #004C8D; 95 | border-bottom: 1px solid #fff79c; 96 | box-shadow: 0px -15px 0px #fff79c inset; 97 | } 98 | 99 | .gs-torefguide p { 100 | margin-top: 1rem; 101 | } 102 | 103 | .gs-torefguide a:hover { 104 | margin-left: 5px; 105 | color: grey; 106 | text-decoration: none; 107 | border-bottom: 1px solid #b2ff80f3; 108 | box-shadow: 0px -15px 0px #b2ff80f3 inset; 109 | } 110 | 111 | .blue { color: #004C8D; } 112 | 113 | .boldblue { 114 | font-weight: bold; 115 | color: #004C8D; 116 | } 117 | 118 | .centerblue { 119 | text-align: center; 120 | color: #004C8D; 121 | } 122 | 123 | .orange { color: orange; } 124 | 125 | .verbatimblue { 126 | font-weight: verbatim; 127 | color: #004C8D; 128 | } 129 | 130 | .boldcenterblue { 131 | font-weight: bold; 132 | text-align: center; 133 | color: #004C8D; 134 | } 135 | 136 | /* card tweaks */ 137 | 138 | .shadow { 139 | box-shadow: 0 .5rem 0 rgba(0,0,0,.15) !important; 140 | transition: transform 0.2s, box-shadow 0.2s; 141 | } 142 | 143 | .shadow:hover { 144 | box-shadow: 0.3rem 0.7rem 0 rgba(0,0,0,.2) !important; 145 | transform: translate(-0.3rem,-0.2rem); 146 | transition: transform 0.2s, box-shadow 0.2s; 147 | } 148 | 149 | .card-header { 150 | font-weight: bold; 151 | font-size: 1.1em; 152 | display: flex; 153 | justify-content: center; 154 | align-items: center; 155 | min-height: 78px 156 | } 157 | 158 | .col, .col-1, .col-2, .col-3, .col-4, .col-5, .col-6, .col-7, .col-8, .col-9, .col-10, .col-11, .col-12, .col-auto, .col-lg, .col-lg-1, .col-lg-2, .col-lg-3, .col-lg-4, .col-lg-5, .col-lg-6, .col-lg-7, .col-lg-8, .col-lg-9, .col-lg-10, .col-lg-11, .col-lg-12, .col-lg-auto, .col-md, .col-md-1, .col-md-2, .col-md-3, .col-md-4, .col-md-5, .col-md-6, .col-md-7, .col-md-8, .col-md-9, .col-md-10, .col-md-11, .col-md-12, .col-md-auto, .col-sm, .col-sm-1, .col-sm-2, .col-sm-3, .col-sm-4, .col-sm-5, .col-sm-6, .col-sm-7, .col-sm-8, .col-sm-9, .col-sm-10, .col-sm-11, .col-sm-12, .col-sm-auto, .col-xl, .col-xl-1, .col-xl-2, .col-xl-3, .col-xl-4, .col-xl-5, .col-xl-6, .col-xl-7, .col-xl-8, .col-xl-9, .col-xl-10, .col-xl-11, .col-xl-12, .col-xl-auto { 159 | padding: 15px; 160 | } 161 | -------------------------------------------------------------------------------- 
/docs/_static/funding/Becker_Sebastian_Arbeitsprogramm.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenSourceEconomics/respy/91e8a193fb7d5cf7212c15e50ae43f810bb9b1eb/docs/_static/funding/Becker_Sebastian_Arbeitsprogramm.pdf -------------------------------------------------------------------------------- /docs/_static/images/Logo_DIW_Berlin.svg: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /docs/_static/images/Logo_TRA1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenSourceEconomics/respy/91e8a193fb7d5cf7212c15e50ae43f810bb9b1eb/docs/_static/images/Logo_TRA1.png -------------------------------------------------------------------------------- /docs/_static/images/UNI_Bonn_Logo_Standard_RZ_RGB.svg: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /docs/_static/images/book.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 5 | 6 | 7 | 16 | 17 | 18 | 19 | -------------------------------------------------------------------------------- /docs/_static/images/books.svg: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /docs/_static/images/coding.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 5 | 6 | 7 | 9 | 10 | 11 | 12 | 13 | 15 | 16 | 17 | 18 | 19 | 21 | 22 | 23 | 24 | -------------------------------------------------------------------------------- /docs/_static/images/dividers.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 5 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | -------------------------------------------------------------------------------- /docs/_static/images/edit.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 5 | 6 | 14 | 20 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | -------------------------------------------------------------------------------- /docs/_static/images/graduate-cap.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 7 | 8 | 9 | 14 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | -------------------------------------------------------------------------------- /docs/_static/images/light-bulb.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 5 | 6 | 11 | 13 | 15 | 17 | 19 | 21 | 23 | 25 | 27 | 29 | 30 | 32 | 33 | 34 | -------------------------------------------------------------------------------- /docs/_static/images/note.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 5 | 6 | 7 | 8 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 
33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | -------------------------------------------------------------------------------- /docs/_static/images/number-blocks.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 6 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | -------------------------------------------------------------------------------- /docs/_static/images/research.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 5 | 6 | 8 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | -------------------------------------------------------------------------------- /docs/_static/images/road.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 5 | 6 | 7 | 10 | 11 | 12 | 13 | 14 | 30 | 31 | 32 | 33 | 34 | 36 | 37 | 38 | 39 | 40 | 42 | 43 | 44 | 45 | 46 | 48 | 49 | 50 | 51 | 52 | 54 | 55 | 56 | 57 | 58 | 60 | 61 | 62 | 63 | 64 | 66 | 67 | 68 | 69 | 70 | 72 | 73 | 74 | 75 | 76 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 | 93 | 94 | 95 | 96 | 97 | 98 | 99 | 100 | 101 | 102 | 103 | 104 | 105 | 106 | 107 | 108 | 109 | 110 | 111 | -------------------------------------------------------------------------------- /docs/_static/images/study.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 5 | 6 | 7 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | -------------------------------------------------------------------------------- /docs/_static/respy.mplstyle: -------------------------------------------------------------------------------- 1 | lines.linewidth : 3 # line width in points 2 | font.size : 12 3 | axes.axisbelow : True # Draw axis grid lines and ticks below patches (True); above 4 | # patches but below lines ('line'); or above all (False). 5 | # Forces grid lines below figures. 6 | legend.frameon : False # Legend patch transparency. 7 | legend.scatterpoints : 3 # Number of scatter points in legend. 8 | figure.autolayout : true # Same as plt.tight_layout(). 
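The style sheet above can be applied to any matplotlib figure. A minimal usage sketch, assuming the script is run from the repository root so that the relative path resolves (the plotted data are purely illustrative):

import matplotlib.pyplot as plt

# Load the respy style defined in docs/_static/respy.mplstyle: thicker lines, larger
# font, grid drawn below other artists, frameless legend, and automatic tight layout.
plt.style.use("docs/_static/respy.mplstyle")

fig, ax = plt.subplots()
ax.plot([1, 2, 3], [2, 3, 5], label="illustrative series")
ax.legend()
fig.savefig("example_plot.png")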
9 | -------------------------------------------------------------------------------- /docs/_static/thesis_proposals/Salience_and_human_capital.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenSourceEconomics/respy/91e8a193fb7d5cf7212c15e50ae43f810bb9b1eb/docs/_static/thesis_proposals/Salience_and_human_capital.pdf -------------------------------------------------------------------------------- /docs/_static/thesis_proposals/Sparsity_and_human_capital.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenSourceEconomics/respy/91e8a193fb7d5cf7212c15e50ae43f810bb9b1eb/docs/_static/thesis_proposals/Sparsity_and_human_capital.pdf -------------------------------------------------------------------------------- /docs/_templates/custom-about-us.html: -------------------------------------------------------------------------------- 1 | 8 | -------------------------------------------------------------------------------- /docs/_templates/custom-intro.html: -------------------------------------------------------------------------------- 1 | 22 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | import datetime as dt 2 | import os 3 | import sys 4 | 5 | 6 | # Set variable so that todos are shown in local build 7 | on_rtd = os.environ.get("READTHEDOCS") == "True" 8 | 9 | # If extensions (or modules to document with autodoc) are in another directory, 10 | # add these directories to sys.path here. If the directory is relative to the 11 | # documentation root, use os.path.abspath to make it absolute, like shown here. 12 | sys.path.insert(0, os.path.abspath("..")) 13 | 14 | # -- Project information ----------------------------------------------------- 15 | 16 | project = "respy" 17 | copyright = f"2015-{dt.datetime.now().year}, The respy Development Team" # noqa: A001 18 | author = "The respy Development Team" 19 | 20 | # The full version, including alpha/beta/rc tags. 21 | release = "2.1.1" 22 | version = ".".join(release.split(".")[:2]) 23 | 24 | # -- General configuration ------------------------------------------------ 25 | 26 | master_doc = "index" 27 | 28 | extensions = [ 29 | "sphinx.ext.autodoc", 30 | "sphinx.ext.autosummary", 31 | "sphinx.ext.coverage", 32 | "sphinx.ext.doctest", 33 | "sphinx.ext.extlinks", 34 | "sphinx.ext.ifconfig", 35 | "sphinx.ext.intersphinx", 36 | "sphinx.ext.mathjax", 37 | "sphinx.ext.todo", 38 | "sphinx.ext.viewcode", 39 | "sphinxcontrib.bibtex", 40 | "nbsphinx", 41 | "numpydoc", 42 | "autoapi.extension", 43 | "sphinx_tabs.tabs", 44 | ] 45 | 46 | autodoc_mock_imports = [ 47 | "chaospy", 48 | "estimagic", 49 | "hypothesis", 50 | "joblib", 51 | "numba", 52 | "numpy", 53 | "pandas", 54 | "pytest", 55 | "scipy", 56 | "yaml", 57 | ] 58 | 59 | extlinks = { 60 | "ghuser": ("https://github.com/%s", "@"), 61 | "gh": ("https://github.com/OpenSourceEconomics/respy/pull/%s", "#"), 62 | } 63 | 64 | intersphinx_mapping = { 65 | "numba": ("http://numba.pydata.org/numba-doc/latest", None), 66 | "numpy": ("https://numpy.org/doc/stable", None), 67 | "pandas": ("https://pandas.pydata.org/pandas-docs/stable", None), 68 | "python": ("https://docs.python.org/3.8", None), 69 | } 70 | 71 | bibtex_bibfiles = ["explanations/refs.bib"] 72 | 73 | # Add any paths that contain templates here, relative to this directory. 
74 | templates_path = ["_templates"] 75 | html_static_path = ["_static"] 76 | 77 | # The language for content autogenerated by Sphinx. Refer to documentation 78 | # for a list of supported languages. 79 | language = "en" 80 | 81 | # List of patterns, relative to source directory, that match files and 82 | # directories to ignore when looking for source files. 83 | # This patterns also effect to html_static_path and html_extra_path 84 | exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "**.ipynb_checkpoints"] 85 | 86 | # If true, `todo` and `todoList` produce output, else they produce nothing. 87 | if not on_rtd: 88 | todo_include_todos = True 89 | todo_emit_warnings = True 90 | 91 | # Configure Sphinx' linkcheck 92 | linkcheck_ignore = [ 93 | r"http://cscubs\.cs\.uni-bonn\.de/*.", 94 | r"https://(dx\.)?doi\.org/*.", 95 | r"https://jstor\.org/*.", 96 | r"https://zenodo\.org/*.", 97 | ] 98 | 99 | # Configuration for nbsphinx 100 | nbsphinx_execute = "never" 101 | nbsphinx_allow_errors = False 102 | nbsphinx_prolog = r""" 103 | {% set docname = 'docs/' + env.doc2path(env.docname, base=None) %} 104 | 105 | .. only:: html 106 | 107 | .. nbinfo:: 108 | 109 | View and download the notebook `here `_! 110 | 111 | """ 112 | 113 | # Configuration for numpydoc 114 | numpydoc_xref_param_type = True 115 | numpydoc_xref_ignore = {"type", "optional", "default", "of"} 116 | 117 | # Configuration for autodoc 118 | autosummary_generate = True 119 | 120 | # Configuration for autoapi 121 | autoapi_type = "python" 122 | autoapi_dirs = ["../respy"] 123 | autoapi_ignore = ["*/tests/*"] 124 | 125 | 126 | # -- Options for HTML output ---------------------------------------------- 127 | 128 | # The theme to use for HTML and HTML Help pages. See the documentation for 129 | # a list of builtin themes. 130 | html_theme = "pydata_sphinx_theme" 131 | 132 | html_theme_options = { 133 | "github_url": "https://github.com/OpenSourceEconomics/respy", 134 | "twitter_url": "https://twitter.com/open_econ", 135 | } 136 | 137 | html_css_files = ["css/custom.css"] 138 | 139 | html_logo = "_static/images/respy-logo.svg" 140 | 141 | 142 | html_sidebars = { 143 | "index": ["search-field", "custom-intro"], 144 | "about_us": ["search-field", "custom-about-us"], 145 | } 146 | -------------------------------------------------------------------------------- /docs/development/index.rst: -------------------------------------------------------------------------------- 1 | =========== 2 | Development 3 | =========== 4 | 5 | This page offers information on the development of **respy** and provides guidelines 6 | for contributors. 7 | 8 | .. raw:: html 9 | 10 |
97 | 98 | 99 | 100 | 101 | .. toctree:: 102 | :maxdepth: 1 103 | :hidden: 104 | 105 | ../release_notes 106 | contributing_to_respy 107 | releases 108 | roadmap 109 | template_for_tutorials 110 | -------------------------------------------------------------------------------- /docs/development/releases.rst: -------------------------------------------------------------------------------- 1 | Releases 2 | ======== 3 | 4 | What is the new version number? 5 | ------------------------------- 6 | 7 | The version number depends on the severity of the changes and adheres to `semantic 8 | versioning <https://semver.org>`_. The format is ``<major>.<minor>.<patch>``. Increment 9 | 10 | 1. the major version when you make incompatible API changes, 11 | 2. the minor version when you add functionality in a backwards compatible manner, and 12 | 3. the patch version when you make backwards compatible bug fixes. 13 | 14 | You can create development releases which are allowed to be deleted once a new version 15 | is released. Append ``dev0`` to the version string. Do not separate the string with a 16 | dash or another dot because ``conda`` will complain about the format. 17 | 18 | 19 | How to release a new version? 20 | ----------------------------- 21 | 22 | 1. First, we draft a release on GitHub. Go to 23 | https://github.com/OpenSourceEconomics/respy/releases and click on "Draft a new 24 | release". Fill in the new version number as a tag and title. You can write a summary 25 | for the release, but also do it later. Important: Only save the draft. Do not 26 | publish yet. 27 | 28 | 2. Second, we need to create a final PR to prepare everything for the new version. The 29 | name of the PR and the commit message will be "Release vx.y.z". We need to 30 | 31 | - use ``bumpversion <part>`` (where ``<part>`` is ``major``, ``minor``, or ``patch``) to increment the correct part of 32 | the version number in all files. 33 | - update information in ``CHANGES.rst`` to have a summary of the changes which 34 | can also be posted in the GitHub repository under the tag. 35 | 36 | 3. Run 37 | 38 | .. code-block:: bash 39 | 40 | $ conda build . 41 | 42 | and check whether you can actually build a new version. If you experience errors, fix 43 | them here. Depending on whether you allowed automatic upload to Anaconda, the release 44 | appears under your account. Feel free to delete it. 45 | 46 | 4. Merge the PR into main. 47 | 48 | 5. After that, revisit the draft of the release. Make sure everything is fine. Now, you 49 | click on "Publish release" which creates a version tag on the latest commit of the 50 | specified branch. Make sure to target the main branch. 51 | 52 | 6. Check out the tag in your local repository and run 53 | 54 | .. code-block:: bash 55 | 56 | $ conda build . --user OpenSourceEconomics 57 | 58 | In case automatic upload is disabled, copy the path to the built package and type 59 | 60 | .. code-block:: bash 61 | 62 | $ anaconda upload <path-to-built-package> --user OpenSourceEconomics 63 | 64 | 7. Visit `Anaconda.org <https://anaconda.org>`_ and check 65 | whether the release is available. 66 | 67 | 8. Spread the word! 68 | -------------------------------------------------------------------------------- /docs/development/roadmap.rst: -------------------------------------------------------------------------------- 1 | .. _roadmap: 2 | 3 | Roadmap 4 | ======= 5 | 6 | We aim for improvements to **respy** in Economics, Statistics, and Numerical Methods. 7 | 8 | Economics and Statistics 9 | ------------------------ 10 | 11 | All topics listed here can be tackled as part of a bachelor or master thesis. If you are 12 | interested, please contact us!
13 | 14 | Explore Simulation Based Estimation 15 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 16 | 17 | We want to add simulation based estimation to **respy** and compare the accuracy of 18 | parameters estimated with maximum likelihood and simulation based methods. As **respy** 19 | already has the ability to simulate data, it would be very simple to implement the method of 20 | simulated moments or indirect inference estimation. As part of this project, we could 21 | also experiment with approaches that make the criterion function smooth and therefore 22 | allow the use of fast optimizers. A starting point could be `Frazier, Oka and Zhu (2019) 23 | `_ 24 | 25 | CCP and the Estimation of Nonseparable Dynamic Models 26 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 27 | 28 | In a recent paper, `Kristensen, Nesheim, and de Paula (2015) 29 | `_ generalize the conditional 30 | choice probabilities (CCP) estimator (`Hotz, Miller (1993) 31 | `_) to non-separable economic models. However, 32 | they are still missing an empirical application of their method as a proof of concept. 33 | The **respy** package offers a suitable starting point. 34 | 35 | Estimate a model by Approximate Bayesian Computation (ABC) 36 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 37 | 38 | Use the `ABCpy package `_ and **respy**'s 39 | simulation capabilities to estimate the model via ABC. Compare it against other 40 | estimation methods in terms of computational burden and precision of the estimates. 41 | 42 | Sparse Maximization and Human Capital Investment 43 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 44 | 45 | Gabaix (2014) proposes a fully tractable, unifying theory of limited attention in 46 | decision-making. The idea is that the decision-maker pays less or no attention to some 47 | features of the situation. A potential application of sparse maximization is human 48 | capital investment, since young individuals could (partially or even fully) neglect some 49 | relevant features, which could tilt their choices. This may imply that a considerable 50 | share of the US labor force is misallocated. For more information check out the 51 | :download:`full description 52 | <../_static/thesis_proposals/Sparsity_and_human_capital.pdf>` 53 | 54 | Salience Theory and Human Capital Investment 55 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 56 | 57 | Bordalo, Gennaioli and Shleifer (2013) propose a unifying theory of salience in 58 | decision-making. An attribute is salient when it “stands out” relative to the 59 | alternative choices. A potential application of salience theory is human capital 60 | investment, since young individuals could attach disproportionately high attention to 61 | professions with salient returns, which could tilt their choices. For more information 62 | check out the :download:`full description 63 | <../_static/thesis_proposals/Salience_and_human_capital.pdf>` 64 | 65 | Numerical Methods 66 | ----------------- 67 | 68 | Improve numerical integration 69 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 70 | 71 | We use numerical integration to calculate value functions and choice probabilities in 72 | the maximum likelihood estimation. Currently we use a smoothed Monte-Carlo integration 73 | for both. For a thesis, a student could explore how the accuracy and speed of the 74 | integrals change with the following strategies: 75 | 76 | - Use a GHK simulator instead of the current Monte-Carlo integration. 77 | - Use Gaussian quadrature for choice probabilities.
78 | - Allow for restrictions on the correlation structure of the shocks that make it
79 | possible to reduce the dimensionality of the integrals.
80 |
81 | Starting points are the following papers:
82 |
83 | - `Skrainka and Judd (2011) `_
84 | - `Dunnet (1989) `_
85 |
86 | Benchmark Different Optimizers
87 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
88 |
89 | Explore the speed and reliability of local and global optimizers for maximum likelihood
90 | estimation of the model. The results should be transferable to other estimation problems
91 | with a noisy criterion function. Most relevant optimizers should already be implemented
92 | in ``estimagic``. Otherwise, they can be added easily.
93 |
94 | Approximate Dynamic Programming (ADP)
95 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
96 |
97 | We want to explore the usefulness of ADP techniques for solving large-scale structural
98 | economic models. The seminal reference is `Powell (2011)
99 | `_.
100 | -------------------------------------------------------------------------------- /docs/development/template_for_tutorials.rst: --------------------------------------------------------------------------------
1 | .. _template_for_tutorials:
2 |
3 | Template for tutorials
4 | ======================
5 |
6 | *This document should serve as a template for all tutorials. If something is not covered
7 | in here, please extend it.*
8 |
9 | In order to get started on a tutorial, please refer to the other :ref:`tutorials` to get
10 | a feel for the structure and language you should use so your tutorial is aligned with the
11 | rest of the documentation. Check out this `checklist
12 | `_ as an
13 | additional resource to guide you through the writing process.
14 |
15 | Goal
16 | ----
17 |
18 | Describe the goal and what the user should have learned by the end of this
19 | tutorial.
20 |
21 |
22 | Steps
23 | -----
24 |
25 | Tutorials should be structured as clearly defined, sequential steps to reach the
26 | specified goal.
27 |
28 | The tutorials should be short, focused, and concise. This means you might have to omit
29 | background information that you consider helpful for understanding the underlying code
30 | and theory. If you want to guide the user to further material like related
31 | :ref:`explanations`, :ref:`how_to_guides`, and :ref:`reference_guides`, use the
32 | following buttons.
33 |
34 | .. raw:: html
35 |
36 |
37 | To explanations
38 |
39 | A more extended explanation of the economic model is provided in :ref:`economic_model`.
40 |
41 | .. raw:: html
42 |
43 |
44 | -------------------------------------------------------------------------------- /docs/explanations/bib.rst: -------------------------------------------------------------------------------- 1 | Bibliography 2 | ============ 3 | 4 | .. bibliography:: refs.bib 5 | :all: 6 | :style: plain 7 | -------------------------------------------------------------------------------- /docs/explanations/economic_model.rst: -------------------------------------------------------------------------------- 1 | .. _economic_model: 2 | 3 | Economic Model 4 | ============== 5 | 6 | .. role:: boldblue 7 | 8 | To illustrate the usage of **respy** in solving :boldblue:`finite-horizon 9 | discrete choice dynamic programming` (DCDP) problems we will present the basic 10 | setup of the human capital model as conceptualized in Keane and Wolpin 11 | (1997, :cite:`Keane.1997`). 12 | 13 | -------------------------------------------------------------------------------- 14 | 15 | .. rst-class:: centerblue 16 | 17 | The promise: **respy** will become your preferred tool 18 | to develop, solve, estimate, and explore models within the EKW framework 19 | 20 | -------------------------------------------------------------------------------- 21 | 22 | At time t = 1,...,T each individual observes the state of the economic environment 23 | :math:`s_{t} \in \mathcal{S}_t` and chooses an action :math:`a_t` from the set of 24 | admissible actions :math:`\mathcal{A}`. The decision has two consequences: 25 | 26 | - Individuals receive per-period utility :math:`u_a(\cdot)` associated with the 27 | chosen alternative :math:`a_t \in \mathcal{A}`. [#]_ 28 | - The economy evolves from :math:`s_{t}` to :math:`s_{t+1}`. 29 | 30 | Individuals are :boldblue:`forward-looking` and so do not simply choose the 31 | alternative with the highest immediate per-period utility. Instead, they take 32 | future consequences of their actions into account and implement a 33 | :boldblue:`policy` :math:`\pi \equiv (a_1^{\pi}(s_1), \dots, a_T^{\pi}(s_T))`. 34 | A policy is a collection of :boldblue:`decision rules` :math:`a_t^{\pi}(s_t)` 35 | that prescribe an action for any possible state :math:`s_t`. The implementation 36 | of a policy generates a sequence of per-period utilities that depends on the 37 | :boldblue:`objective transition probability distribution` :math:`p_t(s_t, a_t)` 38 | for the evolution of state :math:`s_{t}` to :math:`s_{t+1}` induced by the model. 39 | Individuals entertain :boldblue:`rational expectations` (Muth, 1961, 40 | :cite:`Muth.1961`) so their subjective beliefs about the future agree with the 41 | objective transition probabilities of the model. 42 | 43 | :ref:`timing_events` depicts the timing of events in the model for two generic 44 | periods. At the beginning of period t, an individual fully learns about the 45 | immediate per-period utility of each alternative, chooses one of them, and 46 | receives its immediate utility. Then the state evolves from :math:`s_t` to 47 | :math:`s_{t+1}` and the process is repeated in :math:`t+1`. 48 | 49 | .. _timing_events: 50 | 51 | .. figure:: ../_static/images/timing_events.svg 52 | :width: 650 53 | :alt: Illustration timing of events 54 | 55 | Timing of events 56 | 57 | Individuals face :boldblue:`uncertainty` (Gilboa, 2009, :cite:`Gilboa.2009`) 58 | and discount future per-period utilities by an exponential discount factor 59 | :math:`0 < \delta < 1` that parameterizes their time preference. Per-period 60 | utilities are time-separable (Samuelson, 1937, :cite:`Samuelson.1937`). 
61 | Given an initial state :math:`s_1` individuals implement the policy :math:`\pi` 62 | from the set of all feasible policies :math:`\Pi` that :boldblue:`maximizes the 63 | expected total discounted utilities` over all :math:`T` periods. 64 | 65 | .. math:: 66 | 67 | \underset{\pi \in \Pi}{\max} \, \mathbb{E}_{p^{\pi}} \left[ \sum_{t = 1}^T 68 | \delta^{t - 1} u(s_t, a_t^{\pi}(s_t)) \right], 69 | 70 | where :math:`\mathbb{E}_{p^{\pi}}[\cdot]` denotes the expectation operator under 71 | the probability measure :math:`p^{\pi}`. The decision problem is dynamic in the 72 | sense that expected inter-temporal per-period utilities at a certain period 73 | :math:`t` are influenced by past choices. 74 | 75 | .. raw:: html 76 | 77 |
79 | To Explanation
80 |
81 | The operationalization of the model allows us to proceed with the calibration as
82 | described in Mathematical Framework.
83 |
84 |
85 |
86 | .. rubric:: Footnotes
87 |
88 | .. [#] For notational convenience we will omit the subscript :math:`a` whenever
89 | possible.
90 | -------------------------------------------------------------------------------- /docs/explanations/implementation_kw94.rst: --------------------------------------------------------------------------------
1 | .. _implementation_kw94:
2 |
3 | Model in Keane and Wolpin (1994)
4 | ================================
5 |
6 | The explanations section of this documentation gives a detailed outline
7 | of the economic modeling components and the mathematical framework in
8 | Eckstein-Keane-Wolpin models using the example of Keane and Wolpin
9 | (1997, :cite:`Keane.1997`). In the documentation, you will often encounter another model
10 | specification rooted in the publication Keane and Wolpin
11 | (1994, :cite:`Keane.1994`). This model constitutes a similar but simpler version
12 | of the model. We give a brief overview of the reward functions and components
13 | distinctive to this specification here. Note that the underlying economic and
14 | mathematical framework remains the same.
15 |
16 | --------------------------------------------------------------------------------
17 |
18 | .. raw:: html
19 |
20 |
22 | To Explanations
23 |
24 | Find the economic model and mathematical framework in the Explanations.
25 |
26 | 27 | -------------------------------------------------------------------------------- 28 | 29 | The model from Keane and Wolpin (1994, :cite:`Keane.1994`) is characterized by four 30 | distinct choices. At each point in time :math:`t \in \{0, ...,39\}` individuals decide 31 | between :math:`a \in \{1,2,3,4\}` mutually exclusive alternatives: 32 | working in occupation *A*, working in occupation *B* ($a=1,2$), investing in 33 | *education* ($a=3$), or staying *home* ($a=4$). The alternatives are associated 34 | with the rewards: 35 | 36 | .. math:: 37 | 38 | \text{Occupation A: } R_1(t) &= w_{1t} = r_{1}exp\{\alpha_{1} + \beta_{1,1}h_{t} 39 | + \gamma_{1,1}k_{1t} + \gamma_{1,2}k^2_{1t} + \gamma_{1,7}k_{2t} 40 | + \gamma_{2,8}k^2_{2t} + \epsilon_{1t}\} \nonumber \\ 41 | \text{Occupation B: } R_2(t) &= w_{2t} = 42 | r_{2}exp\{\alpha_{2} + \beta_{2,1}h_{t} + \gamma_{2,1}k_{2t} 43 | + \gamma_{2,2}k^2_{2t} + \gamma_{2,7}k_{1t} + \gamma_{2,8}k^2_{1t} 44 | + \epsilon_{2t}\} \nonumber \\ 45 | \text{School: }R_3(t) &= \alpha_3 + \beta_{tc}I(h_t \geq 12) 46 | + \beta_{rc}(1-d_3(t-1)) + \epsilon_{3t}, \nonumber \\ 47 | \text{Home: }R_4(t) &= \alpha_4 + \epsilon_{4t} 48 | 49 | 50 | These rewards enter the alternative specific value functions of individuals. In these 51 | equations :math:`h(t)` denotes schooling in period :math:`t` and :math:`k_{at}` denotes 52 | work experience from sector :math:`A` or :math:`B` (:math:`a=1,2`). The reward for 53 | schooling includes an indicator :math:`I(h_t \geq 12)` which is connected to the cost of 54 | schooling after 12 periods (i.e. post-secondary schooling costs) and component that 55 | captures costs of returning to school when the choice in the previous period was 56 | something else. Aside from the parameters connected to these various components, each 57 | reward function also contains a constant and an alternative specific shock. The skill 58 | price in occupations is denoted by :math:`r_{a}`, it is set to 1 in this model. [#]_ 59 | 60 | 61 | The model from Keane and Wolpin (1994, :cite:`Keane.1994`) is not a complete 62 | subset of the model outlined in the explanations. The most important deviations are: 63 | 64 | - The model includes an additional squared experience term with parameter 65 | :math:`\gamma_{2,8}` for experience in the other occupation. 66 | 67 | - It also does not include unobserved heterogeneity i.e. types. Here we thus 68 | define :math:`\alpha_{a}` as the constant for alternative :math:`a`. 69 | 70 | - We do not distinguish between different levels of post-secondary education. 71 | The parameters :math:`\beta_{tc}` and :math:`\beta_{tr}` are thus not enumerated. 72 | 73 | 74 | .. rubric:: Footnotes 75 | 76 | .. [#] Note that the reward functions are not only time but also individual specific. 77 | A subscript for individuals is left out for simplicity. 78 | -------------------------------------------------------------------------------- /docs/explanations/introduction.rst: -------------------------------------------------------------------------------- 1 | .. _what_is_respy: 2 | 3 | What is respy? 4 | ============== 5 | 6 | **respy** is an open source framework written in Python for the simulation and 7 | estimation of some finite-horizon discrete choice dynamic programming (DCDP) models. 
8 | In comparison to simple reduced-form analysis, these models allow the estimation
9 | of structural parameters which reflect agents' preferences and beliefs by assuming
10 | that agents are forward-looking and maximize expected intertemporal payoffs.
11 | Over the last decades, finite-horizon DCDP models have become a popular tool to
12 | answer research questions in areas of economics such as
13 | labor economics, industrial organization, economic demography, health economics,
14 | development economics, political economy, and marketing.
15 |
16 | What makes **respy** powerful is that it allows researchers to build and solve, within
17 | weeks or months, structural models whose development previously took years. The design
18 | of **respy** allows the researcher to flexibly add the following components to
19 | her model.
20 |
21 | - Any number of discrete choices (e.g., working alternatives, schooling, home
22 | production, retirement) where each choice may yield a wage, may allow for
23 | experience accumulation, and can be constrained by time, a maximum amount of
24 | accumulated experience, or other characteristics.
25 | - Conditioning of individuals' decisions on their previous choices or their labor
26 | market history.
27 | - A finite mixture with any number of subgroups to account for
28 | unobserved heterogeneity among individuals as developed by Keane and Wolpin
29 | (1997, :cite:`Keane.1997`).
30 | - Any number of time-constant observed state variables (e.g., ability measures
31 | (Bhuller et al., 2020, :cite:`Bhuller.2020`), race (Keane and Wolpin, 2000,
32 | :cite:`Keane.2000`), demographic variables) found in the data.
33 | - Correction of the estimation for measurement error in wages, either using a Kalman
34 | filter in maximum likelihood estimation or by adding the measurement error
35 | in simulation based approaches.
36 |
37 |
38 | As is common with structural economic models, finite-horizon DCDP models often
39 | rely on strong assumptions regarding unobservable state variables and error terms
40 | (see Aguirregabiria and Mira, 2010, :cite:`Aguirregabiria.2010`, p. 40 for a list
41 | of assumptions used in standard finite-horizon DCDP models).
42 | **respy** focuses on the estimation of so-called **Eckstein-Keane-Wolpin (EKW) models**.
43 | In accordance with Aguirregabiria and Mira (2010, :cite:`Aguirregabiria.2010`),
44 | we classify a DCDP model as an EKW model if it departs from standard
45 | DCDP modeling by relaxing at least one of the following assumptions:
46 |
47 | 1. The one-period utility function does not have to be *additively separable* in
48 | its observable and unobservable components but can instead feature different
49 | compositions, e.g., multiplicative separability.
50 |
51 | 2. Observable payoff variables can be *choice-censored* and the value of the payoff
52 | variable does not have to be independent of the error term :math:`\epsilon`,
53 | conditional on the values of the decision and observable state variables.
54 |
55 | 3. *Permanent unobserved heterogeneity* is allowed to exist, i.e., the unobserved
56 | state variables do not have to be independently and identically distributed
57 | over agents and over time. As an example, the seminal work of Keane and Wolpin
58 | (1997, :cite:`Keane.1997`) introduces permanent unobserved heterogeneity by
59 | assigning each individual to one of four types.
60 |
61 | 4. Unobservables may be *correlated across choice alternatives*, i.e., unobserved
62 | state variables do not have to be independent across alternatives.
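A minimal sketch of how such a model is loaded and simulated is shown below. The example
model ``kw_94_one`` and the functions ``get_example_model`` and ``get_simulate_func``
appear elsewhere in this documentation and in ``respy/__init__.py``; the way the returned
simulation function is called is assumed to follow the tutorials.

.. code-block:: python

    import respy as rp

    # Load the parameters and options of a packaged example model without data.
    params, options = rp.get_example_model("kw_94_one", with_data=False)

    # Build the simulation function once and use it to simulate a synthetic panel.
    simulate = rp.get_simulate_func(params, options)
    df = simulate(params)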
63 | 64 | .. raw:: html 65 | 66 |
68 | To Explanation
69 |
70 | To learn more about DCDP models and related topics, check out Recommended Reading.
71 |
72 |
73 | -------------------------------------------------------------------------------- /docs/explanations/notation.rst: --------------------------------------------------------------------------------
1 | Notation
2 | ========
3 |
4 | The following table summarizes the notation in order of appearance.
5 |
6 | .. csv-table:: Table of Notation
7 | :header: "Symbol", "Explanation"
8 | :widths: 30, 70
9 |
10 |
11 | ":math:`s_t`", "state at time :math:`t`"
12 | ":math:`\mathcal{S}_t`", "set of admissible states at time :math:`t`"
13 | ":math:`a_t`", "chosen alternative at time :math:`t`"
14 | ":math:`\mathcal{A}`", "set of feasible alternatives"
15 | ":math:`\pi`", "particular policy"
16 | ":math:`\Pi`", "set of implementable policies"
17 | ":math:`a_t^{\pi}(\cdot)`", "state-dependent decision rule under :math:`\pi`"
18 | ":math:`p_t(\cdot)`", "transition probability distribution for state :math:`s_{t+1}`"
19 | ":math:`\mathbb{E}_{p}[\cdot]`", "expectation under probability measure p"
20 | ":math:`\mathcal{J}`", "set of types"
21 | ":math:`e_{j,a}`", "skill endowment of type j in alternative a"
22 | ":math:`h_t`", "years of completed schooling at :math:`t`"
23 | ":math:`\bf{k}_{t}`", "vector of work-experience at :math:`t`"
24 | ":math:`\bf{\epsilon}`", "vector of productivity shocks"
25 | ":math:`r_a`", "market equilibrium rental price in alternative a"
26 | ":math:`x_a(\cdot)`", "occupation-specific skill level"
27 | ":math:`\Gamma_a(\cdot)`", "deterministic component of occupation-specific skill level"
28 | ":math:`\bar{u}_a`", "observable part of per-period utility"
29 | ":math:`\bar{s}_t`", "observable part of the state space"
30 | ":math:`\mathcal{D}`", "data structure"
31 | ":math:`M_D`", "moments of observed data"
32 | ":math:`M_S(\theta)`", "moments of simulated data under :math:`\theta`"
33 | -------------------------------------------------------------------------------- /docs/explanations/parameterization.rst: --------------------------------------------------------------------------------
1 | .. _parameterization:
2 |
3 | Parameterization
4 | ================
5 |
6 | .. raw:: html
7 |
8 |
10 | To Explanation
11 |
12 | The following table keeps track of the parameterization for the computational
13 | implementation introduced in Computational Implementation.
14 |
15 | 16 | The wildcard {civilian} means either "blue" or "white". 17 | 18 | .. csv-table:: Table of Parameterization 19 | :header: "Parameter", "State variable in **respy**", "Explanation" 20 | :widths: 20, 35, 45 21 | 22 | ":math:`\delta`", "delta", "discount factor" 23 | ":math:`e_{1,a}`", "type_1", "deviation for type 1 from type 0 in a" 24 | ":math:`e_{2,a}`", "type_2", "deviation for type 2 from type 0 in a" 25 | ":math:`e_{3,a}`", "type_3", "deviation for type 3 from type 0 in a" 26 | "", "**Common parameters**", "" 27 | ":math:`\alpha_a`", "constant", "log of rental price if the base skill endowment of type 0 is normalized to 0 (wage)" 28 | ":math:`\vartheta_1`", "common_hs_graduate", "common return to high school degree (non pecuniary)" 29 | ":math:`\vartheta_2`", "common_co_graduate", "common return to college degree (non pecuniary)" 30 | ":math:`\vartheta_3`", "common_hs_graduate", "effect of leaving the military early (after one year)" 31 | "", "**Schooling-related**", "" 32 | ":math:`\beta_{a,1}`", "exp_school", "linear return to an additional year of schooling (wage)" 33 | ":math:`\beta_{a,2}`", "exp_school", "skill premium of having finished high school (wage)" 34 | ":math:`\beta_{a,3}`", "exp_school", "skill premium of having finished college (wage)" 35 | ":math:`\beta_{tc_1}`", "hs_graduate", "net tuition costs college (non pecuniary)" 36 | ":math:`\beta_{tc_2}`", "co_graduate", "additional tuition costs graduate school (non pecuniary)" 37 | ":math:`\beta_{rc_1}`", "returns_to_high_school", "reward for going back to high school" 38 | ":math:`\beta_{rc_2}`", "returns_to_college", "reward for going back to college" 39 | "", "**Experience-related**", "" 40 | ":math:`\gamma_{a,1}`", "exp_{civilian}_collar", "return to experience, same sector, linear (wage)" 41 | ":math:`\gamma_{a,2}`", "exp_{civilian}_collar_square", "return to experience, same sector, quadratic (divided by 100) (wage)" 42 | ":math:`\gamma_{a,3}`", "any_exp_{civilian}_collar", "return for any experience in same sector" 43 | ":math:`\gamma_{a,4}`", "period", "linear age effect (wage)" 44 | ":math:`\gamma_{a,5}`", "is_minor", "effect of being a minor (wage)" 45 | ":math:`\gamma_{a,6}`", "work_{civilian}_collar_lagged", "effect of being a minor (wage)" 46 | ":math:`\gamma_{a,7}`", "exp_{civilian}_collar", "return to experience, other civilian sector, linear (wage)" 47 | ":math:`\gamma_{3,1}`", "exp_military", "return to experience, same sector, linear (wage)" 48 | ":math:`\gamma_{3,2}`", "exp_military_square", "return to experience, same sector, quadratic (divided by 100) (wage)" 49 | ":math:`\gamma_{3,3}`", "any_exp_military", "return to having any military experience" 50 | ":math:`\gamma_{3,4}`", "period", "linear age effect" 51 | ":math:`\gamma_{3,5}`", "is_minor", "effect of being a minor" 52 | ":math:`\gamma_{4,4}`", "period", "linear age effect" 53 | ":math:`\gamma_{4,5}`", "is_minor", "effect of being a minor" 54 | ":math:`\gamma_{5,4}`", "is_young_adult", "additional value of staying home if aged 18-20" 55 | ":math:`\gamma_{5,5}`", "is_adult", "additional value of staying home if 21 or older" 56 | "", "**Mobility and search**", "" 57 | ":math:`c_{a,1}`", "not_exp_{civilian}_collar_lagged", "reward of switching to a from other occupation (non pecuniary)" 58 | ":math:`c_{a,2}`", "not_any_exp_{civilian}_collar", "reward of working in a for the first time (non pecuniary)" 59 | ":math:`c_{3,2}`", "not_any_exp_military", "reward of being in the military sector for the first time (non pecuniary)" 60 | 
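To see how the state variables in this table map to the parameter labels used by
**respy**, one can load a packaged Keane and Wolpin (1997) parameterization and inspect
it. This is only a sketch: ``kw_97_basic`` is one of the example models listed in
``respy/config.py``, and the exact index labels depend on the chosen model.

.. code-block:: python

    import respy as rp

    # Load the parameters of a Keane and Wolpin (1997) example model.
    params, options = rp.get_example_model("kw_97_basic", with_data=False)

    # The params DataFrame carries the parameter labels; compare them with the
    # state variables listed in the table above.
    print(params.index.names)
    print(params.head(20))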
-------------------------------------------------------------------------------- /docs/explanations/recommended_reading.rst: -------------------------------------------------------------------------------- 1 | Recommended Reading 2 | =================== 3 | 4 | To gain a deeper understanding of the underlying economic model and the mathematical 5 | methods used for solving and estimating this model as well as obtaining a small 6 | overview of the relevant literature we suggest the following supplementary material 7 | depending on your current state of prior knowledge. 8 | 9 | Before jumping right into reading the original papers (Keane and Wolpin 1994; 1997) it 10 | can be very helpful to refresh your knowledge on discrete choice dynamic programming. 11 | 12 | #. **Discrete Choice Methods with Simulation** (Kenneth Train, 2009, :cite:`Train.2009`) 13 | This book gives a concise introduction to static discrete choice models and the 14 | difficulties encountered in their estimation. 15 | 16 | #. **Economic Dynamics: Theory and Computation** (John Stachurski, 2009, 17 | :cite:`Stachurski.2009`) 18 | In the first part of this book one finds a succinct introduction to deterministic and 19 | probabilistic dynamical systems. In particular, the topics Finite state Markov chains 20 | and Finite state dynamic programming are elucidated. 21 | 22 | #. **The Structural Estimation of Behavioral Models: Discrete Choice Dynamic Programming 23 | Methods and Applications** (Keane, Wolpin and Todd, 2011, :cite:`Keane.2011`) In this 24 | handbook chapter discrete choice dynamic programming models are introduced including 25 | some applications in the field of labor economics. 26 | 27 | #. **Dynamic discrete choice structural models: A survey** (Aguirregabiria and Mira, 28 | 2010, :cite:`Aguirregabiria.2010`) In this comprehensive survey a more mathematically 29 | rigorous treatment of the subject is given. 30 | 31 | 32 | Core References 33 | --------------- 34 | 35 | The following studies served as the foundations of **respy** and are implemented as 36 | exmaple models in the package. 37 | 38 | * Keane, M. P. and Wolpin, K. I. (1994). `The Solution and Estimation of Discrete Choice 39 | Dynamic Programming Models by Simulation and Interpolation: Monte Carlo Evidence 40 | `_. *The Review of Economics and Statistics*, 76(4): 41 | 648-672. 42 | 43 | 44 | * Keane, M. P., & Wolpin, K. I. (1997). `The Career Decisions of Young Men 45 | `_. *Journal of Political Economy*, 46 | 105(3), 473-522. 47 | 48 | * Keane, M. P., & Wolpin, K. I. (2000). `Eliminating Race Differences in School Attainment and 49 | Labor Market Success `_. 50 | *Journal of Labor Economics*, 18(4), 614-652. 51 | 52 | 53 | .. raw:: html 54 | 55 |
57 | To how-to guide
58 |
59 | Find all such pre-implemented models in How to load example models.
60 |
61 |
62 | -------------------------------------------------------------------------------- /docs/how_to_guides/how_to_numerical_integration.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "source": [ 6 | "# Numerical Integration\n", 7 | "\n", 8 | "One important component of the solution to the DCDP problem in **respy** models is numerical integration. A bottleneck in solving and estimating the model is the solution of the expected value function, the so-called $EMax(\\cdot)$. Solving the $EMax(\\cdot)$ requires us to solve a multi-dimensional integral at every point in the state space. The integrated value function does not have an analytical solution and thus requires the application of numerical methods.\n", 9 | "\n", 10 | "As the models become more complex, the computational burden increases as adding new features to the model increases the required number of function evaluations, which are the costly operation in numerical integration. Numerical integration usually uses monte carlo simulation. Results from applied mathematics, however, suggest methods that are more efficient and thus enable a performance increase. For the same number of function evaluations (and hence computational cost) quasi-Monte Carlo methods achieve a significantly higher accuracy. **respy** thus enables users to select between various methods for the numerical approximation of the $EMax(\\cdot)$. The numerical integration is controlled in the `options` of a specified model.\n" 11 | ], 12 | "metadata": {} 13 | }, 14 | { 15 | "cell_type": "code", 16 | "execution_count": 2, 17 | "source": [ 18 | "import respy as rp\r\n", 19 | "_, options = rp.get_example_model(\"kw_94_one\", with_data=False)" 20 | ], 21 | "outputs": [], 22 | "metadata": {} 23 | }, 24 | { 25 | "cell_type": "markdown", 26 | "source": [ 27 | "## Numerical integration method\n", 28 | "\n", 29 | "The option `monte_carlo_sequence` controls how points are drawn.\n", 30 | "\n", 31 | "- `random`: Points are drawn randomly (crude Monte Carlo).\n", 32 | "- `sobol` or `halton`: Points are drawn from low-discrepancy sequences (superiority in coverage). This means a given approximation error can be achieved with less points." 33 | ], 34 | "metadata": {} 35 | }, 36 | { 37 | "cell_type": "markdown", 38 | "source": [ 39 | "
\r\n", 40 | "**Note**: **respy** relies on [chaospy](https://chaospy.readthedocs.io/en/master) for the `sobol` and `halton` sequence. You need to install it in addition to **respy**.\r\n", 41 | "
" 42 | ], 43 | "metadata": {} 44 | }, 45 | { 46 | "cell_type": "code", 47 | "execution_count": 3, 48 | "source": [ 49 | "options[\"monte_carlo_sequence\"]" 50 | ], 51 | "outputs": [ 52 | { 53 | "output_type": "execute_result", 54 | "data": { 55 | "text/plain": [ 56 | "'random'" 57 | ] 58 | }, 59 | "metadata": {}, 60 | "execution_count": 3 61 | } 62 | ], 63 | "metadata": {} 64 | }, 65 | { 66 | "cell_type": "markdown", 67 | "source": [ 68 | "## Solution draws\r\n", 69 | "\r\n", 70 | "The number of solution draws controls how many points are used to evaluate an integral. You can specify them using the option `solution_draws`." 71 | ], 72 | "metadata": {} 73 | }, 74 | { 75 | "cell_type": "code", 76 | "execution_count": 4, 77 | "source": [ 78 | "options[\"solution_draws\"]" 79 | ], 80 | "outputs": [ 81 | { 82 | "output_type": "execute_result", 83 | "data": { 84 | "text/plain": [ 85 | "500" 86 | ] 87 | }, 88 | "metadata": {}, 89 | "execution_count": 4 90 | } 91 | ], 92 | "metadata": {} 93 | }, 94 | { 95 | "cell_type": "markdown", 96 | "source": [ 97 | "Increasing the number of solution draws increases the accuracy of the solution at the cost of the computational burden." 98 | ], 99 | "metadata": {} 100 | }, 101 | { 102 | "cell_type": "raw", 103 | "source": [ 104 | "
\r\n", 105 | " Project\r\n", 106 | "\r\n", 107 | " Find an exploration of numerical integration methods in \r\n", 108 | " EKW models in Improving the Numerical Integration.\r\n", 110 | "
" 111 | ], 112 | "metadata": {} 113 | } 114 | ], 115 | "metadata": { 116 | "kernelspec": { 117 | "name": "python3", 118 | "display_name": "Python 3.9.5 64-bit ('respy': conda)" 119 | }, 120 | "language_info": { 121 | "name": "python", 122 | "version": "3.9.6", 123 | "mimetype": "text/x-python", 124 | "codemirror_mode": { 125 | "name": "ipython", 126 | "version": 3 127 | }, 128 | "pygments_lexer": "ipython3", 129 | "nbconvert_exporter": "python", 130 | "file_extension": ".py" 131 | }, 132 | "interpreter": { 133 | "hash": "b549dadab0c2edb1c58f223f7584f57e60f2cc8e65ef8392efb9b23cb30dad20" 134 | } 135 | }, 136 | "nbformat": 4, 137 | "nbformat_minor": 4 138 | } 139 | -------------------------------------------------------------------------------- /docs/projects/_numerical_integration.py: -------------------------------------------------------------------------------- 1 | """Auxiliary and plotting functions for the respy tutorial numerical integration.""" 2 | import matplotlib.pyplot as plt 3 | from matplotlib import ticker 4 | 5 | # Initialize methods for comparison of integration 6 | METHODS = ["random", "halton", "sobol"] 7 | TUITION_SUBSIDIES = [0, 500] 8 | 9 | 10 | def get_single_integrand( 11 | df, idx, scaling, methods, manual_limits="no", xlimits=(0, 1), ylimits=(0, 1) 12 | ): 13 | """Plot the values of the idx-th evaluation of an integral stored in a dataframe. 14 | 15 | The dataframe contains values of the integrand computed at 16 | a different number of points. 17 | 18 | Input 19 | - df (dataframe): dataframe of [integral] values 20 | - idx (scalar): number of integral evaluation 21 | - scaling (scalar): scaling of [integral] values (1 = no scaling) 22 | - manual_limits (string): determine whether to set manual limits 23 | - methods (list): integration methods 24 | Output 25 | - figure 26 | """ 27 | fig, ax = plt.subplots() 28 | 29 | for m in methods: 30 | label = get_label(m) 31 | x = df.index.get_level_values("integration_points").unique() 32 | y = df.loc[(slice(None), idx), m] / scaling 33 | 34 | ax.plot(x, y, label=label) 35 | 36 | ax.legend() 37 | if manual_limits == "yes": 38 | ax.set_xlim(xlimits) 39 | ax.set_ylim(ylimits) 40 | ax.set_xlabel("Points") 41 | ax.set_ylabel("Integrand") 42 | 43 | 44 | def get_label(method): 45 | """Standardize labels for graphs given METHOD list. 46 | 47 | Input 48 | - method (list): list of methods to standardize 49 | Output 50 | - label (string): standardizes labels 51 | """ 52 | label = method.capitalize() 53 | if label == "Random": 54 | label = "random" 55 | 56 | return label 57 | 58 | 59 | def get_rmse_rate(df, comparison_rates, methods): 60 | """Plot the absolute rate of rmse. 61 | 62 | Plots rate on a loglog scale for different METHODS 63 | of function/integral value evaluation. 
64 | 65 | Input 66 | - df (dataframe): calculated [RMSE] values for various number of points 67 | - comparison_rates (list): list of reference rates 68 | 69 | Output 70 | - figure 71 | """ 72 | for measure in ["absolute", "relative"]: 73 | 74 | fig, ax = plt.subplots(1, 1, figsize=(6.5, 4.5)) 75 | 76 | for m in methods: 77 | label = get_label(m) 78 | 79 | x = df.index.get_level_values("integration_points").unique() 80 | y = df.loc[measure, m] 81 | 82 | ax.loglog(x, y, label=label) 83 | 84 | ax.loglog( 85 | x, 86 | comparison_rates[0], 87 | label="$C_1 log(n)/n$", 88 | linestyle="-.", 89 | linewidth=2, 90 | color="silver", 91 | ) 92 | ax.loglog( 93 | x, 94 | comparison_rates[1], 95 | label="$C_2 log(n)/n^{1/2}$", 96 | linestyle="-.", 97 | linewidth=2, 98 | color="grey", 99 | ) 100 | ax.loglog( 101 | x, 102 | comparison_rates[2], 103 | label="$C_3 log(n)/n^{3/2}$", 104 | linestyle="-.", 105 | linewidth=2.5, 106 | color="darkviolet", 107 | ) 108 | 109 | ax.set_xlabel("Points"), ax.legend() 110 | ax.set_ylabel(f"Rate {measure} RMSE") 111 | 112 | 113 | def get_policy_prediction(df, baseline, alternative, methods): 114 | """Plot the policy prediction under different integrations methods. 115 | 116 | Input 117 | - df (dataframe): policy-values for various number of points 118 | - baseline (scalar): location of baseline values in the df 119 | - alternative (scalar): location of policy values 120 | - methods (list): integration methods 121 | 122 | Output 123 | figure 124 | """ 125 | fig, ax = plt.subplots() 126 | 127 | for m in methods: 128 | label = get_label(m) 129 | 130 | x = df.index.get_level_values("Points").unique() 131 | y = df.loc[(m, slice(None)), baseline] - df.loc[(m, slice(None)), alternative] 132 | ax.plot(x, y, label=label) 133 | 134 | ax.set_xlabel("Points") 135 | set_formatter(ax, which="x") 136 | 137 | ax.set_ylabel(r"$\Delta$ Schooling ") 138 | ax.legend() 139 | 140 | 141 | def set_formatter(ax, which="xy"): 142 | """Format axis values. 143 | 144 | Input 145 | - ax: which ax object to format 146 | - which: axis 147 | 148 | Output 149 | - formatted ax object 150 | """ 151 | formatter = ticker.FuncFormatter(lambda x, p: format(int(x), ",")) 152 | if "x" in which: 153 | ax.get_xaxis().set_major_formatter(formatter) 154 | if "y" in which: 155 | ax.get_yaxis().set_major_formatter(formatter) 156 | -------------------------------------------------------------------------------- /docs/projects/index.rst: -------------------------------------------------------------------------------- 1 | Projects 2 | ======== 3 | 4 | To illustrate how **respy** is used in several projects, here is a list of published or 5 | ongoing research papers and theses. Feel free to contact the authors if you are 6 | interested. 7 | 8 | 9 | .. raw:: html 10 | 11 | 45 | 46 | 47 | We also maintain a list of replicated models from some research papers to validate 48 | **respy** against specific implementations and to showcase **respy**'s flexibility. 49 | You can additionally find projects exploring specific components of the implementation 50 | of Eckstein-Keane-Wolpin models. 51 | 52 | .. raw:: html 53 | 54 | 81 | 82 | 83 | 84 | 85 | .. 
toctree:: 86 | :maxdepth: 1 87 | :hidden: 88 | 89 | research_papers 90 | theses 91 | keane-and-wolpin-1994 92 | estimating-keane-and-wolpin-1997-msm 93 | numerical_integration 94 | -------------------------------------------------------------------------------- /docs/projects/research_papers.rst: -------------------------------------------------------------------------------- 1 | Research papers 2 | =============== 3 | 4 | Here is a list of research papers using **respy**. 5 | 6 | ---- 7 | 8 | Eisenhauer, P., Janys, L. and Gabler, J. (2021). `Structural models for policy-making: 9 | Coping with parametric uncertainty `_. *Working Paper*. 10 | 11 | The ex-ante evaluation of policies using structural microeconometric models is based 12 | on estimated parameters as a stand-in for the truth. This practice ignores uncertainty 13 | in the counterfactual policy predictions of the model. We develop an approach that deals 14 | with parametric uncertainty and properly frames model-informed 15 | policy-making as a decision problem under uncertainty. We use the seminal human 16 | capital investment model by Keane and Wolpin (1997) as a well-known, influential, 17 | and empirically-grounded test case. We document considerable uncertainty in their 18 | policy predictions and highlight the resulting policy recommendations from using 19 | different formal rules on decision-making under uncertainty. 20 | 21 | Contact: `@peisenha `_, `@LJanys 22 | `_, `@janosg 23 | `_ 24 | 25 | ---- 26 | 27 | Bhuller, M., Eisenhauer, P. and Mendel, M. (2020). The Option Value of Education. 28 | *Working Paper*. 29 | 30 | We provide a comprehensive account of the returns to education using Norwegian 31 | population panel data with nearly career-long earnings histories. We use variation 32 | induced by a mandatory schooling reform for an instrumental variables strategy as 33 | well as the validation of a full structural model. We discuss the trade-offs between 34 | the two approaches. Using the structural model, we go beyond the standard return 35 | concepts such as Mincer returns and the internal rate of return. This allows us to 36 | account for the sequential resolution of uncertainty and nonlinearities in the 37 | returns to education. Both give rise to option values as each additional year of 38 | schooling provides information about the value of different schooling choices and 39 | new opportunities become available. We are thus able to estimate the true return to 40 | education and find an important role for option values. 41 | 42 | Contact: `@peisenha `_, `@mo2561057 43 | `_ 44 | 45 | ---- 46 | 47 | Eisenhauer, P. and Suchy, R. (2020). Robust Human Capital Investment under Risk and 48 | Ambiguity. *Working Paper*. 49 | 50 | We build on the prototypical life cycle model of human capital investment Keane and 51 | Wolpin (1994) and study individual decision-making under risk as well as ambiguity. 52 | Individuals fear model misspecification and seek robust decisions that work well 53 | over a whole range of models about their economic environment. We describe the 54 | individual's decision problem as a robust Markov decision process. Our Monte Carlo 55 | analysis indicates that the empirical finding of large psychic cost of schooling is 56 | in part due to model misspecification by econometricians who only analyze individual 57 | investment decisions under risk. This changes the mechanisms driving schooling 58 | decisions and affects the ex ante evaluation of tuition policies. 
59 | 60 | Contact: `@peisenha `_, `@rafaelsuchy 61 | `_ 62 | 63 | ---- 64 | 65 | Eisenhauer, P. (2019). `The Approximate Solution of Finite-Horizon Discrete Choice 66 | Dynamic Programming Models: Revisiting Keane & Wolpin (1994) 67 | `_. *Journal of Applied Econometrics, 34* (1), 68 | 149-154. 69 | 70 | The estimation of finite‐horizon discrete‐choice dynamic programming (DCDP) models 71 | is computationally expensive. This limits their realism and impedes verification and 72 | validation efforts. `Keane and Wolpin (Review of Economics and Statistics, 1994, 73 | 76(4), 648–672) `_ propose an interpolation method 74 | that ameliorates the computational burden but introduces approximation error. I 75 | describe their approach in detail, successfully recompute their original quality 76 | diagnostics, and provide some additional insights that underscore the trade‐off 77 | between computation time and the accuracy of estimation results. 78 | 79 | Contact: `@peisenha `_ 80 | -------------------------------------------------------------------------------- /docs/reference_guides/index.rst: -------------------------------------------------------------------------------- 1 | .. _reference_guides: 2 | 3 | Reference Guides 4 | ================ 5 | 6 | Reference Guides explain how **respy** is implemented. If you want to contribute to 7 | **respy** or if you are simply interested in the inner workings, you will find this 8 | section helpful. They assume you are already familiar with using **respy**. 9 | 10 | 11 | .. raw:: html 12 | 13 | 61 | 62 | 63 | .. toctree:: 64 | :hidden: 65 | :maxdepth: 1 66 | 67 | state_space 68 | randomness_and_reproducibility 69 | scalability 70 | -------------------------------------------------------------------------------- /docs/reference_guides/randomness_and_reproducibility.rst: -------------------------------------------------------------------------------- 1 | .. _randomness-and-reproducibility: 2 | 3 | Randomness and Reproducibility 4 | ============================== 5 | 6 | **respy** embraces randomness to study individual behavior under risk. At the same time, 7 | it is crucial to make results reproducible. To build a reproducible model, users must 8 | define three seeds for the solution, simulation and estimation of the model in the 9 | options. This allows to study the impact of randomness for each of the components 10 | independently. 11 | 12 | .. code-block:: python 13 | 14 | options = {"solution_seed": 1, "simulation_seed": 2, "estimation_seed": 3} 15 | 16 | The seeds for the solution, simulation and estimation are used to draw a 3-, 5- and 17 | 7-digit seed sequence [#f1]_. The first 100 seeds in the sequences are reserved for 18 | randomness in the startup of functions like :func:`~respy.simulate.simulate` or 19 | :func:`~respy.likelihood.log_like`, e.g., to create draws from a uniform distribution. 20 | All other seeds are used during the iterations of those functions and reset to the 21 | initial value at the begin of every iteration. 22 | 23 | As a general rule, models in **respy** are reproducible or use the same randomness as 24 | long as only model parameters are changed, for example utility parameters, but the 25 | structure of the model stays the same. The following list includes example of structural 26 | changes to the model. 27 | 28 | - Changing the choice set (forms of renaming, removing choices). 29 | - Changing the initial conditions (experiences, lagged choices, type probabilities). 
30 | - Changing the Monte Carlo integrations (sequence, number of draws). 31 | - Using interpolation and changing the number of non-interpolated states. 32 | - Removing states from the state space via filters. 33 | 34 | In the following, we document for each module the functions which use seeds to control 35 | randomness. 36 | 37 | 38 | respy.shared 39 | ------------ 40 | 41 | .. currentmodule:: respy.shared 42 | 43 | The function :func:`create_base_draws` is used in all parts, solution, simulation, and 44 | estimation, to generate random draws. :func:`transform_base_draws_with_cholesky_factor` 45 | transforms the base draws to the variance-covariance matrix implied by the model 46 | parameters. 47 | 48 | .. autosummary:: 49 | 50 | create_base_draws 51 | transform_base_draws_with_cholesky_factor 52 | 53 | 54 | respy.solve 55 | ----------- 56 | 57 | Routines under ``respy.solve`` use a seed from the sequence initialized by 58 | ``options["solution_seed"]`` to control randomness. Apart from the draws, 59 | :func:`~respy.solve.solve` relies on the following function. 60 | 61 | .. currentmodule:: respy.interpolate 62 | 63 | .. autosummary:: 64 | 65 | _get_not_interpolated_indicator 66 | 67 | 68 | respy.simulate 69 | -------------- 70 | 71 | Routines under ``respy.simulate`` use a seed from the sequence of 72 | ``options["simulation_seed"]`` to control randomness. Apart from the draws, 73 | :func:`~respy.simulate.simulate` relies on the following function to generate 74 | starting values for simulated individuals (experiences, types, etc.). 75 | 76 | .. currentmodule:: respy.simulate 77 | 78 | .. autosummary:: 79 | 80 | _sample_characteristic 81 | 82 | 83 | respy.likelihood 84 | ---------------- 85 | 86 | Routines under ``respy.likelihood`` use a seed from the sequence specified under 87 | ``options["estimation_seed"]`` to control randomness. The seed is used to create the 88 | draws to simulate the probability of observed wages with 89 | :func:`~respy.shared.create_base_draws`. 90 | 91 | 92 | respy.tests.random_model 93 | ------------------------ 94 | 95 | The regression tests are run on truncated data set which contains truncated history of 96 | individuals or missing wage information. The truncation process is controlled via a seed 97 | in the sequence initialized by ``options["simulation_seed"]``. 98 | 99 | .. currentmodule:: respy.tests.random_model 100 | 101 | .. autosummary:: 102 | 103 | simulate_truncated_data 104 | 105 | 106 | .. seealso:: 107 | 108 | See `Random number generator seed mistakes `_ for a general introduction to seeding 110 | problems. 111 | 112 | See `this comment `_ in the same post which 114 | verifies independence between sequential seeds. 115 | 116 | NumPy documentation on their `RandomState object `_ which wraps the 118 | pseudo-random number generator `Mersenne Twister `_. 120 | 121 | 122 | .. rubric:: Footnotes 123 | 124 | .. [#f1] The need for seed sequences became apparent in `#268 125 | `_. 
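As a small illustration of the seeding scheme described above, the sketch below simulates
the same model twice with unchanged seeds. The example model and the functions used are
the ones exported in ``respy/__init__.py``; the assertion only expresses the
reproducibility property stated in this guide.

.. code-block:: python

    import respy as rp

    params, options = rp.get_example_model("kw_94_one", with_data=False)
    options["solution_seed"], options["simulation_seed"] = 1, 2

    simulate = rp.get_simulate_func(params, options)
    df_first = simulate(params)
    df_second = simulate(params)

    # The seed sequences are reset at the start of every call, so repeated
    # simulations with identical params and options should coincide.
    assert df_first.equals(df_second)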
126 | -------------------------------------------------------------------------------- /docs/rtd_environment.yml: -------------------------------------------------------------------------------- 1 | channels: 2 | - defaults 3 | - conda-forge 4 | dependencies: 5 | - python 6 | - ipython 7 | - nbsphinx 8 | - numpydoc 9 | - sphinx 10 | - sphinx-autobuild 11 | - sphinx-autoapi 12 | - sphinxcontrib-bibtex>=2.0.0 13 | - pydata-sphinx-theme>=0.6.0 14 | - parso>=0.8.1 15 | - pip: 16 | - sphinx-tabs 17 | - jinja2>=3.0 18 | -------------------------------------------------------------------------------- /docs/tutorials/installation.rst: -------------------------------------------------------------------------------- 1 | Installation 2 | ============ 3 | 4 | To use **respy** in the following tutorials, you need the following three components. 5 | 6 | Anaconda 7 | -------- 8 | 9 | The Anaconda distribution is a bundle of compatible Python packages. It also includes 10 | ``conda`` which is a package manager to install, update, and remove packages. You can 11 | also manage environments with ``conda`` which are a collection of packages you need for 12 | a project. 13 | 14 | The installation instructions for multiple platforms can be found `here 15 | `_. 16 | 17 | 18 | Jupyter Lab 19 | ----------- 20 | 21 | Jupyter Lab is an IDE (integrated development environment) for literate programming 22 | meaning that the notebook display code and text alongside each other in a pleasant way. 23 | 24 | Jupyter Lab can be installed with 25 | 26 | .. code-block:: bash 27 | 28 | $ conda install jupyterlab 29 | 30 | Although `this tutorial `_ is 31 | dedicated to Jupyter notebooks, the same instructions apply to Jupyter Lab which will in 32 | the long-run supersede Jupyter notebooks. 33 | 34 | 35 | respy 36 | ----- 37 | 38 | The recommended way to install **respy** is via `conda `_, the 39 | standard package manager for scientific Python libraries. With conda available on your 40 | path, installing **respy** is as simple as typing 41 | 42 | .. code-block:: bash 43 | 44 | $ conda config --add channels conda-forge 45 | $ conda install -c opensourceeconomics respy 46 | 47 | in a command shell. The whole package repository can be found under 48 | https://anaconda.org/OpenSourceEconomics/respy. 49 | 50 | 51 | If you want to use different numerical integration methods implemented in **respy** you 52 | also need to additionally install the package 53 | `chaospy `_ as it is not added automatically as a 54 | package dependency. 55 | 56 | .. code-block:: bash 57 | 58 | $ pip install chaospy 59 | 60 | 61 | As **respy** relies heavily on ``pandas``, you might also want to install their 62 | `recommended dependencies `_ to speed up internal calculations done with 64 | `pd.eval `_. 66 | 67 | .. 
code-block:: bash 68 | 69 | conda install -c conda-forge bottleneck numexpr 70 | -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | name: respy 2 | channels: 3 | - opensourceeconomics 4 | - conda-forge 5 | dependencies: 6 | - python>=3.6 7 | - pip 8 | - anaconda-client 9 | - bottleneck 10 | - click 11 | - codecov 12 | - conda-build 13 | - conda-verify 14 | - doc8 15 | - estimagic=0.4.3 16 | - fastparquet 17 | - hypothesis 18 | - joblib 19 | - jupyterlab 20 | - line_profiler 21 | - matplotlib 22 | - mkl 23 | - nbsphinx 24 | - numba 25 | - numexpr 26 | - numpydoc 27 | - pandas>=0.24 28 | - numpy>=1.21.0 29 | - pdbpp 30 | - pre-commit 31 | - pyarrow 32 | - pyyaml 33 | - pytest>=6.2.1 34 | - pytest-cov 35 | - pytest-xdist 36 | - python-snappy 37 | - restructuredtext_lint 38 | - scipy 39 | - seaborn 40 | - snakeviz 41 | - sphinx 42 | - sphinx-autobuild 43 | - sphinxcontrib-bibtex>=2.0.0 44 | - sphinx-autoapi 45 | - pydata-sphinx-theme>=0.6.0 46 | - tox-conda 47 | - pip: 48 | - apprise 49 | - bump2version 50 | - pytest-randomly 51 | - sphinx-tabs 52 | - chaospy==3.3.8 53 | -------------------------------------------------------------------------------- /respy/__init__.py: -------------------------------------------------------------------------------- 1 | """This is the entry-point to the respy package. 2 | 3 | Include only imports which should be available using 4 | 5 | .. code-block:: 6 | 7 | import respy as rp 8 | 9 | rp. 10 | 11 | """ 12 | import pytest 13 | 14 | from respy.config import ROOT_DIR 15 | from respy.interface import get_example_model # noqa: F401 16 | from respy.interface import get_parameter_constraints # noqa: F401 17 | from respy.likelihood import get_log_like_func # noqa: F401 18 | from respy.method_of_simulated_moments import get_diag_weighting_matrix # noqa: F401 19 | from respy.method_of_simulated_moments import get_flat_moments # noqa: F401 20 | from respy.method_of_simulated_moments import get_moment_errors_func # noqa: F401 21 | from respy.simulate import get_simulate_func # noqa: F401 22 | from respy.solve import get_solve_func # noqa: F401 23 | from respy.tests.random_model import add_noise_to_params # noqa: F401 24 | 25 | 26 | __all__ = [ 27 | "get_example_model", 28 | "get_parameter_constraints", 29 | "get_solve_func", 30 | "get_simulate_func", 31 | "get_log_like_func", 32 | "get_moment_errors_func", 33 | "get_diag_weighting_matrix", 34 | "get_flat_moments", 35 | "add_noise_to_params", 36 | ] 37 | 38 | __version__ = "2.1.1" 39 | 40 | 41 | def test(*args, **kwargs): 42 | """Run basic tests of the package.""" 43 | pytest.main([str(ROOT_DIR), *args], **kwargs) 44 | -------------------------------------------------------------------------------- /respy/_numba.py: -------------------------------------------------------------------------------- 1 | """Special functions for using numba.""" 2 | import warnings 3 | 4 | import numba as nb 5 | import numpy as np 6 | from numba import NumbaDeprecationWarning 7 | from numba import types 8 | from numba.extending import intrinsic 9 | 10 | # Fix for transition to Numba 0.50. cgutils was moved from numba.cgutils to 11 | # numba.core.cgutils. 
12 | try: 13 | with warnings.catch_warnings(): 14 | warnings.simplefilter("ignore", NumbaDeprecationWarning) 15 | from numba import cgutils 16 | except ImportError: 17 | from numba.core import cgutils 18 | 19 | 20 | @intrinsic # noqa: U100 21 | def array_to_tuple(tyctx, array_or_dict, indexer_array): # noqa: U100 22 | """Convert an array to a tuple for indexing. 23 | 24 | This function is taken from 25 | https://gist.github.com/sklam/830fe01343ba95828c3b24c391855c86 to create tuple from 26 | an array for indexing which is not possible within a Numba function. 27 | 28 | Parameters 29 | ---------- 30 | array_or_dict : numpy.ndarray or numba.typed.Dict 31 | Array for which the indexer is used. 32 | indexer_array : numpy.ndarray 33 | Array which should be converted to a tuple. 34 | 35 | """ 36 | # This is the typing level. Setup the type and constant information here. 37 | if isinstance(array_or_dict, types.DictType): 38 | tuple_size = len(array_or_dict.key_type) 39 | else: 40 | tuple_size = array_or_dict.ndim 41 | 42 | tuple_type = indexer_array.dtype 43 | typed_tuple = types.UniTuple(dtype=tuple_type, count=tuple_size) 44 | function_signature = typed_tuple(array_or_dict, indexer_array) 45 | 46 | def codegen(cgctx, builder, signature, args): 47 | # This is the implementation defined using LLVM builder. 48 | lltupty = cgctx.get_value_type(typed_tuple) 49 | tup = cgutils.get_null_value(lltupty) 50 | 51 | [_, idxaryval] = args 52 | 53 | def array_checker(a): 54 | if a.size != tuple_size: 55 | raise IndexError("index array size mismatch") 56 | 57 | # Compile and call array_checker. 58 | cgctx.compile_internal( 59 | builder, array_checker, types.none(indexer_array), [idxaryval] 60 | ) 61 | 62 | def array_indexer(a, i): 63 | return a[i] 64 | 65 | # loop to fill the tuple 66 | for i in range(tuple_size): 67 | dataidx = cgctx.get_constant(types.intp, i) 68 | # compile and call array_indexer 69 | data = cgctx.compile_internal( 70 | builder, 71 | array_indexer, 72 | indexer_array.dtype(indexer_array, types.intp), 73 | [idxaryval, dataidx], 74 | ) 75 | tup = builder.insert_value(tup, data, i) 76 | return tup 77 | 78 | return function_signature, codegen 79 | 80 | 81 | @nb.njit 82 | def sum_over_numba_boolean_unituple(tuple_): 83 | """Compute the sum over a boolean :class:`numba.types.UniTuple`. 84 | 85 | Parameters 86 | ---------- 87 | tuple_ : numba.types.UniTuple[bool] 88 | 89 | Returns 90 | ------- 91 | sum_ : Union[float, int] 92 | 93 | """ 94 | return np.sum(np.array([1 for i in tuple_ if i])) 95 | -------------------------------------------------------------------------------- /respy/config.py: -------------------------------------------------------------------------------- 1 | """General configuration for respy.""" 2 | from pathlib import Path 3 | 4 | import numpy as np 5 | 6 | # Check if chaospy is installed. 7 | try: 8 | import chaospy # noqa 9 | except ImportError: 10 | CHAOSPY_INSTALLED = False 11 | else: 12 | CHAOSPY_INSTALLED = True 13 | 14 | # Obtain the root directory of the package. Do not import respy which creates a circular 15 | # import. 16 | ROOT_DIR = Path(__file__).parent 17 | 18 | # Directory with additional resources for the testing harness 19 | TEST_DIR = ROOT_DIR / "tests" 20 | TEST_RESOURCES_DIR = ROOT_DIR / "tests" / "resources" 21 | 22 | # Set maximum numbers to 1e200 and log(1e200) = 460. 
23 | MAX_FLOAT = 1e200 24 | MIN_FLOAT = -MAX_FLOAT 25 | MAX_LOG_FLOAT = 460 26 | MIN_LOG_FLOAT = -MAX_LOG_FLOAT 27 | 28 | COVARIATES_DOT_PRODUCT_DTYPE = np.float64 29 | """numpy.dtype : Dtype of covariates before being used in a dot product. 30 | 31 | If you convert a DataFrame with boolean variables and others to an NumPy array, the 32 | resulting array will have an 'object' dtype. Having an 'object' dtype array causes a lot 33 | of problems as functions like :func:`numpy.exp` will fail raising an uninformative error 34 | message. 35 | 36 | """ 37 | 38 | DTYPE_STATES = np.uint8 39 | INDEXER_DTYPE = np.int32 40 | """numpy.dtype : Data type for the entries in the state space indexer.""" 41 | INDEXER_INVALID_INDEX = np.iinfo(INDEXER_DTYPE).min 42 | """int : Identifier for invalid states. 43 | 44 | Every valid state has a unique number which is stored in the state space indexer at the 45 | correct position. Invalid entries in the indexer are filled with 46 | :data:`INDEXER_INVALID_INDEX` which is the most negative value for 47 | :data:`INDEXER_DTYPE`. Using the invalid value as an index likely raises an 48 | :class:`IndexError` as negative indices cannot exceed the length of the indexed array 49 | dimension. 50 | 51 | """ 52 | 53 | # Some assert functions take rtol instead of decimals 54 | TOL_REGRESSION_TESTS = 1e-10 55 | 56 | SEED_STARTUP_ITERATION_GAP = 1_000_000 57 | 58 | DEFAULT_OPTIONS = { 59 | "estimation_draws": 200, 60 | "estimation_seed": 1, 61 | "estimation_tau": 500, 62 | "interpolation_points": -1, 63 | "simulation_agents": 1000, 64 | "simulation_seed": 2, 65 | "solution_draws": 200, 66 | "solution_seed": 3, 67 | "core_state_space_filters": [], 68 | "negative_choice_set": {}, 69 | "monte_carlo_sequence": "random", 70 | "cache_compression": "snappy", 71 | } 72 | 73 | KEANE_WOLPIN_1994_MODELS = [f"kw_94_{suffix}" for suffix in ["one", "two", "three"]] 74 | KEANE_WOLPIN_1997_MODELS = [ 75 | "kw_97_basic", 76 | "kw_97_basic_respy", 77 | "kw_97_extended", 78 | "kw_97_extended_respy", 79 | ] 80 | KEANE_WOLPIN_2000_MODELS = ["kw_2000"] 81 | ROBINSON_CRUSOE_MODELS = [ 82 | "robinson_crusoe_basic", 83 | "robinson_crusoe_extended", 84 | "robinson_crusoe_with_observed_characteristics", 85 | ] 86 | 87 | EXAMPLE_MODELS = ( 88 | KEANE_WOLPIN_1994_MODELS 89 | + KEANE_WOLPIN_1997_MODELS 90 | + KEANE_WOLPIN_2000_MODELS 91 | + ROBINSON_CRUSOE_MODELS 92 | ) 93 | -------------------------------------------------------------------------------- /respy/conftest.py: -------------------------------------------------------------------------------- 1 | """This module includes code to configure pytest.""" 2 | import os 3 | 4 | import numpy as np 5 | import pandas as pd 6 | import pytest 7 | 8 | import respy as rp 9 | 10 | 11 | @pytest.fixture(autouse=True) 12 | def _fresh_directory(tmp_path): 13 | """Each test is executed in a fresh directory.""" 14 | os.chdir(tmp_path) 15 | 16 | 17 | @pytest.fixture(autouse=True) 18 | def _patch_doctest_namespace(doctest_namespace): 19 | """Patch the namespace for doctests. 20 | 21 | This function adds some packages to namespace of every doctest. 22 | 23 | """ 24 | doctest_namespace["np"] = np 25 | doctest_namespace["pd"] = pd 26 | doctest_namespace["rp"] = rp 27 | 28 | 29 | @pytest.fixture(scope="session") 30 | def seed(): 31 | """Create placeholder value for function argument `seed` to be overwritten.""" 32 | return "placeholder value" 33 | 34 | 35 | def pytest_addoption(parser): 36 | """Add a custom option to the pytest call. 37 | 38 | Simply use 39 | 40 | .. 
code-block:: bash 41 | 42 | $ pytest --n-random-tests=n 43 | 44 | to restrict running each random test with `n` increments of the base seed given by 45 | `pytest-randomly`. 46 | 47 | """ 48 | parser.addoption( 49 | "--n-random-tests", 50 | action="store", 51 | default=5, 52 | help="Number of runs for each random test with different seeds.", 53 | ) 54 | 55 | 56 | def pytest_generate_tests(metafunc): 57 | """Re-run some tests with different seeds by incrementing the base seed. 58 | 59 | The base seed is given by `pytest-randomly` in each session derived from the 60 | timestamp. You can use five as the seed value with 61 | 62 | .. code-block:: bash 63 | 64 | $ pytest --randomly-seed=5 65 | 66 | Then, tests with certain parameter names are parameterized with incremented seed 67 | values (5, 6, 7, 8, ...). Existing parameterizations are extended. 68 | 69 | """ 70 | if "model_or_seed" in metafunc.fixturenames: 71 | argument = "model_or_seed" 72 | elif "seed" in metafunc.fixturenames: 73 | argument = "seed" 74 | else: 75 | argument = False 76 | 77 | if argument: 78 | n_random_tests = int(metafunc.config.getoption("--n-random-tests")) 79 | seeds = [ 80 | metafunc.config.getoption("--randomly-seed", 0) + i 81 | for i in range(n_random_tests) 82 | ] 83 | 84 | mark = metafunc.definition.get_closest_marker("parametrize") 85 | # If a marker exist, extend it with the seeds. 86 | if mark: 87 | # Combine the existing parametrize with the seeds. 88 | extended_mark = pytest.mark.parametrize(argument, mark.args[1] + seeds).mark 89 | 90 | # Replace the old parametrize marker with the extended marker. 91 | all_marks = metafunc.definition.own_markers 92 | pos = [ 93 | i for i in range(len(all_marks)) if all_marks[i].name == "parametrize" 94 | ][0] 95 | metafunc.definition.own_markers[pos] = extended_mark 96 | # Else, parametrize with the seeds. 97 | else: 98 | metafunc.parametrize(argument, seeds) 99 | -------------------------------------------------------------------------------- /respy/data.py: -------------------------------------------------------------------------------- 1 | """Everything related to the original data from Keane and Wolpin (1997).""" 2 | import numpy as np 3 | import pandas as pd 4 | 5 | from respy import shared as rp_shared 6 | from respy.config import TEST_RESOURCES_DIR 7 | from respy.pre_processing.model_processing import process_params_and_options 8 | 9 | 10 | def _create_working_experience(df, optim_paras): 11 | for choice in optim_paras["choices_w_wage"]: 12 | df[f"Experience_{choice.title()}"] = df.Choice.eq(choice) 13 | df[f"Experience_{choice.title()}"] = ( 14 | df.groupby("Identifier")[f"Experience_{choice.title()}"] 15 | .shift() 16 | .fillna(0) 17 | .astype(np.uint8) 18 | ) 19 | df[f"Experience_{choice.title()}"] = df.groupby("Identifier")[ 20 | f"Experience_{choice.title()}" 21 | ].cumsum() 22 | 23 | return df 24 | 25 | 26 | def create_kw_97(params, options): 27 | """Create data for Keane and Wolpin (1997). 28 | 29 | The data includes individuals labor market history and accumulated experiences in 30 | white-collar, blue-collar occupations, military and schooling. 
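
    Returns
    -------
    pandas.DataFrame
        The data are indexed by ``Identifier`` and ``Period`` and restricted to the
        columns required for estimation (see
        :func:`respy.shared.generate_column_dtype_dict_for_estimation`).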
31 | 32 | """ 33 | optim_paras, options = process_params_and_options(params, options) 34 | 35 | dtypes = { 36 | "Identifier": int, 37 | "Age": int, 38 | "Experience_School": np.uint8, 39 | "Choice": "category", 40 | "Wage": float, 41 | } 42 | 43 | df = pd.read_csv( 44 | TEST_RESOURCES_DIR / "kw_97_data.csv", dtype=dtypes, float_precision="high" 45 | ) 46 | 47 | df.Identifier = df.groupby("Identifier").ngroup().astype(np.uint16) 48 | 49 | codes_to_choices = { 50 | "3": "white_collar", 51 | "4": "blue_collar", 52 | "5": "military", 53 | "1": "school", 54 | "2": "home", 55 | } 56 | df.Choice = df.Choice.cat.set_categories(codes_to_choices).cat.rename_categories( 57 | codes_to_choices 58 | ) 59 | 60 | df = _create_working_experience(df, optim_paras) 61 | 62 | df["Lagged_Choice_1"] = df.groupby("Identifier").Choice.shift(1) 63 | df["Period"] = df.Age - 16 64 | df = df.query("Age >= 16") 65 | 66 | cd_dict = rp_shared.generate_column_dtype_dict_for_estimation(optim_paras) 67 | 68 | df = df[cd_dict].set_index(["Identifier", "Period"]) 69 | 70 | return df 71 | -------------------------------------------------------------------------------- /respy/pre_processing/__init__.py: -------------------------------------------------------------------------------- 1 | """Everything related to processing of models, data and so forth.""" 2 | -------------------------------------------------------------------------------- /respy/pre_processing/data_checking.py: -------------------------------------------------------------------------------- 1 | """Test functions to ensure the validity of data.""" 2 | import numpy as np 3 | 4 | from respy.shared import generate_column_dtype_dict_for_estimation 5 | 6 | 7 | def check_estimation_data(df, optim_paras): 8 | """Check data for estimation. 9 | 10 | Parameters 11 | ---------- 12 | optim_paras : dict 13 | Dictionary containing model optim_paras. 14 | df : pandas.DataFrame 15 | Data for estimation. 16 | 17 | Raises 18 | ------ 19 | AssertionError 20 | If data has not the expected format. 21 | 22 | """ 23 | # Make sure all columns are available. 24 | col_dtype = generate_column_dtype_dict_for_estimation(optim_paras) 25 | df = df.reset_index()[col_dtype] 26 | 27 | n_periods = optim_paras["n_periods"] 28 | 29 | # 1. Identifier. 30 | # It is assumed in the likelihood function that Identifier starts at 0 and 31 | # increments in steps of one. 32 | unique = df["Identifier"].unique() 33 | assert (unique == np.arange(len(unique))).all() 34 | 35 | # 2. Period. 36 | assert df.Period.le(n_periods - 1).all() 37 | 38 | # 3. Choice. 39 | assert df.Choice.isin(optim_paras["choices"]).all() 40 | 41 | # 4. Wage. 42 | assert df.Wage.fillna(1).gt(0).all() 43 | 44 | # 8. Lagged_Choice. 45 | for i in range(1, optim_paras["n_lagged_choices"] + 1): 46 | assert df[f"Lagged_Choice_{i}"].isin(optim_paras["choices"]).all() 47 | 48 | if optim_paras["n_periods"] > 1 and optim_paras["n_lagged_choices"] > 0: 49 | choices = ["Choice"] + [ 50 | f"Lagged_Choice_{i}" for i in range(1, optim_paras["n_lagged_choices"] + 1) 51 | ][:-1] 52 | 53 | for i in range(len(choices) - 1): 54 | lc = df.groupby("Identifier")[choices[i]].transform("shift").dropna() 55 | assert ( 56 | df[choices[i + 1]].loc[~df.Period.le(i)].cat.codes == lc.cat.codes 57 | ).all() 58 | 59 | # Observable characteristics. 60 | for observable in optim_paras["observables"]: 61 | assert df[observable.title()].nunique() <= len( 62 | optim_paras["observables"][observable] 63 | ) 64 | 65 | # Others. 
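    # The checks below ensure that, apart from ``Wage``, no column contains missing
    # values, that recorded experience is consistent with the sequence of choices,
    # that no Identifier-Period pair is duplicated, and that every individual is
    # observed in each period up to their last one.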
66 | assert df.drop(columns="Wage").notna().all().all() 67 | 68 | # We check individual state variables against the recorded choices. 69 | for choice in optim_paras["choices_w_exp"]: 70 | df["__exp"] = df[f"Experience_{choice.title()}"] + df["Choice"].eq(choice) 71 | shifted_exp = ( 72 | df.groupby("Identifier")["__exp"].transform("shift").dropna().astype(int) 73 | ) 74 | assert shifted_exp.eq( 75 | df.loc[shifted_exp.index, f"Experience_{choice.title()}"] 76 | ).all() 77 | 78 | # Check that there are no duplicated observations for any period by agent. 79 | assert ~df.duplicated(subset=["Identifier", "Period"]).any() 80 | 81 | # Check that we observe the whole sequence of observations. 82 | max_periods_per_ind = df.groupby("Identifier").Period.max() + 1 83 | n_obs_per_ind = df.groupby("Identifier").size() 84 | assert (max_periods_per_ind == n_obs_per_ind).all() 85 | 86 | 87 | def check_simulated_data(optim_paras, df): 88 | """Check simulated data. 89 | 90 | This routine runs some consistency checks on the simulated dataset. Some more 91 | restrictions are imposed on the simulated dataset than the observed data. 92 | 93 | """ 94 | df = df.copy() 95 | 96 | # Distribute class attributes 97 | n_types = optim_paras["n_types"] 98 | 99 | # Run all tests available for the estimation data. 100 | check_estimation_data(df, optim_paras) 101 | 102 | # 9. Types. 103 | if optim_paras["n_types"] >= 2: 104 | assert df.Type.max() <= n_types - 1 105 | assert df.Type.notna().all() 106 | assert df.groupby("Identifier").Type.nunique().eq(1).all() 107 | 108 | # Check that there are not missing wage observations if an agent is working. Also, 109 | # we check that if an agent is not working, there also is no wage observation. 110 | is_working = df["Choice"].isin(optim_paras["choices_w_wage"]) 111 | assert df.Wage[is_working].notna().all() 112 | assert df.Wage[~is_working].isna().all() 113 | -------------------------------------------------------------------------------- /respy/pre_processing/lagged_choice_params.csv: -------------------------------------------------------------------------------- 1 | category,name,value,comment 2 | wage_a,work_a_lagged,0.3054,gain of remaining in the same occupation as previous period (wage) 3 | nonpec_a,not_exp_a_lagged,-1182,reward of switching to a from other occupation (non pecuniary) 4 | wage_b,work_b_lagged,0.0964,gain of remaining in the same occupation as previous period (wage) 5 | nonpec_b,not_exp_b_lagged,-1647,reward of switching to a from other occupation (non pecuniary) 6 | nonpec_edu,returns_to_high_school,-23283,reward for going back to high school 7 | nonpec_edu,returns_to_college,-10700,reward for going back to college 8 | -------------------------------------------------------------------------------- /respy/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenSourceEconomics/respy/91e8a193fb7d5cf7212c15e50ae43f810bb9b1eb/respy/tests/__init__.py -------------------------------------------------------------------------------- /respy/tests/resources/conditional_draws_fixture.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenSourceEconomics/respy/91e8a193fb7d5cf7212c15e50ae43f810bb9b1eb/respy/tests/resources/conditional_draws_fixture.pickle -------------------------------------------------------------------------------- /respy/tests/resources/kw_2000.yaml: 
-------------------------------------------------------------------------------- 1 | estimation_draws: 200 2 | estimation_seed: 500 3 | estimation_tau: 500 4 | interpolation_points: -1 5 | n_periods: 50 6 | simulation_agents: 5000 7 | simulation_seed: 132 8 | solution_draws: 500 9 | solution_seed: 456 10 | core_state_space_filters: 11 | # In periods > 0, if agents accumulated experience only in one choice, lagged choice 12 | # cannot be different. 13 | - period > 0 and exp_{choices_w_exp} == period and lagged_choice_1 != '{choices_w_exp}' 14 | # In periods > 0, if agents always accumulated experience, lagged choice cannot be 15 | # non-experience choice. 16 | - period > 0 and exp_white_collar + exp_blue_collar + exp_military + exp_school == period and lagged_choice_1 == '{choices_wo_exp}' 17 | # In periods > 0, if agents accumulated no years of schooling, lagged choice cannot 18 | # be school. 19 | - period > 0 and lagged_choice_1 == 'school' and exp_school == 0 20 | # If experience in choice 0 and 1 are zero, lagged choice cannot be this choice. 21 | - lagged_choice_1 == '{choices_w_wage}' and exp_{choices_w_wage} == 0 22 | # In period 0, agents cannot choose occupation a or b or mil. 23 | - period == 0 and lagged_choice_1 == '{choices_w_wage}' 24 | covariates: 25 | not_exp_white_collar_lagged: exp_white_collar > 0 and lagged_choice_1 != 'white_collar' 26 | not_exp_blue_collar_lagged: exp_blue_collar > 0 and lagged_choice_1 != 'blue_collar' 27 | work_white_collar_lagged: lagged_choice_1 == 'white_collar' 28 | work_blue_collar_lagged: lagged_choice_1 == 'blue_collar' 29 | school_lagged: lagged_choice_1 == 'school' 30 | not_any_exp_white_collar: exp_white_collar == 0 31 | not_any_exp_blue_collar: exp_blue_collar == 0 32 | not_any_exp_military: exp_military == 0 33 | any_exp_white_collar: exp_white_collar > 0 34 | any_exp_blue_collar: exp_blue_collar > 0 35 | any_exp_military: exp_military > 0 36 | hs_graduate: exp_school >= 12 37 | co_graduate: exp_school >= 16 38 | common_hs_graduate: hs_graduate 39 | common_co_graduate: co_graduate 40 | returns_to_high_school: ~school_lagged and ~hs_graduate 41 | returns_to_college: ~school_lagged and hs_graduate 42 | is_minor: period < 2 43 | is_young_adult: 2 <= period <= 4 44 | is_adult: 5 <= period 45 | constant: "1" 46 | exp_white_collar_square: exp_white_collar ** 2 / 100 47 | exp_blue_collar_square: exp_blue_collar ** 2 / 100 48 | exp_military_square: exp_military ** 2 / 100 49 | military_dropout: exp_military == 1 50 | up_to_nine_years_school: exp_school <= 9 51 | at_least_ten_years_school: 10 <= exp_school 52 | black: race == 'black' 53 | white: race == 'white' 54 | black_and_up_to_nine_years_school: exp_school <= 9 and black 55 | black_and_at_least_ten_years_school: 10 <= exp_school and black 56 | -------------------------------------------------------------------------------- /respy/tests/resources/kw_2000_table_1_whites_choice_probabilities.csv: -------------------------------------------------------------------------------- 1 | age,school,home,white_collar,blue_collar,military 2 | 16,0.858,0.106,0.003,0.033,0.001 3 | 17,0.746,0.145,0.011,0.083,0.015 4 | 18,0.416,0.219,0.068,0.245,0.052 5 | 19,0.313,0.219,0.086,0.303,0.08 6 | 20,0.256,0.205,0.112,0.341,0.085 7 | 21,0.211,0.197,0.13,0.381,0.081 8 | 22,0.131,0.165,0.199,0.435,0.07 9 | 23,0.085,0.149,0.271,0.44,0.055 10 | 24,0.071,0.122,0.308,0.452,0.048 11 | 25,0.041,0.103,0.364,0.452,0.041 12 | 26,0.05,0.122,0.336,0.485,0.0081 13 | 
-------------------------------------------------------------------------------- /respy/tests/resources/kw_2000_table_2_blacks_choice_probabilities.csv: -------------------------------------------------------------------------------- 1 | age,school,home,white_collar,blue_collar,military 2 | 16,0.816,0.151,0.006,0.027,0.001 3 | 17,0.649,0.272,0.004,0.058,0.018 4 | 18,0.377,0.369,0.023,0.153,0.0784 5 | 19,0.211,0.405,0.049,0.223,0.113 6 | 20,0.133,0.395,0.058,0.296,0.118 7 | 21,0.083,0.362,0.082,0.362,0.11 8 | 22,0.069,0.347,0.121,0.369,0.095 9 | 23,0.051,0.283,0.152,0.429,0.086 10 | 24,0.031,0.274,0.174,0.439,0.082 11 | 25,0.023,0.29,0.187,0.435,0.065 12 | 26,0.021,0.259,0.228,0.43,0.062 13 | -------------------------------------------------------------------------------- /respy/tests/resources/kw_2000_table_3_wage_fit_blacks.csv: -------------------------------------------------------------------------------- 1 | age,all_occupations,white_collar,blue_collar,military 2 | 17,9413,,9597, 3 | 18,10043,,10480,9230 4 | 19,11424,15226,11400,9971 5 | 20,11982,11631,12547,10823 6 | 21,12696,14309,12524,12033 7 | 22,13880,15577,13322,13920 8 | 23,14390,16850,13536,14453 9 | 24,15750,17686,15019,15536 10 | 25,16665,18371,16050, 11 | 26,17044,19952,15489, 12 | -------------------------------------------------------------------------------- /respy/tests/resources/kw_2000_table_3_wage_fit_whites.csv: -------------------------------------------------------------------------------- 1 | age,all_occupations,white_collar,blue_collar,military 2 | 17,11036,,11572, 3 | 18,12060,11775,12603,10171 4 | 19,12246,12376,12949,9714 5 | 20,13635,13824,14363,10852 6 | 21,14977,15578,15313,12619 7 | 22,17561,20236,16947,13771 8 | 23,18719,20745,17884,14868 9 | 24,20942,24066,19245,15910 10 | 25,22754,24899,21473, 11 | 26,25390,32756,20738, 12 | -------------------------------------------------------------------------------- /respy/tests/resources/kw_2000_table_5_school_attainment.csv: -------------------------------------------------------------------------------- 1 | ,age,white,black 2 | less_than_twelve_years_school,23,0.24,0.359 3 | twelve_years_school,23,0.362,0.377 4 | between_thirteen_and_fifteen_years_school,23,0.187,0.196 5 | at_least_sixteen_years_school,23,0.213,0.068 6 | mean_years_school,23,12.7,11.9 7 | -------------------------------------------------------------------------------- /respy/tests/resources/kw_2000_table_a3_type_probabilities.csv: -------------------------------------------------------------------------------- 1 | race,exp_school,type_1,type_2,type_3,type_4 2 | white,up_to_nine_years_school,0.0339,0.2435,0.4012,0.3215 3 | white,at_least_ten_years_school,.1688,0.3151,0.3527,0.1634 4 | black,up_to_nine_years_school,2.0262,2.1264,2.0349,0.1875 5 | black,at_least_ten_years_school,2.1053,2.1712,0.0870,0.1895 6 | -------------------------------------------------------------------------------- /respy/tests/resources/kw_94_one.csv: -------------------------------------------------------------------------------- 1 | category,name,value,comment 2 | delta,delta,0.95,discount factor 3 | wage_a,constant,9.21,log of rental price 4 | wage_a,exp_edu,0.038,return to an additional year of schooling 5 | wage_a,exp_a,0.033,return to same sector experience 6 | wage_a,exp_a_square,-0.0005,"return to same sector, quadratic experience" 7 | wage_a,exp_b,0,return to other sector experience 8 | wage_a,exp_b_square,0,"return to other sector, quadratic experience" 9 | wage_b,constant,8.48,log of rental price 10 | 
wage_b,exp_edu,0.07,return to an additional year of schooling 11 | wage_b,exp_b,0.067,return to same sector experience 12 | wage_b,exp_b_square,-0.001,"return to same sector, quadratic experience" 13 | wage_b,exp_a,0.022,return to other sector experience 14 | wage_b,exp_a_square,-0.0005,"return to other sector, quadratic experience" 15 | nonpec_edu,constant,0,constant reward for choosing education 16 | nonpec_edu,at_least_twelve_exp_edu,0,"reward for going to college (tuition, etc.)" 17 | nonpec_edu,not_edu_last_period,-4000,reward for going back to school 18 | nonpec_home,constant,17750,constant reward of non-market alternative 19 | shocks_sdcorr,sd_a,0.2,"Element 1,1 of standard-deviation/correlation matrix" 20 | shocks_sdcorr,sd_b,0.25,"Element 2,2 of standard-deviation/correlation matrix" 21 | shocks_sdcorr,sd_edu,1500,"Element 3,3 of standard-deviation/correlation matrix" 22 | shocks_sdcorr,sd_home,1500,"Element 4,4 of standard-deviation/correlation matrix" 23 | shocks_sdcorr,corr_b_a,0,"Element 2,1 of standard-deviation/correlation matrix" 24 | shocks_sdcorr,corr_edu_a,0,"Element 3,1 of standard-deviation/correlation matrix" 25 | shocks_sdcorr,corr_edu_b,0,"Element 3,2 of standard-deviation/correlation matrix" 26 | shocks_sdcorr,corr_home_a,0,"Element 4,1 of standard-deviation/correlation matrix" 27 | shocks_sdcorr,corr_home_b,0,"Element 4,2 of standard-deviation/correlation matrix" 28 | shocks_sdcorr,corr_home_edu,0,"Element 4,3 of standard-deviation/correlation matrix" 29 | lagged_choice_1_edu,probability,1,Probability that the first lagged choice is education (simulation only) 30 | initial_exp_edu_10,probability,1,Probability that the initial level of education is 10 31 | maximum_exp,edu,20,"Maximum level of experience for education (optional, reduces computation complexity)" 32 | -------------------------------------------------------------------------------- /respy/tests/resources/kw_94_one.yaml: -------------------------------------------------------------------------------- 1 | estimation_draws: 200 2 | estimation_seed: 500 3 | estimation_tau: 500 4 | interpolation_points: -1 5 | n_periods: 40 6 | simulation_agents: 1000 7 | simulation_seed: 132 8 | solution_draws: 500 9 | solution_seed: 15 10 | monte_carlo_sequence: random 11 | core_state_space_filters: 12 | # In periods > 0, if agents accumulated experience only in one choice, lagged choice 13 | # cannot be different. 14 | - period > 0 and exp_{choices_w_exp} == period and lagged_choice_1 != '{choices_w_exp}' 15 | # In periods > 0, if agents always accumulated experience, lagged choice cannot be 16 | # non-experience choice. 17 | - period > 0 and exp_a + exp_b + exp_edu == period and lagged_choice_1 == '{choices_wo_exp}' 18 | # In periods > 0, if agents accumulated no years of schooling, lagged choice cannot 19 | # be school. 20 | - period > 0 and lagged_choice_1 == 'edu' and exp_edu == 0 21 | # If experience in choice 0 and 1 are zero, lagged choice cannot be this choice. 22 | - lagged_choice_1 == '{choices_w_wage}' and exp_{choices_w_wage} == 0 23 | # In period 0, agents cannot choose occupation a or b or mil. 
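  # ('{choices_w_wage}' is a placeholder that is filled in for each wage choice,
  # here 'a' and 'b'; the kw_94 models have no military occupation.)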
24 | - period == 0 and lagged_choice_1 == '{choices_w_wage}' 25 | covariates: 26 | constant: "1" 27 | exp_a_square: exp_a ** 2 28 | exp_b_square: exp_b ** 2 29 | at_least_twelve_exp_edu: exp_edu >= 12 30 | not_edu_last_period: lagged_choice_1 != 'edu' 31 | -------------------------------------------------------------------------------- /respy/tests/resources/kw_94_table_6.csv: -------------------------------------------------------------------------------- 1 | ,kw_94_one,kw_94_one,kw_94_one,kw_94_two,kw_94_two,kw_94_two,kw_94_three,kw_94_three,kw_94_three 2 | ,edu,a,b,edu,a,b,edu,a,b 3 | Exact Solution - Mean,1.44,-3.43,2.19,1.12,-2.71,2.08,1.67,-1.27,-0.236 4 | Exact Solution - Std,0.18,0.94,0.89,0.22,0.53,0.43,0.2,0.18,0.1 5 | Approximate Solution - Mean,1.72,-4.36,2.9,1.08,-2.7,2.12,1.14,-0.812,-0.154 6 | Approximate Solution - Std,0.27,1.1,1.06,0.3,0.75,0.6,0.23,0.27,0.2 7 | Absolute Prediction Error,0.28,-0.93,0.71,-0.035,0.006,0.039,-0.529,0.455,0.083 8 | 95% Confidence Interval - Lower,0.178,-1.47,0.19,-0.159,-0.304,-0.207,-0.641,0.339,0.015 9 | 95% Confidence Interval - Upper,0.382,-0.39,1.27,0.089,0.316,0.285,-0.417,0.569,0.151 10 | -------------------------------------------------------------------------------- /respy/tests/resources/kw_94_three.csv: -------------------------------------------------------------------------------- 1 | category,name,value,comment 2 | delta,delta,0.95,discount factor 3 | wage_a,constant,8,log of rental price 4 | wage_a,exp_edu,0.07,return to an additional year of schooling 5 | wage_a,exp_a,0.055,return to same sector experience 6 | wage_a,exp_a_square,0,"return to same sector, quadratic experience" 7 | wage_a,exp_b,0,return to other sector experience 8 | wage_a,exp_b_square,0,"return to other sector, quadratic experience" 9 | wage_b,constant,7.9,log of rental price 10 | wage_b,exp_edu,0.07,return to an additional year of schooling 11 | wage_b,exp_b,0.06,return to same sector experience 12 | wage_b,exp_b_square,0,"return to same sector, quadratic experience" 13 | wage_b,exp_a,0.055,return to other sector experience 14 | wage_b,exp_a_square,0,"return to other sector, quadratic experience" 15 | nonpec_edu,constant,5000,constant reward for choosing education 16 | nonpec_edu,at_least_twelve_exp_edu,-5000,"reward for going to college (tuition, etc.)" 17 | nonpec_edu,not_edu_last_period,-20000,reward for going back to school 18 | nonpec_home,constant,21500,constant reward of non-market alternative 19 | shocks_sdcorr,sd_a,1,"Element 1,1 of standard-deviation/correlation matrix" 20 | shocks_sdcorr,sd_b,1,"Element 2,2 of standard-deviation/correlation matrix" 21 | shocks_sdcorr,sd_edu,7000,"Element 3,3 of standard-deviation/correlation matrix" 22 | shocks_sdcorr,sd_home,8500,"Element 4,4 of standard-deviation/correlation matrix" 23 | shocks_sdcorr,corr_b_a,0.5,"Element 2,1 of standard-deviation/correlation matrix" 24 | shocks_sdcorr,corr_edu_a,0,"Element 3,1 of standard-deviation/correlation matrix" 25 | shocks_sdcorr,corr_edu_b,0,"Element 3,2 of standard-deviation/correlation matrix" 26 | shocks_sdcorr,corr_home_a,0,"Element 4,1 of standard-deviation/correlation matrix" 27 | shocks_sdcorr,corr_home_b,0,"Element 4,2 of standard-deviation/correlation matrix" 28 | shocks_sdcorr,corr_home_edu,-0.5,"Element 4,3 of standard-deviation/correlation matrix" 29 | lagged_choice_1_edu,probability,1,Probability that the first lagged choice is education (simulation only) 30 | initial_exp_edu_10,probability,1,Probability that the initial level of education is 10 31 | 
maximum_exp,edu,20,"Maximum level of experience for education (optional, reduces computation complexity)" 32 | -------------------------------------------------------------------------------- /respy/tests/resources/kw_94_three.yaml: -------------------------------------------------------------------------------- 1 | estimation_draws: 200 2 | estimation_seed: 500 3 | estimation_tau: 500 4 | interpolation_points: -1 5 | n_periods: 40 6 | simulation_agents: 1000 7 | simulation_seed: 132 8 | solution_draws: 500 9 | solution_seed: 461 10 | monte_carlo_sequence: random 11 | core_state_space_filters: 12 | # In periods > 0, if agents accumulated experience only in one choice, lagged choice 13 | # cannot be different. 14 | - "period > 0 and exp_{choices_w_exp} == period and lagged_choice_1 != '{choices_w_exp}'" 15 | # In periods > 0, if agents always accumulated experience, lagged choice cannot be 16 | # non-experience choice. 17 | - "period > 0 and exp_a + exp_b + exp_edu == period and lagged_choice_1 == '{choices_wo_exp}'" 18 | # In periods > 0, if agents accumulated no years of schooling, lagged choice cannot 19 | # be school. 20 | - "period > 0 and lagged_choice_1 == 'edu' and exp_edu == 0" 21 | # If experience in choice 0 and 1 are zero, lagged choice cannot be this choice. 22 | - "lagged_choice_1 == '{choices_w_wage}' and exp_{choices_w_wage} == 0" 23 | # In period 0, agents cannot choose occupation a or b or mil. 24 | - "period == 0 and lagged_choice_1 == '{choices_w_wage}'" 25 | covariates: 26 | constant: "1" 27 | exp_a_square: exp_a ** 2 28 | exp_b_square: exp_b ** 2 29 | at_least_twelve_exp_edu: exp_edu >= 12 30 | not_edu_last_period: lagged_choice_1 != 'edu' 31 | -------------------------------------------------------------------------------- /respy/tests/resources/kw_94_two.csv: -------------------------------------------------------------------------------- 1 | category,name,value,comment 2 | delta,delta,0.95,discount factor 3 | wage_a,constant,9.21,log of rental price 4 | wage_a,exp_edu,0.04,return to an additional year of schooling 5 | wage_a,exp_a,0.033,return to same sector experience 6 | wage_a,exp_a_square,-0.0005,"return to same sector, quadratic experience" 7 | wage_a,exp_b,0,return to other sector experience 8 | wage_a,exp_b_square,0,"return to other sector, quadratic experience" 9 | wage_b,constant,8.2,log of rental price 10 | wage_b,exp_edu,0.08,return to an additional year of schooling 11 | wage_b,exp_b,0.067,return to same sector experience 12 | wage_b,exp_b_square,-0.001,"return to same sector, quadratic experience" 13 | wage_b,exp_a,0.022,return to other sector experience 14 | wage_b,exp_a_square,-0.0005,"return to other sector, quadratic experience" 15 | nonpec_edu,constant,5000,constant reward for choosing education 16 | nonpec_edu,at_least_twelve_exp_edu,-5000,"reward for going to college (tuition, etc.)" 17 | nonpec_edu,not_edu_last_period,-15000,reward for going back to school 18 | nonpec_home,constant,14500,constant reward of non-market alternative 19 | shocks_sdcorr,sd_a,0.4,"Element 1,1 of standard-deviation/correlation matrix" 20 | shocks_sdcorr,sd_b,0.5,"Element 2,2 of standard-deviation/correlation matrix" 21 | shocks_sdcorr,sd_edu,6000,"Element 3,3 of standard-deviation/correlation matrix" 22 | shocks_sdcorr,sd_home,6000,"Element 4,4 of standard-deviation/correlation matrix" 23 | shocks_sdcorr,corr_b_a,0,"Element 2,1 of standard-deviation/correlation matrix" 24 | shocks_sdcorr,corr_edu_a,0,"Element 3,1 of standard-deviation/correlation matrix" 25 | 
shocks_sdcorr,corr_edu_b,0,"Element 3,2 of standard-deviation/correlation matrix" 26 | shocks_sdcorr,corr_home_a,0,"Element 4,1 of standard-deviation/correlation matrix" 27 | shocks_sdcorr,corr_home_b,0,"Element 4,2 of standard-deviation/correlation matrix" 28 | shocks_sdcorr,corr_home_edu,0,"Element 4,3 of standard-deviation/correlation matrix" 29 | lagged_choice_1_edu,probability,1,Probability that the first lagged choice is education (simulation only) 30 | initial_exp_edu_10,probability,1,Probability that the initial level of education is 10 31 | maximum_exp,edu,20,"Maximum level of experience for education (optional, reduces computation complexity)" 32 | -------------------------------------------------------------------------------- /respy/tests/resources/kw_94_two.yaml: -------------------------------------------------------------------------------- 1 | estimation_draws: 200 2 | estimation_seed: 500 3 | estimation_tau: 500 4 | interpolation_points: -1 5 | n_periods: 40 6 | simulation_agents: 1000 7 | simulation_seed: 132 8 | solution_draws: 500 9 | solution_seed: 1 10 | monte_carlo_sequence: random 11 | core_state_space_filters: 12 | # In periods > 0, if agents accumulated experience only in one choice, lagged choice 13 | # cannot be different. 14 | - "period > 0 and exp_{choices_w_exp} == period and lagged_choice_1 != '{choices_w_exp}'" 15 | # In periods > 0, if agents always accumulated experience, lagged choice cannot be 16 | # non-experience choice. 17 | - "period > 0 and exp_a + exp_b + exp_edu == period and lagged_choice_1 == '{choices_wo_exp}'" 18 | # In periods > 0, if agents accumulated no years of schooling, lagged choice cannot 19 | # be school. 20 | - "period > 0 and lagged_choice_1 == 'edu' and exp_edu == 0" 21 | # If experience in choice 0 and 1 are zero, lagged choice cannot be this choice. 22 | - "lagged_choice_1 == '{choices_w_wage}' and exp_{choices_w_wage} == 0" 23 | # In period 0, agents cannot choose occupation a or b or mil. 
24 | - "period == 0 and lagged_choice_1 == '{choices_w_wage}'" 25 | covariates: 26 | constant: "1" 27 | exp_a_square: exp_a ** 2 28 | exp_b_square: exp_b ** 2 29 | at_least_twelve_exp_edu: exp_edu >= 12 30 | not_edu_last_period: lagged_choice_1 != 'edu' 31 | -------------------------------------------------------------------------------- /respy/tests/resources/kw_94_wp_table_2_1.csv: -------------------------------------------------------------------------------- 1 | period,a,b,edu,home 2 | 1,.386,.116,.490,.008 3 | 2,.427,.175,.354,.044 4 | 3,.444,.220,.308,.028 5 | 4,.459,.263,.255,.023 6 | 5,.417,.332,.218,.033 7 | 6,.427,.374,.175,.024 8 | 7,.412,.387,.179,.022 9 | 8,.399,.421,.155,.025 10 | 9,.372,.475,.130,.023 11 | 10,.355,.501,.126,.018 12 | 11,.340,.537,.099,.024 13 | 12,.342,.567,.081,.010 14 | 13,.322,.585,.073,.020 15 | 14,.321,.612,.056,.011 16 | 15,.303,.619,.062,.016 17 | 16,.297,.640,.052,.011 18 | 17,.290,.664,.034,.012 19 | 18,.304,.656,.028,.012 20 | 19,.283,.686,.018,.013 21 | 20,.277,.695,.016,.012 22 | 21,.288,.691,.011,.010 23 | 22,.266,.716,.003,.015 24 | 23,.268,.717,.006,.009 25 | 24,.258,.731,.001,.010 26 | 25,.265,.715,.005,.015 27 | 26,.270,.720,.003,.007 28 | 27,.254,.730,.000,.016 29 | 28,.252,.743,.000,.005 30 | 29,.249,.736,.000,.015 31 | 30,.241,.742,.000,.017 32 | 31,.246,.743,.000,.011 33 | 32,.243,.750,.000,.007 34 | 33,.242,.748,.000,.010 35 | 34,.243,.746,.000,.011 36 | 35,.229,.757,.000,.014 37 | 36,.244,.750,.000,.006 38 | 37,.234,.755,.000,.011 39 | 38,.238,.749,.000,.013 40 | 39,.231,.753,.000,.016 41 | 40,.230,.758,.000,.012 42 | -------------------------------------------------------------------------------- /respy/tests/resources/kw_94_wp_table_2_2.csv: -------------------------------------------------------------------------------- 1 | period,a,b,edu,home 2 | 1,.344,.038,.575,.043 3 | 2,.481,.059,.375,.085 4 | 3,.606,.073,.238,.083 5 | 4,.633,.115,.176,.076 6 | 5,.658,.126,.143,.073 7 | 6,.659,.146,.111,.084 8 | 7,.662,.151,.096,.091 9 | 8,.642,.182,.097,.079 10 | 9,.657,.174,.084,.085 11 | 10,.632,.210,.082,.076 12 | 11,.648,.227,.056,.069 13 | 12,.642,.241,.046,.071 14 | 13,.641,.254,.044,.061 15 | 14,.643,.265,.036,.056 16 | 15,.633,.278,.029,.060 17 | 16,.625,.291,.023,.061 18 | 17,.623,.305,.020,.052 19 | 18,.628,.289,.028,.055 20 | 19,.599,.325,.014,.062 21 | 20,.597,.322,.020,.061 22 | 21,.621,.317,.017,.045 23 | 22,.613,.327,.010,.050 24 | 23,.585,.358,.006,.051 25 | 24,.580,.360,.005,.055 26 | 25,.596,.344,.000,.060 27 | 26,.622,.334,.003,.041 28 | 27,.566,.376,.002,.056 29 | 28,.567,.386,.001,.046 30 | 29,.548,.394,.000,.058 31 | 30,.560,.373,.002,.065 32 | 31,.562,.374,.000,.064 33 | 32,.568,.388,.000,.044 34 | 33,.562,.374,.000,.064 35 | 34,.569,.367,.000,.064 36 | 35,.578,.369,.000,.053 37 | 36,.557,.390,.000,.053 38 | 37,.562,.387,.000,.051 39 | 38,.542,.397,.000,.061 40 | 39,.562,.385,.000,.053 41 | 40,.551,.390,.000,.059 42 | -------------------------------------------------------------------------------- /respy/tests/resources/kw_94_wp_table_2_3.csv: -------------------------------------------------------------------------------- 1 | period,a,b,edu,home 2 | 1,.169,.036,.752,.043 3 | 2,.308,.042,.594,.056 4 | 3,.455,.058,.430,.057 5 | 4,.574,.066,.326,.034 6 | 5,.628,.070,.255,.047 7 | 6,.710,.071,.189,.030 8 | 7,.725,.080,.166,.029 9 | 8,.746,.090,.139,.025 10 | 9,.752,.090,.132,.026 11 | 10,.762,.101,.123,.014 12 | 11,.782,.115,.083,.020 13 | 12,.797,.120,.071,.012 14 | 13,.793,.129,.070,.008 15 | 14,.782,.153,.059,.006 16 | 
15,.788,.148,.055,.009 17 | 16,.779,.158,.054,.009 18 | 17,.783,.173,.042,.002 19 | 18,.775,.182,.035,.008 20 | 19,.776,.192,.029,.003 21 | 20,.763,.208,.028,.001 22 | 21,.757,.218,.022,.003 23 | 22,.740,.235,.020,.005 24 | 23,.704,.280,.014,.002 25 | 24,.712,.274,.012,.002 26 | 25,.712,.269,.013,.006 27 | 26,.698,.290,.008,.004 28 | 27,.657,.332,.004,.007 29 | 28,.625,.368,.003,.004 30 | 29,.628,.369,.001,.002 31 | 30,.587,.396,.004,.013 32 | 31,.557,.433,.001,.009 33 | 32,.541,.452,.000,.007 34 | 33,.516,.468,.000,.016 35 | 34,.494,.484,.001,.021 36 | 35,.445,.518,.000,.037 37 | 36,.388,.571,.000,.041 38 | 37,.370,.575,.001,.054 39 | 38,.329,.584,.000,.087 40 | 39,.306,.595,.000,.099 41 | 40,.270,.604,.000,.126 42 | -------------------------------------------------------------------------------- /respy/tests/resources/kw_97_basic.yaml: -------------------------------------------------------------------------------- 1 | estimation_draws: 200 2 | estimation_seed: 500 3 | estimation_tau: 500 4 | interpolation_points: -1 5 | n_periods: 50 6 | simulation_agents: 5000 7 | simulation_seed: 132 8 | solution_draws: 500 9 | solution_seed: 456 10 | monte_carlo_sequence: random 11 | covariates: 12 | hs_graduate: exp_school >= 12 13 | co_graduate: exp_school >= 16 14 | constant: "1" 15 | exp_white_collar_square: exp_white_collar ** 2 / 100 16 | exp_blue_collar_square: exp_blue_collar ** 2 / 100 17 | exp_military_square: exp_military ** 2 / 100 18 | up_to_nine_years_school: exp_school <= 9 19 | at_least_ten_years_school: 10 <= exp_school 20 | -------------------------------------------------------------------------------- /respy/tests/resources/kw_97_basic_respy.yaml: -------------------------------------------------------------------------------- 1 | estimation_draws: 200 2 | estimation_seed: 500 3 | estimation_tau: 500 4 | interpolation_points: -1 5 | n_periods: 50 6 | simulation_agents: 5000 7 | simulation_seed: 132 8 | solution_draws: 500 9 | solution_seed: 456 10 | monte_carlo_sequence: random 11 | covariates: 12 | hs_graduate: exp_school >= 12 13 | co_graduate: exp_school >= 16 14 | constant: "1" 15 | exp_white_collar_square: exp_white_collar ** 2 / 100 16 | exp_blue_collar_square: exp_blue_collar ** 2 / 100 17 | exp_military_square: exp_military ** 2 / 100 18 | up_to_nine_years_school: exp_school <= 9 19 | at_least_ten_years_school: 10 <= exp_school 20 | -------------------------------------------------------------------------------- /respy/tests/resources/kw_97_extended.yaml: -------------------------------------------------------------------------------- 1 | estimation_draws: 200 2 | estimation_seed: 3 3 | estimation_tau: 500 4 | interpolation_points: -1 5 | n_periods: 50 6 | simulation_agents: 5000 7 | simulation_seed: 2 8 | solution_draws: 500 9 | solution_seed: 1 10 | monte_carlo_sequence: random 11 | core_state_space_filters: 12 | # In periods > 0, if agents accumulated experience only in one choice, lagged choice 13 | # cannot be different. 14 | - period > 0 and exp_{choices_w_exp} == period and lagged_choice_1 != '{choices_w_exp}' 15 | # In periods > 0, if agents always accumulated experience, lagged choice cannot be 16 | # non-experience choice. 17 | - period > 0 and exp_white_collar + exp_blue_collar + exp_military + exp_school == period and lagged_choice_1 == '{choices_wo_exp}' 18 | # In periods > 0, if agents accumulated no years of schooling, lagged choice cannot 19 | # be school. 
20 | - period > 0 and lagged_choice_1 == 'school' and exp_school == 0 21 | # If experience in choice 0 and 1 are zero, lagged choice cannot be this choice. 22 | - lagged_choice_1 == '{choices_w_wage}' and exp_{choices_w_wage} == 0 23 | # In period 0, agents cannot choose occupation a or b or mil. 24 | - period == 0 and lagged_choice_1 == '{choices_w_wage}' 25 | covariates: 26 | not_exp_white_collar_lagged: exp_white_collar > 0 and lagged_choice_1 != 'white_collar' 27 | not_exp_blue_collar_lagged: exp_blue_collar > 0 and lagged_choice_1 != 'blue_collar' 28 | work_white_collar_lagged: lagged_choice_1 == 'white_collar' 29 | work_blue_collar_lagged: lagged_choice_1 == 'blue_collar' 30 | school_lagged: lagged_choice_1 == 'school' 31 | not_any_exp_white_collar: exp_white_collar == 0 32 | not_any_exp_blue_collar: exp_blue_collar == 0 33 | not_any_exp_military: exp_military == 0 34 | any_exp_white_collar: exp_white_collar > 0 35 | any_exp_blue_collar: exp_blue_collar > 0 36 | any_exp_military: exp_military > 0 37 | hs_graduate: exp_school >= 12 38 | co_graduate: exp_school >= 16 39 | common_hs_graduate: hs_graduate 40 | common_co_graduate: co_graduate 41 | returns_to_high_school: ~school_lagged and ~hs_graduate 42 | returns_to_college: ~school_lagged and hs_graduate 43 | is_minor: period < 2 44 | is_young_adult: 2 <= period <= 4 45 | is_adult: 5 <= period 46 | constant: "1" 47 | exp_white_collar_square: exp_white_collar ** 2 / 100 48 | exp_blue_collar_square: exp_blue_collar ** 2 / 100 49 | exp_military_square: exp_military ** 2 / 100 50 | military_dropout: exp_military == 1 51 | up_to_nine_years_school: exp_school <= 9 52 | at_least_ten_years_school: 10 <= exp_school 53 | seven_years_of_schooling: exp_school == 7 54 | eight_years_of_schooling: exp_school == 8 55 | nine_years_of_schooling: exp_school == 9 56 | ten_years_of_schooling: exp_school == 10 57 | eleven_years_of_schooling: exp_school == 11 58 | -------------------------------------------------------------------------------- /respy/tests/resources/kw_97_extended_respy.yaml: -------------------------------------------------------------------------------- 1 | estimation_draws: 200 2 | estimation_seed: 3 3 | estimation_tau: 500 4 | interpolation_points: -1 5 | n_periods: 50 6 | simulation_agents: 5000 7 | simulation_seed: 2 8 | solution_draws: 500 9 | solution_seed: 1 10 | monte_carlo_sequence: random 11 | core_state_space_filters: 12 | # In periods > 0, if agents accumulated experience only in one choice, lagged choice 13 | # cannot be different. 14 | - period > 0 and exp_{choices_w_exp} == period and lagged_choice_1 != '{choices_w_exp}' 15 | # In periods > 0, if agents always accumulated experience, lagged choice cannot be 16 | # non-experience choice. 17 | - period > 0 and exp_white_collar + exp_blue_collar + exp_military + exp_school == period and lagged_choice_1 == '{choices_wo_exp}' 18 | # In periods > 0, if agents accumulated no years of schooling, lagged choice cannot 19 | # be school. 20 | - period > 0 and lagged_choice_1 == 'school' and exp_school == 0 21 | # If experience in choice 0 and 1 are zero, lagged choice cannot be this choice. 22 | - lagged_choice_1 == '{choices_w_wage}' and exp_{choices_w_wage} == 0 23 | # In period 0, agents cannot choose occupation a or b or mil. 
24 | - period == 0 and lagged_choice_1 == '{choices_w_wage}' 25 | covariates: 26 | not_exp_white_collar_lagged: exp_white_collar > 0 and lagged_choice_1 != 'white_collar' 27 | not_exp_blue_collar_lagged: exp_blue_collar > 0 and lagged_choice_1 != 'blue_collar' 28 | work_white_collar_lagged: lagged_choice_1 == 'white_collar' 29 | work_blue_collar_lagged: lagged_choice_1 == 'blue_collar' 30 | school_lagged: lagged_choice_1 == 'school' 31 | not_any_exp_white_collar: exp_white_collar == 0 32 | not_any_exp_blue_collar: exp_blue_collar == 0 33 | not_any_exp_military: exp_military == 0 34 | any_exp_white_collar: exp_white_collar > 0 35 | any_exp_blue_collar: exp_blue_collar > 0 36 | any_exp_military: exp_military > 0 37 | hs_graduate: exp_school >= 12 38 | co_graduate: exp_school >= 16 39 | common_hs_graduate: hs_graduate 40 | common_co_graduate: co_graduate 41 | returns_to_high_school: ~school_lagged and ~hs_graduate 42 | returns_to_college: ~school_lagged and hs_graduate 43 | is_minor: period < 2 44 | is_young_adult: 2 <= period <= 4 45 | is_adult: 5 <= period 46 | constant: "1" 47 | exp_white_collar_square: exp_white_collar ** 2 / 100 48 | exp_blue_collar_square: exp_blue_collar ** 2 / 100 49 | exp_military_square: exp_military ** 2 / 100 50 | military_dropout: exp_military == 1 51 | up_to_nine_years_school: exp_school <= 9 52 | at_least_ten_years_school: 10 <= exp_school 53 | seven_years_of_schooling: exp_school == 7 54 | eight_years_of_schooling: exp_school == 8 55 | nine_years_of_schooling: exp_school == 9 56 | ten_years_of_schooling: exp_school == 10 57 | eleven_years_of_schooling: exp_school == 11 58 | -------------------------------------------------------------------------------- /respy/tests/resources/regression_vault.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenSourceEconomics/respy/91e8a193fb7d5cf7212c15e50ae43f810bb9b1eb/respy/tests/resources/regression_vault.pickle -------------------------------------------------------------------------------- /respy/tests/resources/robinson_crusoe_basic.csv: -------------------------------------------------------------------------------- 1 | category,name,value 2 | delta,delta,0.95 3 | wage_fishing,exp_fishing,0.3 4 | nonpec_fishing,constant,-0.2 5 | nonpec_hammock,constant,2 6 | shocks_sdcorr,sd_fishing,0.5 7 | shocks_sdcorr,sd_hammock,0.5 8 | shocks_sdcorr,corr_hammock_fishing,0 9 | -------------------------------------------------------------------------------- /respy/tests/resources/robinson_crusoe_basic.yaml: -------------------------------------------------------------------------------- 1 | solution_draws: 100 2 | solution_seed: 456 3 | n_periods: 5 4 | simulation_agents: 1_000 5 | simulation_seed: 132 6 | estimation_draws: 100 7 | estimation_seed: 100 8 | estimation_tau: 0.001 9 | interpolation_points: -1 10 | covariates: 11 | constant: "1" 12 | -------------------------------------------------------------------------------- /respy/tests/resources/robinson_crusoe_extended.csv: -------------------------------------------------------------------------------- 1 | category,name,value 2 | delta,delta,0.95 3 | wage_fishing,exp_fishing,0.1 4 | wage_fishing,contemplation_with_friday,0.4 5 | nonpec_fishing,constant,-1 6 | nonpec_friday,constant,-1 7 | nonpec_friday,not_fishing_last_period,-1 8 | nonpec_hammock,constant,2.5 9 | nonpec_hammock,not_fishing_last_period,-1 10 | shocks_sdcorr,sd_fishing,1 11 | shocks_sdcorr,sd_friday,1 12 | shocks_sdcorr,sd_hammock,1 13 | 
shocks_sdcorr,corr_friday_fishing,0 14 | shocks_sdcorr,corr_hammock_fishing,0 15 | shocks_sdcorr,corr_hammock_friday,0 16 | lagged_choice_1_hammock,constant,1 17 | -------------------------------------------------------------------------------- /respy/tests/resources/robinson_crusoe_extended.yaml: -------------------------------------------------------------------------------- 1 | n_periods: 10 2 | estimation_draws: 200 3 | estimation_seed: 500 4 | estimation_tau: 0.001 5 | interpolation_points: -1 6 | simulation_agents: 1_000 7 | simulation_seed: 132 8 | solution_draws: 500 9 | solution_seed: 456 10 | covariates: 11 | constant: "1" 12 | contemplation_with_friday: "exp_friday == 1 and exp_fishing >= 1" 13 | not_fishing_last_period: "lagged_choice_1 != 'fishing'" 14 | negative_choice_set: 15 | friday: [ 16 | "period < 2", 17 | "exp_fishing == 0" 18 | ] 19 | core_state_space_filters: 20 | # If Robinson has always been fishing or talking to Friday, the previous choice cannot 21 | # be 'hammock'. 22 | - period > 0 and exp_fishing + exp_friday == period and lagged_choice_1 == 'hammock' 23 | # Talking to Friday is restricted up to the third period. 24 | - period <= 2 and exp_friday != 0 25 | # Experience of choice 'friday' has to be lower than period by 2. 26 | - period >= 3 and period - exp_friday < 2 27 | # Robinson has to go fishing before talking to Friday. 28 | - exp_friday > 0 and exp_fishing == 0 29 | # Same applies if Robinson acquired experienced in fishing just in the previous 30 | # period, but had already talked to Friday. 31 | - exp_friday > 0 and exp_fishing == 1 and lagged_choice_1 == 'fishing' 32 | # If Robinson has maximum possible experience with Friday, the previous choice can 33 | # only be 'friday'. 34 | - period - exp_friday == 2 and lagged_choice_1 != 'friday' and period > 2 35 | # If experience in fishing (talking to Friday) is zero, previous choice cannot be 36 | # fishing (friday). The '{choices_w_exp}' is a placeholder for choices with experience. 
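  # (In this model it expands to 'fishing' and 'friday', i.e. the filter below is
  # applied once for each experience choice.)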
37 | - exp_{choices_w_exp} == 0 and lagged_choice_1 == '{choices_w_exp}' 38 | -------------------------------------------------------------------------------- /respy/tests/resources/robinson_crusoe_with_observed_characteristics.csv: -------------------------------------------------------------------------------- 1 | category,name,value 2 | delta,delta,0.95 3 | wage_fishing,exp_fishing,0.3 4 | nonpec_fishing,constant,-0.2 5 | nonpec_hammock,constant,2 6 | shocks_sdcorr,sd_fishing,0.5 7 | shocks_sdcorr,sd_hammock,0.5 8 | shocks_sdcorr,corr_hammock_fishing,0 9 | observable_fishing_grounds_rich,probability,0.5 10 | observable_fishing_grounds_poor,probability,0.5 11 | -------------------------------------------------------------------------------- /respy/tests/resources/robinson_crusoe_with_observed_characteristics.yaml: -------------------------------------------------------------------------------- 1 | n_periods: 10 2 | estimation_draws: 200 3 | estimation_seed: 500 4 | estimation_tau: 0.001 5 | interpolation_points: -1 6 | simulation_agents: 1_000 7 | simulation_seed: 132 8 | solution_draws: 500 9 | solution_seed: 456 10 | covariates: 11 | constant: "1" 12 | rich_fishing_grounds: fishing_grounds == 'rich' 13 | -------------------------------------------------------------------------------- /respy/tests/test_conditional_draws.py: -------------------------------------------------------------------------------- 1 | import pickle 2 | 3 | import numpy as np 4 | import pytest 5 | from numpy.testing import assert_array_almost_equal as aaae 6 | 7 | from respy.conditional_draws import calculate_conditional_draws 8 | from respy.conditional_draws import update_cholcov 9 | from respy.conditional_draws import update_cholcov_with_measurement_error 10 | from respy.conditional_draws import update_mean_and_evaluate_likelihood 11 | from respy.config import MAX_LOG_FLOAT 12 | from respy.config import TEST_RESOURCES_DIR 13 | 14 | 15 | @pytest.fixture() 16 | def kalman_results(): 17 | """The inputs and outputs were generated using a well tested Kalman filter.""" 18 | with open(TEST_RESOURCES_DIR / "conditional_draws_fixture.pickle", "rb") as p: 19 | fix = pickle.load(p) 20 | return fix 21 | 22 | 23 | @pytest.mark.precise 24 | @pytest.mark.unit 25 | @pytest.mark.parametrize("i", range(20)) 26 | def test_update_and_evaluate_likelihood(i, kalman_results): 27 | inp = kalman_results["mean"][i]["input"] 28 | calculated_mean, calculated_like = update_mean_and_evaluate_likelihood(*inp) 29 | expected_mean = kalman_results["mean"][i]["output_mean"] 30 | expected_like = kalman_results["mean"][i]["output_loglike"] 31 | aaae(calculated_mean, expected_mean) 32 | aaae(calculated_like, expected_like) 33 | 34 | 35 | @pytest.mark.precise 36 | @pytest.mark.unit 37 | @pytest.mark.parametrize("i", range(10)) 38 | def test_update_cholcovs_with_error(i, kalman_results): 39 | inp = kalman_results["cov_error"][i]["input"] 40 | calculated_chol = update_cholcov_with_measurement_error(**inp) 41 | expected_chol = kalman_results["cov_error"][i]["output"] 42 | 43 | calculated_cov = np.matmul( 44 | calculated_chol, np.transpose(calculated_chol, axes=(0, 2, 1)) 45 | ) 46 | expected_cov = np.matmul(expected_chol, np.transpose(expected_chol, axes=(0, 2, 1))) 47 | 48 | aaae(calculated_cov, expected_cov) 49 | 50 | 51 | @pytest.mark.precise 52 | @pytest.mark.unit 53 | def test_update_cholcovs(): 54 | cov = np.array([[1, 0.8, 0.8], [0.8, 1, 0.8], [0.8, 0.8, 1]]) 55 | 56 | chol = np.linalg.cholesky(cov) 57 | 58 | expected = np.array( 59 | [ 60 | [[0, 
0, 0], [0, 0.6, 0], [0, 0.26666667, 0.53748385]], 61 | [[0.6, 0, 0], [0, 0, 0], [0.26666667, 0, 0.53748385]], 62 | [[1, 0, 0], [0.8, 0.6, 0], [0.8, 0.26666667, 0.53748385]], 63 | ] 64 | ) 65 | 66 | calculated = update_cholcov(chol, 2) 67 | i = 1 68 | aaae(calculated[i], expected[i], decimal=5) 69 | 70 | 71 | @pytest.mark.precise 72 | @pytest.mark.unit 73 | def test_calculate_conditional_draws(): 74 | draws = np.array([[0.5, -1, 1]]) 75 | updated_mean = np.arange(3) 76 | updated_chols = np.zeros((3, 3, 3)) 77 | updated_chols[1] = np.array([[1, 0, 0], [2, 3, 0], [4, 5, 6]]) 78 | 79 | calculated = calculate_conditional_draws( 80 | draws, updated_mean, updated_chols, 1, MAX_LOG_FLOAT 81 | )[0] 82 | expected = np.array([1.64872127, 0.36787944, 5]) 83 | 84 | aaae(calculated, expected) 85 | -------------------------------------------------------------------------------- /respy/tests/test_flexible_choices.py: -------------------------------------------------------------------------------- 1 | """Contains code for testing for flexible choices.""" 2 | import pandas as pd 3 | import pytest 4 | 5 | from respy.pre_processing.model_processing import process_params_and_options 6 | from respy.simulate import get_simulate_func 7 | from respy.state_space import create_state_space_class 8 | from respy.tests.utils import process_model_or_seed 9 | 10 | 11 | @pytest.mark.integration 12 | def test_choice_restrictions(): 13 | """Basic first test.""" 14 | # Load model. 15 | params, options = process_model_or_seed("robinson_crusoe_extended") 16 | 17 | # Extend with observable characteristic. 18 | params.loc[("observable_health_well", "probability"), "value"] = 0.9 19 | params.loc[("observable_health_sick", "probability"), "value"] = 0.1 20 | 21 | # Sick people can never work. 22 | options["negative_choice_set"] = { 23 | "fishing": ["health == 'sick' & period < 2", "health == 'sick' & period >= 2"], 24 | "friday": ["period < 2", "exp_fishing == 0"], 25 | } 26 | # Create internal specification objects. 27 | optim_paras, options = process_params_and_options(params, options) 28 | 29 | state_space = create_state_space_class(optim_paras, options) 30 | 31 | for x in state_space.dense_key_to_complex.values(): 32 | if (x[0] < 2) & (x[2] == (0,)): 33 | assert x[1] == (False, False, True) 34 | elif x[2] == (0,): 35 | assert x[1] in [(False, False, True), (False, True, True)] 36 | elif (x[0] < 2) & (x[2] == (1,)): 37 | assert x[1] == (True, False, True) 38 | elif x[2] == (1,): 39 | assert x[1] in [(True, False, True), (True, True, True)] 40 | 41 | 42 | @pytest.mark.end_to_end 43 | def test_simulation_with_flexible_choice_sets(): 44 | params, options = process_model_or_seed("robinson_crusoe_basic") 45 | 46 | # Extend with observable characteristic. 47 | params.loc[("observable_health_well", "probability"), "value"] = 0.9 48 | params.loc[("observable_health_sick", "probability"), "value"] = 0.1 49 | 50 | # Sick people can never work. 51 | options["negative_choice_set"] = { 52 | "fishing": ["health == 'sick'"], 53 | "friday": ["period < 2", "exp_fishing == 0"], 54 | } 55 | # Create internal specification objects. 
56 | optim_paras, options = process_params_and_options(params, options) 57 | simulate = get_simulate_func(params, options) 58 | df = simulate(params) 59 | 60 | assert isinstance(df, pd.DataFrame) 61 | -------------------------------------------------------------------------------- /respy/tests/test_integration.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from respy.likelihood import get_log_like_func 4 | from respy.tests.random_model import add_noise_to_params 5 | from respy.tests.random_model import generate_random_model 6 | from respy.tests.random_model import simulate_truncated_data 7 | 8 | 9 | @pytest.mark.end_to_end 10 | def test_simulation_and_estimation_with_different_models(): 11 | """Test the evaluation of the criterion function not at the true parameters.""" 12 | # Simulate a dataset 13 | params, options = generate_random_model() 14 | df = simulate_truncated_data(params, options) 15 | 16 | # Evaluate at different points, ensuring that the simulated dataset still fits. 17 | log_like = get_log_like_func(params, options, df) 18 | params_ = add_noise_to_params(params, options) 19 | 20 | params.equals(params_) 21 | 22 | log_like(params) 23 | 24 | 25 | @pytest.mark.end_to_end 26 | def test_invariant_results_for_two_estimations(): 27 | params, options = generate_random_model() 28 | df = simulate_truncated_data(params, options) 29 | 30 | log_like = get_log_like_func(params, options, df) 31 | 32 | # First estimation. 33 | crit_val = log_like(params) 34 | 35 | # Second estimation. 36 | crit_val_ = log_like(params) 37 | 38 | assert crit_val == crit_val_ 39 | -------------------------------------------------------------------------------- /respy/tests/test_interface.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from respy.config import EXAMPLE_MODELS 4 | from respy.interface import get_example_model 5 | from respy.interface import get_parameter_constraints 6 | 7 | 8 | @pytest.mark.unit 9 | @pytest.mark.parametrize("model", EXAMPLE_MODELS) 10 | def test_get_example_model(model): 11 | _ = get_example_model(model, with_data=False) 12 | 13 | 14 | @pytest.mark.unit 15 | @pytest.mark.parametrize("model", EXAMPLE_MODELS) 16 | def test_get_parameter_constraints(model): 17 | _ = get_parameter_constraints(model) 18 | -------------------------------------------------------------------------------- /respy/tests/test_interpolate.py: -------------------------------------------------------------------------------- 1 | from itertools import count 2 | 3 | import pytest 4 | 5 | from respy.interpolate import _split_interpolation_points_evenly 6 | from respy.solve import get_solve_func 7 | from respy.tests.utils import process_model_or_seed 8 | 9 | 10 | @pytest.mark.end_to_end 11 | def test_run_through_of_solve_with_interpolation(seed): 12 | params, options = process_model_or_seed( 13 | seed, point_constr={"n_periods": 5, "interpolation_points": 10} 14 | ) 15 | 16 | solve = get_solve_func(params, options) 17 | solve(params) 18 | 19 | 20 | @pytest.mark.unit 21 | @pytest.mark.parametrize( 22 | "dense_index_to_n_states, interpolation_points", 23 | [({0: 50, 1: 150}, 100), ({0: 4, 5: 4, 10: 4}, 10)], 24 | ) 25 | def test_split_interpolation_points_evenly( 26 | dense_index_to_n_states, interpolation_points 27 | ): 28 | options = { 29 | "interpolation_points": interpolation_points, 30 | "solution_seed_iteration": count(0), 31 | } 32 | 33 | interpolations_points_splitted = 
_split_interpolation_points_evenly( 34 | dense_index_to_n_states, 0, options 35 | ) 36 | 37 | for index in dense_index_to_n_states: 38 | assert dense_index_to_n_states[index] >= interpolations_points_splitted[index] 39 | -------------------------------------------------------------------------------- /respy/tests/test_likelihood.py: -------------------------------------------------------------------------------- 1 | import hypothesis.strategies as st 2 | import numpy as np 3 | import pandas as pd 4 | import pytest 5 | from hypothesis import given 6 | from hypothesis.extra.numpy import arrays 7 | from scipy import special 8 | 9 | from respy.likelihood import _logsumexp 10 | from respy.likelihood import get_log_like_func 11 | from respy.simulate import get_simulate_func 12 | from respy.tests.utils import process_model_or_seed 13 | 14 | 15 | @pytest.mark.integration 16 | @pytest.mark.parametrize("model", ["robinson_crusoe_basic"]) 17 | def test_return_output_dict_for_likelihood(model): 18 | params, options = process_model_or_seed(model) 19 | options["n_periods"] = 3 20 | 21 | simulate = get_simulate_func(params, options) 22 | df = simulate(params) 23 | 24 | log_like = get_log_like_func(params, options, df, return_scalar=False) 25 | log_like = log_like(params) 26 | 27 | assert isinstance(log_like["value"], float) 28 | assert isinstance(log_like["contributions"], np.ndarray) 29 | assert isinstance(log_like["comparison_plot_data"], pd.DataFrame) 30 | 31 | 32 | @pytest.mark.integration 33 | @pytest.mark.parametrize("model", ["robinson_crusoe_basic"]) 34 | def test_return_scalar_for_likelihood(model): 35 | params, options = process_model_or_seed(model) 36 | options["n_periods"] = 3 37 | 38 | simulate = get_simulate_func(params, options) 39 | df = simulate(params) 40 | 41 | log_like = get_log_like_func(params, options, df, return_scalar=True) 42 | value = log_like(params) 43 | 44 | assert isinstance(value, float) 45 | 46 | log_like_contribs = get_log_like_func(params, options, df, return_scalar=False) 47 | outputs = log_like_contribs(params) 48 | 49 | assert isinstance(outputs, dict) 50 | 51 | 52 | @pytest.mark.unit 53 | @pytest.mark.precise 54 | @given( 55 | arrays( 56 | dtype=np.float64, 57 | shape=st.integers(2, 10), 58 | elements=st.floats(allow_nan=False, allow_infinity=False), 59 | ) 60 | ) 61 | def test_logsumexp(array): 62 | expected = special.logsumexp(array) 63 | result = _logsumexp(array) 64 | 65 | np.testing.assert_allclose(result, expected) 66 | -------------------------------------------------------------------------------- /respy/tests/test_parallelization.py: -------------------------------------------------------------------------------- 1 | import numba as nb 2 | import pytest 3 | from numba.typed import Dict 4 | 5 | from respy.parallelization import _infer_dense_keys_from_arguments 6 | from respy.parallelization import _is_dense_dictionary_argument 7 | from respy.parallelization import _is_dictionary_with_integer_keys 8 | 9 | 10 | def _typeddict_wo_integer_keys(): 11 | dictionary = Dict.empty( 12 | key_type=nb.types.UniTuple(nb.types.int64, 2), 13 | value_type=nb.types.int64, 14 | ) 15 | dictionary[(1, 2)] = 1 16 | return dictionary 17 | 18 | 19 | def _typeddict_w_integer_keys(): 20 | dictionary = Dict.empty( 21 | key_type=nb.types.int64, 22 | value_type=nb.types.int64, 23 | ) 24 | dictionary[1] = 1 25 | return dictionary 26 | 27 | 28 | @pytest.mark.unit 29 | @pytest.mark.parametrize( 30 | "input_, expected", 31 | [ 32 | ({1: 2, 3: 4}, True), 33 | (1, False), 34 | ([3, 4, 5], 
False), 35 | (_typeddict_wo_integer_keys(), False), 36 | (_typeddict_w_integer_keys(), False), 37 | ], 38 | ) 39 | def test_is_dictionary_with_integer_keys(input_, expected): 40 | assert _is_dictionary_with_integer_keys(input_) is expected 41 | 42 | 43 | @pytest.mark.unit 44 | @pytest.mark.parametrize( 45 | "args, kwargs, expected", 46 | [(({1: None, 2: None},), {"kwarg_1": {2: None, 3: None}}, {2})], 47 | ) 48 | def test_infer_dense_keys_from_arguments(args, kwargs, expected): 49 | result = _infer_dense_keys_from_arguments(args, kwargs) 50 | assert result == expected 51 | 52 | 53 | @pytest.mark.unit 54 | @pytest.mark.parametrize( 55 | "arg, dense_keys, expected", 56 | [({1: None, 2: None}, {1, 2}, True), ((1,), {1, 2, 3}, False)], 57 | ) 58 | def test_is_dense_dictionary_argument(arg, dense_keys, expected): 59 | result = _is_dense_dictionary_argument(arg, dense_keys) 60 | assert result is expected 61 | -------------------------------------------------------------------------------- /respy/tests/test_process_covariates.py: -------------------------------------------------------------------------------- 1 | import io 2 | from textwrap import dedent 3 | 4 | import pandas as pd 5 | import pytest 6 | 7 | from respy.pre_processing.process_covariates import remove_irrelevant_covariates 8 | 9 | 10 | @pytest.mark.unit 11 | @pytest.mark.precise 12 | def test_identify_relevant_covariates(): 13 | params = pd.read_csv( 14 | io.StringIO( 15 | dedent( 16 | """ 17 | category,name,value 18 | wage_a,constant,1 19 | nonpec_b,upper_upper,1 20 | wage_c,upper_upper_with_spacing_problem,1 21 | """ 22 | ) 23 | ), 24 | index_col=["category", "name"], 25 | ) 26 | 27 | options = { 28 | "covariates": { 29 | "constant": "1", 30 | "nested_covariate": "2", 31 | "upper": "nested_covariate > 2", 32 | "upper_upper": "upper == 5", 33 | "unrelated_covariate": "2", 34 | "unrelated_covariate_upper": "unrelated_covariate", 35 | "upper_upper_with_spacing_problem": "upper>2", 36 | } 37 | } 38 | 39 | expected = {"covariates": options["covariates"].copy()} 40 | expected["covariates"].pop("unrelated_covariate") 41 | expected["covariates"].pop("unrelated_covariate_upper") 42 | 43 | relevant_covariates = remove_irrelevant_covariates(options, params) 44 | 45 | assert expected == relevant_covariates 46 | -------------------------------------------------------------------------------- /respy/tests/test_randomness.py: -------------------------------------------------------------------------------- 1 | """This module includes tests to specifically test that randomness is held constant.""" 2 | import numpy as np 3 | import pytest 4 | 5 | from respy.likelihood import get_log_like_func 6 | from respy.simulate import get_simulate_func 7 | from respy.solve import get_solve_func 8 | from respy.tests.utils import apply_to_attributes_of_two_state_spaces 9 | from respy.tests.utils import process_model_or_seed 10 | 11 | 12 | @pytest.mark.end_to_end 13 | @pytest.mark.parametrize( 14 | "model", 15 | ["robinson_crusoe_extended", "robinson_crusoe_with_observed_characteristics"], 16 | ) 17 | def test_invariance_of_model_solution_in_solve_and_criterion_functions(model): 18 | params, options = process_model_or_seed(model) 19 | 20 | solve = get_solve_func(params, options) 21 | state_space = solve(params) 22 | 23 | simulate = get_simulate_func(params, options) 24 | df = simulate(params) 25 | state_space_sim = simulate.keywords["solve"].keywords["state_space"] 26 | 27 | log_like = get_log_like_func(params, options, df) 28 | _ = log_like(params) 29 | state_space_crit = 
log_like.keywords["solve"].keywords["state_space"] 30 | 31 | for state_space_ in [state_space_sim, state_space_crit]: 32 | assert state_space.core.equals(state_space_.core.reindex_like(state_space.core)) 33 | 34 | apply_to_attributes_of_two_state_spaces( 35 | state_space.wages, 36 | state_space_.wages, 37 | np.testing.assert_array_equal, 38 | ) 39 | apply_to_attributes_of_two_state_spaces( 40 | state_space.nonpecs, 41 | state_space_.nonpecs, 42 | np.testing.assert_array_equal, 43 | ) 44 | apply_to_attributes_of_two_state_spaces( 45 | state_space.expected_value_functions, 46 | state_space_.expected_value_functions, 47 | np.testing.assert_array_equal, 48 | ) 49 | apply_to_attributes_of_two_state_spaces( 50 | state_space.base_draws_sol, 51 | state_space_.base_draws_sol, 52 | np.testing.assert_array_equal, 53 | ) 54 | -------------------------------------------------------------------------------- /respy/tests/test_regression.py: -------------------------------------------------------------------------------- 1 | """Run a few regression tests.""" 2 | import pickle 3 | 4 | import numpy as np 5 | import pytest 6 | 7 | from respy.config import CHAOSPY_INSTALLED 8 | from respy.config import TEST_RESOURCES_DIR 9 | from respy.config import TOL_REGRESSION_TESTS 10 | from respy.likelihood import get_log_like_func 11 | from respy.tests.random_model import simulate_truncated_data 12 | 13 | 14 | def compute_log_likelihood(params, options): 15 | df = simulate_truncated_data(params, options) 16 | log_like = get_log_like_func(params, options, df) 17 | crit_val = log_like(params) 18 | 19 | return crit_val 20 | 21 | 22 | def load_regression_tests(): 23 | """Load regression tests from disk.""" 24 | with open(TEST_RESOURCES_DIR / "regression_vault.pickle", "rb") as p: 25 | tests = pickle.load(p) 26 | 27 | return tests 28 | 29 | 30 | @pytest.fixture(scope="session") 31 | def regression_vault(): 32 | """Make regression vault available to tests.""" 33 | return load_regression_tests() 34 | 35 | 36 | @pytest.mark.end_to_end 37 | @pytest.mark.parametrize("index", range(10)) 38 | def test_single_regression(regression_vault, index): 39 | """Run a single regression test.""" 40 | params, options, exp_val = regression_vault[index] 41 | 42 | if CHAOSPY_INSTALLED or options["monte_carlo_sequence"] == "random": 43 | crit_val = compute_log_likelihood(params, options) 44 | 45 | assert np.isclose( 46 | crit_val, exp_val, rtol=TOL_REGRESSION_TESTS, atol=TOL_REGRESSION_TESTS 47 | ) 48 | -------------------------------------------------------------------------------- /respy/tests/test_replication_kw_97.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pandas as pd 3 | import pytest 4 | 5 | import respy as rp 6 | 7 | 8 | @pytest.mark.end_to_end 9 | @pytest.mark.precise 10 | @pytest.mark.parametrize( 11 | "model, type_proportions", 12 | [ 13 | ( 14 | "kw_97_basic", 15 | ([0.1751, 0.2396, 0.5015, 0.0838], [0.0386, 0.4409, 0.4876, 0.0329]), 16 | ), 17 | ( 18 | "kw_97_extended", 19 | ([0.0491, 0.1987, 0.4066, 0.3456], [0.2343, 0.2335, 0.3734, 0.1588]), 20 | ), 21 | ], 22 | ) 23 | def test_type_proportions(model, type_proportions): 24 | nine_years_or_less = type_proportions[0] 25 | ten_years_or_more = type_proportions[1] 26 | 27 | params, options = rp.get_example_model(model, with_data=False) 28 | 29 | options["n_periods"] = 1 30 | options["simulated_agents"] = 10_000 31 | 32 | simulate = rp.get_simulate_func(params, options) 33 | 34 | df = simulate(params) 35 | 36 | 
np.testing.assert_allclose( 37 | df.loc[df.Experience_School.le(9), "Type"] 38 | .value_counts(normalize=True, sort=False) 39 | .sort_index(), 40 | nine_years_or_less, 41 | atol=0.05, 42 | ) 43 | 44 | np.testing.assert_allclose( 45 | df.loc[df.Experience_School.ge(10), "Type"] 46 | .value_counts(normalize=True, sort=False) 47 | .sort_index(), 48 | ten_years_or_more, 49 | atol=0.05, 50 | ) 51 | 52 | 53 | @pytest.mark.end_to_end 54 | @pytest.mark.precise 55 | def test_distribution_of_lagged_choices(): 56 | params, options, actual_df = rp.get_example_model("kw_97_extended") 57 | 58 | options["n_periods"] = 1 59 | options["simulated_agents"] = 10_000 60 | 61 | simulate = rp.get_simulate_func(params, options) 62 | df = simulate(params) 63 | 64 | actual_df = actual_df.query("Period == 0") 65 | expected = pd.crosstab( 66 | actual_df.Lagged_Choice_1, actual_df.Experience_School, normalize="columns" 67 | ) 68 | 69 | df = df.query("Period == 0") 70 | calculated = pd.crosstab( 71 | df.Lagged_Choice_1, df.Experience_School, normalize="columns" 72 | ) 73 | 74 | # Allow for 4% differences which likely for small subsets. 75 | np.testing.assert_allclose(expected, calculated, atol=0.04) 76 | -------------------------------------------------------------------------------- /respy/tests/test_simulate/test_apply_law_of_motion_1_in.csv: -------------------------------------------------------------------------------- 1 | identifier,period,exp_fishing,exp_friday,lagged_choice_1,choice 2 | 1,0,0,0,2,0 3 | 2,0,1,0,0,1 4 | 3,0,1,1,1,2 5 | 4,0,2,0,0,1 6 | 5,0,2,2,0,0 7 | -------------------------------------------------------------------------------- /respy/tests/test_simulate/test_apply_law_of_motion_1_optim_paras.yaml: -------------------------------------------------------------------------------- 1 | n_lagged_choices: 1 2 | choices_w_exp: 3 | - fishing 4 | - friday 5 | observables: {} 6 | n_types: 1 7 | -------------------------------------------------------------------------------- /respy/tests/test_simulate/test_apply_law_of_motion_1_out.csv: -------------------------------------------------------------------------------- 1 | identifier,period,exp_fishing,exp_friday,lagged_choice_1 2 | 1,1,1,0,0 3 | 2,1,1,1,1 4 | 3,1,1,1,2 5 | 4,1,2,1,1 6 | 5,1,3,2,0 7 | -------------------------------------------------------------------------------- /respy/tests/test_simulate/test_apply_law_of_motion_2_in.csv: -------------------------------------------------------------------------------- 1 | identifier,period,exp_fishing,fishing_grounds,choice 2 | 0,3,0,0,0 3 | 1,3,0,0,1 4 | 2,3,0,1,0 5 | 3,3,0,1,1 6 | 4,3,1,0,0 7 | 5,3,1,0,1 8 | 6,3,1,1,0 9 | 7,3,1,1,1 10 | 8,3,2,0,0 11 | 9,3,2,0,1 12 | 10,3,2,1,0 13 | 11,3,2,1,1 14 | -------------------------------------------------------------------------------- /respy/tests/test_simulate/test_apply_law_of_motion_2_optim_paras.yaml: -------------------------------------------------------------------------------- 1 | n_lagged_choices: 0 2 | choices_w_exp: 3 | - fishing 4 | observables: 5 | fishing_grounds: 6 | rich: {} 7 | poor: {} 8 | n_types: 1 9 | -------------------------------------------------------------------------------- /respy/tests/test_simulate/test_apply_law_of_motion_2_out.csv: -------------------------------------------------------------------------------- 1 | identifier,period,exp_fishing,fishing_grounds 2 | 0,4,1,0 3 | 1,4,0,0 4 | 2,4,1,1 5 | 3,4,0,1 6 | 4,4,2,0 7 | 5,4,1,0 8 | 6,4,2,1 9 | 7,4,1,1 10 | 8,4,3,0 11 | 9,4,2,0 12 | 10,4,3,1 13 | 11,4,2,1 14 | 
-------------------------------------------------------------------------------- /respy/tests/utils.py: -------------------------------------------------------------------------------- 1 | import numba as nb 2 | import numpy as np 3 | 4 | import respy as rp 5 | from respy.tests.random_model import generate_random_model 6 | 7 | 8 | def process_model_or_seed(model_or_seed=None, **kwargs): 9 | if isinstance(model_or_seed, str): 10 | params, options = rp.get_example_model(model_or_seed, with_data=False) 11 | elif isinstance(model_or_seed, int): 12 | np.random.seed(model_or_seed) 13 | params, options = generate_random_model(**kwargs) 14 | else: 15 | raise ValueError 16 | 17 | if "kw_94" in str(model_or_seed): 18 | options["n_periods"] = 10 19 | elif "kw_97" in str(model_or_seed): 20 | options["n_periods"] = 5 21 | elif "kw_2000" in str(model_or_seed): 22 | options["n_periods"] = 3 23 | elif "robinson_crusoe_extended" in str(model_or_seed): 24 | options["n_periods"] = 5 25 | elif "robinson_crusoe_with_observed_characteristics" in str(model_or_seed): 26 | options["n_periods"] = 5 27 | 28 | return params, options 29 | 30 | 31 | def apply_to_attributes_of_two_state_spaces(attr_1, attr_2, func): 32 | """Apply a function to two state space attributes, dense or not. 33 | 34 | Attributes might be `state_space.wages` which can be a dictionary or a Numpy array. 35 | 36 | """ 37 | if isinstance(attr_1, dict) or isinstance(attr_1, nb.typed.typeddict.Dict): 38 | out = {key: func(attr_1[key], attr_2[key]) for key in attr_1} 39 | else: 40 | out = func(attr_1, attr_2) 41 | 42 | return out 43 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [bumpversion] 2 | current_version = 2.0.0dev5 3 | parse = (?P<major>\d+)\.(?P<minor>\d+)(\.(?P<patch>\d+))(\-?((dev)?(?P<dev>\d+))?) 4 | serialize = 5 | {major}.{minor}.{patch}dev{dev} 6 | {major}.{minor}.{patch} 7 | 8 | [bumpversion:file:setup.py] 9 | 10 | [bumpversion:file:docs/conf.py] 11 | 12 | [bumpversion:file:respy/__init__.py] 13 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | """The general package information for respy.""" 2 | from pathlib import Path 3 | 4 | from setuptools import find_packages 5 | from setuptools import setup 6 | 7 | 8 | DESCRIPTION = ( 9 | "respy is a Python package for the simulation and estimation of a prototypical " 10 | "finite-horizon dynamic discrete choice model." 
11 | ) 12 | EMAIL = "research-codes-respy.9b46528f81292a712fa4855ff362f40f.show-sender@streams.zulipchat.com" # noqa 13 | README = Path("README.rst").read_text() 14 | PROJECT_URLS = { 15 | "Bug Tracker": "https://github.com/OpenSourceEconomics/respy/issues", 16 | "Documentation": "https://respy.readthedocs.io/en/latest", 17 | "Source Code": "https://github.com/OpenSourceEconomics/respy", 18 | } 19 | 20 | 21 | setup( 22 | name="respy", 23 | version="2.1.1", 24 | description=DESCRIPTION, 25 | long_description=DESCRIPTION + "\n\n" + README, 26 | long_description_content_type="text/x-rst", 27 | author="The respy Development Team", 28 | author_email=EMAIL, 29 | python_requires=">=3.6.0", 30 | url="https://respy.readthedocs.io/en/latest/", 31 | project_urls=PROJECT_URLS, 32 | packages=find_packages(), 33 | license="MIT", 34 | keywords=["Economics", " Discrete Choice Dynamic Programming Model"], 35 | classifiers=[ 36 | "Intended Audience :: Science/Research", 37 | "License :: OSI Approved :: MIT License", 38 | "Operating System :: OS Independent", 39 | "Programming Language :: Python :: 3.6", 40 | "Programming Language :: Python :: 3.7", 41 | ], 42 | platforms="any", 43 | package_data={ 44 | "respy": [ 45 | "tests/resources/*.csv", 46 | "tests/resources/*.pickle", 47 | "tests/resources/*.yaml", 48 | "tox.ini", 49 | ] 50 | }, 51 | include_package_data=True, 52 | zip_safe=False, 53 | ) 54 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | envlist = pytest, pre-commit, sphinx 3 | skipsdist = True 4 | skip_missing_interpreters = True 5 | 6 | [testenv] 7 | basepython = python 8 | 9 | [testenv:pytest] 10 | setenv = 11 | CONDA_DLL_SEARCH_MODIFICATION_ENABLE = 1 12 | conda_deps = 13 | bottleneck 14 | click 15 | codecov 16 | conda-build 17 | estimagic >= 0.1.2 18 | fastparquet 19 | hypothesis 20 | joblib 21 | matplotlib 22 | mkl 23 | numba 24 | numexpr 25 | numpy >=1.21.0 26 | pandas >= 0.24 27 | scipy 28 | pyaml 29 | pytest >= 6.2.1 30 | pytest-cov 31 | pytest-xdist 32 | python-snappy 33 | pyarrow 34 | conda_channels = 35 | opensourceeconomics 36 | conda-forge 37 | deps = 38 | apprise 39 | pytest-randomly 40 | chaospy >= 4.2.3 41 | commands = 42 | pytest {posargs} 43 | 44 | [testenv:pre-commit] 45 | passenv = USERPROFILE SSH_AUTH_SOCK 46 | deps = 47 | doc8 48 | pre-commit 49 | conda_channels = 50 | conda-forge 51 | commands = 52 | pre-commit install -f --install-hooks 53 | pre-commit run --all-files 54 | 55 | [testenv:sphinx] 56 | changedir = docs 57 | conda_deps = 58 | python 59 | ipython 60 | nbsphinx 61 | numpydoc 62 | sphinx 63 | sphinxcontrib-bibtex>=2.0.0 64 | sphinx-autoapi 65 | sphinx-tabs 66 | pydata-sphinx-theme>=0.6.0 67 | conda_channels = 68 | conda-forge 69 | commands = 70 | sphinx-build -T -b html -d {envtmpdir}/doctrees . {envtmpdir}/html 71 | - sphinx-build -T -b linkcheck -d {envtmpdir}/doctrees . {envtmpdir}/linkcheck 72 | 73 | [doc8] 74 | max-line-length = 89 75 | ignore = D002,D004 76 | 77 | [flake8] 78 | max-line-length = 88 79 | ignore = 80 | E203 ; ignores whitespace around : which is enforced by Black. 81 | W503 ; ignores linebreak before binary operator which is enforced by Black. 82 | PT006 ; ignores using tuples of strings as default for pytest parametrize. 83 | warn-symbols = 84 | pytest.mark.wip = Remove 'wip' mark for tests before commits. 85 | pytest.mark.skip = Remove 'skip' flag for tests before commits. 
86 | .loc[:, = Use `df["a"]` instead of `df.loc[:, "a"]` to get a column. 87 | per-file-ignores = 88 | docs/conf.py:E501,D 89 | respy/tests/*:D 90 | development/*:D 91 | respy/pre_processing/specification_helpers.py:D 92 | respy/parallelization.py:D202 93 | pytest-mark-no-parentheses = True 94 | 95 | 96 | [pytest] 97 | junit_family = xunit2 98 | addopts = --doctest-modules 99 | markers = 100 | slow: Tests that take a long time to run. 101 | wip: Tests that are work-in-progress. 102 | precise: Tests that assert a numeric value is correct, up to rounding error. 103 | edge_case: Tests that exploit edge cases with closed form solutions, 104 | unit: Unit tests, i.e. tests that only test one function in isolation. 105 | integration: Tests that test the interplay of several functions. 106 | end_to_end: Tests that test the whole system. 107 | hypothesis: Tests that work with hypothesis. 108 | norecursedirs = 109 | .idea 110 | .tox 111 | filterwarnings = 112 | ignore:Using or importing the ABCs from 'collections' 113 | ignore:the imp module is deprecated 114 | ignore:The initial experience(s) for choice 115 | ignore:The number of interpolation points for one 'dense_index' in period 116 | ignore:PY_SSIZE_T_CLEAN will be required for '#' formats 117 | --------------------------------------------------------------------------------
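The [pytest] section of tox.ini above registers the markers (unit, integration, precise, end_to_end, and so on) that the test modules listed earlier attach to their tests. As a minimal, illustrative sketch of how such a marker taxonomy can be exercised, the snippet below selects a subset of the suite programmatically with pytest.main; the marker expression, the "no:randomly" plugin switch, and the "respy" target path are assumptions chosen for illustration, not code from the repository.

# Illustrative only: run a marker-based subset of the test suite.
# The marker names come from the [pytest] section of tox.ini; the marker
# expression, the plugin switch, and the target path are assumptions.
import sys

import pytest

if __name__ == "__main__":
    exit_code = pytest.main(
        [
            "-m", "unit and not slow",  # filter tests by the registered markers
            "-p", "no:randomly",        # disable test shuffling if pytest-randomly is installed
            "respy",                    # directory from which tests are collected
        ]
    )
    sys.exit(exit_code)

The same marker expression can of course be passed on the command line, which is how the continuous-integration setup in tox.ini invokes pytest via its {posargs}.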