├── .bandit.yml ├── .codecov.yml ├── .coveragerc ├── .gitattributes ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md ├── pull_request_template.md └── workflows │ ├── ci-cd-workflow.yml │ ├── test-conda-install.yml │ └── test-pypi-install.yml ├── .gitignore ├── .pylintrc ├── .readthedocs.yml ├── CHANGELOG.rst ├── LICENSE ├── MANIFEST.in ├── Makefile ├── README.rst ├── docs ├── Makefile ├── make.bat └── source │ ├── _static │ └── .gitkeep │ ├── base.rst │ ├── changelog.rst │ ├── conf.py │ ├── constants.rst │ ├── development.rst │ ├── errors.rst │ ├── impulse_response_model.rst │ ├── index.rst │ ├── installation.rst │ ├── two_layer_model.rst │ ├── usage.rst │ ├── usage │ ├── getting-started.ipynb │ ├── impulse-response-equivalence.ipynb │ ├── one-layer-model.ipynb │ └── running-scenarios.ipynb │ └── utils.rst ├── latex.template ├── paper.bib ├── paper.md ├── scripts ├── check_requirements_in_readme.py └── test_install.py ├── setup.cfg ├── setup.py ├── src └── openscm_twolayermodel │ ├── __init__.py │ ├── _version.py │ ├── base.py │ ├── constants.py │ ├── errors.py │ ├── impulse_response_model.py │ ├── two_layer_model.py │ └── utils.py ├── tests ├── conftest.py ├── integration │ ├── test_impulse_response_integration.py │ ├── test_model_integration_base.py │ ├── test_twolayer_impulse_response_equivalence.py │ └── test_twolayer_integration.py ├── notebook-tests.cfg ├── regression │ ├── test_impulse_response_defaults.py │ └── test_twolayer_defaults.py ├── test-data │ ├── impulse-response-output │ │ └── test_impulse_response_defaults.csv │ ├── rcmip-radiative-forcing-annual-means-v4-0-0.csv │ └── two-layer-output │ │ ├── test_twolayer_defaults.csv │ │ ├── test_twolayer_plus_efficacy.csv │ │ └── test_twolayer_plus_state_dependence.csv └── unit │ ├── test_impulse_response_unit.py │ ├── test_misc.py │ ├── test_model_base.py │ ├── test_two_layer_model_unit.py │ └── test_utils.py └── versioneer.py /.bandit.yml: 
-------------------------------------------------------------------------------- 1 | exclude_dirs: 2 | - openscm_twolayermodel/_version.py 3 | -------------------------------------------------------------------------------- /.codecov.yml: -------------------------------------------------------------------------------- 1 | # this explains how to configure this file 2 | # https://docs.codecov.io/docs/commit-status 3 | 4 | # validate with `curl --data-binary @.codecov.yml https://codecov.io/validate` 5 | coverage: 6 | status: 7 | project: 8 | default: 9 | target: 90% 10 | threshold: 2% 11 | patch: 12 | default: 13 | target: 95% 14 | 15 | comment: 16 | behaviour: new 17 | -------------------------------------------------------------------------------- /.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | branch = True 3 | source = openscm_twolayermodel 4 | omit = *_version.py 5 | 6 | [report] 7 | fail_under = 90 8 | exclude_lines = 9 | if self.debug: 10 | pragma: no cover 11 | raise NotImplementedError 12 | if __name__ == .__main__.: 13 | ignore_errors = True 14 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | src/openscm_twolayermodel/_version.py export-subst 2 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug Report 3 | about: Write a report to help us improve 4 | 5 | --- 6 | 7 | **Describe the bug** 8 | 9 | A clear and concise description of what the bug is, in particular: 10 | 11 | - What did you do? 12 | - What actually happened? 13 | - What did you expect to happen? 
14 | 15 | **Failing Test** 16 | 17 | Please put code (ideally in the form of a unit test) which fails below 18 | 19 | **Screenshots** 20 | 21 | If applicable, add screenshots to help explain your problem. 22 | 23 | **System (please complete the following information):** 24 | 25 | - OS: [e.g. Windows, Linux, macOS] 26 | - Python and openscm commit/version [e.g. Python 3.6] 27 | 28 | **Additional context** 29 | 30 | Add any other context about the problem here. 31 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature Request 3 | about: Suggest an idea for this project 4 | 5 | --- 6 | 7 | **Is your feature request related to a problem? Please describe.** 8 | 9 | A clear and concise description of what the problem is. E.g. It's annoying that I always have to [...] 10 | 11 | **Describe the solution you'd like** 12 | 13 | A clear and concise description of the solution you would like to see. 14 | 15 | **Describe alternatives you've considered** 16 | 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | 21 | Add any other context or screenshots about the feature request here. 
22 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | - [ ] Tests added 4 | - [ ] Documentation added 5 | - [ ] Example added (in the documentation, to an existing notebook, or in a new notebook) 6 | - [ ] Description in ``CHANGELOG.rst`` added (single line such as: ``(`#XX `_) Added feature which does something``) 7 | -------------------------------------------------------------------------------- /.github/workflows/ci-cd-workflow.yml: -------------------------------------------------------------------------------- 1 | name: OpenSCM Two Layer Model CI-CD 2 | on: push 3 | 4 | jobs: 5 | linting-and-docs: 6 | 7 | runs-on: ubuntu-latest 8 | strategy: 9 | matrix: 10 | python-version: [3.7] 11 | 12 | steps: 13 | - name: Checkout repository 14 | uses: actions/checkout@v2 15 | - name: Setup python 16 | uses: actions/setup-python@v1 17 | with: 18 | python-version: ${{ matrix.python-version }} 19 | - name: Install dev dependencies 20 | run: | 21 | pip install --upgrade pip wheel 22 | pip install -e .[dev] 23 | - name: Formatting and linters 24 | # TODO: add `pylint src` back in 25 | run: | 26 | black --check src tests setup.py --exclude openscm_twolayermodel/_version.py 27 | isort --check-only --quiet --recursive src tests setup.py 28 | pydocstyle src 29 | bandit -c .bandit.yml -r openscm_twolayermodel 30 | flake8 src tests setup.py 31 | - name: Install pandoc 32 | uses: r-lib/actions/setup-pandoc@v1 33 | with: 34 | pandoc-version: '2.11' 35 | - name: Build docs 36 | # treat warnings as errors (-W)... 
37 | # ...but not when being nitpicky (-n) 38 | run: | 39 | sphinx-build -M html docs/source docs/build -qW 40 | sphinx-build -M html docs/source docs/build -Eqn -b coverage 41 | if [[ -s docs/build/html/python.txt ]] 42 | then 43 | echo 44 | echo \"Error: Documentation missing:\" 45 | echo 46 | cat docs/build/html/python.txt 47 | exit 1 48 | fi 49 | 50 | build: 51 | runs-on: ubuntu-latest 52 | strategy: 53 | matrix: 54 | python-version: [3.6, 3.7, 3.8, 3.9] 55 | 56 | steps: 57 | - name: Checkout repository 58 | uses: actions/checkout@v2 59 | - name: Setup python 60 | uses: actions/setup-python@v1 61 | with: 62 | python-version: ${{ matrix.python-version }} 63 | - name: Install test dependencies 64 | run: | 65 | pip install --upgrade pip wheel 66 | pip install -e .[tests] 67 | - name: Test with pytest 68 | env: 69 | MIN_COVERAGE: 95 70 | run: | 71 | pytest tests -r a --cov=openscm_twolayermodel --cov-report=xml 72 | if ! coverage report --fail-under=${MIN_COVERAGE} --show-missing 73 | then 74 | echo 75 | echo "Error: Test coverage has to be at least ${MIN_COVERAGE}" 76 | exit 1 77 | fi 78 | - name: Upload coverage to Codecov 79 | if: startsWith(runner.os, 'Linux') && matrix.python-version == 3.7 80 | uses: codecov/codecov-action@v1 81 | with: 82 | file: ./coverage.xml 83 | 84 | test-notebooks: 85 | runs-on: ubuntu-latest 86 | strategy: 87 | matrix: 88 | python-version: [3.6, 3.7, 3.8, 3.9] 89 | 90 | steps: 91 | - name: Checkout repository 92 | uses: actions/checkout@v2 93 | - name: Setup python 94 | uses: actions/setup-python@v1 95 | with: 96 | python-version: ${{ matrix.python-version }} 97 | - name: Install notebook dependencies 98 | run: | 99 | pip install --upgrade pip wheel 100 | pip install -e .[tests,notebooks] 101 | - name: Test notebooks with nbval 102 | run: | 103 | pytest docs/source/usage -r a --nbval-lax --sanitize-with tests/notebook-tests.cfg --no-cov 104 | - name: Test notebooks strictly 105 | if: matrix.python-version == 3.7 106 | run: | 107 | 
pytest docs/source/usage -r a --nbval --sanitize-with tests/notebook-tests.cfg --no-cov 108 | 109 | readme-requirements-up-to-date: 110 | runs-on: ubuntu-latest 111 | strategy: 112 | matrix: 113 | python-version: [3.7] 114 | 115 | steps: 116 | - name: Checkout repository 117 | uses: actions/checkout@v2 118 | - name: Setup python 119 | uses: actions/setup-python@v1 120 | with: 121 | python-version: ${{ matrix.python-version }} 122 | - name: Check README is up-to-date with setup.py 123 | run: | 124 | python scripts/check_requirements_in_readme.py 125 | 126 | deploy-pypi: 127 | needs: [build,linting-and-docs,readme-requirements-up-to-date,test-notebooks] 128 | if: startsWith(github.ref, 'refs/tags/v') 129 | 130 | runs-on: ubuntu-latest 131 | strategy: 132 | matrix: 133 | python-version: [3.7] 134 | 135 | steps: 136 | - name: Checkout repository 137 | uses: actions/checkout@v2 138 | - name: Setup python 139 | uses: actions/setup-python@v1 140 | with: 141 | python-version: ${{ matrix.python-version }} 142 | - name: Install dependencies 143 | run: | 144 | pip install --upgrade pip wheel 145 | pip install -e .[dev] 146 | - name: Create package 147 | run: python setup.py sdist bdist_wheel --universal 148 | - name: Publish package to PyPI 149 | uses: pypa/gh-action-pypi-publish@master 150 | with: 151 | user: __token__ 152 | password: ${{ secrets.pypi_password }} 153 | -------------------------------------------------------------------------------- /.github/workflows/test-conda-install.yml: -------------------------------------------------------------------------------- 1 | name: Test conda install 2 | on: 3 | schedule: 4 | # * is a special character in YAML so you have to quote this string 5 | - cron: '0 * * * 3' 6 | 7 | jobs: 8 | test-installation: 9 | name: Test conda install (${{ matrix.python-version }}, ${{ matrix.os }}) 10 | runs-on: ${{ matrix.os }} 11 | strategy: 12 | matrix: 13 | os: ["ubuntu-latest", "macos-latest", "windows-latest"] 14 | python-version: [3.7, 
3.8, 3.9] 15 | 16 | steps: 17 | - name: Setup conda 18 | uses: conda-incubator/setup-miniconda@v2.0.1 19 | with: 20 | auto-update-conda: true 21 | python-version: ${{ matrix.python-version }} 22 | activate-environment: test 23 | - name: Conda info 24 | shell: bash -l {0} 25 | run: conda info 26 | - name: Install package 27 | shell: bash -l {0} 28 | run: conda install -c conda-forge openscm-twolayermodel 29 | - name: Checkout repository 30 | uses: actions/checkout@v2 31 | - name: Test installation 32 | shell: bash -l {0} 33 | run: | 34 | which python 35 | python scripts/test_install.py 36 | -------------------------------------------------------------------------------- /.github/workflows/test-pypi-install.yml: -------------------------------------------------------------------------------- 1 | name: Test PyPI install 2 | on: 3 | schedule: 4 | # * is a special character in YAML so you have to quote this string 5 | - cron: '0 * * * 3' 6 | 7 | jobs: 8 | test-installation: 9 | name: Test PyPI install (${{ matrix.python-version }}, ${{ matrix.os }}) 10 | runs-on: ${{ matrix.os }} 11 | strategy: 12 | matrix: 13 | os: ["ubuntu-latest", "macos-latest", "windows-latest"] 14 | python-version: [3.6, 3.7, 3.8, 3.9] 15 | 16 | steps: 17 | - name: Setup python 18 | uses: actions/setup-python@v1 19 | with: 20 | python-version: ${{ matrix.python-version }} 21 | - name: Install package 22 | # install basic first, then install pre-releases afterwards so we don't 23 | # pick up pre-releases of dependencies (e.g. 
pandas) 24 | run: | 25 | pip install --upgrade pip 26 | pip install openscm-twolayermodel 27 | pip install openscm-twolayermodel --pre 28 | - name: Checkout repository 29 | uses: actions/checkout@v2 30 | - name: Test installation 31 | run: | 32 | python scripts/test_install.py 33 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # paper 2 | paper.pdf 3 | 4 | # Byte-compiled / optimized / DLL files 5 | __pycache__/ 6 | *.py[cod] 7 | *$py.class 8 | 9 | # C extensions 10 | *.so 11 | 12 | # Distribution / packaging 13 | .Python 14 | build/ 15 | develop-eggs/ 16 | dist/ 17 | downloads/ 18 | eggs/ 19 | .eggs/ 20 | lib/ 21 | lib64/ 22 | parts/ 23 | sdist/ 24 | var/ 25 | wheels/ 26 | *.egg-info/ 27 | .installed.cfg 28 | *.egg 29 | MANIFEST 30 | 31 | # PyInstaller 32 | # Usually these files are written by a python script from a template 33 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
34 | *.manifest 35 | *.spec 36 | 37 | # Installer logs 38 | pip-log.txt 39 | pip-delete-this-directory.txt 40 | 41 | # Unit test / coverage reports 42 | htmlcov/ 43 | .tox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | 53 | # Translations 54 | *.mo 55 | *.pot 56 | 57 | # Django stuff: 58 | *.log 59 | local_settings.py 60 | db.sqlite3 61 | 62 | # Flask stuff: 63 | instance/ 64 | .webassets-cache 65 | 66 | # Scrapy stuff: 67 | .scrapy 68 | 69 | # Sphinx documentation 70 | docs/_build/ 71 | 72 | # PyBuilder 73 | target/ 74 | 75 | # Jupyter Notebook 76 | .ipynb_checkpoints 77 | 78 | # pyenv 79 | .python-version 80 | 81 | # celery beat schedule file 82 | celerybeat-schedule 83 | 84 | # SageMath parsed files 85 | *.sage.py 86 | 87 | # Environments 88 | .env 89 | .venv 90 | .venv3 91 | env/ 92 | venv/ 93 | venv3/ 94 | ENV/ 95 | env.bak/ 96 | venv.bak/ 97 | venv3.bak/ 98 | 99 | # Spyder project settings 100 | .spyderproject 101 | .spyproject 102 | 103 | # Rope project settings 104 | .ropeproject 105 | 106 | # mkdocs documentation 107 | /site 108 | 109 | # mypy 110 | .mypy_cache/ 111 | 112 | # DS_Store 113 | *.DS_Store 114 | -------------------------------------------------------------------------------- /.pylintrc: -------------------------------------------------------------------------------- 1 | [MASTER] 2 | ignore=_version.py 3 | load-plugins= 4 | pylint.extensions.bad_builtin, 5 | pylint.extensions.check_elif, 6 | pylint.extensions.comparetozero, 7 | pylint.extensions.docparams, 8 | pylint.extensions.emptystring, 9 | pylint.extensions.overlapping_exceptions, 10 | pylint.extensions.redefined_variable_type 11 | 12 | [MESSAGES CONTROL] 13 | disable= 14 | # handled by black 15 | format, 16 | # annoyingly strict 17 | duplicate-code, 18 | # fails with venv 19 | import-error 20 | 21 | [SIMILARITIES] 22 | ignore-comments=no 23 | ignore-docstrings=yes 24 | ignore-imports=yes 25 | 
-------------------------------------------------------------------------------- /.readthedocs.yml: -------------------------------------------------------------------------------- 1 | build: 2 | image: latest 3 | 4 | python: 5 | version: 3.7 6 | pip_install: true 7 | extra_requirements: 8 | - docs 9 | -------------------------------------------------------------------------------- /CHANGELOG.rst: -------------------------------------------------------------------------------- 1 | Changelog 2 | ========= 3 | 4 | All notable changes to this project will be documented in this file. 5 | 6 | The format is based on `Keep a Changelog `_, and this project adheres to `Semantic Versioning `_. 7 | 8 | The changes listed in this file are categorised as follows: 9 | 10 | - Added: new features 11 | - Changed: changes in existing functionality 12 | - Deprecated: soon-to-be removed features 13 | - Removed: now removed features 14 | - Fixed: any bug fixes 15 | - Security: in case of vulnerabilities. 16 | 17 | v0.2.3 - 2021-04-27 18 | ------------------- 19 | 20 | Fixed 21 | ~~~~~ 22 | 23 | - (`#34 `_, `#35 `_, `#36 `_) Final tweaks to JOSS paper 24 | 25 | v0.2.2 - 2021-04-27 26 | ------------------- 27 | 28 | Added 29 | ~~~~~ 30 | 31 | - (`#33 `_) Information in README and testing for conda install 32 | 33 | Changed 34 | ~~~~~~~ 35 | 36 | - (`#32 `_) Include ``LICENSE``, ``README.rst`` and ``CHANGELOG`` in package 37 | - (`#30 `_) Require ``scmdata>=0.9`` 38 | - (`#27 `_) Fixed the discussion (in the relevant notebook) of how a one-layer model can be made from the two-layer implementation here 39 | 40 | Fixed 41 | ~~~~~ 42 | 43 | - (`#30 `_) Incorrect call to :meth:`scmdata.ScmRun` in tests 44 | 45 | v0.2.1 - 2020-12-23 46 | ------------------- 47 | 48 | Added 49 | ~~~~~ 50 | 51 | - (`#20 `_) Statement of need to the README following `JOSS review `_ (closes `#18 `_) 52 | 53 | Changed 54 | ~~~~~~~ 55 | 56 | - (`#26 `_) Updated to new scmdata version (and hence new openscm-units 
API) 57 | - (`#25 `_) JOSS paper following `JOSS review 1 `_ 58 | - (`#23 `_) Moved notebooks into full documentation following `JOSS review `_ (closes `#17 `_) 59 | - (`#21 `_) Quoted pip install instructions to ensure cross-shell compatibility following `JOSS review `_ (closes `#16 `_) 60 | - (`#20 `_) Option to remove tqdm progress bar by passing ``progress=False`` 61 | 62 | v0.2.0 - 2020-10-09 63 | ------------------- 64 | 65 | Added 66 | ~~~~~ 67 | 68 | - (`#7 `_) JOSS paper draft 69 | 70 | Changed 71 | ~~~~~~~ 72 | 73 | - (`#7 `_) Require ``scmdata>=0.7`` 74 | 75 | v0.1.2 - 2020-07-31 76 | ------------------- 77 | 78 | Changed 79 | ~~~~~~~ 80 | 81 | - (`#12 `_) Upgrade to ``scmdata>=0.6.2`` so that package can be installed 82 | 83 | v0.1.1 - 2020-06-29 84 | ------------------- 85 | 86 | Added 87 | ~~~~~ 88 | 89 | - (`#8 `_) Add notebook showing how to run a single-layer model 90 | 91 | Changed 92 | ~~~~~~~ 93 | 94 | - (`#11 `_) Re-wrote the getting started notebook 95 | - (`#10 `_) Re-wrote CHANGELOG 96 | - (`#9 `_) Update to scmdata 0.5.Y 97 | 98 | v0.1.0 - 2020-05-15 99 | ------------------- 100 | 101 | Added 102 | ~~~~~ 103 | 104 | - (`#3 `_) Add first implementation of the models 105 | - (`#1 `_) Setup repository 106 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2020, OpenSCM Two Layer Model Developers 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | 1. Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | 2. 
Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | 3. Neither the name of the copyright holder nor the names of its 17 | contributors may be used to endorse or promote products derived from 18 | this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
30 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.rst 2 | include LICENSE 3 | include CHANGELOG.rst 4 | 5 | include versioneer.py 6 | include src/openscm_twolayermodel/_version.py 7 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .DEFAULT_GOAL := help 2 | 3 | VENV_DIR ?= venv 4 | TESTS_DIR=$(PWD)/tests 5 | 6 | NOTEBOOKS_DIR=./docs/source/usage 7 | NOTEBOOKS_SANITIZE_FILE=$(TESTS_DIR)/notebook-tests.cfg 8 | 9 | define PRINT_HELP_PYSCRIPT 10 | import re, sys 11 | 12 | for line in sys.stdin: 13 | match = re.match(r'^([a-zA-Z_-]+):.*?## (.*)$$', line) 14 | if match: 15 | target, help = match.groups() 16 | print("%-20s %s" % (target, help)) 17 | endef 18 | export PRINT_HELP_PYSCRIPT 19 | 20 | .PHONY: help 21 | help: 22 | @python -c "$$PRINT_HELP_PYSCRIPT" < $(MAKEFILE_LIST) 23 | 24 | checks: $(VENV_DIR) ## run all the checks 25 | @echo "=== bandit ==="; $(VENV_DIR)/bin/bandit -c .bandit.yml -r openscm_twolayermodel || echo "--- bandit failed ---" >&2; \ 26 | echo "\n\n=== black ==="; $(VENV_DIR)/bin/black --check src tests setup.py docs/source/conf.py scripts/*.py --exclude openscm_twolayermodel/_version.py || echo "--- black failed ---" >&2; \ 27 | echo "\n\n=== flake8 ==="; $(VENV_DIR)/bin/flake8 src tests setup.py || echo "--- flake8 failed ---" >&2; \ 28 | echo "\n\n=== isort ==="; $(VENV_DIR)/bin/isort --check-only --quiet --recursive src tests setup.py || echo "--- isort failed ---" >&2; \ 29 | echo "\n\n=== pydocstyle ==="; $(VENV_DIR)/bin/pydocstyle src || echo "--- pydocstyle failed ---" >&2; \ 30 | echo "\n\n=== pylint ==="; $(VENV_DIR)/bin/pylint src || echo "--- pylint failed ---" >&2; \ 31 | echo "\n\n=== notebook tests ==="; $(VENV_DIR)/bin/pytest $(NOTEBOOKS_DIR) -r a --nbval 
--sanitize-with tests/notebook-tests.cfg || echo "--- notebook tests failed ---" >&2; \ 32 | echo "\n\n=== tests ==="; $(VENV_DIR)/bin/pytest tests -r a --cov=openscm_twolayermodel --cov-report='' \ 33 | && $(VENV_DIR)/bin/coverage report --fail-under=95 || echo "--- tests failed ---" >&2; \ 34 | echo 35 | 36 | .PHONY: format 37 | format: ## re-format files 38 | make isort 39 | make black 40 | 41 | .PHONY: format-notebooks 42 | format-notebooks: $(VENV_DIR) ## format the notebooks 43 | @status=$$(git status --porcelain $(NOTEBOOKS_DIR)); \ 44 | if test "x$${status}" = x; then \ 45 | $(VENV_DIR)/bin/black-nb $(NOTEBOOKS_DIR); \ 46 | else \ 47 | echo Not trying any formatting. Working directory is dirty ... >&2; \ 48 | fi; 49 | 50 | .PHONY: black 51 | black: $(VENV_DIR) ## apply black formatter to source and tests 52 | @status=$$(git status --porcelain src tests docs scripts); \ 53 | if test "x$${status}" = x; then \ 54 | $(VENV_DIR)/bin/black --exclude _version.py setup.py src tests docs/source/conf.py scripts/*.py; \ 55 | else \ 56 | echo Not trying any formatting. Working directory is dirty ... >&2; \ 57 | fi; 58 | 59 | .PHONY: isort 60 | isort: $(VENV_DIR) ## format the code 61 | @status=$$(git status --porcelain src tests); \ 62 | if test "x$${status}" = x; then \ 63 | $(VENV_DIR)/bin/isort --recursive src tests setup.py; \ 64 | else \ 65 | echo Not trying any formatting. Working directory is dirty ... 
>&2; \ 66 | fi; 67 | 68 | docs: $(VENV_DIR) ## build the docs 69 | $(VENV_DIR)/bin/sphinx-build -M html docs/source docs/build 70 | 71 | test: $(VENV_DIR) ## run the full testsuite 72 | $(VENV_DIR)/bin/pytest --cov -rfsxEX --cov-report term-missing 73 | 74 | test-notebooks: $(VENV_DIR) ## test the notebooks 75 | $(VENV_DIR)/bin/pytest ${NOTEBOOKS_DIR} -r a --nbval --sanitize-with tests/notebook-tests.cfg 76 | 77 | test-install: $(VENV_DIR) ## test whether installing in a fresh venv works 78 | $(eval TEMPVENV := $(shell mktemp -d)) 79 | python3 -m venv $(TEMPVENV) 80 | $(TEMPVENV)/bin/pip install pip wheel --upgrade 81 | $(TEMPVENV)/bin/pip install wheel 'setuptools>=41.2' 82 | $(TEMPVENV)/bin/pip install . 83 | $(TEMPVENV)/bin/python scripts/test_install.py 84 | 85 | test-testpypi-install: $(VENV_DIR) ## test whether installing from test PyPI works 86 | $(eval TEMPVENV := $(shell mktemp -d)) 87 | python3 -m venv $(TEMPVENV) 88 | $(TEMPVENV)/bin/pip install pip wheel --upgrade 89 | $(TEMPVENV)/bin/pip install wheel 'setuptools>=41.2' 90 | # Install dependencies not on testpypi registry 91 | $(TEMPVENV)/bin/pip install pandas 92 | # Install openscm-twolayermodel without dependencies. 
93 | $(TEMPVENV)/bin/pip install \ 94 | -i https://testpypi.python.org/pypi openscm-twolayermodel \ 95 | --no-dependencies --pre 96 | $(TEMPVENV)/bin/python -c "import sys; sys.path.remove(''); import openscm_twolayermodel; print(openscm_twolayermodel.__version__)" 97 | 98 | test-pypi-install: $(VENV_DIR) ## test whether installing from PyPI works 99 | $(eval TEMPVENV := $(shell mktemp -d)) 100 | python3 -m venv $(TEMPVENV) 101 | $(TEMPVENV)/bin/pip install pip wheel --upgrade 102 | $(TEMPVENV)/bin/pip install wheel 'setuptools>=41.2' 103 | $(TEMPVENV)/bin/pip install openscm-twolayermodel --pre 104 | $(TEMPVENV)/bin/python scripts/test_install.py 105 | 106 | virtual-environment: ## update venv, create a new venv if it doesn't exist 107 | make $(VENV_DIR) 108 | 109 | $(VENV_DIR): setup.py 110 | [ -d $(VENV_DIR) ] || python3 -m venv $(VENV_DIR) 111 | 112 | $(VENV_DIR)/bin/pip install --upgrade pip wheel 113 | $(VENV_DIR)/bin/pip install -e ".[dev]" 114 | $(VENV_DIR)/bin/jupyter nbextension enable --py widgetsnbextension 115 | 116 | touch $(VENV_DIR) 117 | 118 | paper.pdf: paper.md paper.bib latex.template 119 | pandoc --filter pandoc-citeproc --bibliography paper.bib paper.md \ 120 | --from markdown+grid_tables \ 121 | --template latex.template -o paper.pdf \ 122 | --pdf-engine=xelatex 123 | 124 | clean: 125 | rm paper.pdf 126 | 127 | 128 | first-venv: ## create a new virtual environment for the very first repo setup 129 | python3 -m venv $(VENV_DIR) 130 | 131 | $(VENV_DIR)/bin/pip install --upgrade pip wheel 132 | $(VENV_DIR)/bin/pip install versioneer 133 | # don't touch here as we don't want this venv to persist anyway 134 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | OpenSCM Two Layer Model 2 | ======================= 3 | 4 | +-------------------+----------------+--------------+ 5 | | Repository health | |CI CD| | |Coverage| | 6 
| +-------------------+----------------+--------------+ 7 | 8 | +---------------+--------+--------+ 9 | | Documentation | |Docs| | |JOSS| | 10 | +---------------+--------+--------+ 11 | 12 | +------+------------------+----------------+------------------+ 13 | | PyPI | |PyPI Install| | |PyPI| | |PyPI Version| | 14 | +------+------------------+----------------+------------------+ 15 | 16 | +-------+-----------------+-------------------+-----------------+ 17 | | Conda | |conda install| | |conda platforms| | |conda version| | 18 | +-------+-----------------+-------------------+-----------------+ 19 | 20 | +-----------------+----------------+---------------+-----------+ 21 | | Other info | |Contributors| | |Last Commit| | |License| | 22 | +-----------------+----------------+---------------+-----------+ 23 | 24 | 25 | Brief summary 26 | +++++++++++++ 27 | 28 | .. sec-begin-long-description 29 | .. sec-begin-index 30 | 31 | OpenSCM two layer model contains implementations of the two layer radiative forcing driven models by `Held et al. `_, `Geoffroy et al. `_ and `Bloch-Johnson et al. `_ 32 | 33 | OpenSCM Two Layer Model was designed to provide a clean, modularised, extensible interface for one of the most commonly used simple climate models. 34 | It was used in `Phase 1 of the Reduced Complexity Model Intercomparison Project `_ as a point of comparison for the other participating models. 35 | 36 | The `FaIR model `_ implements a mathematically equivalent model (under certain assumptions) but does not provide as clear a conversion between the two-layer model and the two-timescale response as is provided here. 37 | We hope that this implementation could interface with other simple climate models like FaIR to allow simpler exploration of the combined behaviour of interacting climate components with minimal coupling headaches. 38 | 39 | As implemented here, the "OpenSCM Two Layer Model" interface is targeted at researchers and at use as an education tool. 
40 | Users from other fields are of course encouraged to use it if they find it helpful. 41 | 42 | .. sec-end-index 43 | 44 | License 45 | ------- 46 | 47 | .. sec-begin-license 48 | 49 | OpenSCM two layer model is free software under a BSD 3-Clause License, see 50 | `LICENSE `_. 51 | 52 | .. sec-end-license 53 | .. sec-end-long-description 54 | 55 | .. sec-begin-installation 56 | 57 | Installation 58 | ------------ 59 | 60 | OpenSCM two layer model has only two dependencies: 61 | 62 | .. begin-dependencies 63 | 64 | - scmdata>=0.9 65 | - tqdm 66 | 67 | .. end-dependencies 68 | 69 | OpenSCM two layer model can be installed with pip 70 | 71 | .. code:: bash 72 | 73 | pip install openscm-twolayermodel 74 | 75 | If you also want to run the example notebooks install additional 76 | dependencies using 77 | 78 | .. code:: bash 79 | 80 | pip install "openscm-twolayermodel[notebooks]" 81 | 82 | **Coming soon** OpenSCM two layer model can also be installed with conda 83 | 84 | .. code:: bash 85 | 86 | conda install -c conda-forge openscm-twolayermodel 87 | 88 | We do not ship our tests with the packages. 89 | If you wish to run the tests, you must install from source (or download the tests separately and run them on your installation). 90 | 91 | Installing from source 92 | ~~~~~~~~~~~~~~~~~~~~~~ 93 | 94 | To install from source, simply clone the repository and then install it using pip e.g. ``pip install ".[dev]"``. 95 | Having done this, the tests can be run with ``pytest tests`` or using the ``Makefile`` (``make test`` will run only the code tests, ``make checks`` will run the code tests and all other tests e.g. linting, notebooks, documentation). 96 | 97 | .. sec-end-installation 98 | 99 | For more details, see the `development section of the docs `_. 100 | 101 | Documentation 102 | ------------- 103 | 104 | Documentation can be found at our `documentation pages `_ 105 | (we are thankful to `Read the Docs `_ for hosting us). 
106 | 107 | Getting help 108 | ------------ 109 | 110 | .. sec-begin-getting-help 111 | 112 | If you have any issues or would like to discuss a feature request, please raise them in the `OpenSCM Two Layer Model issue tracker `_. 113 | If your issue is a feature request or a bug, please use the templates available, otherwise, simply open a normal issue. 114 | 115 | .. sec-end-getting-help 116 | 117 | Contributing 118 | ------------ 119 | 120 | Please see the `Development section of the docs `_. 121 | 122 | .. sec-begin-links 123 | 124 | .. |CI CD| image:: https://github.com/openscm/openscm-twolayermodel/workflows/OpenSCM%20Two%20Layer%20Model%20CI-CD/badge.svg 125 | :target: https://github.com/openscm/openscm-twolayermodel/actions?query=workflow%3A%22OpenSCM+Two+Layer+Model+CI-CD%22 126 | .. |Coverage| image:: https://codecov.io/gh/openscm/openscm-twolayermodel/branch/master/graph/badge.svg 127 | :target: https://codecov.io/gh/openscm/openscm-twolayermodel 128 | .. |Docs| image:: https://readthedocs.org/projects/openscm-two-layer-model/badge/?version=latest 129 | :target: https://openscm-two-layer-model.readthedocs.io/en/latest/?badge=latest 130 | .. |JOSS| image:: https://joss.theoj.org/papers/94a3759c9ea117499b90c56421ef4857/status.svg 131 | :target: https://joss.theoj.org/papers/94a3759c9ea117499b90c56421ef4857 132 | .. |PyPI Install| image:: https://github.com/openscm/openscm-twolayermodel/workflows/Test%20PyPI%20install/badge.svg 133 | :target: https://github.com/openscm/openscm-twolayermodel/actions?query=workflow%3A%22Test+PyPI+install%22 134 | .. |PyPI| image:: https://img.shields.io/pypi/pyversions/openscm-twolayermodel.svg 135 | :target: https://pypi.org/project/openscm-twolayermodel/ 136 | .. |PyPI Version| image:: https://img.shields.io/pypi/v/openscm-twolayermodel.svg 137 | :target: https://pypi.org/project/openscm-twolayermodel/ 138 | .. 
|conda install| image:: https://github.com/openscm/openscm-twolayermodel/workflows/Test%20conda%20install/badge.svg 139 | :target: https://github.com/openscm/openscm-twolayermodel/actions?query=workflow%3A%22Test+conda+install%22 140 | .. |conda platforms| image:: https://img.shields.io/conda/pn/conda-forge/openscm-twolayermodel.svg 141 | :target: https://anaconda.org/conda-forge/openscm-twolayermodel 142 | .. |conda version| image:: https://img.shields.io/conda/vn/conda-forge/openscm-twolayermodel.svg 143 | :target: https://anaconda.org/conda-forge/openscm-twolayermodel 144 | .. |Contributors| image:: https://img.shields.io/github/contributors/openscm/openscm-twolayermodel.svg 145 | :target: https://github.com/openscm/openscm-twolayermodel/graphs/contributors 146 | .. |Last Commit| image:: https://img.shields.io/github/last-commit/openscm/openscm-twolayermodel.svg 147 | :target: https://github.com/openscm/openscm-twolayermodel/commits/master 148 | .. |License| image:: https://img.shields.io/github/license/openscm/openscm-twolayermodel.svg 149 | :target: https://github.com/openscm/openscm-twolayermodel/blob/master/LICENSE 150 | 151 | .. sec-end-links 152 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = source 9 | BUILDDIR = build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=source 11 | set BUILDDIR=build 12 | 13 | if "%1" == "" goto help 14 | 15 | %SPHINXBUILD% >NUL 2>NUL 16 | if errorlevel 9009 ( 17 | echo. 18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 19 | echo.installed, then set the SPHINXBUILD environment variable to point 20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 21 | echo.may add the Sphinx directory to PATH. 22 | echo. 23 | echo.If you don't have Sphinx installed, grab it from 24 | echo.http://sphinx-doc.org/ 25 | exit /b 1 26 | ) 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /docs/source/_static/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openscm/openscm-twolayermodel/ec212a28d733d459765f8dcf9cfb2f96ccbb8c36/docs/source/_static/.gitkeep -------------------------------------------------------------------------------- /docs/source/base.rst: -------------------------------------------------------------------------------- 1 | .. _base-reference: 2 | 3 | Base API 4 | --------- 5 | 6 | .. 
automodule:: openscm_twolayermodel.base 7 | -------------------------------------------------------------------------------- /docs/source/changelog.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../../CHANGELOG.rst 2 | -------------------------------------------------------------------------------- /docs/source/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 2 | # 3 | # This file only contains a selection of the most common options. For a full 4 | # list see the documentation: 5 | # https://www.sphinx-doc.org/en/master/usage/configuration.html 6 | 7 | # -- Path setup -------------------------------------------------------------- 8 | 9 | import os 10 | import sys 11 | 12 | sys.path.append(os.path.dirname(os.path.dirname(__file__))) 13 | sys.path.append( 14 | os.path.join(os.path.dirname(__file__), "../../src/openscm_twolayermodel") 15 | ) 16 | from _version import get_versions # isort:skip # append path before 17 | 18 | 19 | # -- Project information ----------------------------------------------------- 20 | 21 | project = "OpenSCM Two Layer Model" 22 | authors = ", ".join(["Chris Smith", "Zeb Nicholls"]) 23 | copyright_year = "2020" 24 | copyright = "{}, {}".format(copyright_year, authors) 25 | author = authors 26 | 27 | # The short X.Y version 28 | version = get_versions()["version"].split("+")[0] 29 | # The full version, including alpha/beta/rc tags 30 | release = get_versions()["version"] 31 | 32 | 33 | # -- General configuration --------------------------------------------------- 34 | 35 | # Add any Sphinx extension module names here, as strings. They can be 36 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 37 | # ones. 
38 | extensions = [ 39 | "sphinx.ext.autodoc", 40 | "sphinx.ext.coverage", 41 | "sphinx.ext.intersphinx", 42 | "sphinx.ext.napoleon", # pass numpy style docstrings 43 | "nbsphinx", 44 | "sphinx.ext.mathjax", 45 | "sphinx_copybutton", 46 | ] 47 | autodoc_default_options = { 48 | "inherited-members": None, 49 | "members": None, 50 | # "private-members": False, 51 | "show-inheritance": None, 52 | "undoc-members": None, 53 | } 54 | 55 | # Add any paths that contain templates here, relative to this directory. 56 | templates_path = ["_templates"] 57 | 58 | # The suffix(es) of source filenames. 59 | # You can specify multiple suffix as a list of string: 60 | # 61 | # source_suffix = ['.rst', '.md'] 62 | source_suffix = ".rst" 63 | 64 | # The master toctree document. 65 | master_doc = "index" 66 | 67 | # The language for content autogenerated by Sphinx. Refer to documentation 68 | # for a list of supported languages. 69 | # 70 | # This is also used if you do content translation via gettext catalogs. 71 | # Usually you set "language" from the command line for these cases. 72 | language = "en" 73 | 74 | # List of patterns, relative to source directory, that match files and 75 | # directories to ignore when looking for source files. 76 | # This pattern also affects html_static_path and html_extra_path. 77 | exclude_patterns = ["build", "**.ipynb_checkpoints"] 78 | 79 | # The name of the Pygments (syntax highlighting) style to use. 80 | pygments_style = "sphinx" 81 | 82 | # -- Options for HTML output ------------------------------------------------- 83 | 84 | # The theme to use for HTML and HTML Help pages. See the documentation for 85 | # a list of builtin themes. 86 | # 87 | html_theme = "sphinx_rtd_theme" 88 | 89 | # Add any paths that contain custom static files (such as style sheets) here, 90 | # relative to this directory. They are copied after the builtin static files, 91 | # so a file named "default.css" will overwrite the builtin "default.css". 
92 | html_static_path = ["_static"] 93 | 94 | # Custom sidebar templates, must be a dictionary that maps document names 95 | # to template names. 96 | # 97 | # The default sidebars (for documents that don't match any pattern) are 98 | # defined by theme itself. Builtin themes are using these templates by 99 | # default: ``['localtoc.html', 'relations.html', 'sourcelink.html', 100 | # 'searchbox.html']``. 101 | # 102 | # html_sidebars = {} 103 | 104 | html_context = { 105 | "display_github": False, 106 | "github_user": "openscm", 107 | "github_repo": "openscm-twolayermodel", 108 | "github_version": "master", 109 | "conf_py_path": "/docs/source", 110 | } 111 | 112 | # -- Options for HTMLHelp output --------------------------------------------- 113 | 114 | # Output file base name for HTML help builder. 115 | htmlhelp_basename = "openscmtwolayermodeldoc" 116 | 117 | 118 | # -- Options for LaTeX output ------------------------------------------------ 119 | 120 | latex_elements = { 121 | # The paper size ('letterpaper' or 'a4paper'). 122 | # 123 | # 'papersize': 'letterpaper', 124 | # The font size ('10pt', '11pt' or '12pt'). 125 | # 126 | # 'pointsize': '10pt', 127 | # Additional stuff for the LaTeX preamble. 128 | # 129 | # 'preamble': '', 130 | # Latex figure (float) alignment 131 | # 132 | # 'figure_align': 'htbp', 133 | } 134 | 135 | # Grouping the document tree into LaTeX files. List of tuples 136 | # (source start file, target name, title, 137 | # author, documentclass [howto, manual, or own class]). 138 | latex_documents = [ 139 | ( 140 | master_doc, 141 | "openscm-twolayermodel.tex", 142 | "OpenSCM Two Layer Model Documentation", 143 | author, 144 | "manual", 145 | ) 146 | ] 147 | 148 | 149 | # -- Options for manual page output ------------------------------------------ 150 | 151 | # One entry per manual page. List of tuples 152 | # (source start file, name, description, authors, manual section). 
153 | man_pages = [ 154 | ( 155 | master_doc, 156 | "openscm-twolayermodel", 157 | "OpenSCM Two Layer Model Documentation", 158 | [author], 159 | 1, 160 | ) 161 | ] 162 | 163 | 164 | # -- Options for Texinfo output ---------------------------------------------- 165 | 166 | # Grouping the document tree into Texinfo files. List of tuples 167 | # (source start file, target name, title, author, 168 | # dir menu entry, description, category) 169 | texinfo_documents = [ 170 | ( 171 | master_doc, 172 | "openscm-twolayermodel", 173 | "OpenSCM Two Layer Model Documentation", 174 | author, 175 | "openscm-twolayermodel", 176 | ( 177 | "Implementations of the two layer radiative forcing driven models by " 178 | "`Held et al. `_ " 179 | "and `Geoffroy et al. `_" 180 | ), 181 | "Miscellaneous", 182 | ) 183 | ] 184 | 185 | # -- Options for Epub output ------------------------------------------------- 186 | 187 | # Bibliographic Dublin Core info. 188 | epub_title = project 189 | 190 | # The unique identifier of the text. This can be a ISBN number 191 | # or the project homepage. 192 | # 193 | # epub_identifier = '' 194 | 195 | # A unique identification for the text. 196 | # 197 | # epub_uid = '' 198 | 199 | # A list of files that should not be packed into the epub file. 200 | epub_exclude_files = ["search.html"] 201 | 202 | 203 | # -- Extension configuration ------------------------------------------------- 204 | 205 | ## cov 206 | 207 | coverage_write_headline = False # do not write headlines. 208 | 209 | ## nbsphinx 210 | 211 | nbsphinx_execute = "always" 212 | 213 | # -- Options for intersphinx extension --------------------------------------- 214 | 215 | # Example configuration for intersphinx: refer to the Python standard library. 
216 | intersphinx_mapping = { 217 | "numpy": ("https://docs.scipy.org/doc/numpy", None), 218 | "pandas": ("https://pandas.pydata.org/pandas-docs/stable", None), 219 | "python": ("https://docs.python.org/3", None), 220 | "pyam": ("https://pyam-iamc.readthedocs.io/en/latest", None), 221 | "scmdata": ("https://scmdata.readthedocs.io/en/latest", None), 222 | # "pint": ("https://pint.readthedocs.io/en/latest", None), # no full API doc here, unfortunately 223 | } 224 | napoleon_google_docstring = False 225 | napoleon_numpy_docstring = True 226 | set_type_checking_flag = False 227 | 228 | # -- Options for todo extension ---------------------------------------------- 229 | 230 | # If true, `todo` and `todoList` produce output, else they produce nothing. 231 | todo_include_todos = True 232 | 233 | # # -- Logos ------------------------------------------------------------------- 234 | 235 | # html_logo = "_static/logo_200px_wide.png" 236 | # latex_logo = "_static/logo.png" 237 | 238 | 239 | # -- Misc configuration ------------------------------------------------- 240 | 241 | rst_epilog = """ 242 | .. |CO2| replace:: CO\ :sub:`2`\ 243 | """ 244 | -------------------------------------------------------------------------------- /docs/source/constants.rst: -------------------------------------------------------------------------------- 1 | .. _constants-reference: 2 | 3 | Constants API 4 | ------------- 5 | 6 | .. automodule:: openscm_twolayermodel.constants 7 | -------------------------------------------------------------------------------- /docs/source/development.rst: -------------------------------------------------------------------------------- 1 | .. _development: 2 | 3 | Development 4 | =========== 5 | 6 | If you're interested in contributing to OpenSCM Two Layer Model, we'd love to have you on board! 7 | This section of the docs details how to get setup to contribute and how best to communicate. 8 | 9 | .. 
contents:: :local: 10 | 11 | Contributing 12 | ------------ 13 | 14 | All contributions are welcome, some possible suggestions include: 15 | 16 | - tutorials (or support questions which, once solved, result in a new tutorial :D) 17 | - blog posts 18 | - improving the documentation 19 | - bug reports 20 | - feature requests 21 | - pull requests 22 | 23 | Please report issues or discuss feature requests in the `OpenSCM Two Layer Model issue tracker`_. 24 | If your issue is a feature request or a bug, please use the templates available, otherwise, simply open a normal issue. 25 | 26 | As a contributor, please follow a couple of conventions: 27 | 28 | - Create issues in the `OpenSCM Two Layer Model issue tracker`_ for changes and enhancements, this ensures that everyone in the community has a chance to comment 29 | - Be welcoming to newcomers and encourage diverse new contributors from all backgrounds: see the `Python Community Code of Conduct `_ 30 | - Only push to your own branches, this allows people to force push to their own branches as they need without fear of causing others headaches 31 | - Start all pull requests as draft pull requests and only mark them as ready for review once they've been rebased onto master, this makes it much simpler for reviewers 32 | - Try and make lots of small pull requests, this makes it easier for reviewers and faster for everyone as review time grows exponentially with the number of lines in a pull request 33 | 34 | 35 | Getting setup 36 | ------------- 37 | 38 | To get setup as a developer, we recommend the following steps (if any of these tools are unfamiliar, please see the resources we recommend in `Development tools`_): 39 | 40 | #. Install conda and make 41 | #. Run ``make virtual-environment``, if that fails you can try doing it manually 42 | 43 | #. Change your current directory to OpenSCM Two Layer Model's root directory (i.e. the one which contains ``README.rst``), ``cd openscm-twolayermodel`` 44 | #. 
Create a virtual environment to use with OpenSCM Two Layer Model ``python3 -m venv venv`` 45 | #. Activate your virtual environment ``source ./venv/bin/activate`` 46 | #. Upgrade pip ``pip install --upgrade pip`` 47 | #. Install the development dependencies (very important, make sure your virtual environment is active before doing this) ``pip install -e .[dev]`` 48 | 49 | #. Make sure the tests pass by running ``make checks``, if that fails the commands can be read out of the ``Makefile`` 50 | 51 | 52 | Getting help 53 | ~~~~~~~~~~~~ 54 | 55 | Whilst developing, unexpected things can go wrong (that's why it's called 'developing', if we knew what we were doing, it would already be 'developed'). 56 | Normally, the fastest way to solve an issue is to contact us via the `issue tracker `_. 57 | The other option is to debug yourself. 58 | For this purpose, we provide a list of the tools we use during our development as starting points for your search to find what has gone wrong. 59 | 60 | Development tools 61 | +++++++++++++++++ 62 | 63 | This list of development tools is what we rely on to develop OpenSCM Two Layer Model reliably and reproducibly. 64 | It gives you a few starting points in case things do go inexplicably wrong and you want to work out why. 65 | We include links with each of these tools to starting points that we think are useful, in case you want to learn more. 
66 | 67 | - `Git `_ 68 | 69 | - `Make `_ 70 | 71 | - `Conda virtual environments `_ 72 | 73 | - `Pip and pip virtual environments `_ 74 | 75 | - `Tests `_ 76 | 77 | - we use a blend of `pytest `_ and the inbuilt Python testing capabilities for our tests so checkout what we've already done in ``tests`` to get a feel for how it works 78 | 79 | - `Continuous integration (CI) `_ (also `brief intro blog post `_ and a `longer read `_) 80 | 81 | - we use GitHub CI for our CI but there are a number of good providers 82 | 83 | - `Jupyter Notebooks `_ 84 | 85 | - Jupyter is automatically included in your virtual environment if you follow our `Getting setup`_ instructions 86 | 87 | - Sphinx_ 88 | 89 | 90 | Other tools 91 | +++++++++++ 92 | 93 | We also use some other tools which aren't necessarily the most familiar. 94 | Here we provide a list of these along with useful resources. 95 | 96 | .. _regular-expressions: 97 | 98 | - `Regular expressions `_ 99 | 100 | - we use `regex101.com `_ to help us write and check our regular expressions, make sure the language is set to Python to make your life easy! 101 | 102 | Formatting 103 | ---------- 104 | 105 | To help us focus on what the code does, not how it looks, we use a couple of automatic formatting tools. 106 | These automatically format the code for us and tell us where the errors are. 107 | To use them, after setting yourself up (see `Getting setup`_), simply run ``make format``. 108 | Note that ``make format`` can only be run if you have committed all your work i.e. your working directory is 'clean'. 109 | This restriction is made to ensure that you don't format code without being able to undo it, just in case something goes wrong. 110 | 111 | 112 | Building the docs 113 | ----------------- 114 | 115 | After setting yourself up (see `Getting setup`_), building the docs is as simple as running ``make docs`` (note, run ``make -B docs`` to force the docs to rebuild and ignore make when it says '... index.html is up to date'). 
116 | This will build the docs for you. 117 | You can preview them by opening ``docs/build/html/index.html`` in a browser. 118 | 119 | For documentation we use Sphinx_. 120 | To get ourselves started with Sphinx, we started with `this example `_ then used `Sphinx's getting started guide `_. 121 | 122 | 123 | Gotchas 124 | ~~~~~~~ 125 | 126 | To get Sphinx to generate pdfs (rarely worth the hassle), you require `Latexmk `_. 127 | On a Mac this can be installed with ``sudo tlmgr install latexmk``. 128 | You will most likely also need to install some other packages (if you don't have the full distribution). 129 | You can check which package contains any missing files with ``tlmgr search --global --file [filename]``. 130 | You can then install the packages with ``sudo tlmgr install [package]``. 131 | 132 | 133 | Docstring style 134 | ~~~~~~~~~~~~~~~ 135 | 136 | For our docstrings we use numpy style docstrings. 137 | For more information on these, `here is the full guide `_ and `the quick reference we also use `_. 138 | 139 | 140 | Releasing 141 | --------- 142 | 143 | First step 144 | ~~~~~~~~~~ 145 | 146 | #. Test installation with dependencies ``make test-install`` 147 | #. Update ``CHANGELOG.rst`` 148 | 149 | - add a header for the new version between ``master`` and the latest bullet point 150 | - this should leave the section underneath the master header empty 151 | 152 | #. ``git add .`` 153 | #. ``git commit -m "Prepare for release of vX.Y.Z"`` 154 | #. ``git tag vX.Y.Z`` 155 | #. Test version updated as intended with ``make test-install`` 156 | 157 | 158 | Push to repository 159 | ~~~~~~~~~~~~~~~~~~ 160 | 161 | To do the release, push the tags and the repository state. 162 | 163 | #. ``git push`` 164 | #. ``git push --tags`` 165 | 166 | Assuming all the checks pass, this automatically triggers a release on PyPI via the ``.github/workflows/ci-cd-workflow.yml`` action. 167 | 168 | 169 | Why is there a ``Makefile`` in a pure Python repository? 
170 | -------------------------------------------------------- 171 | 172 | Whilst it may not be standard practice, a ``Makefile`` is a simple way to automate general setup (environment setup in particular). 173 | Hence we have one here which basically acts as a notes file for how to do all those little jobs which we often forget e.g. setting up environments, running tests (and making sure we're in the right environment), building docs, setting up auxiliary bits and pieces. 174 | 175 | .. _Sphinx: http://www.sphinx-doc.org/en/master/ 176 | .. _OpenSCM Two Layer Model issue tracker: https://github.com/openscm/openscm-twolayermodel/issues 177 | .. _`OpenSCM Two Layer Model's PyPI`: https://pypi.org/project/openscm-twolayermodel/ 178 | -------------------------------------------------------------------------------- /docs/source/errors.rst: -------------------------------------------------------------------------------- 1 | .. _errors-reference: 2 | 3 | Errors API 4 | ---------- 5 | 6 | .. automodule:: openscm_twolayermodel.errors 7 | -------------------------------------------------------------------------------- /docs/source/impulse_response_model.rst: -------------------------------------------------------------------------------- 1 | .. _impulse_response_model-reference: 2 | 3 | Impulse Response Model API 4 | -------------------------- 5 | 6 | .. automodule:: openscm_twolayermodel.impulse_response_model 7 | -------------------------------------------------------------------------------- /docs/source/index.rst: -------------------------------------------------------------------------------- 1 | .. OpenSCM Two Layer Model documentation master file, created by 2 | sphinx-quickstart on Thu Mar 19 12:35:34 2020. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | OpenSCM Two Layer Model 7 | ======================= 8 | 9 | .. 
include:: ../../README.rst 10 | :start-after: sec-begin-index 11 | :end-before: sec-end-index 12 | 13 | .. include:: ../../README.rst 14 | :start-after: sec-begin-license 15 | :end-before: sec-end-license 16 | 17 | .. include:: ../../README.rst 18 | :start-after: sec-begin-getting-help 19 | :end-before: sec-end-getting-help 20 | 21 | .. toctree:: 22 | :maxdepth: 2 23 | :caption: Documentation 24 | 25 | installation 26 | usage 27 | development 28 | 29 | .. toctree:: 30 | :maxdepth: 2 31 | :caption: API reference 32 | 33 | base 34 | impulse_response_model 35 | two_layer_model 36 | constants 37 | errors 38 | utils 39 | 40 | .. toctree:: 41 | :maxdepth: 2 42 | :caption: Versions 43 | 44 | changelog 45 | 46 | Index 47 | ----- 48 | 49 | - :ref:`genindex` 50 | - :ref:`modindex` 51 | - :ref:`search` 52 | -------------------------------------------------------------------------------- /docs/source/installation.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../../README.rst 2 | :start-after: sec-begin-installation 3 | :end-before: sec-end-installation 4 | 5 | For more details, see the :ref:`development section of the docs `. 6 | -------------------------------------------------------------------------------- /docs/source/two_layer_model.rst: -------------------------------------------------------------------------------- 1 | .. _two_layer_model-reference: 2 | 3 | Two Layer Model API 4 | ------------------- 5 | 6 | .. automodule:: openscm_twolayermodel.two_layer_model 7 | -------------------------------------------------------------------------------- /docs/source/usage.rst: -------------------------------------------------------------------------------- 1 | Usage 2 | ===== 3 | 4 | Here we provide examples of OpenSCM two layer model's behaviour and usage. 5 | The source code of these usage examples is available in the folder 6 | `docs/source/usage`_ of the `GitHub repository`_. 7 | 8 | .. 
_`docs/source/usage`: 9 | https://github.com/openscm/openscm-twolayermodel/tree/master/docs/source/usage 10 | 11 | .. _`GitHub repository`: 12 | https://github.com/openscm/openscm-twolayermodel 13 | 14 | Basic demos 15 | +++++++++++ 16 | 17 | .. toctree:: 18 | :maxdepth: 1 19 | 20 | usage/getting-started.ipynb 21 | usage/running-scenarios.ipynb 22 | 23 | More detail 24 | +++++++++++ 25 | 26 | .. toctree:: 27 | :maxdepth: 1 28 | 29 | usage/impulse-response-equivalence.ipynb 30 | usage/one-layer-model.ipynb 31 | -------------------------------------------------------------------------------- /docs/source/utils.rst: -------------------------------------------------------------------------------- 1 | .. _utils-reference: 2 | 3 | Utils API 4 | --------- 5 | 6 | .. automodule:: openscm_twolayermodel.utils 7 | -------------------------------------------------------------------------------- /latex.template: -------------------------------------------------------------------------------- 1 | % Taken from MIT licensed `whedon` repository, Copyright (c) 2016 Open Journals 2 | % https://github.com/openjournals/whedon/ 3 | \documentclass[10pt,a4paper,onecolumn]{article} 4 | \usepackage{marginnote} 5 | \usepackage{graphicx} 6 | \usepackage{xcolor} 7 | \usepackage{authblk,etoolbox} 8 | \usepackage{titlesec} 9 | \usepackage{calc} 10 | \usepackage{tikz} 11 | \usepackage{hyperref} 12 | \usepackage{caption} 13 | \usepackage{tcolorbox} 14 | \usepackage{amssymb,amsmath} 15 | \usepackage{ifxetex,ifluatex} 16 | \usepackage{seqsplit} 17 | \usepackage{fixltx2e} % provides \textsubscript 18 | \usepackage[ 19 | backend=biber, 20 | % style=alphabetic, 21 | % citestyle=numeric 22 | ]{biblatex} 23 | \bibliography{$bibliography$} 24 | 25 | 26 | % --- Page layout ------------------------------------------------------------- 27 | \usepackage[top=3.5cm, bottom=3cm, right=1.5cm, left=1.0cm, 28 | headheight=2.2cm, reversemp, includemp, marginparwidth=4.5cm]{geometry} 29 | 30 | % --- Default font 
------------------------------------------------------------ 31 | % \renewcommand\familydefault{\sfdefault} 32 | 33 | % --- Style ------------------------------------------------------------------- 34 | \renewcommand{\bibfont}{\small \sffamily} 35 | \renewcommand{\captionfont}{\small\sffamily} 36 | \renewcommand{\captionlabelfont}{\bfseries} 37 | 38 | % --- Section/SubSection/SubSubSection ---------------------------------------- 39 | \titleformat{\section} 40 | {\normalfont\sffamily\Large\bfseries} 41 | {}{0pt}{} 42 | \titleformat{\subsection} 43 | {\normalfont\sffamily\large\bfseries} 44 | {}{0pt}{} 45 | \titleformat{\subsubsection} 46 | {\normalfont\sffamily\bfseries} 47 | {}{0pt}{} 48 | \titleformat*{\paragraph} 49 | {\sffamily\normalsize} 50 | 51 | 52 | % --- Header / Footer --------------------------------------------------------- 53 | \usepackage{fancyhdr} 54 | \pagestyle{fancy} 55 | \fancyhf{} 56 | %\renewcommand{\headrulewidth}{0.50pt} 57 | \renewcommand{\headrulewidth}{0pt} 58 | %\fancyhead[L]{\hspace{-0.75cm}\includegraphics[width=5.5cm]{$joss_logo_path$}} 59 | \fancyhead[C]{} 60 | \fancyhead[R]{} 61 | \renewcommand{\footrulewidth}{0.25pt} 62 | 63 | \fancyfoot[L]{\footnotesize{\sffamily $citation_author$, ($year$). $paper_title$. 
\textit{Journal of Open Source Software}, $volume$($issue$), $page$, \href{https://doi.org/$formatted_doi$}{doi:$formatted_doi$}}} 64 | 65 | 66 | \fancyfoot[R]{\sffamily \thepage} 67 | \makeatletter 68 | \let\ps@plain\ps@fancy 69 | \fancyheadoffset[L]{4.5cm} 70 | \fancyfootoffset[L]{4.5cm} 71 | 72 | % --- Macros --------- 73 | 74 | \definecolor{linky}{rgb}{0.0, 0.5, 1.0} 75 | 76 | \newtcolorbox{repobox} 77 | {colback=red, colframe=red!75!black, 78 | boxrule=0.5pt, arc=2pt, left=6pt, right=6pt, top=3pt, bottom=3pt} 79 | 80 | \newcommand{\ExternalLink}{% 81 | \tikz[x=1.2ex, y=1.2ex, baseline=-0.05ex]{% 82 | \begin{scope}[x=1ex, y=1ex] 83 | \clip (-0.1,-0.1) 84 | --++ (-0, 1.2) 85 | --++ (0.6, 0) 86 | --++ (0, -0.6) 87 | --++ (0.6, 0) 88 | --++ (0, -1); 89 | \path[draw, 90 | line width = 0.5, 91 | rounded corners=0.5] 92 | (0,0) rectangle (1,1); 93 | \end{scope} 94 | \path[draw, line width = 0.5] (0.5, 0.5) 95 | -- (1, 1); 96 | \path[draw, line width = 0.5] (0.6, 1) 97 | -- (1, 1) -- (1, 0.6); 98 | } 99 | } 100 | 101 | % --- Title / Authors --------------------------------------------------------- 102 | % patch \maketitle so that it doesn't center 103 | \patchcmd{\@maketitle}{center}{flushleft}{}{} 104 | \patchcmd{\@maketitle}{center}{flushleft}{}{} 105 | % patch \maketitle so that the font size for the title is normal 106 | \patchcmd{\@maketitle}{\LARGE}{\LARGE\sffamily}{}{} 107 | % patch the patch by authblk so that the author block is flush left 108 | \def\maketitle{{% 109 | \renewenvironment{tabular}[2][] 110 | {\begin{flushleft}} 111 | {\end{flushleft}} 112 | \AB@maketitle}} 113 | \makeatletter 114 | \renewcommand\AB@affilsepx{ \protect\Affilfont} 115 | %\renewcommand\AB@affilnote[1]{{\bfseries #1}\hspace{2pt}} 116 | \renewcommand\AB@affilnote[1]{{\bfseries #1}\hspace{3pt}} 117 | \makeatother 118 | \renewcommand\Authfont{\sffamily\bfseries} 119 | \renewcommand\Affilfont{\sffamily\small\mdseries} 120 | \setlength{\affilsep}{1em} 121 | 122 | 123 | \ifnum 0\ifxetex 
1\fi\ifluatex 1\fi=0 % if pdftex 124 | \usepackage[$if(fontenc)$$fontenc$$else$T1$endif$]{fontenc} 125 | \usepackage[utf8]{inputenc} 126 | 127 | \else % if luatex or xelatex 128 | \ifxetex 129 | \usepackage{mathspec} 130 | \else 131 | \usepackage{fontspec} 132 | \fi 133 | \defaultfontfeatures{Ligatures=TeX,Scale=MatchLowercase} 134 | 135 | \fi 136 | % use upquote if available, for straight quotes in verbatim environments 137 | \IfFileExists{upquote.sty}{\usepackage{upquote}}{} 138 | % use microtype if available 139 | \IfFileExists{microtype.sty}{% 140 | \usepackage{microtype} 141 | \UseMicrotypeSet[protrusion]{basicmath} % disable protrusion for tt fonts 142 | }{} 143 | 144 | \usepackage{hyperref} 145 | $if(colorlinks)$ 146 | \PassOptionsToPackage{usenames,dvipsnames}{color} % color is loaded by hyperref 147 | $endif$ 148 | \hypersetup{unicode=true, 149 | $if(title-meta)$ 150 | pdftitle={$title-meta$}, 151 | $endif$ 152 | $if(author-meta)$ 153 | pdfauthor={$author-meta$}, 154 | $endif$ 155 | $if(keywords)$ 156 | pdfkeywords={$for(keywords)$$keywords$$sep$; $endfor$}, 157 | $endif$ 158 | $if(colorlinks)$ 159 | colorlinks=true, 160 | linkcolor=$if(linkcolor)$$linkcolor$$else$Maroon$endif$, 161 | citecolor=$if(citecolor)$$citecolor$$else$Blue$endif$, 162 | urlcolor=$if(urlcolor)$$urlcolor$$else$Blue$endif$, 163 | $else$ 164 | pdfborder={0 0 0}, 165 | $endif$ 166 | breaklinks=true} 167 | \urlstyle{same} % don't use monospace font for urls 168 | $if(lang)$ 169 | \ifnum 0\ifxetex 1\fi\ifluatex 1\fi=0 % if pdftex 170 | \usepackage[shorthands=off,$for(babel-otherlangs)$$babel-otherlangs$,$endfor$main=$babel-lang$]{babel} 171 | $if(babel-newcommands)$ 172 | $babel-newcommands$ 173 | $endif$ 174 | \else 175 | \usepackage{polyglossia} 176 | \setmainlanguage[$polyglossia-lang.options$]{$polyglossia-lang.name$} 177 | $for(polyglossia-otherlangs)$ 178 | \setotherlanguage[$polyglossia-otherlangs.options$]{$polyglossia-otherlangs.name$} 179 | $endfor$ 180 | \fi 181 | $endif$ 182 | 
$if(natbib)$ 183 | \usepackage{natbib} 184 | \bibliographystyle{$if(biblio-style)$$biblio-style$$else$plainnat$endif$} 185 | $endif$ 186 | $if(biblatex)$ 187 | \usepackage$if(biblio-style)$[style=$biblio-style$]$endif${biblatex} 188 | $if(biblatexoptions)$\ExecuteBibliographyOptions{$for(biblatexoptions)$$biblatexoptions$$sep$,$endfor$}$endif$ 189 | $for(bibliography)$ 190 | \addbibresource{$bibliography$} 191 | $endfor$ 192 | $endif$ 193 | $if(listings)$ 194 | \usepackage{listings} 195 | $endif$ 196 | $if(lhs)$ 197 | \lstnewenvironment{code}{\lstset{language=Haskell,basicstyle=\small\ttfamily}}{} 198 | $endif$ 199 | $if(highlighting-macros)$ 200 | $highlighting-macros$ 201 | $endif$ 202 | $if(verbatim-in-note)$ 203 | \usepackage{fancyvrb} 204 | \VerbatimFootnotes % allows verbatim text in footnotes 205 | $endif$ 206 | $if(tables)$ 207 | \usepackage{longtable,booktabs} 208 | $endif$ 209 | $if(graphics)$ 210 | \usepackage{graphicx,grffile} 211 | \makeatletter 212 | \def\maxwidth{\ifdim\Gin@nat@width>\linewidth\linewidth\else\Gin@nat@width\fi} 213 | \def\maxheight{\ifdim\Gin@nat@height>\textheight\textheight\else\Gin@nat@height\fi} 214 | \makeatother 215 | % Scale images if necessary, so that they will not overflow the page 216 | % margins by default, and it is still possible to overwrite the defaults 217 | % using explicit options in \includegraphics[width, height, ...]{} 218 | \setkeys{Gin}{width=\maxwidth,height=\maxheight,keepaspectratio} 219 | $endif$ 220 | $if(links-as-notes)$ 221 | % Make links footnotes instead of hotlinks: 222 | \renewcommand{\href}[2]{#2\footnote{\url{#1}}} 223 | $endif$ 224 | $if(strikeout)$ 225 | \usepackage[normalem]{ulem} 226 | % avoid problems with \sout in headers with hyperref: 227 | \pdfstringdefDisableCommands{\renewcommand{\sout}{}} 228 | $endif$ 229 | $if(indent)$ 230 | $else$ 231 | \IfFileExists{parskip.sty}{% 232 | \usepackage{parskip} 233 | }{% else 234 | \setlength{\parindent}{0pt} 235 | \setlength{\parskip}{6pt plus 2pt 
minus 1pt} 236 | } 237 | $endif$ 238 | \setlength{\emergencystretch}{3em} % prevent overfull lines 239 | \providecommand{\tightlist}{% 240 | \setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}} 241 | $if(numbersections)$ 242 | \setcounter{secnumdepth}{5} 243 | $else$ 244 | \setcounter{secnumdepth}{0} 245 | $endif$ 246 | $if(subparagraph)$ 247 | $else$ 248 | % Redefines (sub)paragraphs to behave more like sections 249 | \ifx\paragraph\undefined\else 250 | \let\oldparagraph\paragraph 251 | \renewcommand{\paragraph}[1]{\oldparagraph{#1}\mbox{}} 252 | \fi 253 | \ifx\subparagraph\undefined\else 254 | \let\oldsubparagraph\subparagraph 255 | \renewcommand{\subparagraph}[1]{\oldsubparagraph{#1}\mbox{}} 256 | \fi 257 | $endif$ 258 | $if(dir)$ 259 | \ifxetex 260 | % load bidi as late as possible as it modifies e.g. graphicx 261 | $if(latex-dir-rtl)$ 262 | \usepackage[RTLdocument]{bidi} 263 | $else$ 264 | \usepackage{bidi} 265 | $endif$ 266 | \fi 267 | \ifnum 0\ifxetex 1\fi\ifluatex 1\fi=0 % if pdftex 268 | \TeXXeTstate=1 269 | \newcommand{\RL}[1]{\beginR #1\endR} 270 | \newcommand{\LR}[1]{\beginL #1\endL} 271 | \newenvironment{RTL}{\beginR}{\endR} 272 | \newenvironment{LTR}{\beginL}{\endL} 273 | \fi 274 | $endif$ 275 | $for(header-includes)$ 276 | $header-includes$ 277 | $endfor$ 278 | 279 | $if(title)$ 280 | \title{$title$$if(thanks)$\thanks{$thanks$}$endif$} 281 | $endif$ 282 | $if(subtitle)$ 283 | \providecommand{\subtitle}[1]{} 284 | \subtitle{$subtitle$} 285 | $endif$ 286 | 287 | $if(authors)$ 288 | $for(authors)$ 289 | $if(authors.affiliation)$ 290 | \author[$authors.affiliation$]{$authors.name$} 291 | $else$ 292 | \author{$authors.name$} 293 | $endif$ 294 | $endfor$ 295 | $endif$ 296 | 297 | $if(affiliations)$ 298 | $for(affiliations)$ 299 | \affil[$affiliations.index$]{$affiliations.name$} 300 | $endfor$ 301 | $endif$ 302 | \date{\vspace{-5ex}} 303 | 304 | \begin{document} 305 | $if(title)$ 306 | \maketitle 307 | $endif$ 308 | $if(abstract)$ 309 | \begin{abstract} 310 
| $abstract$ 311 | \end{abstract} 312 | $endif$ 313 | 314 | \marginpar{ 315 | %\hrule 316 | \sffamily\small 317 | 318 | {\bfseries DOI:} \href{https://doi.org/$formatted_doi$}{\color{linky}{$formatted_doi$}} 319 | 320 | \vspace{2mm} 321 | 322 | {\bfseries Software} 323 | \begin{itemize} 324 | \setlength\itemsep{0em} 325 | \item \href{$review_issue_url$}{\color{linky}{Review}} \ExternalLink 326 | \item \href{$repository$}{\color{linky}{Repository}} \ExternalLink 327 | \item \href{$archive_doi$}{\color{linky}{Archive}} \ExternalLink 328 | \end{itemize} 329 | 330 | \vspace{2mm} 331 | {\bfseries Licence}\\ 332 | Authors of JOSS papers retain copyright and release the work under a Creative Commons Attribution 4.0 International License (\href{http://creativecommons.org/licenses/by/4.0/}{\color{linky}{CC-BY}}). 333 | } 334 | 335 | $for(include-before)$ 336 | $include-before$ 337 | 338 | $endfor$ 339 | $if(toc)$ 340 | { 341 | $if(colorlinks)$ 342 | \hypersetup{linkcolor=$if(toccolor)$$toccolor$$else$black$endif$} 343 | $endif$ 344 | \setcounter{tocdepth}{$toc-depth$} 345 | \tableofcontents 346 | } 347 | $endif$ 348 | $if(lot)$ 349 | \listoftables 350 | $endif$ 351 | $if(lof)$ 352 | \listoffigures 353 | $endif$ 354 | $body$ 355 | 356 | $if(natbib)$ 357 | $if(bibliography)$ 358 | $if(biblio-title)$ 359 | $if(book-class)$ 360 | \renewcommand\bibname{$biblio-title$} 361 | $else$ 362 | \renewcommand\refname{$biblio-title$} 363 | $endif$ 364 | $endif$ 365 | \bibliography{$for(bibliography)$$bibliography$$sep$,$endfor$} 366 | 367 | $endif$ 368 | $endif$ 369 | $if(biblatex)$ 370 | \printbibliography$if(biblio-title)$[title=$biblio-title$]$endif$ 371 | 372 | $endif$ 373 | $for(include-after)$ 374 | $include-after$ 375 | 376 | $endfor$ 377 | \end{document} 378 | -------------------------------------------------------------------------------- /paper.bib: -------------------------------------------------------------------------------- 1 | @article{geoffroy_2013_two_layer1, 2 | author 
= {Geoffroy, O. and Saint-Martin, D. and Olivié, D. J. L. and Voldoire, A. and Bellon, G. and Tytéca, S.}, 3 | title = {Transient Climate Response in a Two-Layer Energy-Balance Model. Part I: Analytical Solution and Parameter Calibration Using {CMIP5} {AOGCM} Experiments}, 4 | journal = {Journal of Climate}, 5 | volume = {26}, 6 | number = {6}, 7 | pages = {1841-1857}, 8 | year = {2013}, 9 | doi = {10.1175/JCLI-D-12-00195.1}, 10 | 11 | URL = { 12 | https://doi.org/10.1175/JCLI-D-12-00195.1 13 | 14 | }, 15 | eprint = { 16 | https://doi.org/10.1175/JCLI-D-12-00195.1 17 | 18 | } 19 | , 20 | abstract = { AbstractThis is the first part of a series of two articles analyzing the global thermal properties of atmosphere–ocean coupled general circulation models ({AOGCMs}) within the framework of a two-layer energy-balance model (EBM). In this part, the general analytical solution of the system is given and two idealized climate change scenarios, one with a step forcing and one with a linear forcing, are discussed. These solutions give a didactic description of the contributions from the equilibrium response and of the fast and slow transient responses during a climate transition. Based on these analytical solutions, a simple and physically based procedure to calibrate the two-layer model parameters using an {AOGCM} step-forcing experiment is introduced. Using this procedure, the global thermal properties of 16 {AOGCMs} participating in phase 5 of the Coupled Model Intercomparison Project ({CMIP5}) are determined. It is shown that, for a given {AOGCM}, the EBM tuned with only the abrupt 4×CO2 experiment is able to reproduce with a very good accuracy the temperature evolution in both a step-forcing and a linear-forcing experiment. The role of the upper-ocean and deep-ocean heat uptakes in the fast and slow responses is also discussed. 
One of the main weaknesses of the simple EBM discussed in this part is its ability to represent the evolution of the top-of-the-atmosphere radiative imbalance in the transient regime. This issue is addressed in Part II by taking into account the efficacy factor of deep-ocean heat uptake. } 21 | } 22 | 23 | @article{geoffroy_2013_two_layer2, 24 | author = {Geoffroy, O. and Saint-Martin, D. and Bellon, G. and Voldoire, A. and Olivié, D. J. L. and Tytéca, S.}, 25 | title = {Transient Climate Response in a Two-Layer Energy-Balance Model. Part II: Representation of the Efficacy of Deep-Ocean Heat Uptake and Validation for {CMIP5} {AOGCMs}}, 26 | journal = {Journal of Climate}, 27 | volume = {26}, 28 | number = {6}, 29 | pages = {1859-1876}, 30 | year = {2013}, 31 | doi = {10.1175/JCLI-D-12-00196.1}, 32 | 33 | URL = { 34 | https://doi.org/10.1175/JCLI-D-12-00196.1 35 | 36 | }, 37 | eprint = { 38 | https://doi.org/10.1175/JCLI-D-12-00196.1 39 | 40 | } 41 | , 42 | abstract = { AbstractIn this second part of a series of two articles analyzing the global thermal properties of atmosphere–ocean coupled general circulation models ({AOGCMs}) within the framework of a two-layer energy-balance model (EBM), the role of the efficacy of deep-ocean heat uptake is investigated. Taking into account such an efficacy factor is shown to amount to representing the effect of deep-ocean heat uptake on the local strength of the radiative feedback in the transient regime. It involves an additional term in the formulation of the radiative imbalance at the top of the atmosphere (TOA), which explains the nonlinearity between radiative imbalance and the mean surface temperature observed in some {AOGCMs}. An analytical solution of this system is given and this simple linear EBM is calibrated for the set of 16 {AOGCMs} of phase 5 of the Coupled Model Intercomparison Project ({CMIP5}) studied in Part I. 
It is shown that both the net radiative fluxes at TOA and the global surface temperature transient response are well represented by the simple EBM over the available period of simulations. Differences between this two-layer EBM and the previous version without an efficacy factor are analyzed and relationships between parameters are discussed. The simple model calibration applied to {AOGCMs} constitutes a new method for estimating their respective equilibrium climate sensitivity and adjusted radiative forcing amplitude from short-term step-forcing simulations and more generally a method to compute their global thermal properties. } 43 | } 44 | 45 | @article{held_2010_two_layer, 46 | author = {Held, Isaac M. and Winton, Michael and Takahashi, Ken and Delworth, Thomas and Zeng, Fanrong and Vallis, Geoffrey K.}, 47 | title = {Probing the Fast and Slow Components of Global Warming by Returning Abruptly to Preindustrial Forcing}, 48 | journal = {Journal of Climate}, 49 | volume = {23}, 50 | number = {9}, 51 | pages = {2418-2427}, 52 | year = {2010}, 53 | doi = {10.1175/2009JCLI3466.1}, 54 | 55 | URL = { 56 | https://doi.org/10.1175/2009JCLI3466.1 57 | 58 | }, 59 | eprint = { 60 | https://doi.org/10.1175/2009JCLI3466.1 61 | 62 | } 63 | , 64 | abstract = { Abstract The fast and slow components of global warming in a comprehensive climate model are isolated by examining the response to an instantaneous return to preindustrial forcing. The response is characterized by an initial fast exponential decay with an e-folding time smaller than 5 yr, leaving behind a remnant that evolves more slowly. The slow component is estimated to be small at present, as measured by the global mean near-surface air temperature, and, in the model examined, grows to 0.4°C by 2100 in the A1B scenario from the Special Report on Emissions Scenarios (SRES), and then to 1.4°C by 2300 if one holds radiative forcing fixed after 2100. 
The dominance of the fast component at present is supported by examining the response to an instantaneous doubling of CO2 and by the excellent fit to the model’s ensemble mean twentieth-century evolution with a simple one-box model with no long times scales. } 65 | } 66 | 67 | @article{bloch_johnson_2015_feedback_dependence, 68 | author = {Bloch-Johnson, Jonah and Pierrehumbert, Raymond T. and Abbot, Dorian S.}, 69 | title = {Feedback temperature dependence determines the risk of high warming}, 70 | journal = {Geophysical Research Letters}, 71 | volume = {42}, 72 | number = {12}, 73 | pages = {4973-4980}, 74 | keywords = {climate sensitivity, nonlinear feedbacks, observational estimates, GCMs, long tail}, 75 | doi = {10.1002/2015GL064240}, 76 | url = {https://agupubs.onlinelibrary.wiley.com/doi/abs/10.1002/2015GL064240}, 77 | eprint = {https://agupubs.onlinelibrary.wiley.com/doi/pdf/10.1002/2015GL064240}, 78 | abstract = {AbstractThe long-term warming from an anthropogenic increase in atmospheric CO2 is often assumed to be proportional to the forcing associated with that increase. This paper examines this linear approximation using a zero-dimensional energy balance model with a temperature-dependent feedback, with parameter values drawn from physical arguments and general circulation models. For a positive feedback temperature dependence, warming increases Earth's sensitivity, while greater sensitivity makes Earth warm more. These effects can feed on each other, greatly amplifying warming. As a result, for reasonable values of feedback temperature dependence and preindustrial feedback, Earth can jump to a warmer state under only one or two CO2 doublings. The linear approximation breaks down in the long tail of high climate sensitivity commonly seen in observational studies. Understanding feedback temperature dependence is therefore essential for inferring the risk of high warming from modern observations. 
Studies that assume linearity likely underestimate the risk of high warming.}, 79 | year = {2015} 80 | } 81 | 82 | @article{meinshausen_2011_rcp, 83 | title={The {RCP} greenhouse gas concentrations and their extensions from 1765 to 2300}, 84 | author={Meinshausen, M. and Smith, Steven J and Calvin, K and Daniel, John S and Kainuma, MLT and Lamarque, Jean-Francois and Matsumoto, Km and Montzka, SA and Raper, SCB and Riahi, K and others}, 85 | journal={Climatic change}, 86 | volume={109}, 87 | number={1-2}, 88 | pages={213}, 89 | year={2011}, 90 | publisher={Springer}, 91 | doi = {10.1007/s10584-011-0156-z} 92 | } 93 | 94 | @article{rohrschneider_2019_simple, 95 | title={On simple representations of the climate response to external radiative forcing}, 96 | author={Rohrschneider, Tim and Stevens, Bjorn and Mauritsen, Thorsten}, 97 | journal={Climate Dynamics}, 98 | volume={53}, 99 | number={5-6}, 100 | pages={3131--3145}, 101 | year={2019}, 102 | publisher={Springer}, 103 | doi = {10.1007/s00382-019-04686-4} 104 | } 105 | 106 | @Article{smith_2018_fairv1_3, 107 | AUTHOR = {Smith, C. J. and Forster, P. M. and Allen, M. and Leach, N. and Millar, R. J. and Passerello, G. A. and Regayre, L. A.}, 108 | TITLE = {FAIR v1.3: a simple emissions-based impulse response and carbon cycle model}, 109 | JOURNAL = {Geoscientific Model Development}, 110 | VOLUME = {11}, 111 | YEAR = {2018}, 112 | NUMBER = {6}, 113 | PAGES = {2273--2297}, 114 | URL = {https://www.geosci-model-dev.net/11/2273/2018/}, 115 | DOI = {10.5194/gmd-11-2273-2018} 116 | } 117 | 118 | @misc{fair_repo, 119 | author = {Smith, C. J. and Nicholls, Z. R. J. and Gieseke, R.}, 120 | title = {FaIR: Finite Amplitude Impulse-Reponse simple climate-carbon-cycle model}, 121 | year = {2020}, 122 | publisher = {GitHub}, 123 | journal = {GitHub repository}, 124 | url = {https://github.com/OMS-NetZero/FAIR} 125 | } 126 | 127 | @Article{rcmip_phase_1, 128 | AUTHOR = {Nicholls, Z. R. J. and Meinshausen, M. and Lewis, J. 
and Gieseke, R. and Dommenget, D. and Dorheim, K. and Fan, C.-S. and Fuglestvedt, J. S. and Gasser, T. and Gol\"uke, U. and Goodwin, P. and Hartin, C. and Hope, A. P. and Kriegler, E. and Leach, N. J. and Marchegiani, D. and McBride, L. A. and Quilcaille, Y. and Rogelj, J. and Salawitch, R. J. and Samset, B. H. and Sandstad, M. and Shiklomanov, A. N. and Skeie, R. B. and Smith, C. J. and Smith, S. and Tanaka, K. and Tsutsui, J. and Xie, Z.}, 129 | TITLE = {Reduced Complexity Model Intercomparison Project Phase 1: introduction and evaluation of global-mean temperature response}, 130 | JOURNAL = {Geoscientific Model Development}, 131 | VOLUME = {13}, 132 | YEAR = {2020}, 133 | NUMBER = {11}, 134 | PAGES = {5175--5190}, 135 | URL = {https://gmd.copernicus.org/articles/13/5175/2020/}, 136 | DOI = {10.5194/gmd-13-5175-2020} 137 | } 138 | 139 | @misc{pint, 140 | author = {H. E. Grecco and others}, 141 | title = {Pint: Operate and manipulate physical quantities in Python}, 142 | year = {2020}, 143 | publisher = {GitHub}, 144 | journal = {GitHub repository}, 145 | url = {https://github.com/hgrecco/pint} 146 | } 147 | 148 | @Article{Dommenget_2011_greb, 149 | author={Dommenget, Dietmar 150 | and Fl{\"o}ter, Janine}, 151 | title={Conceptual understanding of climate change with a globally resolved energy balance model}, 152 | journal={Climate Dynamics}, 153 | year={2011}, 154 | month={Dec}, 155 | day={01}, 156 | volume={37}, 157 | number={11}, 158 | pages={2143-2165}, 159 | abstract={The future climate change projections are essentially based on coupled general circulation model (CGCM) simulations, which give a distinct global warming pattern with arctic winter amplification, an equilibrium land-sea warming contrast and an inter-hemispheric warming gradient. 
While these simulations are the most important tool of the Intergovernmental Panel on Climate Change (IPCC) predictions, the conceptual understanding of these predicted structures of climate change and the causes of their uncertainties is very difficult to reach if only based on these highly complex CGCM simulations. In the study presented here we will introduce a very simple, globally resolved energy balance (GREB) model, which is capable of simulating the main characteristics of global warming. The model shall give a bridge between the strongly simplified energy balance models and the fully coupled 4-dimensional complex CGCMs. It provides a fast tool for the conceptual understanding and development of hypotheses for climate change studies, which shall build a basis or starting point for more detailed studies of observations and CGCM simulations. It is based on the surface energy balance by very simple representations of solar and thermal radiation, the atmospheric hydrological cycle, sensible turbulent heat flux, transport by the mean atmospheric circulation and heat exchange with the deeper ocean. Despite some limitations in the representations of the basic processes, the models climate sensitivity and the spatial structure of the warming pattern are within the uncertainties of the IPCC models simulations. It is capable of simulating aspects of the arctic winter amplification, the equilibrium land-sea warming contrast and the inter-hemispheric warming gradient with good agreement to the IPCC models in amplitude and structure. The results give some insight into the understanding of the land-sea contrast and the polar amplification. The GREB model suggests that the regional inhomogeneous distribution of atmospheric water vapor and the non-linear sensitivity of the downward thermal radiation to changes in the atmospheric water vapor concentration partly cause the land-sea contrast and may also contribute to the polar amplification. 
The combination of these characteristics causes, in general, dry and cold regions to warm more than other regions.}, 160 | issn={1432-0894}, 161 | doi={10.1007/s00382-011-1026-0}, 162 | url={https://doi.org/10.1007/s00382-011-1026-0} 163 | } 164 | 165 | @Article{hartin_2015_hector, 166 | AUTHOR = {Hartin, C. A. and Patel, P. and Schwarber, A. and Link, R. P. and Bond-Lamberty, B. P.}, 167 | TITLE = {A simple object-oriented and open-source model for scientific and policy analyses of the global climate system – Hector v1.0}, 168 | JOURNAL = {Geoscientific Model Development}, 169 | VOLUME = {8}, 170 | YEAR = {2015}, 171 | NUMBER = {4}, 172 | PAGES = {939--955}, 173 | URL = {https://gmd.copernicus.org/articles/8/939/2015/}, 174 | DOI = {10.5194/gmd-8-939-2015} 175 | } 176 | 177 | @article{Meinshausen_2011_magicc, 178 | author = {Meinshausen, M. and Raper, S. C. B. and Wigley, T. M. L.}, 179 | title = {Emulating coupled atmosphere-ocean and carbon cycle models with a simpler model, {MAGICC6} – Part 1: Model description and calibration}, 180 | journal = {Atmospheric Chemistry and Physics}, 181 | volume = {11}, 182 | year = {2011}, 183 | number = {4}, 184 | pages = {1417--1456}, 185 | doi = {10.5194/acp-11-1417-2011}, 186 | } 187 | 188 | @article{Gieseke_2018_pymagicc, 189 | doi = {10.21105/joss.00516}, 190 | url = {https://doi.org/10.21105/joss.00516}, 191 | year = {2018}, 192 | publisher = {The Open Journal}, 193 | volume = {3}, 194 | number = {22}, 195 | pages = {516}, 196 | author = {Robert Gieseke and Sven N. Willner and Matthias Mengel}, 197 | title = {Pymagicc: A Python wrapper for the simple climate model MAGICC}, 198 | journal = {Journal of Open Source Software} 199 | } 200 | 201 | @Article{Gasser_2020_asdfjk, 202 | AUTHOR = {Gasser, T. and Crepin, L. and Quilcaille, Y. and Houghton, R. A. and Ciais, P. 
and Obersteiner, M.}, 203 | TITLE = {Historical {CO$_2$} emissions from land use and land cover change and their 204 | uncertainty}, 205 | JOURNAL = {Biogeosciences}, 206 | VOLUME = {17}, 207 | YEAR = {2020}, 208 | NUMBER = {15}, 209 | PAGES = {4075--4101}, 210 | URL = {https://bg.copernicus.org/articles/17/4075/2020/}, 211 | DOI = {10.5194/bg-17-4075-2020} 212 | } 213 | 214 | @article{Goodwin_2019_ggfp6s, 215 | author = "Goodwin, Philip and Williams, Richard G. and Roussenov, Vassil M. and Katavouta, Anna", 216 | doi = "10.1029/2019gl082887", 217 | year = "2019", 218 | month = "jul", 219 | publisher = "American Geophysical Union ({AGU})", 220 | volume = "46", 221 | number = "13", 222 | pages = "7554--7564", 223 | title = "Climate Sensitivity From Both Physical and Carbon Cycle Feedbacks", 224 | journal = "Geophysical Research Letters" 225 | } 226 | -------------------------------------------------------------------------------- /paper.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: 'OpenSCM Two Layer Model: A Python implementation of the two-layer climate model' 3 | tags: 4 | - Python 5 | - climate science 6 | - temperature projections 7 | - simple climate model 8 | - energy balance 9 | - reduced complexity climate model 10 | authors: 11 | - name: Zebedee Nicholls 12 | orcid: 0000-0002-4767-2723 13 | affiliation: "1, 2, 3" 14 | - name: Jared Lewis 15 | orcid: 0000-0002-8155-8924 16 | affiliation: "1, 2, 3" 17 | affiliations: 18 | - name: Australian-German Climate & Energy College, The University of Melbourne, Parkville, Victoria, Australia 19 | index: 1 20 | - name: School of Geography, Earth and Atmosphere Sciences, The University of Melbourne, Parkville, Victoria, Australia 21 | index: 2 22 | - name: Climate Resource, Northcote, Victoria, Australia 23 | index: 3 24 | date: 7 October 2020 25 | bibliography: paper.bib 26 | --- 27 | 28 | # Summary 29 | 30 | The evolution of the climate is controlled by highly 
complex physical dynamics. 31 | However, simplified representations are surprisingly powerful tools for understanding these dynamics [@held_2010_two_layer] and making climate projections [@meinshausen_2011_rcp]. 32 | The field of simple climate modelling is widely used, in particular for assessing the climatic implications of large numbers of different emissions scenarios, a task that cannot be performed with more complex models because of computational constraints. 33 | 34 | One of the most commonly used models of the climate's response to changes in the "Earth's energy balance" 35 | (energy input compared to energy output of the earth system) is the two-layer model originally introduced by @held_2010_two_layer. 36 | While this model must be given energy imbalances (more precisely, radiative forcing) rather than emissions, it is nonetheless a widely used tool within climate science. 37 | Approximately speaking, the model represents the Earth System as an ocean with two-layers. 38 | The upper layer absorbs excess heat in the Earth System and then exchanges heat with the deep layer. 39 | As a result, in response to a perturbation, the model responds with a distinctive two-timescale response, commonly referred to as the "fast" and "slow" warming components. 40 | Since @held_2010_two_layer, the model has been extended to include updated representations of the efficiency of ocean heat uptake [@geoffroy_2013_two_layer2] as well as a state-dependent response to radiative forcing [@bloch_johnson_2015_feedback_dependence; @rohrschneider_2019_simple]. 41 | 42 | There are many simple climate models in the scientific literature [@rcmip_phase_1]. 43 | Given the context of this paper, we provide below a table of openly accessible models, their programming language, and their approach. 
44 | These models are conceptually similar to the two-layer model implemented here except they use different parameterisations for ocean heat uptake and the relationship between ocean heat uptake and warming. 45 | On top of the relationship between ocean heat uptake and warming, these models also implement many other components of the climate system, e.g., carbon cycle, methane cycle, and the relationship between changes in atmospheric greenhouse gas concentrations and atmospheric energy fluxes. 46 | The exception is the FaIR model [@smith_2018_fairv1_3], which uses the two-layer model as its thermal core. 47 | 48 | OpenSCM Two Layer Model is an object-oriented and open-source implementation of the two-layer model. 49 | It is written in Python, a user-friendly open-source language that is popular in the climate sciences, and uses the Pint package [@pint], a widely used units library, for unit handling. 50 | It provides an extensible interface for the two-layer model, which could then be coupled with other modules as researchers see fit. 51 | The implementation also provides an easy way to convert between the two-layer model of @held_2010_two_layer and the mathematically equivalent two-timescale impulse response model, used most notably as the thermal core of the FaIR model [@smith_2018_fairv1_3]. 52 | The conversion between the two is an implementation of the proof by @geoffroy_2013_two_layer1. 
53 | 54 | | Model | Brief description and URL | Language | 55 | |-------|---------------------------|----------| 56 | | [FaIR](https://github.com/OMS-NetZero/FAIR) | Modified impulse response [@smith_2018_fairv1_3], [github.com/OMS-NetZero/FAIR](https://github.com/OMS-NetZero/FAIR) | Python | 57 | | [GREB](https://github.com/christianstassen/greb-official) | Coarse grid energy balance [@Dommenget_2011_greb], [github.com/christianstassen/greb-official](https://github.com/christianstassen/greb-official) | Fortran 90 | 58 | | [Hector](https://github.com/JGCRI/hector) | Upwelling-diffusion ocean energy balance [@hartin_2015_hector], [github.com/JGCRI/hector](https://github.com/JGCRI/hector) | C++ | 59 | | [MAGICC](http://magicc.org) | Upwelling-diffusion ocean four-box (hemispheric land/ocean) energy balance [@Meinshausen_2011_magicc], [live.magicc.org](http://live.magicc.org) (Pymagicc [@Gieseke_2018_pymagicc] provides a Python wrapper in [github.com/openclimatedata/pymagicc](https://github.com/openclimatedata/pymagicc)) | Fortran 90 | 60 | | [OSCAR](https://github.com/tgasser/OSCAR) | Energy balance with book-keeping land carbon cycle [@Gasser_2020_asdfjk], [github.com/tgasser/OSCAR](https://github.com/tgasser/OSCAR) | Python | 61 | | [WASP](https://github.com/WASP-ESM/WASP_Earth_System_Model) | Energy balance with 8-box carbon cycle [@Goodwin_2019_ggfp6s], [github.com/WASP-ESM/WASP_Earth_System_Model](https://github.com/WASP-ESM/WASP_Earth_System_Model) | C++ | 62 | 63 | : Brief overview of other simple climate models available in the scientific literature. Shown is the model name, a brief description and relevant URL(s), and the programming language in which the model is written. The programming language shown is the one used for the model's core; other languages might be used in the development repositories for, e.g., plotting. For a more extensive list of simple climate models and references which describe the models in detail, see Table 1 of @rcmip_phase_1. 
64 | 65 | # Statement of need 66 | 67 | OpenSCM Two Layer Model was designed to provide a clean, modularised, extensible interface for one of the most commonly used simple climate models. 68 | It was used in Phase 1 of the Reduced Complexity Model Intercomparison Project [@rcmip_phase_1] as a point of comparison for the other participating models. 69 | 70 | The FaIR model [@fair_repo] implements a mathematically equivalent model (under certain assumptions) but does not provide as clear a conversion between the two-layer model and the two-timescale response as is provided here. 71 | We hope that this implementation could interface with other simple climate models like FaIR to allow simpler exploration of the combined behaviour of interacting climate components with minimal coupling headaches. 72 | 73 | As implemented here, the OpenSCM Two Layer Model interface is intended to be used in research or education. 74 | 75 | # Acknowledgements 76 | 77 | We thank Robert Gieseke for comments on the manuscript and for all of his efforts within the OpenSCM project. 78 | 79 | # References 80 | -------------------------------------------------------------------------------- /scripts/check_requirements_in_readme.py: -------------------------------------------------------------------------------- 1 | # find the requirements in setup.py, there must be a better way to do this... 2 | with open("setup.py") as fh: 3 | for line in fh.readlines(): 4 | line = line.strip() 5 | if line.startswith("REQUIREMENTS ="): 6 | if "[" in line and "]" in line: 7 | exec(line) 8 | else: 9 | raise NotImplementedError("multi-line requirements") 10 | 11 | 12 | # find the requirements in the README 13 | requirements_readme = [] 14 | with open("README.rst") as fh: 15 | in_requirements = False 16 | for line in fh.readlines(): 17 | line = line.strip() 18 | if line == ".. begin-dependencies": 19 | in_requirements = True 20 | elif line == ".. 
end-dependencies": 21 | in_requirements = False 22 | elif in_requirements and line: 23 | requirements_readme.append(line.strip("-").strip()) 24 | 25 | requirements_set = set(REQUIREMENTS) 26 | requirements_readme_set = set(requirements_readme) 27 | 28 | assert requirements_set == requirements_readme_set, ( 29 | "Requirements: {}\n" 30 | "Requirements in README: {}\n" 31 | "Requirements - Requirements in README: {}\n" 32 | "Requirements in README - Requirements: {}".format( 33 | requirements_set, 34 | requirements_readme_set, 35 | requirements_set - requirements_readme_set, 36 | requirements_readme_set - requirements_set, 37 | ) 38 | ) 39 | -------------------------------------------------------------------------------- /scripts/test_install.py: -------------------------------------------------------------------------------- 1 | """ 2 | Test that all of our modules can be imported 3 | 4 | Thanks https://stackoverflow.com/a/25562415/10473080 5 | """ 6 | import importlib 7 | import pkgutil 8 | 9 | import openscm_twolayermodel 10 | 11 | 12 | def import_submodules(package_name): 13 | package = importlib.import_module(package_name) 14 | 15 | for _, name, is_pkg in pkgutil.walk_packages(package.__path__): 16 | full_name = package.__name__ + "." 
def import_submodules(package_name):
    """
    Import ``package_name`` and, recursively, every submodule below it

    Parameters
    ----------
    package_name : str
        Importable (dotted) name of the package to walk
    """
    package = importlib.import_module(package_name)

    for _, name, is_pkg in pkgutil.walk_packages(package.__path__):
        full_name = "{}.{}".format(package.__name__, name)
        importlib.import_module(full_name)
        if is_pkg:
            # descend into sub-packages so deeply nested modules are
            # imported too
            import_submodules(full_name)
-------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import versioneer 2 | from setuptools import find_packages, setup 3 | from setuptools.command.test import test as TestCommand 4 | 5 | PACKAGE_NAME = "openscm-twolayermodel" 6 | AUTHORS = [ 7 | ("Zeb Nicholls", "zebedee.nicholls@climate-energy-college.org"), 8 | ("Jared Lewis", "jared.lewis@climate-energy-college.org"), 9 | ] 10 | URL = "https://github.com/openscm/openscm-twolayermodel" 11 | 12 | DESCRIPTION = ( 13 | "Implementations of the two layer radiative forcing driven models by " 14 | "`Held et al. `_ " 15 | "and `Geoffroy et al. `_" 16 | ) 17 | LICENSE = "3-Clause BSD License" 18 | KEYWORDS = [ 19 | "openscm-twolayermodel", 20 | "openscm", 21 | "python", 22 | "repo", 23 | "simple", 24 | "climate", 25 | "model", 26 | ] 27 | README = "README.rst" 28 | CLASSIFIERS = [ # full list at https://pypi.org/pypi?%3Aaction=list_classifiers 29 | "Development Status :: 4 - Beta", 30 | "License :: OSI Approved :: BSD License", 31 | "Intended Audience :: Developers", 32 | "Operating System :: OS Independent", 33 | "Programming Language :: Python :: 3.6", 34 | "Programming Language :: Python :: 3.7", 35 | "Programming Language :: Python :: 3.8", 36 | "Programming Language :: Python :: 3.9", 37 | ] 38 | 39 | SOURCE_DIR = "src" 40 | 41 | REQUIREMENTS = ["scmdata>=0.9", "tqdm"] 42 | REQUIREMENTS_NOTEBOOKS = [ 43 | "ipywidgets", 44 | "notebook", 45 | "seaborn", 46 | ] 47 | REQUIREMENTS_TESTS = [ 48 | "codecov", 49 | "coverage", 50 | "nbval", 51 | "pytest-cov", 52 | "pytest>=4.0", 53 | "scipy", 54 | ] 55 | REQUIREMENTS_DOCS = REQUIREMENTS_NOTEBOOKS + [ 56 | "nbsphinx", 57 | "sphinx>=1.4", 58 | "sphinx_rtd_theme", 59 | "sphinx-click", 60 | "sphinx-copybutton", 61 | ] 62 | REQUIREMENTS_DEPLOY = ["twine>=1.11.0", "setuptools>=38.6.0", "wheel>=0.31.0"] 63 | 64 | REQUIREMENTS_DEV = [ 65 | *[ 66 | "bandit", 67 | 
"black==19.10b0", 68 | "black-nb", 69 | "flake8", 70 | "isort<5", # isort 5 incompatible with pylint 71 | "mypy", 72 | "nbdime", 73 | "pydocstyle", 74 | "pylint>=2.4.4", 75 | ], 76 | *REQUIREMENTS_DEPLOY, 77 | *REQUIREMENTS_DOCS, 78 | *REQUIREMENTS_NOTEBOOKS, 79 | *REQUIREMENTS_TESTS, 80 | ] 81 | 82 | REQUIREMENTS_EXTRAS = { 83 | "deploy": REQUIREMENTS_DEPLOY, 84 | "dev": REQUIREMENTS_DEV, 85 | "docs": REQUIREMENTS_DOCS, 86 | "notebooks": REQUIREMENTS_NOTEBOOKS, 87 | "tests": REQUIREMENTS_TESTS, 88 | } 89 | 90 | # Get the long description from the README file 91 | with open(README, "r") as f: 92 | README_LINES = ["Openscm Two Layer Model", "=======================", ""] 93 | add_line = False 94 | for line in f: 95 | if line.strip() == ".. sec-begin-long-description": 96 | add_line = True 97 | elif line.strip() == ".. sec-end-long-description": 98 | break 99 | elif add_line: 100 | README_LINES.append(line.strip()) 101 | 102 | if len(README_LINES) < 3: 103 | raise RuntimeError("Insufficient description given") 104 | 105 | 106 | class OpenscmTwoLayermodel(TestCommand): 107 | def finalize_options(self): 108 | TestCommand.finalize_options(self) 109 | self.test_args = [] 110 | self.test_suite = True 111 | 112 | def run_tests(self): 113 | import pytest 114 | 115 | pytest.main(self.test_args) 116 | 117 | 118 | cmdclass = versioneer.get_cmdclass() 119 | cmdclass.update({"test": OpenscmTwoLayermodel}) 120 | 121 | setup( 122 | name=PACKAGE_NAME, 123 | version=versioneer.get_version(), 124 | description=DESCRIPTION, 125 | long_description="\n".join(README_LINES), 126 | long_description_content_type="text/x-rst", 127 | author=", ".join([author[0] for author in AUTHORS]), 128 | author_email=", ".join([author[1] for author in AUTHORS]), 129 | url=URL, 130 | license=LICENSE, 131 | classifiers=CLASSIFIERS, 132 | keywords=KEYWORDS, 133 | packages=find_packages(SOURCE_DIR), # no exclude as only searching in `src` 134 | package_dir={"": SOURCE_DIR}, 135 | # include_package_data=True, 
    install_requires=REQUIREMENTS,
    extras_require=REQUIREMENTS_EXTRAS,
    cmdclass=cmdclass,
)

# ---- src/openscm_twolayermodel/__init__.py ----
"""
OpenSCM Two Layer Model, implementations of the two layer radiative forcing driven models by `Held et al. `_ and `Geoffroy et al. `_.

See README and docs for more info.
"""

from ._version import get_versions
from .impulse_response_model import ImpulseResponseModel  # noqa
from .two_layer_model import TwoLayerModel  # noqa

# Version string is computed by versioneer (see _version.py); drop the helper
# so it does not leak into the package namespace.
__version__ = get_versions()["version"]
del get_versions

# ---- src/openscm_twolayermodel/_version.py ----

# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.

# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)

"""Git implementation of _version.py."""

import errno
import os
import re
import subprocess
import sys


def get_keywords():
    """Get the keywords needed to look up the version information."""
    # these strings will be replaced by git during git-archive.
    # setup.py/versioneer.py will grep for the variable names, so they must
    # each be defined on a line of their own. _version.py will just call
    # get_keywords().
    # These were expanded by git-archive / versioneer for this checkout.
    git_refnames = " (HEAD -> master)"
    git_full = "ec212a28d733d459765f8dcf9cfb2f96ccbb8c36"
    git_date = "2021-06-15 22:38:03 +1000"
    keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
    return keywords


class VersioneerConfig:
    """Container for Versioneer configuration parameters."""


def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # these strings are filled in when 'setup.py versioneer' creates
    # _version.py
    cfg = VersioneerConfig()
    cfg.VCS = "git"
    cfg.style = "pep440"
    cfg.tag_prefix = "v"
    cfg.parentdir_prefix = "openscm_twolayermodel-"
    cfg.versionfile_source = "src/openscm_twolayermodel/_version.py"
    cfg.verbose = False
    return cfg


class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario."""


LONG_VERSION_PY = {}
HANDLERS = {}


def register_vcs_handler(vcs, method):  # decorator
    """Decorator to mark a method as the handler for a particular VCS."""
    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        if vcs not in HANDLERS:
            HANDLERS[vcs] = {}
        HANDLERS[vcs][method] = f
        return f
    return decorate


def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
                env=None):
    """Call the given command(s)."""
    # Try each candidate executable name in ``commands`` in turn; the first
    # one that can actually be spawned wins. Returns (stdout, returncode),
    # with (None, None) if no candidate could be started.
    assert isinstance(commands, list)
    p = None
    for c in commands:
        try:
            dispcmd = str([c] + args)
            # remember shell=False, so use git.cmd on windows, not just git
            p = subprocess.Popen([c] + args, cwd=cwd, env=env,
                                 stdout=subprocess.PIPE,
                                 stderr=(subprocess.PIPE if hide_stderr
                                         else None))
            break
        except EnvironmentError:
            e = sys.exc_info()[1]
            if e.errno == errno.ENOENT:
                # executable not found under this name -> try the next one
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None, None
    else:
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = p.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if p.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, p.returncode
    return stdout, p.returncode


def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes both
    the project name and a version string. We will also support searching up
    two directory levels for an appropriately named parent directory
    """
    rootdirs = []

    # Walk up to three directory levels looking for ``<prefix><version>``.
    for i in range(3):
        dirname = os.path.basename(root)
        if dirname.startswith(parentdir_prefix):
            return {"version": dirname[len(parentdir_prefix):],
                    "full-revisionid": None,
                    "dirty": False, "error": None, "date": None}
        else:
            rootdirs.append(root)
            root = os.path.dirname(root)  # up a level

    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(rootdirs), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")


@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file."""
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    # Scan the expanded-keywords file line by line rather than importing it.
    keywords = {}
    try:
        f = open(versionfile_abs, "r")
        for line in f.readlines():
            if line.strip().startswith("git_refnames ="):
                mo = re.search(r'=\s*"(.*)"', line)
                if mo:
                    keywords["refnames"] = mo.group(1)
            if line.strip().startswith("git_full ="):
                mo = re.search(r'=\s*"(.*)"', line)
                if mo:
                    keywords["full"] = mo.group(1)
            if line.strip().startswith("git_date ="):
                mo = re.search(r'=\s*"(.*)"', line)
                if mo:
                    keywords["date"] = mo.group(1)
        f.close()
    except EnvironmentError:
        # missing/unreadable file -> return whatever (possibly nothing) we got
        pass
    return keywords


@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords."""
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
        # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
        # -like" string, which we must then edit to make compliant), because
        # it's been around since git-1.5.3, and it's too difficult to
        # discover which version we're using, or to work around using an
        # older one.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = set([r.strip() for r in refnames.strip("()").split(",")])
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = set([r for r in refs if re.search(r'\d', r)])
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % r)
            return {"version": r,
                    "full-revisionid": keywords["full"].strip(),
                    "dirty": False, "error": None,
                    "date": date}
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags", "date": None}


@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]

    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
                          hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")

    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
                                          "--always", "--long",
                                          "--match", "%s*" % tag_prefix],
                                   cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()

    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None

    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out

    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]

    # now we have TAG-NUM-gHEX or HEX

    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
def plus_or_dot(pieces):
    """Return ``"."`` if the closest tag already contains a ``+``, else ``"+"``."""
    return "." if "+" in pieces.get("closest-tag", "") else "+"


def render_pep440(pieces):
    """Build a PEP 440 version string with a post-release local identifier.

    Shape: TAG[+DISTANCE.gHEX[.dirty]]. A tagged build that is then dirtied
    renders as TAG+0.gHEX.dirty; with no tags at all the result is
    0+untagged.DISTANCE.gHEX[.dirty].
    """
    tag = pieces["closest-tag"]
    if not tag:
        # no tags at all -> synthetic "0+untagged" local version
        version = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
        return version

    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += plus_or_dot(pieces)
        version += "%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
    return version


def render_pep440_pre(pieces):
    """Render as TAG[.post.devDISTANCE] -- no -dirty marker.

    With no tags: 0.post.devDISTANCE.
    """
    tag = pieces["closest-tag"]
    if not tag:
        return "0.post.dev%d" % pieces["distance"]
    if pieces["distance"]:
        return "%s.post.dev%d" % (tag, pieces["distance"])
    return tag


def render_pep440_post(pieces):
    """Render as TAG[.postDISTANCE[.dev0]+gHEX].

    The ".dev0" marks a dirty tree. Note that .dev0 sorts backwards (a dirty
    tree appears "older" than the corresponding clean one), but one should
    not be releasing -dirty software anyway. With no tags:
    0.postDISTANCE[.dev0]+gHEX.
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        if pieces["distance"] or pieces["dirty"]:
            version += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                version += ".dev0"
            version += plus_or_dot(pieces) + "g" + pieces["short"]
        return version

    version = "0.post%d" % pieces["distance"]
    if pieces["dirty"]:
        version += ".dev0"
    return version + "+g" + pieces["short"]


def render_pep440_old(pieces):
    """Render as TAG[.postDISTANCE[.dev0]] (no local segment).

    The ".dev0" marks a dirty tree. With no tags: 0.postDISTANCE[.dev0].
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        if pieces["distance"] or pieces["dirty"]:
            version += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                version += ".dev0"
        return version

    version = "0.post%d" % pieces["distance"]
    if pieces["dirty"]:
        version += ".dev0"
    return version


def render_git_describe(pieces):
    """Render as TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty --always'.

    With no tags: HEX[-dirty] (note: no 'g' prefix).
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        if pieces["distance"]:
            version += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        version = pieces["short"]
    return version + "-dirty" if pieces["dirty"] else version


def render_git_describe_long(pieces):
    """Render as TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty --always -long'.

    The distance/hash is unconditional. With no tags: HEX[-dirty]
    (note: no 'g' prefix).
    """
    tag = pieces["closest-tag"]
    if tag:
        version = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        version = pieces["short"]
    return version + "-dirty" if pieces["dirty"] else version


def render(pieces, style):
    """Render the given version pieces into the requested style.

    Raises ValueError for an unknown ``style``; an error recorded in
    ``pieces`` short-circuits to an "unknown" version dict.
    """
    if pieces["error"]:
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}

    style = style or "default"
    if style == "default":
        style = "pep440"  # the default

    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    renderer = renderers.get(style)
    if renderer is None:
        raise ValueError("unknown style '%s'" % style)

    return {"version": renderer(pieces),
            "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"],
            "error": None,
            "date": pieces.get("date")}

    cfg = get_config()
    verbose = cfg.verbose

    # Strategy 1: expanded git-archive keywords (works for tarballs).
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass

    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for i in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree",
                "date": None}

    # Strategy 2: ask git directly (works in a checked-out source tree).
    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass

    # Strategy 3: infer the version from the parent directory name.
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass

    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version", "date": None}

# ---- src/openscm_twolayermodel/base.py ----
"""
Module containing the base for model implementations
"""
from abc import ABC, abstractmethod

import numpy as np
import pandas as pd
import pint
import pint.errors
import tqdm.autonotebook as tqdman
from openscm_units import unit_registry as ur
from scmdata.run import ScmRun

from .constants import DENSITY_WATER, HEAT_CAPACITY_WATER
from .errors import UnitError

# pylint: disable=invalid-name


class Model(ABC):
    """
    Base class for model implementations
    """

    # Template-method pattern: public ``reset``/``run``/``step`` delegate to
    # the abstract ``_reset``/``_run``/``_step`` hooks of subclasses.

    _save_paras = tuple()  # parameters to save when doing a run

    _name = None  # model name

    @staticmethod
    def _assert_is_pint_quantity_with_units(quantity, name, model_units):
        # Validate that ``quantity`` is a pint quantity convertible to
        # ``model_units``; raises TypeError / UnitError otherwise.
        if not isinstance(quantity, pint.Quantity):
            raise TypeError("{} must be a pint.Quantity".format(name))

        try:
            quantity.to(model_units)
        except pint.errors.DimensionalityError as exc:
            raise UnitError("Wrong units for `{}`".format(name)) from exc

    @abstractmethod
    def set_drivers(self, *args, **kwargs):
        """
        Set the model's drivers
        """

    def reset(self):
        """
        Reset everything so that a new run can be performed.

        Called as late as possible before :meth:`run`.
        """
        self._reset()

    @abstractmethod
    def _reset(self):
        pass

    def run(self):
        """
        Run the model.
        """
        self._run()

    @abstractmethod
    def _run(self):
        pass

    def step(self):
        """
        Do a single time step.
        """
        self._step()

    @abstractmethod
    def _step(self):
        pass


class TwoLayerVariant(Model):
    """
    Base for variations of implementations of the two-layer model
    """

    # Units the magnitudes are stored in internally (``*_mag`` attributes).
    _delta_t_unit = "s"
    _erf_unit = "W/m^2"

    @property
    def delta_t(self):
        """
        :obj:`pint.Quantity`
            Time step for forward-differencing approximation
        """
        return self._delta_t

    @delta_t.setter
    def delta_t(self, val):
        # Keep both the pint quantity and its plain-magnitude counterpart
        # (the latter is what the numerical stepping uses).
        self._assert_is_pint_quantity_with_units(val, "delta_t", self._delta_t_unit)
        self._delta_t = val
        self._delta_t_mag = val.to(self._delta_t_unit).magnitude

    @property
    def erf(self):
        """
        :obj:`pint.Quantity`
            Effective radiative forcing
        """
        return self._erf

    @erf.setter
    def erf(self, val):
        self._assert_is_pint_quantity_with_units(val, "erf", self._erf_unit)
        self._erf = val
        self._erf_mag = val.to(self._erf_unit).magnitude

    def set_drivers(
        self, erf
    ):  # pylint: disable=arguments-differ # hmm need to think about this
        """
        Set drivers for a model run

        Parameters
        ----------
        erf : :obj:`pint.Quantity`
            Effective radiative forcing (W/m^2) to use to drive the model

        Raises
        ------
        AssertionError
            ``erf`` is not one-dimensional
        """
        if len(erf.shape) != 1:
            raise AssertionError("erf must be one-dimensional")

        self.erf = erf

    @staticmethod
    def _ensure_scenarios_are_scmrun(scenarios):
        # Coerce the input into an ScmRun; copy if it already is one so the
        # caller's object is never mutated.
        if not isinstance(scenarios, ScmRun):
            driver = ScmRun(scenarios)
        else:
            driver = scenarios.copy()

        return driver

    @staticmethod
    def _create_ts(base, unit, variable, values):
        # Build an output timeseries from ``base``, relabelling the unit and
        # variable index levels and overwriting the data with ``values``.
        out = base.copy()
        out.index = out.index.set_levels([unit], "unit")
        out.index = out.index.set_levels([variable], "variable")
        out.iloc[:, :] = values

        return out

    @staticmethod
    def _select_timestep(driver):
        # Heuristic: detect yearly or (approximately) monthly spacing in the
        # driver's time axis; anything else is unsupported.
        year_diff = driver["year"].diff().dropna()
        if (year_diff == 1).all():
            # assume yearly timesteps
            return 1 * ur("yr")

        time_diff = driver["time"].diff().dropna()
        if all(
            np.logical_and(
                time_diff <= np.timedelta64(31, "D"),
                time_diff >= np.timedelta64(28, "D"),
            )
        ):
            # Assume constant monthly timesteps. This is clearly an approximation but
            # while we have constant internal timesteps it's the best we can do.
            return 1 * ur("month")

        raise NotImplementedError(
            "Could not decide on timestep for time axis: {}".format(driver["time"])
        )

    def run_scenarios(  # pylint:disable=too-many-locals
        self, scenarios, driver_var="Effective Radiative Forcing", progress=True,
    ):
        """
        Run scenarios.

        The model timestep is automatically adjusted based on the timestep used in ``scenarios``.
        The timestep used in ``scenarios`` must be constant because this implementation
        has a constant timestep. Pull requests to upgrade the implementation to support
        variable timesteps are welcome ``_.

        Parameters
        ----------
        scenarios : :obj:`ScmDataFrame` or :obj:`ScmRun` or :obj:`pyam.IamDataFrame` or :obj:`pd.DataFrame` or :obj:`np.ndarray` or str
            Scenarios to run. The input will be converted to an :obj:`ScmRun` before
            the run takes place.

        driver_var : str
            The variable in ``scenarios`` to use as the driver of the model

        progress : bool
            Whether to display a progress bar

        Returns
        -------
        :obj:`ScmRun`
            Results of the run (including drivers)

        Raises
        ------
        ValueError
            No data is available for ``driver_var`` in the ``"World"`` region in
            ``scenarios``.
        """
        driver = self._ensure_scenarios_are_scmrun(scenarios)

        # Record the model's parameter values as metadata on the output,
        # e.g. "d1 (yr)" -> 9.0.
        save_paras_meta = {
            "{} ({})".format(k, getattr(self, k).units): getattr(self, k).magnitude
            for k in self._save_paras
        }

        driver = driver.filter(variable=driver_var, region="World")
        if np.equal(driver.shape[0], 0):
            raise ValueError(
                "No World data available for driver_var `{}`".format(driver_var)
            )

        driver["climate_model"] = self._name
        for k, v in save_paras_meta.items():
            driver[k] = v

        timestep = self._select_timestep(driver)
        self.delta_t = timestep

        run_store = list()

        # Run each timeseries (scenario) independently through the model.
        driver_ts = driver.timeseries()
        for i, (label, row) in tqdman.tqdm(
            enumerate(driver_ts.iterrows()),
            desc="scenarios",
            leave=False,
            disable=not (progress),
        ):
            meta = dict(zip(driver_ts.index.names, label))
            row_no_nan = row.dropna()

            # drivers carry the units recorded in the scenario's metadata
            self.set_drivers(row_no_nan.values * ur(meta["unit"]))
            self.reset()
            self.run()

            out_run_tss_base = row_no_nan.to_frame().T
            out_run_tss_base.index.names = driver_ts.index.names

            # output = the driver itself plus the model's output timeseries
            out_run_tss = [out_run_tss_base]
            out_run_tss += self._get_run_output_tss(out_run_tss[0])

            out_run = ScmRun(pd.concat(out_run_tss))
            out_run["run_idx"] = i

            run_store.append(out_run)

        idx = run_store[0].meta.columns.tolist()

        def get_ordered_timeseries(in_ts):
            # align index level order across runs before concatenating
            in_ts = in_ts.reorder_levels(idx)

            return in_ts

        out = ScmRun(
            pd.concat(
                [get_ordered_timeseries(r.timeseries()) for r in run_store], axis=0
            )
        )

        return out

    @abstractmethod
    def _get_run_output_tss(self, ts_base):
        """Get the run output timeseries as a list"""


def _calculate_geoffroy_helper_parameters(  # pylint:disable=too-many-locals
    du, dl, lambda0, efficacy, eta
):
    # Heat capacities of the upper (C) and deep (C_D) ocean layers,
    # derived from the layer depths.
    C = du * 
HEAT_CAPACITY_WATER * DENSITY_WATER
    C_D = dl * HEAT_CAPACITY_WATER * DENSITY_WATER

    # Intermediate quantities of the two-layer model's analytic solution
    # (appear to follow Geoffroy et al. (2013) -- TODO confirm against paper).
    b_pt1 = (lambda0 + efficacy * eta) / (C)
    b_pt2 = (eta) / (C_D)
    b = b_pt1 + b_pt2
    b_star = b_pt1 - b_pt2
    delta = b ** 2 - (4 * lambda0 * eta) / (C * C_D)

    # tau1/tau2: the two response timescales of the coupled system
    taucoeff = C * C_D / (2 * lambda0 * eta)
    tau1 = taucoeff * (b - delta ** 0.5)
    tau2 = taucoeff * (b + delta ** 0.5)

    phicoeff = C / (2 * efficacy * eta)
    phi1 = phicoeff * (b_star - delta ** 0.5)
    phi2 = phicoeff * (b_star + delta ** 0.5)

    # a1/a2: weights of the two response modes
    adenom = C * (phi2 - phi1)
    a1 = tau1 * phi2 * lambda0 / adenom
    a2 = -tau2 * phi1 * lambda0 / adenom

    out = {
        "C": C,
        "C_D": C_D,
        "b": b,
        "b_star": b_star,
        "delta": delta,
        "tau1": tau1,
        "tau2": tau2,
        "phi1": phi1,
        "phi2": phi2,
        "a1": a1,
        "a2": a2,
    }

    return out

# ---- src/openscm_twolayermodel/constants.py ----
"""
Physical constants used in calculations
"""
from openscm_units import unit_registry as ur

DENSITY_WATER = 1000 * ur("kg/m^3")
""":obj:`pint.Quantity` : density of water"""

HEAT_CAPACITY_WATER = 4181 * ur("J/delta_degC/kg")
""":obj:`pint.Quantity` : heat capacity of water"""

# ---- src/openscm_twolayermodel/errors.py ----
"""
Exceptions raised within ``openscm_twolayermodel``
"""


class UnitError(ValueError):
    """
    Exception raised if something has the wrong units
    """


class ModelStateError(ValueError):
    """
    Exception raised if a model's state is incompatible with the action
    """
# ---- src/openscm_twolayermodel/impulse_response_model.py ----
"""
Module containing the impulse response model

The 2-timescale impulse response model is mathematically equivalent to the
two-layer model without state dependence.
"""
import numpy as np
from openscm_units import unit_registry as ur

from .base import TwoLayerVariant, _calculate_geoffroy_helper_parameters
from .constants import DENSITY_WATER, HEAT_CAPACITY_WATER
from .errors import ModelStateError

# pylint: disable=invalid-name


class ImpulseResponseModel(
    TwoLayerVariant
):  # pylint: disable=too-many-instance-attributes
    """
    TODO: top line and paper references

    This implementation uses a forward-differencing approach. This means that
    temperature and ocean heat uptake values are start of timestep values. For
    example, temperature[i] is only affected by drivers from the i-1 timestep.
    In practice, this means that the first temperature and ocean heat uptake
    values will always be zero and the last value in the input drivers has no
    effect on model output.
    """

    # Units each parameter's magnitude is stored in internally.
    _d1_unit = "yr"
    _d2_unit = "yr"
    _q1_unit = "delta_degC/(W/m^2)"
    _q2_unit = "delta_degC/(W/m^2)"
    _efficacy_unit = "dimensionless"
    _delta_t_unit = "yr"

    _erf_unit = "W/m^2"

    _temp1_unit = "delta_degC"
    _temp2_unit = "delta_degC"
    _rndt_unit = "W/m^2"

    _save_paras = (  # parameters to save when doing a run
        "d1",
        "d2",
        "q1",
        "q2",
        "efficacy",
    )

    _name = "two_timescale_impulse_response"  # model name

    def __init__(
        self,
        q1=0.3 * ur("delta_degC/(W/m^2)"),
        q2=0.4 * ur("delta_degC/(W/m^2)"),
        d1=9.0 * ur("yr"),
        d2=400.0 * ur("yr"),
        efficacy=1.0 * ur("dimensionless"),
        delta_t=1 / 12 * ur("yr"),
    ):  # pylint: disable=too-many-arguments
        """
        Initialise

        Raises
        ------
        ValueError
            d1 >= d2, d1 must be the short-timescale
        """
        # The property setters below validate units as each value is assigned.
        self.q1 = q1
        self.q2 = q2
        self.d1 = d1
        self.d2 = d2
        self.efficacy = efficacy
        self.delta_t = delta_t

        # NOTE(review): this validation runs after the attributes above have
        # already been assigned, so a partially-initialised object exists
        # when it raises.
        if d1 >= d2:
            raise ValueError("The short-timescale must be d1")

        # State arrays are NaN until :meth:`set_drivers` / :meth:`reset`.
        self._erf = np.zeros(1) * np.nan
        self._temp1_mag = np.zeros(1) * np.nan
        self._temp2_mag = np.zeros(1) * np.nan
        self._rndt_mag = np.zeros(1) * np.nan
        self._timestep_idx = np.nan

    @property
    def d1(self):
        """
        :obj:`pint.Quantity`
            Response timescale of first box
        """
        return self._d1

    @d1.setter
    def d1(self, val):
        self._assert_is_pint_quantity_with_units(val, "d1", self._d1_unit)
        self._d1 = val
        self._d1_mag = val.to(self._d1_unit).magnitude

    @property
    def d2(self):
        """
        :obj:`pint.Quantity`
            Response timescale of second box
        """
        return self._d2

    @d2.setter
    def d2(self, val):
        self._assert_is_pint_quantity_with_units(val, "d2", self._d2_unit)
        self._d2 = val
        self._d2_mag = 
val.to(self._d2_unit).magnitude 114 | 115 | @property 116 | def q1(self): 117 | """ 118 | :obj:`pint.Quantity` 119 | Sensitivity of first box response to radiative forcing 120 | """ 121 | return self._q1 122 | 123 | @q1.setter 124 | def q1(self, val): 125 | self._assert_is_pint_quantity_with_units(val, "q1", self._q1_unit) 126 | self._q1 = val 127 | self._q1_mag = val.to(self._q1_unit).magnitude 128 | 129 | @property 130 | def q2(self): 131 | """ 132 | :obj:`pint.Quantity` 133 | Sensitivity of second box response to radiative forcing 134 | """ 135 | return self._q2 136 | 137 | @q2.setter 138 | def q2(self, val): 139 | self._assert_is_pint_quantity_with_units(val, "q2", self._q2_unit) 140 | self._q2 = val 141 | self._q2_mag = val.to(self._q2_unit).magnitude 142 | 143 | @property 144 | def efficacy(self): 145 | """ 146 | :obj:`pint.Quantity` 147 | Efficacy factor 148 | """ 149 | return self._efficacy 150 | 151 | @efficacy.setter 152 | def efficacy(self, val): 153 | self._assert_is_pint_quantity_with_units(val, "efficacy", self._efficacy_unit) 154 | self._efficacy = val 155 | self._efficacy_mag = val.to(self._efficacy_unit).magnitude 156 | 157 | def _reset(self): 158 | if np.isnan(self.erf).any(): 159 | raise ModelStateError( 160 | "The model's drivers have not been set yet, call " 161 | ":meth:`self.set_drivers` first." 
162 | ) 163 | 164 | self._timestep_idx = np.nan 165 | self._temp1_mag = np.zeros_like(self._erf_mag) * np.nan 166 | self._temp2_mag = np.zeros_like(self._erf_mag) * np.nan 167 | self._rndt_mag = np.zeros_like(self._erf_mag) * np.nan 168 | 169 | def _run(self): 170 | for _ in self.erf: 171 | self.step() 172 | 173 | def _step(self): 174 | if np.isnan(self._timestep_idx): 175 | self._timestep_idx = 0 176 | 177 | else: 178 | self._timestep_idx += 1 179 | 180 | if np.equal(self._timestep_idx, 0): 181 | self._temp1_mag[self._timestep_idx] = 0.0 182 | self._temp2_mag[self._timestep_idx] = 0.0 183 | self._rndt_mag[self._timestep_idx] = 0.0 184 | 185 | else: 186 | self._temp1_mag[self._timestep_idx] = self._calculate_next_temp( 187 | self._delta_t_mag, 188 | self._temp1_mag[self._timestep_idx - 1], 189 | self._q1_mag, 190 | self._d1_mag, 191 | self._erf_mag[self._timestep_idx - 1], 192 | ) 193 | 194 | self._temp2_mag[self._timestep_idx] = self._calculate_next_temp( 195 | self._delta_t_mag, 196 | self._temp2_mag[self._timestep_idx - 1], 197 | self._q2_mag, 198 | self._d2_mag, 199 | self._erf_mag[self._timestep_idx - 1], 200 | ) 201 | 202 | self._rndt_mag[self._timestep_idx] = self._calculate_next_rndt( 203 | self._temp1_mag[self._timestep_idx - 1], 204 | self._temp2_mag[self._timestep_idx - 1], 205 | self._erf_mag[self._timestep_idx - 1], 206 | self._efficacy_mag, 207 | ) 208 | 209 | @staticmethod 210 | def _calculate_next_temp(delta_t, t, q, d, erf): 211 | decay_factor = np.exp(-delta_t / d) 212 | rise = erf * q * (1 - np.exp(-delta_t / d)) 213 | 214 | return t * decay_factor + rise 215 | 216 | def _calculate_next_rndt(self, t1, t2, erf, efficacy): 217 | two_layer_paras = self.get_two_layer_parameters() 218 | lambda0 = two_layer_paras["lambda0"] 219 | 220 | if np.equal(efficacy, 1): 221 | efficacy_term = 0 * ur(self._erf_unit) 222 | else: 223 | gh = _calculate_geoffroy_helper_parameters( 224 | two_layer_paras["du"], 225 | two_layer_paras["dl"], 226 | 
two_layer_paras["lambda0"], 227 | two_layer_paras["efficacy"], 228 | two_layer_paras["eta"], 229 | ) 230 | 231 | t1_h = t1 * ur(self._temp1_unit) 232 | t2_h = t2 * ur(self._temp2_unit) 233 | efficacy_term = ( 234 | two_layer_paras["eta"] 235 | * (efficacy - 1) 236 | * ((1 - gh["phi1"]) * t1_h + (1 - gh["phi2"]) * t2_h) 237 | ) 238 | 239 | if str(efficacy_term.units) != "watt / meter ** 2": 240 | raise AssertionError("units should have come out as W/m^2") 241 | 242 | out = erf - lambda0.magnitude * (t1 + t2) - efficacy_term.magnitude 243 | 244 | return out 245 | 246 | def _get_run_output_tss(self, ts_base): 247 | out_run_tss = [] 248 | 249 | out_run_tss.append( 250 | self._create_ts( 251 | base=ts_base, 252 | unit=self._temp1_unit, 253 | variable="Surface Temperature|Box 1", 254 | values=self._temp1_mag, 255 | ) 256 | ) 257 | out_run_tss.append( 258 | self._create_ts( 259 | base=ts_base, 260 | unit=self._temp2_unit, 261 | variable="Surface Temperature|Box 2", 262 | values=self._temp2_mag, 263 | ) 264 | ) 265 | out_run_tss.append( 266 | self._create_ts( 267 | base=ts_base, 268 | unit=self._temp1_unit, 269 | variable="Surface Temperature", 270 | values=self._temp1_mag + self._temp2_mag, 271 | ) 272 | ) 273 | out_run_tss.append( 274 | self._create_ts( 275 | base=ts_base, 276 | unit=self._rndt_unit, 277 | variable="Heat Uptake", 278 | values=self._rndt_mag, 279 | ) 280 | ) 281 | 282 | return out_run_tss 283 | 284 | def get_two_layer_parameters( 285 | self, 286 | ): # pylint:disable=missing-return-doc,missing-return-type-doc 287 | """ 288 | Get equivalent two-layer model parameters 289 | 290 | For details on how the equivalence is calculated, please see the notebook 291 | ``impulse-response-equivalence.ipynb`` in the `OpenSCM Two Layer model 292 | repository `_. 
293 | 294 | Returns 295 | ------- 296 | dict of str : :obj:`pint.Quantity` 297 | Input arguments to initialise an 298 | :obj:`openscm_twolayermodel.TwoLayerModel` with the same 299 | temperature response as ``self`` 300 | """ 301 | lambda0 = 1 / (self.q1 + self.q2) 302 | C = (self.d1 * self.d2) / (self.q1 * self.d2 + self.q2 * self.d1) 303 | 304 | a1 = lambda0 * self.q1 305 | a2 = lambda0 * self.q2 306 | 307 | C_D = (lambda0 * (self.d1 * a1 + self.d2 * a2) - C) / self.efficacy 308 | eta = C_D / (self.d1 * a2 + self.d2 * a1) 309 | 310 | du = C / (DENSITY_WATER * HEAT_CAPACITY_WATER) 311 | dl = C_D / (DENSITY_WATER * HEAT_CAPACITY_WATER) 312 | 313 | out = { 314 | "lambda0": lambda0, 315 | "du": du, 316 | "dl": dl, 317 | "eta": eta, 318 | "efficacy": self.efficacy, 319 | } 320 | 321 | return out 322 | -------------------------------------------------------------------------------- /src/openscm_twolayermodel/two_layer_model.py: -------------------------------------------------------------------------------- 1 | """ 2 | Module containing the two-layer model 3 | """ 4 | import numpy as np 5 | from openscm_units import unit_registry as ur 6 | 7 | from .base import TwoLayerVariant, _calculate_geoffroy_helper_parameters 8 | from .constants import DENSITY_WATER, HEAT_CAPACITY_WATER 9 | from .errors import ModelStateError 10 | 11 | # pylint: disable=invalid-name 12 | 13 | 14 | class TwoLayerModel(TwoLayerVariant): # pylint: disable=too-many-instance-attributes 15 | """ 16 | TODO: top line and paper references 17 | 18 | This implementation uses a forward-differencing approach. This means that 19 | temperature and ocean heat uptake values are start of timestep values. For 20 | example, temperature[i] is only affected by drivers from the i-1 timestep. 21 | In practice, this means that the first temperature and ocean heat uptake 22 | values will always be zero and the last value in the input drivers has no 23 | effect on model output. 
24 | """ 25 | 26 | _du_unit = "m" 27 | _heat_capacity_upper_unit = "J/delta_degC/m^2" 28 | _heat_capacity_lower_unit = "J/delta_degC/m^2" 29 | _dl_unit = "m" 30 | _lambda0_unit = "W/m^2/delta_degC" 31 | _a_unit = "W/m^2/delta_degC^2" 32 | _efficacy_unit = "dimensionless" 33 | _eta_unit = "W/m^2/delta_degC" 34 | _delta_t_unit = "s" 35 | 36 | _erf_unit = "W/m^2" 37 | 38 | _temp_upper_unit = "delta_degC" 39 | _temp_lower_unit = "delta_degC" 40 | _rndt_unit = "W/m^2" 41 | 42 | _save_paras = ( # parameters to save when doing a run 43 | "du", 44 | "dl", 45 | "lambda0", 46 | "a", 47 | "efficacy", 48 | "eta", 49 | ) 50 | 51 | _name = "two_layer" # model name 52 | 53 | def __init__( 54 | self, 55 | du=50 * ur("m"), 56 | dl=1200 * ur("m"), 57 | lambda0=3.74 / 3 * ur("W/m^2/delta_degC"), 58 | a=0.0 * ur("W/m^2/delta_degC^2"), 59 | efficacy=1.0 * ur("dimensionless"), 60 | eta=0.8 * ur("W/m^2/delta_degC"), 61 | delta_t=ur("yr").to("s"), 62 | ): # pylint: disable=too-many-arguments 63 | """ 64 | Initialise 65 | """ 66 | self.du = du 67 | self.dl = dl 68 | self.lambda0 = lambda0 69 | self.a = a 70 | self.efficacy = efficacy 71 | self.eta = eta 72 | self.delta_t = delta_t 73 | 74 | self._erf = np.zeros(1) * np.nan 75 | self._temp_upper_mag = np.zeros(1) * np.nan 76 | self._temp_lower_mag = np.zeros(1) * np.nan 77 | self._rndt_mag = np.zeros(1) * np.nan 78 | self._timestep_idx = np.nan 79 | 80 | @property 81 | def du(self): 82 | """ 83 | :obj:`pint.Quantity` 84 | Depth of upper layer 85 | """ 86 | return self._du 87 | 88 | @du.setter 89 | def du(self, val): 90 | self._assert_is_pint_quantity_with_units(val, "du", self._du_unit) 91 | self._du = val 92 | self._du_mag = val.to(self._du_unit).magnitude 93 | self._heat_capacity_upper_mag = self.heat_capacity_upper.to( 94 | self._heat_capacity_upper_unit 95 | ).magnitude 96 | 97 | @property 98 | def heat_capacity_upper(self): 99 | """ 100 | :obj:`pint.Quantity` 101 | Heat capacity of upper layer 102 | """ 103 | return self.du * 
DENSITY_WATER * HEAT_CAPACITY_WATER 104 | 105 | @property 106 | def dl(self): 107 | """ 108 | :obj:`pint.Quantity` 109 | Depth of lower layer 110 | """ 111 | return self._dl 112 | 113 | @dl.setter 114 | def dl(self, val): 115 | self._assert_is_pint_quantity_with_units(val, "dl", self._dl_unit) 116 | self._dl = val 117 | self._dl_mag = val.to(self._dl_unit).magnitude 118 | self._heat_capacity_lower_mag = self.heat_capacity_lower.to( 119 | self._heat_capacity_lower_unit 120 | ).magnitude 121 | 122 | @property 123 | def heat_capacity_lower(self): 124 | """ 125 | :obj:`pint.Quantity` 126 | Heat capacity of lower layer 127 | """ 128 | return self.dl * DENSITY_WATER * HEAT_CAPACITY_WATER 129 | 130 | @property 131 | def lambda0(self): 132 | """ 133 | :obj:`pint.Quantity` 134 | Initial climate feedback factor 135 | """ 136 | return self._lambda0 137 | 138 | @lambda0.setter 139 | def lambda0(self, val): 140 | self._assert_is_pint_quantity_with_units(val, "lambda0", self._lambda0_unit) 141 | self._lambda0 = val 142 | self._lambda0_mag = val.to(self._lambda0_unit).magnitude 143 | 144 | @property 145 | def a(self): 146 | """ 147 | :obj:`pint.Quantity` 148 | Dependence of climate feedback factor on temperature 149 | """ 150 | return self._a 151 | 152 | @a.setter 153 | def a(self, val): 154 | self._assert_is_pint_quantity_with_units(val, "a", self._a_unit) 155 | self._a = val 156 | self._a_mag = val.to(self._a_unit).magnitude 157 | 158 | @property 159 | def efficacy(self): 160 | """ 161 | :obj:`pint.Quantity` 162 | Efficacy factor 163 | """ 164 | return self._efficacy 165 | 166 | @efficacy.setter 167 | def efficacy(self, val): 168 | self._assert_is_pint_quantity_with_units(val, "efficacy", self._efficacy_unit) 169 | self._efficacy = val 170 | self._efficacy_mag = val.to(self._efficacy_unit).magnitude 171 | 172 | @property 173 | def eta(self): 174 | """ 175 | :obj:`pint.Quantity` 176 | Heat transport efficiency 177 | """ 178 | return self._eta 179 | 180 | @eta.setter 181 | def 
eta(self, val): 182 | self._assert_is_pint_quantity_with_units(val, "eta", self._eta_unit) 183 | self._eta = val 184 | self._eta_mag = val.to(self._eta_unit).magnitude 185 | 186 | def _reset(self): 187 | if np.isnan(self.erf).any(): 188 | raise ModelStateError( 189 | "The model's drivers have not been set yet, call " 190 | ":meth:`self.set_drivers` first." 191 | ) 192 | 193 | self._timestep_idx = np.nan 194 | self._temp_upper_mag = np.zeros_like(self._erf_mag) * np.nan 195 | self._temp_lower_mag = np.zeros_like(self._erf_mag) * np.nan 196 | self._rndt_mag = np.zeros_like(self._erf_mag) * np.nan 197 | 198 | def _run(self): 199 | for _ in self.erf: 200 | self.step() 201 | 202 | def _step(self): 203 | if np.isnan(self._timestep_idx): 204 | self._timestep_idx = 0 205 | 206 | else: 207 | self._timestep_idx += 1 208 | 209 | if np.equal(self._timestep_idx, 0): 210 | self._temp_upper_mag[self._timestep_idx] = 0.0 211 | self._temp_lower_mag[self._timestep_idx] = 0.0 212 | self._rndt_mag[self._timestep_idx] = 0.0 213 | 214 | else: 215 | self._temp_upper_mag[self._timestep_idx] = self._calculate_next_temp_upper( 216 | self._delta_t_mag, 217 | self._temp_upper_mag[self._timestep_idx - 1], 218 | self._temp_lower_mag[self._timestep_idx - 1], 219 | self._erf_mag[self._timestep_idx - 1], 220 | self._lambda0_mag, 221 | self._a_mag, 222 | self._efficacy_mag, 223 | self._eta_mag, 224 | self._heat_capacity_upper_mag, 225 | ) 226 | 227 | self._temp_lower_mag[self._timestep_idx] = self._calculate_next_temp_lower( 228 | self._delta_t_mag, 229 | self._temp_lower_mag[self._timestep_idx - 1], 230 | self._temp_upper_mag[self._timestep_idx - 1], 231 | self._eta_mag, 232 | self._heat_capacity_lower_mag, 233 | ) 234 | 235 | self._rndt_mag[self._timestep_idx] = self._calculate_next_rndt( 236 | self._delta_t_mag, 237 | self._temp_lower_mag[self._timestep_idx], 238 | self._temp_lower_mag[self._timestep_idx - 1], 239 | self._heat_capacity_lower_mag, 240 | self._temp_upper_mag[self._timestep_idx], 
241 | self._temp_upper_mag[self._timestep_idx - 1], 242 | self._heat_capacity_upper_mag, 243 | ) 244 | 245 | @staticmethod 246 | def _calculate_next_temp_upper( # pylint: disable=too-many-arguments 247 | delta_t, t_upper, t_lower, erf, lambda0, a, efficacy, eta, heat_capacity_upper 248 | ): 249 | lambda_now = lambda0 - a * t_upper 250 | heat_exchange = efficacy * eta * (t_upper - t_lower) 251 | dT_dt = (erf - lambda_now * t_upper - heat_exchange) / heat_capacity_upper 252 | 253 | return t_upper + delta_t * dT_dt 254 | 255 | @staticmethod 256 | def _calculate_next_temp_lower( 257 | delta_t, t_lower, t_upper, eta, heat_capacity_lower 258 | ): # pylint: disable=too-many-arguments 259 | heat_exchange = eta * (t_upper - t_lower) 260 | dT_dt = heat_exchange / heat_capacity_lower 261 | 262 | return t_lower + delta_t * dT_dt 263 | 264 | @staticmethod 265 | def _calculate_next_rndt( # pylint: disable=too-many-arguments 266 | delta_t, 267 | t_lower_now, 268 | t_lower_prev, 269 | heat_capacity_lower, 270 | t_upper_now, 271 | t_upper_prev, 272 | heat_capacity_upper, 273 | ): 274 | uptake_lower = heat_capacity_lower * (t_lower_now - t_lower_prev) / delta_t 275 | uptake_upper = heat_capacity_upper * (t_upper_now - t_upper_prev) / delta_t 276 | 277 | return uptake_upper + uptake_lower 278 | 279 | def _get_run_output_tss(self, ts_base): 280 | out_run_tss = [] 281 | 282 | out_run_tss.append( 283 | self._create_ts( 284 | base=ts_base, 285 | unit=self._temp_upper_unit, 286 | variable="Surface Temperature|Upper", 287 | values=self._temp_upper_mag, 288 | ) 289 | ) 290 | out_run_tss.append( 291 | self._create_ts( 292 | base=ts_base, 293 | unit=self._temp_lower_unit, 294 | variable="Surface Temperature|Lower", 295 | values=self._temp_lower_mag, 296 | ) 297 | ) 298 | out_run_tss.append( 299 | self._create_ts( 300 | base=ts_base, 301 | unit=self._rndt_unit, 302 | variable="Heat Uptake", 303 | values=self._rndt_mag, 304 | ) 305 | ) 306 | 307 | return out_run_tss 308 | 309 | def 
get_impulse_response_parameters(self): # pylint:disable=missing-return-doc 310 | """ 311 | Get equivalent two-timescale impulse response model parameters 312 | 313 | For details on how the equivalence is calculated, please see the notebook 314 | ``impulse-response-equivalence.ipynb`` in the `OpenSCM Two Layer model 315 | repository `_. 316 | 317 | Returns 318 | ------- 319 | dict of str : :obj:`pint.Quantity` 320 | Input arguments to initialise an 321 | :obj:`openscm_twolayermodel.ImpulseResponseModel` with the same 322 | temperature response as ``self`` 323 | 324 | Raises 325 | ------ 326 | ValueError 327 | ``self.a`` is non-zero, the two-timescale model does not support 328 | state-dependence. 329 | """ 330 | if not np.equal(self.a.magnitude, 0): 331 | raise ValueError( 332 | "Cannot calculate impulse response parameters with " 333 | "non-zero a={}".format(self.a) 334 | ) 335 | 336 | gh = _calculate_geoffroy_helper_parameters( 337 | self.du, self.dl, self.lambda0, self.efficacy, self.eta 338 | ) 339 | 340 | d1 = gh["tau1"] 341 | d2 = gh["tau2"] 342 | 343 | qdenom = gh["C"] * (gh["phi2"] - gh["phi1"]) 344 | q1 = gh["tau1"] * gh["phi2"] / qdenom 345 | q2 = -gh["tau2"] * gh["phi1"] / qdenom 346 | 347 | out = { 348 | "d1": d1, 349 | "d2": d2, 350 | "q1": q1, 351 | "q2": q2, 352 | "efficacy": self.efficacy, 353 | } 354 | 355 | return out 356 | -------------------------------------------------------------------------------- /src/openscm_twolayermodel/utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | Utility functions 3 | """ 4 | import pint 5 | from openscm_units import unit_registry 6 | 7 | 8 | def convert_lambda_to_ecs(lambda_val, f2x=3.74 * unit_registry("W/m^2")): 9 | """ 10 | Convert a lambda value to equilibrium climate sensitivity (ECS) 11 | 12 | Parameters 13 | ---------- 14 | lambda_val : :obj:`pint.Quantity` 15 | Value of lambda to convert to ECS 16 | 17 | f2x : :obj:`pint.Quantity` 18 | Value of the 
"""
Shared pytest fixtures and assertion helpers for the test suite
"""
import os.path

import numpy as np
import numpy.testing as npt
import pandas as pd
import pytest
from openscm_units import unit_registry as ur
from scmdata.run import ScmRun

TEST_DATA_ROOT_DIR = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), "test-data"
)


@pytest.fixture
def test_data_root_dir():
    """Root directory of the test data, skipping the test if it is absent"""
    if not os.path.isdir(TEST_DATA_ROOT_DIR):
        pytest.skip("test data required")

    return TEST_DATA_ROOT_DIR


@pytest.fixture
def test_rcmip_forcings(test_data_root_dir):
    """Path to the RCMIP radiative forcing test data"""
    return os.path.join(
        test_data_root_dir, "rcmip-radiative-forcing-annual-means-v4-0-0.csv"
    )


@pytest.fixture
def test_rcmip_forcings_scmrun(test_rcmip_forcings):
    """RCMIP radiative forcing test data, loaded as an :obj:`ScmRun`"""
    return ScmRun(test_rcmip_forcings, lowercase_cols=True)


@pytest.fixture
def test_twolayer_output_dir(test_data_root_dir):
    """Directory containing expected two-layer model output"""
    return os.path.join(test_data_root_dir, "two-layer-output")


@pytest.fixture
def test_impulseresponse_output_dir(test_data_root_dir):
    """Directory containing expected impulse response model output"""
    return os.path.join(test_data_root_dir, "impulse-response-output")


def pytest_addoption(parser):
    """Add the ``--update-expected-files`` command-line option"""
    parser.addoption(
        "--update-expected-files",
        action="store_true",
        default=False,
        help="Overwrite expected files",
    )


@pytest.fixture
def update_expected_files(request):
    """Whether expected files should be overwritten rather than compared"""
    return request.config.getoption("--update-expected-files")


def assert_scmruns_allclose(res, expected):
    """Assert that two :obj:`ScmRun` hold the same timeseries data"""
    exp_df = expected.timeseries().sort_index()
    meta_cols = exp_df.index.names

    # align the metadata level order before comparing
    res_df = res.timeseries()
    res_df.index = res_df.index.reorder_levels(meta_cols)
    res_df = res_df.sort_index()

    pd.testing.assert_frame_equal(res_df, exp_df, check_like=True)


@pytest.fixture
def check_scmruns_allclose():
    """Fixture wrapper around :func:`assert_scmruns_allclose`"""
    return assert_scmruns_allclose


@pytest.fixture
def run_model_output_comparison(tmpdir):
    """Fixture providing a helper to compare model output with files on disk"""

    def _do_comparison(res, expected, update=False):
        """
        Run test that results match expected output

        Parameters
        ----------
        res : :obj:`ScmRun`
            Output from model run

        expected : str
            Path containing expected output

        update : bool
            If True, don't perform the test and instead simply
            overwrite ``expected`` with ``res``

        Raises
        ------
        AssertionError
            If ``update`` is ``False`` and ``res`` and ``expected``
            are not identical.
        """
        if update:
            print("Updating {}".format(expected))
            res.to_csv(expected)
        else:
            # scmdata bug: the saving and loading process mangles the column
            # names so we have to save to disk before checking
            tmpfile = os.path.join(tmpdir, "res.csv")
            res.to_csv(tmpfile)
            assert_scmruns_allclose(ScmRun(tmpfile), ScmRun(expected))

        if update:
            pytest.skip("Updated {}".format(expected))

    return _do_comparison


# temporary workaround until this is in Pint itself and can be imported
def assert_pint_equal(a, b, **kwargs):
    """Assert that two :obj:`pint.Quantity` are numerically close"""
    # convert to common units before comparing magnitudes
    c = b.to(a.units)
    try:
        npt.assert_allclose(a.magnitude, c.magnitude, **kwargs)

    except AssertionError as e:
        original_msg = "{}".format(e)
        note_line = "Note: values above have been converted to {}".format(a.units)
        units_lines = "Input units:\n" "x: {}\n" "y: {}".format(a.units, b.units)

        numerical_lines = (
            "Numerical values with units:\n" "x: {}\n" "y: {}".format(a, b)
        )

        error_msg = (
            "{}\n"
            "\n"
            "{}\n"
            "\n"
            "{}\n"
            "\n"
            "{}".format(original_msg, note_line, units_lines, numerical_lines)
        )

        # chain explicitly so the original comparison failure is preserved
        # as the direct cause (flake8-bugbear B904)
        raise AssertionError(error_msg) from e


@pytest.fixture
def check_equal_pint():
    """Fixture wrapper around :func:`assert_pint_equal`"""
    return assert_pint_equal


# temporary workaround until this is in Pint itself and can be imported
def assert_same_unit(unit_1, unit_2):
    """
    Check that conversion factor between two units is 1
    """
    assert np.equal(1 * ur(str(unit_1)).to(unit_2).magnitude, 1)


@pytest.fixture
def check_same_unit():
    """Fixture wrapper around :func:`assert_same_unit`"""
    return assert_same_unit
class TestTwoLayerModel(TwoLayerVariantIntegrationTester):
    """
    Integration tests of :class:`ImpulseResponseModel` against direct
    (:meth:`set_drivers`/:meth:`run`) runs of the same model
    """

    tmodel = ImpulseResponseModel

    def test_run_scenarios_single(self):
        """Output of ``run_scenarios`` matches a direct run for one scenario"""
        inp = self.tinp.copy()

        model = self.tmodel()

        res = model.run_scenarios(inp)

        # reference: run the model directly on the same drivers
        model.set_drivers(
            inp.values.squeeze() * ur(inp.get_unique_meta("unit", no_duplicates=True))
        )
        model.reset()
        model.run()

        npt.assert_allclose(
            res.filter(variable="Surface Temperature").values.squeeze(),
            model._temp1_mag + model._temp2_mag,
        )
        assert (
            res.filter(variable="Surface Temperature").get_unique_meta(
                "unit", no_duplicates=True
            )
            == "delta_degC"
        )

        npt.assert_allclose(
            res.filter(variable="Surface Temperature|Box 1").values.squeeze(),
            model._temp1_mag,
        )
        assert (
            res.filter(variable="Surface Temperature|Box 1").get_unique_meta(
                "unit", no_duplicates=True
            )
            == "delta_degC"
        )

        npt.assert_allclose(
            res.filter(variable="Surface Temperature|Box 2").values.squeeze(),
            model._temp2_mag,
        )
        assert (
            res.filter(variable="Surface Temperature|Box 2").get_unique_meta(
                "unit", no_duplicates=True
            )
            == "delta_degC"
        )

        npt.assert_allclose(
            res.filter(variable="Heat Uptake").values.squeeze(), model._rndt_mag
        )
        assert (
            res.filter(variable="Heat Uptake").get_unique_meta(
                "unit", no_duplicates=True
            )
            == "W/m^2"
        )

    def test_run_scenarios_multiple(self):
        """Each scenario in a multi-scenario run matches a direct run"""
        ts1_erf = np.linspace(0, 4, 101)
        ts2_erf = np.sin(np.linspace(0, 4, 101))

        inp = ScmRun(
            data=np.vstack([ts1_erf, ts2_erf]).T,
            index=np.linspace(1750, 1850, 101).astype(int),
            columns={
                "scenario": ["test_scenario_1", "test_scenario_2"],
                "model": "unspecified",
                "climate_model": "junk input",
                "variable": "Effective Radiative Forcing",
                "unit": "W/m^2",
                "region": "World",
            },
        )

        model = self.tmodel()

        res = model.run_scenarios(inp)

        for scenario_ts in inp.groupby("scenario"):
            scenario = scenario_ts.get_unique_meta("scenario", no_duplicates=True)

            model.set_drivers(
                scenario_ts.values.squeeze()
                * ur(inp.get_unique_meta("unit", no_duplicates=True))
            )
            model.reset()
            model.run()

            res_scen = res.filter(scenario=scenario)

            npt.assert_allclose(
                res_scen.filter(variable="Surface Temperature").values.squeeze(),
                model._temp1_mag + model._temp2_mag,
            )
            assert (
                res_scen.filter(variable="Surface Temperature").get_unique_meta(
                    "unit", no_duplicates=True
                )
                == "delta_degC"
            )

            npt.assert_allclose(
                res_scen.filter(variable="Surface Temperature|Box 1").values.squeeze(),
                model._temp1_mag,
            )
            assert (
                res_scen.filter(variable="Surface Temperature|Box 1").get_unique_meta(
                    "unit", no_duplicates=True
                )
                == "delta_degC"
            )

            npt.assert_allclose(
                res_scen.filter(variable="Surface Temperature|Box 2").values.squeeze(),
                model._temp2_mag,
            )
            assert (
                res_scen.filter(variable="Surface Temperature|Box 2").get_unique_meta(
                    "unit", no_duplicates=True
                )
                == "delta_degC"
            )

            npt.assert_allclose(
                res_scen.filter(variable="Heat Uptake").values.squeeze(),
                model._rndt_mag,
            )
            # check the per-scenario output, consistent with the sibling
            # assertions above
            assert (
                res_scen.filter(variable="Heat Uptake").get_unique_meta(
                    "unit", no_duplicates=True
                )
                == "W/m^2"
            )

    @pytest.mark.parametrize(
        "driver_var",
        ("Effective Radiative Forcing", "Effective Radiative Forcing|CO2",),
    )
    def test_run_scenarios_multiple_drive_var(self, driver_var):
        """The requested ``driver_var`` is the one actually used to drive runs"""
        ts1_erf = np.linspace(0, 4, 101)
        ts1_erf_co2 = 0.9 * ts1_erf
        ts2_erf = np.sin(np.linspace(0, 4, 101))
        ts2_erf_co2 = np.cos(np.linspace(0, 4, 101)) * ts2_erf

        inp = ScmRun(
            data=np.vstack([ts1_erf, ts1_erf_co2, ts2_erf, ts2_erf_co2]).T,
            index=np.linspace(1750, 1850, 101).astype(int),
            columns={
                "scenario": [
                    "test_scenario_1",
                    "test_scenario_1",
                    "test_scenario_2",
                    "test_scenario_2",
                ],
                "model": "unspecified",
                "climate_model": "junk input",
                "variable": [
                    "Effective Radiative Forcing",
                    "Effective Radiative Forcing|CO2",
                    "Effective Radiative Forcing",
                    "Effective Radiative Forcing|CO2",
                ],
                "unit": "W/m^2",
                "region": "World",
            },
        )

        model = self.tmodel()

        res = model.run_scenarios(inp, driver_var=driver_var)

        for scenario_ts in inp.groupby("scenario"):
            scenario = scenario_ts.get_unique_meta("scenario", no_duplicates=True)

            driver = scenario_ts.filter(variable=driver_var)
            model.set_drivers(
                driver.values.squeeze()
                * ur(inp.get_unique_meta("unit", no_duplicates=True))
            )
            model.reset()
            model.run()

            res_scen = res.filter(scenario=scenario)

            npt.assert_allclose(
                res_scen.filter(variable="Surface Temperature|Box 1").values.squeeze(),
                model._temp1_mag,
            )
            # check the per-scenario output, consistent with the value
            # assertion above
            assert (
                res_scen.filter(variable="Surface Temperature|Box 1").get_unique_meta(
                    "unit", no_duplicates=True
                )
                == "delta_degC"
            )

            npt.assert_allclose(
                res_scen.filter(variable="Surface Temperature|Box 2").values.squeeze(),
                model._temp2_mag,
            )
            assert (
                res_scen.filter(variable="Surface Temperature|Box 2").get_unique_meta(
                    "unit", no_duplicates=True
                )
                == "delta_degC"
            )

            npt.assert_allclose(
                res_scen.filter(variable="Heat Uptake").values.squeeze(),
                model._rndt_mag,
            )
            assert (
                res_scen.filter(variable="Heat Uptake").get_unique_meta(
                    "unit", no_duplicates=True
                )
                == "W/m^2"
            )

    def test_run_scenario_timestep_followed(self, check_equal_pint):
        """The model's internal timestep follows the input data's resolution"""
        inp = self.tinp.copy()

        model = self.tmodel()

        res = model.run_scenarios(inp)
        check_equal_pint(model.delta_t, 1 * ur("yr"))

        inp_monthly = inp.resample("MS")
        res_monthly = model.run_scenarios(inp_monthly)
        check_equal_pint(model.delta_t, 1 * ur("month"))

        comp_filter = {
            "variable": "Surface Temperature",
            "year": int(
                res["year"].iloc[-1]
            ),  # scmdata bug that you have to wrap this with int()
            "month": 1,
        }

        # running with two different timesteps should give approximately same results
        npt.assert_allclose(
            res.filter(**comp_filter).values.squeeze(),
            res_monthly.filter(**comp_filter).values.squeeze(),
            rtol=6 * 1e-3,
        )
class TwoLayerVariantIntegrationTester(ModelIntegrationTester):
    """Integration tests shared by the two-layer-style model variants."""

    # single-scenario ERF ramp used as the default driver in these tests
    tinp = ScmRun(
        data=np.linspace(0, 4, 101),
        index=np.linspace(1750, 1850, 101).astype(int),
        columns={
            "scenario": "test_scenario",
            "model": "unspecified",
            "climate_model": "junk input",
            "variable": "Effective Radiative Forcing",
            "unit": "W/m^2",
            "region": "World",
        },
    )

    def test_run_unit_handling(self, check_scmruns_allclose):
        """Equivalent inputs in different units must give identical output."""
        inp = self.tinp.copy()

        model = self.tmodel()

        res = model.run_scenarios(inp)

        # scmdata bug
        # inp.convert_unit("kW/m^2") blows up
        inp_other_unit = inp.copy()
        inp_other_unit *= 10 ** -3
        inp_other_unit["unit"] = "kW/m^2"
        res_other_unit = model.run_scenarios(inp_other_unit)

        assert res.get_unique_meta("climate_model", no_duplicates=True) == model._name

        check_scmruns_allclose(
            res.filter(variable="Effective Radiative Forcing", keep=False),
            res_other_unit.filter(variable="Effective Radiative Forcing", keep=False),
        )

    def test_run_wrong_units(self):
        """Dimensionally wrong driver units must raise a UnitError."""
        inp = self.tinp.copy()
        inp["unit"] = "W"

        model = self.tmodel()

        with pytest.raises(UnitError):
            model.run_scenarios(inp)

    def test_run_wrong_region(self):
        """Input without World-region data must raise a ValueError."""
        inp = self.tinp.copy()
        inp["region"] = "World|R5LAM"

        model = self.tmodel()

        error_msg = (
            "No World data available for driver_var `Effective Radiative Forcing`"
        )

        with pytest.raises(ValueError, match=error_msg):
            model.run_scenarios(inp)

    def test_run_wrong_driver(self):
        """Requesting a driver variable absent from the input must raise."""
        inp = self.tinp.copy()

        model = self.tmodel()

        # `match` is interpreted as a regex: escape the `|` so we require the
        # full literal message instead of matching either alternation half
        error_msg = (
            r"No World data available for driver_var "
            r"`Effective Radiative Forcing\|CO2`"
        )

        with pytest.raises(ValueError, match=error_msg):
            model.run_scenarios(inp, driver_var="Effective Radiative Forcing|CO2")
# ---- tests/integration/test_twolayer_impulse_response_equivalence.py ----
import numpy as np
import numpy.testing as npt
import pytest
from openscm_units import unit_registry
from scmdata.run import ScmRun

from openscm_twolayermodel import ImpulseResponseModel, TwoLayerModel


@pytest.mark.parametrize(
    "two_layer_config",
    (
        {},
        {"efficacy": 1.2 * unit_registry("dimensionless")},
        {"lambda0": 3.74 / 5 * unit_registry("W/m^2/delta_degC")},
    ),
)
def test_two_layer_impulse_response_equivalence(two_layer_config):
    """
    A two-layer run and its mathematically equivalent impulse-response
    twin must produce (approximately) the same output.
    """
    years = np.arange(1750, 2501)
    # a sinusoid superposed on a ramp ending at 3 W/m^2
    erf = 0.3 * np.sin(years / 15 * 2 * np.pi) + 3.0 * years / years.max()

    driver = ScmRun(
        data=erf,
        index=years,
        columns={
            "scenario": "test_scenario",
            "model": "unspecified",
            "climate_model": "junk input",
            "variable": "Effective Radiative Forcing",
            "unit": "W/m^2",
            "region": "World",
        },
    )

    twolayer = TwoLayerModel(**two_layer_config)
    res_twolayer = twolayer.run_scenarios(driver)

    # derive the equivalent impulse-response configuration from the
    # two-layer parameters and run the same scenario with it
    impulse_response = ImpulseResponseModel(
        **twolayer.get_impulse_response_parameters()
    )
    res_impulse_response = impulse_response.run_scenarios(driver)

    assert (res_twolayer["time"] == res_impulse_response["time"]).all()

    # forcing is simply echoed back, so it must agree exactly
    npt.assert_allclose(
        res_twolayer.filter(variable="Effective Radiative Forcing").values,
        res_impulse_response.filter(variable="Effective Radiative Forcing").values,
    )
    npt.assert_allclose(
        res_twolayer.filter(variable="Heat Uptake").values,
        res_impulse_response.filter(variable="Heat Uptake").values,
        atol=0.1,  # numerical errors?
    )
    npt.assert_allclose(
        res_twolayer.filter(variable="Surface Temperature|Upper").values,
        res_impulse_response.filter(variable="Surface Temperature").values,
        atol=0.1,  # numerical errors?
    )


# ---- tests/integration/test_twolayer_integration.py ----
import numpy as np
import numpy.testing as npt
import pytest
from openscm_units import unit_registry as ur
from scmdata import ScmRun
from test_model_integration_base import TwoLayerVariantIntegrationTester

from openscm_twolayermodel import TwoLayerModel


class TestTwoLayerModel(TwoLayerVariantIntegrationTester):

    tmodel = TwoLayerModel

    def test_run_scenarios_single(self):
        """A single-scenario run must match driving the bare model by hand."""
        inp = self.tinp.copy()

        model = self.tmodel()

        res = model.run_scenarios(inp)

        # drive the model directly with the same forcing ...
        model.set_drivers(
            inp.values.squeeze() * ur(inp.get_unique_meta("unit", no_duplicates=True))
        )
        model.reset()
        model.run()

        # ... and check values and units of every reported variable
        for variable, expected_mag, expected_unit in (
            ("Surface Temperature|Upper", model._temp_upper_mag, "delta_degC"),
            ("Surface Temperature|Lower", model._temp_lower_mag, "delta_degC"),
            ("Heat Uptake", model._rndt_mag, "W/m^2"),
        ):
            npt.assert_allclose(
                res.filter(variable=variable).values.squeeze(), expected_mag
            )
            assert (
                res.filter(variable=variable).get_unique_meta(
                    "unit", no_duplicates=True
                )
                == expected_unit
            )
test_run_scenarios_multiple(self): 61 | ts1_erf = np.linspace(0, 4, 101) 62 | ts2_erf = np.sin(np.linspace(0, 4, 101)) 63 | 64 | inp = ScmRun( 65 | data=np.vstack([ts1_erf, ts2_erf]).T, 66 | index=np.linspace(1750, 1850, 101).astype(int), 67 | columns={ 68 | "scenario": ["test_scenario_1", "test_scenario_2"], 69 | "model": "unspecified", 70 | "climate_model": "junk input", 71 | "variable": "Effective Radiative Forcing", 72 | "unit": "W/m^2", 73 | "region": "World", 74 | }, 75 | ) 76 | 77 | model = self.tmodel() 78 | 79 | res = model.run_scenarios(inp) 80 | 81 | for scenario_ts in inp.groupby("scenario"): 82 | scenario = scenario_ts.get_unique_meta("scenario", no_duplicates=True) 83 | 84 | model.set_drivers( 85 | scenario_ts.values.squeeze() 86 | * ur(inp.get_unique_meta("unit", no_duplicates=True)) 87 | ) 88 | model.reset() 89 | model.run() 90 | 91 | res_scen = res.filter(scenario=scenario) 92 | 93 | npt.assert_allclose( 94 | res_scen.filter(variable="Surface Temperature|Upper").values.squeeze(), 95 | model._temp_upper_mag, 96 | ) 97 | assert ( 98 | res.filter(variable="Surface Temperature|Upper").get_unique_meta( 99 | "unit", no_duplicates=True 100 | ) 101 | == "delta_degC" 102 | ) 103 | 104 | npt.assert_allclose( 105 | res_scen.filter(variable="Surface Temperature|Lower").values.squeeze(), 106 | model._temp_lower_mag, 107 | ) 108 | assert ( 109 | res.filter(variable="Surface Temperature|Lower").get_unique_meta( 110 | "unit", no_duplicates=True 111 | ) 112 | == "delta_degC" 113 | ) 114 | 115 | npt.assert_allclose( 116 | res_scen.filter(variable="Heat Uptake").values.squeeze(), 117 | model._rndt_mag, 118 | ) 119 | assert ( 120 | res.filter(variable="Heat Uptake").get_unique_meta( 121 | "unit", no_duplicates=True 122 | ) 123 | == "W/m^2" 124 | ) 125 | 126 | @pytest.mark.parametrize( 127 | "driver_var", 128 | ("Effective Radiative Forcing", "Effective Radiative Forcing|CO2",), 129 | ) 130 | def test_run_scenarios_multiple_drive_var(self, driver_var): 131 | ts1_erf = 
np.linspace(0, 4, 101) 132 | ts1_erf_co2 = 0.9 * ts1_erf 133 | ts2_erf = np.sin(np.linspace(0, 4, 101)) 134 | ts2_erf_co2 = np.cos(np.linspace(0, 4, 101)) * ts2_erf 135 | 136 | inp = ScmRun( 137 | data=np.vstack([ts1_erf, ts1_erf_co2, ts2_erf, ts2_erf_co2]).T, 138 | index=np.linspace(1750, 1850, 101).astype(int), 139 | columns={ 140 | "scenario": [ 141 | "test_scenario_1", 142 | "test_scenario_1", 143 | "test_scenario_2", 144 | "test_scenario_2", 145 | ], 146 | "model": "unspecified", 147 | "climate_model": "junk input", 148 | "variable": [ 149 | "Effective Radiative Forcing", 150 | "Effective Radiative Forcing|CO2", 151 | "Effective Radiative Forcing", 152 | "Effective Radiative Forcing|CO2", 153 | ], 154 | "unit": "W/m^2", 155 | "region": "World", 156 | }, 157 | ) 158 | 159 | model = self.tmodel() 160 | 161 | res = model.run_scenarios(inp, driver_var=driver_var) 162 | 163 | for scenario_ts in inp.groupby("scenario"): 164 | scenario = scenario_ts.get_unique_meta("scenario", no_duplicates=True) 165 | 166 | driver = scenario_ts.filter(variable=driver_var) 167 | model.set_drivers( 168 | driver.values.squeeze() 169 | * ur(inp.get_unique_meta("unit", no_duplicates=True)) 170 | ) 171 | model.reset() 172 | model.run() 173 | 174 | res_scen = res.filter(scenario=scenario) 175 | 176 | npt.assert_allclose( 177 | res_scen.filter(variable="Surface Temperature|Upper").values.squeeze(), 178 | model._temp_upper_mag, 179 | ) 180 | assert ( 181 | res.filter(variable="Surface Temperature|Upper").get_unique_meta( 182 | "unit", no_duplicates=True 183 | ) 184 | == "delta_degC" 185 | ) 186 | 187 | npt.assert_allclose( 188 | res_scen.filter(variable="Surface Temperature|Lower").values.squeeze(), 189 | model._temp_lower_mag, 190 | ) 191 | assert ( 192 | res.filter(variable="Surface Temperature|Lower").get_unique_meta( 193 | "unit", no_duplicates=True 194 | ) 195 | == "delta_degC" 196 | ) 197 | 198 | npt.assert_allclose( 199 | res_scen.filter(variable="Heat Uptake").values.squeeze(), 200 | 
model._rndt_mag, 201 | ) 202 | assert ( 203 | res.filter(variable="Heat Uptake").get_unique_meta( 204 | "unit", no_duplicates=True 205 | ) 206 | == "W/m^2" 207 | ) 208 | 209 | def test_run_scenario_timestep_followed(self, check_equal_pint): 210 | inp = self.tinp.copy() 211 | 212 | model = self.tmodel() 213 | 214 | res = model.run_scenarios(inp) 215 | check_equal_pint(model.delta_t, 1 * ur("yr")) 216 | 217 | inp_monthly = inp.resample("MS") 218 | res_monthly = model.run_scenarios(inp_monthly) 219 | check_equal_pint(model.delta_t, 1 * ur("month")) 220 | 221 | comp_filter = { 222 | "variable": "Surface Temperature|Upper", 223 | "year": int( 224 | res["year"].iloc[-1] 225 | ), # scmdata bug that you have to wrap this with int() 226 | "month": 1, 227 | } 228 | 229 | # running with two different timesteps should give approximately same results 230 | npt.assert_allclose( 231 | res.filter(**comp_filter).values.squeeze(), 232 | res_monthly.filter(**comp_filter).values.squeeze(), 233 | rtol=1e-3, 234 | ) 235 | res.filter(variable="Surface Temperature|Upper") 236 | -------------------------------------------------------------------------------- /tests/notebook-tests.cfg: -------------------------------------------------------------------------------- 1 | [regex1] 2 | regex: size \d+x\d+ with 3 | replace: FIGURESIZE 4 | 5 | [regex2] 6 | regex: id="\d*" 7 | replace: ID-STAMP 8 | 9 | [regex3] 10 | regex: \/\S+\/openscm-twolayermodel\/src\S+\: 11 | replace: PATH-STAMP 12 | -------------------------------------------------------------------------------- /tests/regression/test_impulse_response_defaults.py: -------------------------------------------------------------------------------- 1 | import os.path 2 | 3 | from openscm_twolayermodel import ImpulseResponseModel 4 | 5 | 6 | def test_impulse_response_defaults( 7 | update_expected_files, 8 | test_rcmip_forcings_scmrun, 9 | test_impulseresponse_output_dir, 10 | run_model_output_comparison, 11 | ): 12 | impulse_response_default = 
# ---- tests/regression/test_impulse_response_defaults.py ----
import os.path

from openscm_twolayermodel import ImpulseResponseModel


def test_impulse_response_defaults(
    update_expected_files,
    test_rcmip_forcings_scmrun,
    test_impulseresponse_output_dir,
    run_model_output_comparison,
):
    """Default-configuration output must match the stored regression file."""
    model = ImpulseResponseModel()
    res = model.run_scenarios(test_rcmip_forcings_scmrun)

    expected = os.path.join(
        test_impulseresponse_output_dir, "test_impulse_response_defaults.csv"
    )

    run_model_output_comparison(res, expected, update_expected_files)


# ---- tests/regression/test_twolayer_defaults.py ----
import os.path

import pytest
from openscm_units import unit_registry as ur

from openscm_twolayermodel import TwoLayerModel


def test_twolayer_defaults(
    update_expected_files,
    test_rcmip_forcings_scmrun,
    test_twolayer_output_dir,
    run_model_output_comparison,
):
    """Default-configuration output must match the stored regression file."""
    model = TwoLayerModel()
    res = model.run_scenarios(test_rcmip_forcings_scmrun)

    expected = os.path.join(test_twolayer_output_dir, "test_twolayer_defaults.csv")

    run_model_output_comparison(res, expected, update_expected_files)


def test_twolayer_plus_efficacy(
    update_expected_files,
    test_rcmip_forcings_scmrun,
    test_twolayer_output_dir,
    run_model_output_comparison,
):
    """Output with a non-unit efficacy must match the stored regression file."""
    model = TwoLayerModel(efficacy=1.2 * ur("dimensionless"))
    res = model.run_scenarios(test_rcmip_forcings_scmrun)

    expected = os.path.join(test_twolayer_output_dir, "test_twolayer_plus_efficacy.csv")

    run_model_output_comparison(res, expected, update_expected_files)


# the state-dependent configuration can overflow for strong forcing,
# hence the warning filters
@pytest.mark.filterwarnings("ignore:overflow encountered")
@pytest.mark.filterwarnings("ignore:invalid value encountered")
def test_twolayer_plus_state_dependence(
    update_expected_files,
    test_rcmip_forcings_scmrun,
    test_twolayer_output_dir,
    run_model_output_comparison,
):
    """Output with state-dependent feedback must match the regression file."""
    model = TwoLayerModel(a=0.05 * ur("W/m^2/delta_degC^2"))
    res = model.run_scenarios(test_rcmip_forcings_scmrun)

    expected = os.path.join(
        test_twolayer_output_dir, "test_twolayer_plus_state_dependence.csv"
    )

    run_model_output_comparison(res, expected, update_expected_files)
ur("W/m^2/delta_degC^2")) 46 | res = twolayer_plus_state_dependence.run_scenarios(test_rcmip_forcings_scmrun) 47 | 48 | expected = os.path.join( 49 | test_twolayer_output_dir, "test_twolayer_plus_state_dependence.csv" 50 | ) 51 | 52 | run_model_output_comparison(res, expected, update_expected_files) 53 | -------------------------------------------------------------------------------- /tests/unit/test_impulse_response_unit.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import numpy.testing as npt 3 | import pytest 4 | from openscm_units import unit_registry as ur 5 | from test_model_base import TwoLayerVariantTester 6 | 7 | from openscm_twolayermodel import ImpulseResponseModel, TwoLayerModel 8 | from openscm_twolayermodel.base import _calculate_geoffroy_helper_parameters 9 | from openscm_twolayermodel.constants import DENSITY_WATER, HEAT_CAPACITY_WATER 10 | 11 | 12 | class TestImpulseResponseModel(TwoLayerVariantTester): 13 | tmodel = ImpulseResponseModel 14 | 15 | parameters = dict( 16 | q1=0.33 * ur("delta_degC/(W/m^2)"), 17 | q2=0.41 * ur("delta_degC/(W/m^2)"), 18 | d1=239.0 * ur("yr"), 19 | d2=4.1 * ur("yr"), 20 | efficacy=1.0 * ur("dimensionless"), 21 | delta_t=1 * ur("yr"), 22 | ) 23 | 24 | def test_init(self): 25 | init_kwargs = dict( 26 | q1=0.3 * ur("delta_degC/(W/m^2)"), 27 | q2=0.4 * ur("delta_degC/(W/m^2)"), 28 | d1=25.0 * ur("yr"), 29 | d2=300 * ur("yr"), 30 | efficacy=1.1 * ur("dimensionless"), 31 | delta_t=1 / 12 * ur("yr"), 32 | ) 33 | 34 | res = self.tmodel(**init_kwargs) 35 | 36 | for k, v in init_kwargs.items(): 37 | assert getattr(res, k) == v, "{} not set properly".format(k) 38 | 39 | assert np.isnan(res.erf) 40 | assert np.isnan(res._temp1_mag) 41 | assert np.isnan(res._temp2_mag) 42 | assert np.isnan(res._rndt_mag) 43 | 44 | def test_init_backwards_timescales_error(self): 45 | init_kwargs = dict(d1=250.0 * ur("yr"), d2=3 * ur("yr"),) 46 | 47 | error_msg = "The short-timescale 
must be d1" 48 | with pytest.raises(ValueError, match=error_msg): 49 | self.tmodel(**init_kwargs) 50 | 51 | def test_calculate_next_temp(self, check_same_unit): 52 | tdelta_t = 30 * 24 * 60 * 60 53 | ttemp = 0.1 54 | tq = 0.4 55 | td = 35.0 56 | tf = 1.2 57 | 58 | res = self.tmodel._calculate_next_temp(tdelta_t, ttemp, tq, td, tf) 59 | 60 | expected = ttemp * np.exp(-tdelta_t / td) + tf * tq * ( 61 | 1 - np.exp(-tdelta_t / td) 62 | ) 63 | 64 | npt.assert_equal(res, expected) 65 | 66 | check_same_unit(self.tmodel._temp1_unit, self.tmodel._temp2_unit) 67 | check_same_unit(self.tmodel._q1_unit, self.tmodel._q2_unit) 68 | check_same_unit(self.tmodel._delta_t_unit, self.tmodel._d1_unit) 69 | check_same_unit(self.tmodel._delta_t_unit, self.tmodel._d2_unit) 70 | check_same_unit( 71 | self.tmodel._temp1_unit, 72 | (1.0 * ur(self.tmodel._erf_unit) * 1.0 * ur(self.tmodel._q1_unit)).units, 73 | ) 74 | 75 | def test_calculate_next_rndt(self, check_same_unit): 76 | ttemp1 = 1.1 77 | ttemp_2 = 0.6 78 | tq1 = 0.5 79 | tq2 = 0.3 80 | td1 = 30 81 | td2 = 600 82 | terf = 1.2 83 | tefficacy = 1.13 84 | 85 | helper = self.tmodel( 86 | q1=tq1 * ur("delta_degC/(W/m^2)"), 87 | q2=tq2 * ur("delta_degC/(W/m^2)"), 88 | d1=td1 * ur("yr"), 89 | d2=td2 * ur("yr"), 90 | efficacy=tefficacy * ur("dimensionless"), 91 | ) 92 | helper_twolayer = TwoLayerModel(**helper.get_two_layer_parameters()) 93 | 94 | gh = _calculate_geoffroy_helper_parameters( 95 | helper_twolayer.du, 96 | helper_twolayer.dl, 97 | helper_twolayer.lambda0, 98 | helper_twolayer.efficacy, 99 | helper_twolayer.eta, 100 | ) 101 | # see notebook for discussion of why this is so 102 | efficacy_term = ( 103 | helper_twolayer.eta 104 | * (helper_twolayer.efficacy - 1) 105 | * ( 106 | ((1 - gh["phi1"]) * ttemp1 * ur("delta_degC")) 107 | + ((1 - gh["phi2"]) * ttemp_2 * ur("delta_degC")) 108 | ) 109 | ) 110 | 111 | expected = ( 112 | terf * ur(helper._erf_unit) 113 | - ((ttemp1 + ttemp_2) * ur(helper._temp1_unit)) * helper_twolayer.lambda0 
114 | - efficacy_term 115 | ) 116 | assert str(expected.units) == "watt / meter ** 2" 117 | 118 | res = helper._calculate_next_rndt(ttemp1, ttemp_2, terf, tefficacy) 119 | 120 | npt.assert_allclose(res, expected.magnitude) 121 | 122 | # check internal units make sense 123 | check_same_unit(self.tmodel._q1_unit, self.tmodel._q2_unit) 124 | check_same_unit( 125 | helper_twolayer._lambda0_unit, (1.0 * ur(self.tmodel._q2_unit) ** -1) 126 | ) 127 | check_same_unit( 128 | self.tmodel._erf_unit, 129 | ( 130 | ( 131 | 1.0 * ur(self.tmodel._temp1_unit) / (1.0 * ur(self.tmodel._q1_unit)) 132 | ).units 133 | ), 134 | ) 135 | check_same_unit( 136 | self.tmodel._erf_unit, efficacy_term.units, 137 | ) 138 | 139 | def test_step(self): 140 | # move to integration tests 141 | terf = np.array([3, 4, 5, 6, 7]) * ur("W/m^2") 142 | 143 | model = self.tmodel() 144 | model.set_drivers(terf) 145 | model.reset() 146 | 147 | model.step() 148 | assert model._timestep_idx == 0 149 | npt.assert_equal(model._temp1_mag[model._timestep_idx], 0) 150 | npt.assert_equal(model._temp2_mag[model._timestep_idx], 0) 151 | npt.assert_equal(model._rndt_mag[model._timestep_idx], 0) 152 | 153 | model.step() 154 | model.step() 155 | model.step() 156 | assert model._timestep_idx == 3 157 | 158 | npt.assert_equal( 159 | model._temp1_mag[model._timestep_idx], 160 | model._calculate_next_temp( 161 | model._delta_t_mag, 162 | model._temp1_mag[model._timestep_idx - 1], 163 | model._q1_mag, 164 | model._d1_mag, 165 | model._erf_mag[model._timestep_idx - 1], 166 | ), 167 | ) 168 | 169 | npt.assert_equal( 170 | model._temp2_mag[model._timestep_idx], 171 | model._calculate_next_temp( 172 | model._delta_t_mag, 173 | model._temp2_mag[model._timestep_idx - 1], 174 | model._q2_mag, 175 | model._d2_mag, 176 | model._erf_mag[model._timestep_idx - 1], 177 | ), 178 | ) 179 | 180 | npt.assert_equal( 181 | model._rndt_mag[model._timestep_idx], 182 | model._calculate_next_rndt( 183 | model._temp1_mag[model._timestep_idx - 1], 184 
| model._temp2_mag[model._timestep_idx - 1], 185 | model._erf_mag[model._timestep_idx - 1], 186 | model._efficacy_mag, 187 | ), 188 | ) 189 | 190 | def test_reset(self): 191 | terf = np.array([0, 1, 2]) * ur("W/m^2") 192 | 193 | model = self.tmodel() 194 | model.set_drivers(terf) 195 | 196 | def assert_is_nan_and_erf_shape(inp): 197 | assert np.isnan(inp).all() 198 | assert inp.shape == terf.shape 199 | 200 | model.reset() 201 | # after reset, we are not in any timestep 202 | assert np.isnan(model._timestep_idx) 203 | assert_is_nan_and_erf_shape(model._temp1_mag) 204 | assert_is_nan_and_erf_shape(model._temp2_mag) 205 | assert_is_nan_and_erf_shape(model._rndt_mag) 206 | 207 | def test_reset_run_reset(self): 208 | # move to integration tests 209 | terf = np.array([0, 1, 2, 3, 4, 5]) * ur("W/m^2") 210 | 211 | model = self.tmodel() 212 | model.set_drivers(terf) 213 | 214 | def assert_is_nan_and_erf_shape(inp): 215 | assert np.isnan(inp).all() 216 | assert inp.shape == terf.shape 217 | 218 | model.reset() 219 | assert_is_nan_and_erf_shape(model._temp1_mag) 220 | assert_is_nan_and_erf_shape(model._temp2_mag) 221 | assert_is_nan_and_erf_shape(model._rndt_mag) 222 | 223 | def assert_ge_zero_and_erf_shape(inp): 224 | assert not (inp < 0).any() 225 | assert inp.shape == terf.shape 226 | 227 | model.run() 228 | assert_ge_zero_and_erf_shape(model._temp1_mag) 229 | assert_ge_zero_and_erf_shape(model._temp2_mag) 230 | assert_ge_zero_and_erf_shape(model._rndt_mag) 231 | 232 | model.reset() 233 | assert_is_nan_and_erf_shape(model._temp1_mag) 234 | assert_is_nan_and_erf_shape(model._temp2_mag) 235 | assert_is_nan_and_erf_shape(model._rndt_mag) 236 | 237 | def test_get_two_layer_model_parameters(self, check_equal_pint): 238 | tq1 = 0.3 * ur("delta_degC/(W/m^2)") 239 | tq2 = 0.4 * ur("delta_degC/(W/m^2)") 240 | td1 = 3 * ur("yr") 241 | td2 = 300.0 * ur("yr") 242 | tefficacy = 1.2 * ur("dimensionless") 243 | 244 | start_paras = dict(d1=td1, d2=td2, q1=tq1, q2=tq2, 
efficacy=tefficacy,) 245 | 246 | mod_instance = self.tmodel(**start_paras) 247 | 248 | # for explanation of what is going on, see 249 | # impulse-response-equivalence.ipynb 250 | efficacy = tefficacy 251 | lambda0 = 1 / (tq1 + tq2) 252 | C = (td1 * td2) / (tq1 * td2 + tq2 * td1) 253 | 254 | a1 = lambda0 * tq1 255 | a2 = lambda0 * tq2 256 | tau1 = td1 257 | tau2 = td2 258 | 259 | C_D = (lambda0 * (tau1 * a1 + tau2 * a2) - C) / efficacy 260 | eta = C_D / (tau1 * a2 + tau2 * a1) 261 | 262 | expected = { 263 | "lambda0": lambda0, 264 | "du": C / (DENSITY_WATER * HEAT_CAPACITY_WATER), 265 | "dl": C_D / (DENSITY_WATER * HEAT_CAPACITY_WATER), 266 | "eta": eta, 267 | "efficacy": efficacy, 268 | } 269 | 270 | res = mod_instance.get_two_layer_parameters() 271 | 272 | assert res == expected 273 | 274 | # check circularity 275 | circular_params = TwoLayerModel(**res).get_impulse_response_parameters() 276 | for k, v in circular_params.items(): 277 | check_equal_pint(v, start_paras[k]) 278 | -------------------------------------------------------------------------------- /tests/unit/test_misc.py: -------------------------------------------------------------------------------- 1 | import warnings 2 | 3 | import numpy as np 4 | import pytest 5 | from conftest import assert_pint_equal, assert_same_unit 6 | from openscm_units import unit_registry 7 | 8 | 9 | def test_pint_array_comparison(): 10 | a = np.array([0, 2]) * unit_registry("GtC") 11 | b = np.array([0, 2]) * unit_registry("MtC") 12 | 13 | # no error but does raise warning about stripping units 14 | with warnings.catch_warnings(record=True): 15 | np.testing.assert_allclose(a, b) 16 | 17 | # actually gives an error as we want 18 | with pytest.raises(AssertionError): 19 | assert_pint_equal(a, b) 20 | 21 | 22 | @pytest.mark.parametrize( 23 | "unit_1,unit_2,error", 24 | (("g", "kg", True), ("W", "J/yr", True), ("W", "J/s", False),), 25 | ) 26 | @pytest.mark.parametrize("unit_1_type", (str, "pint_unit")) 27 | 
# (tests/unit/test_misc.py, continued)
@pytest.mark.parametrize(
    "unit_1,unit_2,error",
    (("g", "kg", True), ("W", "J/yr", True), ("W", "J/s", False)),
)
@pytest.mark.parametrize("unit_1_type", (str, "pint_unit"))
@pytest.mark.parametrize("unit_2_type", (str, "pint_unit"))
def test_assert_same_unit(unit_1, unit_2, error, unit_1_type, unit_2_type):
    """
    assert_same_unit must flag units that are not interchangeable,
    regardless of whether they arrive as strings or pint units.
    """
    if unit_1_type == "pint_unit":
        unit_1 = unit_registry(unit_1)

    if unit_2_type == "pint_unit":
        unit_2 = unit_registry(unit_2)

    if error:
        with pytest.raises(AssertionError):
            assert_same_unit(unit_1, unit_2)
    else:
        assert_same_unit(unit_1, unit_2)


# ---- tests/unit/test_model_base.py ----
import re
from abc import ABC, abstractmethod
from unittest.mock import MagicMock

import numpy as np
import pint.errors
import pytest
from openscm_units import unit_registry as ur

from openscm_twolayermodel.errors import ModelStateError, UnitError


class ModelTester(ABC):
    """Base class for unit tests of a model."""

    # concrete testers must set the model class and a valid parameter set
    tmodel = None

    parameters = None

    @abstractmethod
    def test_init(self):
        """
        Test the model initialises as intended
        """

    def test_init_no_units(self):
        """
        Test error thrown if the model is initiliased with a unitless
        quantity
        """
        for parameter in self.parameters.keys():
            error_msg = "{} must be a pint.Quantity".format(parameter)
            with pytest.raises(TypeError, match=error_msg):
                self.tmodel(**{parameter: 34.3})

    @abstractmethod
    def test_init_wrong_units(self):
        """
        Test error thrown if the model is initiliased with wrong units
        for a quantity
        """
        # e.g.
        for parameter, value in self.parameters.items():
            error_msg = "{} units must be {}".format(parameter, value.units)
            with pytest.raises(TypeError, match=error_msg):
                self.tmodel(**{parameter: 34.3 * ur("kg")})

    def test_run(self):
        """Running the model must invoke :meth:`step`."""
        test = self.tmodel()
        test.step = MagicMock()
        test.run()

        test.step.assert_called()


class TwoLayerVariantTester(ModelTester):
    """Unit tests shared by the two-layer-style model variants."""

    def test_init_wrong_units(self):
        helper = self.tmodel()

        for parameter in self.parameters.keys():
            tinp = 34.3 * ur("kg")
            default = getattr(helper, parameter)

            # deliberately best-effort: only parameters whose defaults are
            # incompatible with kg exercise the conversion failure here
            try:
                tinp.to(default.units)
            except pint.errors.DimensionalityError:
                pass

            error_msg = re.escape("Wrong units for `{}`".format(parameter))
            with pytest.raises(UnitError, match=error_msg):
                self.tmodel(**{parameter: tinp})

    def test_set_erf(self, check_equal_pint):
        terf = np.array([0, 1, 2]) * ur("W/m^2")

        res = self.tmodel()
        res.erf = terf

        check_equal_pint(res.erf, terf)

    def test_set_erf_unitless_error(self, check_equal_pint):
        terf = np.array([0, 1, 2])

        res = self.tmodel()
        with pytest.raises(TypeError, match="erf must be a pint.Quantity"):
            res.erf = terf

    def test_reset_not_set_error(self):
        # escape the message: `match` is a regex and the message contains
        # the metacharacter `.` (consistent with test_init_wrong_units above)
        error_msg = re.escape(
            "The model's drivers have not been set yet, call :meth:`self.set_drivers` first."
        )
        with pytest.raises(ModelStateError, match=error_msg):
            self.tmodel().reset()
89 | with pytest.raises(ModelStateError, match=error_msg): 90 | self.tmodel().reset() 91 | -------------------------------------------------------------------------------- /tests/unit/test_two_layer_model_unit.py: -------------------------------------------------------------------------------- 1 | import re 2 | 3 | import numpy as np 4 | import numpy.testing as npt 5 | import pytest 6 | from openscm_units import unit_registry as ur 7 | from test_model_base import TwoLayerVariantTester 8 | 9 | from openscm_twolayermodel import TwoLayerModel 10 | from openscm_twolayermodel.base import _calculate_geoffroy_helper_parameters 11 | from openscm_twolayermodel.constants import DENSITY_WATER, HEAT_CAPACITY_WATER 12 | 13 | 14 | class TestTwoLayerModel(TwoLayerVariantTester): 15 | tmodel = TwoLayerModel 16 | 17 | parameters = dict( 18 | du=40 * ur("m"), 19 | dl=1300 * ur("m"), 20 | lambda0=3.4 / 3 * ur("W/m^2/delta_degC"), 21 | a=0.01 * ur("W/m^2/delta_degC^2"), 22 | efficacy=1.1 * ur("dimensionless"), 23 | eta=0.7 * ur("W/m^2/delta_degC"), 24 | delta_t=1 * ur("yr"), 25 | ) 26 | 27 | def test_init(self): 28 | init_kwargs = dict( 29 | du=10 * ur("m"), 30 | dl=2200 * ur("m"), 31 | lambda0=4 / 3 * ur("W/m^2/delta_degC"), 32 | a=0.1 * ur("W/m^2/delta_degC^2"), 33 | efficacy=1.1 * ur("dimensionless"), 34 | eta=0.7 * ur("W/m^2/delta_degC"), 35 | delta_t=1 / 12 * ur("yr"), 36 | ) 37 | 38 | res = self.tmodel(**init_kwargs) 39 | 40 | for k, v in init_kwargs.items(): 41 | assert getattr(res, k) == v, "{} not set properly".format(k) 42 | 43 | assert np.isnan(res.erf) 44 | assert np.isnan(res._temp_upper_mag) 45 | assert np.isnan(res._temp_lower_mag) 46 | assert np.isnan(res._rndt_mag) 47 | 48 | def test_heat_capacity_upper(self, check_equal_pint): 49 | model = self.tmodel(du=50000 * ur("mm")) 50 | 51 | expected = model.du * DENSITY_WATER * HEAT_CAPACITY_WATER 52 | 53 | res = model.heat_capacity_upper 54 | 55 | check_equal_pint(res, expected) 56 | assert ( 57 | 
# (tail of test_heat_capacity_upper, whose `def` line is above this chunk)
            model._heat_capacity_upper_mag
            == res.to(model._heat_capacity_upper_unit).magnitude
        )

    def test_heat_capacity_upper_no_setter(self):
        """Setting ``heat_capacity_upper`` directly must fail (read-only property)."""
        model = self.tmodel()
        # NOTE(review): this exact message is CPython-version-dependent; on
        # Python >= 3.11 the AttributeError text becomes
        # "property '...' of '...' object has no setter" — confirm supported versions.
        with pytest.raises(AttributeError, match="can't set attribute"):
            model.heat_capacity_upper = 4

    def test_heat_capacity_lower(self, check_equal_pint):
        """``heat_capacity_lower`` equals dl * density * specific heat of water."""
        model = self.tmodel(dl=2.5 * ur("km"))

        expected = model.dl * DENSITY_WATER * HEAT_CAPACITY_WATER

        res = model.heat_capacity_lower

        check_equal_pint(res, expected)
        # internal magnitude cache must agree with the pint quantity
        assert (
            model._heat_capacity_lower_mag
            == res.to(model._heat_capacity_lower_unit).magnitude
        )

    def test_heat_capacity_lower_no_setter(self):
        """Setting ``heat_capacity_lower`` directly must fail (read-only property)."""
        model = self.tmodel()
        # NOTE(review): same Python-version-dependent message caveat as above.
        with pytest.raises(AttributeError, match="can't set attribute"):
            model.heat_capacity_lower = 4

    def test_calculate_next_temp_upper(self, check_same_unit):
        """Check the upper-layer temperature step against its closed-form update.

        Also checks that the class' internal unit constants are mutually
        consistent (each term of the energy balance has the units of ERF).
        """
        tdelta_t = 30 * 24 * 60 * 60  # one month in seconds
        ttemp_upper = 0.1
        ttemp_lower = 0.2
        terf = 1.1
        tlambda0 = 3.7 / 3
        ta = 0.02
        tefficacy = 0.9
        teta = 0.78
        theat_capacity_upper = 10 ** 10

        res = self.tmodel._calculate_next_temp_upper(
            tdelta_t,
            ttemp_upper,
            ttemp_lower,
            terf,
            tlambda0,
            ta,
            tefficacy,
            teta,
            theat_capacity_upper,
        )

        # explicit Euler step of
        # C dT/dt = ERF - (lambda0 - a*T)*T - efficacy*eta*(T - T_lower)
        expected = (
            ttemp_upper
            + tdelta_t
            * (
                terf
                - (tlambda0 - ta * ttemp_upper) * ttemp_upper
                - (tefficacy * teta * (ttemp_upper - ttemp_lower))
            )
            / theat_capacity_upper
        )

        npt.assert_equal(res, expected)

        # check internal units make sense
        check_same_unit(
            self.tmodel._lambda0_unit,
            (
                1.0 * ur(self.tmodel._a_unit) * 1.0 * ur(self.tmodel._temp_upper_unit)
            ).units,
        )

        check_same_unit(
            self.tmodel._erf_unit,
            (
                1.0
                * ur(self.tmodel._lambda0_unit)
                * 1.0
                * ur(self.tmodel._temp_upper_unit)
            ).units,
        )

        check_same_unit(
            self.tmodel._erf_unit,
            (
                1.0
                * ur(self.tmodel._efficacy_unit)
                * 1.0
                * ur(self.tmodel._eta_unit)
                * 1.0
                * ur(self.tmodel._temp_upper_unit)
            ).units,
        )

        check_same_unit(
            self.tmodel._temp_upper_unit,
            (
                1.0
                * ur(self.tmodel._delta_t_unit)
                * 1.0
                * ur(self.tmodel._erf_unit)
                / (1.0 * ur(self.tmodel._heat_capacity_upper_unit))
            ).units,
        )

    def test_calculate_next_temp_lower(self, check_same_unit):
        """Check the lower-layer temperature step against its closed-form update."""
        tdelta_t = 30 * 24 * 60 * 60  # one month in seconds
        ttemp_upper = 0.1
        ttemp_lower = 0.2
        teta = 0.78
        theat_capacity_lower = 10 ** 8

        res = self.tmodel._calculate_next_temp_lower(
            tdelta_t, ttemp_lower, ttemp_upper, teta, theat_capacity_lower
        )

        # explicit Euler step of C_D dT_lower/dt = eta*(T_upper - T_lower)
        expected = ttemp_lower + (
            tdelta_t * teta * (ttemp_upper - ttemp_lower) / theat_capacity_lower
        )

        npt.assert_equal(res, expected)

        # check internal units make sense
        check_same_unit(
            self.tmodel._temp_upper_unit, self.tmodel._temp_lower_unit,
        )
        # NOTE(review): uses _heat_capacity_upper_unit although the equation
        # divides by the LOWER heat capacity — valid only because both layers
        # share the same unit string (asserted implicitly); confirm intent.
        check_same_unit(
            self.tmodel._temp_lower_unit,
            (
                1.0
                * ur(self.tmodel._delta_t_unit)
                * 1.0
                * ur(self.tmodel._eta_unit)
                * 1.0
                * ur(self.tmodel._temp_upper_unit)
                / (1.0 * ur(self.tmodel._heat_capacity_upper_unit))
            ).units,
        )

    def test_calculate_next_rndt(self, check_same_unit):
        """Heat uptake equals total heat-content change of both layers per unit time."""
        tdelta_t = 30 * 24 * 60 * 60  # one month in seconds
        ttemp_upper_t = 0.1
        ttemp_lower_t = 0.2
        ttemp_upper_t_prev = 0.09
        ttemp_lower_t_prev = 0.18
        theat_capacity_upper = 10 ** 10
        theat_capacity_lower = 10 ** 8

        res = self.tmodel._calculate_next_rndt(
            tdelta_t,
            ttemp_lower_t,
            ttemp_lower_t_prev,
            theat_capacity_lower,
            ttemp_upper_t,
            ttemp_upper_t_prev,
            theat_capacity_upper,
        )

        # RNDT = (C*dT_upper + C_D*dT_lower) / dt
        expected = (
            theat_capacity_upper * (ttemp_upper_t - ttemp_upper_t_prev)
            + theat_capacity_lower * (ttemp_lower_t - ttemp_lower_t_prev)
        ) / tdelta_t

        npt.assert_allclose(res, expected)

        # check internal units make sense
        check_same_unit(
            self.tmodel._temp_upper_unit, self.tmodel._temp_lower_unit,
        )
        check_same_unit(
            (
                (
                    1.0
                    * ur(self.tmodel._heat_capacity_lower_unit)
                    * 1.0
                    * ur(self.tmodel._temp_lower_unit)
                ).units
            ),
            (
                1.0
                * ur(self.tmodel._heat_capacity_upper_unit)
                * 1.0
                * ur(self.tmodel._temp_upper_unit)
            ).units,
        )
        check_same_unit(
            self.tmodel._rndt_unit,
            (
                1.0
                * ur(self.tmodel._heat_capacity_upper_unit)
                * 1.0
                * ur(self.tmodel._temp_upper_unit)
                / (1.0 * ur(self.tmodel._delta_t_unit))
            ).units,
        )

    def test_step(self):
        """Stepping advances ``_timestep_idx`` and applies the per-step updates."""
        # move to integration tests
        terf = np.array([3, 4, 5, 6, 7]) * ur("W/m^2")

        model = self.tmodel()
        model.set_drivers(terf)
        model.reset()

        # first step lands on index 0 with all state still zero
        model.step()
        assert model._timestep_idx == 0
        npt.assert_equal(model._temp_upper_mag[model._timestep_idx], 0)
        npt.assert_equal(model._temp_lower_mag[model._timestep_idx], 0)
        npt.assert_equal(model._rndt_mag[model._timestep_idx], 0)

        model.step()
        model.step()
        model.step()
        assert model._timestep_idx == 3

        # each state variable must equal the corresponding _calculate_next_*
        # applied to the previous step's state
        npt.assert_equal(
            model._temp_upper_mag[model._timestep_idx],
            model._calculate_next_temp_upper(
                model._delta_t_mag,
                model._temp_upper_mag[model._timestep_idx - 1],
                model._temp_lower_mag[model._timestep_idx - 1],
                model._erf_mag[model._timestep_idx - 1],
                model._lambda0_mag,
                model._a_mag,
                model._efficacy_mag,
                model._eta_mag,
                model._heat_capacity_upper_mag,
            ),
        )

        npt.assert_equal(
            model._temp_lower_mag[model._timestep_idx],
            model._calculate_next_temp_lower(
                model._delta_t_mag,
                model._temp_lower_mag[model._timestep_idx - 1],
                model._temp_upper_mag[model._timestep_idx - 1],
                model._eta_mag,
                model._heat_capacity_lower_mag,
            ),
        )

        npt.assert_equal(
            model._rndt_mag[model._timestep_idx],
            model._calculate_next_rndt(
                model._delta_t_mag,
                model._temp_lower_mag[model._timestep_idx],
                model._temp_lower_mag[model._timestep_idx - 1],
                model._heat_capacity_lower_mag,
                model._temp_upper_mag[model._timestep_idx],
                model._temp_upper_mag[model._timestep_idx - 1],
                model._heat_capacity_upper_mag,
            ),
        )

    def test_reset(self):
        """``reset`` clears the timestep index and NaN-fills all state arrays."""
        terf = np.array([0, 1, 2]) * ur("W/m^2")

        model = self.tmodel()
        model.set_drivers(terf)

        def assert_is_nan_and_erf_shape(inp):
            # state arrays are NaN-filled and shaped like the driver
            assert np.isnan(inp).all()
            assert inp.shape == terf.shape

        model.reset()
        # after reset, we are not in any timestep
        assert np.isnan(model._timestep_idx)
        assert_is_nan_and_erf_shape(model._temp_upper_mag)
        assert_is_nan_and_erf_shape(model._temp_lower_mag)
        assert_is_nan_and_erf_shape(model._rndt_mag)

    def test_reset_run_reset(self):
        """A full reset -> run -> reset cycle restores the NaN-filled state."""
        # move to integration tests
        terf = np.array([0, 1, 2, 3, 4, 5]) * ur("W/m^2")

        model = self.tmodel()
        model.set_drivers(terf)

        def assert_is_nan_and_erf_shape(inp):
            assert np.isnan(inp).all()
            assert inp.shape == terf.shape

        model.reset()
        assert_is_nan_and_erf_shape(model._temp_upper_mag)
        assert_is_nan_and_erf_shape(model._temp_lower_mag)
        assert_is_nan_and_erf_shape(model._rndt_mag)

        def assert_ge_zero_and_erf_shape(inp):
            # non-negative forcing must not produce negative temperatures/uptake
            assert not (inp < 0).any()
            assert inp.shape == terf.shape

        model.run()
        assert_ge_zero_and_erf_shape(model._temp_upper_mag)
        assert_ge_zero_and_erf_shape(model._temp_lower_mag)
        assert_ge_zero_and_erf_shape(model._rndt_mag)

        model.reset()
        assert_is_nan_and_erf_shape(model._temp_upper_mag)
        assert_is_nan_and_erf_shape(model._temp_lower_mag)
        assert_is_nan_and_erf_shape(model._rndt_mag)

    def test_get_impulse_response_parameters(self, check_equal_pint):
        """Two-layer -> impulse-response mapping matches the analytic solution
        (Geoffroy et al.) when the state-dependence term ``a`` is zero."""
        tdu = 35 * ur("m")
        tdl = 3200 * ur("m")
        tlambda0 = 4 / 3 * ur("W/m^2/delta_degC")
        tefficacy = 1.1 * ur("dimensionless")
        teta = 0.7 * ur("W/m^2/delta_degC")

        mod_instance = self.tmodel(
            du=tdu,
            dl=tdl,
            lambda0=tlambda0,
            a=0.0 * ur("W/m^2/delta_degC^2"),  # mapping only valid for a == 0
            efficacy=tefficacy,
            eta=teta,
        )

        # for explanation of what is going on, see
        # impulse-response-equivalence.ipynb
        C = mod_instance.heat_capacity_upper
        C_D = mod_instance.heat_capacity_lower

        b = (tlambda0 + tefficacy * teta) / C + teta / C_D
        b_star = (tlambda0 + tefficacy * teta) / C - teta / C_D
        delta = b ** 2 - 4 * tlambda0 * teta / (C * C_D)

        # mode timescales and partition coefficients of the coupled system
        tau1 = C * C_D / (2 * tlambda0 * teta) * (b - delta ** 0.5)
        tau2 = C * C_D / (2 * tlambda0 * teta) * (b + delta ** 0.5)
        phi1 = C / (2 * tefficacy * teta) * (b_star - delta ** 0.5)
        phi2 = C / (2 * tefficacy * teta) * (b_star + delta ** 0.5)

        expected = {
            "q1": tau1 * phi2 / (C * (phi2 - phi1)),
            "q2": -tau2 * phi1 / (C * (phi2 - phi1)),
            "d1": tau1,
            "d2": tau2,
            "efficacy": tefficacy,
        }

        a1 = tlambda0 * expected["q1"]
        a2 = tlambda0 * expected["q2"]

        # sanity check: mode fractions sum to one
        check_equal_pint(a1 + a2, 1 * ur("dimensionless"))

        res = mod_instance.get_impulse_response_parameters()

        assert res == expected

    def test_get_impulse_response_parameters_non_zero_a_raises(self):
        """Non-zero state dependence ``a`` makes the mapping invalid -> ValueError."""
        ta = 0.1 * ur("W/m^2/delta_degC^2")

        error_msg = re.escape(
            "Cannot calculate impulse response parameters with "
            "non-zero a={}".format(ta)
        )
        with pytest.raises(ValueError, match=error_msg):
            self.tmodel(a=ta).get_impulse_response_parameters()


def test_calculate_geoffroy_helper_parameters(check_equal_pint):
    """Check ``_calculate_geoffroy_helper_parameters`` against the analytic
    two-layer solution, including the cross-relationships between the
    timescales (tau), partition coefficients (phi) and mode fractions (a)."""
    tdu = 35 * ur("m")
    tdl = 3200 * ur("m")
    tlambda0 = 4 / 3 * ur("W/m^2/delta_degC")
    tefficacy = 1.1 * ur("dimensionless")
    teta = 0.7 * ur("W/m^2/delta_degC")

    # for explanation of what is going on, see
    # impulse-response-equivalence.ipynb
    C = DENSITY_WATER * HEAT_CAPACITY_WATER * tdu
    C_D = DENSITY_WATER * HEAT_CAPACITY_WATER * tdl

    b = (tlambda0 + tefficacy * teta) / C + teta / C_D
    b_star = (tlambda0 + tefficacy * teta) / C - teta / C_D
    delta = b ** 2 - 4 * tlambda0 * teta / (C * C_D)

    tau1 = C * C_D / (2 * tlambda0 * teta) * (b - delta ** 0.5)
    tau2 = C * C_D / (2 * tlambda0 * teta) * (b + delta ** 0.5)
    phi1 = C / (2 * tefficacy * teta) * (b_star - delta ** 0.5)
    phi2 = C / (2 * tefficacy * teta) * (b_star + delta ** 0.5)

    a1 = phi2 * tau1 * tlambda0 / (C * (phi2 - phi1))
    a2 = -phi1 * tau2 * tlambda0 / (C * (phi2 - phi1))

    expected = {
        "C": C,
        "C_D": C_D,
        "b": b,
        "b_star": b_star,
        "delta": delta,
        "tau1": tau1,
        "tau2": tau2,
        "phi1": phi1,
        "phi2": phi2,
        "a1": a1,
        "a2": a2,
    }

    # check relationships hold
    check_equal_pint(a1 + a2, 1 * ur("dimensionless"))
    check_equal_pint(a1 / tau1 + a2 / tau2, tlambda0 / C)
    check_equal_pint(a1 * tau1 + a2 * tau2, (C + tefficacy * C_D) / tlambda0)
    check_equal_pint(a1 * tau2 + a2 * tau1, C_D / teta)
    check_equal_pint(phi1 * a1 / tau1 + phi2 * a2 / tau2, 0 * ur("1/s"), atol=1e-10)
    check_equal_pint(tau1 * tau2, (C * C_D) / (tlambda0 * teta))
    check_equal_pint(C + phi1 * tefficacy * C_D, tlambda0 * tau1)
    check_equal_pint(C + phi2 * tefficacy * C_D, tlambda0 * tau2)
    check_equal_pint(phi1 * a1 + phi2 * a2, 1 * ur("dimensionless"))
    check_equal_pint(phi1 * phi2, -C / (tefficacy * C_D))

    res = _calculate_geoffroy_helper_parameters(
        du=tdu, dl=tdl, lambda0=tlambda0, efficacy=tefficacy, eta=teta
    )

    assert res == expected
--------------------------------------------------------------------------------
/tests/unit/test_utils.py:
--------------------------------------------------------------------------------
import re

import numpy.testing as npt
import pytest
from openscm_units import unit_registry

from openscm_twolayermodel.utils import convert_lambda_to_ecs


@pytest.mark.parametrize("ecs", (1.1, 2, 3, 5.4))
@pytest.mark.parametrize("f2x", (3, 3.5, 4))
@pytest.mark.parametrize("test_units", ("delta_degC", "mdelta_degC"))
def test_convert_lambda_to_ecs_with_units(ecs, f2x, test_units):
    """Round-trip: lambda = -f2x / ECS must convert back to the given ECS."""
    ecs = ecs * unit_registry("delta_degC")
    f2x = f2x * unit_registry("W/m^2")
    default_f2x = 3.74 * unit_registry("W/m^2")

    call_kwargs = {}
    # NOTE(review): the parametrize above only supplies 3/3.5/4 for f2x, so
    # this None branch (intended to exercise the default f2x) is dead — to
    # cover the default, add None to the f2x parametrize and guard the unit
    # multiplication above; confirm intent.
    if f2x is None:
        f2x_expected = default_f2x
    else:
        f2x_expected = f2x
        call_kwargs["f2x"] = f2x

    in_lambda = -f2x_expected / ecs
    npt.assert_allclose(
        convert_lambda_to_ecs(in_lambda, **call_kwargs).to(test_units).magnitude,
        ecs.to(test_units).magnitude,
    )


def test_convert_lambda_to_ecs_no_units_error_lambda_val():
    """A unitless lambda_val must raise TypeError."""
    with pytest.raises(TypeError, match=re.escape("lambda_val is not a pint.Quantity")):
        convert_lambda_to_ecs(-1.1)


def test_convert_lambda_to_ecs_no_units_error_f2x():
    """A unitless f2x must raise TypeError."""
    with pytest.raises(TypeError, match=re.escape("f2x is not a pint.Quantity")):
        convert_lambda_to_ecs(-1.1 * unit_registry("W/m^2/delta_degC"), f2x=3)
--------------------------------------------------------------------------------