├── .gitattributes ├── .github └── workflows │ ├── nightly.yml │ ├── publish.yml │ ├── pytest-legacy.yml │ ├── pytest.yml │ └── ruff.yml ├── .gitignore ├── .readthedocs.yaml ├── AUTHORS.rst ├── CITATION.cff ├── CONTRIBUTING.rst ├── LICENSE ├── MANIFEST.IN ├── NOTICE.md ├── PULL_REQUEST_TEMPLATE.md ├── README.md ├── RELEASE_NOTES.md ├── RELEASE_PROCEDURE.md ├── codecov.yml ├── docs ├── .gitignore ├── Makefile ├── README.md ├── R_tutorials │ ├── _static │ │ └── R.png │ └── pyam_R_tutorial.ipynb ├── _static │ ├── custom.css │ ├── iamc-logo.png │ ├── iamc_template.png │ └── logo.svg ├── _templates │ └── navigation.html ├── api.rst ├── api │ ├── compute.rst │ ├── database.rst │ ├── filtering.rst │ ├── general.rst │ ├── iamdataframe.rst │ ├── iiasa.rst │ ├── io.rst │ ├── plotting.rst │ ├── statistics.rst │ ├── testing.rst │ ├── timeseries.rst │ └── variables.rst ├── authors.rst ├── conf.py ├── contributing.rst ├── data.rst ├── examples │ ├── README.txt │ ├── plot_bar.py │ ├── plot_boxplot.py │ ├── plot_pie.py │ ├── plot_ranges.py │ ├── plot_sankey.py │ ├── plot_scatter.py │ ├── plot_secondary_axis.py │ ├── plot_stack.py │ ├── plot_timeseries.py │ ├── sankey_data.csv │ └── tutorial_data.csv ├── index.rst ├── install.rst ├── logos │ ├── pyam-header.png │ ├── pyam-logo.png │ └── pyam-social-media.png ├── make.bat ├── tutorials.rst └── tutorials │ ├── GAMS_to_pyam.ipynb │ ├── _static │ ├── cdlinks_logo.png │ ├── gams_logo.png │ └── iamc-logo.png │ ├── aggregating_downscaling_consistency.ipynb │ ├── aggregating_variables_and_plotting_with_negative_values.ipynb │ ├── algebraic_operations.ipynb │ ├── data_table_formats.ipynb │ ├── iiasa.ipynb │ ├── ipcc_colors.ipynb │ ├── legends.ipynb │ ├── pyam_first_steps.ipynb │ ├── pyam_logo.ipynb │ ├── quantiles.ipynb │ ├── subannual_time_resolution.ipynb │ ├── transport_tutorial.gdx │ ├── tutorial_data.csv │ ├── tutorial_data_aggregating_downscaling.csv │ ├── tutorial_data_subannual_time.csv │ ├── unfccc.ipynb │ └── unit_conversion.ipynb ├── manuscripts ├── JOSS │ ├── line.png │ ├── paper.bib │ ├── paper.md │ └── scatter.png └── ORE │ ├── Makefile │ ├── make.bat │ └── source │ ├── _bib │ └── main.bib │ ├── _static │ └── .gitkeep │ ├── chapters │ ├── appendix.rst │ ├── applications.rst │ ├── datamodels.rst │ ├── intro.rst │ ├── outlook.rst │ └── pyam.rst │ ├── conf.py │ ├── figure │ ├── overview.png │ ├── sr15_fig2.4.png │ └── temperature-by-category.png │ └── index.rst ├── poetry.lock ├── profile ├── .gitignore ├── README.md ├── conftest.py ├── data │ └── README.md ├── profile_report.py └── test_profile.py ├── pyam ├── __init__.py ├── _compare.py ├── _debiasing.py ├── _ops.py ├── _style.py ├── aggregation.py ├── compute.py ├── core.py ├── figures.py ├── filter.py ├── iiasa.py ├── index.py ├── ixmp4.py ├── logging.json ├── logging.py ├── netcdf.py ├── plotting.py ├── run_control.py ├── slice.py ├── statistics.py ├── str.py ├── testing.py ├── time.py ├── timeseries.py ├── unfccc.py ├── units.py ├── utils.py ├── validation.py └── worldbank.py ├── pyproject.toml └── tests ├── README.md ├── __init__.py ├── conftest.py ├── data ├── empty_meta_sheet.xlsx ├── exclude_meta_sheet.xlsx ├── exec.py ├── na_column.xlsx ├── plot_data.csv ├── plot_iso_data.csv ├── plot_region_data.csv ├── setup_iiasa_integration_test_instance.ipynb ├── test_RCP_database_raw_download.xlsx ├── test_SSP_database_raw_download.xlsx ├── test_df.nc ├── test_df.xls └── test_df.xlsx ├── expected_figs ├── test_add_panel_label.png ├── test_barplot.png ├── test_barplot_h.png ├── test_barplot_rc.png ├── 
test_barplot_stacked.png ├── test_barplot_stacked_net_line.png ├── test_barplot_stacked_order_by_list.png ├── test_barplot_stacked_order_by_rc.png ├── test_barplot_title.png ├── test_boxplot.png ├── test_boxplot_hue.png ├── test_line_PYAM_COLORS.png ├── test_line_color.png ├── test_line_color_fill_between.png ├── test_line_color_fill_between_interpolate.png ├── test_line_color_final_ranges.png ├── test_line_filter_title.png ├── test_line_linestyle_legend.png ├── test_line_marker_legend.png ├── test_line_no_legend.png ├── test_line_plot.png ├── test_line_plot_1_var.png ├── test_line_plot_2_vars.png ├── test_line_plot_bottom_legend.png ├── test_line_plot_cmap_color_arg.png ├── test_line_plot_dict_legend.png ├── test_line_plot_label.png ├── test_line_plot_label_color.png ├── test_line_plot_order_by_dict.png ├── test_line_plot_order_by_rc.png ├── test_line_rm_legend_label.png ├── test_line_single_color.png ├── test_line_update_rc.png ├── test_pie_plot_colors.png ├── test_pie_plot_labels.png ├── test_pie_plot_legend.png ├── test_pie_plot_other.png ├── test_scatter.png ├── test_scatter_meta.png ├── test_scatter_variables_with_meta_color.png ├── test_scatter_with_lines.png ├── test_stackplot.png ├── test_stackplot_missing_zero_issue_266.png ├── test_stackplot_negative.png ├── test_stackplot_negative_emissions.png ├── test_stackplot_negative_emissions_kwargs_custom_total.png ├── test_stackplot_negative_emissions_kwargs_def_total.png ├── test_stackplot_negative_emissions_with_total.png ├── test_stackplot_order_by_list.png ├── test_stackplot_order_by_rc.png └── test_stackplot_other.png ├── test_admin.py ├── test_cast_to_iamc.py ├── test_core.py ├── test_data.py ├── test_feature_aggregate.py ├── test_feature_append_concat.py ├── test_feature_compare.py ├── test_feature_debiasing.py ├── test_feature_downscale.py ├── test_feature_growth_rate.py ├── test_feature_interpolate.py ├── test_feature_learning_rate.py ├── test_feature_quantiles.py ├── test_feature_rename.py ├── test_feature_set_meta.py ├── test_feature_validation.py ├── test_filter.py ├── test_iiasa.py ├── test_index.py ├── test_io.py ├── test_io_unfccc.py ├── test_io_worldbank.py ├── test_ixmp4.py ├── test_logger.py ├── test_ops.py ├── test_plotting.py ├── test_run_control.py ├── test_slice.py ├── test_statistics.py ├── test_string.py ├── test_testing.py ├── test_time.py ├── test_timeseries.py ├── test_tutorials.py ├── test_units.py └── test_utils.py /.gitattributes: -------------------------------------------------------------------------------- 1 | pyam/_version.py export-subst 2 | -------------------------------------------------------------------------------- /.github/workflows/nightly.yml: -------------------------------------------------------------------------------- 1 | # This workflow installs the package, runs the tests and builds the docs 2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions 3 | 4 | name: nightly 5 | # This workflow is called nightly for consistency with programming convention 6 | # even though it is scheduled to run only once per week... 
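# The update-step below bumps all dependencies to their latest versions before
# running the tests, so failures point to upstream changes rather than to pyam itself.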
7 | 8 | on: 9 | schedule: 10 | # see https://docs.github.com/en/free-pro-team@latest/actions/reference/events-that-trigger-workflows#scheduled-events 11 | # 05:00 UTC = 06:00 CET = 07:00 CEST 12 | - cron: "0 5 * * TUE" 13 | 14 | jobs: 15 | pytest: 16 | strategy: 17 | matrix: 18 | os: 19 | - ubuntu-latest 20 | python-version: 21 | - '3.10' 22 | 23 | fail-fast: false 24 | 25 | 26 | runs-on: ${{ matrix.os }} 27 | name: ${{ matrix.os }} py${{ matrix.python-version }} 28 | 29 | steps: 30 | - uses: actions/checkout@v4 31 | 32 | - name: Set up Python ${{ matrix.python-version }} 33 | uses: actions/setup-python@v5 34 | with: 35 | python-version: ${{ matrix.python-version }} 36 | 37 | - name: Install Pandoc 38 | uses: r-lib/actions/setup-pandoc@v2 39 | 40 | #------------------------------ 41 | # install & configure poetry 42 | #------------------------------ 43 | - name: Install Poetry 44 | uses: snok/install-poetry@v1 45 | with: 46 | version: 2.1.2 47 | virtualenvs-create: true 48 | virtualenvs-in-project: true 49 | installer-parallel: true 50 | 51 | #------------------------------------------ 52 | # update dependencies to latest versions 53 | #------------------------------------------ 54 | - name: Update dependencies 55 | run: poetry update dev,docs,optional_io_formats,optional_plotting,tutorials,wbdata --lock 56 | 57 | #------------------------------------ 58 | # load cached venv if cache exists 59 | #------------------------------------ 60 | - name: Load cached venv 61 | id: cached-poetry-dependencies 62 | uses: actions/cache@v4 63 | with: 64 | path: .venv 65 | key: venv-${{ matrix.os }}-${{ matrix.python-version }}-${{ hashFiles('**/poetry.lock') }} 66 | 67 | #------------------------------------------------ 68 | # install dependencies if cache does not exist 69 | #------------------------------------------------ 70 | - name: Install dependencies 71 | if: steps.cached-poetry-dependencies.outputs.cache-hit != 'true' 72 | run: poetry install --no-interaction --with calamine,dev,docs,optional_io_formats,optional_plotting,tutorials,wbdata --no-root 73 | 74 | #------------------------ 75 | # install root project 76 | #------------------------ 77 | - name: Install library 78 | run: poetry install --no-interaction --only-root 79 | 80 | - name: Test with pytest (including Matplotlib) 81 | run: poetry run pytest tests --mpl 82 | 83 | - name: Build the docs 84 | run: poetry run make --directory=docs html 85 | -------------------------------------------------------------------------------- /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | name: publish 2 | 3 | on: 4 | push: 5 | # build the package on any push to a release-candidate branch 6 | branches: [ "release/rc_v*" ] 7 | # deploy to test-pypi on any push of a version tag 8 | tags: [ "v*" ] 9 | release: 10 | # deploy to pypi when publishing a new release 11 | types: [ published ] 12 | 13 | jobs: 14 | publish: 15 | runs-on: ubuntu-latest 16 | 17 | steps: 18 | - uses: actions/checkout@v4 19 | 20 | - uses: actions/setup-python@v5 21 | 22 | #------------------------------ 23 | # install & configure poetry 24 | #------------------------------ 25 | - name: Install Poetry 26 | uses: snok/install-poetry@v1 27 | with: 28 | version: 2.1.2 29 | virtualenvs-create: true 30 | virtualenvs-in-project: true 31 | installer-parallel: true 32 | - name: Install poetry dynamic versioning plugin 33 | run: poetry self add "poetry-dynamic-versioning[plugin]" 34 | - name: Build package 35 | run: poetry build 
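#----------------------------------------------------
# publish to test.pypi.org on any pushed version tag,
# and to pypi.org when a GitHub release is published
#----------------------------------------------------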
36 | 37 | - name: Publish to TestPyPI 38 | uses: pypa/gh-action-pypi-publish@v1.4.1 39 | if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags') 40 | with: 41 | user: __token__ 42 | password: ${{ secrets.TESTPYPI_TOKEN }} 43 | repository_url: https://test.pypi.org/legacy/ 44 | 45 | - name: Publish to PyPI 46 | uses: pypa/gh-action-pypi-publish@v1.4.1 47 | if: github.event_name == 'release' 48 | with: 49 | user: __token__ 50 | password: ${{ secrets.PYPI_TOKEN }} 51 | -------------------------------------------------------------------------------- /.github/workflows/pytest-legacy.yml: -------------------------------------------------------------------------------- 1 | # This workflow runs the tests with the oldest explicitly supported versions of dependencies 2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions 3 | 4 | name: pytest-legacy 5 | 6 | on: 7 | push: 8 | branches: [ 'main' ] 9 | pull_request: 10 | branches: [ '**' ] 11 | 12 | jobs: 13 | pytest-legacy: 14 | 15 | runs-on: ubuntu-latest 16 | 17 | steps: 18 | - uses: actions/checkout@v4 19 | 20 | - name: Set up Python 21 | id: setup-python 22 | uses: actions/setup-python@v5 23 | with: 24 | python-version: '3.10' 25 | 26 | #------------------------------ 27 | # install & configure poetry 28 | #------------------------------ 29 | - name: Install Poetry 30 | uses: snok/install-poetry@v1 31 | with: 32 | virtualenvs-create: true 33 | virtualenvs-in-project: true 34 | installer-parallel: true 35 | 36 | - name: Enforce usage of specific out-dated versions of dependencies 37 | # Update the package requirements when changing minimum dependency versions 38 | # Please also add a section "Dependency changes" to the release notes 39 | # Don't install packages, just update lock file to see if a cache exists 40 | run: | 41 | poetry add iam-units@2020.4.21 --lock 42 | poetry add matplotlib@3.6.0 --lock 43 | poetry add numpy@1.26.2 --lock 44 | poetry add pandas@2.1.2 --lock 45 | poetry add pint@0.13 --lock 46 | poetry add xlrd@2.0.1 --lock --group optional_io_formats 47 | poetry add xlsxwriter@3.0.3 --lock 48 | 49 | #------------------------------------ 50 | # load cached venv if cache exists 51 | #------------------------------------ 52 | - name: Load cached venv 53 | id: cached-poetry-dependencies 54 | uses: actions/cache@v4 55 | with: 56 | path: .venv 57 | key: venv-${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-${{ hashFiles('**/poetry.lock') }} 58 | 59 | #------------------------------------------------ 60 | # install dependencies if cache does not exist 61 | #------------------------------------------------ 62 | - name: Install dependencies 63 | if: steps.cached-poetry-dependencies.outputs.cache-hit != 'true' 64 | run: | 65 | poetry self add "poetry-dynamic-versioning[plugin]" && 66 | poetry install --no-interaction --with calamine,dev,optional_io_formats,optional_plotting,tutorials --no-root 67 | 68 | #------------------------ 69 | # install root project 70 | #------------------------ 71 | - name: Install library 72 | run: poetry install --no-interaction --only-root 73 | 74 | - name: Test with pytest 75 | run: poetry run pytest tests 76 | -------------------------------------------------------------------------------- /.github/workflows/pytest.yml: -------------------------------------------------------------------------------- 1 | # This workflow installs the package on several OS & Python versions and runs the tests 2 | # For more 
information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions 3 | 4 | name: pytest 5 | 6 | on: 7 | push: 8 | branches: [ 'main' ] 9 | pull_request: 10 | branches: [ '**' ] 11 | 12 | jobs: 13 | pytest: 14 | strategy: 15 | matrix: 16 | os: 17 | - ubuntu-latest 18 | - windows-latest 19 | # use non-Arm64 version for now due to failing numpy 20 | - macos-13 21 | python-version: 22 | - '3.10' 23 | - '3.11' 24 | - '3.12' 25 | - '3.13' 26 | # exclude Windows and 3.13 due to recurring segfault 27 | exclude: 28 | - os: windows-latest 29 | python-version: 3.13 30 | 31 | fail-fast: false 32 | 33 | defaults: 34 | run: 35 | shell: bash 36 | 37 | runs-on: ${{ matrix.os }} 38 | name: ${{ matrix.os }} py${{ matrix.python-version }} 39 | 40 | steps: 41 | - uses: actions/checkout@v4 42 | 43 | - name: Set up Python ${{ matrix.python-version }} 44 | id: setup-python 45 | uses: actions/setup-python@v5 46 | with: 47 | python-version: ${{ matrix.python-version }} 48 | 49 | #------------------------------ 50 | # install & configure poetry 51 | #------------------------------ 52 | - name: Install Poetry 53 | uses: snok/install-poetry@v1 54 | with: 55 | version: 2.1.2 56 | virtualenvs-create: true 57 | virtualenvs-in-project: true 58 | installer-parallel: true 59 | 60 | #------------------------------------ 61 | # load cached venv if cache exists 62 | #------------------------------------ 63 | - name: Load cached venv 64 | id: cached-poetry-dependencies 65 | uses: actions/cache@v4 66 | with: 67 | path: .venv 68 | key: venv-${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-${{ hashFiles('**/poetry.lock') }} 69 | #------------------------ 70 | # install your project 71 | #------------------------ 72 | - name: Install library 73 | run: | 74 | poetry self add "poetry-dynamic-versioning[plugin]" && 75 | poetry install --no-interaction --with calamine,dev,optional_io_formats,optional_plotting,tutorials,wbdata 76 | 77 | # run tests without Matplotlib & Codecov on earlier Python versions 78 | - name: Test with pytest 79 | if: ${{ matrix.python-version != '3.13' }} 80 | run: poetry run pytest tests 81 | 82 | # run tests with Matplotlib & Codecov on latest Python version 83 | - name: Test with pytest including Matplotlib & Codecov 84 | if: ${{ matrix.python-version == '3.13' }} 85 | run: poetry run pytest tests --mpl --cov=./ --cov-report=xml 86 | 87 | - name: Upload coverage report to Codecov 88 | if: ${{ matrix.os == 'ubuntu-latest' && matrix.python-version == '3.13' }} 89 | uses: codecov/codecov-action@v4 90 | with: 91 | file: ./coverage.xml 92 | env_vars: ${{ matrix.os }} py${{ matrix.python-version }} 93 | -------------------------------------------------------------------------------- /.github/workflows/ruff.yml: -------------------------------------------------------------------------------- 1 | name: linter 2 | on: [ push, pull_request ] 3 | jobs: 4 | ruff: 5 | runs-on: ubuntu-latest 6 | steps: 7 | - uses: actions/checkout@v4 8 | - uses: chartboost/ruff-action@v1 9 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # extras 2 | *~ 3 | ~* 4 | *csv 5 | *xlsx 6 | *png 7 | *jpg 8 | *pdf 9 | *zip 10 | 11 | # Apple file system 12 | *.DS_STORE 13 | 14 | # Byte-compiled / optimized / DLL files 15 | __pycache__/ 16 | *.py[cod] 17 | *$py.class 18 | .pytest_cache 19 | 20 | # C extensions 21 | *.so 22 | 23 | # Distribution / 
packaging 24 | .Python 25 | env/ 26 | build/ 27 | develop-eggs/ 28 | dist/ 29 | downloads/ 30 | eggs/ 31 | .eggs/ 32 | lib/ 33 | lib64/ 34 | parts/ 35 | sdist/ 36 | var/ 37 | wheels/ 38 | *.egg-info/ 39 | .installed.cfg 40 | *.egg 41 | 42 | # PyInstaller 43 | # Usually these files are written by a python script from a template 44 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 45 | *.manifest 46 | *.spec 47 | 48 | # Installer logs 49 | pip-log.txt 50 | pip-delete-this-directory.txt 51 | 52 | # Unit test / coverage reports 53 | htmlcov/ 54 | .tox/ 55 | .coverage 56 | .coverage.* 57 | .cache 58 | nosetests.xml 59 | coverage.xml 60 | *.cover 61 | .hypothesis/ 62 | 63 | # Translations 64 | *.mo 65 | *.pot 66 | 67 | # Django stuff: 68 | *.log 69 | local_settings.py 70 | 71 | # Flask stuff: 72 | instance/ 73 | .webassets-cache 74 | 75 | # Scrapy stuff: 76 | .scrapy 77 | 78 | # PyBuilder 79 | target/ 80 | 81 | # Jupyter Notebook 82 | .ipynb_checkpoints 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # celery beat schedule file 88 | celerybeat-schedule 89 | 90 | # SageMath parsed files 91 | *.sage.py 92 | 93 | # dotenv 94 | .env 95 | 96 | # virtualenv 97 | .venv 98 | venv/ 99 | ENV/ 100 | 101 | # Spyder project settings 102 | .spyderproject 103 | .spyproject 104 | 105 | # Rope project settings 106 | .ropeproject 107 | 108 | # mkdocs documentation 109 | /site 110 | 111 | # mypy 112 | .mypy_cache/ 113 | 114 | # pytest 115 | .pytest_cache/ 116 | 117 | # idea 118 | .idea/ 119 | *.iml 120 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | # .readthedocs.yaml 2 | # Read the Docs configuration file 3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 4 | 5 | # Required 6 | version: 2 7 | 8 | # Set the version of Python and other tools you might need 9 | build: 10 | os: ubuntu-22.04 11 | tools: 12 | python: "3.11" 13 | jobs: 14 | post_create_environment: 15 | # Install poetry 16 | # https://python-poetry.org/docs/#installing-manually 17 | - pip install poetry 18 | - poetry self add "poetry-dynamic-versioning[plugin]" 19 | post_install: 20 | # Install dependencies with required dependency groups 21 | # https://python-poetry.org/docs/managing-dependencies/#dependency-groups 22 | # VIRTUAL_ENV needs to be set manually for now. 
23 | # See https://github.com/readthedocs/readthedocs.org/pull/11152/ 24 | - VIRTUAL_ENV=$READTHEDOCS_VIRTUALENV_PATH poetry install --with docs,tutorials,optional_plotting 25 | 26 | # Build documentation in the docs/ directory with Sphinx 27 | sphinx: 28 | builder: html 29 | configuration: docs/conf.py 30 | -------------------------------------------------------------------------------- /AUTHORS.rst: -------------------------------------------------------------------------------- 1 | Authors and Developers 2 | ====================== 3 | 4 | The following persons contributed to the development of the |pyam| package: 5 | 6 | - Matthew Gidden `@gidden <https://github.com/gidden>`_ 7 | - Daniel Huppmann `@danielhuppmann <https://github.com/danielhuppmann>`_ 8 | - Zebedee Nicholls `@znicholls <https://github.com/znicholls>`_ 9 | - Nikolay Kushin `@zikolach <https://github.com/zikolach>`_ 10 | - Robin Lamboll `@Rlamboll <https://github.com/Rlamboll>`_ 11 | - Oliver Fricko `@OFR-IIASA <https://github.com/OFR-IIASA>`_ 12 | - Jonas Hörsch `@coroa <https://github.com/coroa>`_ 13 | - Paul Natsuo Kishimoto `@khaeru <https://github.com/khaeru>`_ 14 | - Thorsten Burandt `@tburandt <https://github.com/tburandt>`_ 15 | - Ross Ursino `@rossursino <https://github.com/rossursino>`_ 16 | - Maik Budzinski `@mabudz <https://github.com/mabudz>`_ 17 | - Jarmo Kikstra `@jkikstra <https://github.com/jkikstra>`_ 18 | - Michael Pimmer `@fonfon <https://github.com/fonfon>`_ 19 | - Patrick Jürgens `@pjuergens <https://github.com/pjuergens>`_ 20 | - Florian Maczek `@macflo8 <https://github.com/macflo8>`_ 21 | - Laura Wienpahl `@LauWien <https://github.com/LauWien>`_ 22 | - Philip Hackstock `@phackstock <https://github.com/phackstock>`_ 23 | - Pietro Monticone `@pitmonticone <https://github.com/pitmonticone>`_ 24 | - Edward Byers `@byersiiasa <https://github.com/byersiiasa>`_ 25 | - Fridolin Glatter `@glatterf42 <https://github.com/glatterf42>`_ 26 | - Linh Ho `@linhho <https://github.com/linhho>`_ 27 | 28 | | The core maintenance of the |pyam| package is done by 29 | the *Scenario Services & Scientific Software* research theme 30 | at the IIASA Energy, Climate, and Environment program. 31 | | Visit https://software.ece.iiasa.ac.at for more information. 32 | -------------------------------------------------------------------------------- /CITATION.cff: -------------------------------------------------------------------------------- 1 | cff-version: 1.1.0 2 | message: "If you use this package, please cite the corresponding manuscript in Open Research Europe." 3 | title: "pyam: analysis and visualization of integrated-assessment and macro-energy scenarios" 4 | repository: https://github.com/iamconsortium/pyam 5 | version: 1.0 6 | license: Apache-2.0 7 | journal: Open Research Europe 8 | doi: 10.12688/openreseurope.13633.2 9 | authors: 10 | - family-names: Huppmann 11 | given-names: Daniel 12 | orcid: https://orcid.org/0000-0002-7729-7389 13 | - family-names: Gidden 14 | given-names: Matthew J. 15 | orcid: https://orcid.org/0000-0003-0687-414X 16 | - family-names: Nicholls 17 | given-names: Zebedee 18 | orcid: https://orcid.org/0000-0002-4767-2723 19 | - family-names: Hörsch 20 | given-names: Jonas 21 | orcid: https://orcid.org/0000-0001-9438-767X 22 | - family-names: Lamboll 23 | given-names: Robin D. 24 | orcid: https://orcid.org/0000-0002-8410-037X 25 | - family-names: Kishimoto 26 | given-names: Paul Natsuo 27 | - family-names: Burandt 28 | given-names: Thorsten 29 | - family-names: Fricko 30 | given-names: Oliver 31 | - family-names: Byers 32 | given-names: Edward 33 | - family-names: Kikstra 34 | given-names: Jarmo S. 
35 | orcid: https://orcid.org/0000-0001-9405-1228 36 | - family-names: Brinkerink 37 | given-names: Maarten 38 | - family-names: Budzinski 39 | given-names: Maik 40 | orcid: https://orcid.org/0000-0003-2879-1193 41 | - family-names: Maczek 42 | given-names: Florian 43 | - family-names: Zwickl-Bernhard 44 | given-names: Sebastian 45 | - family-names: Welder 46 | given-names: Lara 47 | - family-names: Alvarez Quispe 48 | given-names: Erik Francisco 49 | orcid: https://orcid.org/0000-0003-3862-9747 50 | - family-names: Smith 51 | given-names: Christopher J. 52 | keywords: 53 | - integrated assessment 54 | - energy systems 55 | - macro-energy 56 | - modelling 57 | - scenario analysis 58 | - data visualisation 59 | - Python package 60 | -------------------------------------------------------------------------------- /CONTRIBUTING.rst: -------------------------------------------------------------------------------- 1 | Have a question? Get in touch! 2 | ------------------------------ 3 | 4 | - Send bug reports, suggest new features or view the source code `on GitHub`_, 5 | - Reach out via our community mailing list hosted by `groups.io`_, 6 | - Or send us an `email`_ to join our Slack_ workspace! 7 | 8 | .. _on GitHub: http://github.com/IAMconsortium/pyam 9 | .. _`groups.io`: https://groups.io/g/pyam 10 | .. _`email`: mailto:pyam+owner@groups.io?subject=[pyam]%20Please%20add%20me%20to%20the%20Slack%20workspace 11 | .. _Slack: https://slack.com 12 | 13 | Interested in contributing? Join the team! 14 | ------------------------------------------ 15 | 16 | The pyam package has been developed with the explicit aim to facilitate 17 | open and collaborative analysis of integrated assessment and climate models. 18 | We appreciate contributions to the code base and development of new features. 19 | 20 | Please use the GitHub *Issues* feature to raise questions concerning potential 21 | bugs or to propose new features, but search and read resolved/closed topics on 22 | similar subjects before raising a new issue. 23 | 24 | For contributions to the code base, please use GitHub *Pull Requests*, 25 | including a detailed description of the new feature and unit tests 26 | to illustrate the intended functionality. 27 | Code submitted via pull requests must adhere to the `pep8`_ style formats 28 | and the documentation should follow the `numpydoc docstring guide`_. We are 29 | using `ruff`_ to check the code style. 30 | 31 | We do not require users to sign a *Contributor License Agreement*, because we 32 | believe that when posting ideas or submitting code to an open-source project, 33 | it should be obvious and self-evident that any such contributions 34 | are made in the spirit of open collaborative development. 35 | 36 | Setup 37 | ----- 38 | 39 | .. code-block:: bash 40 | 41 | # Install Poetry, minimum version >=1.2 required 42 | curl -sSL https://install.python-poetry.org | python - 43 | 44 | # You may have to reinitialize your shell at this point. 45 | source ~/.bashrc 46 | 47 | # Activate in-project virtualenvs 48 | poetry config virtualenvs.in-project true 49 | 50 | # Add dynamic versioning plugin 51 | poetry self add "poetry-dynamic-versioning[plugin]" 52 | 53 | # Install dependencies 54 | # (using "--with dev,..." 
if optional dependency groups should be installed as well) 55 | poetry install --with dev,docs,optional_io_formats,optional_plotting,tutorials,wbdata,unfccc 56 | 57 | # Activate virtual environment 58 | poetry shell 59 | 60 | 61 | Update poetry 62 | ^^^^^^^^^^^^^ 63 | 64 | Developing pyam requires poetry ``>= 1.2``. 65 | 66 | If you already have a previous version of poetry installed you will need to update. The 67 | first step is removing the old poetry version: 68 | 69 | .. code-block:: bash 70 | 71 | curl -sSL https://install.python-poetry.org | python3 - --uninstall 72 | 73 | 74 | After that, the latest poetry version can be installed using: 75 | 76 | .. code-block:: bash 77 | 78 | curl -sSL https://install.python-poetry.org | python3 - 79 | 80 | 81 | Details can be found in the poetry docs: 82 | https://python-poetry.org/docs/#installation. 83 | 84 | Resolve conflicts in poetry.lock 85 | -------------------------------- 86 | 87 | When updating dependencies, a conflict can occur between the current and the 88 | target poetry.lock file. In this case, take the following steps to 89 | resolve the conflict. 90 | 91 | #. Do not attempt to manually resolve in the GitHub web interface. 92 | #. Instead, check out the target branch locally and merge it into your branch: 93 | 94 | .. code-block:: bash 95 | 96 | git checkout main 97 | git pull origin main 98 | git checkout my-branch 99 | git merge main 100 | 101 | 102 | #. After the last step you'll have a merge conflict in poetry.lock. 103 | #. Instead of resolving the conflict, directly checkout the one from main and rewrite 104 | it: 105 | 106 | .. code-block:: bash 107 | 108 | # Get poetry.lock to look like it does in main 109 | git checkout main poetry.lock 110 | # Rewrite the lock file 111 | poetry lock --no-update 112 | 113 | #. After that, simply add poetry.lock to mark the conflict as resolved and commit to 114 | finalize the merge: 115 | 116 | .. code-block:: bash 117 | 118 | git add poetry.lock 119 | git commit 120 | 121 | # and most likely needed 122 | poetry install 123 | 124 | (Taken from https://www.peterbe.com/plog/how-to-resolve-a-git-conflict-in-poetry.lock) 125 | 126 | .. _`pep8`: https://www.python.org/dev/peps/pep-0008/ 127 | 128 | .. _`numpydoc docstring guide`: https://numpydoc.readthedocs.io/en/latest/format.html 129 | 130 | .. _`ruff`: https://docs.astral.sh/ruff/ 131 | -------------------------------------------------------------------------------- /MANIFEST.IN: -------------------------------------------------------------------------------- 1 | exclude .gitignore 2 | exclude appveyor.yml 3 | exclude .travis.yml 4 | recursive-exclude ci * 5 | 6 | include README.md 7 | include setup.cfg 8 | include LICENSE 9 | include versioneer.py 10 | include pyam/_version.py 11 | 12 | prune .cache 13 | prune .git 14 | prune build 15 | prune dist 16 | 17 | recursive-exclude *.egg-info * 18 | -------------------------------------------------------------------------------- /NOTICE.md: -------------------------------------------------------------------------------- 1 | Copyright 2017-2024 IIASA and the pyam developer team 2 | 3 | The **pyam** package is licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use the package except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | -------------------------------------------------------------------------------- /PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | # Please confirm that this PR has done the following: 2 | 3 | - [ ] Tests Added 4 | - [ ] Documentation Added 5 | - [ ] Names of contributors Added to AUTHORS.rst 6 | - [ ] Description in RELEASE_NOTES.md Added 7 | 8 | ## Adding to RELEASE_NOTES.md (remove section after adding to RELEASE_NOTES.md) 9 | 10 | Please add a single line in the release notes similar to the following: 11 | 12 | ``` 13 | - [#XX](http://link-to-pr.com) Added feature which does something 14 | ``` 15 | 16 | # Description of PR 17 | 18 | Please describe the changes introduced by this PR. 19 | -------------------------------------------------------------------------------- /RELEASE_PROCEDURE.md: -------------------------------------------------------------------------------- 1 | # Release procedure 2 | 3 | ## Required accounts and admin privileges 4 | 5 | - pip (automated via GitHub Actions): 6 | - https://pypi.org/project/pyam-iamc/ 7 | - https://test.pypi.org/project/pyam-iamc/ 8 | - conda: https://github.com/conda-forge/pyam-feedstock/ 9 | - ReadTheDocs (automated): https://readthedocs.org/projects/pyam-iamc/ 10 | 11 | ## Steps for publishing a new release 12 | 13 | 1. Create a release candidate branch named `release/rc_v<version>` 14 | and pull request it into `main` with the following updates: 15 | 1. If it's the first release in a new year, 16 | search for `Copyright 2017` and update the end-year of the copyright tag 17 | 2. Deprecate any portion of the API marked for removal in this release 18 | (you can find them by searching the code base for "deprecate") 19 | 3. Update `RELEASE_NOTES.md` (see the examples of previous releases) 20 | - Replace "# Next Release" with "# Release v<version>" 21 | - Add a "## Highlights" section with the most important updates & changes 22 | - If applicable, add/review "## Dependency changes" and "## API changes" sections 23 | - Add a new heading "## Individual Updates" before the list of individual PRs 24 | 4. Confirm that the PR passes all tests 25 | 5. Tag the release candidate `git tag v<version>rc<n>`, 26 | e.g., `git tag v1.2.0rc1`, and push to the upstream repository 27 | 6. Confirm that the "publish" workflow passes 28 | https://github.com/IAMconsortium/pyam/actions/workflows/publish.yml 29 | 7. Confirm that the release is published on https://test.pypi.org/project/pyam-iamc/ 30 | - The package can be downloaded, installed and run 31 | - The README is rendered correctly 32 | 8. If there are any problems, fix the issues and repeat from step 5, 33 | bumping the release candidate number 34 | 9. If successful, merge the candidate PR into `main` and then delete the branch 35 | 2. Switch to the updated main branch: `git checkout main` and `git pull upstream main` 36 | 3. Tag the release number: `git tag v<version>`, e.g., `git tag v1.2.0` 37 | 4. Push the tag upstream: `git push upstream --tags` 38 | 5. 
Make a new release on GitHub 39 | - Make sure that you choose the tag name defined above 40 | - Copy the release summary from `RELEASE_NOTES.md` into the description box 41 | 6. Confirm that the "publish" workflow passes 42 | https://github.com/IAMconsortium/pyam/actions/workflows/publish.yml 43 | 7. Confirm that the release is published on https://www.pypi.org/project/pyam-iamc/ 44 | 8. Update on `conda-forge` 45 | - A PR should automatically be opened by the bot after the GitHub release 46 | - Confirm that any new dependencies are included, 47 | change the minimum dependency version if necessary 48 | (compare to ./.github/workflows/pytest-legacy.yml) 49 | - Merge the PR 50 | - Check that the new version is available on https://anaconda.org/conda-forge/pyam 51 | 9. Confirm that the doc pages are updated on https://pyam-iamc.readthedocs.io/ 52 | - Both the **latest** and the **stable** versions point to the new release 53 | - The new release has been added to the list of available versions 54 | 10. Add a new first line "# Next Release" in `RELEASE_NOTES.md` and commit to `main` 55 | 11. Announce it to our community 56 | - The mailing list (https://pyam.groups.io) - copy the rendered HTML 57 | from the GitHub release and use the subject line `#release v<version>` 58 | - The Slack channel 59 | - Social media using the tag `#pyam_iamc` 60 | 61 | And that's it! Whew... 62 | -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | coverage: 2 | precision: 1 3 | status: 4 | project: 5 | default: 6 | threshold: 0.2% 7 | patch: 8 | default: 9 | informational: true 10 | 11 | ignore: 12 | - pyam/_version.py 13 | - versioneer.py 14 | -------------------------------------------------------------------------------- /docs/.gitignore: -------------------------------------------------------------------------------- 1 | # exclude build folder 2 | _build 3 | build 4 | 5 | # exclude built gallery folder (built by sphinx-gallery module) 6 | sg_execution_times.rst 7 | gallery 8 | modules 9 | 10 | #* 11 | .#* 12 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # This file was generated with sphinx-quickstart version 3.5.0 3 | # 4 | 5 | # You can set these variables from the command line, and also 6 | # from the environment for the first two. 7 | SPHINXOPTS ?= 8 | SPHINXBUILD ?= sphinx-build 9 | SOURCEDIR = . 10 | BUILDDIR = _build 11 | 12 | # Put it first so that "make" without argument is like "make help". 13 | help: 14 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 15 | 16 | .PHONY: help Makefile 17 | 18 | # Catch-all target: route all unknown targets to Sphinx using the new 19 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 20 | %: Makefile 21 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 22 | -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | Building the docs 2 | ================== 3 | 4 | We use Sphinx and reStructuredText (rst) for building the documentation pages. 5 | Detailed documentation of the package is built from mark-up docstrings 6 | in the source code. 
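Docstrings should follow the numpydoc conventions referenced in CONTRIBUTING.rst
(https://numpydoc.readthedocs.io/en/latest/format.html). As a minimal sketch,
a hypothetical function would be documented like this:

```
def add(a, b):
    """Return the sum of two numbers.

    Parameters
    ----------
    a, b : float
        The numbers to be added.

    Returns
    -------
    float
        The sum of `a` and `b`.
    """
    return a + b
```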
7 | 8 | Dependencies 9 | ------------ 10 | 11 | To install the **pyam** package and all dependencies, run the following 12 | (in the top-level directory of this repository). 13 | 14 | ``` 15 | pip install --editable .[docs,tutorials,optional_plotting] 16 | ``` 17 | 18 | Writing in reStructuredText 19 | ---------------------------- 20 | 21 | There are a number of guides to get started, for example 22 | on [sourceforge](https://docutils.sourceforge.io/docs/user/rst/quickref.html). 23 | 24 | Building the documentation pages 25 | -------------------------------- 26 | 27 | On *nix, from the command line, run:: 28 | 29 | make html 30 | 31 | On Windows, from the command line, run:: 32 | 33 | make.bat html 34 | 35 | The rendered html pages will be located in `docs/_build/html/index.html`. -------------------------------------------------------------------------------- /docs/R_tutorials/_static/R.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/docs/R_tutorials/_static/R.png -------------------------------------------------------------------------------- /docs/_static/custom.css: -------------------------------------------------------------------------------- 1 | div.body h1 {font-size: 200%} 2 | 3 | div.admonition p.admonition-title { 4 | font-size: 17px; 5 | font-weight: bold; 6 | } 7 | 8 | div.admonition code.xref:hover { 9 | background-color: #AAA; 10 | } 11 | 12 | dl.py { 13 | margin: 20px 0 0 0; 14 | } 15 | -------------------------------------------------------------------------------- /docs/_static/iamc-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/docs/_static/iamc-logo.png -------------------------------------------------------------------------------- /docs/_static/iamc_template.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/docs/_static/iamc_template.png -------------------------------------------------------------------------------- /docs/_templates/navigation.html: -------------------------------------------------------------------------------- 1 | 

<h3>{{ _('Navigation') }}</h3> 6 | {{ toctree(maxdepth=0, includehidden=theme_sidebar_includehidden, collapse=theme_sidebar_collapse) }} 7 | {% if theme_extra_nav_links %} 8 | <hr /> 9 | <ul> 10 | {% for text, uri in theme_extra_nav_links.items() %} 11 | <li><a href="{{ uri }}">{{ text }}</a></li> 12 | {% endfor %} 13 | </ul>
14 | {% endif %} 15 | -------------------------------------------------------------------------------- /docs/api.rst: -------------------------------------------------------------------------------- 1 | .. currentmodule:: pyam 2 | 3 | API Reference 4 | ============= 5 | 6 | This page gives an overview of the public |pyam| features, objects, functions 7 | and methods. 8 | 9 | .. toctree:: 10 | :maxdepth: 1 11 | 12 | api/io 13 | api/general 14 | api/iamdataframe 15 | api/database 16 | api/filtering 17 | api/compute 18 | api/plotting 19 | api/iiasa 20 | api/statistics 21 | api/testing 22 | api/timeseries 23 | api/variables 24 | 25 | Intersphinx mapping 26 | ------------------- 27 | 28 | To use sphinx.ext.intersphinx_ for generating automatic links from your project 29 | to the documentation of |pyam| classes and functions, please add the following 30 | to your project's :code:`conf.py`: 31 | 32 | .. _sphinx.ext.intersphinx : https://www.sphinx-doc.org/en/master/usage/extensions/intersphinx.html 33 | 34 | .. code-block:: python 35 | 36 | intersphinx_mapping = { 37 | 'pyam': ('https://pyam-iamc.readthedocs.io/en/stable/', None), 38 | } 39 | -------------------------------------------------------------------------------- /docs/api/compute.rst: -------------------------------------------------------------------------------- 1 | .. currentmodule:: pyam.compute 2 | 3 | Derived timeseries data 4 | ======================= 5 | 6 | .. autoclass:: IamComputeAccessor 7 | :members: 8 | -------------------------------------------------------------------------------- /docs/api/database.rst: -------------------------------------------------------------------------------- 1 | .. currentmodule:: pyam 2 | 3 | Data resources integration 4 | ========================== 5 | 6 | Connecting to an IIASA database instance 7 | ---------------------------------------- 8 | 9 | IIASA's ixmp Scenario Explorer infrastructure implements a REST API 10 | to directly query the database server connected to an explorer instance. 11 | See https://docs.ece.iiasa.ac.at/ for more information. 12 | 13 | The |pyam| package uses this interface to read timeseries data as well as 14 | categorization and quantitative meta indicators. 15 | The data is returned as an :class:`IamDataFrame`. 16 | See `this tutorial <../tutorials/iiasa.html>`_ for more information. 17 | 18 | .. autofunction:: read_iiasa 19 | 20 | .. autofunction:: lazy_read_iiasa 21 | 22 | Reading from an |ixmp4| platform 23 | -------------------------------- 24 | 25 | The |pyam| package provides a simple interface to read timeseries data and meta 26 | indicators from local or remote |ixmp4| platform instances. 27 | 28 | .. autofunction:: read_ixmp4 29 | 30 | Reading UNFCCC inventory data 31 | ----------------------------- 32 | 33 | The package :class:`unfccc-di-api` 34 | (`read the docs <https://unfccc-di-api.readthedocs.io>`_) 35 | provides an interface to the UNFCCC Data Inventory API 36 | (`link <https://di.unfccc.int>`_). 37 | The |pyam| package uses this package to query inventory data and 38 | return the timeseries data directly as an :class:`IamDataFrame`. 39 | 40 | .. autofunction:: read_unfccc 41 | 42 | Connecting to World Bank data resources 43 | --------------------------------------- 44 | 45 | The package :class:`pandas-datareader` 46 | (`read the docs <https://pandas-datareader.readthedocs.io>`_) 47 | implements a number of connections to publicly accessible data resources, 48 | e.g., the `World Bank Open Data Catalog <https://data.worldbank.org>`_. 49 | |pyam| provides a simple utility function to cast the queried timeseries data 50 | directly as an :class:`IamDataFrame`. 51 | 52 | .. 
autofunction:: read_worldbank 53 | -------------------------------------------------------------------------------- /docs/api/filtering.rst: -------------------------------------------------------------------------------- 1 | .. currentmodule:: pyam 2 | 3 | Filtering and slicing 4 | ===================== 5 | 6 | Arguments for filtering an :class:`IamDataFrame` 7 | ------------------------------------------------ 8 | 9 | The |pyam| package provides several methods to filter an :class:`IamDataFrame` by its 10 | (timeseries) **data** or **meta** values. Read more about the `Data Model <../data.html>`_ 11 | that is implemented by an :class:`IamDataFrame`. 12 | 13 | The following arguments are available for filtering and can be combined as needed: 14 | 15 | Index 16 | ^^^^^ 17 | - A *column* of the :attr:`IamDataFrame.index` 18 | (usually '**model**' and '**scenario**'): string or list of strings 19 | - '**index**': list of model/scenario-tuples or a :class:`pandas.MultiIndex` 20 | 21 | Timeseries data coordinates 22 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ 23 | - Any *column* of the :attr:`IamDataFrame.coordinates <pyam.IamDataFrame.coordinates>` 24 | ('**region**', '**variable**', '**unit**'): string or list of strings 25 | - '**measurand**': a tuple (or list of tuples) of '*variable*' and '*unit*' 26 | - '**depth**': the "depth" of entries in the '*variable*' column (number of '|') 27 | - '**level**': the "depth" of entries in the '*variable*' column (number of '|'), 28 | excluding the strings in the '*variable*' argument (if given) 29 | - '**year**': takes an integer (int/:class:`numpy.int64`), a list of integers or 30 | a range. Note that the last year of a range is not included, 31 | so ``range(2010, 2015)`` is interpreted as ``[2010, ..., 2014]`` 32 | - '**time_domain**': can be 'year' or 'datetime' 33 | - Arguments for filtering by :class:`datetime.datetime` or :class:`numpy.datetime64` 34 | ('**month**', '**hour**', '**time**') 35 | 36 | Meta indicators and other attributes 37 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 38 | - Any *column* of the :attr:`IamDataFrame.meta <pyam.IamDataFrame.meta>` dataframe: 39 | string, integer, float, or list of these 40 | - '**exclude**' (see :attr:`IamDataFrame.exclude <pyam.IamDataFrame.exclude>`): boolean 41 | 42 | .. note:: 43 | 44 | In any string filters, '*' is interpreted as wildcard, unless the keyword argument 45 | *regexp=True* is used; in this case, strings are treated as 46 | `regular expressions <https://docs.python.org/3/library/re.html>`_. 47 | 48 | Methods for filtering and slicing an :class:`IamDataFrame` 49 | ---------------------------------------------------------- 50 | 51 | .. automethod:: pyam.IamDataFrame.filter 52 | :noindex: 53 | 54 | .. automethod:: pyam.IamDataFrame.slice 55 | :noindex: 56 | 57 | The **IamSlice** class 58 | ---------------------- 59 | 60 | This class is an auxiliary feature to streamline the implementation of the 61 | :meth:`IamDataFrame.filter` method. 62 | 63 | .. autoclass:: pyam.slice.IamSlice 64 | :members: dimensions, time, info 65 | 66 | Filtering using a proxy :class:`IamDataFrame` 67 | --------------------------------------------- 68 | 69 | |pyam| includes a function to directly filter a :class:`pandas.DataFrame` 70 | with appropriate columns or index dimensions (i.e., '*model*' and '*scenario*') using 71 | an :class:`IamDataFrame` and keyword arguments similar to :meth:`IamDataFrame.filter`. 72 | 73 | .. autofunction:: pyam.filter_by_meta 74 | -------------------------------------------------------------------------------- /docs/api/general.rst: -------------------------------------------------------------------------------- 1 | .. 
currentmodule:: pyam 2 | 3 | General functions 4 | ================= 5 | 6 | .. autofunction:: concat 7 | 8 | .. autofunction:: compare 9 | 10 | .. autofunction:: require_variable 11 | 12 | .. autofunction:: validate 13 | 14 | .. autofunction:: categorize 15 | 16 | .. autofunction:: check_aggregate 17 | -------------------------------------------------------------------------------- /docs/api/iamdataframe.rst: -------------------------------------------------------------------------------- 1 | .. currentmodule:: pyam 2 | 3 | The **IamDataFrame** class 4 | ========================== 5 | 6 | .. autoclass:: IamDataFrame 7 | :members: 8 | -------------------------------------------------------------------------------- /docs/api/iiasa.rst: -------------------------------------------------------------------------------- 1 | .. currentmodule:: pyam.iiasa 2 | 3 | Databases hosted by IIASA 4 | ========================= 5 | 6 | The |pyam| package allows you to directly query the scenario databases hosted by the 7 | IIASA Energy, Climate and Environment program (ECE), commonly known as 8 | the *Scenario Explorer* infrastructure. It is developed and maintained 9 | by the ECE `Scenario Services and Scientific Software team`_. 10 | 11 | .. _`Scenario Services and Scientific Software team` : https://software.ece.iiasa.ac.at 12 | 13 | You do not have to provide username/password credentials to connect to any public 14 | database instance using |pyam|. However, to connect to project-internal databases, 15 | you have to create an account at the IIASA-ECE *Manager Service* 16 | (https://manager.ece.iiasa.ac.at). Please contact the respective project coordinator 17 | for permission to access a project-internal database. 18 | 19 | To store the credentials on your machine so that |pyam| can use them to query a database, 20 | we depend on the Python package |ixmp4|. You only have to do this once 21 | (unless you change your password). 22 | 23 | The credentials will be valid for connecting to *Scenario Apps* based on |ixmp4| 24 | as well as for (legacy) *Scenario Explorer* database backends (see below). 25 | 26 | In a console, run the following: 27 | 28 | .. code-block:: console 29 | 30 | ixmp4 login <username> 31 | 32 | You will be prompted to enter your password. 33 | 34 | .. warning:: 35 | 36 | Your username and password will be saved locally in plain-text for future use! 37 | 38 | *Scenario Apps* instances 39 | ------------------------- 40 | 41 | The *Scenario Apps* use the |ixmp4| package as a database backend. 42 | You can list all available ixmp4 platforms hosted by IIASA using the following function: 43 | 44 | .. autofunction:: platforms 45 | 46 | *Scenario Explorer* instances (legacy service) 47 | ---------------------------------------------- 48 | 49 | The *Scenario Explorer* infrastructure, developed by the Scenario Services and Scientific 50 | Software team, was used for projects from 2018 until 2023. 51 | 52 | See `this tutorial <../tutorials/iiasa.html>`_ for more information. 53 | 54 | .. autoclass:: Connection 55 | :members: 56 | 57 | .. autofunction:: read_iiasa 58 | :noindex: 59 | 60 | .. autofunction:: lazy_read_iiasa 61 | :noindex: 62 | -------------------------------------------------------------------------------- /docs/api/io.rst: -------------------------------------------------------------------------------- 1 | .. 
currentmodule:: pyam 2 | 3 | Input/output file formats 4 | ========================= 5 | 6 | DataFrames and xlsx/csv files 7 | ----------------------------- 8 | 9 | A :class:`pandas.DataFrame` or a path to an *xlsx* or *csv* with data in the required 10 | structure (i.e., index/columns) can be imported directly by initializing an 11 | :class:`IamDataFrame` - see `this tutorial <../tutorials/data_table_formats.html>`_ for 12 | more information. 13 | 14 | Exporting to these formats is implemented via the following functions: 15 | 16 | .. automethod:: IamDataFrame.as_pandas 17 | :noindex: 18 | 19 | .. automethod:: IamDataFrame.to_excel 20 | :noindex: 21 | 22 | .. automethod:: IamDataFrame.to_csv 23 | :noindex: 24 | 25 | Integration with netcdf files 26 | ----------------------------- 27 | 28 | `NetCDF <https://www.unidata.ucar.edu/software/netcdf/>`_ is a powerful file format that 29 | can efficiently store multiple scientific variables sharing the same dimensions. 30 | In climate science, data such as temperature, precipitation and radiation can be stored 31 | in four dimensions: a time dimension and three spatial dimensions (latitude, longitude, 32 | altitude). 33 | 34 | The |pyam| package supports reading and writing to netcdf files that have the following 35 | structure: 36 | 37 | - **Timeseries data** are stored such that each *variable* (in the sense of the IAMC 38 | format) is a separate netcdf-data-variable with the dimensions *time*, *model*, 39 | *scenario* and *region*. The *unit* is given as an attribute of the data variable. 40 | The *long_name* attribute is used as the *variable* in the :class:`IamDataFrame`. 41 | The *time* dimension can be either a datetime format or given as years (integer). 42 | 43 | - **Meta indicators** are stored as netcdf-data-variables with the dimensions *model* 44 | and *scenario*. 45 | 46 | Read more about :ref:`pyam_data_model`. The :attr:`exclude <pyam.IamDataFrame.exclude>` 47 | attribute is not written to netcdf files. 48 | 49 | .. autofunction:: read_netcdf 50 | 51 | .. automethod:: IamDataFrame.to_netcdf 52 | :noindex: 53 | 54 | .. automethod:: IamDataFrame.to_xarray 55 | :noindex: 56 | 57 | The frictionless Data Package 58 | ----------------------------- 59 | 60 | The |pyam| package supports reading and writing to the 61 | `frictionless Data Package <https://frictionlessdata.io>`_. 62 | 63 | .. autofunction:: read_datapackage 64 | 65 | .. automethod:: IamDataFrame.to_datapackage 66 | :noindex: 67 | -------------------------------------------------------------------------------- /docs/api/plotting.rst: -------------------------------------------------------------------------------- 1 | .. currentmodule:: pyam 2 | 3 | Plotting library 4 | ================ 5 | 6 | The plotting API(s) 7 | ------------------- 8 | 9 | There are three ways to use the |pyam| plotting library. 10 | 11 | 1. Using the plot feature as an attribute of the :class:`IamDataFrame`: 12 | 13 | .. code-block:: python 14 | 15 | IamDataFrame.plot.<kind>(**kwargs) 16 | 17 | 2. Using the plot feature as a function with a `kind` keyword argument: 18 | 19 | .. code-block:: python 20 | 21 | IamDataFrame.plot(kind='<kind>', **kwargs) 22 | 23 | This function defaults to the :meth:`pyam.plotting.line` type. 24 | 25 | 3. Calling any function of either the :mod:`plotting` 26 | or the :mod:`figures` module directly via 27 | 28 | .. code-block:: python 29 | 30 | pyam.<module>.<kind>(df, **kwargs) 31 | 32 | where `df` is either an :class:`IamDataFrame` 33 | or a suitable :class:`pandas.DataFrame`. 34 | 35 | Check out the `Plotting Gallery`_ for examples! 36 | 37 | .. 
_`Plotting Gallery` : ../gallery/index.html 38 | 39 | The RunControl configuration 40 | ---------------------------- 41 | 42 | The |pyam| plotting library provides a thin :class:`RunControl` wrapper 43 | around a Python :class:`dictionary` for plotting-style defaults, 44 | like setting colors or linestyles for certain model or scenario names. 45 | 46 | .. autofunction:: pyam.run_control 47 | 48 | Input can be provided as nested dictionaries of the structure 49 | :code:`type > dimension > name > value`, e.g., 50 | 51 | .. code-block:: python 52 | 53 | pyam.run_control().update( 54 | {'color': {'scenario': {'test_scenario': 'black'}}} 55 | ) 56 | 57 | or as the path to a yaml file with a similar structure: 58 | 59 | .. code-block:: python 60 | 61 | pyam.run_control().update(<file.yaml>) 62 | 63 | See `this example`_ from the AR6 WG1 using |pyam| plotting via a `yaml` file. 64 | 65 | .. _`this example`: https://github.com/gidden/ar6-wg1-ch6-emissions/blob/master/plotting.yaml 66 | 67 | The :meth:`IamDataFrame.categorize` function also appends any style arguments 68 | to the RunControl. 69 | 70 | Plotting functions 71 | ------------------ 72 | 73 | .. autofunction:: pyam.plotting.line 74 | 75 | .. autofunction:: pyam.plotting.stack 76 | 77 | .. autofunction:: pyam.plotting.bar 78 | 79 | .. autofunction:: pyam.plotting.box 80 | 81 | .. autofunction:: pyam.plotting.scatter 82 | 83 | .. autofunction:: pyam.plotting.pie 84 | 85 | .. autofunction:: pyam.figures.sankey 86 | -------------------------------------------------------------------------------- /docs/api/statistics.rst: -------------------------------------------------------------------------------- 1 | .. currentmodule:: pyam 2 | 3 | The **Statistics** class 4 | ======================== 5 | 6 | This class provides a wrapper for generating descriptive summary statistics 7 | of timeseries data from a scenario ensemble. 8 | It internally uses the :meth:`pandas.DataFrame.describe` function 9 | and hides the tedious work of filters, groupbys and merging of dataframes. 10 | 11 | .. autoclass:: Statistics 12 | :members: 13 | -------------------------------------------------------------------------------- /docs/api/testing.rst: -------------------------------------------------------------------------------- 1 | .. currentmodule:: pyam 2 | 3 | The **testing** module 4 | ====================== 5 | 6 | .. automodule:: pyam.testing 7 | :members: 8 | -------------------------------------------------------------------------------- /docs/api/timeseries.rst: -------------------------------------------------------------------------------- 1 | .. currentmodule:: pyam.timeseries 2 | 3 | Timeseries functions 4 | ==================== 5 | 6 | The |pyam| package includes several utility functions for working 7 | with timeseries data formatted as :class:`pandas.Series` that have 8 | the time dimension as index. 9 | 10 | .. warning:: 11 | 12 | Not all **pyam** functions currently support continuous-time formats. 13 | Please reach out via our `Slack channel, mailing list or GitHub issues`_ 14 | if you are not sure whether your use case is supported. 15 | 16 | .. _`Slack channel, mailing list or GitHub issues`: ../contributing.html 17 | 18 | .. automodule:: pyam.timeseries 19 | :autosummary: 20 | :members: 21 | -------------------------------------------------------------------------------- /docs/api/variables.rst: -------------------------------------------------------------------------------- 1 | .. 
currentmodule:: pyam.str 2 | 3 | Variables utilities 4 | =================== 5 | 6 | The **variable** dimension of the |pyam| data format implements a 7 | "semi-hierarchical" structure using the :code:`|` character (*pipe*, not l or i) 8 | to indicate the *depth*. Read the `data model documentation`_ for more information. 9 | 10 | .. _`data model documentation`: ../data.html#the-variable-column 11 | 12 | The package provides several functions to work with such strings. 13 | 14 | .. autofunction:: concat_with_pipe 15 | 16 | .. autofunction:: find_depth 17 | 18 | .. autofunction:: get_variable_components 19 | 20 | .. autofunction:: reduce_hierarchy 21 | 22 | .. autofunction:: is_str -------------------------------------------------------------------------------- /docs/authors.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../AUTHORS.rst 2 | -------------------------------------------------------------------------------- /docs/contributing.rst: -------------------------------------------------------------------------------- 1 | Support and Contributing 2 | ======================== 3 | 4 | .. include:: ../CONTRIBUTING.rst 5 | -------------------------------------------------------------------------------- /docs/examples/README.txt: -------------------------------------------------------------------------------- 1 | .. _examples: 2 | 3 | Plotting Gallery 4 | ================ 5 | 6 | All examples in this gallery currently use the *IAMC template* for yearly data, 7 | but |pyam| also supports timeseries data with a sub-annual resolution. 8 | Please read the `Data Model <../data.html>`_ section for more information. 9 | 10 | For more information about the plotting module, refer to the 11 | `plotting API documentation <../api/plotting.html>`_. -------------------------------------------------------------------------------- /docs/examples/plot_bar.py: -------------------------------------------------------------------------------- 1 | """ 2 | ================== 3 | Stacked bar charts 4 | ================== 5 | 6 | """ 7 | 8 | # sphinx_gallery_thumbnail_number = 4 9 | 10 | ############################### 11 | # Read in tutorial data and show a summary 12 | # **************************************** 13 | # 14 | # This gallery uses the scenario data from the first-steps tutorial. 15 | # 16 | # If you haven't cloned the **pyam** GitHub repository to your machine, 17 | # you can download the file from 18 | # https://github.com/IAMconsortium/pyam/tree/main/docs/tutorials. 19 | # 20 | # Make sure to place the data file in the same folder as this script/notebook. 21 | 22 | import matplotlib.pyplot as plt 23 | 24 | import pyam 25 | from pyam.plotting import add_net_values_to_bar_plot 26 | 27 | df = pyam.IamDataFrame("tutorial_data.csv") 28 | df 29 | 30 | ############################### 31 | # Show stacked bar chart by categories 32 | # ************************************ 33 | # 34 | # First, we generate a simple stacked bar chart 35 | # of all components of primary energy supply for one scenario. 36 | # 37 | # Calling :func:`tight_layout() <matplotlib.pyplot.tight_layout>` ensures 38 | # that the final plot looks nice and tidy. 
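#
# The `args` dictionary defined below is unpacked into `IamDataFrame.filter()`
# via `**args`, so the same model/scenario selection can be reused
# by the other examples in this script.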
39 | 40 | args = dict(model="WITCH-GLOBIOM 4.4", scenario="CD-LINKS_NPi2020_1000") 41 | data = df.filter(**args, variable="Primary Energy|*", region="World") 42 | 43 | data.plot.bar(stacked=True, title="Primary energy mix") 44 | plt.legend(loc=1) 45 | plt.tight_layout() 46 | plt.show() 47 | 48 | ############################### 49 | # Flip the direction of a stacked bar chart 50 | # ***************************************** 51 | # 52 | # We can flip the orientation to obtain a horizontal chart. 53 | 54 | data.plot.bar(stacked=True, orient="h", title="Primary energy mix") 55 | plt.legend(loc=1) 56 | plt.tight_layout() 57 | plt.show() 58 | 59 | ############################### 60 | # Show stacked bar chart by regions 61 | # ********************************* 62 | # 63 | # We don't just have to plot subcategories of variables, 64 | # any data or meta indicators from the IamDataFrame can be used. 65 | # Here, we show the contribution by region to total CO2 emissions. 66 | 67 | data = df.filter(**args, variable="Emissions|CO2").filter(region="World", keep=False) 68 | 69 | data.plot.bar( 70 | bars="region", stacked=True, title="CO2 emissions by region", cmap="tab20" 71 | ) 72 | plt.legend(loc=1) 73 | plt.tight_layout() 74 | plt.show() 75 | 76 | ############################### 77 | # Add indicators to show net values 78 | # ********************************* 79 | # 80 | # Sometimes, stacked bar charts have negative entries. 81 | # In that case, it helps to add a line showing the net value. 82 | 83 | fig, ax = plt.subplots() 84 | data.plot.bar( 85 | ax=ax, bars="region", stacked=True, title="CO2 emissions by region", cmap="tab20" 86 | ) 87 | add_net_values_to_bar_plot(ax) 88 | plt.legend(loc=1) 89 | plt.tight_layout() 90 | plt.show() 91 | -------------------------------------------------------------------------------- /docs/examples/plot_boxplot.py: -------------------------------------------------------------------------------- 1 | """ 2 | ============== 3 | Boxplot charts 4 | ============== 5 | 6 | """ 7 | 8 | # sphinx_gallery_thumbnail_number = 2 9 | 10 | ############################### 11 | # Read in tutorial data and show a summary 12 | # **************************************** 13 | # 14 | # This gallery uses the scenario data from the first-steps tutorial. 15 | # 16 | # If you haven't cloned the **pyam** GitHub repository to your machine, 17 | # you can download the file from 18 | # https://github.com/IAMconsortium/pyam/tree/main/docs/tutorials. 19 | # 20 | # Make sure to place the data file in the same folder as this script/notebook. 21 | 22 | import matplotlib.pyplot as plt 23 | 24 | import pyam 25 | 26 | df = pyam.IamDataFrame("tutorial_data.csv") 27 | df 28 | 29 | ############################### 30 | # A boxplot of CO2 emissions 31 | # ************************** 32 | # 33 | # We generate a simple boxplot of CO2 emissions 34 | # across one scenario implemented by a range of models. 35 | 36 | data = df.filter( 37 | scenario="CD-LINKS_NPi2020_1000", variable="Emissions|CO2", region="World" 38 | ) 39 | 40 | data.plot.box(x="year") 41 | plt.tight_layout() 42 | plt.show() 43 | 44 | ############################### 45 | # A grouped boxplot 46 | # ***************** 47 | # 48 | # We can add sub-groupings of the data using the keyword argument `by`. 
49 | 50 | data = df.filter( 51 | scenario="CD-LINKS_NPi2020_1000", 52 | variable="Emissions|CO2", 53 | year=[2010, 2020, 2030, 2050, 2100], 54 | ).filter(region="World", keep=False) 55 | 56 | data.plot.box(x="year", by="region", legend=True) 57 | 58 | # We can use matplotlib arguments to make the figure more appealing. 59 | plt.legend(loc=1) 60 | plt.tight_layout() 61 | plt.show() 62 | -------------------------------------------------------------------------------- /docs/examples/plot_pie.py: -------------------------------------------------------------------------------- 1 | """ 2 | ======================== 3 | Pie chart visualizations 4 | ======================== 5 | 6 | """ 7 | 8 | # sphinx_gallery_thumbnail_number = 3 9 | 10 | ############################### 11 | # Read in tutorial data and show a summary 12 | # **************************************** 13 | # 14 | # This gallery uses the scenario data from the first-steps tutorial. 15 | # 16 | # If you haven't cloned the **pyam** GitHub repository to your machine, 17 | # you can download the file from 18 | # https://github.com/IAMconsortium/pyam/tree/main/docs/tutorials. 19 | # 20 | # Make sure to place the data file in the same folder as this script/notebook. 21 | 22 | import matplotlib.pyplot as plt 23 | 24 | import pyam 25 | 26 | df = pyam.IamDataFrame("tutorial_data.csv") 27 | df 28 | 29 | ############################### 30 | # A pie chart of the energy supply 31 | # ******************************** 32 | # 33 | # We generate a pie plot of all components of primary energy supply 34 | # for one scenario. 35 | 36 | data = df.filter( 37 | model="AIM/CGE 2.1", 38 | scenario="CD-LINKS_NPi", 39 | variable="Primary Energy|*", 40 | year=2050, 41 | region="World", 42 | ) 43 | 44 | data.plot.pie() 45 | plt.tight_layout() 46 | plt.show() 47 | 48 | ############################### 49 | # A pie chart with a legend 50 | # ************************* 51 | # 52 | # Sometimes a legend is preferable to labels, so we can use that instead. 53 | 54 | data.plot.pie(labels=None, legend=True) 55 | plt.tight_layout() 56 | plt.show() 57 | 58 | ############################### 59 | # A pie chart of regional contributions 60 | # ************************************* 61 | # 62 | # We don't just have to plot subcategories of variables, 63 | # any data or meta indicators from the IamDataFrame can be used. 64 | # Here, we show the contribution by region to CO2 emissions. 65 | 66 | data = df.filter( 67 | model="AIM/CGE 2.1", scenario="CD-LINKS_NPi", variable="Emissions|CO2", year=2050 68 | ).filter(region="World", keep=False) 69 | data.plot.pie(category="region", cmap="tab20") 70 | plt.tight_layout() 71 | plt.show() 72 | -------------------------------------------------------------------------------- /docs/examples/plot_ranges.py: -------------------------------------------------------------------------------- 1 | """ 2 | ========================= 3 | Ranges of timeseries data 4 | ========================= 5 | 6 | """ 7 | 8 | # sphinx_gallery_thumbnail_number = 3 9 | 10 | ############################### 11 | # Read in tutorial data and show a summary 12 | # **************************************** 13 | # 14 | # This gallery uses the scenario data from the first-steps tutorial. 15 | # 16 | # If you haven't cloned the **pyam** GitHub repository to your machine, 17 | # you can download the file from 18 | # https://github.com/IAMconsortium/pyam/tree/main/docs/tutorials. 19 | # 20 | # Make sure to place the data file in the same folder as this script/notebook. 
21 | 22 | import matplotlib.pyplot as plt 23 | 24 | import pyam 25 | 26 | df = pyam.IamDataFrame("tutorial_data.csv") 27 | df 28 | 29 | ############################### 30 | # Highlighting ranges in a line chart 31 | # *********************************** 32 | # 33 | # In this example, we want to highlight the range across a scenario ensemble. 34 | # We do this by utilizing the `fill_between` argument. 35 | 36 | data = df.filter(scenario="CD-LINKS*", variable="Emissions|CO2", region="World") 37 | 38 | data.plot(color="scenario", fill_between=True) 39 | plt.tight_layout() 40 | plt.show() 41 | 42 | ############################### 43 | # More options for highlighting ranges in a line chart 44 | # **************************************************** 45 | # 46 | # The keyword argument `fill_between` can be set to `True`, 47 | # or specific arguments can be provided as a dictionary: 48 | # in this illustration, we choose a very low transparency value. 49 | 50 | data.plot(color="scenario", fill_between=dict(alpha=0.15)) 51 | plt.tight_layout() 52 | plt.show() 53 | 54 | ############################### 55 | # Even more options for highlighting ranges in a line chart 56 | # ********************************************************* 57 | # 58 | # To further highlight the range of data, we can also add a bar showing the 59 | # range of data in the final time period using `final_ranges`. Similar to 60 | # `fill_between`, it can either be `True` or take specific arguments. 61 | 62 | data.plot(color="scenario", fill_between=True, final_ranges=dict(linewidth=5)) 63 | plt.tight_layout() 64 | plt.show() 65 | -------------------------------------------------------------------------------- /docs/examples/plot_sankey.py: -------------------------------------------------------------------------------- 1 | """ 2 | ============== 3 | Sankey diagram 4 | ============== 5 | 6 | """ 7 | 8 | ############################### 9 | # Read in example data and show a summary 10 | # *************************************** 11 | # 12 | # This gallery uses a small selection of the data 13 | # compiled for the IPCC's *Special Report on Global Warming of 1.5°C* (SR15_). 14 | # The complete scenario ensemble data is publicly available from the 15 | # `IAMC 1.5°C Scenario Explorer and Data hosted by IIASA`_. 16 | # 17 | # Please read the License_ of the IAMC 1.5°C Scenario Explorer 18 | # before using the full scenario data for scientific analysis or other work. 19 | # 20 | # .. _SR15: http://ipcc.ch/sr15/ 21 | # 22 | # .. _`IAMC 1.5°C Scenario Explorer and Data hosted by IIASA` : https://data.ece.iiasa.ac.at/iamc-1.5c-explorer 23 | # 24 | # .. _License : https://data.ece.iiasa.ac.at/iamc-1.5c-explorer/#/license 25 | # 26 | # If you haven't cloned the **pyam** GitHub repository to your machine, 27 | # you can download the data file from 28 | # https://github.com/IAMconsortium/pyam/tree/main/docs/examples 29 | # 30 | # Make sure to place the data file in the same folder as this script/notebook. 31 | 32 | import plotly 33 | 34 | import pyam 35 | 36 | df = pyam.IamDataFrame("sankey_data.csv") 37 | df 38 | 39 | ############################### 40 | # A simple Sankey diagram 41 | # *********************** 42 | # 43 | # We show a Sankey diagram of a subset of the energy system 44 | # in the 'CD-LINKS_NPi2020_1000' scenario 45 | # implemented by the 'REMIND-MAgPIE 1.7-3.0' model. 46 | # 47 | # The :meth:`~pyam.figures.sankey` function 48 | # takes a dictionary to define flows, sources and targets: 49 | # 50 | # .. 
code-block:: python 51 | # 52 | # { 53 | # variable: (source, target), 54 | # } 55 | 56 | sankey_mapping = { 57 | "Primary Energy|Coal": ("Coal Mining", "Coal Trade & Power Generation"), 58 | "Primary Energy|Gas": ("Natural Gas Extraction", "Gas Network & Power Generation"), 59 | "Secondary Energy|Electricity|Non-Biomass Renewables": ( 60 | "Non-Biomass Renewables", 61 | "Electricity Grid", 62 | ), 63 | "Secondary Energy|Electricity|Nuclear": ("Nuclear", "Electricity Grid"), 64 | "Secondary Energy|Electricity|Coal": ( 65 | "Coal Trade & Power Generation", 66 | "Electricity Grid", 67 | ), 68 | "Secondary Energy|Electricity|Gas": ( 69 | "Gas Network & Power Generation", 70 | "Electricity Grid", 71 | ), 72 | "Final Energy|Electricity": ("Electricity Grid", "Electricity Demand"), 73 | "Final Energy|Solids|Coal": ( 74 | "Coal Trade & Power Generation", 75 | "Non-Electricity Coal Demand", 76 | ), 77 | "Final Energy|Gases": ("Gas Network & Power Generation", "Gas Demand"), 78 | } 79 | 80 | fig = df.filter(year=2050).plot.sankey(mapping=sankey_mapping) 81 | # calling `show()` is necessary to have the thumbnail in the gallery overview 82 | plotly.io.show(fig) 83 | -------------------------------------------------------------------------------- /docs/examples/plot_scatter.py: -------------------------------------------------------------------------------- 1 | """ 2 | ============ 3 | Scatter plot 4 | ============ 5 | 6 | """ 7 | 8 | ############################### 9 | # Read in tutorial data and show a summary 10 | # **************************************** 11 | # 12 | # This gallery uses the scenario data from the first-steps tutorial. 13 | # 14 | # If you haven't cloned the **pyam** GitHub repository to your machine, 15 | # you can download the file from 16 | # https://github.com/IAMconsortium/pyam/tree/main/docs/tutorials. 17 | # 18 | # Make sure to place the data file in the same folder as this script/notebook. 19 | 20 | import matplotlib.pyplot as plt 21 | 22 | import pyam 23 | 24 | df = pyam.IamDataFrame("tutorial_data.csv") 25 | df 26 | 27 | ############################### 28 | # Show relation of variables 29 | # ************************** 30 | # 31 | # In the first example, we show the relation between two variables, 32 | # biomass and fossil energy use. 33 | 34 | data = df.filter(region="World") 35 | 36 | data.plot.scatter( 37 | x="Primary Energy|Biomass", y="Primary Energy|Fossil", color="scenario" 38 | ) 39 | plt.tight_layout() 40 | plt.show() 41 | 42 | ############################### 43 | # Show a scatter plot of meta indicators 44 | # ************************************** 45 | # 46 | # In the second example, we show the relation between two meta indicators 47 | # computed from the timeseries data. 48 | # 49 | # Cumulative CO2 emissions 50 | # ======================== 51 | # 52 | # The first indicator computes the cumulative CO2 emissions from 2020 53 | # until the end of the century. 54 | 55 | co2 = ( 56 | df.filter(region="World", variable="Emissions|CO2") 57 | .convert_unit("Mt CO2/yr", "Gt CO2/yr") 58 | .timeseries() 59 | ) 60 | 61 | df.set_meta( 62 | meta=co2.apply(pyam.timeseries.cumulative, first_year=2020, last_year=2100, axis=1), 63 | name="cumulative_co2", 64 | ) 65 | 66 | ############################### 67 | # Temperature at the end of the century 68 | # ===================================== 69 | # 70 | # The second indicator takes the value of the temperature variable 71 | # in the latest year and assigns it as a meta indicator. 
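# (Note: :meth:`~pyam.IamDataFrame.set_meta_from_data` used below assigns values
# taken directly from the timeseries data as a meta indicator, so no intermediate
# :meth:`timeseries` computation is needed for this second indicator.)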
72 | 73 | temperature_var = "AR5 climate diagnostics|Temperature|Global Mean|MAGICC6|MED" 74 | df.set_meta_from_data(name="temperature", variable=temperature_var, year=2100) 75 | 76 | ############################### 77 | # Draw the scatter plot! 78 | # ====================== 79 | # 80 | 81 | df.plot.scatter(x="cumulative_co2", y="temperature", color="scenario") 82 | -------------------------------------------------------------------------------- /docs/examples/plot_secondary_axis.py: -------------------------------------------------------------------------------- 1 | """ 2 | ===================================== 3 | Composing plots with a secondary axis 4 | ===================================== 5 | 6 | """ 7 | 8 | # sphinx_gallery_thumbnail_number = 2 9 | 10 | ############################### 11 | # Read in tutorial data and show a summary 12 | # **************************************** 13 | # 14 | # This gallery uses the scenario data from the first-steps tutorial. 15 | # 16 | # If you haven't cloned the **pyam** GitHub repository to your machine, 17 | # you can download the file from 18 | # https://github.com/IAMconsortium/pyam/tree/main/docs/tutorials. 19 | # 20 | # Make sure to place the data file in the same folder as this script/notebook. 21 | 22 | import matplotlib.pyplot as plt 23 | 24 | import pyam 25 | 26 | df = pyam.IamDataFrame("tutorial_data.csv") 27 | df 28 | 29 | ############################### 30 | # Create a figure with different units on secondary axis 31 | # ****************************************************** 32 | # 33 | # To create a chart with multiple axes, we directly use the **matplotlib** package 34 | # and start with a subplot consisting of a figure canvas and 35 | # an :class:`Axes <matplotlib.axes.Axes>` object, which contains the figure elements. 36 | # 37 | # First, we generate a simple line chart with temperature increase in °C 38 | # for one scenario and multiple models. 39 | # We now tell **pyam** to specifically use the :code:`ax` instance for the plot. 40 | # 41 | # Then, we create a second axis :code:`ax2` using 42 | # :meth:`Axes.secondary_yaxis() <matplotlib.axes.Axes.secondary_yaxis>`, 43 | # which converts the temperature increase in °C shown on the original axis :code:`ax` 44 | # to temperature increase in °F. 45 | 46 | fig, ax = plt.subplots() 47 | 48 | args = dict( 49 | scenario="CD-LINKS_NPi2020_1000", 50 | region="World", 51 | ) 52 | 53 | temperature = "AR5 climate diagnostics|Temperature|Global Mean|MAGICC6|MED" 54 | title = "Temperature change relative to pre-industrial levels" 55 | 56 | data_temperature = df.filter(**args, variable=temperature) 57 | data_temperature.plot(ax=ax, title=title, legend=False) 58 | # a temperature *difference* converts from °C to °F by a factor of 1.8 (no offset) 59 | ax2 = ax.secondary_yaxis("right", functions=(lambda x: x * 1.8, lambda x: x / 1.8)) 60 | ax2.set_ylabel("°F") 61 | 62 | plt.tight_layout() 63 | plt.show() 64 | 65 | ############################### 66 | # Create a composed figure from several plot types 67 | # ************************************************ 68 | # 69 | # To create a composed chart, we again use the **matplotlib** package 70 | # and start with a subplot consisting of a figure canvas and 71 | # an :class:`Axes <matplotlib.axes.Axes>` object, which contains the figure elements. 72 | # 73 | # First, we generate a simple stacked chart 74 | # of all components of the primary energy supply for one scenario. 75 | # We now tell **pyam** to specifically use the :code:`ax` instance for the plot. 76 | # 77 | # Then, we create a second :class:`Axes` using :meth:`Axes.twinx() <matplotlib.axes.Axes.twinx>` 78 | # and place a second plot on it. 
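# (Note: the two plots created this way share the x-axis, so the stacked energy mix
# and the temperature line are drawn over the same years with independent y-scales.)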
79 | 80 | fig, ax = plt.subplots() 81 | 82 | args = dict( 83 | model="WITCH-GLOBIOM 4.4", 84 | scenario="CD-LINKS_NPi2020_1000", 85 | region="World", 86 | ) 87 | 88 | data_energy = df.filter(**args, variable="Primary Energy|*") 89 | data_energy.plot.stack(ax=ax, title=None, legend=False) 90 | 91 | temperature = "AR5 climate diagnostics|Temperature|Global Mean|MAGICC6|MED" 92 | data_temperature = df.filter(**args, variable=temperature) 93 | 94 | ax2 = ax.twinx() 95 | format_args = dict(color="black", linestyle="--", marker="o", label="Temperature") 96 | data_temperature.plot(ax=ax2, legend=False, title=None, **format_args) 97 | 98 | ax.legend(loc=4) 99 | ax2.legend(loc=1) 100 | ax2.set_ylim(0, 2) 101 | ax.set_title("Primary energy mix and temperature") 102 | 103 | plt.tight_layout() 104 | plt.show() 105 | -------------------------------------------------------------------------------- /docs/examples/plot_stack.py: -------------------------------------------------------------------------------- 1 | """ 2 | =================== 3 | Stacked line charts 4 | =================== 5 | 6 | """ 7 | 8 | # sphinx_gallery_thumbnail_number = 2 9 | 10 | ############################### 11 | # Read in tutorial data and show a summary 12 | # **************************************** 13 | # 14 | # This gallery uses the scenario data from the first-steps tutorial. 15 | # 16 | # If you haven't cloned the **pyam** GitHub repository to your machine, 17 | # you can download the file from 18 | # https://github.com/IAMconsortium/pyam/tree/main/docs/tutorials. 19 | # 20 | # Make sure to place the data file in the same folder as this script/notebook. 21 | 22 | import matplotlib.pyplot as plt 23 | 24 | import pyam 25 | 26 | df = pyam.IamDataFrame("tutorial_data.csv") 27 | df 28 | 29 | ############################## 30 | # First, we generate a simple stacked line chart 31 | # of all components of primary energy supply for one scenario. 32 | 33 | model, scenario = "IMAGE 3.0.1", "CD-LINKS_NPi2020_400" 34 | 35 | data = df.filter( 36 | model=model, scenario=scenario, variable="Primary Energy|*", region="World" 37 | ) 38 | 39 | data.plot.stack(title=scenario) 40 | plt.legend(loc=1) 41 | plt.tight_layout() 42 | plt.show() 43 | 44 | ############################### 45 | # We don't just have to plot subcategories of variables, 46 | # any data dimension or meta indicators from the IamDataFrame can be used. 47 | # Here, we show the contribution by region to total CO2 emissions. 48 | 49 | data = df.filter(model=model, scenario=scenario, variable="Emissions|CO2").filter( 50 | region="World", keep=False 51 | ) 52 | 53 | data.plot.stack(stack="region", cmap="tab20", title=scenario, total=True) 54 | plt.legend(loc=1) 55 | plt.tight_layout() 56 | plt.show() 57 | -------------------------------------------------------------------------------- /docs/examples/plot_timeseries.py: -------------------------------------------------------------------------------- 1 | """ 2 | ====================== 3 | Timeseries data charts 4 | ====================== 5 | 6 | """ 7 | 8 | ############################### 9 | # Read in tutorial data and show a summary 10 | # **************************************** 11 | # 12 | # This gallery uses the scenario data from the first-steps tutorial. 13 | # 14 | # If you haven't cloned the **pyam** GitHub repository to your machine, 15 | # you can download the file from 16 | # https://github.com/IAMconsortium/pyam/tree/main/docs/tutorials. 
17 | # 18 | # Make sure to place the data file in the same folder as this script/notebook. 19 | 20 | import pyam 21 | 22 | df = pyam.IamDataFrame("tutorial_data.csv") 23 | df 24 | 25 | ############################### 26 | # A simple line chart 27 | # ******************* 28 | # 29 | # We show a simple line chart of the regional components 30 | # of CO2 emissions for one scenario. 31 | # 32 | # Then, also show the data as a wide IAMC-style dataframe. 33 | 34 | model, scenario = "REMIND-MAgPIE 1.7-3.0", "CD-LINKS_INDCi" 35 | 36 | data = df.filter(model=model, scenario=scenario, variable="Emissions|CO2").filter( 37 | region="World", keep=False 38 | ) 39 | 40 | data.plot(color="region", title="CO2 emissions by region") 41 | data.timeseries() 42 | -------------------------------------------------------------------------------- /docs/examples/sankey_data.csv: -------------------------------------------------------------------------------- 1 | Model,Scenario,Region,Variable,Unit,2005,2010,2015,2020,2025,2030,2035,2040,2045,2050,2060,2070,2080,2090,2100 2 | IMAGE 3.0.1,CD-LINKS_NPi2020_1000,World,Final Energy|Electricity,EJ/yr,53.80826953,63.71489844,73.58595313,85.44735938,93.82329688,91.06659375,90.33624219,91.08263281,99.0765,112.81510159999998,145.203,177.0755938,208.0252031,233.48259380000002,255.9907969 3 | IMAGE 3.0.1,CD-LINKS_NPi2020_1000,World,Final Energy|Gases,EJ/yr,53.48498047,55.26935938,59.98828125,70.29185938,76.66061719,74.36647656,72.98084375,68.02853125,62.07119922,62.35551953,68.29827344,68.0620625,61.55342969,48.81646094,53.30096875 4 | IMAGE 3.0.1,CD-LINKS_NPi2020_1000,World,Final Energy|Solids|Coal,EJ/yr,31.73178906,40.72285156,44.00421875,47.02408984,46.73598047,39.87710938,33.44739844,30.14419922,28.30783984,27.98949023,29.55891992,26.17583008,19.37980078,12.16663965,7.01698584 5 | IMAGE 3.0.1,CD-LINKS_NPi2020_1000,World,Primary Energy|Coal,EJ/yr,117.587,146.0782031,163.7425,167.4712031,154.03309380000002,120.1952969,94.58446094,67.09732031,47.41614844,46.68614063,49.39321094,62.27291016,87.65290625,100.1868984,107.0912031 6 | IMAGE 3.0.1,CD-LINKS_NPi2020_1000,World,Primary Energy|Gas,EJ/yr,102.8227969,113.837,126.49260159999999,151.08890630000002,158.1367031,141.8922969,140.3855938,134.77829690000002,136.3615,125.95239840000002,107.46789840000002,118.5287031,126.1347969,110.0517031,97.90427344 7 | IMAGE 3.0.1,CD-LINKS_NPi2020_1000,World,Secondary Energy|Electricity|Coal,EJ/yr,26.10010637,31.05132416,35.89387699,38.28399283,34.60050592,26.06913544,20.16249298,11.75232609,4.441232395,4.860397221,6.419255568,8.889895747999999,12.75908466,11.90663916,8.283740534 8 | IMAGE 3.0.1,CD-LINKS_NPi2020_1000,World,Secondary Energy|Electricity|Gas,EJ/yr,13.19149266,17.06967033,18.95721907,22.52411008,24.32093947,23.18641294,24.37631067,23.97483888,22.6008403,18.029436800000003,10.62676198,14.82788779,22.43512595,21.11827361,14.71180695 9 | IMAGE 3.0.1,CD-LINKS_NPi2020_1000,World,Secondary Energy|Electricity|Non-Biomass Renewables,EJ/yr,11.1561265,13.984732999999999,18.11896042,23.25778326,29.91443939,33.75737043,33.67677133,37.14785677,49.96145382,65.874368,90.82459443,102.59832759999999,109.01675259999999,132.2030597,158.04181090000003 10 | IMAGE 3.0.1,CD-LINKS_NPi2020_1000,World,Secondary Energy|Electricity|Nuclear,EJ/yr,9.964626943999999,9.9226368,9.662710784,11.80798976,17.51332045,21.53414042,21.3886505,21.79807027,27.341060100000004,32.42462003,43.07098010000001,53.00470989,65.26325965,72.37544346,79.84093594 11 | 
-------------------------------------------------------------------------------- /docs/install.rst: -------------------------------------------------------------------------------- 1 | .. _install: 2 | 3 | Installation 4 | ============ 5 | 6 | Conda 7 | ----- 8 | 9 | https://anaconda.org/conda-forge/pyam 10 | 11 | .. code-block:: bash 12 | 13 | conda install -c conda-forge pyam 14 | 15 | PyPI 16 | ---- 17 | 18 | https://pypi.org/project/pyam-iamc/ 19 | 20 | .. warning:: The pyam package is distributed as "pyam-iamc" on PyPI. 21 | 22 | .. code-block:: bash 23 | 24 | pip install pyam-iamc 25 | 26 | Installing from source 27 | ---------------------- 28 | 29 | |pyam| can also be installed from source. 30 | 31 | .. code-block:: bash 32 | 33 | pip install -e git+https://github.com/IAMconsortium/pyam.git#egg=pyam-iamc 34 | 35 | Dependencies 36 | ------------ 37 | 38 | Like any software project, we stand on the shoulders of giants. Our particular 39 | giants include **pandas** (https://pandas.pydata.org), 40 | **matplotlib** (https://matplotlib.org), and **numpy** (https://numpy.org). 41 | See the `pyproject.toml`_ for more information. 42 | 43 | .. _`pyproject.toml`: https://github.com/IAMconsortium/pyam/blob/main/pyproject.toml 44 | -------------------------------------------------------------------------------- /docs/logos/pyam-header.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/docs/logos/pyam-header.png -------------------------------------------------------------------------------- /docs/logos/pyam-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/docs/logos/pyam-logo.png -------------------------------------------------------------------------------- /docs/logos/pyam-social-media.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/docs/logos/pyam-social-media.png -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | REM This file was generated with sphinx-quickstart version 3.5.0 2 | 3 | @ECHO OFF 4 | 5 | pushd %~dp0 6 | 7 | REM Command file for Sphinx documentation 8 | 9 | if "%SPHINXBUILD%" == "" ( 10 | set SPHINXBUILD=sphinx-build 11 | ) 12 | REM set SOURCEDIR= 13 | set BUILDDIR=_build 14 | 15 | if "%1" == "" goto help 16 | 17 | %SPHINXBUILD% >NUL 2>NUL 18 | if errorlevel 9009 ( 19 | echo. 20 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 21 | echo.installed, then set the SPHINXBUILD environment variable to point 22 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 23 | echo.may add the Sphinx directory to PATH. 24 | echo. 25 | echo.If you don't have Sphinx installed, grab it from 26 | echo.http://sphinx-doc.org/ 27 | exit /b 1 28 | ) 29 | 30 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 31 | goto end 32 | 33 | :help 34 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 35 | 36 | :end 37 | popd 38 | -------------------------------------------------------------------------------- /docs/tutorials.rst: -------------------------------------------------------------------------------- 1 | .. 
_tutorials: 2 | 3 | Tutorials 4 | ========= 5 | 6 | Jupyter notebooks 7 | ----------------- 8 | 9 | All tutorials currently use the *IAMC template* for yearly data, 10 | but |pyam| also supports timeseries data with a sub-annual resolution. 11 | Please read the `Data Model <data.html>`_ section for more information. 12 | 13 | The source code is available in the folder 14 | `docs/tutorials`_ of the |pyam| GitHub repository. 15 | 16 | .. _`docs/tutorials`: 17 | https://github.com/IAMconsortium/pyam/tree/main/docs/tutorials 18 | 19 | .. toctree:: 20 | :maxdepth: 1 21 | 22 | tutorials/pyam_first_steps.ipynb 23 | tutorials/data_table_formats.ipynb 24 | tutorials/unit_conversion.ipynb 25 | tutorials/algebraic_operations.ipynb 26 | tutorials/quantiles.ipynb 27 | tutorials/iiasa.ipynb 28 | tutorials/unfccc.ipynb 29 | tutorials/GAMS_to_pyam.ipynb 30 | tutorials/aggregating_downscaling_consistency.ipynb 31 | tutorials/subannual_time_resolution.ipynb 32 | tutorials/ipcc_colors.ipynb 33 | tutorials/legends.ipynb 34 | tutorials/aggregating_variables_and_plotting_with_negative_values.ipynb 35 | tutorials/pyam_logo.ipynb 36 | 37 | Workshops and recordings 38 | ------------------------ 39 | 40 | The Energy, Climate and Environment (ECE) program at IIASA regularly holds 41 | workshops and trainings on the pyam package. You can find recordings of these 42 | workshops and related material at https://teaching.ece.iiasa.ac.at/pyam.html. 43 | -------------------------------------------------------------------------------- /docs/tutorials/_static/cdlinks_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/docs/tutorials/_static/cdlinks_logo.png -------------------------------------------------------------------------------- /docs/tutorials/_static/gams_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/docs/tutorials/_static/gams_logo.png -------------------------------------------------------------------------------- /docs/tutorials/_static/iamc-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/docs/tutorials/_static/iamc-logo.png -------------------------------------------------------------------------------- /docs/tutorials/ipcc_colors.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Using IPCC Color Palettes\n", 8 | "\n", 9 | "**pyam** supports the use of explicit IPCC [AR5](https://tdaviesbarnard.co.uk/1202/ipcc-official-colors-rcp/) and [AR6](https://github.com/IPCC-WG1/colormaps) color palettes by providing the RCP and/or SSP of interest via the `pyam.run_control()` feature." 10 | ] 11 | }, 12 | { 13 | "cell_type": "markdown", 14 | "metadata": {}, 15 | "source": [ 16 | "The full list of the IPCC color palette colors available in **pyam** can be retrieved by the following code." 
17 | ] 18 | }, 19 | { 20 | "cell_type": "code", 21 | "execution_count": null, 22 | "metadata": {}, 23 | "outputs": [], 24 | "source": [ 25 | "import pandas as pd\n", 26 | "\n", 27 | "import pyam\n", 28 | "\n", 29 | "colors = pyam.plotting.PYAM_COLORS\n", 30 | "pd.DataFrame({\"name\": list(colors.keys()), \"color\": list(colors.values())})" 31 | ] 32 | }, 33 | { 34 | "cell_type": "markdown", 35 | "metadata": {}, 36 | "source": [ 37 | "## Display scenario data with default colours\n", 38 | "\n", 39 | "We use the scenario ensemble from the **first-steps tutorial** (here on\n", 40 | "[GitHub](https://github.com/IAMconsortium/pyam/blob/main/docs/tutorials/pyam_first_steps.ipynb)\n", 41 | "and on [read the docs](https://pyam-iamc.readthedocs.io/en/stable/tutorials/pyam_first_steps.html)).\n", 42 | "Let's pull out two example scenarios (implemented by multiple modelling frameworks each) and plot them with the default color scheme." 43 | ] 44 | }, 45 | { 46 | "cell_type": "code", 47 | "execution_count": null, 48 | "metadata": {}, 49 | "outputs": [], 50 | "source": [ 51 | "scenarios = [\"CD-LINKS_NoPolicy\", \"CD-LINKS_NPi2020_400\"]\n", 52 | "\n", 53 | "df = pyam.IamDataFrame(data=\"tutorial_data.csv\").filter(\n", 54 | "    variable=\"Emissions|CO2\", region=\"World\", scenario=scenarios\n", 55 | ")\n", 56 | "\n", 57 | "df.plot(color=\"scenario\")" 58 | ] 59 | }, 60 | { 61 | "cell_type": "markdown", 62 | "metadata": {}, 63 | "source": [ 64 | "As an example, we assume that each of these two sets of scenarios corresponds to a categorization in the AR6 context. We can utilize the specific colors by following two steps:\n", 65 | "\n", 66 | "1. Update `pyam.run_control()` telling it which scenario name maps to which AR6 color\n", 67 | "2. Call the plot function using that color mapping" 68 | ] 69 | }, 70 | { 71 | "cell_type": "markdown", 72 | "metadata": {}, 73 | "source": [ 74 | "## Updating the run control\n", 75 | "\n", 76 | "We need to tell **pyam** that whenever it sees a certain scenario name, it should use a specific color from the IPCC palette." 77 | ] 78 | }, 79 | { 80 | "cell_type": "code", 81 | "execution_count": null, 82 | "metadata": {}, 83 | "outputs": [], 84 | "source": [ 85 | "color_map = {\n", 86 | "    \"CD-LINKS_NPi2020_400\": \"AR6-SSP2-4.5\",\n", 87 | "    \"CD-LINKS_NoPolicy\": \"AR6-SSP5-8.5\",\n", 88 | "}\n", 89 | "\n", 90 | "pyam.run_control().update({\"color\": {\"scenario\": color_map}})" 91 | ] 92 | }, 93 | { 94 | "cell_type": "markdown", 95 | "metadata": {}, 96 | "source": [ 97 | "The illustration above is implemented directly in Python code, but it also works by specifying the desired mapping in a `yaml` configuration file and loading that file into `run_control()`." 98 | ] 99 | }, 100 | { 101 | "cell_type": "markdown", 102 | "metadata": {}, 103 | "source": [ 104 | "## Use the new colors\n", 105 | "\n", 106 | "Now, it's as simple as calling the plot function again!" 
107 | ] 108 | }, 109 | { 110 | "cell_type": "code", 111 | "execution_count": null, 112 | "metadata": {}, 113 | "outputs": [], 114 | "source": [ 115 | "df.plot(color=\"scenario\")" 116 | ] 117 | } 118 | ], 119 | "metadata": { 120 | "kernelspec": { 121 | "display_name": "Python 3", 122 | "language": "python", 123 | "name": "python3" 124 | }, 125 | "language_info": { 126 | "codemirror_mode": { 127 | "name": "ipython", 128 | "version": 3 129 | }, 130 | "file_extension": ".py", 131 | "mimetype": "text/x-python", 132 | "name": "python", 133 | "nbconvert_exporter": "python", 134 | "pygments_lexer": "ipython3", 135 | "version": "3.8.3" 136 | } 137 | }, 138 | "nbformat": 4, 139 | "nbformat_minor": 2 140 | } 141 | -------------------------------------------------------------------------------- /docs/tutorials/legends.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Customizing legends\n", 8 | "\n", 9 | "This is a short tutorial showing how different arguments to the `legend` keyword in the **pyam** plotting library affect where the legend is located.\n", 10 | "\n", 11 | "We use the scenario ensemble from the **first-steps tutorial** ([link](https://github.com/IAMconsortium/pyam/blob/main/docs/tutorials/pyam_first_steps.ipynb))." 12 | ] 13 | }, 14 | { 15 | "cell_type": "code", 16 | "execution_count": null, 17 | "metadata": {}, 18 | "outputs": [], 19 | "source": [ 20 | "from pyam import IamDataFrame" 21 | ] 22 | }, 23 | { 24 | "cell_type": "code", 25 | "execution_count": null, 26 | "metadata": {}, 27 | "outputs": [], 28 | "source": [ 29 | "df = IamDataFrame(data=\"tutorial_data.csv\").filter(\n", 30 | "    variable=\"Emissions|CO2\", region=\"World\"\n", 31 | ")\n", 32 | "\n", 33 | "df.head()" 34 | ] 35 | }, 36 | { 37 | "cell_type": "markdown", 38 | "metadata": {}, 39 | "source": [ 40 | "By default, a legend will not appear if there are too many entries." 41 | ] 42 | }, 43 | { 44 | "cell_type": "code", 45 | "execution_count": null, 46 | "metadata": {}, 47 | "outputs": [], 48 | "source": [ 49 | "df.plot()" 50 | ] 51 | }, 52 | { 53 | "cell_type": "markdown", 54 | "metadata": {}, 55 | "source": [ 56 | "By using the `color` argument, we tell the **pyam** plotting library to apply colours by model family.\n", 57 | "This reduces the number of legend entries (from 38 model-scenario combinations to 8 model families), and the legend will be shown by default with **matplotlib** standard settings." 58 | ] 59 | }, 60 | { 61 | "cell_type": "code", 62 | "execution_count": null, 63 | "metadata": {}, 64 | "outputs": [], 65 | "source": [ 66 | "df.plot(color=\"model\")" 67 | ] 68 | }, 69 | { 70 | "cell_type": "markdown", 71 | "metadata": {}, 72 | "source": [ 73 | "You can use standard **matplotlib** [legend](https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.legend.html) arguments by passing a dictionary of keyword arguments." 
74 | ] 75 | }, 76 | { 77 | "cell_type": "code", 78 | "execution_count": null, 79 | "metadata": {}, 80 | "outputs": [], 81 | "source": [ 82 | "df.plot(color=\"model\", legend=dict(loc=\"center left\"))" 83 | ] 84 | }, 85 | { 86 | "cell_type": "markdown", 87 | "metadata": {}, 88 | "source": [ 89 | "However, we also offer support for a few standard special cases, namely legends to the right of a plot and legends below the plot" 90 | ] 91 | }, 92 | { 93 | "cell_type": "code", 94 | "execution_count": null, 95 | "metadata": {}, 96 | "outputs": [], 97 | "source": [ 98 | "from pyam.plotting import OUTSIDE_LEGEND" 99 | ] 100 | }, 101 | { 102 | "cell_type": "code", 103 | "execution_count": null, 104 | "metadata": {}, 105 | "outputs": [], 106 | "source": [ 107 | "df.plot(color=\"model\", legend=OUTSIDE_LEGEND[\"right\"])" 108 | ] 109 | }, 110 | { 111 | "cell_type": "code", 112 | "execution_count": null, 113 | "metadata": {}, 114 | "outputs": [], 115 | "source": [ 116 | "df.plot(color=\"model\", legend=OUTSIDE_LEGEND[\"bottom\"])" 117 | ] 118 | } 119 | ], 120 | "metadata": { 121 | "kernelspec": { 122 | "display_name": "Python 3 (ipykernel)", 123 | "language": "python", 124 | "name": "python3" 125 | }, 126 | "language_info": { 127 | "codemirror_mode": { 128 | "name": "ipython", 129 | "version": 3 130 | }, 131 | "file_extension": ".py", 132 | "mimetype": "text/x-python", 133 | "name": "python", 134 | "nbconvert_exporter": "python", 135 | "pygments_lexer": "ipython3", 136 | "version": "3.10.0" 137 | } 138 | }, 139 | "nbformat": 4, 140 | "nbformat_minor": 2 141 | } 142 | -------------------------------------------------------------------------------- /docs/tutorials/pyam_logo.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Make our Logo!\n", 8 | "\n", 9 | "The logo combines a number of fun **pyam** features, including\n", 10 | "\n", 11 | "- line plots\n", 12 | "- filling data between lines\n", 13 | "- adding ranges of final-year data" 14 | ] 15 | }, 16 | { 17 | "cell_type": "code", 18 | "execution_count": null, 19 | "metadata": {}, 20 | "outputs": [], 21 | "source": [ 22 | "import matplotlib.pyplot as plt\n", 23 | "\n", 24 | "plt.style.use(\"seaborn-v0_8-deep\")" 25 | ] 26 | }, 27 | { 28 | "cell_type": "code", 29 | "execution_count": null, 30 | "metadata": {}, 31 | "outputs": [], 32 | "source": [ 33 | "import numpy as np\n", 34 | "\n", 35 | "\n", 36 | "def func(x, factor):\n", 37 | " return np.sin(x) + factor * x\n", 38 | "\n", 39 | "\n", 40 | "x = np.linspace(0, 4, 100)" 41 | ] 42 | }, 43 | { 44 | "cell_type": "code", 45 | "execution_count": null, 46 | "metadata": {}, 47 | "outputs": [], 48 | "source": [ 49 | "import itertools\n", 50 | "\n", 51 | "import pandas as pd\n", 52 | "\n", 53 | "from pyam import IAMC_IDX, IamDataFrame\n", 54 | "\n", 55 | "combinations = itertools.product([\"m1\", \"m2\", \"m3\", \"m4\"], [\"s1\", \"s2\", \"s3\"])\n", 56 | "data = [\n", 57 | " [m, s] + [\"r\", \"v\", \"u\"] + list(func(x, 0.5 + 0.1 * i))\n", 58 | " for i, (m, s) in enumerate(combinations)\n", 59 | "]\n", 60 | "df = IamDataFrame(pd.DataFrame(data, columns=IAMC_IDX + list(range(len(x)))))" 61 | ] 62 | }, 63 | { 64 | "cell_type": "code", 65 | "execution_count": null, 66 | "metadata": {}, 67 | "outputs": [], 68 | "source": [ 69 | "df.head()" 70 | ] 71 | }, 72 | { 73 | "cell_type": "code", 74 | "execution_count": null, 75 | "metadata": {}, 76 | "outputs": [], 77 | "source": [ 78 | 
"fig, ax = plt.subplots()\n", 79 | "df.filter(scenario=\"s2\").plot(ax=ax, color=\"model\", legend=False, title=False)\n", 80 | "df.filter(scenario=\"s2\", keep=False).plot(\n", 81 | " ax=ax, linewidth=0.5, color=\"model\", legend=False, title=False\n", 82 | ")\n", 83 | "df.plot(\n", 84 | " ax=ax,\n", 85 | " alpha=0,\n", 86 | " color=\"model\",\n", 87 | " fill_between=True,\n", 88 | " final_ranges=dict(linewidth=4),\n", 89 | " legend=False,\n", 90 | " title=False,\n", 91 | ")\n", 92 | "plt.axis(\"off\")\n", 93 | "plt.tight_layout()\n", 94 | "fig.savefig(\"logo.pdf\", bbox_inches=\"tight\", transparent=True, pad_inches=0)" 95 | ] 96 | } 97 | ], 98 | "metadata": { 99 | "kernelspec": { 100 | "display_name": "Python 3 (ipykernel)", 101 | "language": "python", 102 | "name": "python3" 103 | }, 104 | "language_info": { 105 | "codemirror_mode": { 106 | "name": "ipython", 107 | "version": 3 108 | }, 109 | "file_extension": ".py", 110 | "mimetype": "text/x-python", 111 | "name": "python", 112 | "nbconvert_exporter": "python", 113 | "pygments_lexer": "ipython3", 114 | "version": "3.12.2" 115 | } 116 | }, 117 | "nbformat": 4, 118 | "nbformat_minor": 2 119 | } 120 | -------------------------------------------------------------------------------- /docs/tutorials/subannual_time_resolution.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Aggregating subannual timeseries data\n", 8 | "\n", 9 | "The **pyam** package offers many tools to facilitate processing of scenario data.\n", 10 | "In this notebook, we illustrate methods to aggregate timeseries data that is given at a sub-annual resolution using timeslices (seasons, representative days, etc.).\n", 11 | "\n", 12 | "
\n", 13 | "\n", 14 | "The features for working with subannual time resolution are still in an experimental status.\n", 15 | "The functions illustrated in this tutorial are operational and tested, but other tools such as the plotting library may not work as expected (yet) when working with subannual data.\n", 16 | "\n", 17 | "
\n", 18 | "\n", 19 | "## Overview\n", 20 | "\n", 21 | "This notebook illustrates the following features:\n", 22 | "\n", 23 | "0. Import data from file and inspect the scenario\n", 24 | "1. Aggregate timeseries data given at a sub-annual time resolution to a yearly value\n", 25 | "\n", 26 | "***" 27 | ] 28 | }, 29 | { 30 | "cell_type": "markdown", 31 | "metadata": {}, 32 | "source": [ 33 | "## 0. Import data from file and inspect the scenario\n", 34 | "\n", 35 | "The stylized scenario used in this tutorial has data for primary-energy timeseries for two subannual timeslices `summer` and `winter`." 36 | ] 37 | }, 38 | { 39 | "cell_type": "code", 40 | "execution_count": null, 41 | "metadata": {}, 42 | "outputs": [], 43 | "source": [ 44 | "from pyam import IamDataFrame\n", 45 | "\n", 46 | "df = IamDataFrame(data=\"tutorial_data_subannual_time.csv\")" 47 | ] 48 | }, 49 | { 50 | "cell_type": "code", 51 | "execution_count": null, 52 | "metadata": {}, 53 | "outputs": [], 54 | "source": [ 55 | "df.timeseries()" 56 | ] 57 | }, 58 | { 59 | "cell_type": "markdown", 60 | "metadata": {}, 61 | "source": [ 62 | "## 1. Aggregating timeseries across sub-annual timesteps\n", 63 | "\n", 64 | "Per default, the [aggregate_time()](https://pyam-iamc.readthedocs.io/en/stable/api.html#pyam.IamDataFrame.aggregate_time) function\n", 65 | "aggregates (by summation) the data from all sub-annual timesteps (given in the column `subannual`) to a `year` value.\n", 66 | "\n", 67 | "The function returns an **IamDataFrame**, so we can use [timeseries()](https://pyam-iamc.readthedocs.io/en/stable/api.html#pyam.IamDataFrame.timeseries) to display the resulting data." 68 | ] 69 | }, 70 | { 71 | "cell_type": "code", 72 | "execution_count": null, 73 | "metadata": {}, 74 | "outputs": [], 75 | "source": [ 76 | "df.aggregate_time(\"Primary Energy\").timeseries()" 77 | ] 78 | }, 79 | { 80 | "cell_type": "markdown", 81 | "metadata": {}, 82 | "source": [ 83 | "The function also supports directly appending the aggregated data to the original **IamDataFrame**.\n", 84 | "You can also pass a a list of variables, or call [variables()](https://pyam-iamc.readthedocs.io/en/stable/api.html#pyam.IamDataFrame.variables) to perform the aggregation on all timeseries data.\n", 85 | "\n", 86 | "A user can also manually set the \"target\" sub-annual value and the components to be aggregated;\n", 87 | "for example, this can then be used to process an aggregate of hourly data to monthly values.\n", 88 | "\n", 89 | "You will notice that the following cell returns a larger dataset compared to calling the same function above." 
90 | ] 91 | }, 92 | { 93 | "cell_type": "code", 94 | "execution_count": null, 95 | "metadata": {}, 96 | "outputs": [], 97 | "source": [ 98 | "df.aggregate_time(\n", 99 | " df.variable, value=\"year\", components=[\"summer\", \"winter\"], append=True\n", 100 | ")" 101 | ] 102 | }, 103 | { 104 | "cell_type": "code", 105 | "execution_count": null, 106 | "metadata": {}, 107 | "outputs": [], 108 | "source": [ 109 | "df.timeseries()" 110 | ] 111 | } 112 | ], 113 | "metadata": { 114 | "kernelspec": { 115 | "display_name": "Python 3 (ipykernel)", 116 | "language": "python", 117 | "name": "python3" 118 | }, 119 | "language_info": { 120 | "codemirror_mode": { 121 | "name": "ipython", 122 | "version": 3 123 | }, 124 | "file_extension": ".py", 125 | "mimetype": "text/x-python", 126 | "name": "python", 127 | "nbconvert_exporter": "python", 128 | "pygments_lexer": "ipython3", 129 | "version": "3.10.0" 130 | } 131 | }, 132 | "nbformat": 4, 133 | "nbformat_minor": 2 134 | } 135 | -------------------------------------------------------------------------------- /docs/tutorials/transport_tutorial.gdx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/docs/tutorials/transport_tutorial.gdx -------------------------------------------------------------------------------- /docs/tutorials/tutorial_data_aggregating_downscaling.csv: -------------------------------------------------------------------------------- 1 | Model,Scenario,Region,Variable,Unit,2005,2010 2 | model_a,scen_a,World,Emissions|CO2,Mt CO2,10.0,14.0 3 | model_a,scen_a,World,Emissions|CO2|AFOLU,Mt CO2,3.0,4.0 4 | model_a,scen_a,World,Emissions|CO2|Bunkers,Mt CO2,1.0,2.0 5 | model_a,scen_a,World,Emissions|CO2|Energy,Mt CO2,6.0,8.0 6 | model_a,scen_a,World,Population,million,3.0,5.0 7 | model_a,scen_a,World,Price|Carbon,USD/t CO2,4.0,27.0 8 | model_a,scen_a,World,Primary Energy,EJ/yr,12.0,15.0 9 | model_a,scen_a,World,Primary Energy|Coal,EJ/yr,9.0,10.0 10 | model_a,scen_a,World,Primary Energy|Wind,EJ/yr,3.0,5.0 11 | model_a,scen_a,reg_a,Emissions|CO2,Mt CO2,6.0,8.0 12 | model_a,scen_a,reg_a,Emissions|CO2|AFOLU,Mt CO2,2.0,3.0 13 | model_a,scen_a,reg_a,Emissions|CO2|Energy,Mt CO2,4.0,5.0 14 | model_a,scen_a,reg_a,Population,million,1.5,2.5 15 | model_a,scen_a,reg_a,Price|Carbon,USD/t CO2,1.0,30.0 16 | model_a,scen_a,reg_a,Primary Energy,EJ/yr,8.0,9.0 17 | model_a,scen_a,reg_a,Primary Energy|Coal,EJ/yr,6.0,6.0 18 | model_a,scen_a,reg_a,Primary Energy|Wind,EJ/yr,2.0,3.0 19 | model_a,scen_a,reg_b,Emissions|CO2,Mt CO2,3.0,4.0 20 | model_a,scen_a,reg_b,Emissions|CO2|AFOLU,Mt CO2,1.0,1.0 21 | model_a,scen_a,reg_b,Emissions|CO2|Energy,Mt CO2,2.0,3.0 22 | model_a,scen_a,reg_b,Population,million,1.5,2.5 23 | model_a,scen_a,reg_b,Price|Carbon,USD/t CO2,10.0,21.0 24 | model_a,scen_a,reg_b,Primary Energy,EJ/yr,4.0,6.0 25 | model_a,scen_a,reg_b,Primary Energy|Coal,EJ/yr,3.0,4.0 26 | model_a,scen_a,reg_b,Primary Energy|Wind,EJ/yr,1.0,2.0 27 | -------------------------------------------------------------------------------- /docs/tutorials/tutorial_data_subannual_time.csv: -------------------------------------------------------------------------------- 1 | model,scenario,region,variable,unit,subannual,2005,2010 2 | model_a,scen_a,World,Primary Energy,EJ/y,summer,3.5999999999999996,4.5 3 | model_a,scen_a,World,Primary Energy,EJ/y,winter,8.399999999999999,10.5 4 | model_a,scen_a,World,Primary Energy|Coal,EJ/y,summer,2.6999999999999997,3.0 5 | 
model_a,scen_a,World,Primary Energy|Coal,EJ/y,winter,6.3,7.0 6 | -------------------------------------------------------------------------------- /manuscripts/JOSS/line.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/manuscripts/JOSS/line.png -------------------------------------------------------------------------------- /manuscripts/JOSS/scatter.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/manuscripts/JOSS/scatter.png -------------------------------------------------------------------------------- /manuscripts/ORE/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # This file was generated with sphinx-quickstart version 3.5.0 3 | # 4 | 5 | # You can set these variables from the command line, and also 6 | # from the environment for the first two. 7 | SPHINXOPTS ?= 8 | SPHINXBUILD ?= sphinx-build 9 | SOURCEDIR = source 10 | BUILDDIR = build 11 | 12 | # Put it first so that "make" without argument is like "make help". 13 | help: 14 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 15 | 16 | .PHONY: help Makefile 17 | 18 | # Catch-all target: route all unknown targets to Sphinx using the new 19 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 20 | %: Makefile 21 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 22 | -------------------------------------------------------------------------------- /manuscripts/ORE/make.bat: -------------------------------------------------------------------------------- 1 | REM This file was generated with sphinx-quickstart version 3.5.0 2 | 3 | @ECHO OFF 4 | 5 | pushd %~dp0 6 | 7 | REM Command file for Sphinx documentation 8 | 9 | if "%SPHINXBUILD%" == "" ( 10 | set SPHINXBUILD=sphinx-build 11 | ) 12 | set SOURCEDIR=source 13 | set BUILDDIR=build 14 | 15 | if "%1" == "" goto help 16 | 17 | %SPHINXBUILD% >NUL 2>NUL 18 | if errorlevel 9009 ( 19 | echo. 20 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 21 | echo.installed, then set the SPHINXBUILD environment variable to point 22 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 23 | echo.may add the Sphinx directory to PATH. 24 | echo. 25 | echo.If you don't have Sphinx installed, grab it from 26 | echo.http://sphinx-doc.org/ 27 | exit /b 1 28 | ) 29 | 30 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 31 | goto end 32 | 33 | :help 34 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 35 | 36 | :end 37 | popd 38 | -------------------------------------------------------------------------------- /manuscripts/ORE/source/_static/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/manuscripts/ORE/source/_static/.gitkeep -------------------------------------------------------------------------------- /manuscripts/ORE/source/chapters/appendix.rst: -------------------------------------------------------------------------------- 1 | Appendix 2 | ======== 3 | 4 | Software references 5 | ------------------- 6 | 7 | List of references for all packages and tools listed in :numref:`overview`. 
8 | 9 | - **pandas**: https://pandas.pydata.org 10 | - **numpy**: https://numpy.org 11 | - **tidyverse**: https://www.tidyverse.org 12 | - **Queryverse.jl**: https://www.queryverse.org 13 | - **matplotlib**: https://matplotlib.org, :cite:`Hunter:2007:matplotlib` 14 | - **seaborn**: https://seaborn.pydata.org, :cite:`Waskom:2021:seaborn` 15 | - **ggplot**: https://ggplot2.tidyverse.org 16 | - **shiny**: https://shiny.rstudio.com 17 | - **madrat**: https://github.com/pik-piam/madrat, :cite:`Dietrich:2021:madrat` 18 | - **iamc**: https://github.com/IAMconsortium/iamc 19 | - **genno**: https://genno.readthedocs.io 20 | - **mipplot**: https://github.com/UTokyo-mip/mipplot, :cite:`Yiyi:2021:mipplot` 21 | - **PUDL**: https://catalyst.coop/pudl 22 | - **PowerGenome**: https://github.com/PowerGenome/PowerGenome 23 | - **PowerSystems.jl**: https://github.com/NREL-SIIP/PowerSystems.jl 24 | - **Open Energy Platform**: https://openenergy-platform.org 25 | - **Spine Toolbox**: https://spine-toolbox.readthedocs.io 26 | - **TIMES-VEDA**: https://veda-documentation.readthedocs.io 27 | - **OSeMOSYS**: http://www.osemosys.org 28 | - **MESSAGEix**: https://docs.messageix.org 29 | - **REMIND**: https://www.pik-potsdam.de/en/institute/departments/transformation-pathways/models/remind 30 | - **GCAM**: http://www.globalchange.umd.edu/gcam/ 31 | - **mimi.jl**: https://www.mimiframework.org 32 | - **TEMOA**: https://github.com/TemoaProject/temoa, :cite:`DeCarolis:2016:TEMOA` 33 | - **pypsa**: https://pypsa.org, :cite:`Brown:2018:PyPSA` 34 | - **PLEXOS**: https://energyexemplar.com/solutions/plexos 35 | - **RESKit**: https://github.com/FZJ-IEK3-VSA/RESKit, :cite:`Ryberg:2019:RESKit` 36 | - **glaes**: https://github.com/FZJ-IEK3-VSA/glaes 37 | 38 | Software availability 39 | --------------------- 40 | 41 | Source code available from: https://github.com/IAMconsortium/pyam 42 | 43 | Archived source code at time of publication: xxx 44 | 45 | Data availability 46 | ----------------- 47 | 48 | No data are associated with this article. 49 | 50 | Grant information 51 | ----------------- 52 | 53 | This research was financially supported by the European Union’s Horizon 2020 research 54 | and innovation programme under the grant agreement No 835896 (project openENTRANCE). 55 | -------------------------------------------------------------------------------- /manuscripts/ORE/source/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 2 | # 3 | # This file only contains a selection of the most common options. For a full 4 | # list see the documentation: 5 | # https://www.sphinx-doc.org/en/master/usage/configuration.html 6 | 7 | # -- Path setup -------------------------------------------------------------- 8 | 9 | # If extensions (or modules to document with autodoc) are in another directory, 10 | # add these directories to sys.path here. If the directory is relative to the 11 | # documentation root, use os.path.abspath to make it absolute, like shown here. 12 | # 13 | # import os 14 | # import sys 15 | # sys.path.insert(0, os.path.abspath('.')) 16 | 17 | 18 | # -- Project information ----------------------------------------------------- 19 | 20 | project = "pyam" 21 | copyright = "2024 IIASA and the pyam developer team" 22 | author = "Daniel Huppmann, Matthew Gidden, et al." 
23 | 24 | 25 | # -- General configuration --------------------------------------------------- 26 | 27 | # Add any Sphinx extension module names here, as strings. They can be 28 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 29 | # ones. 30 | extensions = [ 31 | "sphinxcontrib.bibtex", 32 | ] 33 | 34 | # Add any paths that contain templates here, relative to this directory. 35 | templates_path = ["_templates"] 36 | 37 | # List of patterns, relative to source directory, that match files and 38 | # directories to ignore when looking for source files. 39 | # This pattern also affects html_static_path and html_extra_path. 40 | exclude_patterns = [] 41 | 42 | # Activate numbering of figures 43 | numfig = True 44 | 45 | # -- Options for HTML output ------------------------------------------------- 46 | 47 | # The theme to use for HTML and HTML Help pages. See the documentation for 48 | # a list of builtin themes. 49 | # 50 | html_theme = "alabaster" 51 | 52 | # Add any paths that contain custom static files (such as style sheets) here, 53 | # relative to this directory. They are copied after the builtin static files, 54 | # so a file named "default.css" will overwrite the builtin "default.css". 55 | html_static_path = ["_static"] 56 | 57 | # -- Options for bibtex files ------------------------------------------------ 58 | 59 | bibtex_bibfiles = ["_bib/main.bib"] 60 | -------------------------------------------------------------------------------- /manuscripts/ORE/source/figure/overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/manuscripts/ORE/source/figure/overview.png -------------------------------------------------------------------------------- /manuscripts/ORE/source/figure/sr15_fig2.4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/manuscripts/ORE/source/figure/sr15_fig2.4.png -------------------------------------------------------------------------------- /manuscripts/ORE/source/figure/temperature-by-category.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/manuscripts/ORE/source/figure/temperature-by-category.png -------------------------------------------------------------------------------- /profile/.gitignore: -------------------------------------------------------------------------------- 1 | .pymon 2 | -------------------------------------------------------------------------------- /profile/README.md: -------------------------------------------------------------------------------- 1 | # Profiling 2 | 3 | This module provides utility code to run benchmarking on features of pyam. 4 | 5 | ### *Work in progress!* 6 | 7 | This module is in an experimental stage. 8 | We are currently investigating additional/alternative packages for benchmarking 9 | and profiling, and the preferred solution may change at any time. 
10 | 11 | **To-do** 12 | 13 | Required steps to make this module a full part of the pyam package: 14 | 15 | - Include module dependencies as extra-requirements of the installation 16 | - Migrate gitignore from this folder to the main gitignore file 17 | 18 | ## Dependencies and usage 19 | 20 | This module uses the [pytest-monitor](https://pytest-monitor.readthedocs.io) package. 21 | 22 | ### Installation 23 | 24 | In addition to an installation of **pyam** with all optional dependencies, 25 | install the pytest-extension using 26 | 27 | ``` 28 | pip install pytest-monitor 29 | ``` 30 | 31 | ### Usage 32 | 33 | The **pytest-monitor** package is executed automatically (if installed) when 34 | running **pytest**, writing metrics for each test to a SQLite database (``.pymon``). 35 | To use the profiling module, navigate to the `profile` folder and run pytest. 36 | Then, ``profile_report.py`` prints metrics of the latest session to the command line. 37 | 38 | ``` 39 | pytest . 40 | python profile_report.py 41 | ``` 42 | 43 | ## Adding benchmarks and profile tests 44 | 45 | To add profiling/benchmarking tests, add any standard pytest-functions to this folder. 46 | -------------------------------------------------------------------------------- /profile/conftest.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | import pandas as pd 4 | import pytest 5 | 6 | from pyam import IAMC_IDX, IamDataFrame 7 | 8 | DATA_PATH = Path("data") 9 | TEST_DF = pd.DataFrame( 10 | [ 11 | ["model_a", "scen_a", "World", "Primary Energy", "EJ/yr", 1, 6.0], 12 | ["model_a", "scen_a", "World", "Primary Energy|Coal", "EJ/yr", 0.5, 3], 13 | ["model_a", "scen_b", "World", "Primary Energy", "EJ/yr", 2, 7], 14 | ], 15 | columns=IAMC_IDX + [2005, 2010], 16 | ) 17 | 18 | TEST_FRAMES = [TEST_DF] + [ 19 | pd.read_excel(f, sheet_name="data") for f in DATA_PATH.glob("*.xlsx") 20 | ] 21 | 22 | 23 | @pytest.fixture(scope="function", params=TEST_FRAMES) 24 | def data(request): 25 | yield request.param 26 | 27 | 28 | @pytest.fixture(scope="function", params=TEST_FRAMES) 29 | def df(request): 30 | yield IamDataFrame(request.param) 31 | -------------------------------------------------------------------------------- /profile/data/README.md: -------------------------------------------------------------------------------- 1 | # Profile data folder 2 | 3 | Please place any xlsx files to be used for benchmarking in this folder. 
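As a concrete illustration of the "Adding benchmarks and profile tests" section in the profile README above, a minimal sketch of an additional benchmark; `test_aggregate` is a hypothetical name, the `aggregate` call is only an assumed workload, and the `df` fixture is the one provided by `conftest.py` above:

```
# hypothetical extra benchmark, to be added e.g. to profile/test_profile.py;
# pytest-monitor records its runtime, CPU and memory use like any other test
def test_aggregate(df):
    # sum the "Primary Energy|*" components of each test IamDataFrame
    df.aggregate("Primary Energy")
```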
-------------------------------------------------------------------------------- /profile/profile_report.py: -------------------------------------------------------------------------------- 1 | import sqlite3 2 | 3 | import pandas as pd 4 | 5 | 6 | def main(): 7 | db = sqlite3.connect(".pymon") 8 | 9 | print( 10 | pd.read_sql_query( 11 | "SELECT item_path, item_variant, total_time, cpu_usage, mem_usage " 12 | "FROM test_metrics WHERE session_h = (" 13 | " SELECT session_h FROM test_sessions " 14 | " WHERE run_date = (SELECT MAX(run_date) FROM test_sessions)" 15 | ")", 16 | db, 17 | ) 18 | ) 19 | 20 | 21 | if __name__ == "__main__": 22 | main() 23 | -------------------------------------------------------------------------------- /profile/test_profile.py: -------------------------------------------------------------------------------- 1 | from pyam import IamDataFrame 2 | 3 | 4 | def test_init(data): 5 | IamDataFrame(data) 6 | 7 | 8 | def test_filter(df): 9 | df.filter(year=2010) 10 | -------------------------------------------------------------------------------- /pyam/__init__.py: -------------------------------------------------------------------------------- 1 | from importlib.metadata import PackageNotFoundError, version 2 | 3 | from pyam.core import ( 4 | IamDataFrame, 5 | categorize, 6 | check_aggregate, 7 | compare, 8 | concat, 9 | filter_by_meta, 10 | read_datapackage, 11 | require_variable, 12 | validate, 13 | ) 14 | from pyam.iiasa import lazy_read_iiasa, read_iiasa 15 | from pyam.ixmp4 import read_ixmp4 16 | from pyam.logging import configure_logging 17 | from pyam.netcdf import read_netcdf 18 | from pyam.run_control import run_control 19 | from pyam.statistics import Statistics 20 | from pyam.testing import assert_iamframe_equal 21 | from pyam.unfccc import read_unfccc 22 | from pyam.utils import IAMC_IDX 23 | from pyam.worldbank import read_worldbank 24 | 25 | try: 26 | __version__ = version("pyam-iamc") 27 | # the pyam package is distributed under different names on pypi and conda 28 | except PackageNotFoundError: 29 | __version__ = version("pyam") 30 | 31 | # Set up logging consistent with the ixmp4 "production" logging configuration 32 | configure_logging() 33 | -------------------------------------------------------------------------------- /pyam/_compare.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pandas as pd 3 | 4 | 5 | def _compare( 6 | left, right, left_label="left", right_label="right", drop_close=True, **kwargs 7 | ): 8 | """Internal implementation of comparison of IamDataFrames or pd.Series""" 9 | 10 | def as_series(s): 11 | return s if isinstance(s, pd.Series) else s._data 12 | 13 | ret = pd.merge( 14 | left=as_series(left).rename(index=left_label), 15 | right=as_series(right).rename(index=right_label), 16 | how="outer", 17 | left_index=True, 18 | right_index=True, 19 | ) 20 | if drop_close: 21 | ret = ret[~np.isclose(ret[left_label], ret[right_label], **kwargs)] 22 | return ret 23 | -------------------------------------------------------------------------------- /pyam/_debiasing.py: -------------------------------------------------------------------------------- 1 | def _compute_bias(df, name, method, axis): 2 | """Internal implementation for computing bias weights""" 3 | if method == "count": 4 | # invert from the count to obtain the weighting factor 5 | count = 1 / df.exclude.groupby(axis).count() 6 | count.name = name 7 | df.meta = df.meta.join(count, on=axis, how="outer") 8 | else: 9 | 
raise ValueError(f"Unknown method {method} for computing bias weights!") 10 | -------------------------------------------------------------------------------- /pyam/_style.py: -------------------------------------------------------------------------------- 1 | # 2 | # This file is taken from https://github.com/pandas-dev/pandas as suggested by 3 | # the pandas-dev mailing list. The goal is to ask the matplotlib devs to 4 | # eventually incorporate this into matplotlib proper. 5 | # 6 | 7 | # being a bit too dynamic 8 | import warnings 9 | 10 | import matplotlib.colors 11 | import numpy as np 12 | import pandas.core.common as com 13 | from pandas.core.dtypes.common import is_list_like 14 | 15 | 16 | def _get_standard_colors( # noqa: C901 17 | num_colors=None, colormap=None, color_type="default", color=None 18 | ): 19 | import matplotlib.pyplot as plt 20 | 21 | if color is None and colormap is not None: 22 | if isinstance(colormap, str): 23 | cmap = colormap 24 | colormap = plt.get_cmap(colormap) 25 | if colormap is None: 26 | raise ValueError(f"Colormap {cmap} is not recognized") 27 | colors = [colormap(num) for num in np.linspace(0, 1, num=num_colors)] 28 | elif color is not None: 29 | if colormap is not None: 30 | warnings.warn( 31 | "'color' and 'colormap' cannot be used " 32 | + "simultaneously. Using 'color'" 33 | ) 34 | colors = list(color) if is_list_like(color) else color 35 | else: 36 | if color_type == "default": 37 | # need to call list() on the result to copy so we don't 38 | # modify the global rcParams below 39 | try: 40 | colors = [c["color"] for c in list(plt.rcParams["axes.prop_cycle"])] 41 | except KeyError: 42 | colors = list(plt.rcParams.get("axes.color_cycle", list("bgrcmyk"))) 43 | if isinstance(colors, str): 44 | colors = list(colors) 45 | 46 | colors = colors[0:num_colors] 47 | elif color_type == "random": 48 | 49 | def random_color(column): 50 | """Returns a random color represented as a list of length 3""" 51 | # GH17525 use common._random_state to avoid resetting the seed 52 | rs = com.random_state(column) 53 | return rs.rand(3).tolist() 54 | 55 | colors = [random_color(num) for num in range(num_colors)] 56 | else: 57 | raise ValueError("color_type must be either 'default' or 'random'") 58 | 59 | if isinstance(colors, str): 60 | conv = matplotlib.colors.ColorConverter() 61 | 62 | def _maybe_valid_colors(colors): 63 | try: 64 | [conv.to_rgba(c) for c in colors] 65 | return True 66 | except ValueError: 67 | return False 68 | 69 | # check whether the string can be convertible to single color 70 | maybe_single_color = _maybe_valid_colors([colors]) 71 | # check whether each character can be convertible to colors 72 | maybe_color_cycle = _maybe_valid_colors(list(colors)) 73 | if maybe_single_color and maybe_color_cycle and len(colors) > 1: 74 | hex_color = [c["color"] for c in list(plt.rcParams["axes.prop_cycle"])] 75 | colors = [hex_color[int(colors[1])]] 76 | elif maybe_single_color: 77 | colors = [colors] 78 | else: 79 | # ``colors`` is regarded as color cycle. 80 | # mpl will raise error any of them is invalid 81 | pass 82 | 83 | # Append more colors by cycling if there is not enough color. 84 | # Extra colors will be ignored by matplotlib if there are more colors 85 | # than needed and nothing needs to be done here. 
86 | if len(colors) < num_colors: 87 | try: 88 | multiple = num_colors // len(colors) - 1 89 | except ZeroDivisionError: 90 | raise ValueError("Invalid color argument: ''") 91 | mod = num_colors % len(colors) 92 | 93 | colors += multiple * colors 94 | colors += colors[:mod] 95 | 96 | return colors 97 | -------------------------------------------------------------------------------- /pyam/figures.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import pandas as pd 4 | 5 | from pyam.index import get_index_levels 6 | 7 | logger = logging.getLogger(__name__) 8 | 9 | try: 10 | import plotly.graph_objects as go 11 | 12 | HAS_PLOTLY = True 13 | except ImportError: # pragma: no cover 14 | go = None 15 | HAS_PLOTLY = False 16 | 17 | 18 | def sankey(df, mapping): 19 | """Plot a sankey diagram 20 | 21 | It is currently only possible to create this diagram for single years. 22 | 23 | Parameters 24 | ---------- 25 | df : :class:`pyam.IamDataFrame` 26 | Data to be plotted 27 | mapping : dict 28 | Assigns the source and target component of a variable 29 | 30 | .. code-block:: python 31 | 32 | { 33 | variable: (source, target), 34 | } 35 | 36 | Returns 37 | ------- 38 | fig : :class:`plotly.graph_objects.Figure` 39 | """ 40 | if not HAS_PLOTLY: # pragma: no cover 41 | raise ImportError( 42 | "Missing optional dependency `plotly`, use pip or conda to install" 43 | ) 44 | # Check for duplicates 45 | for col in [name for name in df.dimensions if name != "variable"]: 46 | levels = get_index_levels(df._data, col) 47 | if len(levels) > 1: 48 | raise ValueError(f"Non-unique values in column {col}: {levels}") 49 | 50 | # Concatenate the data with source and target columns 51 | _df = pd.DataFrame.from_dict( 52 | mapping, orient="index", columns=["source", "target"] 53 | ).merge(df._data, how="left", left_index=True, right_on="variable") 54 | label_mapping = { 55 | label: i 56 | for i, label in enumerate(set(pd.concat([_df["source"], _df["target"]]))) 57 | } 58 | _df.replace(label_mapping, inplace=True) 59 | region = get_index_levels(_df, "region")[0] 60 | unit = get_index_levels(_df, "unit")[0] 61 | year = get_index_levels(_df, "year")[0] 62 | fig = go.Figure( 63 | data=[ 64 | go.Sankey( 65 | valuesuffix=unit, 66 | node=dict( 67 | pad=15, 68 | thickness=10, 69 | line=dict(color="black", width=0.5), 70 | label=pd.Series(list(label_mapping)), 71 | hovertemplate="%{label}: %{value}", 72 | color="blue", 73 | ), 74 | link=dict( 75 | source=_df.source, 76 | target=_df.target, 77 | value=_df.value, 78 | hovertemplate='"%{source.label}" to "%{target.label}": \ 79 | %{value}', 80 | ), 81 | ) 82 | ] 83 | ) 84 | fig.update_layout(title_text=f"region: {region}, year: {year}", font_size=10) 85 | return fig 86 | -------------------------------------------------------------------------------- /pyam/index.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pandas as pd 3 | 4 | from pyam.logging import raise_data_error 5 | 6 | 7 | def get_index_level_number(index, level): 8 | """Return the number of a specific level""" 9 | return index._get_level_number(level) 10 | 11 | 12 | def get_index_levels(index, level): 13 | """Return the labels for a specific level""" 14 | 15 | if not isinstance(index, pd.Index): 16 | index = index.index # assume that the arg `index` is a pd.DataFrame 17 | 18 | if isinstance(index, pd.MultiIndex): 19 | return list(index.levels[get_index_level_number(index, level)]) 20 | 21 | # 
if index is one-dimensional, make sure that the "level" is the name 22 | if index.name != level: 23 | raise KeyError(f"Index does not have a level {level}") 24 | return list(index) 25 | 26 | 27 | def get_index_levels_codes(df, level): 28 | """Return the category-values and codes for a specific level""" 29 | n = get_index_level_number(df.index, level) 30 | return df.index.levels[n], df.index.codes[n] 31 | 32 | 33 | def get_keep_col(codes, matches): 34 | """Return boolean mask where *matches* appear in *codes* 35 | 36 | *matches* can be given as either: 37 | 1. A boolean mask against the levels of a multiindex, or 38 | 2. A subset of integers in *codes* 39 | """ 40 | matches = np.asanyarray(matches) 41 | 42 | if np.issubdtype(matches.dtype, "bool"): 43 | (matches,) = np.where(matches) 44 | 45 | return np.isin(codes, matches) 46 | 47 | 48 | def replace_index_values(df, name, mapping, rows=None): 49 | """Replace one or several category-values at a specific level (for specific rows)""" 50 | index = df if isinstance(df, pd.Index) else df.index 51 | 52 | n = get_index_level_number(index, name) 53 | 54 | # if replacing level values with a filter (by rows) 55 | if rows is not None and not all(rows): 56 | _levels = pd.Series(index.get_level_values(n)) 57 | renamed_index = replace_index_values(index[rows], name, mapping) 58 | _levels[rows] = list(renamed_index.get_level_values(n)) 59 | _unique_levels = pd.Index(_levels.unique()) 60 | 61 | return append_index_level( 62 | index=index.droplevel(n), 63 | codes=_unique_levels.get_indexer(_levels), 64 | level=_unique_levels, 65 | name=name, 66 | order=index.names, 67 | ) 68 | 69 | # else, replace the level values for the entire index dimension 70 | _levels = index.levels[n].map(lambda level: mapping.get(level, level)) 71 | _unique_levels = _levels.unique() 72 | 73 | # if no duplicate levels exist after replace, set new levels and return 74 | if len(index.levels[n]) == len(_unique_levels): 75 | return index.set_levels(_levels, level=n) 76 | 77 | # if duplicate levels exist, re-map the codes 78 | level_mapping = _unique_levels.get_indexer(_levels) 79 | _codes = np.where(index.codes[n] != -1, level_mapping[index.codes[n]], -1) 80 | return index.set_codes(_codes, level=n).set_levels(_unique_levels, level=n) 81 | 82 | 83 | def replace_index_labels(index, name, labels): 84 | """Replace the labels for a specific level""" 85 | 86 | n = get_index_level_number(index, name) 87 | codes = index.codes[n] 88 | return append_index_level(index.droplevel(n), codes, labels, name, index.names) 89 | 90 | 91 | def append_index_col(index, values, name, order=False): 92 | """Append a list of `values` as a new column (level) to an `index`""" 93 | levels = pd.Index(values).unique() 94 | codes = levels.get_indexer(values) 95 | 96 | return append_index_level(index, codes, levels, name, order) 97 | 98 | 99 | def append_index_level(index, codes, level, name, order=False): 100 | """Append a level to a pd.MultiIndex""" 101 | if isinstance(level, str): 102 | level = [level] 103 | codes = [codes] * len(index.codes[0]) 104 | 105 | new_index = pd.MultiIndex( 106 | codes=index.codes + [codes], 107 | levels=index.levels + [level], 108 | names=index.names + [name], 109 | ) 110 | if order: 111 | new_index = new_index.reorder_levels(order) 112 | return new_index 113 | 114 | 115 | def verify_index_integrity(df): 116 | """Verify integrity of index 117 | 118 | Arguments 119 | --------- 120 | df : Union[pd.DataFrame, pd.Series, pd.Index] 121 | 122 | Raises 123 | ------ 124 | ValueError 125 | 126
| index = df if isinstance(df, pd.Index) else df.index 127 | if not index.is_unique: 128 | overlap = index[index.duplicated()].unique() 129 | 130 | raise_data_error( 131 | "Timeseries data has overlapping values", overlap.to_frame(index=False) 132 | ) 133 | -------------------------------------------------------------------------------- /pyam/ixmp4.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import ixmp4 4 | import pandas as pd 5 | from ixmp4.core.region import RegionModel 6 | from ixmp4.core.unit import UnitModel 7 | 8 | logger = logging.getLogger(__name__) 9 | 10 | 11 | def read_ixmp4( 12 | platform: ixmp4.Platform | str, 13 | default_only: bool = True, 14 | model: str | list[str] | None = None, 15 | scenario: str | list[str] | None = None, 16 | region: str | list[str] | None = None, 17 | variable: str | list[str] | None = None, 18 | unit: str | list[str] | None = None, 19 | year: int | list[int] | None = None, 20 | ): 21 | """Read scenario runs from an ixmp4 platform database instance 22 | 23 | Parameters 24 | ---------- 25 | platform : :class:`ixmp4.Platform` or str 26 | The ixmp4 platform database instance to which the scenario data is saved. 27 | default_only : :class:`bool`, optional 28 | Read only default runs. 29 | model, scenario, region, variable, unit : str or list of str, optional 30 | Filter by these dimensions. 31 | year : int or list of int, optional 32 | Filter by time domain. 33 | """ 34 | from pyam import IamDataFrame 35 | 36 | if not isinstance(platform, ixmp4.Platform): 37 | platform = ixmp4.Platform(platform) 38 | 39 | # TODO This may have to be revised, see https://github.com/iiasa/ixmp4/issues/72 40 | meta_filters = dict( 41 | run=dict(default_only=default_only, model=model, scenario=scenario) 42 | ) 43 | iamc_filters = dict( 44 | run=dict(default_only=default_only), 45 | model=model, 46 | scenario=scenario, 47 | region=region, 48 | variable=variable, 49 | unit=unit, 50 | year=year, 51 | ) 52 | data = platform.iamc.tabulate(**iamc_filters) 53 | meta = platform.meta.tabulate(**meta_filters) 54 | 55 | # if default-only, simplify to standard IAMC index, add `version` as meta indicator 56 | if default_only: 57 | index = ["model", "scenario"] 58 | meta_version = ( 59 | data[index + ["version"]] 60 | .drop_duplicates() 61 | .rename(columns={"version": "value"}) 62 | ) 63 | meta_version["key"] = "version" 64 | meta = pd.concat([meta.drop(columns="version"), meta_version]) 65 | data.drop(columns="version", inplace=True) 66 | else: 67 | index = ["model", "scenario", "version"] 68 | 69 | return IamDataFrame(data, meta=meta, index=index) 70 | 71 | 72 | def write_to_ixmp4(platform: ixmp4.Platform | str, df): 73 | """Save all scenarios as new default runs in an ixmp4 platform database instance 74 | 75 | Parameters 76 | ---------- 77 | platform : :class:`ixmp4.Platform` or str 78 | The ixmp4 platform database instance to which the scenario data is saved 79 | df : pyam.IamDataFrame 80 | The IamDataFrame instance with scenario data 81 | """ 82 | if df.time_domain != "year": 83 | raise NotImplementedError("Only time_domain='year' is supported for now.") 84 | 85 | if not isinstance(platform, ixmp4.Platform): 86 | platform = ixmp4.Platform(platform) 87 | 88 | # TODO: implement try-except to roll back changes if any error writing to platform 89 | # depends on https://github.com/iiasa/ixmp4/issues/29 90 | # quickfix: ensure that units and regions exist before writing 91 | for dimension, values, model in [ 92 | 
("regions", df.region, RegionModel), 93 | ("units", df.unit, UnitModel), 94 | ]: 95 | platform_values = getattr(platform, dimension).tabulate().name.values 96 | if missing := set(values).difference(platform_values): 97 | raise model.NotFound( 98 | ", ".join(missing) 99 | + f". Use `Platform.{dimension}.create()` to add missing elements." 100 | ) 101 | 102 | # The "version" meta-indicator, added when reading from an ixmp4 platform, 103 | # should not be written to the platform 104 | if "version" in df.meta.columns: 105 | logger.warning( 106 | "The `meta.version` column was dropped when writing to the ixmp4 platform." 107 | ) 108 | meta = df.meta.drop(columns="version") 109 | else: 110 | meta = df.meta.copy() 111 | 112 | # Create runs and add IAMC timeseries data and meta indicators 113 | for model, scenario in df.index: 114 | _df = df.filter(model=model, scenario=scenario) 115 | 116 | run = platform.runs.create(model=model, scenario=scenario) 117 | run.iamc.add(_df.data) 118 | if not meta.empty: 119 | run.meta = dict(meta.loc[(model, scenario)]) 120 | run.set_as_default() 121 | -------------------------------------------------------------------------------- /pyam/logging.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": 1, 3 | "disable_existing_loggers": false, 4 | "formatters": { 5 | "generic": { 6 | "format": "[%(levelname)s] %(asctime)s - %(name)s: %(message)s", 7 | "datefmt": "%H:%M:%S" 8 | } 9 | }, 10 | "loggers": { 11 | "pyam": { 12 | "level": "INFO", 13 | "handlers": [ 14 | "console" 15 | ] 16 | } 17 | }, 18 | "handlers": { 19 | "console": { 20 | "class": "logging.StreamHandler", 21 | "level": "NOTSET", 22 | "formatter": "generic" 23 | } 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /pyam/logging.py: -------------------------------------------------------------------------------- 1 | import json 2 | import warnings 3 | from contextlib import contextmanager 4 | from logging import config, getLogger 5 | from pathlib import Path 6 | 7 | import pandas as pd 8 | 9 | here = Path(__file__).parent 10 | logger = getLogger(__name__) 11 | 12 | 13 | def configure_logging(): 14 | """Configure logging""" 15 | logging_config = here / "logging.json" 16 | with open(logging_config) as file: 17 | config.dictConfig(json.load(file)) 18 | 19 | 20 | @contextmanager 21 | def adjust_log_level(logger="pyam", level="ERROR"): 22 | """Context manager to change log level""" 23 | if isinstance(logger, str): 24 | logger = getLogger(logger) 25 | old_level = logger.getEffectiveLevel() 26 | logger.setLevel(level) 27 | yield 28 | logger.setLevel(old_level) 29 | 30 | 31 | def deprecation_warning(msg, item="This method", stacklevel=3): 32 | """Write deprecation warning to log""" 33 | warnings.simplefilter("always", DeprecationWarning) 34 | message = f"{item} is deprecated and will be removed in future versions. {msg}" 35 | warnings.warn(message, DeprecationWarning, stacklevel=stacklevel) 36 | 37 | 38 | def raise_data_error(msg, data): 39 | """Format error message with (head of) data table and raise""" 40 | raise ValueError(format_log_message(msg, data)) 41 | 42 | 43 | def format_log_message(msg, data): 44 | """Utils function to format message with (head of) data table""" 45 | if isinstance(data, pd.MultiIndex): 46 | data = data.to_frame(index=False) 47 | data = data.drop_duplicates() 48 | return f"{msg}:\n{data.head()}" + ("\n..." 
if len(data) > 5 else "") 49 | -------------------------------------------------------------------------------- /pyam/run_control.py: -------------------------------------------------------------------------------- 1 | import copy 2 | import os 3 | from collections.abc import Mapping 4 | 5 | import yaml 6 | 7 | from pyam.str import is_str 8 | 9 | # user-defined defaults for various plot settings 10 | _RUN_CONTROL = None 11 | 12 | 13 | # path to regional mapping files 14 | def _REG_MAP_PATH(x) -> str: 15 | return os.path.join( 16 | os.path.abspath(os.path.dirname(__file__)), "region_mappings", x 17 | ) 18 | 19 | 20 | # defaults for run control 21 | _RC_DEFAULTS = { 22 | "color": {}, 23 | "marker": {}, 24 | "linestyle": {}, 25 | "order": {}, 26 | } 27 | 28 | 29 | def reset_rc_defaults(): 30 | """Reset run control object to original defaults""" 31 | global _RUN_CONTROL 32 | _RUN_CONTROL = RunControl() 33 | 34 | 35 | def run_control(): 36 | """Global run control for user-defined plotting style defaults""" 37 | global _RUN_CONTROL 38 | if _RUN_CONTROL is None: 39 | _RUN_CONTROL = RunControl() 40 | return _RUN_CONTROL 41 | 42 | 43 | def _recursive_update(d, u): 44 | """recursively update a dictionary d with a dictionary u""" 45 | for k, v in u.items(): 46 | if isinstance(v, Mapping): 47 | r = _recursive_update(d.get(k, {}), v) 48 | d[k] = r 49 | elif isinstance(v, list): # values for `order` are lists 50 | if k in d: 51 | d[k] += [i for i in v if i not in d[k]] 52 | else: 53 | d[k] = v 54 | else: 55 | d[k] = u[k] 56 | return d 57 | 58 | 59 | class RunControl(Mapping): 60 | """A thin wrapper around a Python dictionary for plotting style defaults 61 | 62 | Input can be provided as dictionaries or YAML files. 63 | """ 64 | 65 | def __init__(self, rc=None, defaults=None): 66 | """ 67 | Parameters 68 | ---------- 69 | rc : string, file, dictionary, optional 70 | a path to a YAML file, a file handle for a YAML file, or a 71 | dictionary describing run control configuration 72 | defaults : string, file, dictionary, optional 73 | a path to a YAML file, a file handle for a YAML file, or a 74 | dictionary describing **default** run control configuration 75 | """ 76 | rc = rc or {} 77 | defaults = defaults or copy.deepcopy(_RC_DEFAULTS) 78 | 79 | rc = self._load_yaml(rc) 80 | defaults = self._load_yaml(defaults) 81 | self.store = _recursive_update(defaults, rc) 82 | 83 | def update(self, rc): 84 | """Add additional run control parameters 85 | 86 | Parameters 87 | ---------- 88 | rc : string, file, dictionary, optional 89 | a path to a YAML file, a file handle for a YAML file, or a 90 | dictionary describing run control configuration 91 | """ 92 | rc = self._load_yaml(rc) 93 | self.store = _recursive_update(self.store, rc) 94 | 95 | def __getitem__(self, k): 96 | return self.store[k] 97 | 98 | def __iter__(self): 99 | return iter(self.store) 100 | 101 | def __len__(self): 102 | return len(self.store) 103 | 104 | def __repr__(self): 105 | return self.store.__repr__() 106 | 107 | def _get_path(self, key, fyaml, fname): 108 | if os.path.exists(fname): 109 | return fname 110 | 111 | _fname = os.path.join(os.path.dirname(fyaml), fname) 112 | if not os.path.exists(_fname): 113 | msg = ( 114 | "YAML key '{}' in {}: {} is not a valid relative " + "or absolute path" 115 | ) 116 | raise OSError(msg.format(key, fyaml, fname)) 117 | return _fname 118 | 119 | def _load_yaml(self, obj): 120 | if hasattr(obj, "read"): # it's a file 121 | obj = obj.read() 122 | if is_str(obj) and not os.path.exists(obj): 123 | raise 
OSError(f"File {obj} does not exist") 124 | if is_str(obj) and os.path.exists(obj): 125 | fname = obj 126 | with open(fname) as f: 127 | obj = f.read() 128 | if not isinstance(obj, dict): 129 | obj = yaml.load(obj, Loader=yaml.FullLoader) 130 | return obj 131 | 132 | def recursive_update(self, k, d): 133 | """Recursively update a top-level option in the run control 134 | 135 | Parameters 136 | ---------- 137 | k : string 138 | the top-level key 139 | d : dictionary or similar 140 | the dictionary to use for updating 141 | """ 142 | u = self.__getitem__(k) 143 | self.store[k] = _recursive_update(u, d) 144 | -------------------------------------------------------------------------------- /pyam/slice.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | 3 | from pyam.utils import print_list 4 | 5 | 6 | class IamSlice(pd.Series): 7 | """A slice object of the IamDataFrame timeseries data index""" 8 | 9 | @property 10 | def _constructor(self): 11 | return IamSlice 12 | 13 | _internal_names = pd.Series._internal_names + ["_iamcache"] 14 | _internal_names_set = set(_internal_names) 15 | 16 | def __init__(self, data=None, index=None, **kwargs): 17 | super().__init__(data, index, **kwargs) 18 | self._iamcache = dict() 19 | 20 | def __dir__(self): 21 | return self.dimensions + super().__dir__() 22 | 23 | def __getattr__(self, attr): 24 | try: 25 | return super().__getattr__(attr) 26 | except AttributeError: 27 | cache = object.__getattribute__(self, "_iamcache") 28 | ret = cache.get(attr) 29 | if ret is not None: 30 | return ret.tolist() 31 | 32 | if attr in self.dimensions: 33 | ret = cache[attr] = self.index[self].unique(level=attr) 34 | return ret.tolist() 35 | 36 | raise 37 | 38 | def __len__(self): 39 | return self.sum() 40 | 41 | @property 42 | def dimensions(self): 43 | """Return the list of index names & data coordinates""" 44 | return self.index.names 45 | 46 | @property 47 | def time(self): 48 | """The time index, i.e., axis labels related to the time domain. 49 | 50 | Returns 51 | ------- 52 | - A :class:`pandas.Index` (dtype 'int64') if the :attr:`time_domain` is 'year' 53 | - A :class:`pandas.DatetimeIndex` if the time-domain is 'datetime' 54 | - A :class:`pandas.Index` if the time-domain is 'mixed' 55 | """ 56 | ret = self._iamcache.get("time") 57 | if ret is None: 58 | ret = self._iamcache["time"] = ( 59 | self.index[self].unique(level=self.time_col).rename("time") 60 | ) 61 | return ret 62 | 63 | @property 64 | def time_col(self): 65 | return "year" if "year" in self.dimensions else "time" 66 | 67 | def __repr__(self): 68 | return self.info() 69 | 70 | def info(self, n=80): 71 | """Print a summary of the represented index dimensions and data coordinates 72 | 73 | Parameters 74 | ---------- 75 | n : int 76 | The maximum line length 77 | """ 78 | # concatenate list of index dimensions and levels 79 | info = f"{type(self)}\nIndex dimensions and data coordinates:\n" 80 | c1 = max([len(i) for i in self.dimensions]) + 1 81 | c2 = n - c1 - 5 82 | info += "\n".join( 83 | [ 84 | f" {i:{c1}}: {print_list(getattr(self, i), c2)}" 85 | for i in self.dimensions 86 | ] 87 | ) 88 | 89 | return info 90 | -------------------------------------------------------------------------------- /pyam/testing.py: -------------------------------------------------------------------------------- 1 | import pandas.testing as pdt 2 | 3 | from . 
import compare 4 | 5 | 6 | def assert_iamframe_equal(left, right, **kwargs): 7 | """Check that left and right IamDataFrame instances are equal. 8 | 9 | Parameters 10 | ---------- 11 | left, right : :class:`IamDataFrame` 12 | Two IamDataFrame instances to be compared. 13 | **kwargs 14 | Passed to :meth:`IamDataFrame.compare`, comparing the `data` objects. 15 | 16 | Raises 17 | ------ 18 | AssertionError if *left* and *right* are different. 19 | 20 | Notes 21 | ----- 22 | Columns of the *meta* attribute where all values are *nan* are ignored. 23 | """ 24 | diff = compare(left, right, **kwargs) 25 | if not diff.empty: 26 | msg = "IamDataFrame.data are different: \n {}" 27 | raise AssertionError(msg.format(diff.head())) 28 | 29 | pdt.assert_frame_equal( 30 | left.meta.dropna(axis="columns", how="all"), 31 | right.meta.dropna(axis="columns", how="all"), 32 | check_column_type=False, 33 | check_dtype=False, 34 | check_like=True, 35 | ) 36 | 37 | pdt.assert_series_equal( 38 | left.exclude, 39 | right.exclude, 40 | ) 41 | -------------------------------------------------------------------------------- /pyam/time.py: -------------------------------------------------------------------------------- 1 | import dateutil 2 | import pandas as pd 3 | 4 | from pyam.index import append_index_col 5 | from pyam.logging import raise_data_error 6 | 7 | 8 | def swap_time_for_year(df, inplace=False, subannual=False): 9 | """Internal implementation to swap 'time' domain to 'year' (as int)""" 10 | if not df.time_col == "time": 11 | raise ValueError("Time domain must be datetime to use this method") 12 | 13 | ret = df.copy() if not inplace else df 14 | 15 | index = ret._data.index 16 | time = pd.Series(index.get_level_values("time")) 17 | order = [v if v != "time" else "year" for v in index.names] 18 | 19 | # reduce "time" index column to "year" 20 | # TODO use `replace_index_values` instead of `append_index_col` 21 | index = index.droplevel("time") 22 | new_index_col = time.apply(lambda x: x if isinstance(x, int) else x.year) 23 | index = append_index_col(index, new_index_col, "year", order=order) 24 | 25 | # if selected, extract the "subannual" info from the "time" index column 26 | if subannual: 27 | # if subannual is True, default to simple datetime format without year 28 | if subannual is True: 29 | subannual = "%m-%d %H:%M%z" 30 | if isinstance(subannual, str): 31 | _subannual = time.apply(lambda x: x.strftime(subannual)) 32 | else: 33 | _subannual = time.apply(subannual) 34 | 35 | index = append_index_col(index, _subannual, "subannual") 36 | ret.extra_cols.append("subannual") 37 | 38 | rows = index.duplicated() 39 | if any(rows): 40 | error_msg = "Swapping time for year causes duplicates in `data`" 41 | raise_data_error(error_msg, index[rows].to_frame().reset_index(drop=True)) 42 | 43 | # assign data and other attributes 44 | ret._data.index = index 45 | ret.time_col = "year" 46 | ret._set_attributes() 47 | 48 | if not inplace: 49 | return ret 50 | 51 | 52 | def swap_year_for_time(df, inplace=False): 53 | """Internal implementation to swap 'year' domain to 'time' (as datetime)""" 54 | 55 | if not df.time_col == "year": 56 | raise ValueError("Time domain must be 'year' to use this method") 57 | 58 | ret = df.copy() if not inplace else df 59 | index = ret._data.index 60 | 61 | order = [v if v != "year" else "time" for v in index.names] 62 | 63 | if "subannual" in df.extra_cols: 64 | order.remove("subannual")  # list.remove() works in place; the level is dropped below 65 | time_values = zip(*[index.get_level_values(c) for c in ["year", "subannual"]]) 66 | time = 
list(map(dateutil.parser.parse, [f"{y}-{s}" for y, s in time_values])) 67 | index = index.droplevel(["year", "subannual"]) 68 | ret.extra_cols.remove("subannual") 69 | else: 70 | time = index.get_level_values("year") 71 | index = index.droplevel(["year"]) 72 | 73 | # add new index column, assign data and other attributes 74 | index = append_index_col(index, time, "time", order=order) 75 | ret._data.index = index 76 | ret.time_col = "time" 77 | ret._set_attributes() 78 | delattr(ret, "year") 79 | 80 | if not inplace: 81 | return ret 82 | -------------------------------------------------------------------------------- /pyam/worldbank.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | 3 | from pyam import IamDataFrame 4 | 5 | 6 | def read_worldbank(model="World Bank", scenario="WDI", **kwargs) -> IamDataFrame: 7 | """Read data from the World Bank Data Catalogue and return as IamDataFrame 8 | 9 | This function is a simple wrapper for the function 10 | :func:`wbdata.get_dataframe`. Import the module to retrieve/search 11 | the list of indicators (and their id's), countries, etc. 12 | 13 | .. code-block:: python 14 | 15 | import wbdata as wb 16 | 17 | Parameters 18 | ---------- 19 | model : str, optional 20 | The `model` name to be used for the returned timeseries data. 21 | scenario : str, optional 22 | The `scenario` name to be used for the returned timeseries data. 23 | **kwargs 24 | passed to :func:`wbdata.get_dataframe` 25 | 26 | Notes 27 | ----- 28 | The function :func:`wbdata.get_dataframe` takes an `indicators` 29 | argument, which is a dictionary where the keys are desired indicators and the values 30 | are the desired column names. If the `indicators` passed to :func:`read_worldbank` 31 | is a single indicator code string, we should instead use :func:`wbdata.get_series`. 32 | 33 | The function :func:`wbdata.get_dataframe` does not return a unit, 34 | but it can be collected for some indicators using the function 35 | :func:`wbdata.get_indicators`. 36 | In the current implementation, unit is defined as `n/a` for all data; 37 | this can be enhanced later (if there is interest from users). 38 | 39 | Returns 40 | ------- 41 | :class:`IamDataFrame` 42 | """ 43 | # import packages for functions with low-frequency usage only when needed 44 | # also, there seems to be an issue with wbdata on Mac OS 45 | # see https://github.com/OliverSherouse/wbdata/issues/74 46 | import wbdata # noqa: F401 47 | 48 | data: pd.DataFrame = wbdata.get_dataframe(**kwargs) 49 | value = data.columns 50 | data.reset_index(inplace=True) 51 | data.rename(columns={"date": "year"}, inplace=True) 52 | df = IamDataFrame( 53 | data, 54 | model=model, 55 | scenario=scenario, 56 | value=value, 57 | unit="n/a", 58 | region="country", 59 | ) 60 | # TODO use wb.get_indicators to retrieve correct units (where available) 61 | 62 | # if `indicators` is a mapping, use it for renaming 63 | if "indicators" in kwargs and isinstance(kwargs["indicators"], dict): 64 | df.rename(variable=kwargs["indicators"], inplace=True) 65 | 66 | return df 67 | -------------------------------------------------------------------------------- /tests/README.md: -------------------------------------------------------------------------------- 1 | # Plotting Tests 2 | 3 | Plotting tests are used as regression tests for plotting features. They can be run 4 | locally (see below) and are also run on CI. 5 | 6 | ## Install Deps 7 | 8 | You have to install `pytest-mpl` to run the plotting tests. 
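For reference, the typical install command (assuming a standard pip environment) is:

```
pip install pytest-mpl
```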
9 | 10 | ## Tests Failing on CI? 11 | 12 | Make sure your local versions of `matplotlib` and `seaborn` are the same as on CI. 13 | `seaborn` can override default `matplotlib` style sheets, and thus both need to be the 14 | same version. 15 | 16 | ## Creating Baseline Images 17 | 18 | ``` 19 | pytest --mpl-generate-path=expected_figs test_plotting.py 20 | ``` 21 | 22 | ## Running Tests 23 | 24 | ``` 25 | pytest --mpl 26 | ``` 27 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/__init__.py -------------------------------------------------------------------------------- /tests/data/empty_meta_sheet.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/data/empty_meta_sheet.xlsx -------------------------------------------------------------------------------- /tests/data/exclude_meta_sheet.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/data/exclude_meta_sheet.xlsx -------------------------------------------------------------------------------- /tests/data/exec.py: -------------------------------------------------------------------------------- 1 | def do_exec(df): 2 | df.set_meta("bar", name="foo") 3 | -------------------------------------------------------------------------------- /tests/data/na_column.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/data/na_column.xlsx -------------------------------------------------------------------------------- /tests/data/plot_data.csv: -------------------------------------------------------------------------------- 1 | model,scenario,region,variable,unit,2005,2010,2015 2 | test_model,test_scenario,World,Primary Energy,EJ/y,1,6,10 3 | test_model,test_scenario,World,Primary Energy|Coal,EJ/y,0.5,3,4 4 | test_model,test_scenario1,World,Primary Energy,EJ/y,2,6,8 5 | test_model,test_scenario1,World,Primary Energy|Coal,EJ/y,0.5,2,5 6 | test_model1,test_scenario,World,Primary Energy,EJ/y,0.7,4.2,7 7 | test_model1,test_scenario,World,Primary Energy|Coal,EJ/y,0.35,2.1,2.8 8 | test_model1,test_scenario1,World,Primary Energy,EJ/y,1.4,4.2,5.6 9 | test_model1,test_scenario1,World,Primary Energy|Coal,EJ/y,0.35,1.4,3.5 10 | -------------------------------------------------------------------------------- /tests/data/plot_region_data.csv: -------------------------------------------------------------------------------- 1 | Model,Scenario,Region,Variable,Unit,2015 2 | MESSAGE-GLOBIOM,SSP2-45-SPA2-V25,AFR,CEDS+|9+ Sectors|Emissions|BC|Energy Sector|Harmonized-DB,Mt BC/yr,0.0336763562024 3 | MESSAGE-GLOBIOM,SSP2-45-SPA2-V25,CPA,CEDS+|9+ Sectors|Emissions|BC|Energy Sector|Harmonized-DB,Mt BC/yr,0.789019366289 4 | MESSAGE-GLOBIOM,SSP2-45-SPA2-V25,EEU,CEDS+|9+ Sectors|Emissions|BC|Energy Sector|Harmonized-DB,Mt BC/yr,0.0100767087431 5 | MESSAGE-GLOBIOM,SSP2-45-SPA2-V25,FSU,CEDS+|9+ Sectors|Emissions|BC|Energy Sector|Harmonized-DB,Mt BC/yr,0.0999954893386 6 | MESSAGE-GLOBIOM,SSP2-45-SPA2-V25,LAM,CEDS+|9+ Sectors|Emissions|BC|Energy
Sector|Harmonized-DB,Mt BC/yr,0.024528839293 7 | MESSAGE-GLOBIOM,SSP2-45-SPA2-V25,MEA,CEDS+|9+ Sectors|Emissions|BC|Energy Sector|Harmonized-DB,Mt BC/yr,0.055586345231 8 | MESSAGE-GLOBIOM,SSP2-45-SPA2-V25,NAM,CEDS+|9+ Sectors|Emissions|BC|Energy Sector|Harmonized-DB,Mt BC/yr,0.0112865104686 9 | MESSAGE-GLOBIOM,SSP2-45-SPA2-V25,PAO,CEDS+|9+ Sectors|Emissions|BC|Energy Sector|Harmonized-DB,Mt BC/yr,0.0176177431532 10 | MESSAGE-GLOBIOM,SSP2-45-SPA2-V25,PAS,CEDS+|9+ Sectors|Emissions|BC|Energy Sector|Harmonized-DB,Mt BC/yr,0.0804704013735 11 | MESSAGE-GLOBIOM,SSP2-45-SPA2-V25,SAS,CEDS+|9+ Sectors|Emissions|BC|Energy Sector|Harmonized-DB,Mt BC/yr,0.0377479130688 12 | MESSAGE-GLOBIOM,SSP2-45-SPA2-V25,WEU,CEDS+|9+ Sectors|Emissions|BC|Energy Sector|Harmonized-DB,Mt BC/yr,0.0305512909744 13 | MESSAGE-GLOBIOM,SSP2-45-SPA2-V25,World,CEDS+|9+ Sectors|Emissions|BC|Energy Sector|Harmonized-DB,Mt BC/yr,1.19055696414 14 | -------------------------------------------------------------------------------- /tests/data/test_RCP_database_raw_download.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/data/test_RCP_database_raw_download.xlsx -------------------------------------------------------------------------------- /tests/data/test_SSP_database_raw_download.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/data/test_SSP_database_raw_download.xlsx -------------------------------------------------------------------------------- /tests/data/test_df.nc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/data/test_df.nc -------------------------------------------------------------------------------- /tests/data/test_df.xls: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/data/test_df.xls -------------------------------------------------------------------------------- /tests/data/test_df.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/data/test_df.xlsx -------------------------------------------------------------------------------- /tests/expected_figs/test_add_panel_label.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_add_panel_label.png -------------------------------------------------------------------------------- /tests/expected_figs/test_barplot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_barplot.png -------------------------------------------------------------------------------- /tests/expected_figs/test_barplot_h.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_barplot_h.png 
-------------------------------------------------------------------------------- /tests/expected_figs/test_barplot_rc.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_barplot_rc.png -------------------------------------------------------------------------------- /tests/expected_figs/test_barplot_stacked.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_barplot_stacked.png -------------------------------------------------------------------------------- /tests/expected_figs/test_barplot_stacked_net_line.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_barplot_stacked_net_line.png -------------------------------------------------------------------------------- /tests/expected_figs/test_barplot_stacked_order_by_list.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_barplot_stacked_order_by_list.png -------------------------------------------------------------------------------- /tests/expected_figs/test_barplot_stacked_order_by_rc.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_barplot_stacked_order_by_rc.png -------------------------------------------------------------------------------- /tests/expected_figs/test_barplot_title.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_barplot_title.png -------------------------------------------------------------------------------- /tests/expected_figs/test_boxplot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_boxplot.png -------------------------------------------------------------------------------- /tests/expected_figs/test_boxplot_hue.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_boxplot_hue.png -------------------------------------------------------------------------------- /tests/expected_figs/test_line_PYAM_COLORS.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_line_PYAM_COLORS.png -------------------------------------------------------------------------------- /tests/expected_figs/test_line_color.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_line_color.png 
-------------------------------------------------------------------------------- /tests/expected_figs/test_line_color_fill_between.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_line_color_fill_between.png -------------------------------------------------------------------------------- /tests/expected_figs/test_line_color_fill_between_interpolate.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_line_color_fill_between_interpolate.png -------------------------------------------------------------------------------- /tests/expected_figs/test_line_color_final_ranges.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_line_color_final_ranges.png -------------------------------------------------------------------------------- /tests/expected_figs/test_line_filter_title.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_line_filter_title.png -------------------------------------------------------------------------------- /tests/expected_figs/test_line_linestyle_legend.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_line_linestyle_legend.png -------------------------------------------------------------------------------- /tests/expected_figs/test_line_marker_legend.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_line_marker_legend.png -------------------------------------------------------------------------------- /tests/expected_figs/test_line_no_legend.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_line_no_legend.png -------------------------------------------------------------------------------- /tests/expected_figs/test_line_plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_line_plot.png -------------------------------------------------------------------------------- /tests/expected_figs/test_line_plot_1_var.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_line_plot_1_var.png -------------------------------------------------------------------------------- /tests/expected_figs/test_line_plot_2_vars.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_line_plot_2_vars.png -------------------------------------------------------------------------------- /tests/expected_figs/test_line_plot_bottom_legend.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_line_plot_bottom_legend.png -------------------------------------------------------------------------------- /tests/expected_figs/test_line_plot_cmap_color_arg.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_line_plot_cmap_color_arg.png -------------------------------------------------------------------------------- /tests/expected_figs/test_line_plot_dict_legend.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_line_plot_dict_legend.png -------------------------------------------------------------------------------- /tests/expected_figs/test_line_plot_label.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_line_plot_label.png -------------------------------------------------------------------------------- /tests/expected_figs/test_line_plot_label_color.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_line_plot_label_color.png -------------------------------------------------------------------------------- /tests/expected_figs/test_line_plot_order_by_dict.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_line_plot_order_by_dict.png -------------------------------------------------------------------------------- /tests/expected_figs/test_line_plot_order_by_rc.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_line_plot_order_by_rc.png -------------------------------------------------------------------------------- /tests/expected_figs/test_line_rm_legend_label.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_line_rm_legend_label.png -------------------------------------------------------------------------------- /tests/expected_figs/test_line_single_color.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_line_single_color.png -------------------------------------------------------------------------------- /tests/expected_figs/test_line_update_rc.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_line_update_rc.png -------------------------------------------------------------------------------- /tests/expected_figs/test_pie_plot_colors.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_pie_plot_colors.png -------------------------------------------------------------------------------- /tests/expected_figs/test_pie_plot_labels.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_pie_plot_labels.png -------------------------------------------------------------------------------- /tests/expected_figs/test_pie_plot_legend.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_pie_plot_legend.png -------------------------------------------------------------------------------- /tests/expected_figs/test_pie_plot_other.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_pie_plot_other.png -------------------------------------------------------------------------------- /tests/expected_figs/test_scatter.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_scatter.png -------------------------------------------------------------------------------- /tests/expected_figs/test_scatter_meta.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_scatter_meta.png -------------------------------------------------------------------------------- /tests/expected_figs/test_scatter_variables_with_meta_color.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_scatter_variables_with_meta_color.png -------------------------------------------------------------------------------- /tests/expected_figs/test_scatter_with_lines.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_scatter_with_lines.png -------------------------------------------------------------------------------- /tests/expected_figs/test_stackplot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_stackplot.png -------------------------------------------------------------------------------- /tests/expected_figs/test_stackplot_missing_zero_issue_266.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_stackplot_missing_zero_issue_266.png -------------------------------------------------------------------------------- /tests/expected_figs/test_stackplot_negative.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_stackplot_negative.png -------------------------------------------------------------------------------- /tests/expected_figs/test_stackplot_negative_emissions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_stackplot_negative_emissions.png -------------------------------------------------------------------------------- /tests/expected_figs/test_stackplot_negative_emissions_kwargs_custom_total.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_stackplot_negative_emissions_kwargs_custom_total.png -------------------------------------------------------------------------------- /tests/expected_figs/test_stackplot_negative_emissions_kwargs_def_total.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_stackplot_negative_emissions_kwargs_def_total.png -------------------------------------------------------------------------------- /tests/expected_figs/test_stackplot_negative_emissions_with_total.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_stackplot_negative_emissions_with_total.png -------------------------------------------------------------------------------- /tests/expected_figs/test_stackplot_order_by_list.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_stackplot_order_by_list.png -------------------------------------------------------------------------------- /tests/expected_figs/test_stackplot_order_by_rc.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_stackplot_order_by_rc.png -------------------------------------------------------------------------------- /tests/expected_figs/test_stackplot_other.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IAMconsortium/pyam/5821946a57d0a0f5a36c044bf2fc6494ebb03d7b/tests/expected_figs/test_stackplot_other.png -------------------------------------------------------------------------------- /tests/test_admin.py: -------------------------------------------------------------------------------- 1 | """This file is **NOT** used to test specific functions of pyam, but rather 2 | to print configuration
information of the loaded package(s) to users!""" 3 | 4 | import sys 5 | 6 | from .conftest import IIASA_UNAVAILABLE 7 | from .test_plotting import MPL_KWARGS 8 | 9 | 10 | def test_config(capsys): 11 | modules = {} 12 | for m in list(sys.modules.values()): 13 | if m: 14 | version = getattr(m, "__version__", None) 15 | if version: 16 | modules[m.__name__] = version 17 | 18 | with capsys.disabled(): 19 | print("\nPlotting function decorator kwargs:") 20 | for k, v in MPL_KWARGS.items(): 21 | print(f"{k}: {v}") 22 | 23 | print("\nModule versions:") 24 | for key in sorted(list(modules.keys())): 25 | print(f"{key}: {modules[key]}") 26 | 27 | if IIASA_UNAVAILABLE: 28 | print("\nWARNING: IIASA-API unavailable, skipping related tests\n") 29 | 30 | # add empty spaces equivalent to length of file name 31 | print("tests/test_admin.py ", end="") 32 | -------------------------------------------------------------------------------- /tests/test_cast_to_iamc.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import pytest 3 | from pandas import testing as pdt 4 | 5 | from pyam import IamDataFrame 6 | 7 | # when making any updates to this file, 8 | # please also update the `data_table_formats` tutorial notebook! 9 | 10 | 11 | def test_cast_from_value_col(test_df_year): 12 | df_with_value_cols = pd.DataFrame( 13 | [ 14 | ["model_a", "scen_a", "World", "EJ/yr", 2005, 1, 0.5], 15 | ["model_a", "scen_a", "World", "EJ/yr", 2010, 6.0, 3], 16 | ["model_a", "scen_b", "World", "EJ/yr", 2005, 2, None], 17 | ["model_a", "scen_b", "World", "EJ/yr", 2010, 7, None], 18 | ], 19 | columns=[ 20 | "model", 21 | "scenario", 22 | "region", 23 | "unit", 24 | "year", 25 | "Primary Energy", 26 | "Primary Energy|Coal", 27 | ], 28 | ) 29 | df = IamDataFrame( 30 | df_with_value_cols, value=["Primary Energy", "Primary Energy|Coal"] 31 | ) 32 | pdt.assert_series_equal(df._data, test_df_year._data, check_like=True) 33 | 34 | 35 | def test_cast_from_value_col_and_args(test_df_year): 36 | # checks for issue [#210](https://github.com/IAMconsortium/pyam/issues/210) 37 | df_with_value_cols = pd.DataFrame( 38 | [ 39 | ["scen_a", "World", "EJ/yr", 2005, 1, 0.5], 40 | ["scen_a", "World", "EJ/yr", 2010, 6.0, 3], 41 | ["scen_b", "World", "EJ/yr", 2005, 2, None], 42 | ["scen_b", "World", "EJ/yr", 2010, 7, None], 43 | ], 44 | columns=[ 45 | "scenario", 46 | "node", 47 | "unit", 48 | "year", 49 | "Primary Energy", 50 | "Primary Energy|Coal", 51 | ], 52 | ) 53 | df = IamDataFrame( 54 | df_with_value_cols, 55 | model="model_a", 56 | region="node", 57 | value=["Primary Energy", "Primary Energy|Coal"], 58 | ) 59 | pdt.assert_series_equal(df._data, test_df_year._data, check_like=True) 60 | 61 | 62 | def test_cast_with_model_arg_raises(): 63 | df = pd.DataFrame( 64 | [ 65 | ["model_a", "scen_a", "World", "EJ/yr", 2005, 1, 0.5], 66 | ], 67 | columns=[ 68 | "model", 69 | "scenario", 70 | "region", 71 | "unit", 72 | "year", 73 | "Primary Energy", 74 | "Primary Energy|Coal", 75 | ], 76 | ) 77 | pytest.raises(ValueError, IamDataFrame, df, model="foo") 78 | 79 | 80 | def test_cast_with_model_arg(test_df): 81 | df = test_df.timeseries().reset_index() 82 | df.rename(columns={"model": "foo"}, inplace=True) 83 | 84 | df = IamDataFrame(df, model="foo") 85 | pdt.assert_series_equal(df._data, test_df._data, check_like=True) 86 | 87 | 88 | def test_cast_by_column_concat(test_df_year): 89 | df = pd.DataFrame( 90 | [ 91 | ["scen_a", "World", "Primary Energy", None, "EJ/yr", 1, 6.0], 92 | ["scen_a", "World", 
"Primary Energy", "Coal", "EJ/yr", 0.5, 3], 93 | ["scen_b", "World", "Primary Energy", None, "EJ/yr", 2, 7], 94 | ], 95 | columns=["scenario", "region", "var_1", "var_2", "unit", 2005, 2010], 96 | ) 97 | 98 | obs = IamDataFrame(df, model="model_a", variable=["var_1", "var_2"]) 99 | pdt.assert_series_equal(obs._data, test_df_year._data, check_like=True) 100 | 101 | 102 | def test_cast_with_variable_and_value(test_df): 103 | pe_df = test_df.filter(variable="Primary Energy") 104 | data = pe_df.data.rename(columns={"value": "lvl"}).drop("variable", axis=1) 105 | 106 | obs = IamDataFrame(data, variable="Primary Energy", value="lvl") 107 | pdt.assert_series_equal(obs._data, pe_df._data, check_like=True) 108 | 109 | 110 | def test_cast_from_r_df(test_pd_df): 111 | df = test_pd_df.copy() 112 | # last two columns are years but saved as X2005 as written by R 113 | df.columns = list(df.columns[:-2]) + [f"X{c}" for c in df.columns[-2:]] 114 | 115 | pdt.assert_series_equal( 116 | IamDataFrame(df)._data, IamDataFrame(test_pd_df)._data, check_like=True 117 | ) 118 | 119 | 120 | def test_cast_from_r_df_err(test_pd_df): 121 | df = test_pd_df.copy() 122 | # last two columns are years 123 | df.columns = list(df.columns[:-2]) + ["Xfoo", "Xbar"] 124 | pytest.raises(ValueError, IamDataFrame, df) 125 | -------------------------------------------------------------------------------- /tests/test_data.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | 3 | import pytest 4 | from pandas import testing as pdt 5 | 6 | from pyam import IamDataFrame 7 | 8 | 9 | @pytest.mark.parametrize("inplace", [True, False]) 10 | def test_data_sort(test_df, inplace): 11 | """Assert that data can be sorted as expected""" 12 | 13 | # revert order of _data 14 | df = IamDataFrame(test_df.data.iloc[[5, 4, 3, 2, 1, 0]]) 15 | 16 | # assert that data is not sorted as expected 17 | with pytest.raises(AssertionError): 18 | pdt.assert_frame_equal(df.data, test_df.data) 19 | 20 | # assert that data is sorted as expected 21 | if inplace: 22 | obs = df.copy() 23 | obs.sort_data(inplace=True) 24 | else: 25 | obs = df.sort_data() 26 | pdt.assert_frame_equal(obs.data, test_df.data) 27 | 28 | 29 | @pytest.mark.parametrize("inplace", [True, False]) 30 | def test_data_sort_mixed_time_domain(test_df_year, inplace): 31 | """Assert that timeseries with mixed time domain can be sorted as expected""" 32 | 33 | # TODO implement mixed df in conftest.py 34 | mixed_data = test_df_year.data 35 | mixed_data.year.replace({2005: datetime(2005, 1, 1, 0, 0)}, inplace=True) 36 | mixed_data.rename(columns={"time": "year"}, inplace=True) 37 | 38 | # revert order of _data 39 | df = IamDataFrame(mixed_data.iloc[[5, 4, 3, 2, 1, 0]]) 40 | 41 | # assert that data is not sorted as expected 42 | with pytest.raises(AssertionError): 43 | pdt.assert_frame_equal(df.data, mixed_data) 44 | 45 | # assert that data is sorted as expected 46 | if inplace: 47 | obs = df.copy() 48 | obs.sort_data(inplace=True) 49 | else: 50 | obs = df.sort_data() 51 | pdt.assert_frame_equal(obs.data, mixed_data) 52 | -------------------------------------------------------------------------------- /tests/test_feature_compare.py: -------------------------------------------------------------------------------- 1 | import datetime as dt 2 | 3 | import numpy as np 4 | import pandas as pd 5 | 6 | from pyam import compare 7 | from pyam.utils import IAMC_IDX 8 | 9 | 10 | def test_compare(test_df): 11 | clone = test_df.copy() 12 | 
clone._data.iloc[0] = 2 13 | clone.rename(variable={"Primary Energy|Coal": "Primary Energy|Gas"}, inplace=True) 14 | 15 | obs = compare(test_df, clone, left_label="test_df", right_label="clone") 16 | 17 | exp = pd.DataFrame( 18 | [ 19 | ["Primary Energy", "EJ/yr", dt.datetime(2005, 6, 17), 1, 2], 20 | ["Primary Energy|Coal", "EJ/yr", dt.datetime(2005, 6, 17), 0.5, np.nan], 21 | ["Primary Energy|Coal", "EJ/yr", dt.datetime(2010, 7, 21), 3, np.nan], 22 | ["Primary Energy|Gas", "EJ/yr", dt.datetime(2005, 6, 17), np.nan, 0.5], 23 | ["Primary Energy|Gas", "EJ/yr", dt.datetime(2010, 7, 21), np.nan, 3], 24 | ], 25 | columns=["variable", "unit", "time", "test_df", "clone"], 26 | ) 27 | exp["model"] = "model_a" 28 | exp["scenario"] = "scen_a" 29 | exp["region"] = "World" 30 | time_col = "time" 31 | if test_df.time_col == "year": 32 | exp["year"] = exp["time"].apply(lambda x: x.year) 33 | exp = exp.drop("time", axis="columns") 34 | time_col = "year" 35 | else: 36 | obs = obs.reset_index() 37 | obs.time = obs.time.dt.normalize() 38 | obs = obs.set_index(IAMC_IDX + [time_col]) 39 | 40 | exp = exp.set_index(IAMC_IDX + [time_col]) 41 | pd.testing.assert_frame_equal(obs, exp) 42 | -------------------------------------------------------------------------------- /tests/test_feature_debiasing.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from numpy.testing import assert_array_equal 3 | 4 | from pyam import IamDataFrame 5 | 6 | 7 | @pytest.mark.parametrize( 8 | "axis, exp", 9 | (["scenario", [0.5, 0.5, 1]], [["model", "scenario"], [1, 1, 1]]), 10 | ) 11 | def test_debiasing_count(test_pd_df, axis, exp): 12 | """Check computing bias weights by counting the number of scenarios per scenario name""" 13 | 14 | # modify the default test data to have three distinct scenarios 15 | test_pd_df.loc[1, "model"] = "model_b" 16 | df = IamDataFrame(test_pd_df) 17 | df.compute.bias(method="count", name="bias", axis=axis) 18 | 19 | assert_array_equal(df["bias"].values, exp) 20 | 21 | 22 | def test_debiasing_unknown_method(test_df_year): 23 | """Assert that computing bias weights with an unknown method raises""" 24 | msg = "Unknown method foo for computing bias weights!"
25 | with pytest.raises(ValueError, match=msg): 26 | test_df_year.compute.bias(method="foo", name="bias", axis="scenario") 27 | -------------------------------------------------------------------------------- /tests/test_feature_downscale.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from pyam.testing import assert_iamframe_equal 4 | 5 | 6 | def test_downscale_region_without_method_raises(simple_df): 7 | # downscale_region without specifying a method raises 8 | variable = "Primary Energy" 9 | pytest.raises(ValueError, simple_df.downscale_region, variable=variable) 10 | 11 | 12 | def test_downscale_region_with_multiple_methods_raises(simple_df): 13 | # downscale_region specifying both weight and proxy raises 14 | 15 | # create weighting dataframe 16 | weight_df = simple_df.filter(variable="Population").data.pivot_table( 17 | index="region", columns=simple_df.time_col, values="value" 18 | ) 19 | 20 | # call downscale_region with both proxy and weight 21 | variable = "Primary Energy" 22 | pytest.raises( 23 | ValueError, 24 | simple_df.downscale_region, 25 | variable=variable, 26 | proxy="Population", 27 | weight=weight_df, 28 | ) 29 | 30 | 31 | @pytest.mark.parametrize( 32 | "variable", 33 | ( 34 | ("Primary Energy"), 35 | (["Primary Energy", "Primary Energy|Coal"]), 36 | ), 37 | ) 38 | def test_downscale_region_with_proxy(simple_df, variable): 39 | simple_df.set_meta([1], name="test") 40 | regions = ["reg_a", "reg_b"] 41 | 42 | # return as new IamDataFrame 43 | obs = simple_df.downscale_region(variable, proxy="Population") 44 | exp = simple_df.filter(variable=variable, region=regions) 45 | assert_iamframe_equal(exp, obs) 46 | 47 | # append to `self` (after removing to-be-downscaled timeseries) 48 | inplace = simple_df.filter(variable=variable, region=regions, keep=False) 49 | inplace.downscale_region(variable, proxy="Population", append=True) 50 | assert_iamframe_equal(inplace, simple_df) 51 | 52 | 53 | @pytest.mark.parametrize( 54 | "variable, index", 55 | ( 56 | ("Primary Energy", ["region"]), 57 | (["Primary Energy", "Primary Energy|Coal"], ["region"]), 58 | (["Primary Energy", "Primary Energy|Coal"], ["model", "region"]), 59 | ), 60 | ) 61 | def test_downscale_region_with_weight(simple_df, variable, index): 62 | simple_df.set_meta([1], name="test") 63 | regions = ["reg_a", "reg_b"] 64 | 65 | # create weighting dataframe 66 | weight_df = simple_df.filter( 67 | variable="Population", region=regions 68 | ).data.pivot_table(index=index, columns=simple_df.time_col, values="value") 69 | 70 | # return as new IamDataFrame 71 | obs = simple_df.downscale_region(variable, weight=weight_df) 72 | exp = simple_df.filter(variable=variable, region=regions) 73 | assert_iamframe_equal(exp, obs) 74 | 75 | # append to `self` (after removing to-be-downscaled timeseries) 76 | inplace = simple_df.filter(variable=variable, region=regions, keep=False) 77 | inplace.downscale_region(variable, weight=weight_df, append=True) 78 | assert_iamframe_equal(inplace, simple_df) 79 | 80 | 81 | @pytest.mark.parametrize( 82 | "variable, index", 83 | ( 84 | ("Primary Energy", ["region"]), 85 | (["Primary Energy", "Primary Energy|Coal"], ["model", "region"]), 86 | ), 87 | ) 88 | def test_downscale_region_with_weight_subregions(simple_df, variable, index): 89 | simple_df.set_meta([1], name="test") 90 | regions = ["reg_a", "reg_b"] 91 | 92 | # create weighting dataframe with an extra "duplicate" region 93 | weight_df = ( 94 | simple_df.filter(variable="Population") 95
| .rename(region={"reg_a": "duplicate"}, append=True) # add extra region 96 | .data.pivot_table(index=index, columns=simple_df.time_col, values="value") 97 | ) 98 | 99 | # return as new IamDataFrame 100 | ds_args = dict(variable=variable, weight=weight_df, subregions=regions) 101 | obs = simple_df.downscale_region(**ds_args) 102 | exp = simple_df.filter(variable=variable, region=regions) 103 | assert_iamframe_equal(exp, obs) 104 | 105 | # append to `self` (after removing to-be-downscaled timeseries) 106 | inplace = simple_df.filter(variable=variable, region=regions, keep=False) 107 | inplace.downscale_region(**ds_args, append=True) 108 | assert_iamframe_equal(inplace, simple_df) 109 | -------------------------------------------------------------------------------- /tests/test_feature_growth_rate.py: -------------------------------------------------------------------------------- 1 | import math 2 | 3 | import pandas as pd 4 | import pandas.testing as pdt 5 | import pytest 6 | 7 | from pyam import IamDataFrame 8 | from pyam.testing import assert_iamframe_equal 9 | from pyam.timeseries import growth_rate 10 | from pyam.utils import IAMC_IDX 11 | 12 | from .conftest import META_DF 13 | 14 | EXP_DF = IamDataFrame( 15 | pd.DataFrame( 16 | [ 17 | ["model_a", "scen_a", "World", "Growth Rate", "", 0.430969], 18 | ["model_a", "scen_b", "World", "Growth Rate", "", 0.284735], 19 | ], 20 | columns=IAMC_IDX + [2005], 21 | ), 22 | meta=META_DF, 23 | ) 24 | 25 | 26 | @pytest.mark.parametrize("append", (False, True)) 27 | def test_growth_rate(test_df_year, append): 28 | """Check computing the growth rate from an IamDataFrame""" 29 | 30 | if append: 31 | obs = test_df_year.copy() 32 | obs.compute.growth_rate({"Primary Energy": "Growth Rate"}, append=True) 33 | assert_iamframe_equal(test_df_year.append(EXP_DF), obs) 34 | else: 35 | obs = test_df_year.compute.growth_rate({"Primary Energy": "Growth Rate"}) 36 | assert_iamframe_equal(EXP_DF, obs) 37 | 38 | 39 | @pytest.mark.parametrize("append", (False, True)) 40 | def test_growth_rate_empty(test_df_year, append): 41 | """Assert that computing the growth rate with invalid variables returns empty""" 42 | 43 | if append: 44 | obs = test_df_year.copy() 45 | obs.compute.growth_rate({"foo": "bar"}, append=True) 46 | assert_iamframe_equal(test_df_year, obs) # assert that no data was added 47 | else: 48 | obs = test_df_year.compute.growth_rate({"foo": "bar"}) 49 | assert obs.empty 50 | 51 | 52 | @pytest.mark.parametrize("x2010", (1, 27, -3)) 53 | @pytest.mark.parametrize("rates", ([0.05, 1.25], [0.5, -0.5])) 54 | def test_growth_rate_timeseries(x2010, rates): 55 | """Check several combinations of growth rates directly on the timeseries""" 56 | 57 | x2013 = x2010 * math.pow(1 + rates[0], 3) # 3 years: 2010 - 2013 58 | x2017 = x2013 * math.pow(1 + rates[1], 4) # 4 years: 2013 - 2017 59 | 60 | pdt.assert_series_equal( 61 | growth_rate(pd.Series([x2010, x2013, x2017], index=[2010, 2013, 2017])), 62 | pd.Series(rates, index=[2010, 2013]), 63 | ) 64 | 65 | 66 | @pytest.mark.parametrize("value", (0, -1)) 67 | def test_growth_rate_timeseries_fails(value): 68 | """Check that a timeseries reaching/crossing 0 raises""" 69 | 70 | with pytest.raises(ValueError, match="Cannot compute growth rate when"): 71 | growth_rate(pd.Series([1.0, value])) 72 | -------------------------------------------------------------------------------- /tests/test_feature_interpolate.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | 3 | import 
numpy as np 4 | import pandas as pd 5 | import pytest 6 | from numpy import testing as npt 7 | 8 | from pyam import IamDataFrame 9 | from pyam.str import is_str 10 | from pyam.testing import assert_iamframe_equal 11 | from pyam.utils import IAMC_IDX 12 | 13 | 14 | def test_interpolate(test_pd_df): 15 | _df = test_pd_df.copy() 16 | _df["foo"] = ["bar", "baz", 2] # add extra_col (check for #351) 17 | df = IamDataFrame(_df) 18 | obs = df.interpolate(2007, inplace=False).filter(year=2007)._data.values 19 | npt.assert_allclose(obs, [3, 1.5, 4]) 20 | 21 | # redo the interpolation and check that no duplicates are added 22 | df.interpolate(2007, inplace=False) 23 | assert not df._data.index.duplicated().any() 24 | 25 | # assert that extra_col does not have nan's (check for #351) 26 | assert all([True if is_str(i) else ~np.isnan(i) for i in df.foo]) 27 | 28 | 29 | def test_interpolate_time_exists(test_df_year): 30 | obs = test_df_year.interpolate(2005, inplace=False).filter(year=2005)._data.values 31 | npt.assert_allclose(obs, [1.0, 0.5, 2.0]) 32 | 33 | 34 | def test_interpolate_with_list(test_df_year): 35 | lst = [2007, 2008] 36 | obs = test_df_year.interpolate(lst, inplace=False).filter(year=lst)._data.values 37 | npt.assert_allclose(obs, [3, 4, 1.5, 2, 4, 5]) 38 | 39 | 40 | def test_interpolate_with_numpy_list(test_df_year): 41 | test_df_year.interpolate(np.r_[2007 : 2008 + 1], inplace=True) 42 | obs = test_df_year.filter(year=[2007, 2008])._data.values 43 | npt.assert_allclose(obs, [3, 4, 1.5, 2, 4, 5]) 44 | 45 | 46 | def test_interpolate_full_example(): 47 | cols = ["model_a", "scen_a", "World"] 48 | df = IamDataFrame( 49 | pd.DataFrame( 50 | [ 51 | cols + ["all", "EJ/yr", 0, 1, 6.0, 10], 52 | cols + ["last", "EJ/yr", 0, 0.5, 3, np.nan], 53 | cols + ["first", "EJ/yr", 0, np.nan, 2, 7], 54 | cols + ["middle", "EJ/yr", 0, 1, np.nan, 7], 55 | cols + ["first two", "EJ/yr", 0, np.nan, np.nan, 7], 56 | cols + ["last two", "EJ/yr", 0, 1, np.nan, np.nan], 57 | ], 58 | columns=IAMC_IDX + [2000, 2005, 2010, 2017], 59 | ) 60 | ) 61 | exp = IamDataFrame( 62 | pd.DataFrame( 63 | [ 64 | cols + ["all", "EJ/yr", 0, 1, 6.0, 7.142857, 10], 65 | cols + ["last", "EJ/yr", 0, 0.5, 3, np.nan, np.nan], 66 | cols + ["first", "EJ/yr", 0, 1.0, 2, 3.428571, 7], 67 | cols + ["middle", "EJ/yr", 0, 1, np.nan, 4.5, 7], 68 | cols + ["first two", "EJ/yr", 0, 2.058824, np.nan, 4.941176, 7], 69 | cols + ["last two", "EJ/yr", 0, 1, np.nan, np.nan, np.nan], 70 | ], 71 | columns=IAMC_IDX + [2000, 2005, 2010, 2012, 2017], 72 | ) 73 | ) 74 | assert_iamframe_equal(df.interpolate([2005, 2012], inplace=False), exp) 75 | 76 | 77 | def test_interpolate_extra_cols(): 78 | # check that interpolation with non-matching extra_cols has no effect 79 | # (#351) 80 | EXTRA_COL_DF = pd.DataFrame( 81 | [ 82 | ["foo", 2005, 1], 83 | ["foo", 2010, 2], 84 | ["bar", 2005, 2], 85 | ["bar", 2010, 3], 86 | ], 87 | columns=["extra_col", "year", "value"], 88 | ) 89 | df = IamDataFrame( 90 | EXTRA_COL_DF, 91 | model="model_a", 92 | scenario="scen_a", 93 | region="World", 94 | variable="Primary Energy", 95 | unit="EJ/yr", 96 | ) 97 | 98 | # create a copy from interpolation 99 | df2 = df.interpolate(2007, inplace=False) 100 | 101 | # interpolate should work as if extra_cols is in the _data index 102 | assert_iamframe_equal(df, df2.filter(year=2007, keep=False)) 103 | obs = df2.filter(year=2007)._data.values 104 | npt.assert_allclose(obs, [2.4, 1.4]) 105 | 106 | 107 | def test_interpolate_datetimes(test_df): 108 | # test that interpolation also works with 
date-times. 109 | some_date = datetime.datetime(2007, 7, 1) 110 | if test_df.time_col == "year": 111 | pytest.raises(ValueError, test_df.interpolate, time=some_date) 112 | else: 113 | test_df.interpolate(some_date, inplace=True) 114 | obs = test_df.filter(time=some_date).data["value"].reset_index(drop=True) 115 | exp = pd.Series([3, 1.5, 4], name="value") 116 | pd.testing.assert_series_equal(obs, exp, rtol=0.01) 117 | # redo the interpolation and check that no duplicates are added 118 | test_df.interpolate(some_date, inplace=True) 119 | assert not test_df.filter()._data.index.duplicated().any() 120 | -------------------------------------------------------------------------------- /tests/test_feature_learning_rate.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import pytest 3 | 4 | from pyam import IamDataFrame 5 | from pyam.testing import assert_iamframe_equal 6 | from pyam.utils import IAMC_IDX 7 | 8 | TEST_DF = IamDataFrame( 9 | pd.DataFrame( 10 | [ 11 | ["model_a", "scen_a", "World", "Cap", "GW", 1, 2], 12 | ["model_a", "scen_a", "World", "Cost", "US$2010/kW", 1, 0.5], 13 | ["model_a", "scen_b", "World", "Cap", "GW", 0.1, 0.2], 14 | ["model_a", "scen_b", "World", "Cost", "US$2010/kW", 1, 0.5], 15 | ["model_a", "scen_c", "World", "Cap", "GW", 10, 20], 16 | ["model_a", "scen_c", "World", "Cost", "US$2010/kW", 1, 0.5], 17 | ["model_a", "scen_d", "World", "Cap", "GW", 1, 2], 18 | ["model_a", "scen_d", "World", "Cost", "US$2010/kW", 1, 0.75], 19 | ["model_a", "scen_e", "World", "Cap", "GW", 1, 2], 20 | ["model_a", "scen_e", "World", "Cost", "US$2010/kW", 1, 0.25], 21 | ], 22 | columns=IAMC_IDX + [2005, 2010], 23 | ) 24 | ) 25 | 26 | EXP_DF = IamDataFrame( 27 | pd.DataFrame( 28 | [ 29 | ["model_a", "scen_a", "World", "Learning Rate", "", 0.5], 30 | ["model_a", "scen_b", "World", "Learning Rate", "", 0.5], 31 | ["model_a", "scen_c", "World", "Learning Rate", "", 0.5], 32 | ["model_a", "scen_d", "World", "Learning Rate", "", 0.25], 33 | ["model_a", "scen_e", "World", "Learning Rate", "", 0.75], 34 | ], 35 | columns=IAMC_IDX + [2005], 36 | ) 37 | ) 38 | 39 | 40 | @pytest.mark.parametrize("append", (False, True)) 41 | def test_learning_rate(append): 42 | """Check computing the learning rate""" 43 | 44 | if append: 45 | obs = TEST_DF.copy() 46 | obs.compute.learning_rate("Learning Rate", "Cost", "Cap", append=True) 47 | assert_iamframe_equal(TEST_DF.append(EXP_DF), obs) 48 | else: 49 | obs = TEST_DF.compute.learning_rate("Learning Rate", "Cost", "Cap") 50 | assert_iamframe_equal(EXP_DF, obs) 51 | 52 | 53 | @pytest.mark.parametrize("append", (False, True)) 54 | def test_learning_rate_empty(append): 55 | """Assert that computing the learning rate with invalid variables returns empty""" 56 | 57 | if append: 58 | obs = TEST_DF.copy() 59 | obs.compute.learning_rate("Learning Rate", "foo", "Cap", append=True) 60 | assert_iamframe_equal(TEST_DF, obs) # assert that no data was added 61 | else: 62 | obs = TEST_DF.compute.learning_rate("Learning Rate", "foo", "Cap") 63 | assert obs.empty 64 | -------------------------------------------------------------------------------- /tests/test_feature_quantiles.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import pytest 3 | 4 | from pyam import IamDataFrame 5 | from pyam.testing import assert_iamframe_equal 6 | 7 | 8 | def test_quantile_one_variable(test_pd_df): 9 | """Tests interquartile range of standard test df 10 | 11 | Because it 
contains only two datapoints, the only 'new' computation 12 | is the median 13 | """ 14 | df = IamDataFrame(test_pd_df) 15 | quantiles = (0.25, 0.5, 0.75) 16 | obs = df.filter(variable="Primary Energy").compute.quantiles(quantiles) 17 | exp = IamDataFrame( 18 | pd.DataFrame( 19 | { 20 | "scenario": [str(q) for q in quantiles], 21 | "2005": [1, (1.0 + 2) / 2, 2], 22 | "2010": [6, (6 + 7) / 2, 7], 23 | } 24 | ), 25 | model="Quantiles", 26 | region="World", 27 | variable="Primary Energy", 28 | unit="EJ/yr", 29 | ) 30 | assert_iamframe_equal(exp, obs) 31 | 32 | 33 | def test_quantile_missing_variable(test_pd_df): 34 | df = IamDataFrame(test_pd_df) 35 | with pytest.raises(ValueError): 36 | df.compute.quantiles((0.25, 0.5)) 37 | -------------------------------------------------------------------------------- /tests/test_index.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import pandas.testing as pdt 3 | import pytest 4 | 5 | from pyam.index import ( 6 | append_index_level, 7 | get_index_levels, 8 | replace_index_labels, 9 | replace_index_values, 10 | ) 11 | from pyam.utils import IAMC_IDX 12 | 13 | 14 | def test_get_index_levels(test_df_index): 15 | """Assert that get_index_levels returns the correct values""" 16 | assert get_index_levels(test_df_index, "scenario") == ["scen_a", "scen_b"] 17 | 18 | 19 | def test_get_index_levels_raises(test_df_index): 20 | """Assert that get_index_levels raises with non-existing level""" 21 | with pytest.raises(KeyError): 22 | get_index_levels(test_df_index, "foo") 23 | 24 | 25 | @pytest.mark.parametrize( 26 | "exp_scen, mapping", 27 | [ 28 | (["scen_c", "scen_c", "scen_b"], {"scen_a": "scen_c"}), 29 | (["scen_c", "scen_c", "scen_c"], {"scen_a": "scen_c", "scen_b": "scen_c"}), 30 | (["scen_b", "scen_b", "scen_c"], {"scen_a": "scen_b", "scen_b": "scen_c"}), 31 | # this test ensures that no transitive replacing occurs 32 | ], 33 | ) 34 | @pytest.mark.parametrize("rows", (None, [False, True, True])) 35 | def test_replace_index_level(test_pd_df, test_df_index, exp_scen, mapping, rows): 36 | """Assert that replace_index_values works as expected""" 37 | 38 | test_pd_df["scenario"] = exp_scen if rows is None else ["scen_a"] + exp_scen[1:] 39 | exp = test_pd_df.set_index(IAMC_IDX) 40 | 41 | test_df_index.index = replace_index_values(test_df_index, "scenario", mapping, rows) 42 | pdt.assert_frame_equal(exp, test_df_index) 43 | 44 | 45 | def test_replace_index_level_raises(test_df_index): 46 | """Assert that replace_index_values raises with non-existing level""" 47 | with pytest.raises(KeyError): 48 | replace_index_values(test_df_index, "foo", {"scen_a": "scen_c"}) 49 | 50 | 51 | def test_append_index(): 52 | """Assert that appending a level to an index and re-ordering works as expected""" 53 | 54 | index = pd.MultiIndex( 55 | codes=[[0, 1]], 56 | levels=[["scen_a", "scen_b"]], 57 | names=["scenario"], 58 | ) 59 | 60 | obs = append_index_level(index, 0, "World", "region", order=["region", "scenario"]) 61 | 62 | exp = pd.MultiIndex( 63 | codes=[[0, 0], [0, 1]], 64 | levels=[["World"], ["scen_a", "scen_b"]], 65 | names=["region", "scenario"], 66 | ) 67 | pdt.assert_index_equal(obs, exp) 68 | 69 | 70 | def test_replace_index_labels(): 71 | """Assert that replacing index labels works as expected""" 72 | 73 | index = pd.MultiIndex( 74 | codes=[[0, 0], [0, 1], [0, 0]], 75 | levels=[["World"], ["scen_a", "scen_b"], ["foo"]], 76 | names=["region", "scenario", "variable"], 77 | ) 78 | 79 | obs =
replace_index_labels(index, "scenario", ["A", "B"]) 80 | 81 | exp = pd.MultiIndex( 82 | codes=[[0, 0], [0, 1], [0, 0]], 83 | levels=[["World"], ["A", "B"], ["foo"]], 84 | names=["region", "scenario", "variable"], 85 | ) 86 | pdt.assert_index_equal(obs, exp) 87 | -------------------------------------------------------------------------------- /tests/test_io_unfccc.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import pytest 3 | 4 | from pyam import IamDataFrame, read_unfccc 5 | from pyam.testing import assert_iamframe_equal 6 | 7 | UNFCCC_DF = pd.DataFrame( 8 | [[1990, 1638.57], [1991, 1460.31], [1992, 1429.20]], 9 | columns=["year", "value"], 10 | ) 11 | 12 | INDEX_ARGS = dict(model="UNFCCC", scenario="Data Inventory") 13 | 14 | 15 | @pytest.mark.skip("Skip due to https://github.com/pik-primap/unfccc_di_api/issues/74") 16 | def test_unfccc_tier1(): 17 | # test that UNFCCC API returns expected data and units 18 | exp = IamDataFrame( 19 | UNFCCC_DF, 20 | **INDEX_ARGS, 21 | region="DEU", 22 | variable="Emissions|CH4|Agriculture", 23 | unit="kt CH4", 24 | ) 25 | 26 | obs = read_unfccc(party_code="DEU", gases=["CH4"], tier=1) 27 | 28 | # assert that the data is similar 29 | horizon = [1990, 1991, 1992] 30 | assert_iamframe_equal(obs.filter(year=horizon, variable="*Agri*"), exp) 31 | 32 | # assert that variables are similar 33 | types = [ 34 | "Agriculture", 35 | "Energy", 36 | "Industrial Processes and Product Use", 37 | "Land Use, Land-Use Change and Forestry", 38 | "Waste", 39 | ] 40 | assert obs.variable == [f"Emissions|CH4|{i}" for i in types] 41 | 42 | # assert that the unit is merged as expected 43 | assert obs.unit == ["kt CH4"] 44 | -------------------------------------------------------------------------------- /tests/test_io_worldbank.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import pandas as pd 4 | import pytest 5 | from requests.exceptions import ReadTimeout 6 | 7 | from pyam import IamDataFrame, read_worldbank 8 | from pyam.testing import assert_iamframe_equal 9 | from pyam.utils import IAMC_IDX 10 | 11 | logger = logging.getLogger(__name__) 12 | 13 | try: 14 | import wbdata # noqa: F401 15 | 16 | WB_UNAVAILABLE = False 17 | except ImportError: 18 | WB_UNAVAILABLE = True 19 | 20 | WB_REASON = "World Bank API unavailable" 21 | 22 | WB_DF = pd.DataFrame( 23 | [ 24 | ["foo", "WDI", "Canada", "GDP", "n/a", 49231.9, 50283.0, 51409.4], 25 | ["foo", "WDI", "Mexico", "GDP", "n/a", 20065.3, 20477.6, 20613.5], 26 | ["foo", "WDI", "United States", "GDP", "n/a", 56825.5, 58471.1, 59952.7], 27 | ], 28 | columns=IAMC_IDX + [2003, 2004, 2005], 29 | ) 30 | 31 | 32 | @pytest.mark.skipif(WB_UNAVAILABLE, reason=WB_REASON) 33 | def test_worldbank(): 34 | try: 35 | # Find the country codes via wbdata.get_countries(query="Canada") etc 36 | obs = read_worldbank( 37 | model="foo", 38 | indicators={"NY.GDP.PCAP.PP.KD": "GDP"}, 39 | country=["CAN", "MEX", "USA"], 40 | date=("2003", "2005"), 41 | ) 42 | exp = IamDataFrame(WB_DF) 43 | # test data with 5% relative tolerance to guard against minor data changes 44 | assert_iamframe_equal(obs, exp, rtol=5.0e-2) 45 | except ReadTimeout: 46 | logger.error("Timeout when reading from WorldBank API.") 47 | -------------------------------------------------------------------------------- /tests/test_ixmp4.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from ixmp4.core.region 
import RegionModel 3 | from ixmp4.core.unit import UnitModel 4 | 5 | import pyam 6 | from pyam import read_ixmp4 7 | from pyam.testing import assert_iamframe_equal 8 | 9 | 10 | def test_to_ixmp4_missing_region_raises(test_platform, test_df_year): 11 | """Writing to platform raises if region not defined""" 12 | test_df_year.rename(region={"World": "foo"}, inplace=True) 13 | with pytest.raises(RegionModel.NotFound, match="foo. Use `Platform.regions."): 14 | test_df_year.to_ixmp4(platform=test_platform) 15 | 16 | 17 | def test_to_ixmp4_missing_unit_raises(test_platform, test_df_year): 18 | """Writing to platform raises if unit not defined""" 19 | test_df_year.rename(unit={"EJ/yr": "foo"}, inplace=True) 20 | with pytest.raises(UnitModel.NotFound, match="foo. Use `Platform.units."): 21 | test_df_year.to_ixmp4(platform=test_platform) 22 | 23 | 24 | def test_ixmp4_time_not_implemented(test_platform, test_df): 25 | """Writing an IamDataFrame with datetime-data is not implemented""" 26 | if test_df.time_domain != "year": 27 | with pytest.raises(NotImplementedError): 28 | test_df.to_ixmp4(platform=test_platform) 29 | 30 | 31 | def test_ixmp4_integration(test_platform, test_df_year): 32 | """Write an IamDataFrame to the platform""" 33 | 34 | # test writing to platform 35 | test_df_year.to_ixmp4(platform=test_platform) 36 | 37 | # read only default scenarios (runs) - version number added as meta indicator 38 | obs = read_ixmp4(platform=test_platform) 39 | exp = test_df_year.copy() 40 | exp.set_meta(1, "version") # add version number from ixmp4 41 | assert_iamframe_equal(exp, obs) 42 | 43 | # make one scenario a non-default scenario, make sure that it is not included 44 | test_platform.runs.get("model_a", "scen_b").unset_as_default() 45 | obs = read_ixmp4(platform=test_platform) 46 | assert_iamframe_equal(exp.filter(scenario="scen_a"), obs) 47 | 48 | # read all scenarios (runs) - version number used as additional index dimension 49 | obs = read_ixmp4(platform=test_platform, default_only=False) 50 | data = test_df_year.data 51 | data["version"] = 1 52 | meta = test_df_year.meta.reset_index() 53 | meta["version"] = 1 54 | exp = pyam.IamDataFrame(data, meta=meta, index=["model", "scenario", "version"]) 55 | pyam.assert_iamframe_equal(exp, obs) 56 | 57 | 58 | @pytest.mark.parametrize( 59 | "filters", 60 | ( 61 | dict(model="model_a"), 62 | dict(scenario="scen_a"), 63 | dict(scenario="*n_a"), 64 | dict(model="model_a", scenario="scen_a", region="World", variable="* Energy"), 65 | dict(scenario="scen_a", region="World", variable="Primary Energy", year=2010), 66 | ), 67 | ) 68 | def test_ixmp4_filters(test_platform, test_df_year, filters): 69 | """Write an IamDataFrame to the platform and read it back with filters""" 70 | 71 | # test writing to platform 72 | test_df_year.to_ixmp4(platform=test_platform) 73 | 74 | # add 'version' meta indicator (added during ixmp4 roundtrip) 75 | test_df_year.set_meta(1, "version") 76 | 77 | # read with filters 78 | assert_iamframe_equal( 79 | read_ixmp4(test_platform, **filters), 80 | test_df_year.filter(**filters), 81 | ) 82 | 83 | 84 | @pytest.mark.parametrize("drop_meta", (True, False)) 85 | def test_ixmp4_reserved_columns(test_platform, test_df_year, drop_meta): 86 | """Make sure that a 'version' column in `meta` is not written to the platform""" 87 | 88 | if drop_meta: 89 | test_df_year = pyam.IamDataFrame(test_df_year.data) 90 | 91 | # write to platform with a version-number as meta indicator 92 | test_df_year.set_meta(1, "version") # add version number
from ixmp4 93 | test_df_year.to_ixmp4(platform=test_platform) 94 | 95 | # version is not saved to the platform 96 | if drop_meta: 97 | assert len(test_platform.runs.get("model_a", "scen_a").meta) == 0 98 | else: 99 | assert "version" not in test_platform.runs.get("model_a", "scen_a").meta 100 | 101 | # version is included when reading again from the platform 102 | assert_iamframe_equal(test_df_year, pyam.read_ixmp4(test_platform)) 103 | -------------------------------------------------------------------------------- /tests/test_logger.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | 4 | def test_logger_namespacing(test_df, caplog): 5 | with caplog.at_level(logging.INFO, logger="pyam.core"): 6 | test_df.filter(model="junk") 7 | 8 | assert caplog.record_tuples == [ 9 | ( 10 | "pyam.core", # namespacing 11 | logging.WARNING, # level 12 | "Filtered IamDataFrame is empty!", # message 13 | ) 14 | ] 15 | 16 | 17 | def test_adjusting_logger_level(test_df, caplog): 18 | def throw_warning(): 19 | logging.warning("This is a root warning") 20 | 21 | with caplog.at_level(logging.INFO, logger="pyam.core"): 22 | test_df.filter(model="junk") 23 | throw_warning() 24 | 25 | assert caplog.record_tuples == [ 26 | ("pyam.core", logging.WARNING, "Filtered IamDataFrame is empty!"), 27 | ("root", logging.WARNING, "This is a root warning"), 28 | ] 29 | 30 | caplog.clear() 31 | with caplog.at_level(logging.ERROR, logger="pyam.core"): 32 | test_df.filter(model="junk") 33 | throw_warning() 34 | 35 | # only the root warning should come through now i.e. we can silence pyam 36 | # without silencing everything 37 | # TODO this test fails with pytest>=6.0.1, deactivated for now 38 | # assert caplog.record_tuples == [ 39 | # ("root", logging.WARNING, "This is a root warning"), 40 | # ] 41 | -------------------------------------------------------------------------------- /tests/test_run_control.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import pytest 4 | 5 | from pyam import IamDataFrame 6 | from pyam.run_control import run_control 7 | 8 | from .conftest import TEST_DATA_DIR, TEST_DF 9 | 10 | 11 | def test_exec(): 12 | rc = { 13 | "exec": [ 14 | {"file": os.path.join(TEST_DATA_DIR, "exec.py"), "functions": ["do_exec"]}, 15 | ] 16 | } 17 | 18 | run_control().update(rc) 19 | df = IamDataFrame(TEST_DF) 20 | 21 | exp = ["bar"] * len(TEST_DF["scenario"].unique()) 22 | obs = df["foo"].values 23 | assert (exp == obs).all() 24 | 25 | 26 | def test_no_file(): 27 | rc = run_control() 28 | pytest.raises(IOError, rc.update, "no_such_file.yaml") 29 | -------------------------------------------------------------------------------- /tests/test_slice.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import pytest 3 | 4 | 5 | def test_slice_len(test_df_year): 6 | """Check the length of a slice""" 7 | 8 | assert len(test_df_year.slice(scenario="scen_a")) == 4 9 | 10 | 11 | def test_slice_index_attributes(test_df): 12 | # assert that the index and data column attributes are set correctly in an IamSlice 13 | 14 | s = test_df.slice() 15 | 16 | assert s.model == ["model_a"] 17 | assert s.scenario == ["scen_a", "scen_b"] 18 | assert s.region == ["World"] 19 | assert s.variable == ["Primary Energy", "Primary Energy|Coal"] 20 | assert s.unit == ["EJ/yr"] 21 | if test_df.time_col == "year": 22 | assert s.year == [2005, 2010] 23 | else: 24 | match = "'IamSlice' 
object has no attribute 'year'" 25 | with pytest.raises(AttributeError, match=match): 26 | s.year 27 | assert s.time.equals(pd.Index(test_df.data[test_df.time_col].unique())) 28 | 29 | 30 | def test_filtered_slice_index_attributes(test_df_year): 31 | # assert that the attributes are set correctly in a filtered IamSlice 32 | 33 | s = test_df_year.slice(scenario="scen_b") 34 | assert s.scenario == ["scen_b"] 35 | 36 | 37 | def test_print(test_df_year): 38 | """Assert that `print(IamSlice)` (and `info()`) returns as expected""" 39 | exp = "\n".join( 40 | [ 41 | "", 42 | "Index dimensions and data coordinates:", 43 | " model : model_a (1)", 44 | " scenario : scen_a, scen_b (2)", 45 | " region : World (1)", 46 | " variable : Primary Energy, Primary Energy|Coal (2)", 47 | " unit : EJ/yr (1)", 48 | " year : 2005, 2010 (2)", 49 | ] 50 | ) 51 | obs = test_df_year.slice().info() 52 | assert obs == exp 53 | -------------------------------------------------------------------------------- /tests/test_string.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pandas as pd 3 | import pytest 4 | 5 | from pyam.str import ( 6 | concat_with_pipe, 7 | find_depth, 8 | get_variable_components, 9 | reduce_hierarchy, 10 | ) 11 | 12 | TEST_VARS = ["foo", "foo|bar", "foo|bar|baz"] 13 | TEST_CONCAT_SERIES = pd.Series(["foo", "bar", "baz"], index=["f", "b", "z"]) 14 | 15 | 16 | def test_find_depth_as_list(): 17 | obs = find_depth(TEST_VARS) 18 | assert obs == [0, 1, 2] 19 | 20 | 21 | def test_find_depth_as_str(): 22 | assert find_depth("foo|bar|baz") == 2 23 | 24 | 25 | def test_find_depth_with_str(): 26 | data = pd.Series(["foo", "foo|bar|baz", "bar|baz", "bar|baz|foo"]) 27 | obs = find_depth(data, "bar") 28 | assert obs == [None, None, 1, 2] 29 | 30 | 31 | def test_find_depth_with_str_1(): 32 | data = pd.Series(["foo", "foo|bar|baz", "bar|baz", "bar|baz|foo"]) 33 | obs = find_depth(data, "bar|", 1) 34 | assert obs == [False, False, False, True] 35 | 36 | 37 | def test_find_depth_with_str_0(): 38 | data = pd.Series(["foo", "foo|bar|baz", "bar|baz", "bar|baz|foo"]) 39 | obs = find_depth(data, "*bar|", 0) 40 | assert obs == [False, True, True, False] 41 | 42 | 43 | def test_find_depth_0(): 44 | obs = find_depth(TEST_VARS, level=0) 45 | assert obs == [True, False, False] 46 | 47 | 48 | def test_find_depth_0_minus(): 49 | obs = find_depth(TEST_VARS, level="0-") 50 | assert obs == [True, False, False] 51 | 52 | 53 | def test_find_depth_0_plus(): 54 | obs = find_depth(TEST_VARS, level="0+") 55 | assert obs == [True, True, True] 56 | 57 | 58 | def test_find_depth_1(): 59 | obs = find_depth(TEST_VARS, level=1) 60 | assert obs == [False, True, False] 61 | 62 | 63 | def test_find_depth_1_minus(): 64 | obs = find_depth(TEST_VARS, level="1-") 65 | assert obs == [True, True, False] 66 | 67 | 68 | def test_find_depth_1_plus(): 69 | obs = find_depth(TEST_VARS, level="1+") 70 | assert obs == [False, True, True] 71 | 72 | 73 | def test_concat_with_pipe_all(): 74 | obs = concat_with_pipe(TEST_CONCAT_SERIES) 75 | assert obs == "foo|bar|baz" 76 | 77 | 78 | def test_concat_with_pipe_exclude_none(): 79 | s = TEST_CONCAT_SERIES.copy() 80 | s["b"] = None 81 | obs = concat_with_pipe(s) 82 | assert obs == "foo|baz" 83 | 84 | 85 | def test_concat_with_pipe_exclude_nan(): 86 | s = TEST_CONCAT_SERIES.copy() 87 | s["b"] = np.nan 88 | obs = concat_with_pipe(s) 89 | assert obs == "foo|baz" 90 | 91 | 92 | def test_concat_with_pipe_by_name(): 93 | obs = 
concat_with_pipe(TEST_CONCAT_SERIES, cols=["f", "z"]) 94 | assert obs == "foo|baz" 95 | 96 | 97 | def test_concat_list_with_pipe(): 98 | obs = concat_with_pipe(["foo", "bar"]) 99 | assert obs == "foo|bar" 100 | 101 | 102 | def test_concat_list_with_pipe_by_cols(): 103 | obs = concat_with_pipe(["foo", "bar", "baz"], cols=[0, 2]) 104 | assert obs == "foo|baz" 105 | 106 | 107 | def test_concat_args_with_pipe(): 108 | obs = concat_with_pipe("foo", "bar") 109 | assert obs == "foo|bar" 110 | 111 | 112 | def test_concat_args_with_pipe_by_cols(): 113 | obs = concat_with_pipe("foo", "bar", "baz", cols=[0, 2]) 114 | assert obs == "foo|baz" 115 | 116 | 117 | def test_concat_args_deprecated(): 118 | # test error message for legacy-issues when introducing `*args` (#778) 119 | with pytest.raises(DeprecationWarning, match=r"Please use `cols=\[0, 2\]`."): 120 | concat_with_pipe(["foo", "bar", "baz"], [0, 2]) 121 | 122 | 123 | def test_reduce_hierarchy_0(): 124 | assert reduce_hierarchy("foo|bar|baz", 0) == "foo" 125 | 126 | 127 | def test_reduce_hierarchy_1(): 128 | assert reduce_hierarchy("foo|bar|baz", 1) == "foo|bar" 129 | 130 | 131 | def test_reduce_hierarchy_neg1(): 132 | assert reduce_hierarchy("foo|bar|baz", -1) == "foo|bar" 133 | 134 | 135 | def test_reduce_hierarchy_neg2(): 136 | assert reduce_hierarchy("foo|bar|baz", -2) == "foo" 137 | 138 | 139 | def test_get_variable_components_int(): 140 | assert get_variable_components("foo|bar|baz", 1) == "bar" 141 | 142 | 143 | def test_get_variable_components_list(): 144 | assert get_variable_components("foo|bar|baz", [1, 2]) == ["bar", "baz"] 145 | 146 | 147 | def test_get_variable_components_indexError(): 148 | with pytest.raises(IndexError): 149 | get_variable_components("foo|bar|baz", 3) 150 | 151 | 152 | def test_get_variable_components_join_true(): 153 | assert get_variable_components("foo|bar|baz", [0, 2], join=True) == "foo|baz" 154 | 155 | 156 | def test_get_variable_components_join_str(): 157 | assert get_variable_components("foo|bar|baz", [2, 1], join="_") == "baz_bar" 158 | -------------------------------------------------------------------------------- /tests/test_testing.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from pyam.testing import assert_iamframe_equal 4 | 5 | 6 | def test_equal_meta_nan_col(test_df_year): 7 | """Test that a meta-column with only np.nan is seen as equal""" 8 | # https://github.com/IAMconsortium/pyam/issues/515 9 | df = test_df_year.copy() 10 | df.set_meta(meta=np.nan, name="nan-column") # add a column of np.nan's to `meta` 11 | 12 | assert_iamframe_equal(test_df_year, df) 13 | -------------------------------------------------------------------------------- /tests/test_timeseries.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import datetime 3 | import logging 4 | 5 | import numpy as np 6 | import pandas as pd 7 | import pytest 8 | 9 | from pyam.timeseries import cross_threshold, cumulative, fill_series 10 | from pyam.utils import to_int 11 | 12 | 13 | def test_fill_series(): 14 | # note that the series is not ordered and the index is defined as float 15 | y = pd.Series(data=[np.nan, 1, 4, 1], index=[2002.0, 2008.0, 2005.0, 2013.0]) 16 | assert fill_series(y, 2006) == 3.0 17 | 18 | 19 | def test_fill_series_datetime(): 20 | # note that the series is not ordered and the index is defined as datetime 21 | y = pd.Series( 22 | data=[3, 1], 23 | index=[datetime.datetime(2001, 1, 1),
datetime.datetime(2003, 1, 1)], 24 | ) 25 | assert fill_series(y, datetime.datetime(2002, 1, 1)) == 2.0 26 | 27 | 28 | def test_fill_series_out_of_range(): 29 | y = pd.Series(data=[np.nan, 1, 3, 1], index=[2002.0, 2005.0, 2007.0, 2013.0]) 30 | assert fill_series(y, 2001) is np.nan 31 | 32 | 33 | def test_cols_to_int(): 34 | y = pd.Series(data=[np.nan, 1, 3, 1], index=[2002.0, 2007.5, 2003.0, 2013.0]) 35 | pytest.raises(ValueError, to_int, x=y) 36 | 37 | 38 | def test_cumulative(): 39 | # note that the series is not ordered and the index is defined as float 40 | y = pd.Series(data=[np.nan, 1, 3, 1], index=[2002.0, 2007.0, 2003.0, 2013.0]) 41 | assert cumulative(y, 2008, 2013) == 6 42 | 43 | 44 | def test_cumulative_out_of_range(): 45 | # set logger level to exclude warnings in unit test output 46 | logging.getLogger("pyam").setLevel("ERROR") 47 | # note that the series is not ordered and the index is defined as float 48 | y = pd.Series(data=[np.nan, 1, 3, 1], index=[2002.0, 2005.0, 2007.0, 2013.0]) 49 | assert cumulative(y, 2008, 2015) is np.nan 50 | logging.getLogger("pyam").setLevel("NOTSET") 51 | 52 | 53 | def test_cross_threshold(): 54 | y = pd.Series(data=[np.nan, 1, 3, 1], index=[2002, 2005, 2007, 2013]) 55 | obs = cross_threshold(y, 2) 56 | assert obs == [2007, 2011] 57 | 58 | 59 | def test_cross_threshold_empty(): 60 | y = pd.Series(data=[np.nan, 1, 3, 1], index=[2002, 2005, 2007, 2013]) 61 | obs = cross_threshold(y, 4) 62 | assert obs == [] 63 | 64 | 65 | def test_cross_threshold_from_below(): 66 | y = pd.Series(data=[np.nan, 1, 3, 1], index=[2002, 2005, 2007, 2013]) 67 | obs = cross_threshold(y, 2, direction="from below") 68 | assert obs == [2007] 69 | 70 | 71 | def test_cross_threshold_from_above(): 72 | y = pd.Series(data=[np.nan, 1, 3, 1], index=[2002, 2005, 2007, 2013]) 73 | obs = cross_threshold(y, 2, direction="from above") 74 | assert obs == [2011] 75 | 76 | 77 | def test_cross_threshold_direction_error(): 78 | y = pd.Series(data=[np.nan, 1, 3, 1], index=[2002, 2005, 2007, 2013]) 79 | pytest.raises(ValueError, cross_threshold, x=y, threshold=2, direction="up") 80 | -------------------------------------------------------------------------------- /tests/test_tutorials.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | try: 4 | import nbformat 5 | from nbconvert.preprocessors import ExecutePreprocessor 6 | except ModuleNotFoundError: 7 | pytest.skip( 8 | "Missing Jupyter Notebook and related dependencies", allow_module_level=True 9 | ) 10 | 11 | from .conftest import IIASA_UNAVAILABLE, here 12 | 13 | nb_path = here.parent / "docs" / "tutorials" 14 | 15 | 16 | def _run_notebook(file, timeout=30): 17 | """Execute a notebook file""" 18 | with open(nb_path / f"{file}.ipynb") as f: 19 | nb = nbformat.read(f, as_version=4) 20 | 21 | ep = ExecutePreprocessor(timeout=timeout) 22 | ep.preprocess(nb, {"metadata": {"path": nb_path}}) 23 | 24 | 25 | @pytest.mark.parametrize( 26 | "file", 27 | [ 28 | "pyam_first_steps", 29 | "data_table_formats", 30 | "unit_conversion", 31 | "aggregating_downscaling_consistency", 32 | "subannual_time_resolution", 33 | "pyam_logo", 34 | "ipcc_colors", 35 | "legends", 36 | "algebraic_operations", 37 | "aggregating_variables_and_plotting_with_negative_values", 38 | ], 39 | ) 40 | def test_tutorial_notebook(file): 41 | _run_notebook(file) 42 | 43 | 44 | @pytest.mark.skipif(IIASA_UNAVAILABLE, reason="IIASA database API unavailable") 45 | def test_tutorial_iiasa(): 46 | _run_notebook("iiasa") 47 |
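# For ad-hoc debugging outside of pytest, the helper above can execute a single
# tutorial directly; a minimal sketch (assuming the repository layout shown above
# and that the notebook dependencies are installed):
#
#     _run_notebook("unit_conversion", timeout=60)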
-------------------------------------------------------------------------------- /tests/test_utils.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | 3 | import numpy as np 4 | import pandas as pd 5 | import pytest 6 | from pandas import Timestamp 7 | from pandas import testing as pdt 8 | 9 | from pyam.utils import ( 10 | META_IDX, 11 | merge_meta, 12 | pattern_match, 13 | to_time, 14 | ) 15 | 16 | 17 | def test_pattern_match_none(): 18 | data = pd.Series(["foo", "bar"]) 19 | values = ["baz"] 20 | 21 | obs = pattern_match(data, values) 22 | assert (obs == [False, False]).all() 23 | 24 | 25 | def test_pattern_match_nan(): 26 | data = pd.Series(["foo", np.nan]) 27 | values = ["baz"] 28 | 29 | obs = pattern_match(data, values, has_nan=True) 30 | assert (obs == [False, False]).all() 31 | 32 | 33 | def test_pattern_match_one(): 34 | data = pd.Series(["foo", "bar"]) 35 | values = ["foo"] 36 | 37 | obs = pattern_match(data, values) 38 | assert (obs == [True, False]).all() 39 | 40 | 41 | def test_pattern_match_str_regex(): 42 | data = pd.Series(["foo", "foo2", "bar"]) 43 | values = ["foo"] 44 | 45 | obs = pattern_match(data, values) 46 | assert (obs == [True, False, False]).all() 47 | 48 | 49 | def test_pattern_match_ast_regex(): 50 | data = pd.Series(["foo", "foo2", "bar"]) 51 | values = ["foo*"] 52 | 53 | obs = pattern_match(data, values) 54 | assert (obs == [True, True, False]).all() 55 | 56 | 57 | def test_pattern_match_ast2_regex(): 58 | data = pd.Series(["foo|bar", "foo", "bar"]) 59 | values = ["*o*b*"] 60 | 61 | obs = pattern_match(data, values) 62 | assert (obs == [True, False, False]).all() 63 | 64 | 65 | def test_pattern_match_plus(): 66 | data = pd.Series(["foo", "foo+", "+bar", "b+az"]) 67 | values = ["*+*"] 68 | 69 | obs = pattern_match(data, values) 70 | assert (obs == [False, True, True, True]).all() 71 | 72 | 73 | def test_pattern_match_dot(): 74 | data = pd.Series(["foo", "fo."]) 75 | values = ["fo."] 76 | 77 | obs = pattern_match(data, values) 78 | assert (obs == [False, True]).all() 79 | 80 | 81 | @pytest.mark.parametrize("bracket", ("(bar)", "[bar]", "{2}")) 82 | def test_pattern_match_brackets(bracket): 83 | s = f"foo {bracket}" 84 | data = pd.Series([s, "foo bar"]) 85 | values = [s] 86 | 87 | obs = pattern_match(data, values) 88 | assert (obs == [True, False]).all() 89 | 90 | 91 | def test_pattern_match_dollar(): 92 | data = pd.Series(["foo$bar", "foo"]) 93 | values = ["foo$bar"] 94 | 95 | obs = pattern_match(data, values) 96 | assert (obs == [True, False]).all() 97 | 98 | 99 | def test_pattern_regexp(): 100 | data = pd.Series(["foo", "foa", "foo$"]) 101 | values = ["fo.$"] 102 | 103 | obs = pattern_match(data, values, regexp=True) 104 | assert (obs == [True, True, False]).all() 105 | 106 | 107 | def test_merge_meta(): 108 | # test merging of two meta tables 109 | left = pd.DataFrame( 110 | [ 111 | ["model_a", "scen_a", "foo", 1], 112 | ["model_a", "scen_b", "bar", 2], 113 | ], 114 | columns=META_IDX + ["string", "value"], 115 | ).set_index(META_IDX) 116 | right = pd.DataFrame( 117 | [ 118 | ["model_a", "scen_a", "bar", 2], 119 | ["model_b", "scen_a", "baz", 3], 120 | ], 121 | columns=META_IDX + ["string", "value2"], 122 | ).set_index(META_IDX) 123 | 124 | # merge conflict raises an error 125 | pytest.raises(ValueError, merge_meta, left, right) 126 | 127 | # merge conflict ignoring errors yields expected results 128 | exp = pd.DataFrame( 129 | [ 130 | ["model_a", "scen_a", "foo", 1, 2], 131 | ["model_a", 
"scen_b", "bar", 2, np.nan], 132 | ["model_b", "scen_a", "baz", np.nan, 3], 133 | ], 134 | columns=META_IDX + ["string", "value", "value2"], 135 | ).set_index(META_IDX) 136 | 137 | obs = merge_meta(left, right, ignore_conflict=True) 138 | pdt.assert_frame_equal(exp, obs) 139 | 140 | 141 | @pytest.mark.parametrize( 142 | "x, exp", 143 | [ 144 | ("2", 2), 145 | ("2010-07-10", Timestamp("2010-07-10 00:00")), 146 | (datetime(2010, 7, 10), Timestamp("2010-07-10 00:00")), 147 | ], 148 | ) 149 | def test_to_time(x, exp): 150 | assert to_time(x) == exp 151 | 152 | 153 | @pytest.mark.parametrize("x", [2.5, "2010-07-10 foo"]) 154 | def test_to_time_raises(x): 155 | with pytest.raises(ValueError, match=f"Invalid time domain: {x}"): 156 | to_time(x) 157 | --------------------------------------------------------------------------------