├── .github
│   ├── dependabot.yml
│   └── workflows
│       ├── ci.yml
│       └── deploy_release.yml
├── .gitignore
├── .pre-commit-config.yaml
├── .readthedocs.yaml
├── LICENSE
├── README.md
├── doc
│   ├── .gitignore
│   ├── Makefile
│   ├── analysis.rst
│   ├── api.rst
│   ├── conf.py
│   ├── examples.rst
│   ├── examples
│   │   ├── README.md
│   │   ├── calibrated_models
│   │   │   ├── calibrated_models.yaml
│   │   │   ├── conditions.tsv
│   │   │   ├── measurements.tsv
│   │   │   ├── model.xml
│   │   │   ├── model_space.tsv
│   │   │   ├── observables.tsv
│   │   │   ├── parameters.tsv
│   │   │   ├── petab_problem.yaml
│   │   │   └── petab_select_problem.yaml
│   │   ├── example_cli_famos.ipynb
│   │   ├── example_cli_famos_calibration_tool.py
│   │   ├── example_cli_famos_helpers.py
│   │   ├── model_selection
│   │   │   ├── calibrated_M1_4.yaml
│   │   │   ├── calibrated_M1_7.yaml
│   │   │   ├── calibrated_models_1.yaml
│   │   │   ├── conditions.tsv
│   │   │   ├── measurements.tsv
│   │   │   ├── model.xml
│   │   │   ├── model_space.tsv
│   │   │   ├── observables.tsv
│   │   │   ├── parameters.tsv
│   │   │   ├── petab_problem.yaml
│   │   │   └── petab_select_problem.yaml
│   │   ├── visualization.ipynb
│   │   ├── workflow_cli.ipynb
│   │   └── workflow_python.ipynb
│   ├── index.rst
│   ├── logo
│   │   ├── editable
│   │   │   ├── README.md
│   │   │   ├── logo-tall.svg
│   │   │   └── logo-wide.svg
│   │   ├── logo-tall.svg
│   │   └── logo-wide.svg
│   ├── make.bat
│   ├── obsolete
│   │   └── changes.md
│   ├── problem_definition.rst
│   ├── standard
│   │   ├── make_schemas.py
│   │   ├── model.yaml
│   │   ├── models.yaml
│   │   └── problem.yaml
│   └── test_suite.rst
├── petab_select
│   ├── __init__.py
│   ├── analyze.py
│   ├── candidate_space.py
│   ├── cli.py
│   ├── constants.py
│   ├── criteria.py
│   ├── handlers.py
│   ├── misc.py
│   ├── model.py
│   ├── model_space.py
│   ├── model_subspace.py
│   ├── models.py
│   ├── petab.py
│   ├── plot.py
│   ├── problem.py
│   └── ui.py
├── pyproject.toml
├── requirements_dev.txt
├── setup.py
├── test
│   ├── .gitignore
│   ├── __init__.py
│   ├── analyze
│   │   ├── input
│   │   │   └── models.yaml
│   │   └── test_analyze.py
│   ├── candidate_space
│   │   ├── __init__.py
│   │   ├── input
│   │   │   └── famos_synthetic
│   │   │       ├── petab
│   │   │       │   ├── FAMoS_2019.xml
│   │   │       │   ├── FAMoS_2019_problem.yaml
│   │   │       │   ├── FAMos_2019_synth_measurements_0.1noise.tsv
│   │   │       │   ├── experimental_conditions_FAMoS_2019.tsv
│   │   │       │   ├── observables_FAMoS_2019.tsv
│   │   │       │   └── parameters_FAMoS_2019.tsv
│   │   │       ├── select
│   │   │       │   ├── FAMoS_2019_petab_select_problem.yaml
│   │   │       │   └── model_space_FAMoS_2019.tsv
│   │   │       └── test_files
│   │   │           ├── calibration_results.tsv
│   │   │           ├── predecessor_model.yaml
│   │   │           └── regenerate_model_hashes.py
│   │   ├── test_candidate_space.py
│   │   └── test_famos.py
│   ├── cli
│   │   ├── __init__.py
│   │   ├── expected_output
│   │   │   ├── model
│   │   │   │   ├── conditions.tsv
│   │   │   │   ├── measurements.tsv
│   │   │   │   ├── model.xml
│   │   │   │   ├── observables.tsv
│   │   │   │   ├── parameters.tsv
│   │   │   │   └── problem.yaml
│   │   │   └── models
│   │   │       ├── model_1
│   │   │       │   ├── conditions.tsv
│   │   │       │   ├── measurements.tsv
│   │   │       │   ├── model.xml
│   │   │       │   ├── observables.tsv
│   │   │       │   ├── parameters.tsv
│   │   │       │   └── problem.yaml
│   │   │       └── model_2
│   │   │           ├── conditions.tsv
│   │   │           ├── measurements.tsv
│   │   │           ├── model.xml
│   │   │           ├── observables.tsv
│   │   │           ├── parameters.tsv
│   │   │           └── problem.yaml
│   │   ├── input
│   │   │   ├── model.yaml
│   │   │   └── models.yaml
│   │   └── test_cli.py
│   ├── model
│   │   ├── __init__.py
│   │   ├── expected_output
│   │   │   └── petab
│   │   │       ├── conditions.tsv
│   │   │       ├── measurements.tsv
│   │   │       ├── model.xml
│   │   │       ├── observables.tsv
│   │   │       ├── parameters.tsv
│   │   │       └── problem.yaml
│   │   ├── input
│   │   │   └── model.yaml
│   │   └── test_model.py
│   ├── model_space
│   │   ├── __init__.py
│   │   ├── model_space_file_1.tsv
│   │   ├── model_space_file_2.tsv
│   │   └── test_model_space.py
│   ├── model_subspace
│   │   ├── __init__.py
│   │   └── test_model_subspace.py
│   ├── problem
│   │   ├── __init__.py
│   │   ├── expected_output
│   │   │   ├── model_space.tsv
│   │   │   └── petab_select_problem.yaml
│   │   └── test_problem.py
│   └── ui
│       ├── __init__.py
│       └── test_ui.py
├── test_cases
│   ├── 0001
│   │   ├── expected.yaml
│   │   ├── model_space.tsv
│   │   ├── petab
│   │   │   ├── conditions.tsv
│   │   │   ├── measurements.tsv
│   │   │   ├── model.xml
│   │   │   ├── observables.tsv
│   │   │   ├── parameters.tsv
│   │   │   └── petab_problem.yaml
│   │   └── petab_select_problem.yaml
│   ├── 0002
│   │   ├── expected.yaml
│   │   ├── model_space.tsv
│   │   └── petab_select_problem.yaml
│   ├── 0003
│   │   ├── expected.yaml
│   │   ├── model_space.tsv
│   │   └── petab_select_problem.yaml
│   ├── 0004
│   │   ├── constraints.tsv
│   │   ├── expected.yaml
│   │   ├── model_space.tsv
│   │   └── petab_select_problem.yaml
│   ├── 0005
│   │   ├── expected.yaml
│   │   ├── initial_models.tsv
│   │   ├── model_space.tsv
│   │   └── petab_select_problem.yaml
│   ├── 0006
│   │   ├── expected.yaml
│   │   ├── model_space.tsv
│   │   └── petab_select_problem.yaml
│   ├── 0007
│   │   ├── expected.yaml
│   │   ├── model_space.tsv
│   │   ├── petab
│   │   │   ├── conditions.tsv
│   │   │   ├── measurements.tsv
│   │   │   ├── model.xml
│   │   │   ├── observables.tsv
│   │   │   ├── parameters.tsv
│   │   │   └── petab_problem.yaml
│   │   └── petab_select_problem.yaml
│   ├── 0008
│   │   ├── expected.yaml
│   │   ├── model_space.tsv
│   │   └── petab_select_problem.yaml
│   └── 0009
│       ├── README.md
│       ├── expected.yaml
│       ├── expected_summary.tsv
│       ├── model_space.tsv
│       ├── petab
│       │   ├── conditions.tsv
│       │   ├── measurements.tsv
│       │   ├── model.xml
│       │   ├── observables.tsv
│       │   ├── parameters.tsv
│       │   └── petab_problem.yaml
│       ├── petab_select_problem.yaml
│       └── predecessor_model.yaml
└── tox.ini
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 | updates:
3 |
4 | - package-ecosystem: "github-actions"
5 | directory: "/"
6 | schedule:
7 | interval: "weekly"
8 | ignore:
9 | - dependency-name: "*"
10 | update-types: ["version-update:semver-patch", "version-update:semver-minor"]
11 | target-branch: "develop"
12 |
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: CI
2 |
3 | # trigger
4 | on:
5 | push:
6 | pull_request:
7 | branches:
8 | - main
9 | schedule:
10 | # run Tuesday and Friday at 02:00 UTC
11 | - cron: '00 2 * * TUE,FRI'
12 | workflow_dispatch:
13 | merge_group:
14 |
15 | jobs:
16 | base:
17 | runs-on: ubuntu-latest
18 | strategy:
19 | matrix:
20 | # test on latest and minimum python version
21 | python-version: ['3.13', '3.11']
22 |
23 | steps:
24 | - name: Check out repository
25 | uses: actions/checkout@v4
26 |
27 | - name: Prepare python ${{ matrix.python-version }}
28 | uses: actions/setup-python@v5
29 | with:
30 | python-version: ${{ matrix.python-version }}
31 |
32 | - name: Cache
33 | uses: actions/cache@v4
34 | with:
35 | path: ~/.cache
36 | key: ci-${{ runner.os }}-${{ matrix.python-version }}-base
37 |
38 | - name: Install system dependencies
39 | run: |
40 | sudo apt-get update
41 | sudo apt-get install -y \
42 | swig \
43 | libatlas-base-dev \
44 | libhdf5-serial-dev
45 |
46 | - name: Install Python dependencies
47 | run: pip install -r requirements_dev.txt
48 |
49 | - name: Run tox
50 | run: python -m tox
51 |
52 | - name: Run pre-commit hooks
53 | run: pre-commit run --all-files
54 |
55 | - name: Coverage
56 | uses: codecov/codecov-action@v5
57 | with:
58 | file: ./coverage.xml
59 |
--------------------------------------------------------------------------------
/.github/workflows/deploy_release.yml:
--------------------------------------------------------------------------------
1 | name: Deploy
2 | on:
3 | release:
4 | types:
5 | - published
6 |
7 | jobs:
8 | pypi:
9 | name: Deploy PyPI
10 | runs-on: ubuntu-latest
11 | environment:
12 | name: pypi
13 | url: https://pypi.org/p/petab-select
14 | permissions:
15 | id-token: write
16 |
17 | steps:
18 | - name: Set up Python
19 | uses: actions/setup-python@v5
20 | with:
21 | python-version: "3.12"
22 |
23 | - uses: actions/checkout@v4
24 | with:
25 | fetch-depth: 20
26 |
27 | - name: Install dependencies
28 | run: |
29 | python -m pip install --upgrade pip
30 | pip install setuptools wheel build
31 | python -m build -s
32 |
33 | - name: Publish a Python distribution to PyPI
34 | uses: pypa/gh-action-pypi-publish@release/v1
35 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__
2 | .ipynb_checkpoints
3 | dist
4 | amici_models
5 | *egg-info
6 |
7 | # coverage
8 | .coverage*
9 | coverage.xml
10 |
11 | # tox
12 | .tox
13 |
14 | # osx
15 | .DS_Store
16 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | # This is run as a precondition to commits; run it manually via `pre-commit run`.
2 |
3 | # When adding new hooks, it may make sense to run `pre-commit run --all-files`
4 | # once, as by default only changed files are checked.
5 |
6 | repos:
7 | - repo: https://github.com/pre-commit/pre-commit-hooks
8 | rev: v5.0.0
9 | hooks:
10 | - id: check-yaml
11 | description: Check yaml files for parseable syntax
12 | - id: check-added-large-files
13 | description: Prevent large files from being committed
14 | - id: check-merge-conflict
15 | description: Check for files that contain merge conflict strings
16 | - id: check-symlinks
17 | description: Check for symlinks which do not point to anything
18 | - id: trailing-whitespace
19 | description: Trim trailing whitespaces
20 | - id: end-of-file-fixer
21 | description: Fix empty lines at ends of files
22 | - id: detect-private-key
23 | description: Detects the presence of private keys
24 | - repo: https://github.com/astral-sh/ruff-pre-commit
25 | # Ruff version.
26 | rev: v0.6.9
27 | hooks:
28 | # Run the linter.
29 | - id: ruff
30 | args:
31 | - --fix
32 | - --config
33 | - pyproject.toml
34 |
35 | # Run the formatter.
36 | - id: ruff-format
37 | args:
38 | - --config
39 | - pyproject.toml
40 |
--------------------------------------------------------------------------------
/.readthedocs.yaml:
--------------------------------------------------------------------------------
1 | # .readthedocs.yml
2 | # Read the Docs configuration file
3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
4 |
5 | # Required
6 | version: 2
7 |
8 | # Build documentation in the docs/ directory with Sphinx
9 | sphinx:
10 | builder: html
11 | configuration: doc/conf.py
12 | fail_on_warning: False
13 |
14 | python:
15 | install:
16 | - method: pip
17 | path: .
18 | extra_requirements:
19 | - doc
20 |
21 | build:
22 | os: "ubuntu-22.04"
23 | apt_packages:
24 | - libatlas-base-dev
25 | - swig
26 | tools:
27 | python: "3.11"
28 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 3-Clause License
2 |
3 | Copyright (c) 2021, PEtab - an SBML and TSV-based data format for parameter estimation problems in systems biology
4 | All rights reserved.
5 |
6 | Redistribution and use in source and binary forms, with or without
7 | modification, are permitted provided that the following conditions are met:
8 |
9 | 1. Redistributions of source code must retain the above copyright notice, this
10 | list of conditions and the following disclaimer.
11 |
12 | 2. Redistributions in binary form must reproduce the above copyright notice,
13 | this list of conditions and the following disclaimer in the documentation
14 | and/or other materials provided with the distribution.
15 |
16 | 3. Neither the name of the copyright holder nor the names of its
17 | contributors may be used to endorse or promote products derived from
18 | this software without specific prior written permission.
19 |
20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | [PyPI](https://pypi.org/project/petab-select/)
4 | [DOI](https://doi.org/10.5281/zenodo.14183390)
5 |
6 |
7 | The PEtab extension for model selection, comprising additional file formats, a Python library, and a command-line interface.
8 |
9 | ## Install
10 |
11 | The Python 3 package provides both the Python API and the command-line
12 | interface (CLI), and can be installed from PyPI with `pip3 install petab-select`.
13 |
14 | ## Documentation
15 |
16 | Further documentation is available at
17 | [https://petab-select.readthedocs.io/](https://petab-select.readthedocs.io/).
18 |
19 | ## Examples
20 |
21 | Example Jupyter notebooks covering the Python API, the CLI, and visualization are in the `doc/examples` directory.
22 | The notebooks can be viewed at [https://petab-select.readthedocs.io/en/stable/examples.html](https://petab-select.readthedocs.io/en/stable/examples.html).
23 |
24 | ## Supported features
25 |
26 | ### Criterion
27 |
28 | - `AIC`: https://en.wikipedia.org/wiki/Akaike_information_criterion#Definition
29 | - `AICc`: https://en.wikipedia.org/wiki/Akaike_information_criterion#Modification_for_small_sample_size
30 | - `BIC`: https://en.wikipedia.org/wiki/Bayesian_information_criterion#Definition
31 |
32 | ### Methods
33 |
34 | - `forward`: https://en.wikipedia.org/wiki/Stepwise_regression#Main_approaches
35 | - `backward`: https://en.wikipedia.org/wiki/Stepwise_regression#Main_approaches
36 | - `brute_force`: Optimize all possible model candidates, then return the model
37 | with the best criterion value.
38 | - `famos`: https://doi.org/10.1371/journal.pcbi.1007230
39 |
40 | Note that the directional methods (forward, backward) find models with the
41 | smallest step size (in terms of number of estimated parameters). For example,
42 | given the forward method and a predecessor model with 2 estimated parameters,
43 | if there are no models with 3 estimated parameters, but some models with 4
44 | estimated parameters, then the search may return candidate models with 4
45 | estimated parameters.
46 |
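47 | ## Usage example
48 |
49 | As a minimal sketch of the Python API (the filenames below are placeholders for
50 | your own files; see the examples above for complete workflows), a selection
51 | problem and previously calibrated models can be loaded and compared as follows:
52 |
53 | ```python
54 | import petab_select
55 |
56 | # Load the model selection problem and some previously calibrated models.
57 | problem = petab_select.Problem.from_yaml("petab_select_problem.yaml")
58 | models = petab_select.Models.from_yaml("calibrated_models.yaml")
59 |
60 | # Tabular overview of all models, as a pandas dataframe.
61 | print(models.df)
62 |
63 | # The model with the best (lowest) value of the problem's criterion.
64 | best = min(models, key=lambda model: model.get_criterion(problem.criterion))
65 | print(best.model_id, best.get_criterion(problem.criterion))
66 | ```
67 |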
--------------------------------------------------------------------------------
/doc/.gitignore:
--------------------------------------------------------------------------------
1 | _build
2 | generated
3 |
--------------------------------------------------------------------------------
/doc/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS ?=
7 | SPHINXBUILD ?= sphinx-build
8 | SOURCEDIR = .
9 | BUILDDIR = _build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/doc/analysis.rst:
--------------------------------------------------------------------------------
1 | Analysis
2 | ========
3 |
4 | After using PEtab Select to perform model selection, you may want to operate on all "good" calibrated models.
5 | The PEtab Select Python library provides some methods to help with this. Please request any missing methods.
6 |
7 | See the Python API docs for the :class:`petab_select.Models` class, which provides several helper methods. In particular, :attr:`petab_select.Models.df` can be used
8 | to get a quick overview of all models, as a pandas dataframe.
9 |
10 | Additionally, see the Python API docs for the :mod:`petab_select.analyze` module, which contains methods to subset and group models,
11 | or to compute "weights" (e.g. Akaike weights). A small worked example is included at the end of this page.
12 |
13 | Model hashes
14 | ^^^^^^^^^^^^
15 |
16 | Model hashes are special objects in the library that are generated from model-specific information, which is unique within a single PEtab Select problem.
17 |
18 | This means you can reconstruct a model from its hash. For example, given the model hash ``M1-000``, you can reconstruct the :class:`petab_select.ModelHash` from the string, then reconstruct the :class:`petab_select.Model`.
19 |
20 | .. code-block:: python
21 |
22 | ModelHash.from_hash("M1-000").get_model(petab_select_problem)
23 |
24 | You can use this to get the uncalibrated version of a calibrated model.
25 |
26 | .. code-block:: python
27 |
28 | model.hash.get_model(petab_select_problem)
29 |
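30 | Example: Akaike weights
31 | ^^^^^^^^^^^^^^^^^^^^^^^^
32 |
33 | As a minimal sketch (``calibrated_models.yaml`` is a placeholder filename for your own
34 | collection of calibrated models), weights can also be computed directly from the
35 | criterion values of a :class:`petab_select.Models` object:
36 |
37 | .. code-block:: python
38 |
39 |    import numpy as np
40 |
41 |    import petab_select
42 |    from petab_select import Criterion
43 |
44 |    models = petab_select.Models.from_yaml("calibrated_models.yaml")
45 |
46 |    # Akaike weights: w_i = exp(-delta_i / 2) / sum_j exp(-delta_j / 2),
47 |    # where delta_i is the difference to the smallest AICc value.
48 |    aicc = np.array([model.get_criterion(Criterion.AICC) for model in models])
49 |    delta = aicc - aicc.min()
50 |    weights = np.exp(-delta / 2) / np.exp(-delta / 2).sum()
51 |    print(dict(zip([model.model_id for model in models], weights)))
52 |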
--------------------------------------------------------------------------------
/doc/api.rst:
--------------------------------------------------------------------------------
1 | petab-select Python API
2 | =======================
3 |
4 | .. rubric:: Modules
5 |
6 | .. autosummary::
7 | :toctree: generated
8 |
9 | petab_select
10 | petab_select.analyze
11 | petab_select.candidate_space
12 | petab_select.constants
13 | petab_select.criteria
14 | petab_select.handlers
15 | petab_select.misc
16 | petab_select.model
17 | petab_select.model_space
18 | petab_select.model_subspace
19 | petab_select.petab
20 | petab_select.plot
21 | petab_select.problem
22 | petab_select.ui
23 |
--------------------------------------------------------------------------------
/doc/conf.py:
--------------------------------------------------------------------------------
1 | # Configuration file for the Sphinx documentation builder.
2 | #
3 | # For the full list of built-in configuration values, see the documentation:
4 | # https://www.sphinx-doc.org/en/master/usage/configuration.html
5 | from __future__ import annotations
6 |
7 | import inspect
8 |
9 | import sphinx
10 |
11 | # -- Project information -----------------------------------------------------
12 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
13 |
14 | project = "PEtab Select"
15 | copyright = "2024, The PEtab Select developers"
16 | author = "The PEtab Select developers"
17 |
18 |
19 | # -- General configuration ---------------------------------------------------
20 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
21 |
22 | extensions = [
23 | "readthedocs_ext.readthedocs",
24 | "sphinx.ext.napoleon",
25 | "sphinx.ext.autodoc",
26 | "sphinx.ext.intersphinx",
27 | "sphinx.ext.autosummary",
28 | "sphinx.ext.viewcode",
29 | "sphinx.ext.mathjax",
30 | "nbsphinx",
31 | "IPython.sphinxext.ipython_console_highlighting",
32 | "recommonmark",
33 | "sphinx_autodoc_typehints",
34 | ]
35 |
36 | templates_path = ["_templates"]
37 | exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
38 |
39 |
40 | intersphinx_mapping = {
41 | "petab": (
42 | "https://petab.readthedocs.io/projects/libpetab-python/en/latest/",
43 | None,
44 | ),
45 | "pandas": ("https://pandas.pydata.org/docs/", None),
46 | "numpy": ("https://numpy.org/devdocs/", None),
47 | "python": ("https://docs.python.org/3", None),
48 | }
49 |
50 | autosummary_generate = True
51 | autodoc_default_options = {
52 | "special-members": "__init__",
53 | "inherited-members": True,
54 | }
55 |
56 |
57 | # -- Options for HTML output -------------------------------------------------
58 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
59 |
60 | html_theme = "sphinx_rtd_theme"
61 | html_static_path = ["standard"]
62 | html_logo = "logo/logo-wide.svg"
63 |
64 |
65 | def autodoc_skip_member(app, what, name, obj, skip, options):
66 | """Exclude some objects from the documentation."""
67 | if inspect.isbuiltin(obj):
68 | return True
69 |
70 | # Skip inherited members from builtins
71 | # (skips, for example, all the int/str-derived methods of enums)
72 | if (
73 | objclass := getattr(obj, "__objclass__", None)
74 | ) and objclass.__module__ == "builtins":
75 | return True
76 |
77 | return None
78 |
79 |
80 | def setup(app: sphinx.application.Sphinx):
81 | app.connect("autodoc-skip-member", autodoc_skip_member, priority=0)
82 |
--------------------------------------------------------------------------------
/doc/examples.rst:
--------------------------------------------------------------------------------
1 | Examples
2 | ========
3 |
4 | Various example notebooks.
5 |
6 | .. toctree::
7 | :maxdepth: 1
8 |
9 | examples/example_cli_famos.ipynb
10 | examples/visualization.ipynb
11 | examples/workflow_cli.ipynb
12 | examples/workflow_python.ipynb
13 |
--------------------------------------------------------------------------------
/doc/examples/README.md:
--------------------------------------------------------------------------------
1 | These notebooks need to be run to see the output. Pre-computed output can be viewed in the documentation, at https://petab-select.readthedocs.io/en/stable/examples.html
2 |
--------------------------------------------------------------------------------
/doc/examples/calibrated_models/calibrated_models.yaml:
--------------------------------------------------------------------------------
1 | - criteria:
2 | AICc: 37.97523003111246
3 | NLLH: 17.48761501555623
4 | estimated_parameters:
5 | sigma_x2: 4.462298422134608
6 | iteration: 1
7 | model_hash: M-000
8 | model_id: M-000
9 | model_subspace_id: M
10 | model_subspace_indices:
11 | - 0
12 | - 0
13 | - 0
14 | parameters:
15 | k1: 0.2
16 | k2: 0.1
17 | k3: 0
18 | model_subspace_petab_yaml: petab_problem.yaml
19 | predecessor_model_hash: virtual_initial_model-
20 | - criteria:
21 | AICc: -0.17540608110890332
22 | NLLH: -4.087703040554452
23 | estimated_parameters:
24 | k3: 0.0
25 | sigma_x2: 0.12242920113658338
26 | iteration: 2
27 | model_hash: M-001
28 | model_id: M-001
29 | model_subspace_id: M
30 | model_subspace_indices:
31 | - 0
32 | - 0
33 | - 1
34 | parameters:
35 | k1: 0.2
36 | k2: 0.1
37 | k3: estimate
38 | model_subspace_petab_yaml: petab_problem.yaml
39 | predecessor_model_hash: M-000
40 | - criteria:
41 | AICc: -0.27451438069575573
42 | NLLH: -4.137257190347878
43 | estimated_parameters:
44 | k2: 0.10147824307890803
45 | sigma_x2: 0.12142219599557078
46 | iteration: 2
47 | model_hash: M-010
48 | model_id: M-010
49 | model_subspace_id: M
50 | model_subspace_indices:
51 | - 0
52 | - 1
53 | - 0
54 | parameters:
55 | k1: 0.2
56 | k2: estimate
57 | k3: 0
58 | model_subspace_petab_yaml: petab_problem.yaml
59 | predecessor_model_hash: M-000
60 | - criteria:
61 | AICc: -0.7053270766271886
62 | NLLH: -4.352663538313594
63 | estimated_parameters:
64 | k1: 0.20160925279667963
65 | sigma_x2: 0.11714017664827497
66 | iteration: 2
67 | model_hash: M-100
68 | model_id: M-100
69 | model_subspace_id: M
70 | model_subspace_indices:
71 | - 1
72 | - 0
73 | - 0
74 | parameters:
75 | k1: estimate
76 | k2: 0.1
77 | k3: 0
78 | model_subspace_petab_yaml: petab_problem.yaml
79 | predecessor_model_hash: M-000
80 | - criteria:
81 | AICc: 9.294672923372811
82 | NLLH: -4.352663538313594
83 | estimated_parameters:
84 | k1: 0.20160925279667963
85 | k3: 0.0
86 | sigma_x2: 0.11714017664827497
87 | iteration: 3
88 | model_hash: M-101
89 | model_id: M-101
90 | model_subspace_id: M
91 | model_subspace_indices:
92 | - 1
93 | - 0
94 | - 1
95 | parameters:
96 | k1: estimate
97 | k2: 0.1
98 | k3: estimate
99 | model_subspace_petab_yaml: petab_problem.yaml
100 | predecessor_model_hash: M-100
101 | - criteria:
102 | AICc: 7.8521704398854
103 | NLLH: -5.0739147800573
104 | estimated_parameters:
105 | k1: 0.20924804320838675
106 | k2: 0.0859052351446815
107 | sigma_x2: 0.10386846319370771
108 | iteration: 3
109 | model_hash: M-110
110 | model_id: M-110
111 | model_subspace_id: M
112 | model_subspace_indices:
113 | - 1
114 | - 1
115 | - 0
116 | parameters:
117 | k1: estimate
118 | k2: estimate
119 | k3: 0
120 | model_subspace_petab_yaml: petab_problem.yaml
121 | predecessor_model_hash: M-100
122 | - criteria:
123 | AICc: 35.94352968170024
124 | NLLH: -6.028235159149878
125 | estimated_parameters:
126 | k1: 0.6228488917665873
127 | k2: 0.020189424009226256
128 | k3: 0.0010850434974038557
129 | sigma_x2: 0.08859278245811462
130 | iteration: 4
131 | model_hash: M-111
132 | model_id: M-111
133 | model_subspace_id: M
134 | model_subspace_indices:
135 | - 1
136 | - 1
137 | - 1
138 | parameters:
139 | k1: estimate
140 | k2: estimate
141 | k3: estimate
142 | model_subspace_petab_yaml: petab_problem.yaml
143 | predecessor_model_hash: M-100
144 |
--------------------------------------------------------------------------------
/doc/examples/calibrated_models/conditions.tsv:
--------------------------------------------------------------------------------
1 | conditionId conditionName
2 | model1_data1 condition1
3 |
--------------------------------------------------------------------------------
/doc/examples/calibrated_models/measurements.tsv:
--------------------------------------------------------------------------------
1 | observableId simulationConditionId measurement time noiseParameters
2 | obs_x2 model1_data1 0 0 sigma_x2
3 | obs_x2 model1_data1 0.19421762 1 sigma_x2
4 | obs_x2 model1_data1 0.0484032 5 sigma_x2
5 | obs_x2 model1_data1 0.61288016 10 sigma_x2
6 | obs_x2 model1_data1 4.07930835 30 sigma_x2
7 | obs_x2 model1_data1 10.12008893 60 sigma_x2
8 |
--------------------------------------------------------------------------------
/doc/examples/calibrated_models/model_space.tsv:
--------------------------------------------------------------------------------
1 | model_subspace_id petab_yaml k1 k2 k3
2 | M petab_problem.yaml 0.2;estimate 0.1;estimate 0
3 |
--------------------------------------------------------------------------------
/doc/examples/calibrated_models/observables.tsv:
--------------------------------------------------------------------------------
1 | observableId observableFormula noiseFormula
2 | obs_x2 x2 noiseParameter1_obs_x2
3 |
--------------------------------------------------------------------------------
/doc/examples/calibrated_models/parameters.tsv:
--------------------------------------------------------------------------------
1 | parameterId parameterName parameterScale lowerBound upperBound nominalValue estimate
2 | k1 k_{1} lin 0 1e3 0.2 1
3 | k2 k_{2} lin 0 1e3 0.1 1
4 | k3 k_{3} lin 0 1e3 0 1
5 | sigma_x2 \sigma_{x2} lin 1e-5 1e3 0.15 1
6 |
--------------------------------------------------------------------------------
/doc/examples/calibrated_models/petab_problem.yaml:
--------------------------------------------------------------------------------
1 | format_version: 1
2 | parameter_file: parameters.tsv
3 | problems:
4 | - condition_files:
5 | - conditions.tsv
6 | measurement_files:
7 | - measurements.tsv
8 | observable_files:
9 | - observables.tsv
10 | sbml_files:
11 | - model.xml
12 |
--------------------------------------------------------------------------------
/doc/examples/calibrated_models/petab_select_problem.yaml:
--------------------------------------------------------------------------------
1 | version: beta_1
2 | criterion: AICc
3 | method: forward
4 | model_space_files:
5 | - model_space.tsv
6 |
--------------------------------------------------------------------------------
/doc/examples/example_cli_famos.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "e28947de",
6 | "metadata": {},
7 | "source": [
8 | "# FAMoS in PEtab Select\n",
9 | "\n",
10 | "In this notebook, the FAMoS algorithm [1] is demonstrated. It is designed as an example for tool developers, to help them ensure that they use PEtab Select correctly and that complex methods like FAMoS work as intended.\n",
11 | "\n",
12 | "[1] Gabel M, Hohl T, Imle A, Fackler OT, Graw F (2019) FAMoS: A Flexible and dynamic Algorithm for Model Selection to analyse complex systems dynamics. PLOS Computational Biology 15(8): e1007230. https://doi.org/10.1371/journal.pcbi.1007230"
13 | ]
14 | },
15 | {
16 | "cell_type": "markdown",
17 | "id": "484b7f3c",
18 | "metadata": {},
19 | "source": [
20 | "The model space contains 65536 models. In normal PEtab Select workflows, a calibration tool would take the place of the `example_cli_famos_calibration_tool.py` script. This script emulates a calibration tool: it takes PEtab Select models and assigns criterion values to them, based on previous calibration results for the same models."
21 | ]
22 | },
23 | {
24 | "cell_type": "code",
25 | "execution_count": null,
26 | "id": "1f04dce0",
27 | "metadata": {},
28 | "outputs": [],
29 | "source": [
30 | "# Cleanup the state and candidate models output by a previous run of this notebook\n",
31 | "import shutil\n",
32 | "from pathlib import Path\n",
33 | "\n",
34 | "from example_cli_famos_helpers import (\n",
35 | " parse_summary_to_progress_list,\n",
36 | " petab_select_problem_yaml, # noqa: F401\n",
37 | ")\n",
38 | "\n",
39 | "output_path = Path().resolve() / \"output_famos\"\n",
40 | "output_path_str = str(output_path)\n",
41 | "if output_path.exists():\n",
42 | " shutil.rmtree(output_path_str)\n",
43 | "output_path.mkdir(exist_ok=False, parents=True)"
44 | ]
45 | },
46 | {
47 | "cell_type": "code",
48 | "execution_count": null,
49 | "id": "a81560e6",
50 | "metadata": {},
51 | "outputs": [],
52 | "source": [
53 | "from petab_select import Method\n",
54 | "\n",
55 | "state = str(output_path / \"state.dill\")\n",
56 | "\n",
57 | "# Each iteration of model selection is described as a 2-tuple here.\n",
58 | "# The first value is the model selection method.\n",
59 | "# The second value is the set of parameter indices whose estimation status\n",
60 | "# changed in the best model of this iteration, relative to the previous best model.\n",
61 | "# e.g. `(Method.FORWARD, {3})` states that the best model from a forward move\n",
62 | "# is the model that now estimates the parameter at index 3.\n",
63 | "expected_progress_list = [\n",
64 | " (Method.LATERAL, set()),\n",
65 | " (Method.LATERAL, {4, 15}),\n",
66 | " (Method.LATERAL, {9, 13}),\n",
67 | " (Method.FORWARD, set()),\n",
68 | " (Method.FORWARD, {3}),\n",
69 | " (Method.FORWARD, {11}),\n",
70 | " (Method.BACKWARD, set()),\n",
71 | " (Method.BACKWARD, {6}),\n",
72 | " (Method.BACKWARD, {10}),\n",
73 | " (Method.BACKWARD, {8}),\n",
74 | " (Method.BACKWARD, {14}),\n",
75 | " (Method.BACKWARD, {1}),\n",
76 | " (Method.BACKWARD, {16}),\n",
77 | " (Method.BACKWARD, {4}),\n",
78 | " (Method.FORWARD, set()),\n",
79 | " (Method.LATERAL, set()),\n",
80 | " (Method.MOST_DISTANT, {2, 3, 4, 5, 6, 7, 9, 11, 12, 13, 15}),\n",
81 | " (Method.LATERAL, {16, 7}),\n",
82 | " (Method.LATERAL, {5, 12}),\n",
83 | " (Method.LATERAL, {13, 15}),\n",
84 | " (Method.LATERAL, {1, 6}),\n",
85 | " (Method.FORWARD, set()),\n",
86 | " (Method.FORWARD, {3}),\n",
87 | " (Method.FORWARD, {7}),\n",
88 | " (Method.FORWARD, {2}),\n",
89 | " (Method.FORWARD, {11}),\n",
90 | " (Method.BACKWARD, set()),\n",
91 | " (Method.BACKWARD, {7}),\n",
92 | " (Method.BACKWARD, {16}),\n",
93 | " (Method.BACKWARD, {4}),\n",
94 | " (Method.FORWARD, set()),\n",
95 | " (Method.LATERAL, set()),\n",
96 | " (Method.LATERAL, {9, 15}),\n",
97 | " (Method.FORWARD, set()),\n",
98 | " (Method.BACKWARD, set()),\n",
99 | " (Method.LATERAL, set()),\n",
100 | "]"
101 | ]
102 | },
103 | {
104 | "cell_type": "markdown",
105 | "id": "7202f6c6",
106 | "metadata": {},
107 | "source": [
108 | "The predecessor model is some model from the model space, and is defined in the PEtab Select problem YAML file."
109 | ]
110 | },
111 | {
112 | "cell_type": "code",
113 | "execution_count": null,
114 | "id": "bb1a5144",
115 | "metadata": {},
116 | "outputs": [],
117 | "source": [
118 | "%%bash -s \"$petab_select_problem_yaml\" \"$output_path_str\"\n",
119 | "petab_select_problem_yaml=$1\n",
120 | "output_path_str=$2\n",
121 | "\n",
122 | "problem=$petab_select_problem_yaml\n",
123 | "state=$output_path_str/state.dill\n",
124 | "\n",
125 | "file_uncalibrated_models=$output_path_str/uncalibrated_models.yaml\n",
126 | "file_calibrated_models=$output_path_str/calibrated_models.yaml\n",
127 | "file_models=$output_path_str/iteration_models.yaml\n",
128 | "file_metadata=$output_path_str/metadata.yaml\n",
129 | "\n",
130 | "for i in {1..40}\n",
131 | "do\n",
132 | " echo \"Executing iteration $i\"\n",
133 | "\n",
134 | " petab_select start_iteration \\\n",
135 | " --problem=\"$problem\" \\\n",
136 | " --state=\"$state\" \\\n",
137 | " --output-uncalibrated-models=\"$file_uncalibrated_models\" \\\n",
138 | " --relative-paths\n",
139 | " \n",
140 | " # Replace this line with a tool that calibrates the models in `$file_uncalibrated_models`\n",
141 | " # and stores the calibrated models in `$file_calibrated_models`.\n",
142 | " # This script also changes model IDs for easier analysis in this example.\n",
143 | " python example_cli_famos_calibration_tool.py $file_uncalibrated_models $file_calibrated_models\n",
144 | " \n",
145 | " petab_select end_iteration \\\n",
146 | " --state=\"$state\" \\\n",
147 | " --calibrated-models=\"$file_calibrated_models\" \\\n",
148 | " --output-models=\"$file_models\" \\\n",
149 | " --output-metadata=\"$file_metadata\" \\\n",
150 | " --relative-paths\n",
151 | " \n",
152 | " terminate=$(cat $file_metadata | grep terminate | awk '{print $NF}')\n",
153 | " if [ \"$terminate\" = \"true\" ]; then\n",
154 | " echo \"Model selection has terminated.\"\n",
155 | " break\n",
156 | " fi\n",
157 | "done"
158 | ]
159 | },
160 | {
161 | "cell_type": "code",
162 | "execution_count": null,
163 | "id": "93caf071",
164 | "metadata": {},
165 | "outputs": [],
166 | "source": [
167 | "progress_list = parse_summary_to_progress_list(output_path / \"summary.tsv\")"
168 | ]
169 | },
170 | {
171 | "cell_type": "code",
172 | "execution_count": null,
173 | "id": "cb61d0f7",
174 | "metadata": {},
175 | "outputs": [],
176 | "source": [
177 | "assert progress_list == expected_progress_list"
178 | ]
179 | }
180 | ],
181 | "metadata": {
182 | "kernelspec": {
183 | "display_name": "Python 3 (ipykernel)",
184 | "language": "python",
185 | "name": "python3"
186 | },
187 | "language_info": {
188 | "codemirror_mode": {
189 | "name": "ipython",
190 | "version": 3
191 | },
192 | "file_extension": ".py",
193 | "mimetype": "text/x-python",
194 | "name": "python",
195 | "nbconvert_exporter": "python",
196 | "pygments_lexer": "ipython3",
197 | "version": "3.12.3"
198 | }
199 | },
200 | "nbformat": 4,
201 | "nbformat_minor": 5
202 | }
203 |
--------------------------------------------------------------------------------
/doc/examples/example_cli_famos_calibration_tool.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | from example_cli_famos_helpers import calibrate
4 |
5 | import petab_select
6 |
7 | models_yaml = sys.argv[1]
8 | calibrated_models_yaml = sys.argv[2]
9 |
10 | models = petab_select.Models.from_yaml(models_yaml)
11 | predecessor_model_hashes = set()
12 | for model in models:
13 | calibrate(model=model)
14 | predecessor_model_hashes |= {model.predecessor_model_hash}
15 | models.to_yaml(filename=calibrated_models_yaml)
16 |
17 | if len(predecessor_model_hashes) == 0:
18 | pass
19 | elif len(predecessor_model_hashes) == 1:
20 | (predecessor_model_hash,) = predecessor_model_hashes
21 | else:
22 | print(
23 | "The models of this iteration somehow have different predecessor models.\n"
24 | + "\n".join(predecessor_model_hashes)
25 | )
26 |
--------------------------------------------------------------------------------
/doc/examples/example_cli_famos_helpers.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | import pandas as pd
4 | from more_itertools import one
5 |
6 | import petab_select
7 | from petab_select import MODEL_HASH, Criterion, Method, Model
8 |
9 | input_path = (
10 | Path(__file__).resolve().parent.parent.parent
11 | / "test"
12 | / "candidate_space"
13 | / "input"
14 | / "famos_synthetic"
15 | )
16 | petab_select_problem_yaml = str(
17 | input_path / "select" / "FAMoS_2019_petab_select_problem.yaml"
18 | )
19 | expected_criterion_values = dict(
20 | pd.read_csv(
21 | input_path / "test_files" / "calibration_results.tsv", sep="\t"
22 | ).set_index(MODEL_HASH)[Criterion.AICC]
23 | )
24 |
25 | petab_select_problem = petab_select.Problem.from_yaml(
26 | petab_select_problem_yaml
27 | )
28 | criterion = petab_select_problem.criterion
29 |
30 |
31 | def calibrate(
32 | model: Model,
33 | criterion=criterion,
34 | expected_criterion_values=expected_criterion_values,
35 | ) -> None:
36 | """Set the criterion value for a model."""
37 | model.set_criterion(
38 | criterion=criterion,
39 | value=float(expected_criterion_values[model.get_hash()]),
40 | )
41 |
42 |
43 | def parse_summary_to_progress_list(
44 | summary_tsv: str,
45 | ) -> list[tuple[Method, set]]:
46 | """Get progress information from the summary file."""
47 | df_raw = pd.read_csv(summary_tsv, sep="\t")
48 | df = df_raw.loc[~pd.isnull(df_raw["predecessor change"])]
49 |
50 | parameter_list = list(
51 | one(
52 | petab_select_problem.model_space.model_subspaces.values()
53 | ).parameters
54 | )
55 |
56 | progress_list = []
57 |
58 | for index, (_, row) in enumerate(df.iterrows()):
59 | method = Method(row["method"])
60 |
61 | model = {
62 | 1 + parameter_list.index(parameter_id)
63 | for parameter_id in eval(row["current model"])
64 | }
65 | if index == 0:
66 | model0 = model
67 |
68 | difference = model.symmetric_difference(model0)
69 | progress_list.append((method, difference))
70 | model0 = model
71 |
72 | return progress_list
73 |
--------------------------------------------------------------------------------
/doc/examples/model_selection/calibrated_M1_4.yaml:
--------------------------------------------------------------------------------
1 | model_subspace_id: M1_4
2 | model_subspace_indices:
3 | - 0
4 | - 0
5 | - 0
6 | criteria:
7 | AIC: 15
8 | model_hash: M1_4-000
9 | model_subspace_petab_yaml: ../model_selection/petab_problem.yaml
10 | estimated_parameters:
11 | k2: 0.15
12 | k3: 0.0
13 | iteration: 1
14 | model_id: M1_4-000
15 | parameters:
16 | k1: 0
17 | k2: estimate
18 | k3: estimate
19 | predecessor_model_hash: M1_2-000
20 |
--------------------------------------------------------------------------------
/doc/examples/model_selection/calibrated_M1_7.yaml:
--------------------------------------------------------------------------------
1 | model_subspace_id: M1_7
2 | model_subspace_indices:
3 | - 0
4 | - 0
5 | - 0
6 | criteria:
7 | AIC: 20
8 | model_hash: M1_7-000
9 | model_subspace_petab_yaml: ../model_selection/petab_problem.yaml
10 | estimated_parameters:
11 | k1: 0.25
12 | k2: 0.1
13 | k3: 0.0
14 | iteration: 2
15 | model_id: M1_7-000
16 | parameters:
17 | k1: estimate
18 | k2: estimate
19 | k3: estimate
20 | predecessor_model_hash: M1_4-000
21 |
--------------------------------------------------------------------------------
/doc/examples/model_selection/calibrated_models_1.yaml:
--------------------------------------------------------------------------------
1 | - model_subspace_id: M1_0
2 | model_subspace_indices:
3 | - 0
4 | - 0
5 | - 0
6 | criteria:
7 | AIC: 180
8 | model_hash: M1_0-000
9 | model_subspace_petab_yaml: ../model_selection/petab_problem.yaml
10 | estimated_parameters: {}
11 | iteration: 1
12 | model_id: M1_0-000
13 | parameters:
14 | k1: 0
15 | k2: 0
16 | k3: 0
17 | predecessor_model_hash: virtual_initial_model-
18 | - model_subspace_id: M1_1
19 | model_subspace_indices:
20 | - 0
21 | - 0
22 | - 0
23 | criteria:
24 | AIC: 100
25 | model_hash: M1_1-000
26 | model_subspace_petab_yaml: ../model_selection/petab_problem.yaml
27 | estimated_parameters: {}
28 | iteration: 1
29 | model_id: M1_1-000
30 | parameters:
31 | k1: 0.2
32 | k2: 0.1
33 | k3: estimate
34 | predecessor_model_hash: virtual_initial_model-
35 |
36 | - model_subspace_id: M1_2
37 | model_subspace_indices:
38 | - 0
39 | - 0
40 | - 0
41 | criteria:
42 | AIC: 50
43 | model_hash: M1_2-000
44 | model_subspace_petab_yaml: ../model_selection/petab_problem.yaml
45 | estimated_parameters: {}
46 | iteration: 1
47 | model_id: M1_2-000
48 | parameters:
49 | k1: 0.2
50 | k2: estimate
51 | k3: 0
52 | predecessor_model_hash: virtual_initial_model-
53 |
--------------------------------------------------------------------------------
/doc/examples/model_selection/conditions.tsv:
--------------------------------------------------------------------------------
1 | conditionId conditionName
2 | model1_data1 condition1
3 |
--------------------------------------------------------------------------------
/doc/examples/model_selection/measurements.tsv:
--------------------------------------------------------------------------------
1 | observableId simulationConditionId measurement time noiseParameters
2 | obs_x2 model1_data1 0 0 sigma_x2
3 | obs_x2 model1_data1 0.19421762 1 sigma_x2
4 | obs_x2 model1_data1 0.0484032 5 sigma_x2
5 | obs_x2 model1_data1 0.61288016 10 sigma_x2
6 | obs_x2 model1_data1 4.07930835 30 sigma_x2
7 | obs_x2 model1_data1 10.12008893 60 sigma_x2
8 |
--------------------------------------------------------------------------------
/doc/examples/model_selection/model_space.tsv:
--------------------------------------------------------------------------------
1 | model_subspace_id model_subspace_petab_yaml k1 k2 k3
2 | M1_0 petab_problem.yaml 0 0 0
3 | M1_1 petab_problem.yaml 0.2 0.1 estimate
4 | M1_2 petab_problem.yaml 0.2 estimate 0
5 | M1_3 petab_problem.yaml estimate 0.1 0
6 | M1_4 petab_problem.yaml 0.2 estimate estimate
7 | M1_5 petab_problem.yaml estimate 0.1 estimate
8 | M1_6 petab_problem.yaml estimate estimate 0
9 | M1_7 petab_problem.yaml estimate estimate estimate
10 |
--------------------------------------------------------------------------------
/doc/examples/model_selection/observables.tsv:
--------------------------------------------------------------------------------
1 | observableId observableFormula noiseFormula
2 | obs_x2 x2+k4+k5 noiseParameter1_obs_x2
3 |
--------------------------------------------------------------------------------
/doc/examples/model_selection/parameters.tsv:
--------------------------------------------------------------------------------
1 | parameterId parameterName parameterScale lowerBound upperBound nominalValue estimate
2 | k1 k_{1} lin 0 1e3 0.2 1
3 | k2 k_{2} lin 0 1e3 0.1 1
4 | k3 k_{3} lin 0 1e3 0 1
5 | k4 k_{4} lin 0 1e3 0 0
6 | k5 k_{5} lin 0 1e3 0 0
7 | sigma_x2 \sigma_{x2} lin 1e-5 1e3 0.15 1
8 |
--------------------------------------------------------------------------------
/doc/examples/model_selection/petab_problem.yaml:
--------------------------------------------------------------------------------
1 | format_version: 1
2 | parameter_file: parameters.tsv
3 | problems:
4 | - condition_files:
5 | - conditions.tsv
6 | measurement_files:
7 | - measurements.tsv
8 | observable_files:
9 | - observables.tsv
10 | sbml_files:
11 | - model.xml
12 |
--------------------------------------------------------------------------------
/doc/examples/model_selection/petab_select_problem.yaml:
--------------------------------------------------------------------------------
1 | format_version: 1.0.0
2 | criterion: AIC
3 | method: forward
4 | model_space_files:
5 | - model_space.tsv
6 | candidate_space_arguments: {}
7 |
--------------------------------------------------------------------------------
/doc/examples/visualization.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "50d3669e",
6 | "metadata": {},
7 | "source": [
8 | "# Visualization gallery"
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "id": "c01e724e-1d4e-41e7-bb76-742f354fd15f",
14 | "metadata": {},
15 | "source": [
16 | "The visualization methods implemented in PEtab Select are demonstrated here. These methods visualize the output of a model selection task, so the input is generally a list of already-calibrated models.\n",
17 | "\n",
18 | "All dependencies for these plots can be installed with `pip install petab_select[plot]`.\n",
19 | "\n",
20 | "In this notebook, calibrated models that were previously saved to disk with the `to_yaml` method of a `Models` object are loaded and used as input. They are the result of a forward selection with the problem provided in `calibrated_models`. Note that a `Models` object is expected here; if you have a list of models `model_list`, simply use `models = Models(model_list)`."
21 | ]
22 | },
23 | {
24 | "cell_type": "code",
25 | "execution_count": null,
26 | "id": "ca6ce5b4",
27 | "metadata": {},
28 | "outputs": [],
29 | "source": [
30 | "import matplotlib\n",
31 | "\n",
32 | "import petab_select\n",
33 | "import petab_select.plot\n",
34 | "\n",
35 | "models = petab_select.Models.from_yaml(\n",
36 | " \"calibrated_models/calibrated_models.yaml\"\n",
37 | ")"
38 | ]
39 | },
40 | {
41 | "cell_type": "code",
42 | "execution_count": null,
43 | "id": "54532b75-53e4-4670-8e64-21e7adda0c0e",
44 | "metadata": {},
45 | "outputs": [],
46 | "source": [
47 | "models.df.drop(\n",
48 | " columns=[petab_select.Criterion.AIC, petab_select.Criterion.BIC]\n",
49 | ").style.background_gradient(\n",
50 | " cmap=matplotlib.colormaps.get_cmap(\"summer\"),\n",
51 | " subset=[petab_select.Criterion.AICC],\n",
52 | ")"
53 | ]
54 | },
55 | {
56 | "cell_type": "markdown",
57 | "id": "aaeb0606",
58 | "metadata": {},
59 | "source": [
60 | "To use the plotting methods, we first need to set up an object that contains information common to multiple plotting methods. This can include the models, custom colors and labels, and the criterion."
61 | ]
62 | },
63 | {
64 | "cell_type": "code",
65 | "execution_count": null,
66 | "id": "09c9df1d",
67 | "metadata": {},
68 | "outputs": [],
69 | "source": [
70 | "# Custom colors for some models\n",
71 | "colors = {\n",
72 | " \"M-000\": \"lightgreen\",\n",
73 | " \"M-001\": \"lightgreen\",\n",
74 | "}\n",
75 | "\n",
76 | "plot_data = petab_select.plot.PlotData(\n",
77 | " models=models,\n",
78 | " criterion=petab_select.Criterion.AICC,\n",
79 | " colors=colors,\n",
80 | " relative_criterion=True,\n",
81 | ")\n",
82 | "\n",
83 | "# Change default color\n",
84 | "petab_select.plot.DEFAULT_NODE_COLOR = \"darkgray\""
85 | ]
86 | },
87 | {
88 | "cell_type": "markdown",
89 | "id": "6c73e0bc-5bf8-4c03-a54b-f19ced731322",
90 | "metadata": {},
91 | "source": [
92 | "## UpSet plot\n",
93 | "\n",
94 | "This shows models ordered by criterion, with their parameters directly below the bars.\n",
95 | "\n",
96 | "A black dot indicates that the parameter (e.g. `k2`) is estimated in the model (e.g. the first bar is a model with `k1` and `sigma_x2` estimated)."
97 | ]
98 | },
99 | {
100 | "cell_type": "code",
101 | "execution_count": null,
102 | "id": "96d99572-f74d-4e25-8237-0aa158eb29f6",
103 | "metadata": {},
104 | "outputs": [],
105 | "source": [
106 | "petab_select.plot.upset(plot_data=plot_data);"
107 | ]
108 | },
109 | {
110 | "cell_type": "markdown",
111 | "id": "32de6556",
112 | "metadata": {},
113 | "source": [
114 | "## Best model from each iteration\n",
115 | "\n",
116 | "This shows strict improvements in the criterion, and the corresponding model, across all iterations of model selection.\n",
117 | "\n",
118 | "Since there were no improvements after `M-100`, no other iterations or models are shown."
119 | ]
120 | },
121 | {
122 | "cell_type": "code",
123 | "execution_count": null,
124 | "id": "56b4a27b",
125 | "metadata": {},
126 | "outputs": [],
127 | "source": [
128 | "petab_select.plot.line_best_by_iteration(plot_data=plot_data);"
129 | ]
130 | },
131 | {
132 | "cell_type": "markdown",
133 | "id": "8e5243e0",
134 | "metadata": {},
135 | "source": [
136 | "## Selection history trajectory\n",
137 | "\n",
138 | "This shows the relationship between models across iterations. For example, `M-000` was the predecessor model to `M-001`, `M-010`, and `M-100`."
139 | ]
140 | },
141 | {
142 | "cell_type": "code",
143 | "execution_count": null,
144 | "id": "862a78ef",
145 | "metadata": {},
146 | "outputs": [],
147 | "source": [
148 | "# Add the relative criterion value to each label\n",
149 | "plot_data.augment_labels(criterion=True)\n",
150 | "petab_select.plot.graph_history(plot_data=plot_data)\n",
151 | "# Reset the labels (remove the relative criterion)\n",
152 | "plot_data.augment_labels()"
153 | ]
154 | },
155 | {
156 | "cell_type": "markdown",
157 | "id": "d87e75ab",
158 | "metadata": {},
159 | "source": [
160 | "## Criterion values of each model\n",
161 | "\n",
162 | "This shows the criterion value of every calibrated model."
163 | ]
164 | },
165 | {
166 | "cell_type": "code",
167 | "execution_count": null,
168 | "id": "bce41584",
169 | "metadata": {},
170 | "outputs": [],
171 | "source": [
172 | "petab_select.plot.bar_criterion_vs_models(plot_data=plot_data);"
173 | ]
174 | },
175 | {
176 | "cell_type": "markdown",
177 | "id": "d9c2d487",
178 | "metadata": {},
179 | "source": [
180 | "## Criterion values vs. number of estimated parameters\n",
181 | "\n",
182 | "This shows all calibrated models.\n",
183 | "\n",
184 | "In this example, models with 2 estimated parameters tend to perform best. This is also seen in the UpSet plot above.\n",
185 | "\n",
186 | "Jitter is added to distinguish models with the same number of parameters and similar criterion values, according to the optional `max_jitter` argument."
187 | ]
188 | },
189 | {
190 | "cell_type": "code",
191 | "execution_count": null,
192 | "id": "824e2e6a",
193 | "metadata": {},
194 | "outputs": [],
195 | "source": [
196 | "petab_select.plot.scatter_criterion_vs_n_estimated(\n",
197 | " plot_data=plot_data,\n",
198 | " # Uncomment to turn off jitter.\n",
199 | " # max_jitter=0,\n",
200 | ");"
201 | ]
202 | },
203 | {
204 | "cell_type": "markdown",
205 | "id": "8dc7e142",
206 | "metadata": {},
207 | "source": [
208 | "## History as layers in a hierarchical graph\n",
209 | "\n",
210 | "This shows the relative change in parameters of each model, compared to its predecessor model.\n",
211 | "\n",
212 | "Each column is an iteration."
213 | ]
214 | },
215 | {
216 | "cell_type": "code",
217 | "execution_count": null,
218 | "id": "21157e4d-b2ba-4cb1-95f6-e14052c86959",
219 | "metadata": {},
220 | "outputs": [],
221 | "source": [
222 | "# # Customize the colors\n",
223 | "# criterion_values = [model.get_criterion(petab_select.Criterion.AICC) for model in models]\n",
224 | "# norm = matplotlib.colors.Normalize(\n",
225 | "# vmin=min(criterion_values),\n",
226 | "# vmax=max(criterion_values),\n",
227 | "# )\n",
228 | "# cmap = matplotlib.colors.LinearSegmentedColormap.from_list(\"\", [\"green\",\"lightgreen\"])\n",
229 | "# colorbar_mappable = matplotlib.cm.ScalarMappable(norm=norm, cmap=cmap)\n",
230 | "\n",
231 | "# Augment labels with the changes in parameters of each model, compared to their predecessor model\n",
232 | "plot_data.augment_labels(added_parameters=True, removed_parameters=True)\n",
233 | "\n",
234 | "petab_select.plot.graph_iteration_layers(\n",
235 | " plot_data=plot_data,\n",
236 | " draw_networkx_kwargs={\n",
237 | " \"arrowstyle\": \"-|>\",\n",
238 | " \"node_shape\": \"s\",\n",
239 | " \"node_size\": 2500,\n",
240 | " \"edgecolors\": \"k\",\n",
241 | " },\n",
242 | " # colorbar_mappable=colorbar_mappable,\n",
243 | ");"
244 | ]
245 | }
246 | ],
247 | "metadata": {
248 | "kernelspec": {
249 | "display_name": "Python 3 (ipykernel)",
250 | "language": "python",
251 | "name": "python3"
252 | },
253 | "language_info": {
254 | "codemirror_mode": {
255 | "name": "ipython",
256 | "version": 3
257 | },
258 | "file_extension": ".py",
259 | "mimetype": "text/x-python",
260 | "name": "python",
261 | "nbconvert_exporter": "python",
262 | "pygments_lexer": "ipython3",
263 | "version": "3.12.3"
264 | }
265 | },
266 | "nbformat": 4,
267 | "nbformat_minor": 5
268 | }
269 |
--------------------------------------------------------------------------------
/doc/index.rst:
--------------------------------------------------------------------------------
1 | .. petab-select documentation master file, created by
2 | sphinx-quickstart on Mon Oct 23 09:01:31 2023.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 | Welcome to PEtab Select's documentation!
7 | ========================================
8 |
9 | PEtab Select brings
10 | `model selection <https://en.wikipedia.org/wiki/Model_selection>`_ to
11 | `PEtab <https://petab.readthedocs.io>`_. PEtab Select comprises file
12 | formats, a Python library, and a command line interface.
13 |
14 | Model selection is the process of choosing the best model from a set of
15 | candidate models. PEtab Select provides a standardized and compact way to
16 | specify the candidate model space, and implements a number of model selection
17 | algorithms and criteria.
18 |
19 | Supported model selection algorithms:
20 |
21 | * brute force
22 | * `forward selection <https://en.wikipedia.org/wiki/Stepwise_regression#Main_approaches>`_
23 | * `backward selection <https://en.wikipedia.org/wiki/Stepwise_regression#Main_approaches>`_
24 | * `FAMoS <https://doi.org/10.1371/journal.pcbi.1007230>`_
25 |
26 | Supported model selection criteria:
27 |
28 | * (`corrected <https://en.wikipedia.org/wiki/Akaike_information_criterion#Modification_for_small_sample_size>`_)
29 | `Akaike Information Criterion <https://en.wikipedia.org/wiki/Akaike_information_criterion>`_ (AIC / AICc)
30 | * `Bayesian Information Criterion <https://en.wikipedia.org/wiki/Bayesian_information_criterion>`_ (BIC)
31 |
32 | Model calibration is performed outside of PEtab Select. For example,
33 | PEtab Select is well-integrated with:
34 |
35 | * `BasiCO <https://basico.readthedocs.io>`_
36 | (`example `__)
37 | * `PEtab.jl <https://sebapersson.github.io/PEtab.jl>`_
38 | (`example `__)
39 | * `pyPESTO <https://pypesto.readthedocs.io>`_
40 | (`example `__)
41 |
42 | Other model calibration tools can easily be integrated using the provided
43 | Python library or command line interface.
44 |
45 | Installation
46 | ------------
47 |
48 | The Python 3 package provides both the Python API and the command-line
49 | interface (CLI), and can be installed from PyPI with:
50 |
51 | .. code-block:: bash
52 |
53 | pip install petab-select
54 |
55 |
56 | .. toctree::
57 | :maxdepth: 2
58 | :caption: Contents:
59 |
60 | problem_definition
61 | examples
62 | analysis
63 | Test Suite <test_suite>
64 | api
65 |
66 |
67 | Indices and tables
68 | ==================
69 |
70 | * :ref:`genindex`
71 | * :ref:`modindex`
72 | * :ref:`search`
73 |
--------------------------------------------------------------------------------
/doc/logo/editable/README.md:
--------------------------------------------------------------------------------
1 | The text in these copies is still text. In the copies in the parent directory, the text is converted to paths (e.g. `Path -> Object to Path` in Inkscape), for consistent rendering on systems that don't have the fonts installed.
2 |
--------------------------------------------------------------------------------
/doc/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=.
11 | set BUILDDIR=_build
12 |
13 | %SPHINXBUILD% >NUL 2>NUL
14 | if errorlevel 9009 (
15 | echo.
16 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
17 | echo.installed, then set the SPHINXBUILD environment variable to point
18 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
19 | echo.may add the Sphinx directory to PATH.
20 | echo.
21 | echo.If you don't have Sphinx installed, grab it from
22 | echo.https://www.sphinx-doc.org/
23 | exit /b 1
24 | )
25 |
26 | if "%1" == "" goto help
27 |
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
29 | goto end
30 |
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
33 |
34 | :end
35 | popd
36 |
--------------------------------------------------------------------------------
/doc/problem_definition.rst:
--------------------------------------------------------------------------------
1 | Problem definition and file formats
2 | ===================================
3 |
4 | Model selection problems for PEtab Select are defined by the following files:
5 |
6 | #. a general description of the model selection problem,
7 | #. a specification of the model space, and
8 | #. (optionally) a specification of the initial candidate model.
9 |
10 | The different file formats are described below. The YAML file formats
11 | come with a YAML-formatted JSON schema, such that these files can be
12 | easily worked with independently of the PEtab Select library.
13 |
14 | 1. Selection problem
15 | --------------------
16 |
17 | A YAML file with a description of the model selection problem.
18 |
19 | .. code-block:: yaml
20 |
21 | format_version: # string.
22 | criterion: # string.
23 | method: # string.
24 | model_space_files: # list[string]. Filenames.
25 |      candidate_space_arguments: # dict (optional). Method-specific arguments.
26 |
27 | - ``format_version``: The version of the model selection extension format
28 | (e.g. ``1``)
29 | - ``criterion``: The criterion by which models should be compared
30 | (e.g. ``AIC``)
31 | - ``method``: The method by which model candidates should be generated
32 | (e.g. ``forward``)
33 | - ``model_space_files``: The filenames of model space files.
34 | - ``candidate_space_arguments``: Additional arguments used to generate
35 | candidate models during model selection. For example, an initial candidate
36 | model can be specified with the following code, where
37 |   ``predecessor_model.yaml`` is a valid :ref:`model file <section-model-yaml>`. Additional arguments are
38 |   described in the documentation of the ``CandidateSpace`` class, and an example is provided in
39 | `test case 0009 `_.
40 |
41 | .. code-block:: yaml
42 |
43 | candidate_space_arguments:
44 | predecessor_model: predecessor_model.yaml
45 |
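A complete problem file might then look like the following (a hypothetical
example; the filenames are placeholders):

.. code-block:: yaml

   format_version: 1.0.0
   criterion: AIC
   method: forward
   model_space_files:
   - model_space.tsv
   candidate_space_arguments:
     predecessor_model: predecessor_model.yaml
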
46 | Schema
47 | ^^^^^^
48 |
49 | The schema is provided as `YAML-formatted JSON schema <_static/problem.yaml>`_, which enables easy validation with various third-party tools.
50 |
51 | .. literalinclude:: standard/problem.yaml
52 | :language: yaml
53 |
54 | 2. Model space
55 | --------------
56 |
57 | A TSV file with candidate models, in compressed or uncompressed format.
58 | Each row defines a model subspace, by specifying value(s) that each parameter
59 | can take. The models in a model subspace are all combinations of values across
60 | all parameters.
61 |
62 | .. list-table::
63 | :header-rows: 1
64 |
65 | * - ``model_subspace_id``
66 | - ``model_subspace_petab_yaml``
67 | - ``parameter_id_1``
68 | - ...
69 | - ``parameter_id_n``
70 | * - (unique) [string]
71 | - [string]
72 | - (``;``-delimited list) [string/float]
73 | - ...
74 | - ...
75 |
76 | - ``model_subspace_id``: An ID for the model subspace.
77 | - ``model_subspace_petab_yaml``: The YAML filename of the PEtab problem that serves as the basis of all
78 | models in this subspace.
79 | - ``parameter_id_1`` ... ``parameter_id_n``: Specify the values that a
80 | parameter can take in the model subspace. For example, this could be:
81 |
82 | - a single value
83 |
84 | - ``0.0``
85 | - ``1.0``
86 | - ``estimate``
87 |
88 | - one of several possible values, as a ``;``-delimited list
89 |
90 | - ``0.0;1.1;estimate`` (the parameter can take the values ``0.0`` or
91 | ``1.1``, or be estimated)
92 |
93 | Example of concise specification
94 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
95 |
96 | Using the ``;``-delimited list format, a model subspace that has two parameters
97 | (``p1, p2``) and six models:
98 |
99 | - ``p1:=0, p2:=10``
100 | - ``p1:=0, p2:=20``
101 | - ``p1:=0, p2:=estimate``
102 | - ``p1:=estimate, p2:=10``
103 | - ``p1:=estimate, p2:=20``
104 | - ``p1:=estimate, p2:=estimate``
105 |
106 | can be specified like
107 |
108 | .. list-table::
109 | :header-rows: 1
110 |
111 | * - model_subspace_id
112 | - model_subspace_petab_yaml
113 | - p1
114 | - p2
115 | * - subspace1
116 | - petab_problem.yaml
117 | - 0;estimate
118 | - 10;20;estimate
119 |
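In the TSV file itself, this corresponds to a single row (columns are
tab-separated in the actual file; whitespace is aligned here for readability):

.. code-block:: text

   model_subspace_id    model_subspace_petab_yaml    p1            p2
   subspace1            petab_problem.yaml           0;estimate    10;20;estimate
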
120 | .. _section-model-yaml:
121 |
122 | 3. Model(s) (Predecessor models / model interchange / report)
123 | -------------------------------------------------------------
124 |
125 | - *Predecessor models* are used to initialize a compatible model selection
126 | method.
127 | - *Model interchange* refers to the format used to transfer model information
128 | between PEtab Select and a PEtab-compatible calibration tool, during the
129 | model selection process.
130 | - *Report* refers to the final results of the model selection process, which
131 | may include calibration results from any calibrated models, or just the
132 | selected model.
133 |
134 | Here, the format for a single model is shown. Multiple models can be specified
135 | as a YAML list of the same format. Some keys that are generally optional are
136 | required in certain contexts (for example, model comparison requires ``criteria``).
137 |
138 | Brief format description
139 | ^^^^^^^^^^^^^^^^^^^^^^^^
140 |
141 |
142 | .. code-block:: yaml
143 |
144 | model_subspace_id: # str (required).
145 | model_subspace_indices: # list[int] (required).
146 | criteria: # dict[str, float] (optional). Criterion ID => criterion value.
147 | model_hash: # str (optional).
148 | model_subspace_petab_yaml: # str (required).
149 | estimated_parameters: # dict[str, float] (optional). Parameter ID => parameter value.
150 | iteration: # int (optional).
151 | model_id: # str (optional).
152 | parameters: # dict[str, float | int | "estimate"] (required). Parameter ID => parameter value or "estimate".
153 | predecessor_model_hash: # str (optional).
154 |
155 | - ``model_subspace_id``: Same as in the model space files.
156 | - ``model_subspace_indices``: The indices that locate this model in its model subspace.
157 | - ``criteria``: At least the value of the criterion by which model selection was performed; optionally, other criterion values too.
158 | - ``model_hash``: The model hash, generated by the PEtab Select library. The format is ``[MODEL_SUBSPACE_ID]-[MODEL_SUBSPACE_INDICES_HASH]``. If all parameters in the model are defined like ``0;estimate``, then the hash is a string of ``1`` and ``0``, for parameters that are estimated or not, respectively.
159 | - ``model_subspace_petab_yaml``: Same as in model space files.
160 | - ``estimated_parameters``: Parameter estimates, including all estimated parameters that are not in the model selection problem; i.e., parameters that are set to be estimated in the model subspace PEtab problem but don't appear in the column header of the model space file.
161 | - ``iteration``: The iteration of model selection in which this model appeared.
162 | - ``model_id``: The model ID.
163 | - ``parameters``: The parameter combination from the model space file that defines this model (either values or ``"estimate"``). Not the calibrated values, which are in ``estimated_parameters``!
164 | - ``predecessor_model_hash``: The hash of the model that preceded this model during the model selection process. Will be ``virtual_initial_model-`` if the model had no predecessor model.
165 |
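For illustration, a calibrated model from the subspace in the previous section
might be reported as follows (a hypothetical example; all values are
placeholders):

.. code-block:: yaml

   model_subspace_id: subspace1
   model_subspace_indices:
   - 1
   - 0
   criteria:
     AIC: 123.4
   model_subspace_petab_yaml: petab_problem.yaml
   estimated_parameters:
     p1: 0.2
   iteration: 1
   model_id: subspace1_model
   parameters:
     p1: estimate
     p2: 10
   predecessor_model_hash: virtual_initial_model-
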
166 | Schema
167 | ^^^^^^
168 |
169 | The schemas are provided as YAML-formatted JSON schemas, enabling easy validation with various third-party tools. Schemas are provided for:
170 |
171 | - `a single model <_static/model.yaml>`_, and
172 | - `a list of models <_static/models.yaml>`_, which is simply a YAML list of the single model format.
173 |
174 | Below is the schema for a single model.
175 |
176 | .. literalinclude:: standard/model.yaml
177 | :language: yaml
178 |
--------------------------------------------------------------------------------
/doc/standard/make_schemas.py:
--------------------------------------------------------------------------------
1 | from petab_select.model import ModelStandard
2 | from petab_select.models import ModelsStandard
3 | from petab_select.problem import ProblemStandard
4 |
5 | ModelStandard.save_schema("model.yaml")
6 | ModelsStandard.save_schema("models.yaml")
7 | ProblemStandard.save_schema("problem.yaml")
8 |
--------------------------------------------------------------------------------
/doc/standard/model.yaml:
--------------------------------------------------------------------------------
1 | $defs:
2 | ModelHash:
3 | type: string
4 | description: A model.
5 | properties:
6 | model_subspace_id:
7 | title: Model Subspace Id
8 | type: string
9 | model_subspace_indices:
10 | items:
11 | type: integer
12 | title: Model Subspace Indices
13 | type: array
14 | criteria:
15 | additionalProperties:
16 | type: number
17 | title: Criteria
18 | type: object
19 | model_hash:
20 | $ref: '#/$defs/ModelHash'
21 | default: null
22 | model_subspace_petab_yaml:
23 | anyOf:
24 | - format: path
25 | type: string
26 | - type: 'null'
27 | title: Model Subspace Petab Yaml
28 | estimated_parameters:
29 | anyOf:
30 | - additionalProperties:
31 | type: number
32 | type: object
33 | - type: 'null'
34 | default: null
35 | title: Estimated Parameters
36 | iteration:
37 | anyOf:
38 | - type: integer
39 | - type: 'null'
40 | default: null
41 | title: Iteration
42 | model_id:
43 | default: null
44 | title: Model Id
45 | type: string
46 | model_label:
47 | anyOf:
48 | - type: string
49 | - type: 'null'
50 | default: null
51 | title: Model Label
52 | parameters:
53 | additionalProperties:
54 | anyOf:
55 | - type: number
56 | - type: integer
57 | - const: estimate
58 | type: string
59 | title: Parameters
60 | type: object
61 | predecessor_model_hash:
62 | $ref: '#/$defs/ModelHash'
63 | default: null
64 | required:
65 | - model_subspace_id
66 | - model_subspace_indices
67 | - model_subspace_petab_yaml
68 | - parameters
69 | title: Model
70 | type: object
71 |
--------------------------------------------------------------------------------
/doc/standard/models.yaml:
--------------------------------------------------------------------------------
1 | $defs:
2 | Model:
3 | description: A model.
4 | properties:
5 | model_subspace_id:
6 | title: Model Subspace Id
7 | type: string
8 | model_subspace_indices:
9 | items:
10 | type: integer
11 | title: Model Subspace Indices
12 | type: array
13 | criteria:
14 | additionalProperties:
15 | type: number
16 | title: Criteria
17 | type: object
18 | model_hash:
19 | $ref: '#/$defs/ModelHash'
20 | default: null
21 | model_subspace_petab_yaml:
22 | anyOf:
23 | - format: path
24 | type: string
25 | - type: 'null'
26 | title: Model Subspace Petab Yaml
27 | estimated_parameters:
28 | anyOf:
29 | - additionalProperties:
30 | type: number
31 | type: object
32 | - type: 'null'
33 | default: null
34 | title: Estimated Parameters
35 | iteration:
36 | anyOf:
37 | - type: integer
38 | - type: 'null'
39 | default: null
40 | title: Iteration
41 | model_id:
42 | default: null
43 | title: Model Id
44 | type: string
45 | model_label:
46 | anyOf:
47 | - type: string
48 | - type: 'null'
49 | default: null
50 | title: Model Label
51 | parameters:
52 | additionalProperties:
53 | anyOf:
54 | - type: number
55 | - type: integer
56 | - const: estimate
57 | type: string
58 | title: Parameters
59 | type: object
60 | predecessor_model_hash:
61 | $ref: '#/$defs/ModelHash'
62 | default: null
63 | required:
64 | - model_subspace_id
65 | - model_subspace_indices
66 | - model_subspace_petab_yaml
67 | - parameters
68 | title: Model
69 | type: object
70 | ModelHash:
71 | type: string
72 | description: A collection of models.
73 | items:
74 | $ref: '#/$defs/Model'
75 | title: Models
76 | type: array
77 |
--------------------------------------------------------------------------------
/doc/standard/problem.yaml:
--------------------------------------------------------------------------------
1 | description: The model selection problem.
2 | properties:
3 | format_version:
4 | default: 1.0.0
5 | title: Format Version
6 | type: string
7 | criterion:
8 | type: string
9 | method:
10 | type: string
11 | model_space_files:
12 | items:
13 | format: path
14 | type: string
15 | title: Model Space Files
16 | type: array
17 | candidate_space_arguments:
18 | title: Candidate Space Arguments
19 | type: object
20 | required:
21 | - criterion
22 | - method
23 | - model_space_files
24 | title: Problem
25 | type: object
26 |
--------------------------------------------------------------------------------
/doc/test_suite.rst:
--------------------------------------------------------------------------------
1 | Model selection test suite
2 | ==========================
3 |
4 | Several test cases are provided, to test the compatibility of a
5 | PEtab-compatible calibration tool with different PEtab Select features.
6 |
7 | The test cases are available in the ``test_cases`` directory, and are provided in
8 | the model format.
9 |
10 | .. list-table::
11 | :header-rows: 1
12 |
13 | * - Test ID
14 | - Criterion
15 | - Method
16 | - Model space files
17 | - Compressed format
18 | - Predecessor (initial) models files
19 | * - 0001
20 | - (all)
21 | - (only one model)
22 | - 1
23 | -
24 | -
25 | * - 0002 [#f1]_
26 | - AIC
27 | - forward
28 | - 1
29 | -
30 | -
31 | * - 0003
32 | - BIC
33 | - brute force
34 | - 1
35 | - Yes
36 | -
37 | * - 0004
38 | - AICc
39 | - backward
40 | - 1
41 | -
42 | -
43 | * - 0005
44 | - AIC
45 | - forward
46 | - 1
47 | -
48 | - 1
49 | * - 0006
50 | - AIC
51 | - forward
52 | - 1
53 | -
54 | -
55 | * - 0007 [#f2]_
56 | - AIC
57 | - forward
58 | - 1
59 | -
60 | -
61 | * - 0008 [#f2]_
62 | - AICc
63 | - backward
64 | - 1
65 | -
66 | -
67 | * - 0009 [#f3]_
68 | - AICc
69 | - FAMoS
70 | - 1
71 | - Yes
72 | - Yes
73 |
74 | .. [#f1] Model ``M1_0`` differs from ``M1_1`` in three parameters, but only one additional estimated parameter. The effect of this on model selection criteria needs to be clarified. Test case 0006 is a duplicate of 0002 that doesn't have this issue.
75 |
76 | .. [#f2] Noise parameter is removed, noise is fixed to ``1``.
77 |
78 | .. [#f3] This is a computationally expensive problem to solve. Developers can try a model selection initialized with the provided predecessor model, a starting model from which the expected model is found reproducibly. To solve the problem reproducibly *ab initio*, on the order of 100 random model starts are required. This test case reproduces the model selection problem presented in https://doi.org/10.1016/j.cels.2016.01.002.
79 |
--------------------------------------------------------------------------------
/petab_select/__init__.py:
--------------------------------------------------------------------------------
1 | """Model selection extension for PEtab."""
2 |
3 | import sys
4 |
5 | from .analyze import *
6 | from .candidate_space import *
7 | from .constants import *
8 | from .criteria import *
9 | from .misc import *
10 | from .model import *
11 | from .model_space import *
12 | from .model_subspace import *
13 | from .models import *
14 | from .problem import *
15 | from .ui import *
16 |
17 | __all__ = [
18 | x
19 | for x in dir(sys.modules[__name__])
20 | if not x.startswith("_") and x != "sys"
21 | ]
22 |
--------------------------------------------------------------------------------
/petab_select/analyze.py:
--------------------------------------------------------------------------------
1 | """Methods to analyze results of model selection."""
2 |
3 | import warnings
4 | from collections.abc import Callable
5 |
6 | import networkx as nx
7 | import numpy as np
8 |
9 | from .constants import Criterion
10 | from .model import Model, ModelHash, default_compare
11 | from .models import Models
12 |
13 | __all__ = [
14 | # "get_predecessor_models",
15 | "group_by_predecessor_model",
16 | "group_by_iteration",
17 | "get_best_by_iteration",
18 | "compute_weights",
19 | "get_graph",
20 | "get_parameter_changes",
21 | ]
22 |
23 |
24 | # def get_predecessor_models(models: Models) -> Models:
25 | # """Get all models that were predecessors to other models.
26 | #
27 | # Args:
28 | # models:
29 | # The models
30 | #
31 | # Returns:
32 | # The predecessor models.
33 | # """
34 | # predecessor_models = Models([
35 | # models.get(
36 | # model.predecessor_model_hash,
37 | # # Handle virtual initial model.
38 | # model.predecessor_model_hash,
39 | # ) for model in models
40 | # ])
41 | # return predecessor_models
42 |
43 |
44 | def group_by_predecessor_model(models: Models) -> dict[ModelHash, Models]:
45 | """Group models by their predecessor model.
46 |
47 | Args:
48 | models:
49 | The models.
50 |
51 | Returns:
52 | Key is predecessor model hash, value is models.
53 | """
54 | result = {}
55 | for model in models:
56 | if model.predecessor_model_hash not in result:
57 | result[model.predecessor_model_hash] = Models()
58 | result[model.predecessor_model_hash].append(model)
59 | return result
60 |
61 |
62 | def group_by_iteration(
63 | models: Models, sort: bool = True
64 | ) -> dict[int | None, Models]:
65 | """Group models by their iteration.
66 |
67 | Args:
68 | models:
69 | The models.
70 | sort:
71 | Whether to sort the iterations.
72 |
73 | Returns:
74 | Key is iteration, value is models.
75 | """
76 | result = {}
77 | for model in models:
78 | if model.iteration not in result:
79 | result[model.iteration] = Models()
80 | result[model.iteration].append(model)
81 | if sort:
82 | result = {iteration: result[iteration] for iteration in sorted(result)}
83 | return result
84 |
85 |
86 | def get_best(
87 | models: Models,
88 | criterion: Criterion,
89 | compare: Callable[[Model, Model], bool] | None = None,
90 | compute_criterion: bool = False,
91 | ) -> Model:
92 | """Get the best model.
93 |
94 | Args:
95 | models:
96 | The models.
97 |         criterion:
98 | The criterion.
99 | compare:
100 | The method used to compare two models.
101 |             Defaults to :func:`petab_select.model.default_compare`.
102 | compute_criterion:
103 | Whether to try computing criterion values, if sufficient
104 | information is available (e.g., likelihood and number of
105 | parameters, to compute AIC).
106 |
107 | Returns:
108 | The best model.
109 | """
110 | if compare is None:
111 | compare = default_compare
112 |
113 | best_model = None
114 | for model in models:
115 | if compute_criterion and not model.has_criterion(criterion):
116 | model.get_criterion(criterion)
117 | if not model.has_criterion(criterion):
118 | warnings.warn(
119 | f"The model `{model.hash}` has no value set for criterion "
120 | f"`{criterion}`. Consider using `compute_criterion=True` "
121 | "if there is sufficient information already stored in the "
122 | "model (e.g. the likelihood).",
123 | RuntimeWarning,
124 | stacklevel=2,
125 | )
126 | continue
127 | if best_model is None:
128 | best_model = model
129 | continue
130 | if compare(best_model, model, criterion=criterion):
131 | best_model = model
132 | if best_model is None:
133 | raise KeyError(
134 | "None of the supplied models have a value set for the criterion "
135 | f"`{criterion}`."
136 | )
137 | return best_model
138 |
139 |
140 | def get_best_by_iteration(
141 | models: Models,
142 | *args,
143 | **kwargs,
144 | ) -> dict[int, Model]:
145 | """Get the best model of each iteration.
146 |
147 |     See :func:`get_best` for additional required arguments.
148 |
149 | Args:
150 | models:
151 | The models.
152 | *args, **kwargs:
153 |             Forwarded to :func:`get_best`.
154 |
155 | Returns:
156 |         The best model of each iteration. Keys are iterations, values are the best models.
157 | """
158 | iterations_models = group_by_iteration(models=models)
159 | best_by_iteration = {
160 | iteration: get_best(
161 | *args,
162 | models=iteration_models,
163 | **kwargs,
164 | )
165 | for iteration, iteration_models in iterations_models.items()
166 | }
167 | return best_by_iteration
168 |
169 |
170 | def compute_weights(
171 | models: Models,
172 | criterion: Criterion,
173 | as_dict: bool = False,
174 | ) -> list[float] | dict[ModelHash, float]:
175 | """Compute criterion weights.
176 |
177 | N.B.: regardless of the criterion, the formula used is the Akaike weights
178 | formula, but with ``criterion`` values instead of the AIC.
179 |
180 | Args:
181 | models:
182 | The models.
183 | criterion:
184 | The criterion.
185 | as_dict:
186 | Whether to return a dictionary, with model hashes for keys.
187 |
188 | Returns:
189 | The criterion weights.
190 | """
191 | relative_criterion_values = np.array(
192 | models.get_criterion(criterion=criterion, relative=True)
193 | )
194 | weights = np.exp(-0.5 * relative_criterion_values)
195 | weights /= weights.sum()
196 | weights = weights.tolist()
197 | if as_dict:
198 | weights = dict(zip(models.hashes, weights, strict=False))
199 | return weights
200 |
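# Illustrative sketch (not part of the module's API): for two models with AIC
# values [100.0, 102.0], the relative values are [0.0, 2.0], the unnormalized
# weights are exp(0.0) = 1.0 and exp(-1.0) ~= 0.368, and the normalized
# weights are therefore approximately [0.731, 0.269].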
201 |
202 | def get_graph(
203 | models: Models,
204 |     labels: dict[ModelHash, str] | None = None,
205 | ) -> nx.DiGraph:
206 | """Get a graph representation of the models in terms of their ancestry.
207 |
208 | Edges connect models with their predecessor models.
209 |
210 | Args:
211 | models:
212 | The models.
213 | labels:
214 | Alternative labels for the models. Keys are model hashes, values
215 | are the labels.
216 |
217 | Returns:
218 | The graph.
219 | """
220 | if labels is None:
221 | labels = {}
222 |
223 | G = nx.DiGraph()
224 | edges = []
225 | for model in models:
226 | tail = labels.get(
227 | model.predecessor_model_hash, model.predecessor_model_hash
228 | )
229 | head = labels.get(model.hash, model.hash)
230 | edges.append((tail, head))
231 | G.add_edges_from(edges)
232 | return G
233 |
234 |
235 | def get_parameter_changes(
236 | models: Models,
237 | as_dict: bool = False,
238 | ) -> (
239 | dict[ModelHash, list[tuple[set[str], set[str]]]]
240 | | list[tuple[set[str], set[str]]]
241 | ):
242 |     """Get the differences in parameters between models and their predecessors.
243 |
244 | Args:
245 | models:
246 | The models.
247 | as_dict:
248 | Whether to return a dictionary, with model hashes for keys.
249 |
250 | Returns:
251 | The parameter changes. Each model has a 2-tuple of sets of parameters.
252 | The first and second sets are the added and removed parameters,
253 | respectively. If the predecessor model is undefined (e.g. the
254 | ``VIRTUAL_INITIAL_MODEL``), then both sets will be empty.
255 | """
256 | estimated_parameters = {
257 | model.hash: set(model.estimated_parameters) for model in models
258 | }
259 | parameter_changes = [
260 | (set(), set())
261 | if model.predecessor_model_hash not in estimated_parameters
262 | else (
263 | estimated_parameters[model.hash].difference(
264 | estimated_parameters[model.predecessor_model_hash]
265 | ),
266 | estimated_parameters[model.predecessor_model_hash].difference(
267 | estimated_parameters[model.hash]
268 | ),
269 | )
270 | for model in models
271 | ]
272 |
273 | if as_dict:
274 | return dict(zip(models.hashes, parameter_changes, strict=True))
275 | return parameter_changes
276 |
--------------------------------------------------------------------------------
/petab_select/constants.py:
--------------------------------------------------------------------------------
1 | """Constants for the PEtab Select package."""
2 |
3 | from __future__ import annotations
4 |
5 | import string
6 | import sys
7 | from enum import Enum
8 | from pathlib import Path
9 | from typing import Literal
10 |
11 | # Checked
12 |
13 | # Criteria
14 | CRITERIA = "criteria"
15 | CRITERION = "criterion"
16 |
17 |
18 | class Criterion(str, Enum):
19 | """String literals for model selection criteria."""
20 |
21 | #: The Akaike information criterion.
22 | AIC = "AIC"
23 | #: The corrected Akaike information criterion.
24 | AICC = "AICc"
25 | #: The Bayesian information criterion.
26 | BIC = "BIC"
27 | #: The likelihood.
28 | LH = "LH"
29 | #: The log-likelihood.
30 | LLH = "LLH"
31 | #: The negative log-likelihood.
32 | NLLH = "NLLH"
33 | #: The sum of squared residuals.
34 | SSR = "SSR"
35 |
36 |
37 | # Model
38 | ESTIMATED_PARAMETERS = "estimated_parameters"
39 | ITERATION = "iteration"
40 | MODEL_ID = "model_id"
41 | MODEL_SUBSPACE_ID = "model_subspace_id"
42 | MODEL_SUBSPACE_INDICES = "model_subspace_indices"
43 | PARAMETERS = "parameters"
44 | MODEL_SUBSPACE_PETAB_YAML = "model_subspace_petab_yaml"
45 | MODEL_SUBSPACE_PETAB_PROBLEM = "_model_subspace_petab_problem"
46 | PETAB_YAML = "petab_yaml"
47 | ROOT_PATH = "root_path"
48 | ESTIMATE = "estimate"
49 |
50 | PETAB_PROBLEM = "petab_problem"
51 |
52 | # Model hash
53 | MODEL_HASH = "model_hash"
54 | MODEL_HASH_DELIMITER = "-"
55 | MODEL_SUBSPACE_INDICES_HASH = "model_subspace_indices_hash"
56 | MODEL_SUBSPACE_INDICES_HASH_DELIMITER = "."
57 | MODEL_SUBSPACE_INDICES_HASH_MAP = (
58 | # [0-9]+[A-Z]+[a-z]
59 | string.digits + string.ascii_uppercase + string.ascii_lowercase
60 | )
61 | PREDECESSOR_MODEL_HASH = "predecessor_model_hash"
62 | ITERATION = "iteration"
63 | PETAB_PROBLEM = "petab_problem"
64 | PETAB_YAML = "petab_yaml"
65 | HASH = "hash"
66 |
67 | # MODEL_SPACE_FILE_NON_PARAMETER_COLUMNS = [MODEL_ID, PETAB_YAML]
68 | MODEL_SPACE_FILE_NON_PARAMETER_COLUMNS = [MODEL_SUBSPACE_ID, PETAB_YAML]
69 |
70 | # PEtab
71 | PETAB_ESTIMATE_TRUE = 1
72 |
73 | # Problem
74 | MODEL_SPACE_FILES = "model_space_files"
75 | PROBLEM = "problem"
76 | VERSION = "version"
77 |
78 | # Candidate space
79 | CANDIDATE_SPACE = "candidate_space"
80 | CANDIDATE_SPACE_ARGUMENTS = "candidate_space_arguments"
81 | METHOD = "method"
82 | METHOD_SCHEME = "method_scheme"
83 | NEXT_METHOD = "next_method"
84 | PREVIOUS_METHODS = "previous_methods"
85 | PREDECESSOR_MODEL = "predecessor_model"
86 |
87 |
88 | class Method(str, Enum):
89 | """String literals for model selection methods."""
90 |
91 | #: The backward stepwise method.
92 | BACKWARD = "backward"
93 | #: The brute-force method.
94 | BRUTE_FORCE = "brute_force"
95 | #: The FAMoS method.
96 | FAMOS = "famos"
97 | #: The forward stepwise method.
98 | FORWARD = "forward"
99 | #: The lateral, or swap, method.
100 | LATERAL = "lateral"
101 | #: The jump-to-most-distant-model method.
102 | MOST_DISTANT = "most_distant"
103 |
104 |
105 | # Typing
106 | TYPE_PATH = str | Path
107 |
108 | # UI
109 | MODELS = "models"
110 | UNCALIBRATED_MODELS = "uncalibrated_models"
111 | TERMINATE = "terminate"
112 |
113 | #: Methods that move through model space by taking steps away from some model.
114 | STEPWISE_METHODS = [
115 | Method.BACKWARD,
116 | Method.FORWARD,
117 | Method.LATERAL,
118 | ]
119 | #: Methods that require an initial model.
120 | INITIAL_MODEL_METHODS = [
121 | Method.BACKWARD,
122 | Method.FORWARD,
123 | Method.LATERAL,
124 | ]
125 |
126 | #: Virtual initial models can be used to initialize some initial model methods.
127 | # FIXME replace by real "dummy" model object
128 | # VIRTUAL_INITIAL_MODEL = "virtual_initial_model"
129 | #: Methods that are compatible with a virtual initial model.
130 | VIRTUAL_INITIAL_MODEL_METHODS = [
131 | Method.BACKWARD,
132 | Method.FORWARD,
133 | ]
134 |
135 |
136 | __all__ = [
137 | x
138 | for x in dir(sys.modules[__name__])
139 | if not x.startswith("_")
140 | and x not in ("sys", "Enum", "Path", "Dict", "List", "Literal", "Union")
141 | ]
142 |
143 |
144 | # Unchecked
145 | MODEL = "model"
146 |
147 | # Zero-indexed column/row indices
148 | MODEL_ID_COLUMN = 0
149 | PETAB_YAML_COLUMN = 1
150 | # It is assumed that all columns after PARAMETER_DEFINITIONS_START contain
151 | # parameter IDs.
152 | PARAMETER_DEFINITIONS_START = 2
153 | HEADER_ROW = 0
154 |
155 | PARAMETER_VALUE_DELIMITER = ";"
156 | CODE_DELIMITER = "-"
157 | PETAB_ESTIMATE_FALSE = 0
158 |
159 | # TYPING_PATH = Union[str, Path]
160 |
161 | # Model space file columns
162 | # TODO ensure none of these occur twice in the column header (this would
163 | # suggest that a parameter has a conflicting name)
164 | # MODEL_ID = 'modelId' # TODO already defined, reorganize constants
165 | # YAML = 'YAML' # FIXME
166 | MODEL_CODE = "model_code"
167 | MODEL_HASHES = "model_hashes"
168 | PETAB_HASH_DIGEST_SIZE = None
169 | # If `predecessor_model_hash` is defined for a model, it is the ID of the model that the
170 | # current model was/is to be compared to. This is part of the result and is
171 | # only (optionally) set by the PEtab calibration tool. It is not defined by the
172 | # PEtab Select model selection problem (but may be subsequently stored in the
173 | # PEtab Select model report format.
174 | HASH = "hash"
175 |
176 | YAML_FILENAME = "yaml"
177 |
178 | # DISTANCES = {
179 | # FORWARD: {
180 | # 'l1': 1,
181 | # 'size': 1,
182 | # },
183 | # BACKWARD: {
184 | # 'l1': 1,
185 | # 'size': -1,
186 | # },
187 | # LATERAL: {
188 | # 'l1': 2,
189 | # 'size': 0,
190 | # },
191 | # }
192 |
193 |
194 | # Parameters can be fixed to a value, or estimated if indicated with the string
195 | # `ESTIMATE`.
196 | TYPE_PARAMETER = float | int | Literal[ESTIMATE]
197 | TYPE_PARAMETER_OPTIONS = list[TYPE_PARAMETER]
198 | # Parameter ID -> parameter value mapping.
199 | TYPE_PARAMETER_DICT = dict[str, TYPE_PARAMETER]
200 | # Parameter ID -> multiple possible parameter values.
201 | TYPE_PARAMETER_OPTIONS_DICT = dict[str, TYPE_PARAMETER_OPTIONS]
202 |
203 | TYPE_CRITERION = float
204 |
--------------------------------------------------------------------------------
/petab_select/criteria.py:
--------------------------------------------------------------------------------
1 | """Implementations of model selection criteria."""
2 |
3 | import numpy as np
4 | import petab.v1 as petab
5 | from petab.v1.C import OBJECTIVE_PRIOR_PARAMETERS, OBJECTIVE_PRIOR_TYPE
6 |
7 | import petab_select
8 |
9 | from .constants import PETAB_PROBLEM, Criterion # LH,; LLH,; NLLH,
10 |
11 | __all__ = [
12 | "calculate_aic",
13 | "calculate_aicc",
14 | "calculate_bic",
15 | "CriterionComputer",
16 | ]
17 |
18 |
19 | # use as attribute e.g. `Model.criterion_computer`?
20 | class CriterionComputer:
21 | """Compute various criteria."""
22 |
23 | def __init__(
24 | self,
25 | model: "petab_select.model.Model",
26 | ):
27 | self.model = model
28 | self._petab_problem = None
29 |
30 | @property
31 | def petab_problem(self) -> petab.Problem:
32 | """The PEtab problem that corresponds to the model.
33 |
34 | Implemented as a property such that the :class:`petab.Problem` object
35 | is only constructed if explicitly requested.
36 |
37 |     This substantially speeds up operations on models. For example, analysis of models
38 |     that already have criteria computed can skip loading their PEtab problem again.
39 | """
40 | # TODO refactor, if `petab_problem` is going to be produced here anyway, store
41 | # in model instance instead, for use elsewhere (e.g. pyPESTO)
42 | # i.e.: this is a property of a `Model` instance, not `CriterionComputer`
43 | if self._petab_problem is None:
44 | self._petab_problem = self.model.to_petab()[PETAB_PROBLEM]
45 | return self._petab_problem
46 |
47 | def __call__(self, criterion: Criterion) -> float:
48 | """Get a criterion value.
49 |
50 | Args:
51 | criterion:
52 | The ID of the criterion.
53 |
54 | Returns:
55 | The criterion value.
56 | """
57 | return getattr(self, "get_" + criterion.value.lower())()
58 |
59 | def get_aic(self) -> float:
60 | """Get the Akaike information criterion."""
61 | return calculate_aic(
62 | nllh=self.get_nllh(),
63 | n_estimated=self.get_n_estimated(),
64 | )
65 |
66 | def get_aicc(self) -> float:
67 | """Get the corrected Akaike information criterion."""
68 | return calculate_aicc(
69 | nllh=self.get_nllh(),
70 | n_estimated=self.get_n_estimated(),
71 | n_measurements=self.get_n_measurements(),
72 | n_priors=self.get_n_priors(),
73 | )
74 |
75 | def get_bic(self) -> float:
76 | """Get the Bayesian information criterion."""
77 | return calculate_bic(
78 | nllh=self.get_nllh(),
79 | n_estimated=self.get_n_estimated(),
80 | n_measurements=self.get_n_measurements(),
81 | n_priors=self.get_n_priors(),
82 | )
83 |
84 | def get_nllh(self) -> float:
85 | """Get the negative log-likelihood."""
86 | nllh = self.model.get_criterion(Criterion.NLLH, compute=False)
87 | if nllh is None:
88 | nllh = -1 * self.get_llh()
89 | return nllh
90 |
91 | def get_llh(self) -> float:
92 | """Get the log-likelihood."""
93 | llh = self.model.get_criterion(Criterion.LLH, compute=False)
94 | if llh is None:
95 | llh = np.log(self.get_lh())
96 | return llh
97 |
98 | def get_lh(self) -> float:
99 | """Get the likelihood."""
100 | lh = self.model.get_criterion(Criterion.LH, compute=False)
101 | llh = self.model.get_criterion(Criterion.LLH, compute=False)
102 | nllh = self.model.get_criterion(Criterion.NLLH, compute=False)
103 |
104 | if lh is not None:
105 | return lh
106 | elif llh is not None:
107 | return np.exp(llh)
108 | elif nllh is not None:
109 | return np.exp(-1 * nllh)
110 |
111 | raise ValueError(
112 | "Please supply the likelihood (LH) or a compatible transformation. Compatible transformations: log(LH), -log(LH)."
113 | )
114 |
115 | def get_n_estimated(self) -> int:
116 | """Get the number of estimated parameters."""
117 | return len(self.petab_problem.x_free_indices)
118 |
119 | def get_n_measurements(self) -> int:
120 | """Get the number of measurements."""
121 | return len(self.petab_problem.measurement_df)
122 |
123 | def get_n_priors(self) -> int:
124 | """Get the number of priors."""
125 | df = self.petab_problem.parameter_df
126 |
127 | # At least one of the objective prior columns should be present.
128 | if not (
129 | OBJECTIVE_PRIOR_TYPE in df or OBJECTIVE_PRIOR_PARAMETERS in df
130 | ):
131 | return 0
132 |
133 | # If both objective prior columns are not present, raise an error.
134 | if not (
135 | OBJECTIVE_PRIOR_TYPE in df and OBJECTIVE_PRIOR_PARAMETERS in df
136 | ):
137 | raise NotImplementedError(
138 | "Currently expect that prior types are specified with prior parameters (no default values). Please provide an example for implementation."
139 | )
140 |
141 | # Expect that the number of non-empty values in both objective prior columns
142 | # are the same.
143 | if not (
144 | df[OBJECTIVE_PRIOR_TYPE].notna().sum()
145 | == df[OBJECTIVE_PRIOR_PARAMETERS].notna().sum()
146 | ):
147 | raise NotImplementedError(
148 | "Some objective prior values are missing."
149 | )
150 |
151 | number_of_priors = df[OBJECTIVE_PRIOR_TYPE].notna().sum()
152 | return number_of_priors
153 |
154 | # def get_criterion(self, id: str) -> TYPE_CRITERION:
155 | # """Get a criterion value, by criterion ID.
156 | # FIXME: superseded by `__call__`
157 |
158 | # id:
159 | # The ID of the criterion (e.g. `petab_select.constants.Criterion.AIC`).
160 |
161 | # Returns:
162 | # The criterion value.
163 | # """
164 | # return getattr(self, f'get_{id}')()
165 |
166 |
167 | # TODO should fixed parameters count as measurements/priors when comparing to models
168 | # that estimate the same parameters?
169 | def calculate_aic(
170 | nllh: float,
171 | n_estimated: int,
172 | ) -> float:
173 | """
174 | Calculate the Akaike information criterion (AIC) for a model.
175 |
176 | Args:
177 | nllh:
178 | The negative log likelihood.
179 | n_estimated:
180 | The number of estimated parameters in the model.
181 |
182 | Returns:
183 | The AIC value.
184 | """
185 | return 2 * (n_estimated + nllh)
186 |
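# Worked example (illustrative only): with nllh = 12.5 and n_estimated = 3,
# the AIC is 2 * (3 + 12.5) = 31.0.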
187 |
188 | def calculate_aicc(
189 | nllh: float,
190 | n_estimated: int,
191 | n_measurements: int,
192 | n_priors: int,
193 | ) -> float:
194 | """
195 | Calculate the corrected Akaike information criterion (AICc) for a model.
196 |
197 | Args:
198 | nllh:
199 | The negative log likelihood.
200 | n_estimated:
201 | The number of estimated parameters in the model.
202 | n_measurements:
203 | The number of measurements used in the likelihood.
204 | n_priors:
205 | The number of priors used in the objective function.
206 |
207 | Returns:
208 | The AICc value.
209 | """
210 | return calculate_aic(
211 | nllh=nllh, n_estimated=n_estimated
212 | ) + 2 * n_estimated * (n_estimated + 1) / (
213 | n_measurements + n_priors - n_estimated - 1
214 | )
215 |
216 |
217 | def calculate_bic(
218 | nllh: float,
219 | n_estimated: int,
220 | n_measurements: int,
221 | n_priors: int,
222 | ) -> float:
223 | """
224 | Calculate the Bayesian information criterion (BIC) for a model.
225 |
226 |     Args:
227 | nllh:
228 | The negative log likelihood.
229 | n_estimated:
230 | The number of estimated parameters in the model.
231 | n_measurements:
232 | The number of measurements used in the likelihood.
233 | n_priors:
234 | The number of priors used in the objective function.
235 |
236 | Returns:
237 | The BIC value.
238 | """
239 | return n_estimated * np.log(n_measurements + n_priors) + 2 * nllh
240 |
--------------------------------------------------------------------------------
/petab_select/handlers.py:
--------------------------------------------------------------------------------
1 | from collections.abc import Callable
2 |
3 | # `float` for `np.inf`
4 | TYPE_LIMIT = float | int
5 |
6 |
7 | # TODO exclusions handler
8 |
9 |
10 | class LimitHandler:
11 | """A handler for classes that have a limit.
12 |
13 | Attributes:
14 | current:
15 | A callable to determine the current value.
16 | limit:
17 | The upper limit of the current value.
18 | """
19 |
20 | def __init__(
21 | self,
22 |         current: Callable[[], TYPE_LIMIT],
23 | limit: TYPE_LIMIT,
24 | ):
25 | self.current = current
26 | self.limit = limit
27 |
28 | def reached(self) -> bool:
29 | """Check whether the limit has been reached."""
30 | if self.current() >= self.limit:
31 | return True
32 | return False
33 |
34 | def set_limit(self, limit: TYPE_LIMIT) -> None:
35 | """Set the limit.
36 |
37 | Args:
38 | limit:
39 | The limit.
40 | """
41 | self.limit = limit
42 |
43 | def get_limit(self) -> TYPE_LIMIT:
44 | """Get the limit.
45 |
46 | Returns:
47 | The limit.
48 | """
49 | return self.limit
50 |
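# Illustrative usage sketch (not part of the module):
#   history = []
#   handler = LimitHandler(current=lambda: len(history), limit=2)
#   handler.reached()  # False while `history` has fewer than 2 entries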
--------------------------------------------------------------------------------
/petab_select/misc.py:
--------------------------------------------------------------------------------
1 | import hashlib
2 |
3 | # import json
4 | from typing import Any
5 |
6 | from .constants import ( # TYPE_PARAMETER_OPTIONS_DICT,
7 | ESTIMATE,
8 | TYPE_PARAMETER_DICT,
9 | TYPE_PARAMETER_OPTIONS,
10 | )
11 |
12 | __all__ = [
13 | "parameter_string_to_value",
14 | ]
15 |
16 |
17 | def hashify(x: Any, **kwargs) -> str:
18 | """Generate a hash.
19 |
20 | Currently uses the :func:`hashlib.blake2b` method. `**kwargs` are forwarded
21 | to this method.
22 |
23 | Args:
24 | x:
25 | The object to hash. `x` will be converted to a string (`str(x)`), then
26 | hashed.
27 |
28 | Returns:
29 | The hash, as a hexadecimal string.
30 | """
31 | # return int(hashlib.sha256(str(x).encode('utf-8')).hexdigest(), 16)
32 | return hashlib.blake2b(
33 | str(x).encode("utf-8"),
34 | **kwargs,
35 | ).hexdigest()
36 |
37 |
38 | def hash_parameter_dict(dict_: TYPE_PARAMETER_DICT, **kwargs):
39 | """Hash a dictionary of parameter values."""
40 | value = tuple((k, dict_[k]) for k in sorted(dict_))
41 | return hashify(value, **kwargs)
42 |
43 |
44 | def hash_parameter_options(list_: TYPE_PARAMETER_OPTIONS, **kwargs):
45 | """Hash parameter options."""
46 | return hashify(list(list_), **kwargs)
47 |
48 |
49 | def hash_str(str_: str, **kwargs):
50 | return hashify(str_, **kwargs)
51 |
52 |
53 | def hash_list(list_: list, **kwargs):
54 | return hashify(list(list_), **kwargs)
55 |
56 |
57 | def snake_case_to_camel_case(string: str) -> str:
58 | """Convert a string from snake case to camel case.
59 |
60 | Args:
61 | string:
62 | The string, in snake case.
63 |
64 | Returns:
65 | The string, in camel case.
66 | """
67 | string_pieces = string.split("_")
68 | string_camel = ""
69 | for string_piece in string_pieces:
70 | string_camel += string_piece[0].upper() + string_piece[1:]
71 | return string_camel
72 |
73 |
74 | def parameter_string_to_value(
75 | parameter_string: str,
76 | passthrough_estimate: bool = False,
77 | ) -> float | int | str:
78 | """Cast a parameter value from string to numeric.
79 |
80 | Args:
81 | parameter_string:
82 | The parameter value, as a string.
83 | passthrough_estimate:
84 | Whether to return `ESTIMATE` as `ESTIMATE`. If `False`, raises an exception
85 | if `parameter_string == ESTIMATE`.
86 |
87 | Returns:
88 |         The parameter value, as a numeric type, or ``ESTIMATE`` if ``passthrough_estimate`` is true and the value is ``ESTIMATE``.
89 | """
90 | if parameter_string == ESTIMATE:
91 | if passthrough_estimate:
92 | return parameter_string
93 | raise ValueError("Please handle estimated parameters differently.")
94 | float_value = float(parameter_string)
95 | int_value = int(float_value)
96 |
97 | if int_value == float_value:
98 | return int_value
99 | return float_value
100 |
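# Illustrative examples (not part of the module):
#   parameter_string_to_value("0.0") returns 0 (an int, since 0.0 == 0),
#   parameter_string_to_value("1.5") returns 1.5 (a float), and
#   parameter_string_to_value("estimate", passthrough_estimate=True)
#   returns "estimate".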
101 |
102 | # def hash_dictionary(dictionary: Dict[str, Union[]]):
103 | # return hash(json.dumps(dictionary, sort_keys=True))
104 | #
105 | #
106 | # def hash_list(list_: List):
107 | # return hash(json.dumps(list_, sort_keys=True))
108 |
--------------------------------------------------------------------------------
/petab_select/model_space.py:
--------------------------------------------------------------------------------
1 | """The `ModelSpace` class and related methods."""
2 |
3 | from __future__ import annotations
4 |
5 | import logging
6 | import warnings
7 | from collections.abc import Iterable
8 | from pathlib import Path
9 | from typing import Any
10 |
11 | import numpy as np
12 | import pandas as pd
13 |
14 | from .candidate_space import CandidateSpace
15 | from .constants import (
16 | MODEL_SUBSPACE_ID,
17 | TYPE_PATH,
18 | )
19 | from .model import Model
20 | from .model_subspace import ModelSubspace
21 |
22 | __all__ = [
23 | "ModelSpace",
24 | ]
25 |
26 |
27 | class ModelSpace:
28 | """A model space, as a collection of model subspaces.
29 |
30 | Attributes:
31 | model_subspaces:
32 | List of model subspaces.
33 | exclusions:
34 | Hashes of models that are excluded from the model space.
35 | """
36 |
37 | def __init__(
38 | self,
39 | model_subspaces: list[ModelSubspace],
40 | ):
41 | self.model_subspaces = {
42 | model_subspace.model_subspace_id: model_subspace
43 | for model_subspace in model_subspaces
44 | }
45 |
46 | @staticmethod
47 | def load(
48 | data: TYPE_PATH | pd.DataFrame | list[TYPE_PATH | pd.DataFrame],
49 | root_path: TYPE_PATH = None,
50 | ) -> ModelSpace:
51 | """Load a model space from dataframe(s) or file(s).
52 |
53 | Args:
54 | data:
55 | The data. TSV file(s) or pandas dataframe(s).
56 | root_path:
57 | Any paths in dataframe will be resolved relative to this path.
58 | Paths in TSV files will be resolved relative to the directory
59 | of the TSV file.
60 |
61 | Returns:
62 | The model space.
63 | """
64 | if not isinstance(data, list):
65 | data = [data]
66 | dfs = [
67 | (
68 | root_path,
69 | df.reset_index() if df.index.name == MODEL_SUBSPACE_ID else df,
70 | )
71 | if isinstance(df, pd.DataFrame)
72 | else (Path(df).parent, pd.read_csv(df, sep="\t"))
73 | for df in data
74 | ]
75 |
76 | model_subspaces = []
77 | for root_path, df in dfs:
78 | for _, definition in df.iterrows():
79 | model_subspaces.append(
80 | ModelSubspace.from_definition(
81 | definition=definition,
82 | root_path=root_path,
83 | )
84 | )
85 | model_space = ModelSpace(model_subspaces=model_subspaces)
86 | return model_space
87 |
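    # Illustrative usage sketch (the filename is a placeholder):
    #   model_space = ModelSpace.load("model_space.tsv")
    #   print(list(model_space.model_subspaces))  # the model subspace IDs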
88 | def save(self, filename: TYPE_PATH | None = None) -> pd.DataFrame:
89 | """Export the model space to a dataframe (and TSV).
90 |
91 | Args:
92 | filename:
93 | If provided, the dataframe will be saved here as a TSV.
94 | Paths will be made relative to the parent directory of this
95 | filename.
96 |
97 | Returns:
98 | The dataframe.
99 | """
100 | root_path = Path(filename).parent if filename else None
101 |
102 | data = []
103 | for model_subspace in self.model_subspaces.values():
104 | data.append(model_subspace.to_definition(root_path=root_path))
105 | df = pd.DataFrame(data)
106 | df = df.set_index(MODEL_SUBSPACE_ID)
107 |
108 | if filename:
109 | df.to_csv(filename, sep="\t")
110 |
111 | return df
112 |
113 | def search(
114 | self,
115 | candidate_space: CandidateSpace,
116 | limit: int = np.inf,
117 | exclude: bool = True,
118 | ):
119 | """Search all model subspaces according to a candidate space method.
120 |
121 | Args:
122 | candidate_space:
123 | The candidate space.
124 | limit:
125 | The maximum number of models to send to the candidate space (i.e. this
126 | limit is on the number of models considered, not necessarily approved
127 | as candidates).
128 | Note that using a limit may produce unexpected results. For
129 | example, it may bias candidate models to be chosen only from
130 | a subset of model subspaces.
131 | exclude:
132 | Whether to exclude the new candidates from the model subspaces.
133 | """
134 | if candidate_space.limit.reached():
135 | warnings.warn(
136 | "The candidate space has already reached its limit of accepted models.",
137 | RuntimeWarning,
138 | stacklevel=2,
139 | )
140 | return candidate_space.models
141 |
142 | @candidate_space.wrap_search_subspaces
143 | def search_subspaces(only_one_subspace: bool = False):
144 | # TODO change dict to list of subspaces. Each subspace should manage its own
145 | # ID
146 | if only_one_subspace and len(self.model_subspaces) > 1:
147 | logging.warning(
148 | f"There is more than one model subspace. This can lead to problems for candidate space {candidate_space}, especially if they have different PEtab YAML files."
149 | )
150 | for model_subspace in self.model_subspaces.values():
151 | model_subspace.search(
152 | candidate_space=candidate_space, limit=limit
153 | )
154 | if len(candidate_space.models) == limit:
155 | break
156 | elif len(candidate_space.models) > limit:
157 | raise ValueError(
158 | "An unknown error has occurred. Too many models were "
159 | f"generated. Requested limit: {limit}. Number of "
160 | f"generated models: {len(candidate_space.models)}."
161 | )
162 |
163 | search_subspaces()
164 |
165 | if exclude:
166 | self.exclude_models(candidate_space.models)
167 |
168 | return candidate_space.models
169 |
170 | def __len__(self):
171 | """Get the number of models in this space."""
172 |         subspace_counts = [len(s) for s in self.model_subspaces.values()]
173 | total_count = sum(subspace_counts)
174 | return total_count
175 |
176 | def exclude_model(self, model: Model):
177 | # FIXME add Exclusions Mixin (or object) to handle exclusions on the subspace
178 | # and space level.
179 | for model_subspace in self.model_subspaces.values():
180 | model_subspace.exclude_model(model)
181 |
182 | def exclude_models(self, models: Iterable[Model]):
183 | # FIXME add Exclusions Mixin (or object) to handle exclusions on the subspace
184 | # and space level.
185 | for model_subspace in self.model_subspaces.values():
186 | model_subspace.exclude_models(models)
187 | # model_subspace.reset_exclusions()
188 |
189 | def exclude_model_hashes(self, model_hashes: Iterable[str]):
190 | # FIXME add Exclusions Mixin (or object) to handle exclusions on the subspace
191 | # and space level.
192 | for model_subspace in self.model_subspaces.values():
193 | model_subspace.exclude_model_hashes(model_hashes=model_hashes)
194 |
195 | def reset_exclusions(
196 | self,
197 |         exclusions: list[Any] | None = None,
198 | ) -> None:
199 | """Reset the exclusions in the model subspaces."""
200 | for model_subspace in self.model_subspaces.values():
201 | model_subspace.reset_exclusions(exclusions)
202 |
--------------------------------------------------------------------------------
/petab_select/petab.py:
--------------------------------------------------------------------------------
1 | """Helper methods for working with PEtab problems."""
2 |
3 | from typing import Literal
4 |
5 | import numpy as np
6 | import petab.v1 as petab
7 | from petab.v1.C import ESTIMATE
8 |
9 | __all__ = ["get_petab_parameters"]
10 |
11 |
12 | def get_petab_parameters(
13 | petab_problem: petab.Problem, as_lists: bool = False
14 | ) -> dict[str, float | Literal[ESTIMATE] | list[float | Literal[ESTIMATE]]]:
15 | """Convert PEtab problem parameters to the format in model space files.
16 |
17 | Args:
18 | petab_problem:
19 | The PEtab problem.
20 | as_lists:
21 | Each value will be provided inside a list object, similar to the
22 | format for multiple values for a parameter in a model subspace.
23 |
24 | Returns:
25 | Keys are parameter IDs, values are the nominal values for fixed
26 | parameters, or :const:`ESTIMATE` for estimated parameters.
27 | """
28 | values = np.array(petab_problem.x_nominal, dtype=object)
29 | values[petab_problem.x_free_indices] = ESTIMATE
30 | if as_lists:
31 | values = [[v] for v in values]
32 | return dict(zip(petab_problem.x_ids, values, strict=True))
33 |
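# Illustrative sketch (not part of the module): for a PEtab problem with a
# parameter k1 that is estimated and a parameter k2 fixed to 2.0, this returns
# {"k1": "estimate", "k2": 2.0}, or {"k1": ["estimate"], "k2": [2.0]} when
# called with as_lists=True.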
--------------------------------------------------------------------------------
/petab_select/problem.py:
--------------------------------------------------------------------------------
1 | """The model selection problem class."""
2 |
3 | from __future__ import annotations
4 |
5 | import copy
6 | import warnings
7 | from collections.abc import Callable, Iterable
8 | from functools import partial
9 | from os.path import relpath
10 | from pathlib import Path
11 | from typing import Annotated, Any
12 |
13 | import mkstd
14 | from pydantic import (
15 | BaseModel,
16 | Field,
17 | PlainSerializer,
18 | PrivateAttr,
19 | ValidationInfo,
20 | ValidatorFunctionWrapHandler,
21 | model_validator,
22 | )
23 |
24 | from .analyze import get_best
25 | from .candidate_space import CandidateSpace, method_to_candidate_space_class
26 | from .constants import (
27 | CRITERION,
28 | PREDECESSOR_MODEL,
29 | ROOT_PATH,
30 | TYPE_PATH,
31 | Criterion,
32 | Method,
33 | )
34 | from .model import Model, ModelHash, default_compare
35 | from .model_space import ModelSpace
36 | from .models import Models
37 |
38 | __all__ = [
39 | "Problem",
40 | "ProblemStandard",
41 | ]
42 |
43 |
44 | class State(BaseModel):
45 | """Carry the state of applying model selection methods to the problem."""
46 |
47 | models: Models = Field(default_factory=Models)
48 | """All calibrated models."""
49 | iteration: int = Field(default=0)
50 | """The latest iteration of model selection."""
51 |
52 | def increment_iteration(self) -> None:
53 | """Start the next iteration."""
54 | self.iteration += 1
55 |
56 | def reset(self) -> None:
57 | """Reset the state.
58 |
59 | N.B.: does not reset all state information, which currently also exists
60 | in other classes. Open a GitHub issue if you see unusual behavior. A
61 | quick fix is to simply recreate the PEtab Select problem, and any other
62 | objects that you use, e.g. the candidate space, whenever you need a
63 | full reset.
64 | https://github.com/PEtab-dev/petab_select/issues
65 | """
66 | # FIXME state information is currently distributed across multiple
67 | # classes, e.g. exclusions in model subspaces and candidate spaces.
68 | # move all state information here.
69 | self.models = Models()
70 | self.iteration = 0
71 |
72 |
73 | class Problem(BaseModel):
74 | """The model selection problem."""
75 |
76 | format_version: str = Field(default="1.0.0")
77 | """The file format version."""
78 | criterion: Annotated[
79 | Criterion, PlainSerializer(lambda x: x.value, return_type=str)
80 | ]
81 | """The criterion used to compare models."""
82 | method: Annotated[
83 | Method, PlainSerializer(lambda x: x.value, return_type=str)
84 | ]
85 | """The method used to search the model space."""
86 | model_space_files: list[Path]
87 | """The files that define the model space."""
88 | candidate_space_arguments: dict[str, Any] = Field(default_factory=dict)
89 | """Method-specific arguments.
90 |
91 | These are forwarded to the candidate space constructor.
92 | """
93 |
94 | _compare: Callable[[Model, Model], bool] = PrivateAttr(default=None)
95 | """The method by which models are compared."""
96 | _state: State = PrivateAttr(default_factory=State)
97 |
98 | @model_validator(mode="wrap")
99 | def _check_input(
100 | data: dict[str, Any] | Problem,
101 | handler: ValidatorFunctionWrapHandler,
102 | info: ValidationInfo,
103 | ) -> Problem:
104 | if isinstance(data, Problem):
105 | return data
106 |
107 | compare = data.pop("compare", None) or data.pop("_compare", None)
108 | if "state" in data:
109 | data["_state"] = data["state"]
110 | root_path = Path(data.pop(ROOT_PATH, ""))
111 |
112 | problem = handler(data)
113 |
114 | if compare is None:
115 | compare = partial(default_compare, criterion=problem.criterion)
116 | problem._compare = compare
117 |
118 | problem._model_space = ModelSpace.load(
119 | [
120 | root_path / model_space_file
121 | for model_space_file in problem.model_space_files
122 | ]
123 | )
124 |
125 | if PREDECESSOR_MODEL in problem.candidate_space_arguments:
126 | problem.candidate_space_arguments[PREDECESSOR_MODEL] = (
127 | root_path
128 | / problem.candidate_space_arguments[PREDECESSOR_MODEL]
129 | )
130 |
131 | return problem
132 |
133 | @property
134 | def state(self) -> State:
135 | return self._state
136 |
137 | @staticmethod
138 | def from_yaml(filename: TYPE_PATH) -> Problem:
139 | """Load a problem from a YAML file."""
140 | problem = ProblemStandard.load_data(
141 | filename=filename,
142 | root_path=Path(filename).parent,
143 | )
144 | return problem
145 |
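    # Illustrative usage sketch (the filename is a placeholder):
    #   problem = Problem.from_yaml("petab_select_problem.yaml")
    #   print(problem.criterion, problem.method)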
146 | def to_yaml(
147 | self,
148 | filename: str | Path,
149 | ) -> None:
150 | """Save a problem to a YAML file.
151 |
152 | All paths will be made relative to the ``filename`` directory.
153 |
154 | Args:
155 | filename:
156 | Location of the YAML file.
157 | """
158 | root_path = Path(filename).parent
159 |
160 | problem = copy.deepcopy(self)
161 | problem.model_space_files = [
162 | relpath(
163 | model_space_file.resolve(),
164 | start=root_path,
165 | )
166 | for model_space_file in problem.model_space_files
167 | ]
168 | ProblemStandard.save_data(data=problem, filename=filename)
169 |
170 | def save(
171 | self,
172 | directory: str | Path,
173 | ) -> None:
174 | """Save all data (problem and model space) to a ``directory``.
175 |
176 | Inside the directory, two files will be created:
177 | (1) ``petab_select_problem.yaml``, and
178 | (2) ``model_space.tsv``.
179 |
180 | All paths will be made relative to the ``directory``.
181 | """
182 | directory = Path(directory)
183 | directory.mkdir(exist_ok=True, parents=True)
184 |
185 | problem = copy.deepcopy(self)
186 | problem.model_space_files = ["model_space.tsv"]
187 | if PREDECESSOR_MODEL in problem.candidate_space_arguments:
188 | problem.candidate_space_arguments[PREDECESSOR_MODEL] = relpath(
189 | problem.candidate_space_arguments[PREDECESSOR_MODEL],
190 | start=directory,
191 | )
192 | ProblemStandard.save_data(
193 | data=problem, filename=directory / "petab_select_problem.yaml"
194 | )
195 |
196 | problem.model_space.save(filename=directory / "model_space.tsv")
197 |
198 | @property
199 | def compare(self):
200 | return self._compare
201 |
202 | @property
203 | def model_space(self):
204 | return self._model_space
205 |
206 | def __str__(self):
207 | return (
208 | f"Method: {self.method}\n"
209 | f"Criterion: {self.criterion}\n"
210 | f"Format version: {self.format_version}\n"
211 | )
212 |
213 | def exclude_models(
214 | self,
215 | models: Models,
216 | ) -> None:
217 | """Exclude models from the model space.
218 |
219 | Args:
220 | models:
221 | The models.
222 | """
223 | self.model_space.exclude_models(models)
224 |
225 | def exclude_model_hashes(
226 | self,
227 | model_hashes: Iterable[str],
228 | ) -> None:
229 | """Exclude models from the model space, by model hashes.
230 |
231 | Args:
232 | model_hashes:
233 | The model hashes.
234 | """
235 | # FIXME think about design here -- should we have exclude_models here?
236 | warnings.warn(
237 | "Use `exclude_models` instead. It also accepts hashes.",
238 | DeprecationWarning,
239 | stacklevel=2,
240 | )
241 | self.exclude_models(models=Models(models=model_hashes, problem=self))
242 |
243 | def get_best(
244 | self,
245 | models: Models,
246 | # models: list[Model] | dict[ModelHash, Model] | None,
247 |         criterion: str | None = None,
248 | compute_criterion: bool = False,
249 | ) -> Model:
250 | """Get the best model from a collection of models.
251 |
252 | The best model is selected based on the selection problem's criterion.
253 |
254 | Args:
255 | models:
256 | The models.
257 | criterion:
258 | The criterion. Defaults to the problem criterion.
259 | compute_criterion:
260 | Whether to try computing criterion values, if sufficient
261 | information is available (e.g., likelihood and number of
262 | parameters, to compute AIC).
263 |
264 | Returns:
265 | The best model.
266 | """
267 | warnings.warn(
268 | "Use ``petab_select.ui.get_best`` instead.",
269 | DeprecationWarning,
270 | stacklevel=2,
271 | )
272 | if criterion is None:
273 | criterion = self.criterion
274 |
275 | return get_best(
276 | models=models,
277 | criterion=criterion,
278 | compare=self.compare,
279 | compute_criterion=compute_criterion,
280 | )
281 |
282 | def model_hash_to_model(self, model_hash: str | ModelHash) -> Model:
283 | """Get the model that matches a model hash.
284 |
285 | Args:
286 | model_hash:
287 | The model hash.
288 |
289 | Returns:
290 | The model.
291 | """
292 | return ModelHash.from_hash(model_hash).get_model(
293 | petab_select_problem=self,
294 | )
295 |
296 | def get_model(
297 | self, model_subspace_id: str, model_subspace_indices: list[int]
298 | ) -> Model:
299 | return self.model_space.model_subspaces[
300 | model_subspace_id
301 | ].indices_to_model(model_subspace_indices)
302 |
303 | def new_candidate_space(
304 | self,
305 | *args,
306 | method: Method = None,
307 | **kwargs,
308 | ) -> CandidateSpace:
309 | """Construct a new candidate space.
310 |
311 | Args:
312 | args, kwargs:
313 | Arguments are passed to the candidate space constructor.
314 | method:
315 | The model selection method.
316 | """
317 | if method is None:
318 | method = self.method
319 | kwargs[CRITERION] = kwargs.get(CRITERION, self.criterion)
320 | candidate_space_class = method_to_candidate_space_class(method)
321 | candidate_space_arguments = (
322 | candidate_space_class.read_arguments_from_yaml_dict(
323 | self.candidate_space_arguments
324 | )
325 | )
326 | candidate_space_kwargs = {
327 | **candidate_space_arguments,
328 | **kwargs,
329 | }
330 | candidate_space = candidate_space_class(
331 | *args,
332 | **candidate_space_kwargs,
333 | )
334 | return candidate_space
335 |
336 |
337 | ProblemStandard = mkstd.YamlStandard(model=Problem)
338 |
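A minimal usage sketch for the Problem class above (illustrative only: the YAML path is a placeholder, and the calls mirror those used elsewhere in this repository, e.g. in test/candidate_space/test_famos.py):

import petab_select

# Load a PEtab Select problem from its YAML definition (placeholder path).
problem = petab_select.Problem.from_yaml("petab_select_problem.yaml")

# Print a short summary of the problem: method, criterion, format version.
print(problem)

# Construct a candidate space using the problem's own method and criterion.
candidate_space = problem.new_candidate_space()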
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["setuptools>=64", "setuptools-scm>=8"]
3 | build-backend = "setuptools.build_meta"
4 |
5 | [project]
6 | name = "petab_select"
7 | dynamic = ["version"]
8 | maintainers = [
9 | {name = "Dilan Pathirana", email = "dilan.pathirana@uni-bonn.de"},
10 | ]
11 | authors = [
12 | {name = "The PEtab Select developers"},
13 | ]
14 | description = "PEtab Select: an extension to PEtab for model selection."
15 | readme = "README.md"
16 | requires-python = ">=3.10"
17 | license = {text = "BSD-3-Clause"}
18 | dependencies = [
19 | "more-itertools>=10.5.0",
20 | "numpy>=1.15.1",
21 | "pandas>=1.2.0",
22 | "petab>=0.5.0",
23 | "pyyaml>=6.0.2",
24 | "click>=8.1.7",
25 | "dill>=0.3.9",
26 | "mkstd>=0.0.8",
27 | "networkx>=3.2",
28 | ]
29 | [project.optional-dependencies]
30 | plot = [
31 | "matplotlib>=2.2.3",
32 | "upsetplot",
33 | ]
34 | test = [
35 | "pytest >= 5.4.3",
36 | "pytest-cov >= 2.10.0",
37 | "tox >= 3.12.4",
38 | ]
39 | doc = [
40 | "sphinx>=8.1.3",
41 | "sphinxcontrib-napoleon>=0.7",
42 | "sphinx-markdown-tables>=0.0.15",
43 | "sphinx-rtd-theme>=0.5.1",
44 | "recommonmark>=0.7.1",
45 | # pin until ubuntu comes with newer pandoc:
46 | # /home/docs/checkouts/readthedocs.org/user_builds/petab-select/envs/63/lib/python3.11/site-packages/nbsphinx/__init__.py:1058: RuntimeWarning: You are using an unsupported version of pandoc (2.9.2.1).
47 | # Your version must be at least (2.14.2) but less than (4.0.0).
48 | "nbsphinx>=0.9.5",
49 | "pandoc>=2.4",
50 | "nbconvert>=7.16.4",
51 | "ipykernel>= 6.23.1",
52 | "ipython>=7.21.0",
53 | "readthedocs-sphinx-ext>=2.2.5",
54 | "sphinx-autodoc-typehints",
55 | "petab_select[plot]",
56 | ]
57 |
58 | [project.scripts]
59 | petab_select = "petab_select.cli:cli"
60 |
61 | [tool.setuptools_scm]
62 |
63 | [tool.ruff]
64 | line-length = 79
65 | exclude = ["amici_models"]
66 | extend-include = ["*.ipynb"]
67 | lint.ignore = [
68 | # FIXME: we should be able to remove most of those
69 | "D103", # Missing docstring in public function
70 | "S101", # Use of assert detected
71 | "E501", # Line too long
72 | "F403", # star import
73 | "T201", # print statement
74 | "S301", # pickle module used
75 | "S102", # Use of exec detected
76 | "S307", # Use of possibly insecure function
77 | "B006",
78 | "E722",
79 | "B904",
80 | "B007",
81 | "F841",
82 | ]
83 | lint.select = [
84 | "F", # Pyflakes
85 | "I", # isort
86 | # "D", # pydocstyle (PEP 257) FIXME enable after https://github.com/PEtab-dev/petab_select/pull/67
87 | "S", # flake8-bandit
88 | "B", # flake8-bugbear
89 | "C4", # flake8-comprehensions
90 | "T20", # flake8-print
91 | "W", # pycodestyle Warnings
92 | "E", # pycodestyle Errors
93 | "UP", # pyupgrade
94 | # "ANN", # flakes-annotations
95 | ]
96 | [tool.ruff.lint.pydocstyle]
97 | convention = "pep257"
98 |
--------------------------------------------------------------------------------
/requirements_dev.txt:
--------------------------------------------------------------------------------
1 | git+https://github.com/ICB-DCM/pyPESTO.git@develop#egg=pypesto
2 | tox >= 3.12.4
3 | pre-commit >= 2.10.1
4 | flake8 >= 4.0.1
5 | pytest
6 | twine
7 | build
8 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 | from setuptools import find_packages, setup
4 |
5 | org = "PEtab-dev"
6 | repo = "petab_select"
7 |
8 |
9 | def read(fname):
10 | """Read a file."""
11 | return open(fname).read()
12 |
13 |
14 | def absolute_links(txt):
15 | """Replace relative GitHub links with absolute links."""
16 | raw_base = f"(https://raw.githubusercontent.com/{org}/{repo}/main/"
17 | embedded_base = f"(https://github.com/{org}/{repo}/tree/main/"
18 | # iterate over links
19 | for var in re.findall(r"\[.*?\]\((?!http).*?\)", txt):
20 | if re.match(r".*?\.(png|svg)\)", var):
21 | # link to raw file
22 | rep = var.replace("(", raw_base)
23 | else:
24 | # link to github embedded file
25 | rep = var.replace("(", embedded_base)
26 | txt = txt.replace(var, rep)
27 | return txt
28 |
29 |
30 | # project metadata
31 | # noinspection PyUnresolvedReferences
32 | setup(
33 | long_description=absolute_links(read("README.md")),
34 | long_description_content_type="text/markdown",
35 | url=f"https://github.com/{org}/{repo}",
36 | packages=find_packages(exclude=["doc*", "test*"]),
37 | include_package_data=True,
38 | )
39 |
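An illustration of the link rewriting above (a sketch that assumes the definitions of org, repo, and absolute_links from this file; the example links are hypothetical):

# Image links (.png/.svg) are redirected to raw.githubusercontent.com.
print(absolute_links("[logo](doc/logo/logo-wide.svg)"))
# [logo](https://raw.githubusercontent.com/PEtab-dev/petab_select/main/doc/logo/logo-wide.svg)

# All other relative links are redirected to the embedded GitHub view.
print(absolute_links("[examples](doc/examples/README.md)"))
# [examples](https://github.com/PEtab-dev/petab_select/tree/main/doc/examples/README.md)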
--------------------------------------------------------------------------------
/test/.gitignore:
--------------------------------------------------------------------------------
1 | */output
2 |
--------------------------------------------------------------------------------
/test/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PEtab-dev/petab_select/68a9d5f436e5dc67516f1c87cfc9e017cc744ed2/test/__init__.py
--------------------------------------------------------------------------------
/test/analyze/input/models.yaml:
--------------------------------------------------------------------------------
1 | - criteria:
2 | AIC: 5
3 | model_id: model_1
4 | model_subspace_id: M
5 | model_subspace_indices:
6 | - 0
7 | - 1
8 | - 1
9 | iteration: 1
10 | parameters:
11 | k1: 0.2
12 | k2: estimate
13 | k3: estimate
14 | estimated_parameters:
15 | k2: 0.15
16 | k3: 0.0
17 | model_subspace_petab_yaml: ../../../doc/examples/model_selection/petab_problem.yaml
18 | predecessor_model_hash: dummy_p0-0
19 | - criteria:
20 | AIC: 4
21 | model_id: model_2
22 | model_subspace_id: M
23 | model_subspace_indices:
24 | - 1
25 | - 1
26 | - 0
27 | iteration: 5
28 | parameters:
29 | k1: estimate
30 | k2: estimate
31 | k3: 0
32 | model_subspace_petab_yaml: ../../../doc/examples/model_selection/petab_problem.yaml
33 | predecessor_model_hash: virtual_initial_model-
34 | - criteria:
35 | AIC: 3
36 | model_id: model_3
37 | model_subspace_id: M2
38 | model_subspace_indices:
39 | - 0
40 | - 1
41 | - 1
42 | iteration: 1
43 | parameters:
44 | k1: 0.2
45 | k2: estimate
46 | k3: estimate
47 | estimated_parameters:
48 | k2: 0.15
49 | k3: 0.0
50 | model_subspace_petab_yaml: ../../../doc/examples/model_selection/petab_problem.yaml
51 | predecessor_model_hash: virtual_initial_model-
52 | - criteria:
53 | AIC: 2
54 | model_id: model_4
55 | model_subspace_id: M2
56 | model_subspace_indices:
57 | - 1
58 | - 1
59 | - 0
60 | iteration: 2
61 | parameters:
62 | k1: estimate
63 | k2: estimate
64 | k3: 0
65 | model_subspace_petab_yaml: ../../../doc/examples/model_selection/petab_problem.yaml
66 | predecessor_model_hash: virtual_initial_model-
67 |
--------------------------------------------------------------------------------
/test/analyze/test_analyze.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | import numpy as np
4 | import pytest
5 |
6 | from petab_select import (
7 | VIRTUAL_INITIAL_MODEL,
8 | Criterion,
9 | Models,
10 | analyze,
11 | )
12 |
13 | base_dir = Path(__file__).parent
14 |
15 | DUMMY_HASH = "dummy_p0-0"
16 |
17 |
18 | @pytest.fixture
19 | def models() -> Models:
20 | return Models.from_yaml(base_dir / "input" / "models.yaml")
21 |
22 |
23 | def test_group_by_predecessor_model(models: Models) -> None:
24 | """Test ``analyze.group_by_predecessor_model``."""
25 | groups = analyze.group_by_predecessor_model(models)
26 | # Expected groups
27 | assert len(groups) == 2
28 | assert VIRTUAL_INITIAL_MODEL.hash in groups
29 | assert DUMMY_HASH in groups
30 | # Expected group members
31 | assert len(groups[DUMMY_HASH]) == 1
32 | assert "M-011" in groups[DUMMY_HASH]
33 | assert len(groups[VIRTUAL_INITIAL_MODEL.hash]) == 3
34 | assert "M-110" in groups[VIRTUAL_INITIAL_MODEL.hash]
35 | assert "M2-011" in groups[VIRTUAL_INITIAL_MODEL.hash]
36 | assert "M2-110" in groups[VIRTUAL_INITIAL_MODEL.hash]
37 |
38 |
39 | def test_group_by_iteration(models: Models) -> None:
40 | """Test ``analyze.group_by_iteration``."""
41 | groups = analyze.group_by_iteration(models)
42 | # Expected groups
43 | assert len(groups) == 3
44 | assert 1 in groups
45 | assert 2 in groups
46 | assert 5 in groups
47 | # Expected group members
48 | assert len(groups[1]) == 2
49 | assert "M-011" in groups[1]
50 | assert "M2-011" in groups[1]
51 | assert len(groups[2]) == 1
52 | assert "M2-110" in groups[2]
53 | assert len(groups[5]) == 1
54 | assert "M-110" in groups[5]
55 |
56 |
57 | def test_get_best_by_iteration(models: Models) -> None:
58 | """Test ``analyze.get_best_by_iteration``."""
59 | groups = analyze.get_best_by_iteration(models, criterion=Criterion.AIC)
60 | # Expected groups
61 | assert len(groups) == 3
62 | assert 1 in groups
63 | assert 2 in groups
64 | assert 5 in groups
65 | # Expected best models
66 | assert groups[1].hash == "M2-011"
67 | assert groups[2].hash == "M2-110"
68 | assert groups[5].hash == "M-110"
69 |
70 |
71 | def test_relative_criterion_values(models: Models) -> None:
72 | """Test ``analyze.get_relative_criterion_values``."""
73 | # TODO move to test_models.py?
74 | criterion_values = models.get_criterion(criterion=Criterion.AIC)
75 | test_value = models.get_criterion(criterion=Criterion.AIC, relative=True)
76 | expected_value = [
77 | criterion_value - min(criterion_values)
78 | for criterion_value in criterion_values
79 | ]
80 | assert test_value == expected_value
81 |
82 |
83 | def test_compute_weights(models: Models) -> None:
84 | """Test ``analyze.compute_weights``."""
85 | criterion_values = np.array(
86 | models.get_criterion(criterion=Criterion.AIC, relative=True)
87 | )
88 | expected_weights = (
89 | np.exp(-0.5 * criterion_values) / np.exp(-0.5 * criterion_values).sum()
90 | )
91 | test_weights = analyze.compute_weights(
92 | models=models, criterion=Criterion.AIC
93 | )
94 | np.testing.assert_allclose(test_weights, expected_weights)
95 |
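For reference, a standalone sketch of the Akaike-weight computation exercised by test_compute_weights above (the relative AIC values are made up for illustration):

import numpy as np

# Hypothetical relative AIC values, delta_i = AIC_i - min_j AIC_j.
delta_aic = np.array([3.0, 2.0, 1.0, 0.0])

# Akaike weights: w_i = exp(-delta_i / 2) / sum_j exp(-delta_j / 2).
weights = np.exp(-0.5 * delta_aic) / np.exp(-0.5 * delta_aic).sum()
print(weights)  # the model with delta = 0 receives the largest weight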
--------------------------------------------------------------------------------
/test/candidate_space/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PEtab-dev/petab_select/68a9d5f436e5dc67516f1c87cfc9e017cc744ed2/test/candidate_space/__init__.py
--------------------------------------------------------------------------------
/test/candidate_space/input/famos_synthetic/petab/FAMoS_2019.xml:
--------------------------------------------------------------------------------
(SBML model content not preserved in this snapshot.)
--------------------------------------------------------------------------------
/test/candidate_space/input/famos_synthetic/petab/FAMoS_2019_problem.yaml:
--------------------------------------------------------------------------------
1 | format_version: 1
2 | parameter_file: parameters_FAMoS_2019.tsv
3 | problems:
4 | - condition_files:
5 | - experimental_conditions_FAMoS_2019.tsv
6 | measurement_files:
7 | - FAMos_2019_synth_measurements_0.1noise.tsv
8 | observable_files:
9 | - observables_FAMoS_2019.tsv
10 | sbml_files:
11 | - FAMoS_2019.xml
12 |
--------------------------------------------------------------------------------
/test/candidate_space/input/famos_synthetic/petab/FAMos_2019_synth_measurements_0.1noise.tsv:
--------------------------------------------------------------------------------
1 | observableId preequilibrationConditionId simulationConditionId measurement time
2 | obs_A model1_data1 90.4052240225917 1
3 | obs_A model1_data1 81.9155490462083 2
4 | obs_A model1_data1 74.118638599112 3
5 | obs_A model1_data1 67.076299526445 4
6 | obs_A model1_data1 60.7313867042841 5
7 | obs_A model1_data1 54.9758314698805 6
8 | obs_A model1_data1 49.6249591089255 7
9 | obs_A model1_data1 45.0169961394269 8
10 | obs_A model1_data1 40.6823757392052 9
11 | obs_A model1_data1 36.9752363960308 10
12 | obs_A model1_data1 33.2745079749387 11
13 | obs_A model1_data1 30.0742660095655 12
14 | obs_A model1_data1 27.3931545498098 13
15 | obs_A model1_data1 24.670937708162 14
16 | obs_A model1_data1 22.1726221101129 15
17 | obs_A model1_data1 20.2857836657795 16
18 | obs_A model1_data1 18.3124205334985 17
19 | obs_A model1_data1 16.4529450566483 18
20 | obs_A model1_data1 14.9595322104523 19
21 | obs_A model1_data1 13.6178352834443 20
22 | obs_A model1_data1 12.2698759532401 21
23 | obs_A model1_data1 10.9117793381776 22
24 | obs_A model1_data1 10.0075269655117 23
25 | obs_A model1_data1 9.12894000775891 24
26 | obs_A model1_data1 8.17552471715755 25
27 | obs_A model1_data1 7.58832637116343 26
28 | obs_A model1_data1 6.81481747306769 27
29 | obs_A model1_data1 6.01069041893428 28
30 | obs_A model1_data1 5.4656594358222 29
31 | obs_A model1_data1 5.09509789405046 30
32 | obs_B model1_data1 9.0007018297756 1
33 | obs_B model1_data1 15.6885551330214 2
34 | obs_B model1_data1 20.5555275008022 3
35 | obs_B model1_data1 24.3505896189885 4
36 | obs_B model1_data1 26.8387134615526 5
37 | obs_B model1_data1 28.5579108201491 6
38 | obs_B model1_data1 29.3386378993961 7
39 | obs_B model1_data1 29.6842414879669 8
40 | obs_B model1_data1 29.6012601902716 9
41 | obs_B model1_data1 29.1563989173142 10
42 | obs_B model1_data1 28.0915597379842 11
43 | obs_B model1_data1 27.1771823929323 12
44 | obs_B model1_data1 26.1996449432414 13
45 | obs_B model1_data1 24.8894795720939 14
46 | obs_B model1_data1 23.7958092736297 15
47 | obs_B model1_data1 22.2750402794255 16
48 | obs_B model1_data1 20.9051510979545 17
49 | obs_B model1_data1 19.60915015308 18
50 | obs_B model1_data1 18.3055488298787 19
51 | obs_B model1_data1 17.1316305109407 20
52 | obs_B model1_data1 15.9130210671752 21
53 | obs_B model1_data1 14.6561680021543 22
54 | obs_B model1_data1 13.7379948520348 23
55 | obs_B model1_data1 12.688188384814 24
56 | obs_B model1_data1 11.6886236992598 25
57 | obs_B model1_data1 10.9831245313639 26
58 | obs_B model1_data1 10.0710098586007 27
59 | obs_B model1_data1 9.27665785065283 28
60 | obs_B model1_data1 8.47256283413784 29
61 | obs_B model1_data1 7.71049462935505 30
62 | obs_C model1_data1 0.232989094821477 1
63 | obs_C model1_data1 0.993499632351299 2
64 | obs_C model1_data1 2.08965948178993 3
65 | obs_C model1_data1 3.45518423336059 4
66 | obs_C model1_data1 4.95084252305398 5
67 | obs_C model1_data1 7.06264408709022 6
68 | obs_C model1_data1 9.2858902955231 7
69 | obs_C model1_data1 11.7393663816763 8
70 | obs_C model1_data1 14.606714600774 9
71 | obs_C model1_data1 17.6742934532886 10
72 | obs_C model1_data1 20.9938653132865 11
73 | obs_C model1_data1 24.5937563937411 12
74 | obs_C model1_data1 28.7675196186448 13
75 | obs_C model1_data1 33.0775441425221 14
76 | obs_C model1_data1 37.8341206927535 15
77 | obs_C model1_data1 43.1532681672162 16
78 | obs_C model1_data1 48.7396795490924 17
79 | obs_C model1_data1 54.9712638711448 18
80 | obs_C model1_data1 61.7750014382742 19
81 | obs_C model1_data1 69.2063960173405 20
82 | obs_C model1_data1 77.1551044736292 21
83 | obs_C model1_data1 86.3278593027893 22
84 | obs_C model1_data1 95.9685934407861 23
85 | obs_C model1_data1 106.714805376763 24
86 | obs_C model1_data1 118.817854048206 25
87 | obs_C model1_data1 131.819296657749 26
88 | obs_C model1_data1 146.388431918277 27
89 | obs_C model1_data1 161.981657265843 28
90 | obs_C model1_data1 179.473855137439 29
91 | obs_C model1_data1 198.770987150463 30
92 | obs_D model1_data1 0.731687120540101 1
93 | obs_D model1_data1 3.42150866001713 2
94 | obs_D model1_data1 7.13277164777543 3
95 | obs_D model1_data1 11.3626537046561 4
96 | obs_D model1_data1 16.750832125073 5
97 | obs_D model1_data1 22.330317912854 6
98 | obs_D model1_data1 28.0551756808811 7
99 | obs_D model1_data1 33.8335955902643 8
100 | obs_D model1_data1 39.9060773338183 9
101 | obs_D model1_data1 45.7358426721851 10
102 | obs_D model1_data1 51.3568108138562 11
103 | obs_D model1_data1 57.0227486341004 12
104 | obs_D model1_data1 62.3225424428748 13
105 | obs_D model1_data1 67.269121150386 14
106 | obs_D model1_data1 72.0226608118994 15
107 | obs_D model1_data1 76.7924085667823 16
108 | obs_D model1_data1 80.9736216012929 17
109 | obs_D model1_data1 85.2366010964468 18
110 | obs_D model1_data1 88.9322839411142 19
111 | obs_D model1_data1 92.2912691178331 20
112 | obs_D model1_data1 95.8866770717466 21
113 | obs_D model1_data1 98.6905660097913 22
114 | obs_D model1_data1 101.836946624983 23
115 | obs_D model1_data1 104.3108526573 24
116 | obs_D model1_data1 106.840673767566 25
117 | obs_D model1_data1 108.98933023116 26
118 | obs_D model1_data1 111.29006614616 27
119 | obs_D model1_data1 113.111729230062 28
120 | obs_D model1_data1 114.967340516731 29
121 | obs_D model1_data1 116.555255981599 30
122 |
--------------------------------------------------------------------------------
/test/candidate_space/input/famos_synthetic/petab/experimental_conditions_FAMoS_2019.tsv:
--------------------------------------------------------------------------------
1 | conditionId
2 | model1_data1
3 |
--------------------------------------------------------------------------------
/test/candidate_space/input/famos_synthetic/petab/observables_FAMoS_2019.tsv:
--------------------------------------------------------------------------------
1 | observableId observableFormula noiseFormula observableTransformation noiseDistribution
2 | obs_A A 1 lin normal
3 | obs_B B 1 lin normal
4 | obs_C C 1 lin normal
5 | obs_D D 1 lin normal
6 |
--------------------------------------------------------------------------------
/test/candidate_space/input/famos_synthetic/petab/parameters_FAMoS_2019.tsv:
--------------------------------------------------------------------------------
1 | parameterId parameterScale lowerBound upperBound estimate nominalValue
2 | ro_A lin -5 5 0 0
3 | ro_B lin -5 5 1 0.1
4 | ro_C lin -5 5 1 0.1
5 | ro_D lin -5 5 0 0
6 | mu_AB log10 1E-15 5 1 0.1
7 | mu_BA log10 1E-15 5 0 0
8 | mu_AC log10 1E-15 5 0 0
9 | mu_CA log10 1E-15 5 0 0
10 | mu_AD log10 1E-15 5 0 0
11 | mu_DA log10 1E-15 5 0 0
12 | mu_BC log10 1E-15 5 1 0.05
13 | mu_CB log10 1E-15 5 0 0
14 | mu_BD log10 1E-15 5 1 0.2
15 | mu_DB log10 1E-15 5 0 0
16 | mu_CD log10 1E-15 5 0 0
17 | mu_DC log10 1E-15 5 0 0
18 |
--------------------------------------------------------------------------------
/test/candidate_space/input/famos_synthetic/select/FAMoS_2019_petab_select_problem.yaml:
--------------------------------------------------------------------------------
1 | version: beta_1
2 | criterion: AICc
3 | method: famos
4 | model_space_files:
5 | - model_space_FAMoS_2019.tsv
6 | candidate_space_arguments:
7 | critical_parameter_sets: []
8 | method_scheme:
9 | - previous_methods:
10 | - backward
11 | - forward
12 | next_method: lateral
13 | - previous_methods:
14 | - forward
15 | - backward
16 | next_method: lateral
17 | - previous_methods:
18 | - backward
19 | - lateral
20 | next_method: null
21 | - previous_methods:
22 | - forward
23 | - lateral
24 | next_method: null
25 | - previous_methods:
26 | - forward
27 | next_method: backward
28 | - previous_methods:
29 | - backward
30 | next_method: forward
31 | - previous_methods:
32 | - lateral
33 | next_method: forward
34 | - previous_methods:
35 | - most_distant
36 | next_method: lateral
37 | - previous_methods: null
38 | next_method: lateral
39 | n_reattempts: 1
40 | predecessor_model: ../test_files/predecessor_model.yaml
41 | consecutive_laterals: true
42 | swap_parameter_sets:
43 | - - ro_A
44 | - mu_BA
45 | - mu_CA
46 | - mu_DA
47 | - - ro_B
48 | - mu_AB
49 | - mu_CB
50 | - mu_DB
51 | - - ro_C
52 | - mu_AC
53 | - mu_BC
54 | - mu_DC
55 | - - ro_D
56 | - mu_AD
57 | - mu_BD
58 | - mu_CD
59 | summary_tsv: output_famos/summary.tsv
60 |
--------------------------------------------------------------------------------
/test/candidate_space/input/famos_synthetic/select/model_space_FAMoS_2019.tsv:
--------------------------------------------------------------------------------
1 | model_subspace_id model_subspace_petab_yaml ro_A ro_B ro_C ro_D mu_AB mu_BA mu_AC mu_CA mu_AD mu_DA mu_BC mu_CB mu_BD mu_DB mu_CD mu_DC
2 | model_subspace_1 ../petab/FAMoS_2019_problem.yaml 0;estimate 0;estimate 0;estimate 0;estimate 0;estimate 0;estimate 0;estimate 0;estimate 0;estimate 0;estimate 0;estimate 0;estimate 0;estimate 0;estimate 0;estimate 0;estimate
3 |
--------------------------------------------------------------------------------
/test/candidate_space/input/famos_synthetic/test_files/predecessor_model.yaml:
--------------------------------------------------------------------------------
1 | model_subspace_id: model_subspace_1
2 | model_subspace_indices:
3 | - 1
4 | - 1
5 | - 0
6 | - 0
7 | - 1
8 | - 1
9 | - 0
10 | - 1
11 | - 1
12 | - 1
13 | - 0
14 | - 0
15 | - 0
16 | - 1
17 | - 1
18 | - 1
19 | criteria:
20 | AIC: 30330.782621349786
21 | AICc: 30332.80096997364
22 | BIC: 30358.657538777607
23 | NLLH: 15155.391310674893
24 | model_hash: model_subspace_1-1100110111000111
25 | model_subspace_petab_yaml: ../petab/FAMoS_2019_problem.yaml
26 | estimated_parameters:
27 | mu_AB: 0.09706971737957297
28 | mu_AD: -0.6055359156893474
29 | mu_BA: 0.6989700040781575
30 | mu_CA: -13.545121478780585
31 | mu_CD: -13.955162965672203
32 | mu_DA: -13.405909047226377
33 | mu_DB: -13.402598631022197
34 | mu_DC: -1.1619119214640863
35 | ro_A: -1.6431508614147425
36 | ro_B: 2.9912966824709097
37 | iteration: null
38 | model_id: model_subspace_1-1100110111000111
39 | parameters:
40 | ro_A: estimate
41 | ro_B: estimate
42 | ro_C: 0
43 | ro_D: 0
44 | mu_AB: estimate
45 | mu_BA: estimate
46 | mu_AC: 0
47 | mu_CA: estimate
48 | mu_AD: estimate
49 | mu_DA: estimate
50 | mu_BC: 0
51 | mu_CB: 0
52 | mu_BD: 0
53 | mu_DB: estimate
54 | mu_CD: estimate
55 | mu_DC: estimate
56 | predecessor_model_hash: virtual_initial_model-
57 |
--------------------------------------------------------------------------------
/test/candidate_space/input/famos_synthetic/test_files/regenerate_model_hashes.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | import pandas as pd
4 |
5 | import petab_select
6 |
7 | script_dir = Path(__file__).parent.resolve()
8 |
9 | petab_select_problem = petab_select.Problem.from_yaml(
10 | script_dir / "../select/FAMoS_2019_petab_select_problem.yaml"
11 | )
12 | df = pd.read_csv(script_dir / "calibration_results.tsv", sep="\t", dtype=str)
13 |
14 | model_hashes = list(df.model_hash)
15 | new_model_hashes = [
16 | petab_select_problem.get_model(
17 | model_subspace_id="model_subspace_1",
18 | model_subspace_indices=[
19 | int(s)
20 | for s in petab_select.model.ModelHash.from_hash(
21 | model_hash
22 | ).unhash_model_subspace_indices()
23 | ],
24 | ).get_hash()
25 | for model_hash in model_hashes
26 | ]
27 |
28 | df["model_hash"] = new_model_hashes
29 | df.to_csv(script_dir / "calibration_results.tsv", sep="\t", index=False)
30 |
--------------------------------------------------------------------------------
/test/candidate_space/test_candidate_space.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | import pandas as pd
4 | import pytest
5 |
6 | # from petab_select.candidate_space import (
7 | # BackwardCandidateSpace,
8 | # BruteForceCandidateSpace,
9 | # ForwardCandidateSpace,
10 | # LateralCandidateSpace,
11 | # )
12 | from petab_select.constants import (
13 | ESTIMATE,
14 | )
15 | from petab_select.model_space import ModelSpace
16 |
17 |
18 | @pytest.fixture
19 | def ordered_model_parameterizations():
20 | good_models_ascending = [
21 | # forward
22 | "00000",
23 | "10000",
24 | "11000",
25 | "11100",
26 | "11110",
27 | # backward
28 | "01110",
29 | "01100",
30 | # forward
31 | "01101",
32 | "01111",
33 | # backward
34 | "00111",
35 | "00011",
36 | ]
37 | bad_models = [
38 | "01011",
39 | "11011",
40 | ]
41 |
42 | # All good models are unique
43 | assert len(set(good_models_ascending)) == len(good_models_ascending)
44 | # All bad models are unique
45 | assert len(set(bad_models)) == len(bad_models)
46 | # No models are defined as both bad and good.
47 | assert not set(good_models_ascending).intersection(bad_models)
48 |
49 | return good_models_ascending, bad_models
50 |
51 |
52 | @pytest.fixture
53 | def calibrated_model_space(ordered_model_parameterizations):
54 | good_models_ascending, bad_models = ordered_model_parameterizations
55 |
56 | # As good models are ordered ascending by "goodness", and the criterion
57 | # value decreases for better models, the criterion decreases as the index increases.
58 | good_model_criteria = {
59 | model: 100 - index for index, model in enumerate(good_models_ascending)
60 | }
61 | # All bad models are currently set to the same "bad" criterion value.
62 | bad_model_criteria = {model: 1000 for model in bad_models}
63 |
64 | model_criteria = {
65 | **good_model_criteria,
66 | **bad_model_criteria,
67 | }
68 | return model_criteria
69 |
70 |
71 | @pytest.fixture
72 | def model_space(calibrated_model_space) -> pd.DataFrame:
73 | data = {
74 | "model_subspace_id": [],
75 | "petab_yaml": [],
76 | "k1": [],
77 | "k2": [],
78 | "k3": [],
79 | "k4": [],
80 | "k5": [],
81 | }
82 |
83 | for model in calibrated_model_space:
84 | data["model_subspace_id"].append(f"model_subspace_{model}")
85 | data["petab_yaml"].append(
86 | Path(__file__).parent.parent.parent
87 | / "doc"
88 | / "examples"
89 | / "model_selection"
90 | / "petab_problem.yaml"
91 | )
92 | k1, k2, k3, k4, k5 = (
93 | "0" if parameter == "0" else ESTIMATE for parameter in model
94 | )
95 | data["k1"].append(k1)
96 | data["k2"].append(k2)
97 | data["k3"].append(k3)
98 | data["k4"].append(k4)
99 | data["k5"].append(k5)
100 | df = pd.DataFrame(data=data)
101 | model_space = ModelSpace.load(df)
102 | return model_space
103 |
--------------------------------------------------------------------------------
/test/candidate_space/test_famos.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | import pandas as pd
4 | import pytest
5 | from more_itertools import one
6 |
7 | import petab_select
8 | from petab_select import Method, ModelHash
9 | from petab_select.constants import (
10 | CANDIDATE_SPACE,
11 | MODEL_HASH,
12 | TERMINATE,
13 | UNCALIBRATED_MODELS,
14 | Criterion,
15 | )
16 |
17 |
18 | @pytest.fixture
19 | def input_path():
20 | return Path(__file__).parent / "input" / "famos_synthetic"
21 |
22 |
23 | @pytest.fixture
24 | def petab_select_problem(input_path):
25 | return petab_select.Problem.from_yaml(
26 | input_path / "select" / "FAMoS_2019_petab_select_problem.yaml"
27 | )
28 |
29 |
30 | @pytest.fixture
31 | def expected_criterion_values(input_path):
32 | calibration_results = pd.read_csv(
33 | input_path / "test_files" / "calibration_results.tsv",
34 | sep="\t",
35 | ).set_index(MODEL_HASH)
36 | return {
37 | ModelHash.model_validate(k): v
38 | for k, v in calibration_results[Criterion.AICC].items()
39 | }
40 |
41 |
42 | @pytest.fixture
43 | def expected_progress_list():
44 | return [
45 | (Method.LATERAL, set()),
46 | (Method.LATERAL, {4, 15}),
47 | (Method.LATERAL, {9, 13}),
48 | (Method.FORWARD, set()),
49 | (Method.FORWARD, {3}),
50 | (Method.FORWARD, {11}),
51 | (Method.BACKWARD, set()),
52 | (Method.BACKWARD, {6}),
53 | (Method.BACKWARD, {10}),
54 | (Method.BACKWARD, {8}),
55 | (Method.BACKWARD, {14}),
56 | (Method.BACKWARD, {1}),
57 | (Method.BACKWARD, {16}),
58 | (Method.BACKWARD, {4}),
59 | (Method.FORWARD, set()),
60 | (Method.LATERAL, set()),
61 | (Method.MOST_DISTANT, {2, 3, 4, 5, 6, 7, 9, 11, 12, 13, 15}),
62 | (Method.LATERAL, {16, 7}),
63 | (Method.LATERAL, {5, 12}),
64 | (Method.LATERAL, {13, 15}),
65 | (Method.LATERAL, {1, 6}),
66 | (Method.FORWARD, set()),
67 | (Method.FORWARD, {3}),
68 | (Method.FORWARD, {7}),
69 | (Method.FORWARD, {2}),
70 | (Method.FORWARD, {11}),
71 | (Method.BACKWARD, set()),
72 | (Method.BACKWARD, {7}),
73 | (Method.BACKWARD, {16}),
74 | (Method.BACKWARD, {4}),
75 | (Method.FORWARD, set()),
76 | (Method.LATERAL, set()),
77 | (Method.LATERAL, {9, 15}),
78 | (Method.FORWARD, set()),
79 | (Method.BACKWARD, set()),
80 | (Method.LATERAL, set()),
81 | ]
82 |
83 |
84 | def test_famos(
85 | petab_select_problem,
86 | expected_criterion_values,
87 | expected_progress_list,
88 | ):
89 | def calibrate(
90 | model,
91 | expected_criterion_values=expected_criterion_values,
92 | ) -> None:
93 | model.set_criterion(
94 | criterion=petab_select_problem.criterion,
95 | value=expected_criterion_values[model.hash],
96 | )
97 |
98 | def parse_summary_to_progress_list(summary_tsv: str) -> list[tuple[Method, set]]:
99 | """Get progress information from the summary file."""
100 | df_raw = pd.read_csv(summary_tsv, sep="\t")
101 | df = df_raw.loc[~pd.isnull(df_raw["predecessor change"])]
102 |
103 | parameter_list = list(
104 | one(
105 | petab_select_problem.model_space.model_subspaces.values()
106 | ).parameters
107 | )
108 |
109 | progress_list = []
110 |
111 | for index, (_, row) in enumerate(df.iterrows()):
112 | method = Method(row["method"])
113 |
114 | model = {
115 | 1 + parameter_list.index(parameter_id)
116 | for parameter_id in eval(row["current model"])
117 | }
118 | if index == 0:
119 | model0 = model
120 |
121 | difference = model.symmetric_difference(model0)
122 | progress_list.append((method, difference))
123 | model0 = model
124 |
125 | return progress_list
126 |
127 | progress_list = []
128 |
129 | candidate_space = petab_select_problem.new_candidate_space()
130 | expected_repeated_model_hash0 = candidate_space.predecessor_model.hash
131 | candidate_space.summary_tsv.unlink(missing_ok=True)
132 | candidate_space._setup_summary_tsv()
133 |
134 | with (
135 | pytest.raises(StopIteration, match="No valid models found."),
136 | pytest.warns(RuntimeWarning) as warning_record,
137 | ):
138 | while True:
139 | # Initialize iteration
140 | iteration = petab_select.ui.start_iteration(
141 | problem=petab_select_problem,
142 | candidate_space=candidate_space,
143 | )
144 |
145 | # Calibrate candidate models
146 | for candidate_model in iteration[UNCALIBRATED_MODELS]:
147 | calibrate(candidate_model)
148 |
149 | # Finalize iteration
150 | iteration_results = petab_select.ui.end_iteration(
151 | problem=petab_select_problem,
152 | candidate_space=iteration[CANDIDATE_SPACE],
153 | calibrated_models=iteration[UNCALIBRATED_MODELS],
154 | )
155 | candidate_space = iteration_results[CANDIDATE_SPACE]
156 |
157 | # Stop iteration if there are no candidate models
158 | if iteration_results[TERMINATE]:
159 | raise StopIteration("No valid models found.")
160 |
161 | # A model is encountered twice and therefore skipped.
162 | expected_repeated_model_hash1 = petab_select_problem.get_model(
163 | model_subspace_id=one(
164 | petab_select_problem.model_space.model_subspaces
165 | ),
166 | model_subspace_indices=[int(s) for s in "0001011010010010"],
167 | ).hash
168 | # The predecessor model is also re-encountered.
169 | assert len(warning_record) == 2
170 | assert (
171 | str(expected_repeated_model_hash0) in warning_record[0].message.args[0]
172 | )
173 | assert (
174 | str(expected_repeated_model_hash1) in warning_record[1].message.args[0]
175 | )
176 |
177 | progress_list = parse_summary_to_progress_list(candidate_space.summary_tsv)
178 |
179 | assert progress_list == expected_progress_list, progress_list
180 |
--------------------------------------------------------------------------------
/test/cli/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PEtab-dev/petab_select/68a9d5f436e5dc67516f1c87cfc9e017cc744ed2/test/cli/__init__.py
--------------------------------------------------------------------------------
/test/cli/expected_output/model/conditions.tsv:
--------------------------------------------------------------------------------
1 | conditionId conditionName
2 | model1_data1 condition1
3 |
--------------------------------------------------------------------------------
/test/cli/expected_output/model/measurements.tsv:
--------------------------------------------------------------------------------
1 | observableId simulationConditionId measurement time noiseParameters
2 | obs_x2 model1_data1 0.0 0 sigma_x2
3 | obs_x2 model1_data1 0.19421762 1 sigma_x2
4 | obs_x2 model1_data1 0.0484032 5 sigma_x2
5 | obs_x2 model1_data1 0.61288016 10 sigma_x2
6 | obs_x2 model1_data1 4.07930835 30 sigma_x2
7 | obs_x2 model1_data1 10.12008893 60 sigma_x2
8 |
--------------------------------------------------------------------------------
/test/cli/expected_output/model/observables.tsv:
--------------------------------------------------------------------------------
1 | observableId observableFormula noiseFormula
2 | obs_x2 x2+k4+k5 noiseParameter1_obs_x2
3 |
--------------------------------------------------------------------------------
/test/cli/expected_output/model/parameters.tsv:
--------------------------------------------------------------------------------
1 | parameterId parameterName parameterScale lowerBound upperBound nominalValue estimate
2 | k1 k_{1} lin 0.0 1000.0 0.2 0
3 | k2 k_{2} lin 0.0 1000.0 0.15 1
4 | k3 k_{3} lin 0.0 1000.0 0.0 1
5 | k4 k_{4} lin 0.0 1000.0 0.0 0
6 | k5 k_{5} lin 0.0 1000.0 0.0 0
7 | sigma_x2 \sigma_{x2} lin 1e-05 1000.0 0.15 1
8 |
--------------------------------------------------------------------------------
/test/cli/expected_output/model/problem.yaml:
--------------------------------------------------------------------------------
1 | parameter_file: parameters.tsv
2 | format_version: 1
3 | problems:
4 | - condition_files:
5 | - conditions.tsv
6 | measurement_files:
7 | - measurements.tsv
8 | sbml_files:
9 | - model.xml
10 | observable_files:
11 | - observables.tsv
12 |
--------------------------------------------------------------------------------
/test/cli/expected_output/models/model_1/conditions.tsv:
--------------------------------------------------------------------------------
1 | conditionId conditionName
2 | model1_data1 condition1
3 |
--------------------------------------------------------------------------------
/test/cli/expected_output/models/model_1/measurements.tsv:
--------------------------------------------------------------------------------
1 | observableId simulationConditionId measurement time noiseParameters
2 | obs_x2 model1_data1 0.0 0 sigma_x2
3 | obs_x2 model1_data1 0.19421762 1 sigma_x2
4 | obs_x2 model1_data1 0.0484032 5 sigma_x2
5 | obs_x2 model1_data1 0.61288016 10 sigma_x2
6 | obs_x2 model1_data1 4.07930835 30 sigma_x2
7 | obs_x2 model1_data1 10.12008893 60 sigma_x2
8 |
--------------------------------------------------------------------------------
/test/cli/expected_output/models/model_1/observables.tsv:
--------------------------------------------------------------------------------
1 | observableId observableFormula noiseFormula
2 | obs_x2 x2+k4+k5 noiseParameter1_obs_x2
3 |
--------------------------------------------------------------------------------
/test/cli/expected_output/models/model_1/parameters.tsv:
--------------------------------------------------------------------------------
1 | parameterId parameterName parameterScale lowerBound upperBound nominalValue estimate
2 | k1 k_{1} lin 0.0 1000.0 0.2 0
3 | k2 k_{2} lin 0.0 1000.0 0.15 1
4 | k3 k_{3} lin 0.0 1000.0 0.0 1
5 | k4 k_{4} lin 0.0 1000.0 0.0 0
6 | k5 k_{5} lin 0.0 1000.0 0.0 0
7 | sigma_x2 \sigma_{x2} lin 1e-05 1000.0 0.15 1
8 |
--------------------------------------------------------------------------------
/test/cli/expected_output/models/model_1/problem.yaml:
--------------------------------------------------------------------------------
1 | parameter_file: parameters.tsv
2 | format_version: 1
3 | problems:
4 | - condition_files:
5 | - conditions.tsv
6 | measurement_files:
7 | - measurements.tsv
8 | sbml_files:
9 | - model.xml
10 | observable_files:
11 | - observables.tsv
12 |
--------------------------------------------------------------------------------
/test/cli/expected_output/models/model_2/conditions.tsv:
--------------------------------------------------------------------------------
1 | conditionId conditionName
2 | model1_data1 condition1
3 |
--------------------------------------------------------------------------------
/test/cli/expected_output/models/model_2/measurements.tsv:
--------------------------------------------------------------------------------
1 | observableId simulationConditionId measurement time noiseParameters
2 | obs_x2 model1_data1 0.0 0 sigma_x2
3 | obs_x2 model1_data1 0.19421762 1 sigma_x2
4 | obs_x2 model1_data1 0.0484032 5 sigma_x2
5 | obs_x2 model1_data1 0.61288016 10 sigma_x2
6 | obs_x2 model1_data1 4.07930835 30 sigma_x2
7 | obs_x2 model1_data1 10.12008893 60 sigma_x2
8 |
--------------------------------------------------------------------------------
/test/cli/expected_output/models/model_2/observables.tsv:
--------------------------------------------------------------------------------
1 | observableId observableFormula noiseFormula
2 | obs_x2 x2+k4+k5 noiseParameter1_obs_x2
3 |
--------------------------------------------------------------------------------
/test/cli/expected_output/models/model_2/parameters.tsv:
--------------------------------------------------------------------------------
1 | parameterId parameterName parameterScale lowerBound upperBound nominalValue estimate
2 | k1 k_{1} lin 0.0 1000.0 0.2 1
3 | k2 k_{2} lin 0.0 1000.0 0.1 1
4 | k3 k_{3} lin 0.0 1000.0 0.0 0
5 | k4 k_{4} lin 0.0 1000.0 0.0 0
6 | k5 k_{5} lin 0.0 1000.0 0.0 0
7 | sigma_x2 \sigma_{x2} lin 1e-05 1000.0 0.15 1
8 |
--------------------------------------------------------------------------------
/test/cli/expected_output/models/model_2/problem.yaml:
--------------------------------------------------------------------------------
1 | parameter_file: parameters.tsv
2 | format_version: 1
3 | problems:
4 | - condition_files:
5 | - conditions.tsv
6 | measurement_files:
7 | - measurements.tsv
8 | sbml_files:
9 | - model.xml
10 | observable_files:
11 | - observables.tsv
12 |
--------------------------------------------------------------------------------
/test/cli/input/model.yaml:
--------------------------------------------------------------------------------
1 | - model_subspace_id: M
2 | model_subspace_indices:
3 | - 0
4 | - 1
5 | - 1
6 | criteria: {}
7 | model_hash: M-011
8 | model_subspace_petab_yaml: ../../../doc/examples/model_selection/petab_problem.yaml
9 | estimated_parameters:
10 | k2: 0.15
11 | k3: 0.0
12 | model_id: model
13 | parameters:
14 | k1: 0.2
15 | k2: estimate
16 | k3: estimate
17 |
--------------------------------------------------------------------------------
/test/cli/input/models.yaml:
--------------------------------------------------------------------------------
1 | - model_subspace_id: M
2 | model_subspace_indices:
3 | - 0
4 | - 1
5 | - 1
6 | criteria: {}
7 | model_hash: M-011
8 | model_subspace_petab_yaml: ../../../doc/examples/model_selection/petab_problem.yaml
9 | estimated_parameters:
10 | k2: 0.15
11 | k3: 0.0
12 | model_id: model_1
13 | parameters:
14 | k1: 0.2
15 | k2: estimate
16 | k3: estimate
17 | - model_subspace_id: M
18 | model_subspace_indices:
19 | - 1
20 | - 1
21 | - 0
22 | criteria: {}
23 | model_hash: M-110
24 | model_subspace_petab_yaml: ../../../doc/examples/model_selection/petab_problem.yaml
25 | model_id: model_2
26 | parameters:
27 | k1: estimate
28 | k2: estimate
29 | k3: 0
30 |
--------------------------------------------------------------------------------
/test/cli/test_cli.py:
--------------------------------------------------------------------------------
1 | import filecmp
2 | from pathlib import Path
3 |
4 | import pytest
5 | from click.testing import CliRunner
6 |
7 | # from petab_select import Model
8 | import petab_select.cli
9 |
10 | base_dir = Path(__file__).parent
11 |
12 |
13 | @pytest.fixture
14 | def output_path() -> Path:
15 | return base_dir / "output"
16 |
17 |
18 | @pytest.fixture
19 | def expected_output_path() -> Path:
20 | return base_dir / "expected_output"
21 |
22 |
23 | @pytest.fixture
24 | def model_yaml() -> Path:
25 | return base_dir / "input" / "model.yaml"
26 |
27 |
28 | @pytest.fixture
29 | def models_yaml() -> Path:
30 | return base_dir / "input" / "models.yaml"
31 |
32 |
33 | @pytest.fixture
34 | def cli_runner() -> CliRunner:
35 | return CliRunner()
36 |
37 |
38 | def test_model_to_petab(
39 | model_yaml,
40 | output_path,
41 | expected_output_path,
42 | cli_runner,
43 | ) -> None:
44 | """Test conversion of a model to PEtab problem files."""
45 | output_path_model = output_path / "model"
46 | output_path_model.mkdir(parents=True, exist_ok=True)
47 |
48 | result = cli_runner.invoke(
49 | petab_select.cli.model_to_petab,
50 | [
51 | "--model",
52 | model_yaml,
53 | "--output",
54 | output_path_model,
55 | ],
56 | )
57 |
58 | # The new PEtab problem YAML file is output to stdout correctly.
59 | assert (
60 | result.stdout == f'{base_dir / "output" / "model" / "problem.yaml"}\n'
61 | )
62 |
63 | comparison = filecmp.dircmp(
64 | expected_output_path / "model",
65 | output_path_model,
66 | )
67 | # The PEtab problem files are as expected.
68 | assert not comparison.diff_files
69 | assert sorted(comparison.same_files) == [
70 | "conditions.tsv",
71 | "measurements.tsv",
72 | "model.xml",
73 | "observables.tsv",
74 | "parameters.tsv",
75 | "problem.yaml",
76 | ]
77 |
78 |
79 | def test_models_to_petab(
80 | models_yaml,
81 | output_path,
82 | expected_output_path,
83 | cli_runner,
84 | ) -> None:
85 | """Test conversion of multiple models to PEtab problem files."""
86 | output_path_models = output_path / "models"
87 | output_path_models.mkdir(parents=True, exist_ok=True)
88 |
89 | result = cli_runner.invoke(
90 | petab_select.cli.models_to_petab,
91 | [
92 | "--models",
93 | models_yaml,
94 | "--output",
95 | output_path_models,
96 | ],
97 | )
98 |
99 | # The new PEtab problem YAML files are output with model IDs to `stdout`
100 | # correctly.
101 | assert result.stdout == (
102 | f'model_1\t{base_dir / "output" / "models" / "model_1" / "problem.yaml"}\n'
103 | f'model_2\t{base_dir / "output" / "models" / "model_2" / "problem.yaml"}\n'
104 | )
105 |
106 | comparison = filecmp.dircmp(
107 | expected_output_path / "models" / "model_1",
108 | output_path_models / "model_1",
109 | )
110 | # The first set of PEtab problem files is as expected.
111 | assert not comparison.diff_files
112 | assert sorted(comparison.same_files) == [
113 | "conditions.tsv",
114 | "measurements.tsv",
115 | "model.xml",
116 | "observables.tsv",
117 | "parameters.tsv",
118 | "problem.yaml",
119 | ]
120 |
121 | comparison = filecmp.dircmp(
122 | expected_output_path / "models" / "model_2",
123 | output_path_models / "model_2",
124 | )
125 | # The second set of PEtab problem files is as expected.
126 | assert not comparison.diff_files
127 | assert sorted(comparison.same_files) == [
128 | "conditions.tsv",
129 | "measurements.tsv",
130 | "model.xml",
131 | "observables.tsv",
132 | "parameters.tsv",
133 | "problem.yaml",
134 | ]
135 |
136 | comparison = filecmp.dircmp(
137 | output_path_models / "model_1",
138 | output_path_models / "model_2",
139 | )
140 | # The first and second sets of PEtab problems differ only in their
141 | # parameters table and nowhere else.
142 | assert comparison.diff_files == ["parameters.tsv"]
143 | assert sorted(comparison.same_files) == [
144 | "conditions.tsv",
145 | "measurements.tsv",
146 | "model.xml",
147 | "observables.tsv",
148 | "problem.yaml",
149 | ]
150 |
--------------------------------------------------------------------------------
/test/model/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PEtab-dev/petab_select/68a9d5f436e5dc67516f1c87cfc9e017cc744ed2/test/model/__init__.py
--------------------------------------------------------------------------------
/test/model/expected_output/petab/conditions.tsv:
--------------------------------------------------------------------------------
1 | conditionId conditionName
2 | model1_data1 condition1
3 |
--------------------------------------------------------------------------------
/test/model/expected_output/petab/measurements.tsv:
--------------------------------------------------------------------------------
1 | observableId simulationConditionId measurement time noiseParameters
2 | obs_x2 model1_data1 0.0 0 sigma_x2
3 | obs_x2 model1_data1 0.19421762 1 sigma_x2
4 | obs_x2 model1_data1 0.0484032 5 sigma_x2
5 | obs_x2 model1_data1 0.61288016 10 sigma_x2
6 | obs_x2 model1_data1 4.07930835 30 sigma_x2
7 | obs_x2 model1_data1 10.12008893 60 sigma_x2
8 |
--------------------------------------------------------------------------------
/test/model/expected_output/petab/observables.tsv:
--------------------------------------------------------------------------------
1 | observableId observableFormula noiseFormula
2 | obs_x2 x2+k4+k5 noiseParameter1_obs_x2
3 |
--------------------------------------------------------------------------------
/test/model/expected_output/petab/parameters.tsv:
--------------------------------------------------------------------------------
1 | parameterId parameterName parameterScale lowerBound upperBound nominalValue estimate
2 | k1 k_{1} lin 0.0 1000.0 0.2 0
3 | k2 k_{2} lin 0.0 1000.0 0.15 1
4 | k3 k_{3} lin 0.0 1000.0 0.0 1
5 | k4 k_{4} lin 0.0 1000.0 0.0 0
6 | k5 k_{5} lin 0.0 1000.0 0.0 0
7 | sigma_x2 \sigma_{x2} lin 1e-05 1000.0 0.15 1
8 |
--------------------------------------------------------------------------------
/test/model/expected_output/petab/problem.yaml:
--------------------------------------------------------------------------------
1 | parameter_file: parameters.tsv
2 | format_version: 1
3 | problems:
4 | - condition_files:
5 | - conditions.tsv
6 | measurement_files:
7 | - measurements.tsv
8 | sbml_files:
9 | - model.xml
10 | observable_files:
11 | - observables.tsv
12 |
--------------------------------------------------------------------------------
/test/model/input/model.yaml:
--------------------------------------------------------------------------------
1 | model_subspace_id: M
2 | model_subspace_indices:
3 | - 0
4 | - 1
5 | - 1
6 | criteria: {}
7 | model_subspace_petab_yaml: ../../../doc/examples/model_selection/petab_problem.yaml
8 | model_id: model
9 | parameters:
10 | k1: 0.2
11 | k2: estimate
12 | k3: estimate
13 | estimated_parameters:
14 | k2: 0.15
15 | k3: 0.0
16 |
--------------------------------------------------------------------------------
/test/model/test_model.py:
--------------------------------------------------------------------------------
1 | import filecmp
2 | from pathlib import Path
3 |
4 | import pytest
5 |
6 | from petab_select import Model
7 |
8 | base_dir = Path(__file__).parent
9 |
10 |
11 | @pytest.fixture
12 | def output_path() -> Path:
13 | return base_dir / "output"
14 |
15 |
16 | @pytest.fixture
17 | def expected_output_path() -> Path:
18 | return base_dir / "expected_output"
19 |
20 |
21 | @pytest.fixture
22 | def model() -> Model:
23 | return Model.from_yaml(base_dir / "input" / "model.yaml")
24 |
25 |
26 | def test_model_to_petab(model, output_path, expected_output_path) -> None:
27 | """Test conversion of a model to a PEtab problem and files."""
28 | output_path_petab = output_path / "petab"
29 | output_path_petab.mkdir(parents=True, exist_ok=True)
30 | # TODO test `petab_problem`? Shouldn't be necessary since the generated
31 | # files are tested below.
32 | petab_problem, petab_problem_yaml = model.to_petab(
33 | output_path=output_path_petab
34 | )
35 |
36 | comparison = filecmp.dircmp(
37 | expected_output_path / "petab",
38 | output_path_petab,
39 | )
40 | # The PEtab problem files are as expected.
41 | assert not comparison.diff_files
42 | assert sorted(comparison.same_files) == [
43 | "conditions.tsv",
44 | "measurements.tsv",
45 | "model.xml",
46 | "observables.tsv",
47 | "parameters.tsv",
48 | "problem.yaml",
49 | ]
50 |
--------------------------------------------------------------------------------
/test/model_space/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PEtab-dev/petab_select/68a9d5f436e5dc67516f1c87cfc9e017cc744ed2/test/model_space/__init__.py
--------------------------------------------------------------------------------
/test/model_space/model_space_file_1.tsv:
--------------------------------------------------------------------------------
1 | model_subspace_id model_subspace_petab_yaml k1 k2 k3 k4
2 | model_subspace_1 ../../doc/examples/model_selection/petab_problem.yaml 0.2;estimate 0.1;estimate estimate 0;0.1;estimate
3 | model_subspace_2 ../../doc/examples/model_selection/petab_problem.yaml 0 0 0 estimate
4 |
--------------------------------------------------------------------------------
/test/model_space/model_space_file_2.tsv:
--------------------------------------------------------------------------------
1 | model_subspace_id model_subspace_petab_yaml k1 k2 k3 k4
2 | model_subspace_3 ../../doc/examples/model_selection/petab_problem.yaml estimate estimate 0.3;estimate estimate
3 |
--------------------------------------------------------------------------------
/test/model_space/test_model_space.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | import pytest
4 |
5 | from petab_select.candidate_space import (
6 | BackwardCandidateSpace,
7 | BruteForceCandidateSpace,
8 | ForwardCandidateSpace,
9 | )
10 | from petab_select.constants import (
11 | ESTIMATE,
12 | Criterion,
13 | )
14 | from petab_select.model_space import ModelSpace
15 |
16 | base_dir = Path(__file__).parent
17 |
18 |
19 | @pytest.fixture
20 | def model_space_files() -> list[Path]:
21 | return [
22 | base_dir / "model_space_file_1.tsv",
23 | base_dir / "model_space_file_2.tsv",
24 | ]
25 |
26 |
27 | @pytest.fixture
28 | def model_space(model_space_files) -> ModelSpace:
29 | return ModelSpace.load(model_space_files)
30 |
31 |
32 | def test_model_space_forward_virtual(model_space):
33 | candidate_space = ForwardCandidateSpace(criterion=Criterion.NLLH)
34 | model_space.search(candidate_space)
35 |
36 | # The forward candidate space is initialized without a model, so a virtual initial
37 | # model is used. This means the expected models are the "smallest" models (as many
38 | # fixed parameters as possible) in the model space.
39 | expected_models = [
40 | (
41 | "model_subspace_1",
42 | {"k1": 0.2, "k2": 0.1, "k3": ESTIMATE, "k4": 0.0},
43 | ),
44 | (
45 | "model_subspace_1",
46 | {"k1": 0.2, "k2": 0.1, "k3": ESTIMATE, "k4": 0.1},
47 | ),
48 | (
49 | "model_subspace_2",
50 | {"k1": 0.0, "k2": 0.0, "k3": 0.0, "k4": ESTIMATE},
51 | ),
52 | ]
53 |
54 | models = [
55 | (model.model_subspace_id, model.parameters)
56 | for model in candidate_space.models
57 | ]
58 |
59 | # Search found only expected models.
60 | assert all(model in expected_models for model in models)
61 | # All expected models have now been added to the candidate space.
62 | assert all(model in models for model in expected_models)
63 | # Probably unnecessary: same number of models in expectation vs realization
64 | assert len(expected_models) == len(candidate_space.models)
65 |
66 |
67 | @pytest.mark.filterwarnings("ignore:Model has been previously excluded")
68 | def test_model_space_backward_virtual(model_space):
69 | candidate_space = BackwardCandidateSpace(criterion=Criterion.NLLH)
70 | model_space.search(candidate_space)
71 |
72 | # The backward candidate space is initialized without a model, so a virtual
73 | # initial model is used. This means the expected models are the "largest"
74 | # models (as many estimated parameters as possible) in the model space.
75 | expected_models = [
76 | ("model_subspace_1", {f"k{i}": ESTIMATE for i in range(1, 5)}),
77 | # This model could be excluded, if the hashes/model comparisons enabled
78 | # identification of identical models between different subspaces.
79 | # TODO delete above, keep below comment, when implemented...
80 | # This model is not included because it is exactly the same as the
81 | # other model (same PEtab YAML and parameterization), hence has been
82 | # excluded from the candidate space.
83 | ("model_subspace_3", {f"k{i}": ESTIMATE for i in range(1, 5)}),
84 | ]
85 |
86 | models = [
87 | (model.model_subspace_id, model.parameters)
88 | for model in candidate_space.models
89 | ]
90 |
91 | # Search found only expected models.
92 | assert all(model in expected_models for model in models)
93 | # All expected models have now been added to the candidate space.
94 | assert all(model in models for model in expected_models)
95 | # Probably unnecessary: same number of models in expectation vs realization
96 | assert len(expected_models) == len(candidate_space.models)
97 |
98 |
99 | def test_model_space_brute_force_limit(model_space):
100 | candidate_space = BruteForceCandidateSpace(criterion=Criterion.NLLH)
101 | model_space.search(candidate_space, limit=13)
102 |
103 | # There are fifteen total models in the model space. Limiting to 13 models should
104 | # result in all models except the last two models in the last model subspace.
105 | expected_models = [
106 | (
107 | "model_subspace_1",
108 | {"k1": 0.2, "k2": 0.1, "k3": ESTIMATE, "k4": 0.0},
109 | ),
110 | (
111 | "model_subspace_1",
112 | {"k1": 0.2, "k2": 0.1, "k3": ESTIMATE, "k4": 0.1},
113 | ),
114 | (
115 | "model_subspace_1",
116 | {"k1": 0.2, "k2": 0.1, "k3": ESTIMATE, "k4": ESTIMATE},
117 | ),
118 | (
119 | "model_subspace_1",
120 | {"k1": 0.2, "k2": ESTIMATE, "k3": ESTIMATE, "k4": 0.0},
121 | ),
122 | (
123 | "model_subspace_1",
124 | {"k1": 0.2, "k2": ESTIMATE, "k3": ESTIMATE, "k4": 0.1},
125 | ),
126 | (
127 | "model_subspace_1",
128 | {"k1": 0.2, "k2": ESTIMATE, "k3": ESTIMATE, "k4": ESTIMATE},
129 | ),
130 | (
131 | "model_subspace_1",
132 | {"k1": ESTIMATE, "k2": 0.1, "k3": ESTIMATE, "k4": 0.0},
133 | ),
134 | (
135 | "model_subspace_1",
136 | {"k1": ESTIMATE, "k2": 0.1, "k3": ESTIMATE, "k4": 0.1},
137 | ),
138 | (
139 | "model_subspace_1",
140 | {"k1": ESTIMATE, "k2": 0.1, "k3": ESTIMATE, "k4": ESTIMATE},
141 | ),
142 | (
143 | "model_subspace_1",
144 | {"k1": ESTIMATE, "k2": ESTIMATE, "k3": ESTIMATE, "k4": 0.0},
145 | ),
146 | (
147 | "model_subspace_1",
148 | {"k1": ESTIMATE, "k2": ESTIMATE, "k3": ESTIMATE, "k4": 0.1},
149 | ),
150 | (
151 | "model_subspace_1",
152 | {"k1": ESTIMATE, "k2": ESTIMATE, "k3": ESTIMATE, "k4": ESTIMATE},
153 | ),
154 | (
155 | "model_subspace_2",
156 | {"k1": 0.0, "k2": 0.0, "k3": 0.0, "k4": ESTIMATE},
157 | ),
158 | ]
159 |
160 | models = [
161 | (model.model_subspace_id, model.parameters)
162 | for model in candidate_space.models
163 | ]
164 |
165 | # Search found only expected models.
166 | assert all(model in expected_models for model in models)
167 | # All expected models have now been added to the candidate space.
168 | assert all(model in models for model in expected_models)
169 | # Probably unnecessary: same number of models in expectation vs realization
170 | assert len(expected_models) == len(candidate_space.models)
171 |
172 |
173 | """
174 | @pytest.fixture
175 | def e():
176 | return ESTIMATE
177 |
178 | @pytest.fixture
179 | def points(e):
180 | return [
181 | [0, 0, 0, 0],
182 |
183 | [e, 0, 0, 0],
184 | [0, e, 0, 0],
185 | [0, 0, e, 0],
186 | [0, 0, 0, e],
187 |
188 | [e, e, 0, 0],
189 | [e, 0, e, 0],
190 | [e, 0, 0, e],
191 | [0, e, e, 0],
192 | [0, e, 0, e],
193 | [0, 0, e, e],
194 |
195 | [e, e, e, 0],
196 | [e, e, 0, e],
197 | [e, 0, e, e],
198 | [0, e, e, e],
199 |
200 | [e, e, e, e],
201 | ]
202 |
203 |
204 | def point_to_parameters(point):
205 | return {
206 | f'k{index}': value
207 | for index, value in enumerate(point)
208 | }
209 |
210 |
211 | @pytest.fixture
212 | def models(points):
213 | return [
214 | Model(
215 | model_id='',
216 | petab_yaml='',
217 | parameters=point_to_parameters(point),
218 | criteria=None,
219 | )
220 | for point in points
221 | ]
222 |
223 |
224 | @pytest.fixture
225 | def model_space(models):
226 | def model_iterator(models=models):
227 | for model in models:
228 | yield model
229 | return ModelSpace(model_iterator)
230 |
231 |
232 | def test_distance(model_space, e):
233 | model0 = Model(
234 | model_id='model0',
235 | petab_yaml='.',
236 | parameters=point_to_parameters([e, 0, 0, 0]),
237 | criteria=None,
238 | )
239 |
240 | forward_candidate_space = ForwardCandidateSpace(model0)
241 | backward_candidate_space = BackwardCandidateSpace(model0)
242 | lateral_candidate_space = LateralCandidateSpace(model0)
243 | brute_force_candidate_space = BruteForceCandidateSpace(model0)
244 |
245 | neighbors = model_space.neighbors(forward_candidate_space)
246 | assert len(neighbors) == 3
247 | model_space.reset()
248 | neighbors = model_space.neighbors(backward_candidate_space)
249 | assert len(neighbors) == 1
250 | model_space.reset()
251 | # FIXME currently skips "same" model (same estimated parameters).
252 | # However, might be that the fixed parameters are different values.
253 | neighbors = model_space.neighbors(lateral_candidate_space)
254 | assert len(neighbors) == 3
255 | model_space.reset()
256 | neighbors = model_space.neighbors(brute_force_candidate_space)
257 | assert len(neighbors) == 16
258 | """
259 |
--------------------------------------------------------------------------------
/test/model_subspace/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PEtab-dev/petab_select/68a9d5f436e5dc67516f1c87cfc9e017cc744ed2/test/model_subspace/__init__.py
--------------------------------------------------------------------------------
/test/problem/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PEtab-dev/petab_select/68a9d5f436e5dc67516f1c87cfc9e017cc744ed2/test/problem/__init__.py
--------------------------------------------------------------------------------
/test/problem/expected_output/model_space.tsv:
--------------------------------------------------------------------------------
1 | model_subspace_id model_subspace_petab_yaml k1 k2 k3
2 | M1_0 ../../../doc/examples/model_selection/petab_problem.yaml 0 0 0
3 | M1_1 ../../../doc/examples/model_selection/petab_problem.yaml 0.2 0.1 estimate
4 | M1_2 ../../../doc/examples/model_selection/petab_problem.yaml 0.2 estimate 0
5 | M1_3 ../../../doc/examples/model_selection/petab_problem.yaml estimate 0.1 0
6 | M1_4 ../../../doc/examples/model_selection/petab_problem.yaml 0.2 estimate estimate
7 | M1_5 ../../../doc/examples/model_selection/petab_problem.yaml estimate 0.1 estimate
8 | M1_6 ../../../doc/examples/model_selection/petab_problem.yaml estimate estimate 0
9 | M1_7 ../../../doc/examples/model_selection/petab_problem.yaml estimate estimate estimate
10 |
--------------------------------------------------------------------------------
/test/problem/expected_output/petab_select_problem.yaml:
--------------------------------------------------------------------------------
1 | format_version: 1.0.0
2 | criterion: AIC
3 | method: forward
4 | model_space_files:
5 | - model_space.tsv
6 | candidate_space_arguments: {}
7 |
--------------------------------------------------------------------------------
/test/problem/test_problem.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | import petab_select
4 |
5 | test_path = Path(__file__).parent
6 |
7 | problem_yaml = (
8 | test_path.parent.parent
9 | / "doc"
10 | / "examples"
11 | / "model_selection"
12 | / "petab_select_problem.yaml"
13 | )
14 |
15 |
16 | def test_round_trip():
17 | """Test storing/loading of a full problem."""
18 | problem0 = petab_select.Problem.from_yaml(problem_yaml)
19 | problem0.save(test_path / "output")
20 |
21 | with open(test_path / "expected_output/petab_select_problem.yaml") as f:
22 | problem_yaml0 = f.read()
23 | with open(test_path / "expected_output/model_space.tsv") as f:
24 | model_space_tsv0 = f.read()
25 |
26 | with open(test_path / "output/petab_select_problem.yaml") as f:
27 | problem_yaml1 = f.read()
28 | with open(test_path / "output/model_space.tsv") as f:
29 | model_space_tsv1 = f.read()
30 |
31 | # The exported problem YAML is as expected, with updated relative paths.
32 | assert problem_yaml1 == problem_yaml0
33 | # The exported model space TSV is as expected, with updated relative paths.
34 | assert model_space_tsv1 == model_space_tsv0
35 |
--------------------------------------------------------------------------------
/test/ui/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PEtab-dev/petab_select/68a9d5f436e5dc67516f1c87cfc9e017cc744ed2/test/ui/__init__.py
--------------------------------------------------------------------------------
/test/ui/test_ui.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | import pytest
4 | from more_itertools import one
5 |
6 | import petab_select
7 | from petab_select import Models
8 | from petab_select.constants import (
9 | CANDIDATE_SPACE,
10 | MODELS,
11 | UNCALIBRATED_MODELS,
12 | )
13 |
14 |
15 | @pytest.fixture
16 | def petab_select_problem():
17 | return petab_select.Problem.from_yaml(
18 | Path(__file__).parents[2]
19 | / "doc"
20 | / "examples"
21 | / "model_selection"
22 | / "petab_select_problem.yaml"
23 | )
24 |
25 |
26 | def test_user_calibrated_models(petab_select_problem):
27 | """Test handling of user-calibrated models."""
28 | model_M1_2 = petab_select_problem.model_space.model_subspaces[
29 | "M1_2"
30 | ].indices_to_model((0, 0, 0))
31 | model_M1_2.set_criterion(
32 | criterion=petab_select_problem.criterion, value=12.3
33 | )
34 | user_calibrated_models = Models([model_M1_2])
35 |
36 | # Initial iteration: expect the "empty" model. Set dummy criterion and continue.
37 | iteration = petab_select.ui.start_iteration(
38 | problem=petab_select_problem,
39 | user_calibrated_models=user_calibrated_models,
40 | )
41 | model_M1_0 = one(iteration[UNCALIBRATED_MODELS])
42 | # The initial iteration proceeded as expected: the "empty" model was identified as a candidate.
43 | assert model_M1_0.model_subspace_id == "M1_0"
44 | model_M1_0.set_criterion(petab_select_problem.criterion, 100)
45 | iteration_results = petab_select.ui.end_iteration(
46 | problem=petab_select_problem,
47 | candidate_space=iteration[CANDIDATE_SPACE],
48 | calibrated_models=[model_M1_0],
49 | )
50 |
51 | # Second iteration. User-calibrated models should now change the iteration's behavior.
52 | iteration = petab_select.ui.start_iteration(
53 | problem=petab_select_problem,
54 | candidate_space=iteration_results[CANDIDATE_SPACE],
55 | user_calibrated_models=user_calibrated_models,
56 | )
57 | # The user-calibrated model was not included in the iteration's uncalibrated models.
58 | uncalibrated_model_ids = [
59 | model.model_subspace_id for model in iteration[UNCALIBRATED_MODELS]
60 | ]
61 | assert set(uncalibrated_model_ids) == {"M1_1", "M1_3"}
62 | for uncalibrated_model in iteration[UNCALIBRATED_MODELS]:
63 | uncalibrated_model.set_criterion(petab_select_problem.criterion, 50)
64 | iteration_results = petab_select.ui.end_iteration(
65 | problem=petab_select_problem,
66 | candidate_space=iteration[CANDIDATE_SPACE],
67 | calibrated_models=iteration[UNCALIBRATED_MODELS],
68 | )
69 | iteration_model_ids = [
70 | model.model_subspace_id for model in iteration_results[MODELS]
71 | ]
72 | # The user-calibrated model is included in the final set of models for this iteration.
73 | assert set(iteration_model_ids) == {"M1_1", "M1_2", "M1_3"}
74 |
--------------------------------------------------------------------------------
/test_cases/0001/expected.yaml:
--------------------------------------------------------------------------------
1 | model_subspace_id: M1_1
2 | model_subspace_indices:
3 | - 0
4 | - 0
5 | - 0
6 | criteria:
7 | NLLH: -4.087702752023436
8 | AIC: -6.175405504046871
9 | model_hash: M1_1-000
10 | model_subspace_petab_yaml: petab/petab_problem.yaml
11 | estimated_parameters:
12 | sigma_x2: 0.12242920313036142
13 | iteration: 1
14 | model_id: M1_1-000
15 | parameters:
16 | k1: 0.2
17 | k2: 0.1
18 | k3: 0
19 | predecessor_model_hash: virtual_initial_model-
20 |
--------------------------------------------------------------------------------
/test_cases/0001/model_space.tsv:
--------------------------------------------------------------------------------
1 | model_subspace_id model_subspace_petab_yaml k1 k2 k3
2 | M1_1 petab/petab_problem.yaml 0.2 0.1 0
3 |
--------------------------------------------------------------------------------
/test_cases/0001/petab/conditions.tsv:
--------------------------------------------------------------------------------
1 | conditionId conditionName
2 | model1_data1 condition1
3 |
--------------------------------------------------------------------------------
/test_cases/0001/petab/measurements.tsv:
--------------------------------------------------------------------------------
1 | observableId simulationConditionId measurement time noiseParameters
2 | obs_x2 model1_data1 0 0 sigma_x2
3 | obs_x2 model1_data1 0.19421762 1 sigma_x2
4 | obs_x2 model1_data1 0.0484032 5 sigma_x2
5 | obs_x2 model1_data1 0.61288016 10 sigma_x2
6 | obs_x2 model1_data1 4.07930835 30 sigma_x2
7 | obs_x2 model1_data1 10.12008893 60 sigma_x2
8 |
--------------------------------------------------------------------------------
/test_cases/0001/petab/observables.tsv:
--------------------------------------------------------------------------------
1 | observableId observableFormula noiseFormula
2 | obs_x2 x2 noiseParameter1_obs_x2
3 |
--------------------------------------------------------------------------------
/test_cases/0001/petab/parameters.tsv:
--------------------------------------------------------------------------------
1 | parameterId parameterName parameterScale lowerBound upperBound nominalValue estimate
2 | k1 k_{1} lin 0 1 0.2 1
3 | k2 k_{2} lin 0 1 0.1 1
4 | k3 k_{3} lin 0 1 0 1
5 | sigma_x2 \sigma_{x2} lin 1.00E-05 1.00E+03 0.15 1
6 |
--------------------------------------------------------------------------------
/test_cases/0001/petab/petab_problem.yaml:
--------------------------------------------------------------------------------
1 | format_version: 1
2 | parameter_file: parameters.tsv
3 | problems:
4 | - condition_files:
5 | - conditions.tsv
6 | measurement_files:
7 | - measurements.tsv
8 | observable_files:
9 | - observables.tsv
10 | sbml_files:
11 | - model.xml
12 |
--------------------------------------------------------------------------------
/test_cases/0001/petab_select_problem.yaml:
--------------------------------------------------------------------------------
1 | format_version: beta_1
2 | criterion: AIC
3 | method: brute_force
4 | model_space_files:
5 | - model_space.tsv
6 |
--------------------------------------------------------------------------------
/test_cases/0002/expected.yaml:
--------------------------------------------------------------------------------
1 | model_subspace_id: M1_3
2 | model_subspace_indices:
3 | - 0
4 | - 0
5 | - 0
6 | criteria:
7 | NLLH: -4.352662995581719
8 | AIC: -4.705325991163438
9 | model_hash: M1_3-000
10 | model_subspace_petab_yaml: ../0001/petab/petab_problem.yaml
11 | estimated_parameters:
12 | k1: 0.2016087813530968
13 | sigma_x2: 0.11714041764571122
14 | iteration: 2
15 | model_id: M1_3-000
16 | parameters:
17 | k1: estimate
18 | k2: 0.1
19 | k3: 0
20 | predecessor_model_hash: M1_0-000
21 |
--------------------------------------------------------------------------------
/test_cases/0002/model_space.tsv:
--------------------------------------------------------------------------------
1 | model_subspace_id model_subspace_petab_yaml k1 k2 k3
2 | M1_0 ../0001/petab/petab_problem.yaml 0 0 0
3 | M1_1 ../0001/petab/petab_problem.yaml 0.2 0.1 estimate
4 | M1_2 ../0001/petab/petab_problem.yaml 0.2 estimate 0
5 | M1_3 ../0001/petab/petab_problem.yaml estimate 0.1 0
6 | M1_4 ../0001/petab/petab_problem.yaml 0.2 estimate estimate
7 | M1_5 ../0001/petab/petab_problem.yaml estimate 0.1 estimate
8 | M1_6 ../0001/petab/petab_problem.yaml estimate estimate 0
9 | M1_7 ../0001/petab/petab_problem.yaml estimate estimate estimate
10 |
--------------------------------------------------------------------------------
/test_cases/0002/petab_select_problem.yaml:
--------------------------------------------------------------------------------
1 | format_version: beta_1
2 | criterion: AIC
3 | method: forward
4 | model_space_files:
5 | - model_space.tsv
6 |
--------------------------------------------------------------------------------
/test_cases/0003/expected.yaml:
--------------------------------------------------------------------------------
1 | model_subspace_id: M1
2 | model_subspace_indices:
3 | - 1
4 | - 1
5 | - 0
6 | criteria:
7 | NLLH: -4.0877027520227704
8 | BIC: -6.383646034817486
9 | model_hash: M1-110
10 | model_subspace_petab_yaml: ../0001/petab/petab_problem.yaml
11 | estimated_parameters:
12 | sigma_x2: 0.12242924701706556
13 | iteration: 1
14 | model_id: M1-110
15 | parameters:
16 | k1: 0.2
17 | k2: 0.1
18 | k3: 0
19 | predecessor_model_hash: virtual_initial_model-
20 |
--------------------------------------------------------------------------------
/test_cases/0003/model_space.tsv:
--------------------------------------------------------------------------------
1 | model_subspace_id model_subspace_petab_yaml k1 k2 k3
2 | M1 ../0001/petab/petab_problem.yaml 0;0.2;estimate 0;0.1;estimate 0;estimate
3 |
--------------------------------------------------------------------------------
/test_cases/0003/petab_select_problem.yaml:
--------------------------------------------------------------------------------
1 | format_version: beta_1
2 | criterion: BIC
3 | method: brute_force
4 | model_space_files:
5 | - model_space.tsv
6 |
--------------------------------------------------------------------------------
/test_cases/0004/constraints.tsv:
--------------------------------------------------------------------------------
1 | YAML if constraint
2 | petab_1/petab_1.yaml k1 <= k2 k1 < k2 && k3 == 0
3 |
--------------------------------------------------------------------------------
/test_cases/0004/expected.yaml:
--------------------------------------------------------------------------------
1 | model_subspace_id: M1_3
2 | model_subspace_indices:
3 | - 0
4 | - 0
5 | - 0
6 | criteria:
7 | NLLH: -4.352662995594862
8 | AICc: -0.7053259911897243
9 | model_hash: M1_3-000
10 | model_subspace_petab_yaml: ../0001/petab/petab_problem.yaml
11 | estimated_parameters:
12 | k1: 0.20160877986376358
13 | sigma_x2: 0.11714041204425464
14 | iteration: 3
15 | model_id: M1_3-000
16 | parameters:
17 | k1: estimate
18 | k2: 0.1
19 | k3: 0
20 | predecessor_model_hash: M1_6-000
21 |
--------------------------------------------------------------------------------
/test_cases/0004/model_space.tsv:
--------------------------------------------------------------------------------
1 | model_subspace_id model_subspace_petab_yaml k1 k2 k3
2 | M1_0 ../0001/petab/petab_problem.yaml 0 0 0
3 | M1_1 ../0001/petab/petab_problem.yaml 0.2 0.1 estimate
4 | M1_2 ../0001/petab/petab_problem.yaml 0.2 estimate 0
5 | M1_3 ../0001/petab/petab_problem.yaml estimate 0.1 0
6 | M1_4 ../0001/petab/petab_problem.yaml 0.2 estimate estimate
7 | M1_5 ../0001/petab/petab_problem.yaml estimate 0.1 estimate
8 | M1_6 ../0001/petab/petab_problem.yaml estimate estimate 0
9 | M1_7 ../0001/petab/petab_problem.yaml estimate estimate estimate
10 |
--------------------------------------------------------------------------------
/test_cases/0004/petab_select_problem.yaml:
--------------------------------------------------------------------------------
1 | format_version: beta_1
2 | criterion: AICc
3 | method: backward
4 | model_space_files:
5 | - model_space.tsv
6 | constraint_files:
7 | - constraints.tsv
8 |
--------------------------------------------------------------------------------
/test_cases/0005/expected.yaml:
--------------------------------------------------------------------------------
1 | model_subspace_id: M1_3
2 | model_subspace_indices:
3 | - 0
4 | - 0
5 | - 0
6 | criteria:
7 | NLLH: -4.352662995589992
8 | AIC: -4.7053259911799845
9 | model_hash: M1_3-000
10 | model_subspace_petab_yaml: ../0001/petab/petab_problem.yaml
11 | estimated_parameters:
12 | k1: 0.20160877971477925
13 | sigma_x2: 0.11714036509532029
14 | iteration: 2
15 | model_id: M1_3-000
16 | parameters:
17 | k1: estimate
18 | k2: 0.1
19 | k3: 0
20 | predecessor_model_hash: M1_0-000
21 |
--------------------------------------------------------------------------------
/test_cases/0005/initial_models.tsv:
--------------------------------------------------------------------------------
1 | model_subspace_id YAML k1 k2 k3
2 | I1_6 ../0001/petab/petab_problem.yaml estimate estimate 0
3 |
--------------------------------------------------------------------------------
/test_cases/0005/model_space.tsv:
--------------------------------------------------------------------------------
1 | model_subspace_id model_subspace_petab_yaml k1 k2 k3
2 | M1_0 ../0001/petab/petab_problem.yaml 0 0 0
3 | M1_1 ../0001/petab/petab_problem.yaml 0.2 0.1 estimate
4 | M1_2 ../0001/petab/petab_problem.yaml 0.2 estimate 0
5 | M1_3 ../0001/petab/petab_problem.yaml estimate 0.1 0
6 | M1_4 ../0001/petab/petab_problem.yaml 0.2 estimate estimate
7 | M1_5 ../0001/petab/petab_problem.yaml estimate 0.1 estimate
8 | M1_6 ../0001/petab/petab_problem.yaml estimate estimate 0
9 | M1_7 ../0001/petab/petab_problem.yaml estimate estimate estimate
10 |
--------------------------------------------------------------------------------
/test_cases/0005/petab_select_problem.yaml:
--------------------------------------------------------------------------------
1 | format_version: beta_1
2 | criterion: AIC
3 | method: forward
4 | model_space_files:
5 | - model_space.tsv
6 | initial_model_files:
7 | - initial_models.tsv
8 |
--------------------------------------------------------------------------------
/test_cases/0006/expected.yaml:
--------------------------------------------------------------------------------
1 | model_subspace_id: M1_0
2 | model_subspace_indices:
3 | - 0
4 | - 0
5 | - 0
6 | criteria:
7 | NLLH: -4.087702752023439
8 | AIC: -6.1754055040468785
9 | model_hash: M1_0-000
10 | model_subspace_petab_yaml: ../0001/petab/petab_problem.yaml
11 | estimated_parameters:
12 | sigma_x2: 0.12242920634250658
13 | iteration: 1
14 | model_id: M1_0-000
15 | parameters:
16 | k1: 0.2
17 | k2: 0.1
18 | k3: 0
19 | predecessor_model_hash: virtual_initial_model-
20 |
--------------------------------------------------------------------------------
/test_cases/0006/model_space.tsv:
--------------------------------------------------------------------------------
1 | model_subspace_id model_subspace_petab_yaml k1 k2 k3
2 | M1_0 ../0001/petab/petab_problem.yaml 0.2 0.1 0
3 | M1_1 ../0001/petab/petab_problem.yaml 0.2 0.1 estimate
4 | M1_2 ../0001/petab/petab_problem.yaml 0.2 estimate 0
5 | M1_3 ../0001/petab/petab_problem.yaml estimate 0.1 0
6 | M1_4 ../0001/petab/petab_problem.yaml 0.2 estimate estimate
7 | M1_5 ../0001/petab/petab_problem.yaml estimate 0.1 estimate
8 | M1_6 ../0001/petab/petab_problem.yaml estimate estimate 0
9 | M1_7 ../0001/petab/petab_problem.yaml estimate estimate estimate
10 |
--------------------------------------------------------------------------------
/test_cases/0006/petab_select_problem.yaml:
--------------------------------------------------------------------------------
1 | format_version: beta_1
2 | criterion: AIC
3 | method: forward
4 | model_space_files:
5 | - model_space.tsv
6 |
--------------------------------------------------------------------------------
/test_cases/0007/expected.yaml:
--------------------------------------------------------------------------------
1 | model_subspace_id: M1_0
2 | model_subspace_indices:
3 | - 0
4 | - 0
5 | - 0
6 | criteria:
7 | NLLH: 5.558597930767597
8 | AIC: 11.117195861535194
9 | model_hash: M1_0-000
10 | model_subspace_petab_yaml: petab/petab_problem.yaml
11 | estimated_parameters: {}
12 | iteration: 1
13 | model_id: M1_0-000
14 | parameters:
15 | k1: 0.2
16 | k2: 0.1
17 | k3: 0
18 | predecessor_model_hash: virtual_initial_model-
19 |
--------------------------------------------------------------------------------
/test_cases/0007/model_space.tsv:
--------------------------------------------------------------------------------
1 | model_subspace_id model_subspace_petab_yaml k1 k2 k3
2 | M1_0 petab/petab_problem.yaml 0.2 0.1 0
3 | M1_1 petab/petab_problem.yaml 0.2 0.1 estimate
4 | M1_2 petab/petab_problem.yaml 0.2 estimate 0
5 | M1_3 petab/petab_problem.yaml estimate 0.1 0
6 | M1_4 petab/petab_problem.yaml 0.2 estimate estimate
7 | M1_5 petab/petab_problem.yaml estimate 0.1 estimate
8 | M1_6 petab/petab_problem.yaml estimate estimate 0
9 | M1_7 petab/petab_problem.yaml estimate estimate estimate
10 |
--------------------------------------------------------------------------------
/test_cases/0007/petab/conditions.tsv:
--------------------------------------------------------------------------------
1 | conditionId conditionName
2 | model1_data1 condition1
3 |
--------------------------------------------------------------------------------
/test_cases/0007/petab/measurements.tsv:
--------------------------------------------------------------------------------
1 | observableId simulationConditionId measurement time
2 | obs_x2 model1_data1 0 0
3 | obs_x2 model1_data1 0.19421762 1
4 | obs_x2 model1_data1 0.0484032 5
5 | obs_x2 model1_data1 0.61288016 10
6 | obs_x2 model1_data1 4.07930835 30
7 | obs_x2 model1_data1 10.12008893 60
8 |
--------------------------------------------------------------------------------
/test_cases/0007/petab/model.xml:
--------------------------------------------------------------------------------
[SBML model definition; the XML markup was stripped during extraction, leaving only annotation timestamps (2019-03-27), so the content is not reproduced here.]
--------------------------------------------------------------------------------
/test_cases/0007/petab/observables.tsv:
--------------------------------------------------------------------------------
1 | observableId observableFormula noiseFormula
2 | obs_x2 x2 1
3 |
--------------------------------------------------------------------------------
/test_cases/0007/petab/parameters.tsv:
--------------------------------------------------------------------------------
1 | parameterId parameterName parameterScale lowerBound upperBound nominalValue estimate
2 | k1 k_{1} lin 0 1.00E+03 0.2 1
3 | k2 k_{2} lin 0 1.00E+03 0.1 1
4 | k3 k_{3} lin 0 1.00E+03 0 1
5 |
--------------------------------------------------------------------------------
/test_cases/0007/petab/petab_problem.yaml:
--------------------------------------------------------------------------------
1 | format_version: 1
2 | parameter_file: parameters.tsv
3 | problems:
4 | - condition_files:
5 | - conditions.tsv
6 | measurement_files:
7 | - measurements.tsv
8 | observable_files:
9 | - observables.tsv
10 | sbml_files:
11 | - model.xml
12 |
--------------------------------------------------------------------------------
/test_cases/0007/petab_select_problem.yaml:
--------------------------------------------------------------------------------
1 | format_version: beta_1
2 | criterion: AIC
3 | method: forward
4 | model_space_files:
5 | - model_space.tsv
6 |
--------------------------------------------------------------------------------
/test_cases/0008/expected.yaml:
--------------------------------------------------------------------------------
1 | model_subspace_id: M1_0
2 | model_subspace_indices:
3 | - 0
4 | - 0
5 | - 0
6 | criteria:
7 | NLLH: 5.558597930767597
8 | AICc: 11.117195861535194
9 | model_hash: M1_0-000
10 | model_subspace_petab_yaml: ../0007/petab/petab_problem.yaml
11 | estimated_parameters: {}
12 | iteration: 4
13 | model_id: M1_0-000
14 | parameters:
15 | k1: 0.2
16 | k2: 0.1
17 | k3: 0
18 | predecessor_model_hash: M1_3-000
19 |
--------------------------------------------------------------------------------
/test_cases/0008/model_space.tsv:
--------------------------------------------------------------------------------
1 | model_subspace_id model_subspace_petab_yaml k1 k2 k3
2 | M1_0 ../0007/petab/petab_problem.yaml 0.2 0.1 0
3 | M1_1 ../0007/petab/petab_problem.yaml 0.2 0.1 estimate
4 | M1_2 ../0007/petab/petab_problem.yaml 0.2 estimate 0
5 | M1_3 ../0007/petab/petab_problem.yaml estimate 0.1 0
6 | M1_4 ../0007/petab/petab_problem.yaml 0.2 estimate estimate
7 | M1_5 ../0007/petab/petab_problem.yaml estimate 0.1 estimate
8 | M1_6 ../0007/petab/petab_problem.yaml estimate estimate 0
9 | M1_7 ../0007/petab/petab_problem.yaml estimate estimate estimate
10 |
--------------------------------------------------------------------------------
/test_cases/0008/petab_select_problem.yaml:
--------------------------------------------------------------------------------
1 | format_version: beta_1
2 | criterion: AICc
3 | method: backward
4 | model_space_files:
5 | - model_space.tsv
6 |
--------------------------------------------------------------------------------
/test_cases/0009/README.md:
--------------------------------------------------------------------------------
1 | N.B. This original Blasi et al. problem is difficult to solve with a stepwise method. Many forward/backward/forward+backward variants failed. This test case was found by:
2 | 1. performing 100 FAMoS starts, initialized at random models. Usually <5% of the starts ended at the best model.
3 | 2. assessing reproducibility. Most of the starts that ended at the best model are not reproducible: the path through model space can differ substantially despite "good" calibration, because many pairs of models differ in AICc by less than the numerical noise.
4 |
5 | One start was found that reproducibly ends at the best model. The initial model of that start is the predecessor model in this test case. However, the path through model space is not reproducible -- there are at least two possibilities, perhaps more, depending on simulation tolerances. Hence, you should expect to produce a similar `expected_summary.tsv`, but perhaps with a few rows swapped. If you see a substantially different `summary.tsv`, please report it (or retry a few times). In particular, a different `summary.tsv` will have a different sequence of values in the `current model criterion` column (accounting for numerical noise).
6 |
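7 | As a rough aid for the reproducibility check above, the sketch below compares the `current model criterion` columns of two summary files up to ordering and numerical noise. It is only a suggestion: the file locations, the `pandas`/`numpy` dependencies, and the tolerance are assumptions, not part of this test case.
8 | 
9 | ```python
10 | import numpy as np
11 | import pandas as pd
12 | 
13 | # Hypothetical locations; adjust to wherever your tool writes its summary.
14 | expected = pd.read_csv("expected_summary.tsv", sep="\t")
15 | observed = pd.read_csv("output/summary.tsv", sep="\t")
16 | 
17 | col = "current model criterion"
18 | # Rows may be swapped between runs, so compare the sorted criterion values,
19 | # skipping non-numeric entries, up to a guessed numerical tolerance.
20 | expected_values = np.sort(pd.to_numeric(expected[col], errors="coerce").dropna().to_numpy())
21 | observed_values = np.sort(pd.to_numeric(observed[col], errors="coerce").dropna().to_numpy())
22 | match = len(expected_values) == len(observed_values) and np.allclose(
23 |     expected_values, observed_values, atol=1e-3
24 | )
25 | print("Criterion trajectories match (up to ordering and noise):", match)
26 | ```
27 | 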
--------------------------------------------------------------------------------
/test_cases/0009/expected.yaml:
--------------------------------------------------------------------------------
1 | model_subspace_id: M
2 | model_subspace_indices:
3 | - 0
4 | - 1
5 | - 0
6 | - 0
7 | - 0
8 | - 1
9 | - 0
10 | - 0
11 | - 0
12 | - 0
13 | - 1
14 | - 0
15 | - 0
16 | - 0
17 | - 0
18 | - 1
19 | - 0
20 | - 0
21 | - 1
22 | - 0
23 | - 0
24 | - 0
25 | - 0
26 | - 0
27 | - 0
28 | - 0
29 | - 0
30 | - 1
31 | - 0
32 | - 0
33 | - 0
34 | - 1
35 | criteria:
36 | NLLH: -862.3517925313981
37 | AICc: -1708.1109924702037
38 | model_hash: M-01000100001000010010000000010001
39 | model_subspace_petab_yaml: petab/petab_problem.yaml
40 | estimated_parameters:
41 | a_0ac_k08: 0.40850355273291267
42 | a_k05_k05k12: 30.888150959586138
43 | a_k12_k05k12: 8.267845459216893
44 | a_k16_k12k16: 10.424629099941777
45 | a_k05k12_k05k08k12: 4.872747603868694
46 | a_k12k16_k08k12k16: 33.03769174387633
47 | a_k08k12k16_4ac: 53.80106471593421
48 | a_b: 0.06675819571287103
49 | iteration: 11
50 | model_id: M-01000100001000010010000000010001
51 | parameters:
52 | a_0ac_k05: 1
53 | a_0ac_k08: estimate
54 | a_0ac_k12: 1
55 | a_0ac_k16: 1
56 | a_k05_k05k08: 1
57 | a_k05_k05k12: estimate
58 | a_k05_k05k16: 1
59 | a_k08_k05k08: 1
60 | a_k08_k08k12: 1
61 | a_k08_k08k16: 1
62 | a_k12_k05k12: estimate
63 | a_k12_k08k12: 1
64 | a_k12_k12k16: 1
65 | a_k16_k05k16: 1
66 | a_k16_k08k16: 1
67 | a_k16_k12k16: estimate
68 | a_k05k08_k05k08k12: 1
69 | a_k05k08_k05k08k16: 1
70 | a_k05k12_k05k08k12: estimate
71 | a_k05k12_k05k12k16: 1
72 | a_k05k16_k05k08k16: 1
73 | a_k05k16_k05k12k16: 1
74 | a_k08k12_k05k08k12: 1
75 | a_k08k12_k08k12k16: 1
76 | a_k08k16_k05k08k16: 1
77 | a_k08k16_k08k12k16: 1
78 | a_k12k16_k05k12k16: 1
79 | a_k12k16_k08k12k16: estimate
80 | a_k05k08k12_4ac: 1
81 | a_k05k08k16_4ac: 1
82 | a_k05k12k16_4ac: 1
83 | a_k08k12k16_4ac: estimate
84 | predecessor_model_hash: M-01000100001010010010000000010001
85 |
--------------------------------------------------------------------------------
/test_cases/0009/model_space.tsv:
--------------------------------------------------------------------------------
1 | model_subspace_id model_subspace_petab_yaml a_0ac_k05 a_0ac_k08 a_0ac_k12 a_0ac_k16 a_k05_k05k08 a_k05_k05k12 a_k05_k05k16 a_k08_k05k08 a_k08_k08k12 a_k08_k08k16 a_k12_k05k12 a_k12_k08k12 a_k12_k12k16 a_k16_k05k16 a_k16_k08k16 a_k16_k12k16 a_k05k08_k05k08k12 a_k05k08_k05k08k16 a_k05k12_k05k08k12 a_k05k12_k05k12k16 a_k05k16_k05k08k16 a_k05k16_k05k12k16 a_k08k12_k05k08k12 a_k08k12_k08k12k16 a_k08k16_k05k08k16 a_k08k16_k08k12k16 a_k12k16_k05k12k16 a_k12k16_k08k12k16 a_k05k08k12_4ac a_k05k08k16_4ac a_k05k12k16_4ac a_k08k12k16_4ac
2 | M petab/petab_problem.yaml 1.0;estimate 1.0;estimate 1.0;estimate 1.0;estimate 1.0;estimate 1.0;estimate 1.0;estimate 1.0;estimate 1.0;estimate 1.0;estimate 1.0;estimate 1.0;estimate 1.0;estimate 1.0;estimate 1.0;estimate 1.0;estimate 1.0;estimate 1.0;estimate 1.0;estimate 1.0;estimate 1.0;estimate 1.0;estimate 1.0;estimate 1.0;estimate 1.0;estimate 1.0;estimate 1.0;estimate 1.0;estimate 1.0;estimate 1.0;estimate 1.0;estimate 1.0;estimate
3 |
--------------------------------------------------------------------------------
/test_cases/0009/petab/conditions.tsv:
--------------------------------------------------------------------------------
1 | conditionId
2 | condition
3 |
--------------------------------------------------------------------------------
/test_cases/0009/petab/observables.tsv:
--------------------------------------------------------------------------------
1 | observableId observableName observableFormula observableTransformation noiseFormula noiseDistribution
2 | observable_x_0ac y_{0ac} x_0ac log sigma_ normal
3 | observable_x_4ac y_{4ac} x_4ac log sigma_ normal
4 | observable_x_k12 y_{k12} x_k12 log sigma_ normal
5 | observable_x_k12k16 y_{k12k16} x_k12k16 log sigma_ normal
6 | observable_x_k16 y_{k16} x_k16 log sigma_ normal
7 | observable_x_k05 y_{k05} x_k05 log sigma_ normal
8 | observable_x_k05k12 y_{k05k12} x_k05k12 log sigma_ normal
9 | observable_x_k05k12k16 y_{k05k12k16} x_k05k12k16 log sigma_ normal
10 | observable_x_k05k08 y_{k05k08} x_k05k08 log sigma_ normal
11 | observable_x_k05k08k12 y_{k05k08k12} x_k05k08k12 log sigma_ normal
12 | observable_x_k05k08k16 y_{k05k08k16} x_k05k08k16 log sigma_ normal
13 | observable_x_k08 y_{k08} x_k08 log sigma_ normal
14 | observable_x_k08k12 y_{k08k12} x_k08k12 log sigma_ normal
15 | observable_x_k08k12k16 y_{k08k12k16} x_k08k12k16 log sigma_ normal
16 | observable_x_k08k16 y_{k08k16} x_k08k16 log sigma_ normal
17 |
--------------------------------------------------------------------------------
/test_cases/0009/petab/parameters.tsv:
--------------------------------------------------------------------------------
1 | parameterId parameterName parameterScale lowerBound upperBound nominalValue estimate
2 | a_0ac_k05 a_{0ac\\rightarrow k05} log10 1e-3 1e3 1.0 0
3 | a_0ac_k08 a_{0ac\\rightarrow k08} log10 1e-3 1e3 0.4085094224060409 1
4 | a_0ac_k12 a_{0ac\\rightarrow k12} log10 1e-3 1e3 1.0 0
5 | a_0ac_k16 a_{0ac\\rightarrow k16} log10 1e-3 1e3 1.0 0
6 | a_k05_k05k08 a_{k05\\rightarrow k05k08} log10 1e-3 1e3 1.0 0
7 | a_k05_k05k12 a_{k05\\rightarrow k05k12} log10 1e-3 1e3 30.88820217382275 1
8 | a_k05_k05k16 a_{k05\\rightarrow k05k16} log10 1e-3 1e3 1.0 0
9 | a_k08_k05k08 a_{k08\\rightarrow k05k08} log10 1e-3 1e3 1.0 0
10 | a_k08_k08k12 a_{k08\\rightarrow k08k12} log10 1e-3 1e3 1.0 0
11 | a_k08_k08k16 a_{k08\\rightarrow k08k16} log10 1e-3 1e3 1.0 0
12 | a_k12_k05k12 a_{k12\\rightarrow k05k12} log10 1e-3 1e3 8.267790285514408 1
13 | a_k12_k08k12 a_{k12\\rightarrow k08k12} log10 1e-3 1e3 1.0 0
14 | a_k12_k12k16 a_{k12\\rightarrow k12k16} log10 1e-3 1e3 1.0 0
15 | a_k16_k05k16 a_{k16\\rightarrow k05k16} log10 1e-3 1e3 1.0 0
16 | a_k16_k08k16 a_{k16\\rightarrow k08k16} log10 1e-3 1e3 1.0 0
17 | a_k16_k12k16 a_{k16\\rightarrow k12k16} log10 1e-3 1e3 10.424803031743464 1
18 | a_k05k08_k05k08k12 a_{k05k08\\rightarrow k05k08k12} log10 1e-3 1e3 1.0 0
19 | a_k05k08_k05k08k16 a_{k05k08\\rightarrow k05k08k16} log10 1e-3 1e3 1.0 0
20 | a_k05k12_k05k08k12 a_{k05k12\\rightarrow k05k08k12} log10 1e-3 1e3 4.872782281489833 1
21 | a_k05k12_k05k12k16 a_{k05k12\\rightarrow k05k12k16} log10 1e-3 1e3 1.0 0
22 | a_k05k16_k05k08k16 a_{k05k16\\rightarrow k05k08k16} log10 1e-3 1e3 1.0 0
23 | a_k05k16_k05k12k16 a_{k05k16\\rightarrow k05k12k16} log10 1e-3 1e3 1.0 0
24 | a_k08k12_k05k08k12 a_{k08k12\\rightarrow k05k08k12} log10 1e-3 1e3 1.0 0
25 | a_k08k12_k08k12k16 a_{k08k12\\rightarrow k08k12k16} log10 1e-3 1e3 1.0 0
26 | a_k08k16_k05k08k16 a_{k08k16\\rightarrow k05k08k16} log10 1e-3 1e3 1.0 0
27 | a_k08k16_k08k12k16 a_{k08k16\\rightarrow k08k12k16} log10 1e-3 1e3 1.0 0
28 | a_k12k16_k05k12k16 a_{k12k16\\rightarrow k05k12k16} log10 1e-3 1e3 1.0 0
29 | a_k12k16_k08k12k16 a_{k12k16\\rightarrow k08k12k16} log10 1e-3 1e3 33.037720591816104 1
30 | a_k05k08k12_4ac a_{k05k08k12\\rightarrow 4ac} log10 1e-3 1e3 1.0 0
31 | a_k05k08k16_4ac a_{k05k08k16\\rightarrow 4ac} log10 1e-3 1e3 1.0 0
32 | a_k05k12k16_4ac a_{k05k12k16\\rightarrow 4ac} log10 1e-3 1e3 1.0 0
33 | a_k08k12k16_4ac a_{k08k12k16\\rightarrow 4ac} log10 1e-3 1e3 53.801757067907324 1
34 | a_b a_b log10 1e-3 1e3 0.0667579590703576 1
35 | da_b da_b lin 1.0 0
36 | sigma_ sigma_ log10 1e-3 1e3 1.0 0
37 |
--------------------------------------------------------------------------------
/test_cases/0009/petab/petab_problem.yaml:
--------------------------------------------------------------------------------
1 | format_version: 1
2 | parameter_file: parameters.tsv
3 | problems:
4 | - condition_files:
5 | - conditions.tsv
6 | measurement_files:
7 | - measurements.tsv
8 | observable_files:
9 | - observables.tsv
10 | sbml_files:
11 | - model.xml
12 |
--------------------------------------------------------------------------------
/test_cases/0009/petab_select_problem.yaml:
--------------------------------------------------------------------------------
1 | format_version: beta_1
2 | criterion: AICc
3 | method: famos
4 | model_space_files:
5 | - model_space.tsv
6 | candidate_space_arguments:
7 | critical_parameter_sets: []
8 | consecutive_laterals: true
9 | predecessor_model: predecessor_model.yaml
10 | summary_tsv: output/summary.tsv
11 | swap_parameter_sets:
12 | - - a_0ac_k05
13 | - a_0ac_k08
14 | - a_0ac_k12
15 | - a_0ac_k16
16 | - a_k05_k05k08
17 | - a_k05_k05k12
18 | - a_k05_k05k16
19 | - a_k05k08_k05k08k12
20 | - a_k05k08_k05k08k16
21 | - a_k05k08k12_4ac
22 | - a_k05k08k16_4ac
23 | - a_k05k12_k05k08k12
24 | - a_k05k12_k05k12k16
25 | - a_k05k12k16_4ac
26 | - a_k05k16_k05k08k16
27 | - a_k05k16_k05k12k16
28 | - a_k08_k05k08
29 | - a_k08_k08k12
30 | - a_k08_k08k16
31 | - a_k08k12_k05k08k12
32 | - a_k08k12_k08k12k16
33 | - a_k08k12k16_4ac
34 | - a_k08k16_k05k08k16
35 | - a_k08k16_k08k12k16
36 | - a_k12_k05k12
37 | - a_k12_k08k12
38 | - a_k12_k12k16
39 | - a_k12k16_k05k12k16
40 | - a_k12k16_k08k12k16
41 | - a_k16_k05k16
42 | - a_k16_k08k16
43 | - a_k16_k12k16
44 |
--------------------------------------------------------------------------------
/test_cases/0009/predecessor_model.yaml:
--------------------------------------------------------------------------------
1 | criteria:
2 | NLLH: inf
3 | estimated_parameters: {}
4 | model_hash: M-01000001111110010011010011010001
5 | model_id: M-01000001111110010011010011010001
6 | model_subspace_id: M
7 | model_subspace_indices:
8 | - 0
9 | - 1
10 | - 0
11 | - 0
12 | - 0
13 | - 0
14 | - 0
15 | - 1
16 | - 1
17 | - 1
18 | - 1
19 | - 1
20 | - 1
21 | - 0
22 | - 0
23 | - 1
24 | - 0
25 | - 0
26 | - 1
27 | - 1
28 | - 0
29 | - 1
30 | - 0
31 | - 0
32 | - 1
33 | - 1
34 | - 0
35 | - 1
36 | - 0
37 | - 0
38 | - 0
39 | - 1
40 | parameters:
41 | a_0ac_k05: 1
42 | a_0ac_k08: estimate
43 | a_0ac_k12: 1
44 | a_0ac_k16: 1
45 | a_k05_k05k08: 1
46 | a_k05_k05k12: 1
47 | a_k05_k05k16: 1
48 | a_k05k08_k05k08k12: 1
49 | a_k05k08_k05k08k16: 1
50 | a_k05k08k12_4ac: 1
51 | a_k05k08k16_4ac: 1
52 | a_k05k12_k05k08k12: estimate
53 | a_k05k12_k05k12k16: estimate
54 | a_k05k12k16_4ac: 1
55 | a_k05k16_k05k08k16: 1
56 | a_k05k16_k05k12k16: estimate
57 | a_k08_k05k08: estimate
58 | a_k08_k08k12: estimate
59 | a_k08_k08k16: estimate
60 | a_k08k12_k05k08k12: 1
61 | a_k08k12_k08k12k16: 1
62 | a_k08k12k16_4ac: estimate
63 | a_k08k16_k05k08k16: estimate
64 | a_k08k16_k08k12k16: estimate
65 | a_k12_k05k12: estimate
66 | a_k12_k08k12: estimate
67 | a_k12_k12k16: estimate
68 | a_k12k16_k05k12k16: 1
69 | a_k12k16_k08k12k16: estimate
70 | a_k16_k05k16: 1
71 | a_k16_k08k16: 1
72 | a_k16_k12k16: estimate
73 | model_subspace_petab_yaml: petab/petab_problem.yaml
74 | predecessor_model_hash: virtual_initial_model-
75 |
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
1 | [tox]
2 | envlist =
3 | clean
4 | base
5 |
6 | [testenv]
7 | passenv = GITHUB_ACTIONS
8 |
9 | [testenv:clean]
10 | skip_install = true
11 | allowlist_externals =
12 | rm
13 | deps = coverage
14 | commands =
15 | coverage erase
16 | description =
17 | Clean up
18 |
19 | [testenv:base]
20 | extras = test
21 | commands =
22 | pytest --cov=petab_select --cov-report=xml --cov-append test -s
23 | coverage report
24 | description =
25 | Test basic functionality
26 |
--------------------------------------------------------------------------------