├── .coveragerc ├── .flake8 ├── .github └── workflows │ ├── copyright-test.yml │ ├── coverage-test.yml │ ├── docs-test.yml │ ├── notebook-interfaces-test.yml │ ├── notebook-test.yml │ ├── style-test.yml │ ├── unit-test-os-coverage.yml │ ├── unit-test-python-coverage.yml │ └── upload-to-pypi.yml ├── .gitignore ├── .readthedocs.yaml ├── .requirements-docs.txt ├── CHANGELOG.md ├── CITATION ├── CONTRIBUTING.md ├── LICENSE.md ├── MANIFEST.in ├── README.md ├── docs ├── .gitignore ├── Makefile ├── make.bat └── source │ ├── _static │ └── custom.css │ ├── abc_samplers │ ├── abc_smc.rst │ ├── base_classes.rst │ ├── index.rst │ └── rejection_abc.rst │ ├── boundaries.rst │ ├── conf.py │ ├── core_classes_and_methods.rst │ ├── diagnostic_plots.rst │ ├── diagnostics.rst │ ├── error_measures.rst │ ├── function_evaluation.rst │ ├── index.rst │ ├── io.rst │ ├── log_likelihoods.rst │ ├── log_pdfs.rst │ ├── log_priors.rst │ ├── mcmc_samplers │ ├── adaptive_covariance_mc.rst │ ├── base_classes.rst │ ├── differential_evolution_mcmc.rst │ ├── dram_ac_mcmc.rst │ ├── dream_mcmc.rst │ ├── dual_averaging.rst │ ├── emcee_hammer_mcmc.rst │ ├── haario_ac_mcmc.rst │ ├── haario_bardenet_ac_mcmc.rst │ ├── hamiltonian_mcmc.rst │ ├── index.rst │ ├── mala_mcmc.rst │ ├── metropolis_mcmc.rst │ ├── monomial_gamma_hamiltonian_mcmc.rst │ ├── nuts_mcmc.rst │ ├── population_mcmc.rst │ ├── rao_blackwell_ac_mcmc.rst │ ├── relativistic_mcmc.rst │ ├── running.rst │ ├── slice_doubling_mcmc.rst │ ├── slice_rank_shrinking_mcmc.rst │ ├── slice_stepout_mcmc.rst │ └── summary_mcmc.rst │ ├── nested_samplers │ ├── index.rst │ ├── nested_ellipsoid_sampler.rst │ ├── nested_rejection_sampler.rst │ └── nested_sampler.rst │ ├── noise_generators.rst │ ├── noise_model_diagnostics.rst │ ├── optimisers │ ├── adam.rst │ ├── base_classes.rst │ ├── cmaes.rst │ ├── cmaes_bare.rst │ ├── convenience_methods.rst │ ├── gradient_descent.rst │ ├── index.rst │ ├── irpropmin.rst │ ├── nelder_mead.rst │ ├── pso.rst │ ├── running.rst │ ├── 
snes.rst │ └── xnes.rst │ ├── toy │ ├── annulus_logpdf.rst │ ├── beeler_reuter_ap_model.rst │ ├── cone_logpdf.rst │ ├── constant_model.rst │ ├── eight_schools_logpdf.rst │ ├── fitzhugh_nagumo_model.rst │ ├── gaussian_logpdf.rst │ ├── german_credit_hierarchical_logpdf.rst │ ├── german_credit_logpdf.rst │ ├── goodwin_oscillator_model.rst │ ├── hes1_michaelis_menten_model.rst │ ├── high_dimensional_gaussian_logpdf.rst │ ├── hodgkin_huxley_ik_model.rst │ ├── index.rst │ ├── logistic_model.rst │ ├── lotka_volterra_model.rst │ ├── multimodal_gaussian_logpdf.rst │ ├── neals_funnel.rst │ ├── parabolic_error.rst │ ├── repressilator_model.rst │ ├── rosenbrock.rst │ ├── simple_egg_box_logpdf.rst │ ├── simple_harmonic_oscillator_model.rst │ ├── sir_model.rst │ ├── stochastic │ │ ├── index.rst │ │ ├── markov_jump_model.rst │ │ ├── stochastic_degradation_model.rst │ │ ├── stochastic_logistic_model.rst │ │ ├── stochastic_michaelis_menten_model.rst │ │ ├── stochastic_production_degradation_model.rst │ │ └── stochastic_schlogl_model.rst │ ├── toy_classes.rst │ └── twisted_gaussian_logpdf.rst │ ├── transformations.rst │ └── utilities.rst ├── example.svg ├── examples ├── README.md ├── interfaces │ ├── automatic-differentiation-using-autograd.ipynb │ ├── stan.ipynb │ ├── statsmodels-arima.ipynb │ └── statsmodels-state-space.ipynb ├── miscellaneous │ ├── model-rt-estimation-renewal-equation.ipynb │ ├── sars_incidence.csv │ └── sars_serial_interval.csv ├── optimisation │ ├── adam.ipynb │ ├── ask-and-tell.ipynb │ ├── cmaes-bare.ipynb │ ├── cmaes.ipynb │ ├── convenience.ipynb │ ├── first-example.ipynb │ ├── gradient-descent.ipynb │ ├── irpropmin.ipynb │ ├── maximum-likelihood.ipynb │ ├── multi-objective.ipynb │ ├── nelder-mead.ipynb │ ├── pso.ipynb │ ├── snes.ipynb │ ├── spotting-unidentifiable-parameters.ipynb │ ├── transformed-parameters.ipynb │ └── xnes.ipynb ├── plotting │ ├── customise-pints-plots.ipynb │ ├── mcmc-autocorrelation.ipynb │ ├── mcmc-histogram-plots.ipynb │ ├── 
mcmc-pairwise-scatterplots.ipynb │ ├── mcmc-predicted-time-series.ipynb │ ├── mcmc-trace-plots.ipynb │ ├── optimisation-2d-surface.ipynb │ ├── residuals-autocorrelation-diagnostics.ipynb │ ├── residuals-autocorrelation.ipynb │ └── residuals-variance-diagnostics.ipynb ├── sampling │ ├── abc-smc.ipynb │ ├── adaptive-covariance-dram.ipynb │ ├── adaptive-covariance-haario-bardenet.ipynb │ ├── adaptive-covariance-haario.ipynb │ ├── adaptive-covariance-rao-blackwell.ipynb │ ├── differential-evolution-mcmc.ipynb │ ├── dream-mcmc.ipynb │ ├── effective-sample-size.ipynb │ ├── emcee-hammer.ipynb │ ├── first-example.ipynb │ ├── hamiltonian-mcmc.ipynb │ ├── mala-mcmc.ipynb │ ├── metropolis-mcmc.ipynb │ ├── monomial-gamma-hmc.ipynb │ ├── nested-ellipsoidal-sampling.ipynb │ ├── nested-rejection-sampling.ipynb │ ├── nuts-mcmc.ipynb │ ├── population-mcmc.ipynb │ ├── readme-example.ipynb │ ├── rejection-abc.ipynb │ ├── relativistic-mcmc.ipynb │ ├── slice-doubling-mcmc.ipynb │ ├── slice-overrelaxation-mcmc.ipynb │ ├── slice-rank-shrinking-mcmc.ipynb │ ├── slice-stepout-mcmc.ipynb │ ├── transformation-with-and-without-jacobian.ipynb │ └── transformed-parameters.ipynb ├── stats │ ├── autoregressive-moving-average-errors.ipynb │ ├── beginners-tutorial.ipynb │ ├── cauchy-sampling-error.ipynb │ ├── constant-and-multiplicative-gaussian-error.ipynb │ ├── custom-logpdf.ipynb │ ├── custom-model.ipynb │ ├── custom-prior.ipynb │ ├── integrated-gaussian-log-likelihood.ipynb │ ├── log-priors.ipynb │ ├── multiplicative-gaussian-errors.ipynb │ ├── pooling.ipynb │ └── student-t-sampling-error.ipynb ├── toy │ ├── distribution-annulus.ipynb │ ├── distribution-cone.ipynb │ ├── distribution-eight-schools.ipynb │ ├── distribution-german-credit-hierarchical.ipynb │ ├── distribution-german-credit.ipynb │ ├── distribution-high-dimensional-gaussian.ipynb │ ├── distribution-multimodal-gaussian.ipynb │ ├── distribution-neals-funnel.ipynb │ ├── distribution-rosenbrock.ipynb │ ├── 
distribution-simple-egg-box.ipynb │ ├── distribution-twisted-gaussian.ipynb │ ├── model-beeler-reuter-ap.ipynb │ ├── model-constant.ipynb │ ├── model-fitzhugh-nagumo.ipynb │ ├── model-goodwin-oscillator.ipynb │ ├── model-hes1-michaelis-menten.ipynb │ ├── model-hodgkin-huxley-ik.ipynb │ ├── model-logistic.ipynb │ ├── model-lotka-volterra.ipynb │ ├── model-repressilator.ipynb │ ├── model-simple-harmonic-oscillator.ipynb │ ├── model-sir.ipynb │ ├── model-stochastic-degradation.ipynb │ ├── model-stochastic-logistic-growth.ipynb │ ├── model-stochastic-michaelis-menten.ipynb │ ├── model-stochastic-production-degradation.ipynb │ └── model-stochastic-schlogl.ipynb └── using-python.md ├── papers ├── README.md ├── main-pints.bib └── used-pints.bib ├── pints ├── __init__.py ├── _abc │ ├── __init__.py │ ├── _abc_rejection.py │ └── _abc_smc.py ├── _boundaries.py ├── _core.py ├── _diagnostics.py ├── _error_measures.py ├── _evaluation.py ├── _log_likelihoods.py ├── _log_pdfs.py ├── _log_priors.py ├── _logger.py ├── _mcmc │ ├── __init__.py │ ├── _adaptive_covariance.py │ ├── _differential_evolution.py │ ├── _dram_ac.py │ ├── _dream.py │ ├── _dual_averaging.py │ ├── _emcee_hammer.py │ ├── _haario_ac.py │ ├── _haario_bardenet_ac.py │ ├── _hamiltonian.py │ ├── _mala.py │ ├── _metropolis.py │ ├── _monomial_gamma_hamiltonian.py │ ├── _nuts.py │ ├── _population.py │ ├── _rao_blackwell_ac.py │ ├── _relativistic.py │ ├── _slice_doubling.py │ ├── _slice_rank_shrinking.py │ ├── _slice_stepout.py │ └── _summary.py ├── _nested │ ├── __init__.py │ ├── _ellipsoid.py │ └── _rejection.py ├── _optimisers │ ├── __init__.py │ ├── _adam.py │ ├── _cmaes.py │ ├── _cmaes_bare.py │ ├── _gradient_descent.py │ ├── _irpropmin.py │ ├── _nelder_mead.py │ ├── _pso.py │ ├── _snes.py │ └── _xnes.py ├── _sample_initial_points.py ├── _transformation.py ├── _util.py ├── interfaces │ ├── __init__.py │ └── stan │ │ ├── __init__.py │ │ └── _stan.py ├── io.py ├── noise.py ├── plot │ ├── __init__.py │ ├── 
_autocorrelation.py │ ├── _function.py │ ├── _function_between_points.py │ ├── _histogram.py │ ├── _pairwise.py │ ├── _series.py │ ├── _surface.py │ └── _trace.py ├── residuals_diagnostics.py ├── tests │ ├── shared.py │ ├── test_abc_controller.py │ ├── test_abc_rejection.py │ ├── test_abc_smc.py │ ├── test_boundaries.py │ ├── test_diagnostics.py │ ├── test_dual_averaging.py │ ├── test_error_measures.py │ ├── test_evaluators.py │ ├── test_interfaces_stan.py │ ├── test_io.py │ ├── test_log_likelihoods.py │ ├── test_log_pdfs.py │ ├── test_log_priors.py │ ├── test_logger.py │ ├── test_matrix2d.py │ ├── test_mcmc_adaptive.py │ ├── test_mcmc_controller.py │ ├── test_mcmc_differential_evolution.py │ ├── test_mcmc_dram_ac.py │ ├── test_mcmc_dream.py │ ├── test_mcmc_emcee_hammer.py │ ├── test_mcmc_haario_ac.py │ ├── test_mcmc_haario_bardenet_ac.py │ ├── test_mcmc_hamiltonian.py │ ├── test_mcmc_mala.py │ ├── test_mcmc_metropolis.py │ ├── test_mcmc_monomial_gamma_hmc.py │ ├── test_mcmc_nuts.py │ ├── test_mcmc_population.py │ ├── test_mcmc_rao_blackwell_ac.py │ ├── test_mcmc_relativistic.py │ ├── test_mcmc_slice_doubling.py │ ├── test_mcmc_slice_rank_shrinking.py │ ├── test_mcmc_slice_stepout.py │ ├── test_mcmc_summary.py │ ├── test_multi_output_problem.py │ ├── test_nested_controller.py │ ├── test_nested_ellipsoid_sampler.py │ ├── test_nested_rejection_sampler.py │ ├── test_noise.py │ ├── test_opt_adam.py │ ├── test_opt_cmaes.py │ ├── test_opt_cmaes_bare.py │ ├── test_opt_controller.py │ ├── test_opt_easy_optimisation.py │ ├── test_opt_gradient_descent.py │ ├── test_opt_irpropmin.py │ ├── test_opt_nelder_mead.py │ ├── test_opt_pso.py │ ├── test_opt_snes.py │ ├── test_opt_xnes.py │ ├── test_plot.py │ ├── test_residuals_diagnostics.py │ ├── test_sample_initial_points.py │ ├── test_single_output_problem.py │ ├── test_test_shared.py │ ├── test_timer.py │ ├── test_toy_annulus_logpdf.py │ ├── test_toy_br_ap_model.py │ ├── test_toy_classes.py │ ├── test_toy_cone_logpdf.py │ ├── 
test_toy_constant_model.py │ ├── test_toy_eight_schools_logpdf.py │ ├── test_toy_fitzhugh_nagumo_model.py │ ├── test_toy_gaussian_logpdf.py │ ├── test_toy_german_credit_hierarchical_logpdf.py │ ├── test_toy_german_credit_logpdf.py │ ├── test_toy_goodwin_oscillator_model.py │ ├── test_toy_hes1_michaelis_menten_model.py │ ├── test_toy_hh_ik_model.py │ ├── test_toy_high_dimensional_gaussian_logpdf.py │ ├── test_toy_logistic_model.py │ ├── test_toy_lotka_volterra_model.py │ ├── test_toy_multimodal_gaussian_logpdf.py │ ├── test_toy_neals_funnel_logpdf.py │ ├── test_toy_parabolic_error.py │ ├── test_toy_repressilator.py │ ├── test_toy_rosenbrock.py │ ├── test_toy_sho_model.py │ ├── test_toy_simple_egg_box_logpdf.py │ ├── test_toy_sir_model.py │ ├── test_toy_stochastic_degradation_model.py │ ├── test_toy_stochastic_logistic_model.py │ ├── test_toy_stochastic_markov_jump_model.py │ ├── test_toy_stochastic_michaelis_menten_model.py │ ├── test_toy_stochastic_production_degradation_model.py │ ├── test_toy_stochastic_schlogl_model.py │ ├── test_toy_twisted_gaussian_logpdf.py │ ├── test_transformation.py │ ├── test_vector.py │ └── test_version.py ├── toy │ ├── __init__.py │ ├── _annulus.py │ ├── _beeler_reuter_model.py │ ├── _cone.py │ ├── _constant_model.py │ ├── _eight_schools.py │ ├── _fitzhugh_nagumo_model.py │ ├── _gaussian.py │ ├── _german_credit.py │ ├── _german_credit_hierarchical.py │ ├── _goodwin_oscillator_model.py │ ├── _hes1_michaelis_menten.py │ ├── _hh_ik_model.py │ ├── _high_dimensional_gaussian.py │ ├── _logistic_model.py │ ├── _lotka_volterra_model.py │ ├── _multimodal_gaussian.py │ ├── _neals_funnel.py │ ├── _parabola.py │ ├── _repressilator_model.py │ ├── _rosenbrock.py │ ├── _sho_model.py │ ├── _simple_egg_box.py │ ├── _sir_model.py │ ├── _toy_classes.py │ ├── _twisted_gaussian_banana.py │ └── stochastic │ │ ├── __init__.py │ │ ├── _degradation_model.py │ │ ├── _logistic_model.py │ │ ├── _markov_jump_model.py │ │ ├── _michaelis_menten_model.py │ │ ├── 
_production_degradation_model.py │ │ └── _schlogl_model.py └── version ├── postBuild ├── run-tests.py └── setup.py /.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | source = pints 3 | omit = 4 | pints/tests/* 5 | 6 | [report] 7 | exclude_lines = 8 | pragma: no cover 9 | pragma: no linux cover 10 | def __repr__ 11 | raise AssertionError 12 | raise NotImplementedError 13 | except ImportError: 14 | if False: 15 | if __name__ == .__main__.: 16 | -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | exclude= 3 | .git, 4 | problems, 5 | pints/__init__.py, 6 | pints/plot/__init__.py, 7 | pints/toy/__init__.py, 8 | env, 9 | venv, 10 | build 11 | ignore= 12 | # Block comment should start with '# ' 13 | # Not if it's a commented out line 14 | E265, 15 | 16 | # Ambiguous variable names 17 | # It's absolutely fine to have i and I 18 | E741, 19 | 20 | # List comprehension redefines variable 21 | # Re-using throw-away variables like `i`, `x`, etc. 
is a Good Idea 22 | F812, 23 | 24 | # Blank line at end of file 25 | # This increases readability 26 | W391, 27 | 28 | # Line break before binary operator 29 | # This is now actually advised in pep8 30 | W503, 31 | 32 | # Line break after binary operator 33 | W504, 34 | 35 | -------------------------------------------------------------------------------- /.github/workflows/copyright-test.yml: -------------------------------------------------------------------------------- 1 | name: Copyright 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | # Run workflow when PR is changed, or when changed from draft to ready 9 | types: [opened, synchronize, reopened, ready_for_review] 10 | branches: 11 | - '**' 12 | 13 | jobs: 14 | 15 | build-and-test: 16 | name: Copyright 17 | runs-on: ubuntu-latest 18 | 19 | # Skip job if it's a draft PR 20 | if: github.event.pull_request.draft == false 21 | 22 | steps: 23 | - uses: actions/checkout@v4 24 | 25 | - name: Set up Python 26 | uses: actions/setup-python@v5 27 | with: 28 | python-version: '3.13' 29 | 30 | - name: install pints 31 | run: | 32 | python --version 33 | python -m pip install --upgrade pip setuptools wheel 34 | python -m pip install . 
35 | 36 | - name: run copyright test 37 | run: | 38 | python run-tests.py --copyright 39 | -------------------------------------------------------------------------------- /.github/workflows/coverage-test.yml: -------------------------------------------------------------------------------- 1 | name: Coverage 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | # Run workflow when PR is changed, or when changed from draft to ready 9 | types: [opened, synchronize, reopened, ready_for_review] 10 | branches: 11 | - '**' 12 | 13 | jobs: 14 | 15 | build-and-test: 16 | name: Coverage 17 | runs-on: ubuntu-latest 18 | 19 | # Skip job if it's a draft PR 20 | if: github.event.pull_request.draft == false 21 | 22 | steps: 23 | - uses: actions/checkout@v4 24 | 25 | - name: Set up Python 26 | uses: actions/setup-python@v5 27 | with: 28 | # Testing on version that Stan currently supports 29 | python-version: '3.12' 30 | 31 | - name: install pints 32 | run: | 33 | python --version 34 | python -m pip install --upgrade pip setuptools wheel 35 | python -m pip install .[stan] 36 | python -m pip install coverage codecov 37 | 38 | - name: run coverage 39 | run: | 40 | coverage run run-tests.py --unit 41 | 42 | - name: codecov 43 | if: success() 44 | env: 45 | CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} 46 | run: | 47 | codecov 48 | -------------------------------------------------------------------------------- /.github/workflows/docs-test.yml: -------------------------------------------------------------------------------- 1 | name: Doctest 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | # Run workflow when PR is changed, or when changed from draft to ready 9 | types: [opened, synchronize, reopened, ready_for_review] 10 | branches: 11 | - '**' 12 | 13 | jobs: 14 | 15 | build-and-test: 16 | name: Docs 17 | runs-on: ubuntu-latest 18 | 19 | # Skip job if it's a draft PR 20 | if: github.event.pull_request.draft == false 21 | 22 | steps: 23 | - uses: 
actions/checkout@v4 24 | 25 | - name: Set up Python 26 | uses: actions/setup-python@v5 27 | with: 28 | python-version: '3.13' 29 | 30 | - name: install pints 31 | run: | 32 | python --version 33 | python -m pip install --upgrade pip setuptools wheel 34 | python -m pip install . 35 | python -m pip install .[docs] 36 | 37 | - name: run doctest 38 | run: | 39 | python run-tests.py --doctest 40 | -------------------------------------------------------------------------------- /.github/workflows/notebook-interfaces-test.yml: -------------------------------------------------------------------------------- 1 | name: Notebooks (interfaces) 2 | 3 | on: 4 | schedule: 5 | # 4am on monday (minute, hour, day, month, day-of-the-week) 6 | - cron: '0 4 * * 1' 7 | 8 | jobs: 9 | 10 | build-and-test: 11 | name: interfaces 12 | runs-on: ubuntu-latest 13 | 14 | steps: 15 | - uses: actions/checkout@v4 16 | 17 | - name: Set up Python 18 | uses: actions/setup-python@v5 19 | with: 20 | python-version: '3.12' 21 | 22 | # We use e.g. install pints[stan] to install dependencies for interfaces 23 | # that have some code in pints/interfaces. Dependencies that are not used 24 | # by any code in the `pints` module are installed with pip (e.g. 25 | # `pip install autograd`). 
26 | - name: install pints, interfaces, extra dependencies 27 | run: | 28 | python --version 29 | python -m pip install --upgrade pip setuptools wheel 30 | python -m pip install .[dev,stan] 31 | python -m pip install autograd 32 | python -m pip install statsmodels 33 | 34 | - name: run jupyter notebooks 35 | run: | 36 | python run-tests.py --interfaces 37 | -------------------------------------------------------------------------------- /.github/workflows/notebook-test.yml: -------------------------------------------------------------------------------- 1 | name: Notebooks 2 | 3 | on: 4 | schedule: 5 | # 4am on monday (minute, hour, day, month, day-of-the-week) 6 | - cron: '0 4 * * 1' 7 | 8 | jobs: 9 | 10 | build-and-test: 11 | name: Notebooks 12 | runs-on: ubuntu-latest 13 | 14 | steps: 15 | - uses: actions/checkout@v4 16 | 17 | - name: Set up Python 18 | uses: actions/setup-python@v5 19 | with: 20 | python-version: '3.13' 21 | 22 | - name: install pints 23 | run: | 24 | python --version 25 | python -m pip install --upgrade pip setuptools wheel 26 | python -m pip install . 
27 | python -m pip install .[dev] 28 | 29 | # Install pandas for "estimation renewal" notebook 30 | - name: install pandas 31 | run: | 32 | python -m pip install pandas 33 | 34 | - name: run jupyter notebooks 35 | run: | 36 | python run-tests.py --books 37 | -------------------------------------------------------------------------------- /.github/workflows/style-test.yml: -------------------------------------------------------------------------------- 1 | name: Style tests (flake8) 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | # Run workflow when PR is changed, or when changed from draft to ready 9 | types: [opened, synchronize, reopened, ready_for_review] 10 | branches: 11 | - '**' 12 | 13 | jobs: 14 | 15 | build-and-test: 16 | name: Style 17 | runs-on: ubuntu-latest 18 | 19 | # Skip job if it's a draft PR 20 | if: github.event.pull_request.draft == false 21 | 22 | steps: 23 | - uses: actions/checkout@v4 24 | 25 | - name: Set up Python 26 | uses: actions/setup-python@v5 27 | with: 28 | python-version: '3.13' 29 | 30 | 31 | - name: install pints 32 | run: | 33 | python --version 34 | python -m pip install --upgrade pip setuptools wheel 35 | python -m pip install . 
36 | python -m pip install .[dev] 37 | 38 | - name: run style tests 39 | run: | 40 | python -m flake8 41 | -------------------------------------------------------------------------------- /.github/workflows/unit-test-os-coverage.yml: -------------------------------------------------------------------------------- 1 | name: Unit tests (OS versions) 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | # Run workflow when PR is changed, or when changed from draft to ready 9 | types: [opened, synchronize, reopened, ready_for_review] 10 | branches: 11 | - '**' 12 | 13 | jobs: 14 | 15 | build-and-test: 16 | name: OS unit tests 17 | runs-on: ${{ matrix.os }} 18 | 19 | # Skip job if it's a draft PR 20 | if: github.event.pull_request.draft == false 21 | 22 | strategy: 23 | matrix: 24 | os: [ubuntu-20.04, macos-latest, windows-latest] 25 | # Note: Ubuntu 22 is checked by other tests 26 | 27 | steps: 28 | - uses: actions/checkout@v4 29 | 30 | - name: Set up Python 31 | uses: actions/setup-python@v5 32 | with: 33 | python-version: '3.13' 34 | 35 | - name: install pints 36 | run: | 37 | python --version 38 | python -m pip install --upgrade pip setuptools wheel 39 | python -m pip install . 
40 | 41 | - name: run unit tests 42 | run: | 43 | python run-tests.py --unit 44 | -------------------------------------------------------------------------------- /.github/workflows/unit-test-python-coverage.yml: -------------------------------------------------------------------------------- 1 | name: Unit tests (python versions) 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | # Run workflow when PR is changed, or when changed from draft to ready 9 | types: [opened, synchronize, reopened, ready_for_review] 10 | branches: 11 | - '**' 12 | 13 | jobs: 14 | 15 | build-and-test: 16 | name: Python unit tests 17 | runs-on: ubuntu-latest 18 | 19 | # Skip job if it's a draft PR 20 | if: github.event.pull_request.draft == false 21 | 22 | strategy: 23 | matrix: 24 | python-version: ['3.8', '3.9', '3.10', '3.11', '3.12'] 25 | # Note: Latest version is tested by coverage test and os tests 26 | 27 | steps: 28 | - uses: actions/checkout@v4 29 | 30 | - name: Set up Python ${{ matrix.python-version }} 31 | uses: actions/setup-python@v5 32 | with: 33 | python-version: ${{ matrix.python-version }} 34 | architecture: x64 35 | 36 | - name: install pints 37 | run: | 38 | python --version 39 | python -m pip install --upgrade pip setuptools wheel 40 | python -m pip install . 
41 | 42 | - name: install stan 43 | if: ${{ matrix.python-version >= '3.10' && matrix.python-version < '3.13' }} 44 | run: python -m pip install .[stan] 45 | 46 | - name: run unit tests 47 | run: | 48 | python run-tests.py --unit 49 | -------------------------------------------------------------------------------- /.github/workflows/upload-to-pypi.yml: -------------------------------------------------------------------------------- 1 | name: Upload to PyPI 2 | 3 | on: 4 | release: 5 | types: 6 | - published 7 | 8 | jobs: 9 | 10 | upload-to-pypi: 11 | name: Upload to PyPI 12 | runs-on: ubuntu-latest 13 | 14 | steps: 15 | - uses: actions/checkout@v4 16 | 17 | - name: Set up Python 18 | uses: actions/setup-python@v5 19 | with: 20 | python-version: '3.11' 21 | 22 | - name: install dependencies 23 | run: | 24 | python --version 25 | python -m pip install --upgrade pip setuptools wheel 26 | python -m pip install --upgrade twine 27 | 28 | - name: create release binaries 29 | run: | 30 | python setup.py sdist bdist_wheel 31 | 32 | - name: publish to PyPI 33 | uses: pypa/gh-action-pypi-publish@master 34 | with: 35 | user: __token__ 36 | password: ${{ secrets.PYPI_TOKEN }} 37 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # Setuptools files 7 | *.egg-info 8 | 9 | # Jupyter checkpoints 10 | .ipynb_checkpoints/ 11 | 12 | # Jupyter stan build files 13 | examples/interfaces/build 14 | 15 | # Apple stuff 16 | .DS_Store 17 | 18 | # R files 19 | .Rhistory 20 | .coverage 21 | 22 | # vscode 23 | .vscode 24 | 25 | # pycharm 26 | .idea 27 | 28 | # Studies have their own repo now 29 | studies 30 | 31 | # Virtual environments 32 | venv/* 33 | env/* 34 | 35 | # Build artifacts 36 | build/* 37 | dist/* 38 | 
-------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | # Read the Docs configuration file for Sphinx projects 2 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 3 | 4 | # Required 5 | version: 2 6 | 7 | # Set the OS, Python version and other tools you might need 8 | build: 9 | os: ubuntu-22.04 10 | tools: 11 | python: "3.9" # Currently (2023-07-25) seems to be the sweet spot... 12 | # You can also specify other tool versions: 13 | # nodejs: "20" 14 | # rust: "1.70" 15 | # golang: "1.20" 16 | 17 | # Build documentation in the "docs/" directory with Sphinx 18 | sphinx: 19 | configuration: docs/source/conf.py 20 | # You can configure Sphinx to use a different builder, for instance use the dirhtml builder for simpler URLs 21 | # builder: "dirhtml" 22 | # Fail on all warnings to avoid broken references 23 | # fail_on_warning: true 24 | 25 | # Optionally build your docs in additional formats such as PDF and ePub 26 | # formats: 27 | # - pdf 28 | # - epub 29 | 30 | # Optional but recommended, declare the Python requirements required 31 | # to build your documentation 32 | # See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html 33 | python: 34 | install: 35 | - requirements: .requirements-docs.txt 36 | - method: pip 37 | path: . 
38 | 39 | -------------------------------------------------------------------------------- /.requirements-docs.txt: -------------------------------------------------------------------------------- 1 | # Requirements for readthedocs.io 2 | cma>=2 3 | numpy>=1.8 4 | scipy>=0.14 5 | matplotlib>=1.5 6 | guzzle-sphinx-theme 7 | sphinx>=1.5, !=1.7.3 8 | tabulate 9 | -------------------------------------------------------------------------------- /CITATION: -------------------------------------------------------------------------------- 1 | To cite PINTS in publications, please use: 2 | 3 | Clerx, M., Robinson, M., Lambert, B., Lei, C. L., Ghosh, S., Mirams, G. R., & Gavaghan, D. J. (2019). 4 | Probabilistic Inference on Noisy Time Series (PINTS). 5 | Journal of Open Research Software, 7(1), 23. 6 | 7 | https://doi.org/10.5334/jors.252 8 | 9 | A BibTeX entry for LaTeX users is 10 | 11 | @article{Clerx2019Pints, 12 | title={Probabilistic Inference on Noisy Time Series ({PINTS})}, 13 | author={Clerx, Michael and Robinson, Martin and Lambert, Ben and Lei, Chon Lok and Ghosh, Sanmitra and Mirams, Gary R and Gavaghan, David J}, 14 | journal={Journal of Open Research Software}, 15 | volume={7}, 16 | number={1}, 17 | pages={23}, 18 | year={2019}, 19 | doi={10.5334/jors.252} 20 | } 21 | 22 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2017-2025, University of Oxford (University of Oxford means the 4 | Chancellor, Masters and Scholars of the University of Oxford, having an 5 | administrative office at Wellington Square, Oxford OX1 2JD, UK). 6 | All rights reserved. 7 | 8 | Redistribution and use in source and binary forms, with or without 9 | modification, are permitted provided that the following conditions are met: 10 | 11 | 1. 
Redistributions of source code must retain the above copyright notice, this 12 | list of conditions and the following disclaimer. 13 | 14 | 2. Redistributions in binary form must reproduce the above copyright notice, 15 | this list of conditions and the following disclaimer in the documentation 16 | and/or other materials provided with the distribution. 17 | 18 | 3. Neither the name of the copyright holder nor the names of its 19 | contributors may be used to endorse or promote products derived from 20 | this software without specific prior written permission. 21 | 22 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 23 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 25 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 26 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 28 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 29 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
32 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include pints/version 2 | -------------------------------------------------------------------------------- /docs/.gitignore: -------------------------------------------------------------------------------- 1 | build 2 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | SPHINXPROJ = Pints 8 | SOURCEDIR = source 9 | BUILDDIR = build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=source 11 | set BUILDDIR=build 12 | set SPHINXPROJ=Pints 13 | 14 | if "%1" == "" goto help 15 | 16 | %SPHINXBUILD% >NUL 2>NUL 17 | if errorlevel 9009 ( 18 | echo. 19 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 20 | echo.installed, then set the SPHINXBUILD environment variable to point 21 | echo.to the full path of the 'sphinx-build' executable. 
Alternatively you 22 | echo.may add the Sphinx directory to PATH. 23 | echo. 24 | echo.If you don't have Sphinx installed, grab it from 25 | echo.http://sphinx-doc.org/ 26 | exit /b 1 27 | ) 28 | 29 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 30 | goto end 31 | 32 | :help 33 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 34 | 35 | :end 36 | popd 37 | -------------------------------------------------------------------------------- /docs/source/_static/custom.css: -------------------------------------------------------------------------------- 1 | /* More space between class definitions, and functions */ 2 | dl.class, dl.function { 3 | margin-bottom: 40px; 4 | } 5 | -------------------------------------------------------------------------------- /docs/source/abc_samplers/abc_smc.rst: -------------------------------------------------------------------------------- 1 | *************** 2 | ABC-SMC sampler 3 | *************** 4 | 5 | .. currentmodule:: pints 6 | 7 | .. autoclass:: ABCSMC -------------------------------------------------------------------------------- /docs/source/abc_samplers/base_classes.rst: -------------------------------------------------------------------------------- 1 | ********************** 2 | ABC sampler base class 3 | ********************** 4 | 5 | .. currentmodule:: pints 6 | 7 | .. autoclass:: ABCSampler 8 | .. autoclass:: ABCController -------------------------------------------------------------------------------- /docs/source/abc_samplers/index.rst: -------------------------------------------------------------------------------- 1 | ************ 2 | ABC samplers 3 | ************ 4 | 5 | .. currentmodule:: pints 6 | 7 | Pints provides a number of samplers for Approximate Bayesian 8 | Computation (ABC), all implementing the :class:`ABCSampler` 9 | interface, that can be used to sample from a stochastic model 10 | given a :class:`LogPrior` and a :class:`ErrorMeasure`. 11 | 12 | 13 | .. 
toctree:: 14 | 15 | base_classes 16 | abc_smc 17 | rejection_abc -------------------------------------------------------------------------------- /docs/source/abc_samplers/rejection_abc.rst: -------------------------------------------------------------------------------- 1 | ********************* 2 | Rejection ABC sampler 3 | ********************* 4 | 5 | .. currentmodule:: pints 6 | 7 | .. autoclass:: RejectionABC -------------------------------------------------------------------------------- /docs/source/boundaries.rst: -------------------------------------------------------------------------------- 1 | ********** 2 | Boundaries 3 | ********** 4 | 5 | .. currentmodule:: pints 6 | 7 | Simple boundaries for an optimisation can be created using 8 | :class:`RectangularBoundaries`. 9 | More complex types can be made using :class:`LogPDFBoundaries` or a custom 10 | implementation of the :class:`Boundaries` interface. 11 | 12 | Overview: 13 | 14 | - :class:`Boundaries` 15 | - :class:`ComposedBoundaries` 16 | - :class:`LogPDFBoundaries` 17 | - :class:`RectangularBoundaries` 18 | 19 | 20 | .. autoclass:: Boundaries 21 | 22 | .. autoclass:: ComposedBoundaries 23 | 24 | .. autoclass:: LogPDFBoundaries 25 | 26 | .. autoclass:: RectangularBoundaries 27 | -------------------------------------------------------------------------------- /docs/source/core_classes_and_methods.rst: -------------------------------------------------------------------------------- 1 | ************************ 2 | Core classes and methods 3 | ************************ 4 | 5 | .. currentmodule:: pints 6 | 7 | Pints provides the :class:`SingleOutputProblem` and 8 | :class:`MultiOutputProblem` classes to formulate 9 | inverse problems based on time series data and 10 | :class:`ForwardModel`. 
11 | 12 | Overview: 13 | 14 | - :class:`ForwardModel` 15 | - :class:`ForwardModelS1` 16 | - :class:`MultiOutputProblem` 17 | - :class:`SingleOutputProblem` 18 | - :class:`TunableMethod` 19 | - :func:`version` 20 | 21 | .. autofunction:: version 22 | 23 | .. autoclass:: TunableMethod 24 | 25 | 26 | Forward model 27 | ************* 28 | 29 | .. autoclass:: ForwardModel 30 | 31 | Forward model with sensitivities 32 | ******************************** 33 | 34 | .. autoclass:: ForwardModelS1 35 | 36 | Problems 37 | ******** 38 | 39 | .. autoclass:: SingleOutputProblem 40 | 41 | .. autoclass:: MultiOutputProblem 42 | 43 | -------------------------------------------------------------------------------- /docs/source/diagnostic_plots.rst: -------------------------------------------------------------------------------- 1 | **************** 2 | Diagnostic plots 3 | **************** 4 | 5 | .. module:: pints.plot 6 | 7 | For users who have Matplotlib installed, Pints offers a number of diagnostic 8 | plots that can be used to quickly check obtained results. 9 | 10 | Plotting functions: 11 | 12 | - :func:`function` 13 | - :func:`function_between_points` 14 | - :func:`surface` 15 | 16 | Diagnosing MCMC results: 17 | 18 | - :func:`autocorrelation` 19 | - :func:`histogram` 20 | - :func:`pairwise` 21 | - :func:`series` 22 | - :func:`trace` 23 | 24 | Functions 25 | --------- 26 | 27 | .. autofunction:: function 28 | 29 | .. autofunction:: function_between_points 30 | 31 | .. autofunction:: surface 32 | 33 | MCMC Diagnostics 34 | ---------------- 35 | 36 | .. autofunction:: autocorrelation 37 | 38 | .. autofunction:: histogram 39 | 40 | .. autofunction:: pairwise 41 | 42 | .. autofunction:: series 43 | 44 | .. 
autofunction:: trace 45 | 46 | -------------------------------------------------------------------------------- /docs/source/diagnostics.rst: -------------------------------------------------------------------------------- 1 | *********************** 2 | Diagnosing MCMC results 3 | *********************** 4 | 5 | .. currentmodule:: pints 6 | 7 | Pints provides a number of functions to diagnose MCMC progress and convergence. 8 | 9 | Overview: 10 | 11 | - :func:`effective_sample_size` 12 | - :func:`rhat` 13 | 14 | 15 | .. autofunction:: rhat 16 | 17 | .. autofunction:: rhat_all_params 18 | 19 | .. autofunction:: effective_sample_size 20 | 21 | -------------------------------------------------------------------------------- /docs/source/error_measures.rst: -------------------------------------------------------------------------------- 1 | ************** 2 | Error measures 3 | ************** 4 | 5 | .. currentmodule:: pints 6 | 7 | Error measures are callable objects that return some scalar representing the 8 | error between a model and an experiment. 9 | 10 | Example:: 11 | 12 | error = pints.SumOfSquaresError(problem) 13 | x = [1,2,3] 14 | fx = error(x) 15 | 16 | Overview: 17 | 18 | - :class:`ErrorMeasure` 19 | - :class:`MeanSquaredError` 20 | - :class:`NormalisedRootMeanSquaredError` 21 | - :class:`ProbabilityBasedError` 22 | - :class:`ProblemErrorMeasure` 23 | - :class:`RootMeanSquaredError` 24 | - :class:`SumOfErrors` 25 | - :class:`SumOfSquaresError` 26 | 27 | 28 | .. autoclass:: ErrorMeasure 29 | 30 | .. autoclass:: MeanSquaredError 31 | 32 | .. autoclass:: NormalisedRootMeanSquaredError 33 | 34 | .. autoclass:: ProbabilityBasedError 35 | 36 | .. autoclass:: ProblemErrorMeasure 37 | 38 | .. autoclass:: RootMeanSquaredError 39 | 40 | .. autoclass:: SumOfErrors 41 | 42 | .. 
autoclass:: SumOfSquaresError 43 | -------------------------------------------------------------------------------- /docs/source/function_evaluation.rst: -------------------------------------------------------------------------------- 1 | ******************* 2 | Function evaluation 3 | ******************* 4 | 5 | .. currentmodule:: pints 6 | 7 | The :class:`Evaluator` classes provide an abstraction layer that makes it 8 | easier to implement sequential and/or parallel evaluation of functions. 9 | 10 | Example:: 11 | 12 | f = pints.SumOfSquaresError(problem) 13 | e = pints.ParallelEvaluator(f) 14 | x = [[1, 2], 15 | [3, 4], 16 | [5, 6], 17 | [7, 8], 18 | ] 19 | fx = e.evaluate(x) 20 | 21 | Overview: 22 | 23 | - :func:`evaluate` 24 | - :class:`Evaluator` 25 | - :class:`ParallelEvaluator` 26 | - :class:`SequentialEvaluator` 27 | - :class:`MultiSequentialEvaluator` 28 | 29 | 30 | .. autofunction:: evaluate 31 | 32 | .. autoclass:: Evaluator 33 | 34 | .. autoclass:: ParallelEvaluator 35 | 36 | .. autoclass:: SequentialEvaluator 37 | 38 | .. autoclass:: MultiSequentialEvaluator 39 | -------------------------------------------------------------------------------- /docs/source/io.rst: -------------------------------------------------------------------------------- 1 | ****************** 2 | I/O Helper classes 3 | ****************** 4 | 5 | .. module:: pints.io 6 | 7 | .. autofunction:: load_samples 8 | 9 | .. autofunction:: save_samples 10 | 11 | -------------------------------------------------------------------------------- /docs/source/log_likelihoods.rst: -------------------------------------------------------------------------------- 1 | *************** 2 | Log-likelihoods 3 | *************** 4 | 5 | .. currentmodule:: pints 6 | 7 | The classes below all implement the :class:`ProblemLogLikelihood` interface, 8 | and can calculate a log-likelihood based on some time-series :class:`Problem` 9 | and an assumed noise model. 
10 | 11 | Example:: 12 | 13 | logpdf = pints.GaussianLogLikelihood(problem) 14 | x = [1, 2, 3] 15 | fx = logpdf(x) 16 | 17 | Overview: 18 | 19 | - :class:`AR1LogLikelihood` 20 | - :class:`ARMA11LogLikelihood` 21 | - :class:`CauchyLogLikelihood` 22 | - :class:`CensoredGaussianLogLikelihood` 23 | - :class:`ConstantAndMultiplicativeGaussianLogLikelihood` 24 | - :class:`GaussianIntegratedLogUniformLogLikelihood` 25 | - :class:`GaussianIntegratedUniformLogLikelihood` 26 | - :class:`GaussianKnownSigmaLogLikelihood` 27 | - :class:`GaussianLogLikelihood` 28 | - :class:`KnownNoiseLogLikelihood` 29 | - :class:`LogNormalLogLikelihood` 30 | - :class:`MultiplicativeGaussianLogLikelihood` 31 | - :class:`ScaledLogLikelihood` 32 | - :class:`StudentTLogLikelihood` 33 | - :class:`UnknownNoiseLogLikelihood` 34 | 35 | 36 | .. autoclass:: AR1LogLikelihood 37 | 38 | .. autoclass:: ARMA11LogLikelihood 39 | 40 | .. autoclass:: CauchyLogLikelihood 41 | 42 | .. autoclass:: CensoredGaussianLogLikelihood 43 | 44 | .. autoclass:: ConstantAndMultiplicativeGaussianLogLikelihood 45 | 46 | .. autoclass:: GaussianIntegratedLogUniformLogLikelihood 47 | 48 | .. autoclass:: GaussianIntegratedUniformLogLikelihood 49 | 50 | .. autoclass:: GaussianKnownSigmaLogLikelihood 51 | 52 | .. autoclass:: GaussianLogLikelihood 53 | 54 | .. autoclass:: KnownNoiseLogLikelihood 55 | 56 | .. autoclass:: LogNormalLogLikelihood 57 | 58 | .. autoclass:: MultiplicativeGaussianLogLikelihood 59 | 60 | .. autoclass:: ScaledLogLikelihood 61 | 62 | .. autoclass:: StudentTLogLikelihood 63 | 64 | .. autoclass:: UnknownNoiseLogLikelihood 65 | -------------------------------------------------------------------------------- /docs/source/log_pdfs.rst: -------------------------------------------------------------------------------- 1 | ******** 2 | Log-PDFs 3 | ******** 4 | 5 | .. 
currentmodule:: pints 6 | 7 | :class:`LogPDFs` are callable objects that represent 8 | distributions, including likelihoods and Bayesian priors and posteriors. 9 | They are unnormalised, i.e. their area does not necessarily sum up to 1, and 10 | for efficiency reasons we always work with the logarithm e.g. a log-likelihood 11 | instead of a likelihood. 12 | 13 | Example:: 14 | 15 | p = pints.GaussianLogPrior(mean=0, variance=1) 16 | x = p(0.1) 17 | 18 | Overview: 19 | 20 | - :class:`LogPDF` 21 | - :class:`LogPrior` 22 | - :class:`LogPosterior` 23 | - :class:`PooledLogPDF` 24 | - :class:`ProblemLogLikelihood` 25 | - :class:`SumOfIndependentLogPDFs` 26 | 27 | 28 | .. autoclass:: LogPDF 29 | 30 | .. autoclass:: LogPrior 31 | 32 | .. autoclass:: LogPosterior 33 | 34 | .. autoclass:: PooledLogPDF 35 | 36 | .. autoclass:: ProblemLogLikelihood 37 | 38 | .. autoclass:: SumOfIndependentLogPDFs 39 | 40 | -------------------------------------------------------------------------------- /docs/source/log_priors.rst: -------------------------------------------------------------------------------- 1 | ********** 2 | Log-priors 3 | ********** 4 | 5 | .. currentmodule:: pints 6 | 7 | A number of :class:`LogPriors` are provided for use in e.g. 8 | Bayesian inference. 9 | 10 | Example:: 11 | 12 | p = pints.GaussianLogPrior(mean=0, variance=1) 13 | x = p(0.1) 14 | 15 | Overview: 16 | 17 | - :class:`BetaLogPrior` 18 | - :class:`CauchyLogPrior` 19 | - :class:`ComposedLogPrior` 20 | - :class:`ExponentialLogPrior` 21 | - :class:`GammaLogPrior` 22 | - :class:`GaussianLogPrior` 23 | - :class:`HalfCauchyLogPrior` 24 | - :class:`InverseGammaLogPrior` 25 | - :class:`LogNormalLogPrior` 26 | - :class:`LogUniformLogPrior` 27 | - :class:`MultivariateGaussianLogPrior` 28 | - :class:`NormalLogPrior` 29 | - :class:`StudentTLogPrior` 30 | - :class:`TruncatedGaussianLogPrior` 31 | - :class:`UniformLogPrior` 32 | 33 | 34 | .. autoclass:: BetaLogPrior 35 | 36 | .. autoclass:: CauchyLogPrior 37 | 38 | .. 
autoclass:: ComposedLogPrior 39 | 40 | .. autoclass:: ExponentialLogPrior 41 | 42 | .. autoclass:: GammaLogPrior 43 | 44 | .. autoclass:: GaussianLogPrior 45 | 46 | .. autoclass:: HalfCauchyLogPrior 47 | 48 | .. autoclass:: InverseGammaLogPrior 49 | 50 | .. autoclass:: LogNormalLogPrior 51 | 52 | .. autoclass:: LogUniformLogPrior 53 | 54 | .. autoclass:: MultivariateGaussianLogPrior 55 | 56 | .. autoclass:: NormalLogPrior 57 | 58 | .. autoclass:: StudentTLogPrior 59 | 60 | .. autoclass:: TruncatedGaussianLogPrior 61 | 62 | .. autoclass:: UniformLogPrior 63 | -------------------------------------------------------------------------------- /docs/source/mcmc_samplers/adaptive_covariance_mc.rst: -------------------------------------------------------------------------------- 1 | ********************** 2 | Adaptive Covariance MC 3 | ********************** 4 | 5 | .. currentmodule:: pints 6 | 7 | .. autoclass:: AdaptiveCovarianceMC 8 | -------------------------------------------------------------------------------- /docs/source/mcmc_samplers/base_classes.rst: -------------------------------------------------------------------------------- 1 | ************************* 2 | MCMC Sampler base classes 3 | ************************* 4 | 5 | .. currentmodule:: pints 6 | 7 | .. autoclass:: MCMCSampler 8 | 9 | .. autoclass:: SingleChainMCMC 10 | 11 | .. autoclass:: MultiChainMCMC 12 | 13 | -------------------------------------------------------------------------------- /docs/source/mcmc_samplers/differential_evolution_mcmc.rst: -------------------------------------------------------------------------------- 1 | *************************** 2 | Differential Evolution MCMC 3 | *************************** 4 | 5 | .. currentmodule:: pints 6 | 7 | .. 
autoclass:: DifferentialEvolutionMCMC 8 | -------------------------------------------------------------------------------- /docs/source/mcmc_samplers/dram_ac_mcmc.rst: -------------------------------------------------------------------------------- 1 | ********* 2 | Dram ACMC 3 | ********* 4 | 5 | .. currentmodule:: pints 6 | 7 | .. autoclass:: DramACMC 8 | -------------------------------------------------------------------------------- /docs/source/mcmc_samplers/dream_mcmc.rst: -------------------------------------------------------------------------------- 1 | ********* 2 | DreamMCMC 3 | ********* 4 | 5 | .. currentmodule:: pints 6 | 7 | .. autoclass:: DreamMCMC 8 | -------------------------------------------------------------------------------- /docs/source/mcmc_samplers/dual_averaging.rst: -------------------------------------------------------------------------------- 1 | ************** 2 | Dual Averaging 3 | ************** 4 | 5 | Dual averaging is not a sampling method, but is a method of adaptively tuning the 6 | Hamiltonian Monte Carlo (HMC) step size and mass matrix for the particular log-posterior 7 | being sampled. Pints' NUTS sampler uses dual averaging, but we have defined the dual 8 | averaging method separately so that in the future it can be used in HMC and other 9 | HMC-derived samplers. 10 | 11 | 12 | .. currentmodule:: pints 13 | 14 | .. autoclass:: DualAveragingAdaption 15 | -------------------------------------------------------------------------------- /docs/source/mcmc_samplers/emcee_hammer_mcmc.rst: -------------------------------------------------------------------------------- 1 | *************** 2 | EmceeHammerMCMC 3 | *************** 4 | 5 | .. currentmodule:: pints 6 | 7 | ..
autoclass:: EmceeHammerMCMC 8 | -------------------------------------------------------------------------------- /docs/source/mcmc_samplers/haario_ac_mcmc.rst: -------------------------------------------------------------------------------- 1 | *********** 2 | Haario ACMC 3 | *********** 4 | 5 | .. currentmodule:: pints 6 | 7 | .. autoclass:: HaarioACMC 8 | -------------------------------------------------------------------------------- /docs/source/mcmc_samplers/haario_bardenet_ac_mcmc.rst: -------------------------------------------------------------------------------- 1 | ******************** 2 | Haario Bardenet ACMC 3 | ******************** 4 | 5 | .. currentmodule:: pints 6 | 7 | .. autoclass:: HaarioBardenetACMC 8 | 9 | .. autoclass:: AdaptiveCovarianceMCMC 10 | -------------------------------------------------------------------------------- /docs/source/mcmc_samplers/hamiltonian_mcmc.rst: -------------------------------------------------------------------------------- 1 | **************** 2 | Hamiltonian MCMC 3 | **************** 4 | 5 | .. currentmodule:: pints 6 | 7 | .. autoclass:: HamiltonianMCMC 8 | 9 | -------------------------------------------------------------------------------- /docs/source/mcmc_samplers/index.rst: -------------------------------------------------------------------------------- 1 | ************* 2 | MCMC Samplers 3 | ************* 4 | 5 | .. currentmodule:: pints 6 | 7 | Pints provides a number of MCMC methods, all implementing the :class:`MCMCSampler` 8 | interface, that can be used to sample from an unknown 9 | :class:`LogPDF` (usually a Bayesian 10 | :class:`LogPosterior`). 11 | 12 | ..
toctree:: 13 | 14 | running 15 | base_classes 16 | adaptive_covariance_mc 17 | differential_evolution_mcmc 18 | dram_ac_mcmc 19 | dream_mcmc 20 | dual_averaging 21 | emcee_hammer_mcmc 22 | haario_ac_mcmc 23 | haario_bardenet_ac_mcmc 24 | hamiltonian_mcmc 25 | mala_mcmc 26 | metropolis_mcmc 27 | monomial_gamma_hamiltonian_mcmc 28 | nuts_mcmc 29 | population_mcmc 30 | rao_blackwell_ac_mcmc 31 | relativistic_mcmc 32 | slice_doubling_mcmc 33 | slice_rank_shrinking_mcmc 34 | slice_stepout_mcmc 35 | summary_mcmc 36 | -------------------------------------------------------------------------------- /docs/source/mcmc_samplers/mala_mcmc.rst: -------------------------------------------------------------------------------- 1 | ************************************************** 2 | Metropolis-Adjusted Langevin Algorithm (MALA) MCMC 3 | ************************************************** 4 | 5 | .. currentmodule:: pints 6 | 7 | .. autoclass:: MALAMCMC 8 | 9 | -------------------------------------------------------------------------------- /docs/source/mcmc_samplers/metropolis_mcmc.rst: -------------------------------------------------------------------------------- 1 | *************************** 2 | Metropolis Random Walk MCMC 3 | *************************** 4 | 5 | .. currentmodule:: pints 6 | 7 | .. autoclass:: MetropolisRandomWalkMCMC 8 | 9 | -------------------------------------------------------------------------------- /docs/source/mcmc_samplers/monomial_gamma_hamiltonian_mcmc.rst: -------------------------------------------------------------------------------- 1 | ******************************* 2 | Monomial-Gamma Hamiltonian MCMC 3 | ******************************* 4 | 5 | .. currentmodule:: pints 6 | 7 | .. 
autoclass:: MonomialGammaHamiltonianMCMC 8 | -------------------------------------------------------------------------------- /docs/source/mcmc_samplers/nuts_mcmc.rst: -------------------------------------------------------------------------------- 1 | ********************** 2 | No-U-Turn MCMC Sampler 3 | ********************** 4 | 5 | .. currentmodule:: pints 6 | 7 | .. autoclass:: NoUTurnMCMC 8 | -------------------------------------------------------------------------------- /docs/source/mcmc_samplers/population_mcmc.rst: -------------------------------------------------------------------------------- 1 | *************** 2 | Population MCMC 3 | *************** 4 | 5 | .. currentmodule:: pints 6 | 7 | .. autoclass:: PopulationMCMC 8 | 9 | -------------------------------------------------------------------------------- /docs/source/mcmc_samplers/rao_blackwell_ac_mcmc.rst: -------------------------------------------------------------------------------- 1 | ****************** 2 | Rao-Blackwell ACMC 3 | ****************** 4 | 5 | .. currentmodule:: pints 6 | 7 | .. autoclass:: RaoBlackwellACMC 8 | -------------------------------------------------------------------------------- /docs/source/mcmc_samplers/relativistic_mcmc.rst: -------------------------------------------------------------------------------- 1 | ***************** 2 | Relativistic MCMC 3 | ***************** 4 | 5 | .. currentmodule:: pints 6 | 7 | .. autoclass:: RelativisticMCMC 8 | -------------------------------------------------------------------------------- /docs/source/mcmc_samplers/running.rst: -------------------------------------------------------------------------------- 1 | *********************** 2 | Running an MCMC routine 3 | *********************** 4 | 5 | .. currentmodule:: pints 6 | 7 | .. autofunction:: mcmc_sample 8 | 9 | .. autoclass:: MCMCController 10 | 11 | .. 
autoclass:: MCMCSampling 12 | -------------------------------------------------------------------------------- /docs/source/mcmc_samplers/slice_doubling_mcmc.rst: -------------------------------------------------------------------------------- 1 | ************************************************** 2 | Slice Sampling - Doubling MCMC 3 | ************************************************** 4 | 5 | .. currentmodule:: pints 6 | 7 | .. autoclass:: SliceDoublingMCMC 8 | -------------------------------------------------------------------------------- /docs/source/mcmc_samplers/slice_rank_shrinking_mcmc.rst: -------------------------------------------------------------------------------- 1 | ************************************ 2 | Slice Sampling - Rank Shrinking MCMC 3 | ************************************ 4 | 5 | .. currentmodule:: pints 6 | 7 | .. autoclass:: SliceRankShrinkingMCMC 8 | -------------------------------------------------------------------------------- /docs/source/mcmc_samplers/slice_stepout_mcmc.rst: -------------------------------------------------------------------------------- 1 | ************************************************** 2 | Slice Sampling - Stepout MCMC 3 | ************************************************** 4 | 5 | .. currentmodule:: pints 6 | 7 | .. autoclass:: SliceStepoutMCMC 8 | -------------------------------------------------------------------------------- /docs/source/mcmc_samplers/summary_mcmc.rst: -------------------------------------------------------------------------------- 1 | ************ 2 | MCMC Summary 3 | ************ 4 | 5 | .. currentmodule:: pints 6 | 7 | .. autoclass:: MCMCSummary 8 | -------------------------------------------------------------------------------- /docs/source/nested_samplers/index.rst: -------------------------------------------------------------------------------- 1 | *************** 2 | Nested samplers 3 | *************** 4 | 5 | .. 
toctree:: 6 | 7 | nested_sampler 8 | nested_ellipsoid_sampler 9 | nested_rejection_sampler 10 | 11 | -------------------------------------------------------------------------------- /docs/source/nested_samplers/nested_ellipsoid_sampler.rst: -------------------------------------------------------------------------------- 1 | ************************ 2 | Nested ellipsoid sampler 3 | ************************ 4 | 5 | .. currentmodule:: pints 6 | 7 | .. autoclass:: NestedEllipsoidSampler 8 | 9 | -------------------------------------------------------------------------------- /docs/source/nested_samplers/nested_rejection_sampler.rst: -------------------------------------------------------------------------------- 1 | ************************ 2 | Nested rejection sampler 3 | ************************ 4 | 5 | .. currentmodule:: pints 6 | 7 | .. autoclass:: NestedRejectionSampler 8 | 9 | -------------------------------------------------------------------------------- /docs/source/nested_samplers/nested_sampler.rst: -------------------------------------------------------------------------------- 1 | ************************* 2 | Nested sampler base class 3 | ************************* 4 | 5 | .. currentmodule:: pints 6 | 7 | .. autoclass:: NestedSampler 8 | 9 | .. autoclass:: NestedController 10 | -------------------------------------------------------------------------------- /docs/source/noise_generators.rst: -------------------------------------------------------------------------------- 1 | **************** 2 | Noise generators 3 | **************** 4 | 5 | .. module:: pints.noise 6 | 7 | Pints contains a module ``pints.noise`` that contains methods that generate 8 | different kinds of noise. 9 | This can then be added to simulation output to create "realistic" experimental 10 | data. 
11 | 12 | Overview: 13 | 14 | - :func:`ar1` 15 | - :func:`ar1_unity` 16 | - :func:`arma11` 17 | - :func:`arma11_unity` 18 | - :func:`independent` 19 | - :func:`multiplicative_gaussian` 20 | 21 | 22 | .. autofunction:: ar1 23 | 24 | .. autofunction:: ar1_unity 25 | 26 | .. autofunction:: arma11 27 | 28 | .. autofunction:: arma11_unity 29 | 30 | .. autofunction:: independent 31 | 32 | .. autofunction:: multiplicative_gaussian 33 | 34 | -------------------------------------------------------------------------------- /docs/source/noise_model_diagnostics.rst: -------------------------------------------------------------------------------- 1 | *********************** 2 | Noise model diagnostics 3 | *********************** 4 | 5 | .. module:: pints.residuals_diagnostics 6 | 7 | Pints includes functionality to generate diagnostic plots of the residuals. These tools may be useful to evaluate the validity of a noise model. 8 | 9 | Plotting functions: 10 | 11 | - :func:`plot_residuals_autocorrelation` 12 | - :func:`plot_residuals_binned_autocorrelation` 13 | - :func:`plot_residuals_binned_std` 14 | - :func:`plot_residuals_distance` 15 | - :func:`plot_residuals_vs_output` 16 | 17 | Diagnostics: 18 | 19 | - :func:`acorr` 20 | - :func:`calculate_residuals` 21 | 22 | 23 | Plotting functions 24 | ------------------ 25 | 26 | .. autofunction:: plot_residuals_autocorrelation 27 | 28 | .. autofunction:: plot_residuals_binned_autocorrelation 29 | 30 | .. autofunction:: plot_residuals_binned_std 31 | 32 | .. autofunction:: plot_residuals_distance 33 | 34 | .. autofunction:: plot_residuals_vs_output 35 | 36 | 37 | Diagnostics 38 | ----------- 39 | 40 | .. autofunction:: acorr 41 | 42 | .. 
autofunction:: calculate_residuals 43 | -------------------------------------------------------------------------------- /docs/source/optimisers/adam.rst: -------------------------------------------------------------------------------- 1 | ********************************* 2 | Adam (adaptive moment estimation) 3 | ********************************* 4 | 5 | .. currentmodule:: pints 6 | 7 | .. autoclass:: Adam 8 | 9 | -------------------------------------------------------------------------------- /docs/source/optimisers/base_classes.rst: -------------------------------------------------------------------------------- 1 | ********************** 2 | Optimiser base classes 3 | ********************** 4 | 5 | .. currentmodule:: pints 6 | 7 | .. autoclass:: Optimiser 8 | 9 | .. autoclass:: PopulationBasedOptimiser 10 | 11 | -------------------------------------------------------------------------------- /docs/source/optimisers/cmaes.rst: -------------------------------------------------------------------------------- 1 | ****** 2 | CMA-ES 3 | ****** 4 | 5 | .. currentmodule:: pints 6 | 7 | .. autoclass:: CMAES 8 | 9 | -------------------------------------------------------------------------------- /docs/source/optimisers/cmaes_bare.rst: -------------------------------------------------------------------------------- 1 | ***************** 2 | Bare-bones CMA-ES 3 | ***************** 4 | 5 | .. currentmodule:: pints 6 | 7 | .. autoclass:: BareCMAES 8 | 9 | -------------------------------------------------------------------------------- /docs/source/optimisers/convenience_methods.rst: -------------------------------------------------------------------------------- 1 | ******************* 2 | Convenience methods 3 | ******************* 4 | 5 | .. currentmodule:: pints 6 | 7 | .. autofunction:: fmin 8 | 9 | .. 
autofunction:: curve_fit 10 | -------------------------------------------------------------------------------- /docs/source/optimisers/gradient_descent.rst: -------------------------------------------------------------------------------- 1 | ************************************** 2 | Gradient descent (fixed learning rate) 3 | ************************************** 4 | 5 | .. currentmodule:: pints 6 | 7 | .. autoclass:: GradientDescent 8 | 9 | -------------------------------------------------------------------------------- /docs/source/optimisers/index.rst: -------------------------------------------------------------------------------- 1 | ********** 2 | Optimisers 3 | ********** 4 | 5 | .. currentmodule:: pints 6 | 7 | Pints provides a number of optimisers, all implementing the :class:`Optimiser` 8 | interface, that can be used to find the parameters that minimise an 9 | :class:`ErrorMeasure` or maximise a :class:`LogPDF`. 10 | 11 | The easiest way to run an optimisation is by using the :func:`optimise` method 12 | or the :class:`OptimisationController` class. 13 | 14 | .. toctree:: 15 | 16 | running 17 | base_classes 18 | convenience_methods 19 | adam 20 | cmaes_bare 21 | cmaes 22 | gradient_descent 23 | irpropmin 24 | nelder_mead 25 | pso 26 | snes 27 | xnes 28 | 29 | -------------------------------------------------------------------------------- /docs/source/optimisers/irpropmin.rst: -------------------------------------------------------------------------------- 1 | ************************* 2 | Improved Rprop- (iRprop-) 3 | ************************* 4 | 5 | .. currentmodule:: pints 6 | 7 | .. autoclass:: IRPropMin 8 | 9 | -------------------------------------------------------------------------------- /docs/source/optimisers/nelder_mead.rst: -------------------------------------------------------------------------------- 1 | *********** 2 | Nelder-Mead 3 | *********** 4 | 5 | .. currentmodule:: pints 6 | 7 | .. 
autoclass:: NelderMead 8 | 9 | -------------------------------------------------------------------------------- /docs/source/optimisers/pso.rst: -------------------------------------------------------------------------------- 1 | *** 2 | PSO 3 | *** 4 | 5 | .. currentmodule:: pints 6 | 7 | .. autoclass:: PSO 8 | 9 | -------------------------------------------------------------------------------- /docs/source/optimisers/running.rst: -------------------------------------------------------------------------------- 1 | *********************** 2 | Running an optimisation 3 | *********************** 4 | 5 | .. currentmodule:: pints 6 | 7 | .. autofunction:: optimise 8 | 9 | .. autoclass:: OptimisationController 10 | 11 | .. autoclass:: Optimisation 12 | 13 | -------------------------------------------------------------------------------- /docs/source/optimisers/snes.rst: -------------------------------------------------------------------------------- 1 | **** 2 | SNES 3 | **** 4 | 5 | .. currentmodule:: pints 6 | 7 | .. autoclass:: SNES 8 | 9 | -------------------------------------------------------------------------------- /docs/source/optimisers/xnes.rst: -------------------------------------------------------------------------------- 1 | **** 2 | xNES 3 | **** 4 | 5 | .. currentmodule:: pints 6 | 7 | .. autoclass:: XNES 8 | 9 | -------------------------------------------------------------------------------- /docs/source/toy/annulus_logpdf.rst: -------------------------------------------------------------------------------- 1 | ******************** 2 | Annulus Distribution 3 | ******************** 4 | 5 | .. currentmodule:: pints.toy 6 | 7 | .. 
autoclass:: AnnulusLogPDF 8 | 9 | -------------------------------------------------------------------------------- /docs/source/toy/beeler_reuter_ap_model.rst: -------------------------------------------------------------------------------- 1 | ************************************ 2 | Beeler-Reuter Action Potential Model 3 | ************************************ 4 | 5 | .. currentmodule:: pints.toy 6 | 7 | .. autoclass:: ActionPotentialModel 8 | 9 | -------------------------------------------------------------------------------- /docs/source/toy/cone_logpdf.rst: -------------------------------------------------------------------------------- 1 | ***************** 2 | Cone Distribution 3 | ***************** 4 | 5 | .. currentmodule:: pints.toy 6 | 7 | .. autoclass:: ConeLogPDF 8 | 9 | -------------------------------------------------------------------------------- /docs/source/toy/constant_model.rst: -------------------------------------------------------------------------------- 1 | ************** 2 | Constant Model 3 | ************** 4 | 5 | .. currentmodule:: pints.toy 6 | 7 | .. autoclass:: ConstantModel 8 | 9 | -------------------------------------------------------------------------------- /docs/source/toy/eight_schools_logpdf.rst: -------------------------------------------------------------------------------- 1 | ************************** 2 | Eight Schools distribution 3 | ************************** 4 | 5 | .. currentmodule:: pints.toy 6 | 7 | .. autoclass:: EightSchoolsLogPDF 8 | -------------------------------------------------------------------------------- /docs/source/toy/fitzhugh_nagumo_model.rst: -------------------------------------------------------------------------------- 1 | ********************* 2 | Fitzhugh-Nagumo Model 3 | ********************* 4 | 5 | .. currentmodule:: pints.toy 6 | 7 | .. 
autoclass:: FitzhughNagumoModel 8 | 9 | -------------------------------------------------------------------------------- /docs/source/toy/gaussian_logpdf.rst: -------------------------------------------------------------------------------- 1 | ********************* 2 | Gaussian distribution 3 | ********************* 4 | 5 | .. currentmodule:: pints.toy 6 | 7 | .. autoclass:: GaussianLogPDF 8 | 9 | -------------------------------------------------------------------------------- /docs/source/toy/german_credit_hierarchical_logpdf.rst: -------------------------------------------------------------------------------- 1 | ************************************************ 2 | German Credit Hierarchical Logistic Distribution 3 | ************************************************ 4 | 5 | .. currentmodule:: pints.toy 6 | 7 | .. autoclass:: GermanCreditHierarchicalLogPDF 8 | -------------------------------------------------------------------------------- /docs/source/toy/german_credit_logpdf.rst: -------------------------------------------------------------------------------- 1 | *********************************** 2 | German Credit Logistic Distribution 3 | *********************************** 4 | 5 | .. currentmodule:: pints.toy 6 | 7 | .. autoclass:: GermanCreditLogPDF 8 | -------------------------------------------------------------------------------- /docs/source/toy/goodwin_oscillator_model.rst: -------------------------------------------------------------------------------- 1 | ************************ 2 | Goodwin oscillator model 3 | ************************ 4 | 5 | .. currentmodule:: pints.toy 6 | 7 | .. autoclass:: GoodwinOscillatorModel 8 | 9 | -------------------------------------------------------------------------------- /docs/source/toy/hes1_michaelis_menten_model.rst: -------------------------------------------------------------------------------- 1 | *************************** 2 | HES1 Michaelis-Menten Model 3 | *************************** 4 | 5 | .. 
currentmodule:: pints.toy 6 | 7 | .. autoclass:: Hes1Model 8 | 9 | -------------------------------------------------------------------------------- /docs/source/toy/high_dimensional_gaussian_logpdf.rst: -------------------------------------------------------------------------------- 1 | ************************************** 2 | High dimensional Gaussian distribution 3 | ************************************** 4 | 5 | .. currentmodule:: pints.toy 6 | 7 | .. autoclass:: HighDimensionalGaussianLogPDF 8 | 9 | -------------------------------------------------------------------------------- /docs/source/toy/hodgkin_huxley_ik_model.rst: -------------------------------------------------------------------------------- 1 | ********************************** 2 | Hodgkin-Huxley IK Experiment Model 3 | ********************************** 4 | 5 | .. currentmodule:: pints.toy 6 | 7 | .. autoclass:: HodgkinHuxleyIKModel 8 | 9 | -------------------------------------------------------------------------------- /docs/source/toy/index.rst: -------------------------------------------------------------------------------- 1 | ************ 2 | Toy problems 3 | ************ 4 | 5 | The `toy` module provides toy :class:`models`, 6 | :class:`distributions` and 7 | :class:`error measures` that can be used for tests and in 8 | examples. 9 | 10 | Some toy classes provide extra functionality defined in the 11 | :class:`pints.toy.ToyModel` and :class:`pints.toy.ToyLogPDF` classes. 12 | 13 | 14 | .. 
toctree:: 15 | 16 | toy_classes 17 | annulus_logpdf 18 | beeler_reuter_ap_model 19 | cone_logpdf 20 | constant_model 21 | eight_schools_logpdf 22 | fitzhugh_nagumo_model 23 | gaussian_logpdf 24 | german_credit_hierarchical_logpdf 25 | german_credit_logpdf 26 | goodwin_oscillator_model 27 | hes1_michaelis_menten_model 28 | high_dimensional_gaussian_logpdf 29 | hodgkin_huxley_ik_model 30 | logistic_model 31 | lotka_volterra_model 32 | multimodal_gaussian_logpdf 33 | neals_funnel 34 | parabolic_error 35 | repressilator_model 36 | rosenbrock 37 | simple_egg_box_logpdf 38 | simple_harmonic_oscillator_model 39 | sir_model 40 | twisted_gaussian_logpdf 41 | -------------------------------------------------------------------------------- /docs/source/toy/logistic_model.rst: -------------------------------------------------------------------------------- 1 | ************** 2 | Logistic model 3 | ************** 4 | 5 | .. currentmodule:: pints.toy 6 | 7 | .. autoclass:: LogisticModel 8 | 9 | -------------------------------------------------------------------------------- /docs/source/toy/lotka_volterra_model.rst: -------------------------------------------------------------------------------- 1 | ******************** 2 | Lotka-Volterra model 3 | ******************** 4 | 5 | .. currentmodule:: pints.toy 6 | 7 | .. autoclass:: LotkaVolterraModel 8 | 9 | -------------------------------------------------------------------------------- /docs/source/toy/multimodal_gaussian_logpdf.rst: -------------------------------------------------------------------------------- 1 | ******************************** 2 | Multimodal Gaussian distribution 3 | ******************************** 4 | 5 | .. currentmodule:: pints.toy 6 | 7 | .. 
autoclass:: MultimodalGaussianLogPDF 8 | 9 | -------------------------------------------------------------------------------- /docs/source/toy/neals_funnel.rst: -------------------------------------------------------------------------------- 1 | ************************** 2 | Neal's Funnel Distribution 3 | ************************** 4 | 5 | .. currentmodule:: pints.toy 6 | 7 | .. autoclass:: NealsFunnelLogPDF 8 | -------------------------------------------------------------------------------- /docs/source/toy/parabolic_error.rst: -------------------------------------------------------------------------------- 1 | *************** 2 | Parabolic error 3 | *************** 4 | 5 | .. currentmodule:: pints.toy 6 | 7 | .. autoclass:: ParabolicError 8 | 9 | -------------------------------------------------------------------------------- /docs/source/toy/repressilator_model.rst: -------------------------------------------------------------------------------- 1 | ******************* 2 | Repressilator model 3 | ******************* 4 | 5 | .. currentmodule:: pints.toy 6 | 7 | .. autoclass:: RepressilatorModel 8 | 9 | -------------------------------------------------------------------------------- /docs/source/toy/rosenbrock.rst: -------------------------------------------------------------------------------- 1 | ******************* 2 | Rosenbrock function 3 | ******************* 4 | 5 | .. currentmodule:: pints.toy 6 | 7 | .. autoclass:: RosenbrockError 8 | 9 | .. autoclass:: RosenbrockLogPDF 10 | 11 | -------------------------------------------------------------------------------- /docs/source/toy/simple_egg_box_logpdf.rst: -------------------------------------------------------------------------------- 1 | *************************** 2 | Simple Egg Box Distribution 3 | *************************** 4 | 5 | .. currentmodule:: pints.toy 6 | 7 | .. 
autoclass:: SimpleEggBoxLogPDF 8 | 9 | -------------------------------------------------------------------------------- /docs/source/toy/simple_harmonic_oscillator_model.rst: -------------------------------------------------------------------------------- 1 | ******************************** 2 | Simple Harmonic Oscillator model 3 | ******************************** 4 | 5 | .. currentmodule:: pints.toy 6 | 7 | .. autoclass:: SimpleHarmonicOscillatorModel 8 | -------------------------------------------------------------------------------- /docs/source/toy/sir_model.rst: -------------------------------------------------------------------------------- 1 | ********************** 2 | SIR Epidemiology model 3 | ********************** 4 | 5 | .. currentmodule:: pints.toy 6 | 7 | .. autoclass:: SIRModel 8 | 9 | -------------------------------------------------------------------------------- /docs/source/toy/stochastic/index.rst: -------------------------------------------------------------------------------- 1 | *********************** 2 | Stochastic Toy Problems 3 | *********************** 4 | 5 | The `stochastic` module provides toy :class:`models`, 6 | :class:`distributions` and 7 | :class:`error measures` that can be used for tests and in 8 | examples. 9 | 10 | 11 | .. toctree:: 12 | 13 | markov_jump_model 14 | stochastic_degradation_model 15 | stochastic_logistic_model 16 | stochastic_michaelis_menten_model 17 | stochastic_production_degradation_model 18 | stochastic_schlogl_model 19 | -------------------------------------------------------------------------------- /docs/source/toy/stochastic/markov_jump_model.rst: -------------------------------------------------------------------------------- 1 | ***************** 2 | Markov Jump Model 3 | ***************** 4 | 5 | .. currentmodule:: pints.toy.stochastic 6 | 7 | .. 
autoclass:: MarkovJumpModel 8 | -------------------------------------------------------------------------------- /docs/source/toy/stochastic/stochastic_degradation_model.rst: -------------------------------------------------------------------------------- 1 | **************************** 2 | Stochastic degradation model 3 | **************************** 4 | 5 | .. currentmodule:: pints.toy.stochastic 6 | 7 | .. autoclass:: DegradationModel 8 | -------------------------------------------------------------------------------- /docs/source/toy/stochastic/stochastic_logistic_model.rst: -------------------------------------------------------------------------------- 1 | ************************* 2 | Stochastic Logistic Model 3 | ************************* 4 | 5 | .. currentmodule:: pints.toy.stochastic 6 | 7 | .. autoclass:: LogisticModel 8 | -------------------------------------------------------------------------------- /docs/source/toy/stochastic/stochastic_michaelis_menten_model.rst: -------------------------------------------------------------------------------- 1 | ********************************* 2 | Stochastic Michaelis Menten model 3 | ********************************* 4 | 5 | .. currentmodule:: pints.toy.stochastic 6 | 7 | .. autoclass:: MichaelisMentenModel 8 | -------------------------------------------------------------------------------- /docs/source/toy/stochastic/stochastic_production_degradation_model.rst: -------------------------------------------------------------------------------- 1 | ******************************************* 2 | Stochastic production and degradation model 3 | ******************************************* 4 | 5 | .. currentmodule:: pints.toy.stochastic 6 | 7 | .. 
autoclass:: ProductionDegradationModel 8 | -------------------------------------------------------------------------------- /docs/source/toy/stochastic/stochastic_schlogl_model.rst: -------------------------------------------------------------------------------- 1 | *************** 2 | Schlogl's model 3 | *************** 4 | 5 | .. currentmodule:: pints.toy.stochastic 6 | 7 | .. autoclass:: SchloglModel 8 | -------------------------------------------------------------------------------- /docs/source/toy/toy_classes.rst: -------------------------------------------------------------------------------- 1 | **************** 2 | Toy base classes 3 | **************** 4 | 5 | .. currentmodule:: pints.toy 6 | 7 | .. autoclass:: ToyLogPDF 8 | 9 | .. autoclass:: ToyModel 10 | 11 | .. autoclass:: ToyODEModel 12 | 13 | .. automethod:: _dfdp 14 | .. automethod:: _rhs 15 | -------------------------------------------------------------------------------- /docs/source/toy/twisted_gaussian_logpdf.rst: -------------------------------------------------------------------------------- 1 | ***************************** 2 | Twisted Gaussian distribution 3 | ***************************** 4 | 5 | .. currentmodule:: pints.toy 6 | 7 | .. autoclass:: TwistedGaussianLogPDF 8 | 9 | -------------------------------------------------------------------------------- /docs/source/transformations.rst: -------------------------------------------------------------------------------- 1 | *************** 2 | Transformations 3 | *************** 4 | 5 | .. currentmodule:: pints 6 | 7 | :class:`Transformation` objects provide methods to transform between different 8 | representations of a parameter space; for example from a "model space" 9 | (:math:`p`) where parameters have units and some physical counterpart to 10 | a "search space" (e.g. :math:`q = \log(p)`) where parameters are 11 | non-dimensionalised and less-recognisable to the modeller. 
12 | The transformed space may in many cases prove simpler to work with for 13 | inference: leading to more effective and efficient optimisation and sampling. 14 | 15 | To perform optimisation or sampling in a transformed space, users can choose to 16 | write their :class:`pints.ForwardModel` in "search space" directly, but the 17 | issue with this is that we will no longer be correctly inferring the "model 18 | parameters". An alternative is to write the ``ForwardModel`` in model 19 | parameters, and pass a :class:`Transformation` object to e.g. an 20 | :class:`OptimisationController` or :class:`MCMCController`. Using the 21 | :class:`Transformation` object ensures users get the correct statistics about 22 | the model parameters (not the search space parameters). 23 | 24 | Parameter transformation can be useful in many situations, for example 25 | transforming from a constrained parameter space to an unconstrained search 26 | space using :class:`RectangularBoundariesTransformation` leads to crucial 27 | performance improvements for many methods. 28 | 29 | Example:: 30 | 31 | transform = pints.LogTransformation(n_parameters) 32 | mcmc = pints.MCMCController(log_posterior, n_chains, x0, transform=transform) 33 | 34 | Overview: 35 | 36 | - :class:`ComposedTransformation` 37 | - :class:`IdentityTransformation` 38 | - :class:`LogitTransformation` 39 | - :class:`LogTransformation` 40 | - :class:`RectangularBoundariesTransformation` 41 | - :class:`ScalingTransformation` 42 | - :class:`Transformation` 43 | - :class:`TransformedBoundaries` 44 | - :class:`TransformedErrorMeasure` 45 | - :class:`TransformedLogPDF` 46 | - :class:`TransformedLogPrior` 47 | - :class:`UnitCubeTransformation` 48 | 49 | 50 | Transformation types 51 | ******************** 52 | 53 | .. autoclass:: Transformation 54 | 55 | .. autoclass:: ComposedTransformation 56 | 57 | .. autoclass:: IdentityTransformation 58 | 59 | .. autoclass:: LogitTransformation 60 | 61 | .. 
autoclass:: LogTransformation 62 | 63 | .. autoclass:: RectangularBoundariesTransformation 64 | 65 | .. autoclass:: ScalingTransformation 66 | 67 | .. autoclass:: UnitCubeTransformation 68 | 69 | Transformed objects 70 | ******************* 71 | 72 | .. autoclass:: TransformedBoundaries 73 | 74 | .. autoclass:: TransformedErrorMeasure 75 | 76 | .. autoclass:: TransformedLogPDF 77 | 78 | .. autoclass:: TransformedLogPrior 79 | 80 | .. autoclass:: TransformedRectangularBoundaries 81 | 82 | -------------------------------------------------------------------------------- /docs/source/utilities.rst: -------------------------------------------------------------------------------- 1 | ********* 2 | Utilities 3 | ********* 4 | 5 | .. currentmodule:: pints 6 | 7 | Overview: 8 | 9 | - :func:`strfloat` 10 | - :class:`Loggable` 11 | - :class:`Logger` 12 | - :class:`Timer` 13 | - :func:`matrix2d` 14 | - :func:`vector` 15 | - :func:`sample_initial_points` 16 | 17 | .. autofunction:: strfloat 18 | 19 | .. autoclass:: Loggable 20 | :private-members: 21 | 22 | .. autoclass:: Logger 23 | 24 | .. autoclass:: Timer 25 | 26 | .. autofunction:: matrix2d 27 | 28 | .. autofunction:: vector 29 | 30 | .. 
autofunction:: sample_initial_points 31 | -------------------------------------------------------------------------------- /examples/miscellaneous/sars_incidence.csv: -------------------------------------------------------------------------------- 1 | "day","incidence" 2 | 1,1 3 | 2,0 4 | 3,0 5 | 4,1 6 | 5,0 7 | 6,0 8 | 7,2 9 | 8,0 10 | 9,2 11 | 10,2 12 | 11,1 13 | 12,1 14 | 13,1 15 | 14,0 16 | 15,0 17 | 16,0 18 | 17,4 19 | 18,1 20 | 19,2 21 | 20,4 22 | 21,13 23 | 22,23 24 | 23,35 25 | 24,26 26 | 25,12 27 | 26,17 28 | 27,19 29 | 28,17 30 | 29,28 31 | 30,23 32 | 31,27 33 | 32,27 34 | 33,11 35 | 34,21 36 | 35,21 37 | 36,25 38 | 37,31 39 | 38,103 40 | 39,96 41 | 40,69 42 | 41,58 43 | 42,48 44 | 43,33 45 | 44,25 46 | 45,43 47 | 46,37 48 | 47,30 49 | 48,29 50 | 49,28 51 | 50,34 52 | 51,34 53 | 52,32 54 | 53,24 55 | 54,17 56 | 55,16 57 | 56,23 58 | 57,18 59 | 58,15 60 | 59,11 61 | 60,19 62 | 61,16 63 | 62,9 64 | 63,17 65 | 64,14 66 | 65,6 67 | 66,4 68 | 67,8 69 | 68,9 70 | 69,11 71 | 70,11 72 | 71,11 73 | 72,6 74 | 73,13 75 | 74,3 76 | 75,8 77 | 76,2 78 | 77,4 79 | 78,4 80 | 79,6 81 | 80,4 82 | 81,5 83 | 82,4 84 | 83,8 85 | 84,2 86 | 85,3 87 | 86,2 88 | 87,5 89 | 88,3 90 | 89,3 91 | 90,4 92 | 91,3 93 | 92,1 94 | 93,2 95 | 94,0 96 | 95,1 97 | 96,3 98 | 97,3 99 | 98,1 100 | 99,1 101 | 100,1 102 | 101,1 103 | 102,1 104 | 103,1 105 | 104,1 106 | 105,0 107 | 106,2 108 | 107,0 109 | -------------------------------------------------------------------------------- /examples/miscellaneous/sars_serial_interval.csv: -------------------------------------------------------------------------------- 1 | "day","w" 2 | 1,0 3 | 2,0.001 4 | 3,0.012 5 | 4,0.043 6 | 5,0.078 7 | 6,0.104 8 | 7,0.117 9 | 8,0.116 10 | 9,0.108 11 | 10,0.094 12 | 11,0.078 13 | 12,0.063 14 | 13,0.049 15 | 14,0.038 16 | 15,0.028 17 | 16,0.021 18 | 17,0.015 19 | 18,0.011 20 | 19,0.008 21 | 20,0.005 22 | 21,0.004 23 | 22,0.003 24 | 23,0.002 25 | 24,0.001 26 | 25,0.001 27 | 
-------------------------------------------------------------------------------- /examples/using-python.md: -------------------------------------------------------------------------------- 1 | # Using Python 2 | 3 | This page contains a few selected links to Python learning resources, many more are available online! 4 | If you are new to programming in Python, these can help you get started. 5 | 6 | ## Getting Python 7 | 8 | - [Official downloads](https://www.python.org/downloads/) - from the Python Foundation. Any further modules can be installed when needed. 9 | - [Anaconda](https://docs.anaconda.com/anaconda/install/) - a popular package for scientists, which includes many commonly-used numeric & scientific packages. 10 | - [Jupyter](https://jupyter.org/install.html) - Jupyter notebooks let you combine written text with Python code - all the examples in this repository are written using Jupyter (but can be viewed without it, just by clicking the links!). 11 | 12 | ## Python for non-programmers 13 | 14 | - [Automate the boring stuff](https://automatetheboringstuff.com/) - a free, online, project-based textbook for beginners. 15 | - [Python 3 documentation](https://docs.python.org/3/) - official documentation for the latest version of Python. 
16 | 17 | ## Python for Matlab/Octave users 18 | 19 | - [Matlab to Python migration guide (pdf)](https://www.enthought.com/wp-content/uploads/Enthought-MATLAB-to-Python-White-Paper.pdf) 20 | - [NumPy for Matlab users](https://numpy.org/devdocs/user/numpy-for-matlab-users.html) - NumPy, SciPy, and matplotlib are a set of highly popular packages that provide many features also included in Matlab 21 | - [Spyder](https://www.spyder-ide.org/) - a Python IDE with a familiar interface for matlab users 22 | 23 | ## Python for R users 24 | 25 | - [How can R Users Learn Python for Data Science ?](https://www.hackerearth.com/blog/developers/how-can-r-users-start-learning-python-for-data-science/) - a short article 26 | - [R to Python cheatsheet](http://mathesaurus.sourceforge.net/r-numpy.html) - some common R commands in Python 27 | -------------------------------------------------------------------------------- /papers/README.md: -------------------------------------------------------------------------------- 1 | # PINTS publications 2 | 3 | This directory contains details of publications about PINTS ([main-pints.bib](main-pints.bib)) and publications that have made use of PINTS ([used-pints.bib](used-pints.bib)). 
4 | -------------------------------------------------------------------------------- /papers/main-pints.bib: -------------------------------------------------------------------------------- 1 | @article{Clerx2019Pints, 2 | title={Probabilistic Inference on Noisy Time Series ({PINTS})}, 3 | author={Clerx, Michael and Robinson, Martin and Lambert, Ben and Lei, Chon Lok and Ghosh, Sanmitra and Mirams, Gary R and Gavaghan, David J}, 4 | journal={Journal of Open Research Software}, 5 | volume={7}, 6 | number={1}, 7 | pages={23}, 8 | year={2019}, 9 | doi={10.5334/jors.252} 10 | } 11 | -------------------------------------------------------------------------------- /papers/used-pints.bib: -------------------------------------------------------------------------------- 1 | @article{Robinson2018Separating, 2 | title={Separating the effects of experimental noise from inherent system variability in voltammetry: The {[Fe(CN)6]3--/4--} Process}, 3 | author={Robinson, Martin and Simonov, Alexandr N and Zhang, Jie and Bond, Alan M and Gavaghan, David}, 4 | journal={Analytical chemistry}, 5 | volume={91}, 6 | number={3}, 7 | pages={1944--1953}, 8 | year={2018}, 9 | publisher={ACS Publications}, 10 | doi={10.1021/acs.analchem.8b04238} 11 | } 12 | 13 | @article{Robinson2018Parametrization, 14 | title={Integration of Heuristic and Automated Parametrization of Three Unresolved Two-Electron Surface-Confined Polyoxometalate Reduction Processes by {AC} Voltammetry}, 15 | author={Robinson, Martin and Ounnunkad, Kontad and Zhang, Jie and Gavaghan, David and Bond, Alan}, 16 | journal={ChemElectroChem}, 17 | volume={5}, 18 | number={23}, 19 | pages={3771--3785}, 20 | year={2018}, 21 | publisher={Wiley Online Library}, 22 | doi={10.1002/celc.201800950} 23 | } 24 | 25 | @article{Lei2019RapidHergI, 26 | author = {Lei, {Chon Lok} and Clerx, Michael and Gavaghan, David J. and Polonchuk, Liudmila and Mirams, Gary R. 
and Wang, Ken}, 27 | title = {Rapid characterisation of {hERG} channel kinetics {I}: using an automated high-throughput system}, 28 | year = {2019}, 29 | doi = {10.1016/j.bpj.2019.07.029}, 30 | publisher = {Elsevier}, 31 | journal = {Biophysical Journal} 32 | } 33 | 34 | @article{Lei2019RapidHergII, 35 | author = {Lei, {Chon Lok} and Clerx, Michael and Beattie, Kylie A. and Melgari, Dario and Hancox, Jules C. and Gavaghan, David J. and Polonchuk, Liudmila and Wang, Ken and Mirams, Gary R.}, 36 | title = {Rapid characterisation of {hERG} channel kinetics {II}: temperature dependence}, 37 | year = {2019}, 38 | doi = {10.1016/j.bpj.2019.07.030}, 39 | publisher = {Elsevier}, 40 | journal = {Biophysical Journal} 41 | } 42 | 43 | @article{Clerx2019FourWays, 44 | title={Four Ways to Fit an Ion Channel Model}, 45 | author={Clerx, Michael and Beattie, Kylie A and Gavaghan, David J and Mirams, Gary R}, 46 | journal={Biophyical Journal}, 47 | year={2019}, 48 | doi={10.1016/j.bpj.2019.08.001} 49 | } 50 | -------------------------------------------------------------------------------- /pints/_abc/_abc_rejection.py: -------------------------------------------------------------------------------- 1 | # 2 | # ABC Rejection method 3 | # 4 | # This file is part of PINTS (https://github.com/pints-team/pints/) which is 5 | # released under the BSD 3-clause license. See accompanying LICENSE.md for 6 | # copyright notice and full license details. 7 | # 8 | import pints 9 | import numpy as np 10 | 11 | 12 | class RejectionABC(pints.ABCSampler): 13 | r""" 14 | Implements the rejection ABC algorithm as described in [1]. 15 | 16 | Here is a high-level description of the algorithm: 17 | 18 | .. 
math:: 19 | \begin{align} 20 | \theta^* &\sim p(\theta) \\ 21 | x &\sim p(x|\theta^*) \\ 22 | \textrm{if } s(x) < \textrm{threshold}, \textrm{then} \\ 23 | \theta^* \textrm{ is added to list of samples} \\ 24 | \end{align} 25 | 26 | In other words, the first two steps sample parameters 27 | from the prior distribution :math:`p(\theta)` and then sample 28 | simulated data from the sampling distribution (conditional on 29 | the sampled parameter values), :math:`p(x|\theta^*)`. 30 | In the end, if the error measure between our simulated data and 31 | the original data is within the threshold, we add the sampled 32 | parameters to the list of samples. 33 | 34 | References 35 | ---------- 36 | .. [1] "Approximate Bayesian Computation (ABC) in practice". Katalin 37 | Csillery, Michael G.B. Blum, Oscar E. Gaggiotti, Olivier Francois 38 | (2010) Trends in Ecology & Evolution 39 | https://doi.org/10.1016/j.tree.2010.04.001 40 | 41 | """ 42 | def __init__(self, log_prior): 43 | 44 | self._log_prior = log_prior 45 | self._threshold = 1 46 | self._xs = None 47 | self._ready_for_tell = False 48 | 49 | def name(self): 50 | """ See :meth:`pints.ABCSampler.name()`. """ 51 | return 'Rejection ABC' 52 | 53 | def ask(self, n_samples): 54 | """ See :meth:`ABCSampler.ask()`. """ 55 | if self._ready_for_tell: 56 | raise RuntimeError('Ask called before tell.') 57 | self._xs = self._log_prior.sample(n_samples) 58 | 59 | self._ready_for_tell = True 60 | return self._xs 61 | 62 | def tell(self, fx): 63 | """ See :meth:`ABCSampler.tell()`. 
""" 64 | if not self._ready_for_tell: 65 | raise RuntimeError('Tell called before ask.') 66 | self._ready_for_tell = False 67 | 68 | fx = pints.vector(fx) 69 | accepted = fx < self._threshold 70 | if not np.any(accepted): 71 | return None 72 | else: 73 | return [self._xs[c].tolist() for c, x in enumerate(accepted) if x] 74 | 75 | def threshold(self): 76 | """ 77 | Returns threshold error distance that determines if a sample is 78 | accepted (if ``error < threshold``). 79 | """ 80 | return self._threshold 81 | 82 | def set_threshold(self, threshold): 83 | """ 84 | Sets threshold error distance that determines if a sample is accepted 85 | (if ``error < threshold``). 86 | """ 87 | x = float(threshold) 88 | if x <= 0: 89 | raise ValueError('Threshold must be greater than zero.') 90 | self._threshold = threshold 91 | -------------------------------------------------------------------------------- /pints/_mcmc/_haario_ac.py: -------------------------------------------------------------------------------- 1 | # 2 | # Adaptive covariance MCMC method by Haario 3 | # 4 | # This file is part of PINTS (https://github.com/pints-team/pints/) which is 5 | # released under the BSD 3-clause license. See accompanying LICENSE.md for 6 | # copyright notice and full license details. 7 | # 8 | import pints 9 | import numpy as np 10 | 11 | 12 | class HaarioACMC(pints.AdaptiveCovarianceMC): 13 | """ 14 | Adaptive Metropolis MCMC, which is algorithm 4 in [1]_ and is described in 15 | the text in [2]_. 16 | 17 | This algorithm differs from :class:`HaarioBardenetACMC` only through its 18 | use of ``alpha`` in the updating of ``log_lambda`` (rather than a binary 19 | accept/reject). 
20 | 21 | Initialise:: 22 | 23 | mu 24 | Sigma 25 | adaptation_count = 0 26 | log lambda = 0 27 | 28 | In each adaptive iteration (t):: 29 | 30 | adaptation_count = adaptation_count + 1 31 | gamma = (adaptation_count)^-eta 32 | theta* ~ N(theta_t, lambda * Sigma) 33 | alpha = min(1, p(theta*|data) / p(theta_t|data)) 34 | u ~ uniform(0, 1) 35 | if alpha > u: 36 | theta_(t+1) = theta* 37 | accepted = 1 38 | else: 39 | theta_(t+1) = theta_t 40 | accepted = 0 41 | 42 | mu = (1 - gamma) mu + gamma theta_(t+1) 43 | Sigma = (1 - gamma) Sigma + gamma (theta_(t+1) - mu)(theta_(t+1) - mu) 44 | log lambda = log lambda + gamma (alpha - self._target_acceptance) 45 | gamma = adaptation_count^-eta 46 | 47 | Extends :class:`AdaptiveCovarianceMC`, does not use sensitivities. 48 | 49 | References 50 | ---------- 51 | .. [1] A tutorial on adaptive MCMC 52 | Christophe Andrieu and Johannes Thoms, Statistical Computing, 2008, 53 | 18: 343-373. 54 | https://doi.org/10.1007/s11222-008-9110-y 55 | 56 | .. [2] An adaptive Metropolis algorithm 57 | Heikki Haario, Eero Saksman, and Johanna Tamminen (2001) Bernoulli. 58 | """ 59 | def __init__(self, x0, sigma0=None): 60 | super(HaarioACMC, self).__init__(x0, sigma0) 61 | self._log_lambda = 0 62 | 63 | def _adapt_internal(self, accepted, log_ratio): 64 | """ See :meth:`pints.AdaptiveCovarianceMC._adapt()`. """ 65 | p = np.exp(log_ratio) if log_ratio < 0 else 1 66 | self._log_lambda += self._gamma * (p - self._target_acceptance) 67 | 68 | def _generate_proposal(self): 69 | """ See :meth:`AdaptiveCovarianceMC._generate_proposal()`. """ 70 | return np.random.multivariate_normal( 71 | self._current, self._sigma * np.exp(self._log_lambda)) 72 | 73 | def name(self): 74 | """ See :meth:`pints.MCMCSampler.name()`. 
""" 75 | return 'Haario adaptive covariance MCMC' 76 | 77 | -------------------------------------------------------------------------------- /pints/_mcmc/_haario_bardenet_ac.py: -------------------------------------------------------------------------------- 1 | # 2 | # Haario-Bardenet adaptive covariance MCMC method 3 | # 4 | # This file is part of PINTS (https://github.com/pints-team/pints/) which is 5 | # released under the BSD 3-clause license. See accompanying LICENSE.md for 6 | # copyright notice and full license details. 7 | # 8 | import pints 9 | import numpy as np 10 | 11 | 12 | class HaarioBardenetACMC(pints.AdaptiveCovarianceMC): 13 | """ 14 | Adaptive Metropolis MCMC, which is algorithm in the supplementary materials 15 | of [1]_, which in turn is based on [2]_. 16 | 17 | Initialise:: 18 | 19 | mu 20 | Sigma 21 | adaptation_count = 0 22 | log lambda = 0 23 | 24 | In each adaptive iteration (t):: 25 | 26 | adaptation_count = adaptation_count + 1 27 | gamma = (adaptation_count)^-eta 28 | theta* ~ N(theta_t, lambda * Sigma) 29 | alpha = min(1, p(theta*|data) / p(theta_t|data)) 30 | u ~ uniform(0, 1) 31 | if alpha > u: 32 | theta_(t+1) = theta* 33 | accepted = 1 34 | else: 35 | theta_(t+1) = theta_t 36 | accepted = 0 37 | 38 | alpha = accepted 39 | 40 | mu = (1 - gamma) mu + gamma theta_(t+1) 41 | Sigma = (1 - gamma) Sigma + gamma (theta_(t+1) - mu)(theta_(t+1) - mu) 42 | log lambda = log lambda + gamma (alpha - self._target_acceptance) 43 | gamma = adaptation_count^-eta 44 | 45 | Extends :class:`AdaptiveCovarianceMC`, does not use sensitivities. 46 | 47 | References 48 | ---------- 49 | .. [1] Johnstone, Chang, Bardenet, de Boer, Gavaghan, Pathmanathan, 50 | Clayton, Mirams (2015) "Uncertainty and variability in models of the 51 | cardiac action potential: Can we build trustworthy models?" 52 | Journal of Molecular and Cellular Cardiology. 53 | https://doi.org/10.1016/j.yjmcc.2015.11.018 54 | 55 | .. 
[2] Haario, Saksman, Tamminen (2001) "An adaptive Metropolis algorithm" 56 | Bernoulli. 57 | https://doi.org/10.2307/3318737 58 | """ 59 | def __init__(self, x0, sigma0=None): 60 | super(HaarioBardenetACMC, self).__init__(x0, sigma0) 61 | 62 | # Initial log lambda is zero 63 | self._log_lambda = 0 64 | 65 | def _adapt_internal(self, accepted, log_ratio): 66 | """ See :meth:`pints.AdaptiveCovarianceMC.tell()`. """ 67 | p = 1 if accepted else 0 68 | self._log_lambda += self._gamma * (p - self._target_acceptance) 69 | 70 | def _generate_proposal(self): 71 | """ See :meth:`AdaptiveCovarianceMC._generate_proposal()`. """ 72 | return np.random.multivariate_normal( 73 | self._current, self._sigma * np.exp(self._log_lambda)) 74 | 75 | def name(self): 76 | """ See :meth:`pints.MCMCSampler.name()`. """ 77 | return 'Haario-Bardenet adaptive covariance MCMC' 78 | 79 | 80 | class AdaptiveCovarianceMCMC(HaarioBardenetACMC): 81 | """ 82 | Deprecated alias of :class:`pints.HaarioBardenetACMC`. 83 | """ 84 | 85 | def __init__(self, x0, sigma0=None): 86 | 87 | # Deprecated on 2019-09-26 88 | import warnings 89 | warnings.warn( 90 | 'The class `pints.AdaptiveCovarianceMCMC` is deprecated.' 91 | ' Please use `pints.HaarioBardenetACMC` instead.') 92 | super(AdaptiveCovarianceMCMC, self).__init__(x0, sigma0) 93 | 94 | -------------------------------------------------------------------------------- /pints/_mcmc/_rao_blackwell_ac.py: -------------------------------------------------------------------------------- 1 | # 2 | # Rao-Blackwell Adaptive MCMC method 3 | # 4 | # This file is part of PINTS (https://github.com/pints-team/pints/) which is 5 | # released under the BSD 3-clause license. See accompanying LICENSE.md for 6 | # copyright notice and full license details. 7 | # 8 | import pints 9 | import numpy as np 10 | 11 | 12 | class RaoBlackwellACMC(pints.AdaptiveCovarianceMC): 13 | """ 14 | Rao-Blackwell adaptive MCMC, as described by Algorithm 3 in [1]_. 
class RaoBlackwellACMC(pints.AdaptiveCovarianceMC):
    """
    Rao-Blackwell adaptive MCMC, as described by Algorithm 3 in [1]_.

    After initialising mu0 and sigma0, in each iteration after the initial
    phase (t), the following steps occur::

        theta* ~ N(theta_t, lambda * sigma0)
        alpha(theta_t, theta*) = min(1, p(theta*|data) / p(theta_t|data))
        u ~ uniform(0, 1)
        if alpha(theta_t, theta*) > u:
            theta_t+1 = theta*
        else:
            theta_t+1 = theta_t
        mu_t+1 = mu_t + gamma_t+1 * (theta_t+1 - mu_t)
        sigma_t+1 = sigma_t + gamma_t+1 *
                    (bar((theta_t+1 - mu_t)(theta_t+1 - mu_t)') - sigma_t)

    where::

        bar(theta_t+1) = alpha(theta_t, theta*) theta* +
                         (1 - alpha(theta_t, theta*)) theta_t

    Note that we deviate from the paper in two places::

        gamma_t = t^-eta
        Y_t+1 ~ N(theta_t, lambda * sigma0) rather than
        Y_t+1 ~ N(theta_t, sigma0)

    Extends :class:`AdaptiveCovarianceMC`, does not use sensitivities.

    References
    ----------
    .. [1] A tutorial on adaptive MCMC
           Christophe Andrieu and Johannes Thoms, Statistical Computing,
           2008, 18: 343-373.
           https://doi.org/10.1007/s11222-008-9110-y
    """
    def __init__(self, x0, sigma0=None):
        super(RaoBlackwellACMC, self).__init__(x0, sigma0)

        # Fixed proposal scaling; standard heuristic based on a normal
        # approximation to the target
        self._lambda = (2.38**2) / self._n_parameters

        # Last current point (X) and last proposal (Y), stored by tell()
        # before the base class updates its state
        self._X = None
        self._Y = None

    def _adapt_sigma(self, log_ratio):
        """
        Updates sigma using a Rao-Blackwellised formula::

            sigma_t+1 = sigma_t + gamma_t+1 *
                        (bar((theta_t+1 - mu_t)(theta_t+1 - mu_t)') - sigma_t)

        where::

            bar(X_t+1) = alpha(X_t, Y_t+1) * Y_t+1 +
                         (1 - alpha(X_t, Y_t+1)) * X_t
        """
        # Acceptance probability min(1, exp(log_ratio)), without
        # exponentiating non-negative log ratios (avoids overflow)
        if log_ratio < 0:
            alpha = np.exp(log_ratio)
        else:
            alpha = 1

        # Rao-Blackwellised state: expectation over accept/reject outcomes
        mean_state = alpha * self._Y + (1 - alpha) * self._X
        residual = np.reshape(
            mean_state - self._mu, (self._n_parameters, 1))
        self._sigma = ((1 - self._gamma) * self._sigma
                       + self._gamma * np.dot(residual, residual.T))

    def _generate_proposal(self):
        """ See :meth:`AdaptiveCovarianceMC._generate_proposal()`. """
        return np.random.multivariate_normal(
            self._current, self._lambda * self._sigma)

    def name(self):
        """ See :meth:`pints.MCMCSampler.name()`. """
        return 'Rao-Blackwell adaptive covariance MCMC'

    def tell(self, fx):
        """ See :meth:`pints.AdaptiveCovarianceMC.tell()`. """
        # Remember proposal and current point before the base class updates
        # them; _adapt_sigma() needs both.
        self._Y = np.copy(self._proposed)
        self._X = np.copy(self._current)

        return super(RaoBlackwellACMC, self).tell(fx)
import pints


class NestedRejectionSampler(pints.NestedSampler):
    """
    Creates a nested sampler that estimates the marginal likelihood and
    generates samples from the posterior.

    This is the simplest form of nested sampler and involves using rejection
    sampling from the prior as described in the algorithm on page 839 in [1]_
    to estimate the marginal likelihood and generate weights, preliminary
    samples (with their respective likelihoods), required to generate
    posterior samples.

    The posterior samples are generated as described in [1]_ on page 849 by
    randomly sampling the preliminary point, accounting for their weights and
    likelihoods.

    Initialise::

        Z = 0
        X_0 = 1

    Draw samples from prior::

        for i in 1:n_active_points:
            theta_i ~ p(theta), i.e. sample from the prior
            L_i = p(theta_i|X)
        endfor

    In each iteration of the algorithm (t)::

        L_min = min(L)
        indexmin = min_index(L)
        X_t = exp(-t / n_active_points)
        w_t = X_t - X_t-1
        Z = Z + L_min * w_t
        theta* ~ p(theta)
        while p(theta*|X) < L_min:
            theta* ~ p(theta)
        endwhile
        theta_indexmin = theta*
        L_indexmin = p(theta*|X)

    At the end of iterations, there is a final ``Z`` increment::

        Z = Z + (1 / n_active_points) * (L_1 + L_2 + ..., + L_n_active_points)

    The posterior samples are generated as described in [1]_ on page 849 by
    weighting each dropped sample in proportion to the volume of the
    posterior region it was sampled from. That is, the probability for
    drawing a given sample j is given by::

        p_j = L_j * w_j / Z

    where j = 1, ..., n_iterations.

    Extends :class:`NestedSampler`.

    References
    ----------
    .. [1] "Nested Sampling for General Bayesian Computation", John Skilling,
           Bayesian Analysis 1:4 (2006).
           https://doi.org/10.1214/06-BA127
    """
    def __init__(self, log_prior):
        super(NestedRejectionSampler, self).__init__(log_prior)

        # Rejection sampling uses function values only, not gradients
        self._needs_sensitivities = False

    def ask(self, n_points):
        """
        Proposes new point(s) by sampling from the prior.
        """
        draws = self._log_prior.sample(n_points)
        # For a single point, unwrap the (1, n) sample array
        self._proposed = draws if n_points > 1 else draws[0]
        return self._proposed

    def n_hyper_parameters(self):
        """ See :meth:`TunableMethod.n_hyper_parameters()`. """
        return 1

    def set_hyper_parameters(self, x):
        """
        Hyper-parameter vector is: ``[active_points_rate]``

        Parameters
        ----------
        x
            An array of length ``n_hyper_parameters`` used to set the
            hyper-parameters
        """
        self.set_n_active_points(x[0])

    def name(self):
        """ See :meth:`pints.NestedSampler.name()`. """
        return 'Nested rejection sampler'


import numpy as np
import timeit


def strfloat(x):
    """
    Converts a float to a string, with maximum precision.
    """
    return pints.FLOAT_FORMAT.format(float(x))
class Timer(object):
    """
    Provides accurate timing.

    Example
    -------
    ::

        timer = pints.Timer()
        print(timer.format(timer.time()))

    """
    def __init__(self, output=None):
        # Wall-clock time at construction (or last reset)
        self._start = timeit.default_timer()
        self._methods = {}

    def format(self, time=None):
        """
        Formats a (non-integer) number of seconds, returns a string like
        "5 weeks, 3 days, 1 hour, 4 minutes, 9 seconds", or "0.0019 seconds".
        """
        if time is None:
            time = self.time()

        # Short durations: report seconds directly
        if time < 1e-2:
            return str(time) + ' seconds'
        if time < 60:
            return str(round(time, 2)) + ' seconds'

        # Longer durations: decompose into weeks/days/hours/minutes/seconds,
        # omitting leading zero-valued units only.
        parts = []
        remaining = int(round(time))
        for seconds_per_unit, unit in (
                (604800, 'week'),
                (86400, 'day'),
                (3600, 'hour'),
                (60, 'minute')):
            count = remaining // seconds_per_unit
            if count > 0 or parts:
                label = unit if count == 1 else unit + 's'
                parts.append(str(count) + ' ' + label)
            remaining -= count * seconds_per_unit
        parts.append(
            '1 second' if remaining == 1 else str(remaining) + ' seconds')
        return ', '.join(parts)

    def reset(self):
        """
        Resets this timer's start time.
        """
        self._start = timeit.default_timer()

    def time(self):
        """
        Returns the time (in seconds) since this timer was created, or since
        :meth:`reset()` was last called.
        """
        return timeit.default_timer() - self._start


def vector(x):
    """
    Copies ``x`` and returns a 1d read-only NumPy array of floats with shape
    ``(n,)``.

    Raises a ``ValueError`` if ``x`` has an incompatible shape.
    """
    if np.isscalar(x):
        arr = np.array([float(x)])
    else:
        arr = np.array(x, copy=True, dtype=float)
    arr.setflags(write=False)
    if arr.ndim != 1:
        # Accept any shape with a single non-trivial axis, e.g. (n, 1)
        n = np.max(arr.shape)
        if np.prod(arr.shape) != n:
            raise ValueError(
                'Unable to convert to 1d vector of scalar values.')
        arr = arr.reshape((n,))
    return arr


def matrix2d(x):
    """
    Copies ``x`` and returns a 2d read-only NumPy array of floats with shape
    ``(m, n)``.

    Raises a ``ValueError`` if ``x`` has an incompatible shape.
    """
    mat = np.array(x, copy=True, dtype=float)
    if mat.ndim == 1:
        # Promote 1d input to a single-column matrix
        mat = mat.reshape((len(mat), 1))
    elif mat.ndim != 2:
        raise ValueError('Unable to convert to 2d matrix.')
    mat.setflags(write=False)
    return mat
def load_samples(filename, n=None):
    """
    Loads samples from the given ``filename`` and returns a 2d NumPy array
    containing them.

    If the optional argument ``n`` is given, the method assumes there are
    ``n`` files, with names based on ``filename`` such that e.g. ``test.csv``
    would become ``test_0.csv``, ``test_1.csv``, ..., ``test_n.csv``. In this
    case a list of 2d NumPy arrays is returned.

    Assumes the first line in each file is a header.

    See also :meth:`save_samples()`.
    """
    import numpy as np
    import os

    def read_csv(fname):
        # Parse one CSV file of floats, skipping the header line
        with open(fname, 'r') as f:
            rows = iter(f)
            next(rows)  # Skip header
            return np.asarray(
                [[float(v) for v in row.split(',')] for row in rows])

    # Single file: load directly
    if n is None:
        return read_csv(filename)

    # Multiple, systematically named files
    n = int(n)
    if n < 1:
        raise ValueError(
            'Argument `n` must be `None` or an integer greater than zero.')
    base, ext = os.path.splitext(filename)
    names = [base + '_' + str(i) + ext for i in range(n)]

    # Check all files exist before loading anything (saves time)
    for fname in names:
        if not os.path.isfile(fname):
            raise FileNotFoundError('File not found: ' + fname)

    return [read_csv(fname) for fname in names]


def save_samples(filename, *sample_lists):
    """
    Stores one or multiple lists of samples at the path given by
    ``filename``.

    If one list of samples is given, the filename is used as is. If multiple
    lists are given, the filenames are updated to include ``_0``, ``_1``,
    ``_2``, etc.

    For example, ``save_samples('test.csv', samples)`` will store information
    from ``samples`` in ``test.csv``. Using
    ``save_samples('test.csv', samples_0, samples_1)`` will store the samples
    from ``samples_0`` to ``test_0.csv`` and ``samples_1`` to ``test_1.csv``.

    See also: :meth:`load_samples()`.
    """
    import numpy as np
    import os
    import pints

    # Derive one output filename per list of samples
    k = len(sample_lists)
    if k < 1:
        raise ValueError('At least one set of samples must be given.')
    if k == 1:
        names = [filename]
    else:
        base, ext = os.path.splitext(filename)
        names = [base + '_' + str(i) + ext for i in range(k)]

    # Check shapes: all lists must be rectangular, float-valued and 2d
    try:
        sample_lists = np.array(sample_lists, dtype=float)
    except ValueError:
        raise ValueError(
            'Sample lists must contain only floats and be of same length.')
    shape = sample_lists[0].shape
    if len(shape) != 2:
        raise ValueError(
            'Samples must be given as 2d arrays (e.g. lists of lists).')

    # Write each chain to its own CSV file, one header row per file
    header = ','.join('"p' + str(j) + '"' for j in range(shape[1]))
    for fname, samples in zip(names, sample_lists):
        with open(fname, 'w') as f:
            f.write(header + '\n')
            for sample in samples:
                f.write(
                    ','.join(pints.strfloat(v) for v in sample) + '\n')
#
# Plots autocorrelation in a chain
#
# This file is part of PINTS (https://github.com/pints-team/pints/) which is
# released under the BSD 3-clause license. See accompanying LICENSE.md for
# copyright notice and full license details.
#
import numpy as np


def autocorrelation(samples, max_lags=100, parameter_names=None):
    """
    Creates and returns an autocorrelation plot for a given markov chain or
    list of `samples`.

    Returns a ``matplotlib`` figure object and axes handle.

    Parameters
    ----------
    samples
        A list of samples, with shape ``(n_samples, n_parameters)``, where
        ``n_samples`` is the number of samples in the list and
        ``n_parameters`` is the number of parameters.
    max_lags
        The maximum autocorrelation lag to plot.
    parameter_names
        A list of parameter names, which will be displayed in the legend of
        the autocorrelation subplots. If no names are provided, the
        parameters are enumerated.
    """
    import matplotlib.pyplot as plt

    # Accept any array-like input: the docstring promises list support, but
    # a plain Python list has no `.shape` attribute, so without this
    # conversion lists raised AttributeError instead of the documented
    # ValueError.
    samples = np.asarray(samples)

    # Check samples size
    try:
        n_sample, n_param = samples.shape
    except ValueError:
        raise ValueError('`samples` must be of shape (n_sample,'
                         + ' n_parameters).')

    # Check parameter names
    if parameter_names is None:
        parameter_names = ['Parameter' + str(i + 1) for i in range(n_param)]
    elif len(parameter_names) != n_param:
        raise ValueError(
            'Length of `parameter_names` must be same as number of'
            ' parameters.')

    # One subplot per parameter, sharing the lag axis
    fig, axes = plt.subplots(
        n_param, 1, sharex=True, figsize=(6, 2 * n_param))
    if n_param == 1:
        axes = np.asarray([axes], dtype=object)
    for i in range(n_param):
        # Plot autocorrelation of the mean-centred chain
        axes[i].acorr(
            samples[:, i] - np.mean(samples[:, i]), maxlags=max_lags)
        axes[i].set_xlim(-0.5, max_lags + 0.5)
        axes[i].legend([parameter_names[i]], loc='upper right')

    # Add x-label to final plot only
    axes[i].set_xlabel('Lag')

    # Add vertical y-label to middle plot
    axes[int(i / 2)].set_ylabel('Autocorrelation')

    plt.tight_layout()
    return fig, axes
def function(f, x, lower=None, upper=None, evaluations=20):
    """
    Creates 1d plots of a :class:`LogPDF` or a :class:`ErrorMeasure` around a
    point `x` (i.e. a 1-dimensional plot in each direction).

    Returns a ``matplotlib`` figure object and axes handle.

    Parameters
    ----------
    f
        A :class:`pints.LogPDF` or :class:`pints.ErrorMeasure` to plot.
    x
        A point in the function's input space.
    lower
        Optional lower bounds for each parameter, used to specify the lower
        bounds of the plot.
    upper
        Optional upper bounds for each parameter, used to specify the upper
        bounds of the plot.
    evaluations
        The number of evaluations to use in each plot.
    """
    import matplotlib.pyplot as plt

    # Check function type and get dimensionality
    if not isinstance(f, (pints.LogPDF, pints.ErrorMeasure)):
        raise ValueError(
            'Given function must be pints.LogPDF or pints.ErrorMeasure.')
    n_param = f.n_parameters()

    # Check the centre point
    x = pints.vector(x)
    if len(x) != n_param:
        raise ValueError(
            'Given point `x` must have same number of parameters as function.')

    # Lower bounds: use the given values, or guess from x
    if lower is None:
        lower = x * 0.95
        lower[lower == 0] = -1
    else:
        lower = pints.vector(lower)
        if len(lower) != n_param:
            raise ValueError('Lower bounds must have same number of'
                             + ' parameters as function.')

    # Upper bounds: use the given values, or guess from x
    if upper is None:
        upper = x * 1.05
        upper[upper == 0] = 1
    else:
        upper = pints.vector(upper)
        if len(upper) != n_param:
            raise ValueError('Upper bounds must have same number of'
                             + ' parameters as function.')

    # Check number of evaluations per direction
    evaluations = int(evaluations)
    if evaluations < 1:
        raise ValueError('Number of evaluations must be greater than zero.')

    # Build the evaluation grid: every row equals x, except that in the
    # j-th batch of rows parameter j sweeps from lower[j] to upper[j]
    grid = np.tile(x, (n_param * evaluations, 1))
    for j in range(n_param):
        lo = j * evaluations
        hi = lo + evaluations
        grid[lo:hi, j] = np.linspace(lower[j], upper[j], evaluations)

    # Evaluate all points (sequentially)
    values = pints.evaluate(f, grid, parallel=False)

    # One subplot per parameter
    fig, axes = plt.subplots(n_param, 1, figsize=(6, 2 * n_param))
    if n_param == 1:
        axes = np.asarray([axes], dtype=object)
    for j, p in enumerate(x):
        lo = j * evaluations
        hi = lo + evaluations
        axes[j].plot(
            grid[lo:hi, j], values[lo:hi], c='green', label='Function')
        axes[j].axvline(p, c='blue', label='Value')
        axes[j].set_xlabel('Parameter ' + str(1 + j))
        axes[j].legend()

    plt.tight_layout()
    return fig, axes
def function_between_points(f, point_1, point_2, padding=0.25, evaluations=20):
    """
    Creates and returns a plot of a function between two points in parameter
    space.

    Returns a ``matplotlib`` figure object and axes handle.

    Parameters
    ----------
    f
        A :class:`pints.LogPDF` or :class:`pints.ErrorMeasure` to plot.
    point_1
        The first point in parameter space. The method will find a line from
        ``point_1`` to ``point_2`` and plot ``f`` at several points along it.
    point_2
        The second point.
    padding
        Specifies the amount of padding around the line segment
        ``[point_1, point_2]`` that will be shown in the plot.
    evaluations
        The number of evaluation along the line in parameter space.
    """
    import matplotlib.pyplot as plt

    # Check function type and get dimensionality
    if not isinstance(f, (pints.LogPDF, pints.ErrorMeasure)):
        raise ValueError(
            'Given function must be pints.LogPDF or pints.ErrorMeasure.')
    n_param = f.n_parameters()

    # Check both end points
    point_1 = pints.vector(point_1)
    point_2 = pints.vector(point_2)
    if not (len(point_1) == len(point_2) == n_param):
        raise ValueError('Both points must have the same number of parameters'
                         + ' as the given function.')

    # Check padding
    padding = float(padding)
    if padding < 0:
        raise ValueError('Padding cannot be negative.')

    # Check number of evaluations
    evaluations = int(evaluations)
    if evaluations < 3:
        raise ValueError('The number of evaluations must be 3 or greater.')

    # Set up a single-axes figure
    fig, axes = plt.subplots(1, 1, figsize=(6, 4))
    axes.set_xlabel('Point 1 to point 2')
    axes.set_ylabel('Function')

    # Fractional positions along (and slightly beyond) the segment
    fractions = np.linspace(-padding, 1 + padding, evaluations)

    # Evaluate f at point_1 + fraction * (point_2 - point_1)
    direction = point_2 - point_1
    points = [point_1 + s * direction for s in fractions]
    values = pints.evaluate(f, points, parallel=False)

    # Plot, marking the two end points with vertical lines
    axes.plot(fractions, values, color='green')
    axes.axvline(0, color='#1f77b4', label='Point 1')
    axes.axvline(1, color='#7f7f7f', label='Point 2')
    axes.legend()

    return fig, axes
import pints
import pints.toy as toy
import pints.toy.stochastic
import unittest
import numpy as np


class TestRejectionABC(unittest.TestCase):
    """
    Tests the basic methods of the ABC Rejection routine.
    """
    @classmethod
    def setUpClass(cls):
        """
        Set up toy model, parameter values, problem, and error measure.
        """
        # Toy stochastic degradation model with a known parameter
        cls.model = toy.stochastic.DegradationModel()
        cls.real_parameters = [0.1]
        cls.times = np.linspace(0, 10, 10)
        cls.values = cls.model.simulate(cls.real_parameters, cls.times)

        # Problem linking the model and the simulated time series
        cls.problem = pints.SingleOutputProblem(
            cls.model, cls.times, cls.values)

        # Uniform prior over the single parameter
        cls.log_prior = pints.UniformLogPrior(
            [0.0],
            [0.3]
        )

        # Error measure used as the ABC distance
        cls.error_measure = pints.RootMeanSquaredError(cls.problem)

    def test_method(self):
        # Short run using the ask-and-tell interface

        sampler = pints.RejectionABC(self.log_prior)

        n_draws = 1
        n_iterations = 20

        # Keep asking until each tell() yields an accepted sample
        accepted = []
        while len(accepted) < n_iterations:
            proposal = sampler.ask(n_draws)[0]
            distance = self.error_measure(proposal)
            sample = sampler.tell(distance)
            while sample is None:
                proposal = sampler.ask(n_draws)[0]
                distance = self.error_measure(proposal)
                sample = sampler.tell(distance)
            accepted.append(sample)

        accepted = np.array(accepted)
        self.assertEqual(accepted.shape[0], n_iterations)

    def test_errors(self):
        # Test error handling in abc rejection
        sampler = pints.RejectionABC(self.log_prior)
        sampler.ask(1)
        # Two consecutive asks raise an error
        self.assertRaises(RuntimeError, sampler.ask, 1)
        # A large distance is rejected: tell() returns None
        self.assertTrue(sampler.tell(np.array([100])) is None)
        # tell() before ask() raises an error
        self.assertRaises(RuntimeError, sampler.tell, 2.5)

    def test_setters_and_getters(self):
        # Test setting and getting of the threshold, and the name
        sampler = pints.RejectionABC(self.log_prior)
        self.assertEqual('Rejection ABC', sampler.name())
        self.assertEqual(sampler.threshold(), 1)
        sampler.set_threshold(2)
        self.assertEqual(sampler.threshold(), 2)
        self.assertRaises(ValueError, sampler.set_threshold, -3)


if __name__ == '__main__':
    unittest.main()
import os
import pints
import pints.io
import numpy as np
import unittest

from shared import TemporaryDirectory


class TestIO(unittest.TestCase):
    """
    Tests Pints io methods.
    """

    def test_load_save_samples(self):
        # Tests the load_samples() and save_samples() methods.

        m = 10  # Number of samples
        n = 5   # Number of parameters
        chain0 = np.random.uniform(size=(m, n))
        chain1 = np.random.uniform(size=(m, n))

        # Nested lists must be supported as well as arrays
        chain2 = [list(row) for row in np.random.uniform(size=(m, n))]

        # Check saving and loading in a scratch directory
        with TemporaryDirectory() as d:
            # Single chain round-trip
            filename = d.path('test.csv')
            pints.io.save_samples(filename, chain0)
            self.assertTrue(os.path.isfile(filename))
            test0 = pints.io.load_samples(filename)
            self.assertEqual(chain0.shape, test0.shape)
            self.assertTrue(np.all(chain0 == test0))
            self.assertFalse(chain0 is test0)

            # Multiple chain round-trip, with systematic file naming
            filename = d.path('multi.csv')
            pints.io.save_samples(filename, chain0, chain1, chain2)
            self.assertTrue(os.path.isfile(d.path('multi_0.csv')))
            self.assertTrue(os.path.isfile(d.path('multi_1.csv')))
            self.assertTrue(os.path.isfile(d.path('multi_2.csv')))
            test0, test1, test2 = pints.io.load_samples(filename, 3)
            self.assertEqual(chain0.shape, test0.shape)
            self.assertTrue(np.all(chain0 == test0))
            self.assertFalse(chain0 is test0)
            self.assertEqual(chain1.shape, test1.shape)
            self.assertTrue(np.all(chain1 == test1))
            self.assertFalse(chain1 is test1)
            self.assertEqual(np.asarray(chain2).shape, test2.shape)
            self.assertTrue(np.all(np.asarray(chain2) == test2))
            self.assertFalse(chain2 is test2)

            # Invalid save_samples() calls
            self.assertRaisesRegex(
                ValueError, 'At least one set of samples',
                pints.io.save_samples, filename)
            chainX = np.random.uniform(size=(2, 2, 2))
            self.assertRaisesRegex(
                ValueError, 'must be given as 2d arrays',
                pints.io.save_samples, filename, chainX)
            chainY = [[1, 2], [3, 4, 5]]
            self.assertRaisesRegex(
                ValueError, 'same length',
                pints.io.save_samples, filename, chainY)

            # Invalid load_samples() calls
            self.assertRaisesRegex(
                ValueError, 'integer greater than zero',
                pints.io.load_samples, filename, 0)
            filename = d.path('x.csv')
            self.assertRaises(
                FileNotFoundError, pints.io.load_samples, filename)
            self.assertRaises(
                FileNotFoundError, pints.io.load_samples, filename, 10)


if __name__ == '__main__':
    unittest.main()


# (pints/tests/test_matrix2d.py)
class TestMatrix2d(unittest.TestCase):
    """
    Tests conversion to a read-only 2-d matrix type
    """
    def test_matrix2d(self):
        # Correct use with 2d arrays
        v = pints.matrix2d(np.array([[1, 2], [2, 3], [4, 3]]))
        v = pints.matrix2d(np.array([[1, 2, 3], [2, 3, 4]]))

        # Correct use with 1d arrays: both shapes give a single column
        v = pints.matrix2d(np.array([1, 2, 3, 4]).reshape((4, 1)))
        self.assertEqual(v.shape, (4, 1))
        v = pints.matrix2d(np.array([1, 2, 3, 4]))
        self.assertEqual(v.shape, (4, 1))

        # Correct use with lists
        v = pints.matrix2d([[1, 2], [2, 3], [4, 3]])
        v = pints.matrix2d([[1, 2, 3], [2, 3, 4]])

        # Higher dimensional arrays are rejected
        x = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
        self.assertTrue(x.ndim == 3)
        self.assertRaises(ValueError, pints.matrix2d, x)

        # Result is read-only
        def assign():
            v[0, 0] = 10
        self.assertRaises(ValueError, assign)
#!/usr/bin/env python3
#
# Tests the basic methods of the Haario adaptive covariance MCMC routine.
#
# This file is part of PINTS (https://github.com/pints-team/pints/) which is
# released under the BSD 3-clause license. See accompanying LICENSE.md for
# copyright notice and full license details.
#
import pints
import pints.toy as toy
import unittest
import numpy as np

from shared import StreamCapture


class TestHaarioACMC(unittest.TestCase):
    """
    Tests the basic methods of the adaptive covariance MCMC routine.
    """

    @classmethod
    def setUpClass(cls):
        """ Set up problem for tests. """
        # Logistic toy model with known parameters
        cls.model = toy.LogisticModel()
        cls.real_parameters = [0.015, 500]
        cls.times = np.linspace(0, 1000, 1000)
        cls.values = cls.model.simulate(cls.real_parameters, cls.times)

        # Add gaussian noise, and treat its level as a third parameter
        cls.noise = 10
        cls.values += np.random.normal(0, cls.noise, cls.values.shape)
        cls.real_parameters.append(cls.noise)
        cls.real_parameters = np.array(cls.real_parameters)

        # Problem linking the model and the noisy time series
        cls.problem = pints.SingleOutputProblem(
            cls.model, cls.times, cls.values)

        # Uniform prior over the model parameters and the noise level
        cls.log_prior = pints.UniformLogPrior(
            [0.01, 400, cls.noise * 0.1],
            [0.02, 600, cls.noise * 100]
        )

        # Log-likelihood and un-normalised log-posterior
        cls.log_likelihood = pints.GaussianLogLikelihood(cls.problem)
        cls.log_posterior = pints.LogPosterior(
            cls.log_likelihood, cls.log_prior)

    def test_method(self):
        # Perform a short run via ask/tell and check its output

        x0 = self.real_parameters * 1.1
        mcmc = pints.HaarioACMC(x0)

        mcmc.set_target_acceptance_rate(0.3)
        mcmc.set_initial_phase(True)

        rates = []
        chain = []
        for i in range(100):
            proposed = mcmc.ask()
            log_pdf = self.log_posterior(proposed)
            point, point_pdf, was_accepted = mcmc.tell(log_pdf)
            if i == 20:
                mcmc.set_initial_phase(False)
            if i >= 50:
                chain.append(point)
            rates.append(mcmc.acceptance_rate())
            self.assertTrue(isinstance(was_accepted, bool))
            if was_accepted:
                # Accepted: the returned point is the proposal
                self.assertTrue(np.all(proposed == point))
                self.assertEqual(log_pdf, point_pdf)

        chain = np.array(chain)
        rates = np.array(rates)
        self.assertEqual(chain.shape[0], 50)
        self.assertEqual(chain.shape[1], len(x0))
        self.assertEqual(rates.shape[0], 100)

    def test_hyperparameters(self):
        # Hyperparameters are unchanged from the base class
        mcmc = pints.HaarioACMC(self.real_parameters)
        self.assertEqual(mcmc.n_hyper_parameters(), 1)

    def test_name(self):
        # Test the name() method
        mcmc = pints.HaarioACMC(self.real_parameters)
        self.assertEqual(mcmc.name(), 'Haario adaptive covariance MCMC')

    def test_logging(self):
        # Controller logging output includes the method name
        x = [self.real_parameters] * 3
        mcmc = pints.MCMCController(
            self.log_posterior, 3, x, method=pints.HaarioACMC)
        mcmc.set_max_iterations(5)
        with StreamCapture() as c:
            mcmc.run()
        text = c.text()
        self.assertIn('Haario adaptive covariance MCMC', text)


if __name__ == '__main__':
    unittest.main()
class TestRaoBlackwellACMC(unittest.TestCase):
    """
    Tests the basic methods of the Rao-Blackwell AC routine.
    """

    @classmethod
    def setUpClass(cls):
        """ Set up problem for tests. """

        # Create toy model
        cls.model = toy.LogisticModel()
        cls.real_parameters = [0.015, 500]
        cls.times = np.linspace(0, 1000, 1000)
        cls.values = cls.model.simulate(cls.real_parameters, cls.times)

        # Add noise (and append the noise level as a third parameter)
        cls.noise = 10
        cls.values += np.random.normal(0, cls.noise, cls.values.shape)
        cls.real_parameters.append(cls.noise)
        cls.real_parameters = np.array(cls.real_parameters)

        # Create an object with links to the model and time series
        cls.problem = pints.SingleOutputProblem(
            cls.model, cls.times, cls.values)

        # Create a uniform prior over both the parameters and the new noise
        # variable
        cls.log_prior = pints.UniformLogPrior(
            [0.01, 400, cls.noise * 0.1],
            [0.02, 600, cls.noise * 100]
        )

        # Create a log likelihood
        cls.log_likelihood = pints.GaussianLogLikelihood(cls.problem)

        # Create an un-normalised log-posterior (log-likelihood + log-prior)
        cls.log_posterior = pints.LogPosterior(
            cls.log_likelihood, cls.log_prior)

    def test_method(self):
        # Runs a short chain via the ask/tell interface and checks the
        # returned samples and acceptance information.

        # Create mcmc
        x0 = self.real_parameters * 1.1
        mcmc = pints.RaoBlackwellACMC(x0)

        # Configure
        mcmc.set_target_acceptance_rate(0.3)
        mcmc.set_initial_phase(True)

        # Perform short run
        rate = []
        chain = []
        for i in range(100):
            x = mcmc.ask()
            fx = self.log_posterior(x)
            y, fy, ac = mcmc.tell(fx)
            if i == 20:
                # Leave the initial (non-adaptive) phase partway through
                mcmc.set_initial_phase(False)
            if i >= 50:
                # Only collect the second half of the run
                chain.append(y)
            rate.append(mcmc.acceptance_rate())
            self.assertTrue(isinstance(ac, bool))
            if ac:
                # If accepted, the returned sample equals the proposal
                self.assertTrue(np.all(x == y))
                self.assertEqual(fx, fy)

        chain = np.array(chain)
        rate = np.array(rate)
        self.assertEqual(chain.shape[0], 50)
        self.assertEqual(chain.shape[1], len(x0))
        self.assertEqual(rate.shape[0], 100)

    def test_hyperparameters(self):
        # Hyperparameters unchanged from base class
        mcmc = pints.RaoBlackwellACMC(self.real_parameters)
        self.assertEqual(mcmc.n_hyper_parameters(), 1)

    def test_name(self):
        # Test name method
        mcmc = pints.RaoBlackwellACMC(self.real_parameters)
        self.assertEqual(mcmc.name(), 'Rao-Blackwell adaptive covariance MCMC')

    def test_logging(self):

        # Test logging includes name.
        x = [self.real_parameters] * 3
        mcmc = pints.MCMCController(
            self.log_posterior, 3, x, method=pints.RaoBlackwellACMC)
        mcmc.set_max_iterations(5)
        with StreamCapture() as c:
            mcmc.run()
        text = c.text()
        self.assertIn('Rao-Blackwell adaptive covariance MCMC', text)
class TestMultiOutputProblem(unittest.TestCase):
    """
    Tests MultiOutputProblem methods.
    """
    def test_basics(self):
        # Tests construction, getters, and input checking.

        model = pints.toy.FitzhughNagumoModel()
        self.assertEqual(model.n_outputs(), 2)

        times = [0, 1, 2, 3]
        x = [1, 1, 1]
        values = model.simulate(x, times)
        noisy = values + np.array([
            [0.01, -0.02], [-0.01, -0.02], [-0.01, 0.02], [0.01, -0.02]])
        problem = pints.MultiOutputProblem(model, times, noisy)

        self.assertTrue(np.all(times == problem.times()))
        self.assertTrue(np.all(noisy == problem.values()))
        self.assertTrue(np.all(values == problem.evaluate(x)))
        # Fix: assertEqual's third positional argument is a failure *message*,
        # so the old `assertEqual(a, b, 2)` calls never compared against the
        # literal (and a duplicated line hid that the n_outputs literal was
        # wrong). Check each value explicitly instead.
        self.assertEqual(problem.n_parameters(), model.n_parameters())
        self.assertEqual(problem.n_parameters(), 3)
        self.assertEqual(problem.n_outputs(), model.n_outputs())
        self.assertEqual(problem.n_outputs(), 2)
        self.assertEqual(problem.n_times(), len(times))

        # Test errors: negative times are rejected
        times[0] = -2
        self.assertRaises(
            ValueError, pints.MultiOutputProblem, model, times, values)
        # Non-increasing times are rejected
        times = [1, 2, 2, 1]
        self.assertRaises(
            ValueError, pints.MultiOutputProblem, model, times, values)
        # Mismatched numbers of times and values are rejected
        times = [1, 2, 3]
        self.assertRaises(
            ValueError, pints.MultiOutputProblem, model, times, values)

        # Single value model is fine too!
        model = pints.toy.LogisticModel()
        self.assertEqual(model.n_outputs(), 1)
        values = model.simulate([1, 1], times)
        pints.MultiOutputProblem(model, times, values)
class TestNestedRejectionSampler(unittest.TestCase):
    """
    Unit (not functional!) tests for :class:`NestedRejectionSampler`.
    """

    @classmethod
    def setUpClass(cls):
        """ Prepare for the test. """
        # Generate data from the logistic toy model
        toy_model = pints.toy.LogisticModel()
        cls.real_parameters = [0.015, 500]
        sample_times = np.linspace(0, 1000, 1000)
        data = toy_model.simulate(cls.real_parameters, sample_times)

        # Seed, then corrupt the data with gaussian noise
        np.random.seed(1)
        cls.noise = 10
        data += np.random.normal(0, cls.noise, data.shape)
        cls.real_parameters.append(cls.noise)

        # Wrap model and data in a problem definition
        problem = pints.SingleOutputProblem(toy_model, sample_times, data)

        # Uniform prior over the two model parameters
        cls.log_prior = pints.UniformLogPrior(
            [0.01, 400],
            [0.02, 600]
        )

        # Log-likelihood with known noise level
        cls.log_likelihood = pints.GaussianKnownSigmaLogLikelihood(
            problem, cls.noise)

    def test_construction_errors(self):
        # Tests if invalid constructor calls are picked up.

        # First arg must be a log prior, not a log likelihood
        self.assertRaisesRegex(
            ValueError, 'must extend pints.LogPrior',
            pints.NestedRejectionSampler, self.log_likelihood)

    def test_hyper_params(self):
        # Tests the hyper parameter interface is working.
        sampler = pints.NestedRejectionSampler(self.log_prior)
        self.assertEqual(sampler.n_hyper_parameters(), 1)
        sampler.set_hyper_parameters([220])

    def test_getters_and_setters(self):
        # Tests various get() and set() methods.
        sampler = pints.NestedRejectionSampler(self.log_prior)

        # Changing the number of active points is reflected by the getter
        n = sampler.n_active_points() + 1
        self.assertNotEqual(sampler.n_active_points(), n)
        sampler.set_n_active_points(n)
        self.assertEqual(sampler.n_active_points(), n)
        # Too few active points is rejected
        self.assertRaisesRegex(
            ValueError, 'greater than 5', sampler.set_n_active_points, 5)
        self.assertEqual(sampler.name(), 'Nested rejection sampler')
        self.assertTrue(not sampler.needs_initial_phase())

    def test_ask(self):
        # Tests ask.
        sampler = pints.NestedRejectionSampler(self.log_prior)
        points = sampler.ask(1)
        self.assertTrue(np.isfinite(self.log_likelihood(points)))

        # test multiple points being asked and tell'd
        sampler = pints.NestedRejectionSampler(self.log_prior)
        points = sampler.ask(50)
        self.assertEqual(len(points), 50)
        scores = [self.log_likelihood(p) for p in points]
        proposed = sampler.tell(scores)
        self.assertTrue(len(proposed) > 1)
23 | 24 | # Basic test 25 | np.random.seed(1) 26 | xopt, fopt = pints.fmin(f, [1, 1], method=pints.XNES) 27 | self.assertAlmostEqual(xopt[0], 3) 28 | self.assertAlmostEqual(xopt[1], -5) 29 | 30 | # Function must be callable 31 | self.assertRaisesRegex(ValueError, 'callable', pints.fmin, 3, [1]) 32 | 33 | # Test with boundaries 34 | xopt, fopt = pints.fmin( 35 | f, [1, 1], boundaries=([-10, -10], [10, 10]), method=pints.SNES) 36 | self.assertAlmostEqual(xopt[0], 3) 37 | self.assertAlmostEqual(xopt[1], -5) 38 | 39 | # Test with extra arguments 40 | def g(x, y, z): 41 | return (x[0] - 3) ** 2 + (x[1] + 5) ** 2 + y / z 42 | xopt, fopt = pints.fmin(g, [1, 1], args=[1, 2], method=pints.XNES) 43 | self.assertAlmostEqual(xopt[0], 3) 44 | self.assertAlmostEqual(xopt[1], -5) 45 | 46 | # Test with parallelisation 47 | pints.fmin(f, [1, 1], parallel=True, method=pints.XNES) 48 | 49 | def test_curve_fit(self): 50 | # Tests :meth:`pints.curve_fit()`. 51 | 52 | # Note: This just wraps around `OptimisationController`, so testing 53 | # done here is for wrapper code, not main functionality! 
54 | np.random.seed(1) 55 | 56 | # Basic test 57 | x = np.linspace(-5, 5, 100) 58 | e = np.random.normal(loc=0, scale=0.1, size=x.shape) 59 | y = g(x, 9, 3, 1) + e 60 | 61 | p0 = [0, 0, 0] 62 | np.random.seed(1) 63 | popt, fopt = pints.curve_fit(g, x, y, p0, method=pints.XNES) 64 | self.assertAlmostEqual(popt[0], 9, places=1) 65 | self.assertAlmostEqual(popt[1], 3, places=1) 66 | self.assertAlmostEqual(popt[2], 1, places=1) 67 | 68 | # Function must be callable 69 | self.assertRaisesRegex( 70 | ValueError, 'callable', pints.curve_fit, 3, x, y, p0) 71 | 72 | # Test with boundaries 73 | popt, fopt = pints.curve_fit( 74 | g, x, y, p0, 75 | boundaries=([-10, -10, -10], [10, 10, 10]), method=pints.XNES) 76 | self.assertAlmostEqual(popt[0], 9, places=1) 77 | self.assertAlmostEqual(popt[1], 3, places=1) 78 | self.assertAlmostEqual(popt[2], 1, places=1) 79 | 80 | # Test with parallelisation 81 | pints.curve_fit(g, x, y, p0, parallel=True, method=pints.XNES) 82 | 83 | # Test with invalid sizes of `x` and `y` 84 | x = np.linspace(-5, 5, 99) 85 | self.assertRaisesRegex( 86 | ValueError, 'dimension', pints.curve_fit, g, x, y, p0) 87 | 88 | 89 | def f(x): 90 | """ Pickleable test function. """ 91 | return (x[0] - 3) ** 2 + (x[1] + 5) ** 2 92 | 93 | 94 | def g(x, a, b, c): 95 | """ Pickleable test function. """ 96 | return a + b * x + c * x ** 2 97 | 98 | 99 | if __name__ == '__main__': 100 | unittest.main() 101 | -------------------------------------------------------------------------------- /pints/tests/test_single_output_problem.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # 3 | # Tests SingleOutputProblem methods. 4 | # 5 | # This file is part of PINTS (https://github.com/pints-team/pints/) which is 6 | # released under the BSD 3-clause license. See accompanying LICENSE.md for 7 | # copyright notice and full license details. 
class TestSingleOutputProblem(unittest.TestCase):
    """
    Tests SingleOutputProblem methods.
    """
    def test_basics(self):
        # Tests construction, getters, and input checking.

        model = pints.toy.LogisticModel()
        times = [0, 1, 2, 3]
        x = [1, 1]
        values = model.simulate(x, times)
        noisy = values + np.array([0.01, -0.01, 0.01, -0.01])
        problem = pints.SingleOutputProblem(model, times, noisy)

        self.assertTrue(np.all(times == problem.times()))
        self.assertTrue(np.all(noisy == problem.values()))
        self.assertTrue(np.all(values == problem.evaluate(x)))
        # Fix: assertEqual's third positional argument is a failure *message*,
        # so the old `assertEqual(a, b, 2)` calls never compared against the
        # literal. Check each value explicitly instead.
        self.assertEqual(problem.n_parameters(), model.n_parameters())
        self.assertEqual(problem.n_parameters(), 2)
        self.assertEqual(problem.n_outputs(), model.n_outputs())
        self.assertEqual(problem.n_outputs(), 1)
        self.assertEqual(problem.n_times(), len(times))

        # Test errors: negative times are rejected
        times[0] = -2
        self.assertRaises(
            ValueError, pints.SingleOutputProblem, model, times, values)
        # Non-increasing times are rejected
        times = [1, 2, 2, 1]
        self.assertRaises(
            ValueError, pints.SingleOutputProblem, model, times, values)
        # Mismatched numbers of times and values are rejected
        times = [1, 2, 3]
        self.assertRaises(
            ValueError, pints.SingleOutputProblem, model, times, values)

        # Multi-output problem not allowed
        model = pints.toy.FitzhughNagumoModel()
        self.assertEqual(model.n_outputs(), 2)
        values = model.simulate([1, 1, 1], times)
        self.assertRaises(
            ValueError, pints.SingleOutputProblem, model, times, values)
class TestTimer(unittest.TestCase):
    """
    Tests the basic methods of the Timer class.
    """
    # Fix: removed a redundant __init__ override (it only called super) and
    # the unreachable Python 2 branch in test_format -- this module declares
    # `#!/usr/bin/env python3`, so `sys.hexversion < 0x3000000` is never true.

    def test_timing(self):
        # Test the time() and reset() methods.

        t = pints.Timer()
        a = t.time()
        self.assertGreaterEqual(a, 0)
        # Elapsed time grows monotonically between calls
        for i in range(10):
            self.assertGreater(t.time(), a)
            a = t.time()
        # After a reset, the elapsed time starts again from (near) zero
        t.reset()
        b = t.time()
        self.assertGreaterEqual(b, 0)
        self.assertLess(b, a)

    def test_format(self):
        # Test the format() method.

        t = pints.Timer()
        self.assertEqual(t.format(1e-3), '0.001 seconds')
        # Very small times are shown in full precision
        self.assertEqual(t.format(0.000123456789), '0.000123456789 seconds')
        self.assertEqual(t.format(0.123456789), '0.12 seconds')
        # On Python 3, integer repr has no trailing `.0`
        self.assertEqual(t.format(2), '2 seconds')
        self.assertEqual(t.format(2.5), '2.5 seconds')
        self.assertEqual(t.format(12.5), '12.5 seconds')
        self.assertEqual(t.format(59.41), '59.41 seconds')
        self.assertEqual(t.format(59.4126347547), '59.41 seconds')
        # Larger times are broken into week/day/hour/minute/second parts,
        # with singular/plural unit names
        self.assertEqual(t.format(60.2), '1 minute, 0 seconds')
        self.assertEqual(t.format(61), '1 minute, 1 second')
        self.assertEqual(t.format(121), '2 minutes, 1 second')
        self.assertEqual(
            t.format(604800),
            '1 week, 0 days, 0 hours, 0 minutes, 0 seconds')
        self.assertEqual(
            t.format(2 * 604800 + 3 * 3600 + 60 + 4),
            '2 weeks, 0 days, 3 hours, 1 minute, 4 seconds')

        # Test without argument
        self.assertIsInstance(t.format(), str)
class TestActionPotentialModel(unittest.TestCase):
    """
    Tests if the Beeler-Reuter AP (toy) model works.
    """

    def test_creation(self):

        # Test creation
        model = pints.toy.ActionPotentialModel()

        # Test setting and getting init cond.
        self.assertFalse(np.all(model.initial_conditions() == [-80, 1e-5]))
        model.set_initial_conditions([-80, 1e-5])
        self.assertTrue(np.all(model.initial_conditions() == [-80, 1e-5]))

        # Initial conditions cannot be negative
        self.assertRaisesRegex(
            ValueError, 'cannot be negative',
            pints.toy.ActionPotentialModel, [-80, -1])

    def test_suggestions(self):

        # Suggested parameters have one entry per model parameter
        model = pints.toy.ActionPotentialModel()
        p0 = model.suggested_parameters()
        self.assertEqual(len(p0), model.n_parameters())

        # Suggested times are strictly increasing
        times = model.suggested_times()
        self.assertTrue(np.all(times[1:] > times[:-1]))

    def test_simulation(self):

        model = pints.toy.ActionPotentialModel()
        times = model.suggested_times()
        p0 = model.suggested_parameters()

        # Test simulating all eight states
        states = model.simulate_all_states(p0, times)
        self.assertEqual(len(states.shape), 2)
        self.assertEqual(states.shape[0], len(times))
        self.assertEqual(states.shape[1], 8)

        # Test initial state
        x0 = np.array([-84.622, 2e-7, 0.01, 0.99, 0.98, 0.003, 0.99, 0.0004])
        self.assertTrue(np.all(states[0] == x0))

        # Test state during AP (at 100ms, with stimulus applied at t=0ms)
        # Reference values taken from a Myokit simulation with the same model
        i100 = 200
        self.assertEqual(times[i100], 100)
        x100 = [
            1.09411249975881226e+01,
            6.14592181872724475e-06,
            9.93428572073381311e-01,
            2.17659582339377205e-11,
            -3.78573517405393772e-11,
            9.72515482289432853e-01,
            7.67322448537725688e-01,
            2.34776564989968184e-01,
        ]
        # NOTE(review): only 2-decimal-place agreement is required here --
        # presumably to allow for solver tolerance differences with the
        # reference simulation; confirm before tightening.
        self.assertAlmostEqual(states[i100][0], x100[0], places=2)
        self.assertAlmostEqual(states[i100][1], x100[1], places=2)
        self.assertAlmostEqual(states[i100][2], x100[2], places=2)
        self.assertAlmostEqual(states[i100][3], x100[3], places=2)
        self.assertAlmostEqual(states[i100][4], x100[4], places=2)
        self.assertAlmostEqual(states[i100][5], x100[5], places=2)
        self.assertAlmostEqual(states[i100][6], x100[6], places=2)
        self.assertAlmostEqual(states[i100][7], x100[7], places=2)

        # Test simulation outputting only observables
        partial = model.simulate(p0, times)
        self.assertEqual(len(times), len(partial))
        self.assertEqual(partial.shape[1], model.n_outputs())

        # The observable outputs equal the first two state columns
        partial = np.array(partial)
        states = np.array(states)
        self.assertTrue(np.all(partial[:, 0] == states[:, 0]))
        self.assertTrue(np.all(partial[:, 1] == states[:, 1]))
class TestToyODEModel(unittest.TestCase):
    """
    Tests if the Toy ODE Model's non-abstract methods work.
    """

    def test_run(self):
        # Setting initial conditions must be reflected by the getter
        hes1 = pints.toy.Hes1Model()
        y0 = [1, 2, 3]
        hes1.set_initial_conditions(y0)
        self.assertTrue(np.array_equal(hes1.initial_conditions(), y0))
""" 21 | 22 | # Default settings 23 | f = pints.toy.EightSchoolsLogPDF() 24 | f1, dp = f.evaluateS1(np.ones(10)) 25 | self.assertEqual(f1, f(np.ones(10))) 26 | self.assertAlmostEqual(f1, -43.02226038161451) 27 | self.assertEqual(len(dp), 10) 28 | self.assertEqual(dp[0], -1.0 / 25) 29 | self.assertAlmostEqual(dp[1], -8.076923076923077, places=6) 30 | self.assertEqual(dp[2], 3.0 / 25) 31 | val = f([1, 0.5, 0.4, 1, 1, 1, 1, 1, 1, 1]) 32 | self.assertAlmostEqual(val, -38.24061255483484, places=6) 33 | 34 | # Default settings with non-ones input 35 | f1, dp = f.evaluateS1([n + 1 for n in range(10)]) 36 | self.assertEqual(f1, f([n + 1 for n in range(10)])) 37 | self.assertAlmostEqual(f1, -83.0819420614) 38 | self.assertEqual(len(dp), 10) 39 | self.assertEqual(dp[0], 10.96) 40 | self.assertAlmostEqual(dp[1], 31.3620689655, places=6) 41 | self.assertEqual(dp[2], -7.0 / 18) 42 | 43 | # non-centered paramerisation 44 | f = pints.toy.EightSchoolsLogPDF(centered=False) 45 | f1, dp = f.evaluateS1(np.ones(10)) 46 | self.assertEqual(f1, f(np.ones(10))) 47 | self.assertAlmostEqual(f1, -46.649195204910605) 48 | self.assertEqual(len(dp), 10) 49 | self.assertAlmostEqual(dp[0], 0.3029093172890521, places=6) 50 | self.assertAlmostEqual(dp[1], 0.2659862403659751, places=6) 51 | self.assertAlmostEqual(dp[2], -0.8844444444444445, places=6) 52 | val = f([1, 0.5, 0.4, 1, 1, 1, 1, 1, 1, 1]) 53 | self.assertAlmostEqual(val, -46.41445177944207, places=6) 54 | 55 | # Test data 56 | data = f.data() 57 | self.assertEqual(len(data), 3) 58 | self.assertEqual(data["J"], 8) 59 | self.assertTrue( 60 | np.array_equal(data["y"], [28, 8, -3, 7, -1, 1, 18, 12])) 61 | self.assertTrue( 62 | np.array_equal(data["sigma"], [15, 10, 16, 11, 9, 11, 10, 18])) 63 | 64 | def test_bad_calls(self): 65 | # Tests bad calls 66 | 67 | # Bad calls to function 68 | f = pints.toy.EightSchoolsLogPDF() 69 | self.assertRaises(ValueError, f.__call__, [1, 2, 3]) 70 | self.assertRaises(ValueError, f.__call__, np.ones(11)) 71 | 72 
| # Bad calls to evaluate 73 | self.assertRaises(ValueError, f.evaluateS1, [1, 2, 3]) 74 | self.assertRaises(ValueError, f.evaluateS1, np.ones(11)) 75 | 76 | def test_negative_sd(self): 77 | # Tests that sd < 0 returns -log infinity 78 | f = pints.toy.EightSchoolsLogPDF() 79 | x = np.ones(10) 80 | x[1] = -1 81 | self.assertEqual(f(x), -np.inf) 82 | logp, grad = f.evaluateS1(x) 83 | self.assertEqual(logp, -np.inf) 84 | self.assertTrue(np.array_equal(grad, np.full([1, 10], -np.inf))) 85 | 86 | def test_bounds(self): 87 | """ Tests suggested_bounds() """ 88 | f = pints.toy.EightSchoolsLogPDF() 89 | bounds = f.suggested_bounds() 90 | self.assertEqual(bounds[0][1], 0) 91 | 92 | 93 | if __name__ == '__main__': 94 | unittest.main() 95 | -------------------------------------------------------------------------------- /pints/tests/test_toy_fitzhugh_nagumo_model.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # 3 | # Tests if the Fitzhugh-Nagumo toy model runs. 4 | # 5 | # This file is part of PINTS (https://github.com/pints-team/pints/) which is 6 | # released under the BSD 3-clause license. See accompanying LICENSE.md for 7 | # copyright notice and full license details. 8 | # 9 | import unittest 10 | import pints 11 | import pints.toy 12 | import numpy as np 13 | 14 | 15 | class TestFitzhughNagumoModel(unittest.TestCase): 16 | """ 17 | Tests if the Fitzhugh-Nagumo toy model runs. 
18 | """ 19 | 20 | def test_run(self): 21 | 22 | # Test basic properties 23 | model = pints.toy.FitzhughNagumoModel() 24 | self.assertEqual(model.n_parameters(), 3) 25 | self.assertEqual(model.n_outputs(), 2) 26 | 27 | # Test simulation 28 | x = model.suggested_parameters() 29 | times = model.suggested_times() 30 | values = model.simulate(x, times) 31 | self.assertEqual(values.shape, (len(times), 2)) 32 | 33 | # Simulation with sensitivities 34 | values, dvalues_dp = model.simulateS1(x, times) 35 | self.assertEqual(values.shape, (len(times), 2)) 36 | self.assertEqual(dvalues_dp.shape, (len(times), 2, 3)) 37 | 38 | # Test alternative starting position 39 | model = pints.toy.FitzhughNagumoModel([0.1, 0.1]) 40 | values = model.simulate(x, times) 41 | self.assertEqual(values.shape, (len(times), 2)) 42 | 43 | # Times can't be negative 44 | times = [-1, 2, 3, 4] 45 | self.assertRaises(ValueError, model.simulate, x, times) 46 | 47 | # Initial value must have size 2 48 | pints.toy.FitzhughNagumoModel([1, 1]) 49 | self.assertRaises(ValueError, pints.toy.FitzhughNagumoModel, [1]) 50 | 51 | def test_values(self): 52 | # value-based tests of Fitzhugh-Nagumo model 53 | parameters = [0.2, 0.4, 2.5] 54 | y0 = [-2, 1.5] 55 | times = np.linspace(0, 20, 201) 56 | model = pints.toy.FitzhughNagumoModel(y0) 57 | values = model.simulate(parameters, times) 58 | self.assertAlmostEqual(values[200, 0], 1.675726, places=6) 59 | self.assertAlmostEqual(values[200, 1], -0.226142, places=6) 60 | 61 | def test_sensitivities(self): 62 | # compares sensitivities against standards 63 | model = pints.toy.FitzhughNagumoModel([2, 3]) 64 | parameters = [0.2, 0.7, 2.8] 65 | 66 | # Test with initial point t=0 included in range 67 | sols, sens = model.simulateS1(parameters, [0, 7, 12]) 68 | self.assertAlmostEqual(sens[1, 0, 2], 5.01378, 5) 69 | self.assertAlmostEqual(sens[2, 1, 1], 0.82883, 4) 70 | 71 | # Test without initial point in range 72 | sols, sens = model.simulateS1(parameters, [7, 12]) 73 | 
class TestGermanCreditHierarchicalLogPDF(unittest.TestCase):
    """
    Tests the toy log-pdf obtained by fitting a hierarchical logistic
    regression model to the German credit data set.
    """
    @classmethod
    def setUpClass(cls):
        """ Download the data once and share it across all tests. """
        problem = pints.toy.GermanCreditHierarchicalLogPDF(download=True)
        design, labels, z = problem.data()
        cls.y = labels
        cls.x = design
        cls.model = problem

    def test_download(self):
        # The downloaded UCI data should have 1000 rows and 25 predictors
        design, labels, _ = self.model.data()
        self.assertEqual(design.shape[0], 1000)
        self.assertEqual(design.shape[1], 25)
        self.assertEqual(len(labels), 1000)

    def test_errors(self):
        # Inappropriate constructor arguments must raise ValueError
        bad_pairs = [
            (np.zeros((27, 27)), self.y),   # mismatched design matrix
            (self.x, np.ones(1000) * 2),    # invalid label values
            (None, self.y),                 # x missing
            (self.x, None),                 # y missing
        ]
        for x_arg, y_arg in bad_pairs:
            self.assertRaises(
                ValueError, pints.toy.GermanCreditHierarchicalLogPDF,
                x_arg, y_arg)
        # Supplying data together with download=True is also rejected
        self.assertRaises(
            ValueError, pints.toy.GermanCreditHierarchicalLogPDF,
            self.x, self.y, True)

    def test_local(self):
        # Construction from locally supplied arrays reproduces the data
        design, labels, z = self.model.data()
        local = pints.toy.GermanCreditHierarchicalLogPDF(x=design, y=labels)
        design1, labels1, z1 = local.data()
        self.assertTrue(np.array_equal(design, design1))
        self.assertTrue(np.array_equal(labels, labels1))
        self.assertTrue(np.array_equal(z, z1))

    def test_values(self):
        # Log-pdf at an all-ones parameter vector, vs stored reference
        self.assertAlmostEqual(
            self.model(np.ones(326)), -20174.077700157857, places=6)

    def test_sensitivities(self):
        # Gradient entries compared against stored references
        val, grad = self.model.evaluateS1(np.ones(326))
        self.assertEqual(val, self.model(np.ones(326)))
        self.assertEqual(len(grad), 326)
        self.assertAlmostEqual(grad[0], -1000.02)
        self.assertAlmostEqual(grad[1], -700.8386959844057, places=6)

    def test_givens(self):
        # Dimension and suggested bounds
        self.assertEqual(326, self.model.n_parameters())
        bounds = self.model.suggested_bounds()
        self.assertEqual(bounds[0][0], -100)
        self.assertEqual(bounds[1][0], 100)
""" 23 | # download data 24 | model = pints.toy.GermanCreditLogPDF(download=True) 25 | x, y = model.data() 26 | cls.y = y 27 | cls.x = x 28 | cls.model = model 29 | 30 | def test_download(self): 31 | # tests that method can download data from UCI repo 32 | x, y = self.model.data() 33 | self.assertEqual(x.shape[0], 1000) 34 | self.assertEqual(x.shape[1], 25) 35 | self.assertEqual(len(y), 1000) 36 | 37 | def test_errors(self): 38 | # tests errors of inapropriate function calls and inits 39 | self.assertRaises(ValueError, pints.toy.GermanCreditLogPDF, 40 | np.zeros((27, 27)), self.y) 41 | self.assertRaises(ValueError, pints.toy.GermanCreditLogPDF, 42 | self.x, np.ones(1000) * 2) 43 | self.assertRaises(ValueError, pints.toy.GermanCreditLogPDF, 44 | self.x, self.y, True) 45 | self.assertRaises(ValueError, pints.toy.GermanCreditLogPDF, 46 | None, self.y) 47 | self.assertRaises(ValueError, pints.toy.GermanCreditLogPDF, 48 | self.x, None) 49 | 50 | def test_local(self): 51 | # tests that model can be instantiated using local files 52 | x, y = self.model.data() 53 | model = pints.toy.GermanCreditLogPDF(x=x, y=y) 54 | x1, y1 = model.data() 55 | self.assertTrue(np.array_equal(x, x1)) 56 | self.assertTrue(np.array_equal(y, y1)) 57 | 58 | def test_values(self): 59 | # tests calls 60 | self.assertAlmostEqual(self.model(np.zeros(25)), -693.1471805599322, 61 | places=6) 62 | self.assertAlmostEqual(self.model(np.ones(25)), -2887.6292678483533, 63 | places=6) 64 | 65 | def test_sensitivities(self): 66 | # test sensitivity values vs reference 67 | val, dp = self.model.evaluateS1(np.zeros(25)) 68 | self.assertEqual(val, self.model(np.zeros(25))) 69 | self.assertEqual(len(dp), 25) 70 | self.assertEqual(dp[0], -200) 71 | self.assertAlmostEqual(dp[1], -160.7785147438439, places=6) 72 | 73 | def test_givens(self): 74 | # tests whether boundaries are correct and n_parameters 75 | self.assertEqual(25, self.model.n_parameters()) 76 | borders = self.model.suggested_bounds() 77 | 
class TestGoodwinOscillatorModel(unittest.TestCase):
    """
    Tests if the goodwin oscillator (toy) model runs.
    """

    def test_run(self):
        # Basic properties and a full simulation over the suggested inputs
        model = pints.toy.GoodwinOscillatorModel()
        self.assertEqual(model.n_parameters(), 5)
        self.assertEqual(model.n_outputs(), 3)
        times = model.suggested_times()
        params = model.suggested_parameters()
        traj = model.simulate(params, times)
        self.assertEqual(traj.shape, (len(times), 3))

    def test_values(self):
        # Solution values compared against stored references
        model = pints.toy.GoodwinOscillatorModel()
        params = [3, 2.5, 0.15, 0.1, 0.12]
        times = np.linspace(0, 10, 101)
        traj = model.simulate(params, times)
        self.assertEqual(traj[0, 0], 0.0054)
        self.assertEqual(traj[0, 1], 0.053)
        self.assertEqual(traj[0, 2], 1.93)
        self.assertAlmostEqual(traj[100, 0], 0.0061854, places=6)
        self.assertAlmostEqual(traj[100, 1], 0.1779547, places=6)
        self.assertAlmostEqual(traj[100, 2], 2.6074527, places=6)

    def test_sensitivity(self):
        # Checks the Jacobian entries and first-order sensitivities
        model = pints.toy.GoodwinOscillatorModel()
        params = [3, 2.5, 0.15, 0.1, 0.12]
        k2, k3, m1, m2, m3 = params
        times = np.linspace(0, 10, 101)
        state = [0.01, 0.1, 2]
        x, y, z = state
        jac = model.jacobian(state, 0.0, params)
        # Expected Jacobian of the three-state ODE right-hand side
        expected = [
            (0, 0, -m1), (0, 1, 0), (0, 2, -10 * z**9 / ((1 + z**10)**2)),
            (1, 0, k2), (1, 1, -m2), (1, 2, 0),
            (2, 0, 0), (2, 1, k3), (2, 2, -m3),
        ]
        for row, col, entry in expected:
            self.assertEqual(jac[row, col], entry)

        traj = model.simulate(params, times)
        traj1, sens = model.simulateS1(params, times)
        self.assertTrue(np.array_equal(traj.shape, traj1.shape))
        self.assertTrue(np.array_equal(
            sens.shape,
            np.array([len(times), model.n_outputs(), model.n_parameters()])))
        # Reference taken from the current implementation's output, to
        # guard against unintended future changes
        self.assertTrue(np.abs(-2.20655371e-05 - sens[10, 0, 0]) < 10**(-5))
        # Solutions with and without sensitivities should agree closely
        for i in range(len(times)):
            for j in range(3):
                self.assertTrue(
                    np.abs(traj[i, j] - traj1[i, j]) < 10**(-3))

        model = pints.toy.GoodwinOscillatorModel()
        params = model.suggested_parameters()
        sols, sens = model.simulateS1(params, [35, 80])
        self.assertAlmostEqual(sens[0, 0, 2], 0.07705, 4)
        self.assertAlmostEqual(sens[1, 1, 3], 3.35704, 4)
class TestHodgkinHuxleyIKModel(unittest.TestCase):
    """
    Tests if the Hodgkin-Huxley IK (toy) model works.
    """

    def test_creation(self):

        # Default construction must succeed
        pints.toy.HodgkinHuxleyIKModel()

        # Initial conditions of 0 or 1 are out of bounds
        self.assertRaises(ValueError, pints.toy.HodgkinHuxleyIKModel, 0)
        self.assertRaises(ValueError, pints.toy.HodgkinHuxleyIKModel, 1)

    def test_suggestions(self):

        model = pints.toy.HodgkinHuxleyIKModel()

        # Suggested parameters match the model dimension
        p0 = model.suggested_parameters()
        self.assertEqual(len(p0), model.n_parameters())

        # Suggested times are strictly increasing
        times = model.suggested_times()
        self.assertTrue(np.all(times[1:] > times[:-1]))

        # Suggested duration
        self.assertEqual(model.suggested_duration(), 1200)

    def test_simulation(self):

        model = pints.toy.HodgkinHuxleyIKModel()
        p0 = model.suggested_parameters()
        times = model.suggested_times()

        # One output per requested time point
        values = model.simulate(p0, times)
        self.assertEqual(len(times), len(values))

        # Compare against reference values from a simulation with Myokit,
        # probing the start, a point during a step, and near the end
        checks = [
            (0, 0, 3.790799999999997, None),    # t = 0 ms
            (1, 0.25, 3.83029, 2),              # t = 0.25 ms
            (390, 97.5, 15.9405, 2),            # during a step
            (4790, 1197.5, 3862.8, 0),          # end (in step again)
        ]
        for index, t, v, places in checks:
            self.assertEqual(times[index], t)
            if places is None:
                self.assertAlmostEqual(values[index], v)
            else:
                self.assertAlmostEqual(values[index], v, places=places)

        # Negative times are rejected
        self.assertRaises(ValueError, model.simulate, p0, [-1, 0, 1])

    def test_fold_method(self):

        # Tests the method that 'folds' the data for a plot similar to that
        # in the original paper

        model = pints.toy.HodgkinHuxleyIKModel()
        p0 = model.suggested_parameters()
        times = model.suggested_times()
        values = model.simulate(p0, times)

        folded = model.fold(times, values)
        self.assertEqual(len(folded), 12)
32 | self.assertFalse(np.all(model.initial_conditions() == [10, 10])) 33 | model.set_initial_conditions([10, 10]) 34 | self.assertTrue(np.all(model.initial_conditions() == [10, 10])) 35 | 36 | # Initial conditions cannot be negative 37 | model = pints.toy.LotkaVolterraModel([0, 0]) 38 | self.assertRaises(ValueError, pints.toy.LotkaVolterraModel, [-1, 0]) 39 | self.assertRaises(ValueError, pints.toy.LotkaVolterraModel, [0, -1]) 40 | self.assertRaises(ValueError, pints.toy.LotkaVolterraModel, [-1, -1]) 41 | 42 | def test_values(self): 43 | # value-based tests of solution 44 | x0 = 3 45 | y0 = 5 46 | model = pints.toy.LotkaVolterraModel([x0, y0]) 47 | parameters = [1, 2, 2, 0.5] 48 | times = np.linspace(0, 5, 101) 49 | values = model.simulate(parameters, times) 50 | self.assertEqual(values[0, 0], x0) 51 | self.assertEqual(values[0, 1], y0) 52 | self.assertAlmostEqual(values[1, 0], 1.929494, places=6) 53 | self.assertAlmostEqual(values[1, 1], 4.806542, places=6) 54 | self.assertAlmostEqual(values[100, 0], 1.277762, places=6) 55 | self.assertAlmostEqual(values[100, 1], 0.000529, places=6) 56 | 57 | def test_sensitivities(self): 58 | # tests sensitivities against standards 59 | model = pints.toy.LotkaVolterraModel() 60 | vals = model.suggested_values() 61 | self.assertEqual(vals.shape[0], 21) 62 | sols, sens = model.simulateS1([0.43, 0.2, 0.9, 0.28], [5, 10]) 63 | self.assertAlmostEqual(sens[0, 0, 0], -4.889418, 5) 64 | self.assertAlmostEqual(sens[1, 1, 3], -0.975323, 5) 65 | 66 | 67 | if __name__ == '__main__': 68 | unittest.main() 69 | -------------------------------------------------------------------------------- /pints/tests/test_toy_parabolic_error.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # 3 | # Tests the parabolic error toy error measure. 4 | # 5 | # This file is part of PINTS (https://github.com/pints-team/pints/) which is 6 | # released under the BSD 3-clause license. 
class TestParabolicError(unittest.TestCase):
    """
    Tests the parabolic error toy error measure.
    """
    def test_parabolic_error(self):

        # Default 2d problem, centred at the origin
        f = pints.toy.ParabolicError()
        self.assertEqual(f.n_parameters(), 2)
        self.assertEqual(list(f.optimum()), [0, 0])
        self.assertEqual(f([0, 0]), 0)
        self.assertTrue(f([0.1, 0.1]) > 0)

        # 3d problem with a shifted optimum
        f = pints.toy.ParabolicError([1, 1, 1])
        self.assertEqual(f.n_parameters(), 3)
        self.assertEqual(list(f.optimum()), [1, 1, 1])
        self.assertEqual(f([1, 1, 1]), 0)
        self.assertTrue(f([1.1, 1.1, 1.1]) > 0)

        # Sensitivities vanish at the optimum
        point = [1, 1, 1]
        fx, dfx = f.evaluateS1(point)
        self.assertEqual(fx, f(point))
        self.assertEqual(dfx.shape, (3, ))
        self.assertTrue(np.all(dfx == [0, 0, 0]))

        # Away from the optimum the gradient matches the stored references
        point = [1.1, 1, 0.8]
        fx, dfx = f.evaluateS1(point)
        self.assertEqual(fx, f(point))
        self.assertEqual(dfx.shape, (3, ))
        self.assertAlmostEqual(dfx[0], 0.2)
        self.assertAlmostEqual(dfx[1], 0)
        self.assertAlmostEqual(dfx[2], -0.4)

        # Calls with the wrong dimension are rejected
        f = pints.toy.ParabolicError([1, 1, 1])
        self.assertRaises(ValueError, f.__call__, [1, 2])
        self.assertRaises(ValueError, f.__call__, [1, 2, 3, 4, 5])
class TestRepressilatorModel(unittest.TestCase):
    """
    Tests if the Repressilator toy model runs.
    """

    def test_run(self):

        # Basic properties
        model = pints.toy.RepressilatorModel()
        self.assertEqual(model.n_parameters(), 4)
        self.assertEqual(model.n_outputs(), 3)

        # A simulation over the suggested inputs has the expected shape
        x = model.suggested_parameters()
        times = model.suggested_times()
        values = model.simulate(x, times)
        self.assertEqual(values.shape, (len(times), model.n_outputs()))

        # Custom initial conditions: must be six values, none negative
        model = pints.toy.RepressilatorModel([1, 1, 1, 1, 1, 1])
        self.assertRaises(
            ValueError, pints.toy.RepressilatorModel, [1, 1, 1, 1, 1])
        self.assertRaises(
            ValueError, pints.toy.RepressilatorModel, [1, 1, 1, -1, 1, 1])

    def test_values(self):
        # Solution values compared against stored references
        times = np.linspace(0, 10, 101)
        parameters = [2, 900, 6, 1.5]
        y0 = [5, 3, 1, 2, 3.5, 2.5]
        model = pints.toy.RepressilatorModel(y0)
        values = model.simulate(parameters, times)
        # The solution starts at the first three initial conditions
        self.assertTrue(np.array_equal(values[0, :], y0[:3]))
        self.assertAlmostEqual(values[1, 0], 18.88838, places=5)
        self.assertAlmostEqual(values[1, 1], 13.77623, places=5)
        self.assertAlmostEqual(values[1, 2], 9.05763, places=5)
        self.assertAlmostEqual(values[100, 0], 14.75099, places=5)
        self.assertAlmostEqual(values[100, 1], 16.55494, places=5)
        self.assertAlmostEqual(values[100, 2], 16.60688, places=5)
class TestRosenbrock(unittest.TestCase):
    """
    Tests the Rosenbrock toy problems.
    """
    def test_error(self):
        """ Tests the Rosenbrock error measure. """
        f = pints.toy.RosenbrockError()
        self.assertEqual(f.n_parameters(), 2)
        fx = f([10, 10])
        self.assertTrue(np.isscalar(fx))
        self.assertEqual(fx, 810081)

        # The optimum has error zero
        xopt = f.optimum()
        fopt = f(xopt)
        self.assertEqual(fopt, 0)

        # Any other point has a strictly larger error
        np.random.seed(1)
        for x in np.random.uniform(-5, 5, size=(10, 2)):
            self.assertTrue(f(x) > fopt)

    def test_log_pdf(self):
        """ Tests the Rosenbrock log-pdf, sensitivities, bounds, distance. """
        f = pints.toy.RosenbrockLogPDF()
        self.assertEqual(f.n_parameters(), 2)
        fx = f([0.5, 6.0])
        self.assertTrue(np.isscalar(fx))
        self.assertAlmostEqual(fx, np.log(1.0 / 3307.5))

        # The optimum has log-pdf zero
        xopt = f.optimum()
        fopt = f(xopt)
        self.assertEqual(fopt, 0)

        # Sensitivity test against hand-computed references
        # (renamed ambiguous local `l` -> `fx`)
        fx, dfx = f.evaluateS1([3, 4])
        self.assertEqual(fx, -np.log(2505))
        self.assertEqual(len(dfx), 2)
        self.assertEqual(dfx[0], float(-6004.0 / 2505.0))
        self.assertEqual(dfx[1], float(200.0 / 501.0))

        # Suggested bounds: [-2, 4] x [-1, 12], transposed into the
        # [[lowers], [uppers]] layout. (Fix: a previous version fetched
        # f.suggested_bounds() and immediately overwrote the result with
        # this literal, making that first call dead code.)
        expected_bounds = np.transpose([[-2, 4], [-1, 12]]).tolist()
        self.assertTrue(np.array_equal(expected_bounds, f.suggested_bounds()))

        # distance() rejects arrays that are not (n, 2)
        x = np.ones((100, 3))
        self.assertRaises(ValueError, f.distance, x)
        x = np.ones((100, 3, 2))
        self.assertRaises(ValueError, f.distance, x)

        # There is no simple way to generate samples from Rosenbrock, so
        # use Gaussian approximations to exercise distance()
        nsamples = 10000
        g = pints.toy.GaussianLogPDF([1, 1], [1, 1])
        samples = g.sample(nsamples)
        self.assertTrue(f.distance(samples) > 0)
        x = np.ones((100, 3))
        self.assertRaises(ValueError, f.distance, x)
        x = np.ones((100, 2, 2))
        self.assertRaises(ValueError, f.distance, x)

        # Samples with mean and variance closer to the true values should
        # score a smaller distance
        g1 = pints.toy.GaussianLogPDF([0.86935785, 2.59978086],
                                      [[1.80537968, 2.70257559],
                                       [2.70257559, 8.52658308]])
        samples1 = g1.sample(nsamples)
        self.assertTrue(f.distance(samples1) > 0)
        self.assertTrue(f.distance(samples) > f.distance(samples1))
18 | """ 19 | 20 | def test_values_and_sensitivities_underdamped(self): 21 | # test values and sensitivities for some parameter values 22 | model = pints.toy.SimpleHarmonicOscillatorModel() 23 | times = [0, 1, 2, 10] 24 | parameters = [2.5, -3.5, 0.3] 25 | values = model.simulate(parameters, times) 26 | self.assertEqual(len(values), len(times)) 27 | self.assertEqual(values[0], 2.5) 28 | self.assertAlmostEqual(values[1], -1.0894215, places=6) 29 | self.assertAlmostEqual(values[2], -2.8830890, places=6) 30 | self.assertAlmostEqual(values[3], -0.1849098, places=6) 31 | 32 | values1, sensitivities = model.simulateS1(parameters, times) 33 | self.assertTrue(np.all(values == values)) 34 | self.assertEqual(sensitivities.shape[0], len(times)) 35 | self.assertEqual(sensitivities.shape[1], 3) 36 | self.assertEqual(sensitivities[0, 0], 1) 37 | self.assertEqual(sensitivities[0, 1], 0) 38 | self.assertEqual(sensitivities[0, 2], 0) 39 | self.assertAlmostEqual(sensitivities[1, 0], 0.5822839, places=6) 40 | self.assertAlmostEqual(sensitivities[1, 1], 0.7271804, places=6) 41 | self.assertAlmostEqual(sensitivities[1, 2], 1.5291374, places=6) 42 | 43 | def test_values_and_sensitivities_criticaldamp(self): 44 | # test values and sensitivities for critical damping 45 | model = pints.toy.SimpleHarmonicOscillatorModel() 46 | times = [0, 0.5, 1, 1.5] 47 | parameters = [1, 2.3, 2] 48 | values = model.simulate(parameters, times) 49 | self.assertEqual(len(values), len(times)) 50 | self.assertEqual(values[0], 1) 51 | self.assertAlmostEqual(values[1], 1.6073062, places=6) 52 | self.assertAlmostEqual(values[2], 1.5818816, places=6) 53 | self.assertAlmostEqual(values[3], 1.3276245, places=6) 54 | 55 | values1, sensitivities = model.simulateS1(parameters, times) 56 | self.assertTrue(np.all(values == values)) 57 | self.assertEqual(sensitivities.shape[0], len(times)) 58 | self.assertEqual(sensitivities.shape[1], 3) 59 | self.assertEqual(sensitivities[0, 0], 1) 60 | self.assertEqual(sensitivities[0, 
class TestSimpleEggBoxLogPDF(unittest.TestCase):
    """
    Tests the simple egg box logpdf toy distribution.
    """
    def test_simple_egg_box_logpdf(self):
        # Basic properties
        f = pints.toy.SimpleEggBoxLogPDF()
        self.assertEqual(f.n_parameters(), 2)
        self.assertTrue(np.isscalar(f(np.zeros(2))))

        # Zero-valued construction arguments are rejected
        self.assertRaises(
            ValueError, pints.toy.SimpleEggBoxLogPDF, sigma=0)
        self.assertRaises(
            ValueError, pints.toy.SimpleEggBoxLogPDF, r=0)

    def test_sampling_and_divergence(self):
        # Tests :meth:`SimpleEggBoxLogPDF.kl_divergence()`.

        # Fix the seed for reproducible sampling
        np.random.seed(1)

        # Two distinct egg-box pdfs
        pdf_a = pints.toy.SimpleEggBoxLogPDF(2, 4)
        pdf_b = pints.toy.SimpleEggBoxLogPDF(3, 6)

        # Draw samples from each
        n = 100
        samples_a = pdf_a.sample(n)
        samples_b = pdf_b.sample(n)

        # Each pdf should judge its own samples closest
        s11 = pdf_a.kl_divergence(samples_a)
        s12 = pdf_a.kl_divergence(samples_b)
        self.assertLess(s11, s12)
        s21 = pdf_b.kl_divergence(samples_a)
        s22 = pdf_b.kl_divergence(samples_b)
        self.assertLess(s22, s21)

        # A missing mode should be heavily penalised
        samples_c = np.vstack((
            samples_b[samples_b[:, 0] > 0],  # Top half
            samples_b[samples_b[:, 1] < 0],  # Left half
        ))
        s23 = pdf_b.kl_divergence(samples_c)
        self.assertLess(s22, s23)
        self.assertGreater(s23 / s22, 100)

        # Sample counts must be non-negative
        self.assertRaises(ValueError, pdf_a.sample, -1)

        # Divergence input must be an (n, 2) array
        self.assertEqual(samples_a.shape, (n, 2))
        self.assertRaises(
            ValueError, pdf_a.kl_divergence, np.ones((n, 3)))
        self.assertRaises(
            ValueError, pdf_a.kl_divergence, np.ones((n, 2, 2)))

    def test_sensitivity_bounds_distance(self):
        # Tests :meth:`SimpleEggBoxLogPDF.evaluateS1()`,
        # :meth:`SimpleEggBoxLogPDF.suggested_bounds()` and
        # :meth:`SimpleEggBoxLogPDF.distance()`

        f = pints.toy.SimpleEggBoxLogPDF()
        fx, dfx = f.evaluateS1([-5, 2])
        self.assertEqual(fx, f([-5, 2]))
        self.assertAlmostEqual(fx, -13.781024134434123)
        self.assertAlmostEqual(dfx[0], -1.5)
        self.assertAlmostEqual(dfx[1], 2.9999991)
        self.assertTrue(np.array_equal(
            f.suggested_bounds(), [[-16.0, -16.0], [16.0, 16.0]]))
        samples = f.sample(100)
        self.assertTrue(f.kl_divergence(samples) > 0)
        self.assertEqual(f.kl_divergence(samples), f.distance(samples))

        f = pints.toy.SimpleEggBoxLogPDF(3, 5)
        fx, dfx = f.evaluateS1([-1, -7])
        self.assertEqual(fx, f([-1, -7]))
        self.assertAlmostEqual(fx, -46.269777289511559)
        self.assertAlmostEqual(dfx[0], -4.6662126879796366)
        self.assertAlmostEqual(dfx[1], -2.6666666666666639)
        self.assertTrue(np.array_equal(
            f.suggested_bounds(), [[-30.0, -30.0], [30.0, 30.0]]))
        samples = f.sample(100)
        self.assertTrue(f.kl_divergence(samples) > 0)
        self.assertEqual(f.kl_divergence(samples), f.distance(samples))
18 | """ 19 | 20 | def test_run(self): 21 | 22 | # Test basic properties 23 | model = pints.toy.SIRModel() 24 | self.assertEqual(model.n_parameters(), 3) 25 | self.assertEqual(model.n_outputs(), 2) 26 | 27 | # Test simulation 28 | x = model.suggested_parameters() 29 | times = model.suggested_times() 30 | values = model.simulate(x, times) 31 | self.assertEqual(values.shape, (len(times), model.n_outputs())) 32 | 33 | # Test suggested values 34 | v = model.suggested_values() 35 | self.assertEqual(v.shape, (len(times), model.n_outputs())) 36 | 37 | # Test setting intial conditions 38 | model = pints.toy.SIRModel([1, 1, 1]) 39 | 40 | # Must have 3 init cond. 41 | self.assertRaises( 42 | ValueError, pints.toy.SIRModel, [1, 1]) 43 | 44 | # Populations are never negative 45 | self.assertRaises( 46 | ValueError, pints.toy.SIRModel, [1, 1, -1]) 47 | 48 | def test_values(self): 49 | # value-based tests of model solution 50 | S0 = 100 51 | parameters = [0.05, 0.4, S0] 52 | times = np.linspace(0, 10, 101) 53 | model = pints.toy.SIRModel([S0, 10, 1]) 54 | values = model.simulate(parameters, times) 55 | self.assertEqual(values[0, 0], 10) 56 | self.assertEqual(values[0, 1], 1) 57 | self.assertAlmostEqual(values[1, 0], 15.61537, places=5) 58 | self.assertAlmostEqual(values[1, 1], 1.50528, places=5) 59 | self.assertAlmostEqual(values[100, 0], 2.45739, places=5) 60 | self.assertAlmostEqual(values[100, 1], 108.542466, places=5) 61 | 62 | 63 | if __name__ == '__main__': 64 | unittest.main() 65 | -------------------------------------------------------------------------------- /pints/tests/test_toy_stochastic_degradation_model.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # 3 | # Tests if the degradation (toy) model works. 4 | # 5 | # This file is part of PINTS (https://github.com/pints-team/pints/) which is 6 | # released under the BSD 3-clause license. 
class TestDegradationModel(unittest.TestCase):
    """
    Tests if the degradation (toy) model works.
    """
    def test_n_parameters(self):
        # One rate constant only
        model = DegradationModel(20)
        self.assertEqual(model.n_parameters(), 1)

    def test_simulation_length(self):
        # One output per requested time point
        model = DegradationModel(20)
        sim_times = np.linspace(0, 1, 100)
        rate = [0.1]
        values = model.simulate(rate, sim_times)
        self.assertEqual(len(values), 100)

    def test_propensities(self):
        # 20 molecules at rate 0.1 give a propensity of 2.0
        initial = 20
        rate = [0.1]
        model = DegradationModel(initial)
        self.assertTrue(
            np.allclose(
                model._propensities([initial], rate),
                np.array([2.0])))

    def test_suggested(self):
        model = DegradationModel(20)
        self.assertTrue(len(model.suggested_times()) == 101)
        self.assertTrue(model.suggested_parameters() > 0)

    def test_mean_variance(self):
        # Mean compared against the reference expression x0 * exp(-k t)
        model = DegradationModel(10)
        v_mean = model.mean([1], [5, 10])
        self.assertEqual(v_mean[0], 10 * np.exp(-5))
        self.assertEqual(v_mean[1], 10 * np.exp(-10))

        model = DegradationModel(20)
        v_mean = model.mean([5], [7.2])
        self.assertEqual(v_mean[0], 20 * np.exp(-7.2 * 5))

        # Variance compared against the reference expression
        model = DegradationModel(10)
        v_var = model.variance([1], [5, 10])
        self.assertEqual(v_var[0], 10 * (np.exp(5) - 1.0) / np.exp(10))
        self.assertAlmostEqual(v_var[1], 10 * (np.exp(10) - 1.0) / np.exp(20))

        model = DegradationModel(20)
        v_var = model.variance([2.0], [2.0])
        self.assertAlmostEqual(v_var[0], 20 * (np.exp(4) - 1.0) / np.exp(8))

    def test_errors(self):
        model = DegradationModel(20)
        good_times = np.linspace(0, 100, 101)

        # A negative rate constant is rejected
        self.assertRaisesRegex(ValueError, 'constant must be positive',
                               model.mean, [-0.1], good_times)
        self.assertRaisesRegex(ValueError, 'constant must be positive',
                               model.variance, [-0.1], good_times)

        # Negative times are rejected
        bad_times = np.linspace(-10, 10, 21)
        self.assertRaisesRegex(ValueError, 'Negative times',
                               model.mean, [0.1], bad_times)
        self.assertRaisesRegex(ValueError, 'Negative times',
                               model.variance, [0.1], bad_times)

        # Exactly one parameter is accepted
        self.assertRaisesRegex(ValueError, 'only 1 parameter',
                               model.mean, [0.1, 1], good_times)
        self.assertRaisesRegex(ValueError, 'only 1 parameter',
                               model.variance, [0.1, 1], good_times)
17 | """ 18 | def test_n_parameters(self): 19 | x_0 = [1e4, 2e3, 2e4, 0] 20 | model = MichaelisMentenModel(x_0) 21 | self.assertEqual(model.n_parameters(), 3) 22 | 23 | def test_simulation_length(self): 24 | x_0 = [1e4, 2e3, 2e4, 0] 25 | model = MichaelisMentenModel(x_0) 26 | times = np.linspace(0, 1, 100) 27 | k = [1e-5, 0.2, 0.2] 28 | values = model.simulate(k, times) 29 | self.assertEqual(len(values), 100) 30 | 31 | def test_propensities(self): 32 | x_0 = [1e4, 2e3, 2e4, 0] 33 | k = [1e-5, 0.2, 0.2] 34 | model = MichaelisMentenModel(x_0) 35 | self.assertTrue( 36 | np.allclose( 37 | model._propensities(x_0, k), 38 | np.array([200.0, 4000.0, 4000.0]))) 39 | 40 | def test_n_outputs(self): 41 | x_0 = [1e4, 2e3, 2e4, 0] 42 | model = MichaelisMentenModel(x_0) 43 | self.assertEqual(model.n_outputs(), 4) 44 | 45 | 46 | if __name__ == '__main__': 47 | unittest.main() 48 | -------------------------------------------------------------------------------- /pints/tests/test_toy_stochastic_production_degradation_model.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # 3 | # Tests if the production and degradation (toy) model works. 4 | # 5 | # This file is part of PINTS (https://github.com/pints-team/pints/) which is 6 | # released under the BSD 3-clause license. See accompanying LICENSE.md for 7 | # copyright notice and full license details. 8 | # 9 | import unittest 10 | import numpy as np 11 | from pints.toy.stochastic import ProductionDegradationModel 12 | 13 | 14 | class TestProductionDegradationModel(unittest.TestCase): 15 | """ 16 | Tests if the degradation (toy) model works. 
17 | """ 18 | def test_n_parameters(self): 19 | x_0 = 20 20 | model = ProductionDegradationModel(x_0) 21 | self.assertEqual(model.n_parameters(), 2) 22 | 23 | def test_simulation_length(self): 24 | x_0 = 20 25 | model = ProductionDegradationModel(x_0) 26 | times = np.linspace(0, 1, 100) 27 | k = [0.1, 0.2] 28 | values = model.simulate(k, times) 29 | self.assertEqual(len(values), 100) 30 | 31 | def test_propensities(self): 32 | x_0 = 20 33 | k = [0.1, 0.2] 34 | model = ProductionDegradationModel(x_0) 35 | self.assertTrue( 36 | np.allclose( 37 | model._propensities([x_0], k), 38 | np.array([2.0, 0.2]))) 39 | 40 | def test_suggested(self): 41 | model = ProductionDegradationModel(20) 42 | times = model.suggested_times() 43 | parameters = model.suggested_parameters() 44 | self.assertTrue(len(times) == 101) 45 | self.assertTrue(np.all(parameters > 0)) 46 | 47 | 48 | if __name__ == '__main__': 49 | unittest.main() 50 | -------------------------------------------------------------------------------- /pints/tests/test_toy_stochastic_schlogl_model.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # 3 | # Tests if the Schlogl (toy) model works. 4 | # 5 | # This file is part of PINTS (https://github.com/pints-team/pints/) which is 6 | # released under the BSD 3-clause license. See accompanying LICENSE.md for 7 | # copyright notice and full license details. 8 | # 9 | import unittest 10 | import numpy as np 11 | from pints.toy.stochastic import SchloglModel 12 | 13 | 14 | class TestSchloglModel(unittest.TestCase): 15 | """ 16 | Tests if the degradation (toy) model works. 
17 | """ 18 | def test_n_parameters(self): 19 | x_0 = 20 20 | model = SchloglModel(x_0) 21 | self.assertEqual(model.n_parameters(), 4) 22 | 23 | def test_simulation_length(self): 24 | x_0 = 20 25 | model = SchloglModel(x_0) 26 | times = np.linspace(0, 1, 100) 27 | k = [0.1, 0.2, 0.3, 0.4] 28 | values = model.simulate(k, times) 29 | self.assertEqual(len(values), 100) 30 | 31 | def test_propensities(self): 32 | x_0 = 20 33 | model = SchloglModel(x_0) 34 | k = model.suggested_parameters() 35 | self.assertTrue( 36 | np.allclose( 37 | model._propensities([x_0], k), 38 | np.array([68.4, 1.71, 2200.0, 750.0]))) 39 | 40 | def test_suggested(self): 41 | model = SchloglModel(20) 42 | times = model.suggested_times() 43 | parameters = model.suggested_parameters() 44 | self.assertTrue(len(times) == 101) 45 | self.assertTrue(np.all(parameters > 0)) 46 | 47 | 48 | if __name__ == '__main__': 49 | unittest.main() 50 | -------------------------------------------------------------------------------- /pints/tests/test_vector.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # 3 | # Tests the basic methods of the CMA-ES optimiser. 4 | # 5 | # This file is part of PINTS (https://github.com/pints-team/pints/) which is 6 | # released under the BSD 3-clause license. See accompanying LICENSE.md for 7 | # copyright notice and full license details. 
class TestVector(unittest.TestCase):
    """
    Tests conversion to a read-only vector type
    """

    def test_vector(self):
        # Valid 1d array inputs of various lengths
        for data in ([1, 2, 3], [1], []):
            pints.vector(np.array(data))

        # Higher dimensional arrays are accepted when only one axis has
        # length greater than one
        base = np.array([1, 2, 3])
        expected = pints.vector(base)
        for shape in ((3, 1, 1, 1, 1), (1, 3, 1, 1, 1), (1, 1, 1, 1, 3)):
            converted = pints.vector(base.reshape(shape))
            self.assertTrue(np.all(expected == converted))

        # Plain lists are accepted too
        self.assertTrue(np.all(expected == pints.vector([1, 2, 3])))
        pints.vector([4])
        pints.vector([])

        # Arrays with more than one non-degenerate axis are rejected
        grid = np.array([4, 5, 6, 3, 2, 4])
        self.assertRaises(ValueError, pints.vector, grid.reshape((2, 3)))
        self.assertRaises(ValueError, pints.vector, grid.reshape((3, 2)))
        pints.vector(grid.reshape((6, 1)))

        # Scalars become length-1 vectors
        scalar = pints.vector(5)
        self.assertEqual(len(scalar), 1)
        self.assertEqual(scalar[0], 5)

        # The resulting vector is read-only
        def assign():
            scalar[0] = 10
        self.assertRaises(ValueError, assign)
class TestVersion(unittest.TestCase):
    """
    Tests the version number information.
    """

    def test_version_properties(self):
        # The integer version is a 3-tuple of ints
        version_tuple = pints.__version_int__
        self.assertEqual(type(version_tuple), tuple)
        self.assertEqual(len(version_tuple), 3)
        for part in version_tuple:
            self.assertEqual(type(part), int)

        # The string version is the integer version joined with dots
        self.assertEqual(
            pints.__version__,
            '.'.join([str(part) for part in version_tuple])
        )

    def test_version_method(self):
        # version() returns the tuple; version(True) a formatted string
        self.assertEqual(pints.version(), pints.__version_int__)
        self.assertEqual(pints.version(True), 'Pints ' + pints.__version__)
8 | # 9 | from ._toy_classes import ToyLogPDF, ToyModel, ToyODEModel 10 | 11 | from ._annulus import AnnulusLogPDF 12 | from ._beeler_reuter_model import ActionPotentialModel 13 | from ._cone import ConeLogPDF 14 | from ._constant_model import ConstantModel 15 | from ._eight_schools import EightSchoolsLogPDF 16 | from ._fitzhugh_nagumo_model import FitzhughNagumoModel 17 | from ._gaussian import GaussianLogPDF 18 | from ._german_credit import GermanCreditLogPDF 19 | from ._german_credit_hierarchical import GermanCreditHierarchicalLogPDF 20 | from ._goodwin_oscillator_model import GoodwinOscillatorModel 21 | from ._hes1_michaelis_menten import Hes1Model 22 | from ._hh_ik_model import HodgkinHuxleyIKModel 23 | from ._high_dimensional_gaussian import HighDimensionalGaussianLogPDF 24 | from ._logistic_model import LogisticModel 25 | from ._lotka_volterra_model import LotkaVolterraModel 26 | from ._multimodal_gaussian import MultimodalGaussianLogPDF 27 | from ._neals_funnel import NealsFunnelLogPDF 28 | from ._parabola import ParabolicError 29 | from ._repressilator_model import RepressilatorModel 30 | from ._rosenbrock import RosenbrockError, RosenbrockLogPDF 31 | from ._sho_model import SimpleHarmonicOscillatorModel 32 | from ._simple_egg_box import SimpleEggBoxLogPDF 33 | from ._sir_model import SIRModel 34 | from ._twisted_gaussian_banana import TwistedGaussianLogPDF 35 | -------------------------------------------------------------------------------- /pints/toy/_constant_model.py: -------------------------------------------------------------------------------- 1 | # 2 | # Constant model with multiple outputs. 3 | # 4 | # This file is part of PINTS (https://github.com/pints-team/pints/) which is 5 | # released under the BSD 3-clause license. See accompanying LICENSE.md for 6 | # copyright notice and full license details. 
class ConstantModel(pints.ForwardModelS1):
    r"""
    Toy model that's constant over time, linear over the parameters, mostly
    useful for unit testing.

    For an `n`-dimensional model, evaluated with parameters
    ``p = [p_1, p_2, ..., p_n]``, the simulated values are time-invariant, so
    that for any time ``t``

    .. math::
        f(t) = (p_1, 2 p_2, 3 p_3, ..., n p_n)

    The derivatives with respect to the parameters are time-invariant, and
    simply equal

    .. math::

        \frac{\partial{f_i(t)}}{dp_j} =
            \begin{cases} i, i = j\\0, i \neq j \end{cases}

    Extends :class:`pints.ForwardModelS1`.

    Parameters
    ----------
    n : int
        The number of parameters (and outputs) the model should have.
    force_multi_output : boolean
        Set to ``True`` to always return output of the shape
        ``(n_times, n_outputs)``, even if ``n_outputs == 1``.

    Example
    -------
    ::

        times = np.linspace(0, 1, 100)
        m = pints.ConstantModel(2)
        m.simulate([1, 2], times)

    In this example, the returned output is ``[1, 4]`` at every point in time.
    """

    def __init__(self, n, force_multi_output=False):
        super(ConstantModel, self).__init__()

        n = int(n)
        if n < 1:
            raise ValueError('Number of parameters must be 1 or greater.')
        self._n = n
        # Per-output multipliers (1, 2, ..., n)
        self._r = np.arange(1, 1 + n)

        # Single-output models return a 1d array, unless forced otherwise
        self._reshape = (n == 1 and not force_multi_output)

    def n_outputs(self):
        """ See :meth:`pints.ForwardModel.n_outputs()`. """
        return self._n

    def n_parameters(self):
        """ See :meth:`pints.ForwardModel.n_parameters()`. """
        return self._n

    def simulate(self, parameters, times):
        """ See :meth:`pints.ForwardModel.simulate()`. """

        # Validate input
        parameters = np.asarray(parameters)
        times = np.asarray(times)
        if np.any(times < 0):
            raise ValueError('Negative times are not allowed.')
        if len(parameters) != self._n:
            raise ValueError('Expected ' + str(self._n) + ' parameters.')
        if not np.all(np.isfinite(parameters)):
            raise ValueError('All parameters must be finite.')

        # Same row (p_1, 2 p_2, ..., n p_n) repeated for every time point
        row = parameters.reshape((1, self._n)) * self._r
        out = np.tile(row, (len(times), 1))
        if self._reshape:
            out = out.reshape((len(times), ))
        return out

    def simulateS1(self, parameters, times):
        """ See :meth:`pints.ForwardModel.simulateS1()`. """
        y = self.simulate(parameters, times)
        if self._reshape:
            # Single output: df/dp = 1 at every time
            dy = np.ones(len(times))
        else:
            # Output has shape (n_times, n_outputs, n_parameters):
            # at every time point the Jacobian is diag(1, 2, ..., n)
            dy = np.tile(np.diag(self._r), (len(times), 1, 1))
        return (y, dy)
class LogisticModel(pints.ForwardModelS1, ToyModel):

    r"""
    Logistic model of population growth [1]_.

    .. math::
        f(t) &= \frac{k}{1+(k/p_0 - 1) \exp(-r t)} \\
        \frac{\partial f(t)}{\partial r} &=
            \frac{k t (k / p_0 - 1) \exp(-r t)}
                 {((k/p_0-1) \exp(-r t) + 1)^2} \\
        \frac{\partial f(t)}{ \partial k} &= -\frac{k \exp(-r t)}
            {p_0 ((k/p_0-1)\exp(-r t) + 1)^2}
            + \frac{1}{(k/p_0 - 1)\exp(-r t) + 1}

    Has two model parameters: A growth rate :math:`r` and a carrying capacity
    :math:`k`. The initial population size :math:`p_0 = f(0)` is a fixed
    (known) parameter in the model.

    Extends :class:`pints.ForwardModel`, :class:`pints.toy.ToyModel`.

    Parameters
    ----------
    initial_population_size : float
        Sets the initial population size :math:`p_0`.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Population_growth
    """

    def __init__(self, initial_population_size=2):
        super(LogisticModel, self).__init__()
        self._p0 = float(initial_population_size)
        if self._p0 < 0:
            raise ValueError('Population size cannot be negative.')

    def n_parameters(self):
        """ See :meth:`pints.ForwardModel.n_parameters()`. """
        return 2

    def simulate(self, parameters, times):
        """ See :meth:`pints.ForwardModel.simulate()`. """
        return self._simulate(parameters, times, False)

    def simulateS1(self, parameters, times):
        """ See :meth:`pints.ForwardModelS1.simulateS1()`. """
        return self._simulate(parameters, times, True)

    def _simulate(self, parameters, times, sensitivities):
        # Unpack growth rate and carrying capacity
        r, k = [float(x) for x in parameters]
        times = np.asarray(times)
        if np.any(times < 0):
            raise ValueError('Negative times are not allowed.')

        # Degenerate cases (zero initial population, negative capacity)
        # give an identically-zero population
        if self._p0 == 0 or k < 0:
            zeros = np.zeros(times.shape)
            if not sensitivities:
                return zeros
            return zeros, np.zeros((len(times), len(parameters)))

        decay = np.exp(-r * times)
        ratio = (k / self._p0 - 1)
        denominator = ratio * decay + 1

        population = k / denominator

        if not sensitivities:
            return population

        # Partial derivatives with respect to (r, k), per the class docstring
        jacobian = np.empty((len(times), len(parameters)))
        jacobian[:, 0] = k * times * ratio * decay / denominator**2
        jacobian[:, 1] = (
            -k * decay / (self._p0 * denominator**2) + 1 / denominator)
        return population, jacobian

    def suggested_parameters(self):
        """ See :meth:`pints.toy.ToyModel.suggested_parameters()`. """

        return np.array([0.1, 50])

    def suggested_times(self):
        """ See :meth:`pints.toy.ToyModel.suggested_times()`. """

        return np.linspace(0, 100, 100)
26 | """ 27 | def __init__(self, c=[0, 0]): 28 | self._c = pints.vector(c) 29 | self._n = len(self._c) 30 | 31 | def __call__(self, x): 32 | y = pints.vector(x).reshape(self._n) 33 | return np.sum((self._c - y)**2) 34 | 35 | def evaluateS1(self, x): 36 | """ See :meth:`pints.ErrorMeasure.evaluateS1()`. """ 37 | x = pints.vector(x) - self._c 38 | return np.sum(x**2), 2 * x 39 | 40 | def n_parameters(self): 41 | """ See :meth:`pints.ErrorMeasure.n_parameters()`. """ 42 | return self._n 43 | 44 | def optimum(self): 45 | """ 46 | Returns the global optimum for this function. 47 | """ 48 | return np.array(self._c, copy=True) 49 | -------------------------------------------------------------------------------- /pints/toy/stochastic/__init__.py: -------------------------------------------------------------------------------- 1 | # 2 | # Root of the stochastic toy module. 3 | # Provides a number of stochastic toy models for tests of Pints' functions. 4 | # 5 | # This file is part of PINTS (https://github.com/pints-team/pints/) which is 6 | # released under the BSD 3-clause license. See accompanying LICENSE.md for 7 | # copyright notice and full license details. 8 | # 9 | from ._markov_jump_model import MarkovJumpModel # noqa 10 | from ._michaelis_menten_model import MichaelisMentenModel # noqa 11 | from ._degradation_model import DegradationModel # noqa 12 | from ._logistic_model import LogisticModel # noqa 13 | from ._production_degradation_model import ProductionDegradationModel # noqa 14 | from ._schlogl_model import SchloglModel # noqa -------------------------------------------------------------------------------- /pints/toy/stochastic/_degradation_model.py: -------------------------------------------------------------------------------- 1 | # 2 | # Stochastic degradation toy model. 3 | # 4 | # This file is part of PINTS (https://github.com/pints-team/pints/) which is 5 | # released under the BSD 3-clause license. 
class DegradationModel(MarkovJumpModel):
    r"""
    Stochastic degradation model of a single chemical reaction starting from
    an initial molecule count :math:`A(0)` and degrading to 0 with a fixed rate
    :math:`k`:

    .. math::
        A \xrightarrow{k} 0

    Extends :class:`pints.MarkovJumpModel`.

    Parameters
    ----------
    initial_molecule_count
        The initial molecule count :math:`A(0)`.
    """
    def __init__(self, initial_molecule_count=20):
        # Single reaction that removes one molecule
        V = [[-1]]
        init_list = [initial_molecule_count]
        super(DegradationModel, self).__init__(
            init_list, V, self._propensities)

    @staticmethod
    def _propensities(xs, ks):
        # Mass-action propensity: x * k
        return [xs[0] * ks[0]]

    def mean(self, parameters, times):
        r"""
        Returns the deterministic mean of infinitely many stochastic
        simulations, which follows :math:`A(0) \exp(-kt)`.
        """
        parameters = np.asarray(parameters)
        if len(parameters) != self.n_parameters():
            raise ValueError('This model should have only 1 parameter.')
        k = parameters[0]

        if k <= 0:
            raise ValueError('Rate constant must be positive.')

        times = np.asarray(times)
        if np.any(times < 0):
            raise ValueError('Negative times are not allowed.')

        mean = self._x0 * np.exp(-k * times)
        return mean

    def variance(self, parameters, times):
        r"""
        Returns the deterministic variance of infinitely many stochastic
        simulations, which follows :math:`\exp(-2kt)(-1 + \exp(kt))A(0)`.
        """
        parameters = np.asarray(parameters)
        if len(parameters) != self.n_parameters():
            raise ValueError('This model should have only 1 parameter.')
        k = parameters[0]

        if k <= 0:
            raise ValueError('Rate constant must be positive.')

        times = np.asarray(times)
        if np.any(times < 0):
            raise ValueError('Negative times are not allowed.')

        variance = np.exp(-2 * k * times) * (-1 + np.exp(k * times)) * self._x0
        return variance

    def suggested_parameters(self):
        """ See :meth:`pints.toy.ToyModel.suggested_parameters()`. """
        return np.array([0.1])

    def suggested_times(self):
        """ See :meth:`pints.toy.ToyModel.suggested_times()`. """
        return np.linspace(0, 100, 101)
class LogisticModel(MarkovJumpModel):
    r"""
    This model describes the growth of a population of individuals, where the
    birth rate per capita, initially :math:`b_0`, decreases to :math:`0` as the
    population size, :math:`\mathcal{C}(t)`, starting from an initial
    population size, :math:`n_0`, approaches a carrying capacity, :math:`k`.
    This process follows a rate according to [1]_

    .. math::
        A \xrightarrow{b_0(1-\frac{\mathcal{C}(t)}{k})} 2A.

    The model is simulated using the Gillespie stochastic simulation algorithm
    [2]_, [3]_.

    *Extends:* :class:`pints.ForwardModel`, :class:`pints.toy.ToyModel`.

    Parameters
    ----------
    initial_molecule_count : float
        Sets the initial population size :math:`n_0`.

    References
    ----------
    .. [1] Simpson, M. et al. 2019. Process noise distinguishes between
           indistinguishable population dynamics. bioRxiv.
           https://doi.org/10.1101/533182
    .. [2] Gillespie, D. 1976. A General Method for Numerically Simulating the
           Stochastic Time Evolution of Coupled Chemical Reactions.
           Journal of Computational Physics. 22 (4): 403-434.
           https://doi.org/10.1016/0021-9991(76)90041-3
    .. [3] Erban R. et al. 2007. A practical guide to stochastic simulations
           of reaction-diffusion processes. arXiv.
           https://arxiv.org/abs/0704.1908v2
    """
    def __init__(self, initial_molecule_count=50):
        # Single birth reaction that adds one individual
        V = [[1]]
        init_list = [initial_molecule_count]
        super(LogisticModel, self).__init__(init_list,
                                            V, self._propensities)

    def n_parameters(self):
        """
        Default value must be overwritten because the number of parameters
        does not correspond with the number of equations.
        """
        return 2

    @staticmethod
    def _propensities(xs, ks):
        # Birth rate b (1 - C/k) C, with ks = (b, k) and xs = (C,)
        return [
            ks[0] * (1 - xs[0] / ks[1]) * xs[0],
        ]

    def mean(self, parameters, times):
        r"""
        Computes the deterministic mean of infinitely many stochastic
        simulations with times :math:`t` and parameters (:math:`b`, :math:`k`),
        which follows:
        :math:`\frac{kC(0)}{C(0) + (k - C(0)) \exp(-bt)}`.

        Returns an array with the same length as `times`.
        """
        parameters = np.asarray(parameters)
        if len(parameters) != self.n_parameters():
            raise ValueError('This model should have only 2 parameters.')

        birth_rate = parameters[0]
        if birth_rate <= 0:
            raise ValueError('Rate constant must be positive.')

        capacity = parameters[1]
        if capacity <= 0:
            raise ValueError("Carrying capacity must be positive")

        times = np.asarray(times)
        if np.any(times < 0):
            raise ValueError('Negative times are not allowed.')

        n0 = self._x0
        return (n0 * capacity) / (
            n0 + np.exp(-birth_rate * times) * (capacity - n0))

    def suggested_parameters(self):
        """ See :meth:`pints.toy.ToyModel.suggested_parameters()`. """
        return np.array([0.1, 500])

    def suggested_times(self):
        """ See :meth:`pints.toy.ToyModel.suggested_times()`."""
        return np.linspace(0, 100, 101)
class MichaelisMentenModel(MarkovJumpModel):
    r"""
    Simulates the Michaelis Menten Dynamics using Gillespie.

    This system of reactions involves 4 chemical species with
    initial counts ``initial_molecule_count``, and reactions:

    - X1+X2 -> X3 with rate k1
    - X3 -> X1+X2 with rate k2
    - X3 -> X2+X4 with rate k3

    Parameters
    ----------
    initial_molecule_count : Array of size 4 of integers
        Sets the initial molecule count of each of the four species
        X1, X2, X3, X4.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Michaelis-Menten_kinetics
    """
    def __init__(self, initial_molecule_count):
        # Stoichiometry matrix: one row per reaction, one column per species
        V = [[-1, -1, 1, 0],
             [1, 1, -1, 0],
             [0, 1, -1, 1]]
        super(MichaelisMentenModel, self).__init__(initial_molecule_count,
                                                   V, self._propensities)

    @staticmethod
    def _propensities(xs, ks):
        # Mass-action propensities for the three reactions above
        return [
            xs[0] * xs[1] * ks[0],
            xs[2] * ks[1],
            xs[2] * ks[2]
        ]

    def n_outputs(self):
        """ See :meth:`pints.ForwardModel.n_outputs()`. """
        return 4
29 | """ 30 | def __init__(self, initial_molecule_count=20): 31 | V = [[-1], [1]] 32 | init_list = [initial_molecule_count] 33 | super(ProductionDegradationModel, self).__init__(init_list, 34 | V, self._propensities) 35 | 36 | @staticmethod 37 | def _propensities(xs, ks): 38 | return [ 39 | xs[0] * ks[0], 40 | ks[1] 41 | ] 42 | 43 | def suggested_parameters(self): 44 | """ See :meth:`pints.toy.ToyModel.suggested_parameters()`. """ 45 | return np.array([0.1, 0.2]) 46 | 47 | def suggested_times(self): 48 | """ See "meth:`pints.toy.ToyModel.suggested_times()`.""" 49 | return np.linspace(0, 100, 101) 50 | -------------------------------------------------------------------------------- /pints/toy/stochastic/_schlogl_model.py: -------------------------------------------------------------------------------- 1 | # 2 | # Schlogl's stochastic toy model. 3 | # 4 | # This file is part of PINTS (https://github.com/pints-team/pints/) which is 5 | # released under the BSD 3-clause license. See accompanying LICENSE.md for 6 | # copyright notice and full license details. 7 | # 8 | from . import MarkovJumpModel 9 | 10 | import numpy as np 11 | 12 | 13 | class SchloglModel(MarkovJumpModel): 14 | r""" 15 | Schlogl's system of chemical reactions has a single type of molecules and 16 | starts with an initial count :math:`A(0)`. The evolution of the molecule 17 | count is defined through the rates :math:`k_1`, :math:`k_2`, :math:`k_3` 18 | and :math:`k_4` and the following equations: 19 | 20 | ..math:: 21 | 2A \xrightarrow{k_1} 3A 22 | 3A \xrightarrow{k_2} 2A 23 | 0 \xrightarrow{k_3} A 24 | A \xrightarrow{k_4} 0 25 | 26 | Extends :class:`pints.ForwardModel`, :class:`pints.toy.ToyModel`. 27 | 28 | Parameters 29 | ---------- 30 | initial_molecule_count 31 | The initial molecule count :math:`A(0)`. 
32 | """ 33 | def __init__(self, initial_molecule_count=20): 34 | V = [[1], [-1], [1], [-1]] 35 | init_list = [initial_molecule_count] 36 | super(SchloglModel, self).__init__(init_list, 37 | V, self._propensities) 38 | 39 | @staticmethod 40 | def _propensities(xs, ks): 41 | return [ 42 | xs[0] * (xs[0] - 1) * ks[0], 43 | xs[0] * (xs[0] - 1) * (xs[0] - 2) * ks[1], 44 | ks[2], 45 | xs[0] * ks[3] 46 | ] 47 | 48 | def suggested_parameters(self): 49 | """ See :meth:`pints.toy.ToyModel.suggested_parameters()`. """ 50 | return np.array([0.18, 0.00025, 2200, 37.5]) 51 | 52 | def suggested_times(self): 53 | """ See "meth:`pints.toy.ToyModel.suggested_times()`.""" 54 | return np.linspace(0, 100, 101) 55 | -------------------------------------------------------------------------------- /pints/version: -------------------------------------------------------------------------------- 1 | 0,5,1 2 | -------------------------------------------------------------------------------- /postBuild: -------------------------------------------------------------------------------- 1 | pip install . 2 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # 2 | # Pints setuptools script 3 | # 4 | # This file is part of PINTS (https://github.com/pints-team/pints/) which is 5 | # released under the BSD 3-clause license. See accompanying LICENSE.md for 6 | # copyright notice and full license details. 
# Read version number from file
def load_version():
    """
    Reads ``pints/version`` (comma-separated integers) and returns it as a
    dotted version string, e.g. ``'1.2.3'``.

    Raises a ``RuntimeError`` if the file cannot be read or parsed.
    """
    try:
        import os
        here = os.path.abspath(os.path.dirname(__file__))
        with open(os.path.join(here, 'pints', 'version'), 'r') as f:
            parts = f.read().strip().split(',')
        return '.'.join([str(int(part)) for part in parts])
    except Exception as e:
        raise RuntimeError('Unable to read version number (' + str(e) + ').')
69 | 'matplotlib>=1.5', 70 | 'tabulate', 71 | 'threadpoolctl', 72 | ], 73 | extras_require={ 74 | 'docs': [ 75 | 'sphinx>=1.5, !=1.7.3', # For doc generation 76 | ], 77 | 'dev': [ 78 | 'flake8>=3', # For code style checking 79 | 'jupyter', # For documentation and testing 80 | 'nbconvert', 81 | 'traitlets', 82 | ], 83 | 'stan': [ 84 | 'pystan>=3', 85 | ] 86 | }, 87 | python_requires='>=3.8', 88 | ) 89 | --------------------------------------------------------------------------------