├── MANIFEST.in
├── docs
├── user_guide
│ ├── fundamentals
│ │ ├── problem_formulation.py
│ │ └── index.rst
│ ├── advanced_topics
│ │ ├── problem_formulation.py
│ │ └── index.rst
│ ├── zbibliography.rst
│ ├── main_usage
│ │ ├── pseudo_spectral_projection.py
│ │ ├── index.rst
│ │ └── monte_carlo_integration.py
│ ├── index.rst
│ ├── polynomial
│ │ └── index.rst
│ └── fill_notebooks.sh
├── _static
│ ├── chaospy_logo.png
│ └── chaospy_logo2.png
├── reference
│ ├── polynomial
│ │ ├── index.rst
│ │ ├── baseclass.rst
│ │ └── constructor.rst
│ ├── descriptive
│ │ ├── index.rst
│ │ ├── miscellaneous.rst
│ │ ├── sensitivity_indices.rst
│ │ └── statistical_moment.rst
│ ├── distribution
│ │ ├── index.rst
│ │ ├── utils.rst
│ │ ├── operator.rst
│ │ ├── advanced.rst
│ │ ├── baseclass.rst
│ │ └── collection.rst
│ ├── index.rst
│ ├── recurrence.rst
│ ├── high_level_interface.rst
│ ├── sampling.rst
│ └── quadrature
│ │ └── index.rst
├── requirements.txt
├── Makefile
├── _templates
│ ├── distribution.rst
│ └── ndpoly.rst
├── index.rst
└── conf.py
├── requirements.txt
├── .gitignore
├── chaospy
├── descriptives
│ ├── correlation
│ │ ├── __init__.py
│ │ ├── auto_correlation.py
│ │ ├── pearson.py
│ │ └── spearman.py
│ ├── sensitivity
│ │ └── __init__.py
│ ├── __init__.py
│ ├── standard_deviation.py
│ ├── variance.py
│ ├── skewness.py
│ ├── expected.py
│ ├── covariance.py
│ ├── quantity_of_interest.py
│ ├── kurtosis.py
│ └── percentile.py
├── distributions
│ ├── sampler
│ │ ├── __init__.py
│ │ ├── sequences
│ │ │ ├── __init__.py
│ │ │ ├── primes.py
│ │ │ ├── korobov.py
│ │ │ ├── hammersley.py
│ │ │ ├── van_der_corput.py
│ │ │ └── grid.py
│ │ ├── latin_hypercube.py
│ │ └── antithetic.py
│ ├── __init__.py
│ ├── baseclass
│ │ ├── __init__.py
│ │ ├── slice_.py
│ │ └── copula.py
│ ├── operators
│ │ ├── __init__.py
│ │ └── iid.py
│ ├── copulas
│ │ └── __init__.py
│ ├── kernel
│ │ └── __init__.py
│ └── collection
│ │ ├── anglit.py
│ │ ├── log_weibull.py
│ │ ├── fisk.py
│ │ ├── levy.py
│ │ ├── bradford.py
│ │ ├── pareto1.py
│ │ ├── gompertz.py
│ │ ├── pareto2.py
│ │ ├── log_gamma.py
│ │ ├── logistic.py
│ │ ├── folded_normal.py
│ │ ├── frechet.py
│ │ ├── log_laplace.py
│ │ ├── trunc_exponential.py
│ │ ├── hyperbolic_secant.py
│ │ ├── nakagami.py
│ │ ├── uniform.py
│ │ ├── laplace.py
│ │ ├── weibull.py
│ │ ├── mv_student_t.py
│ │ ├── normal.py
│ │ ├── mielke.py
│ │ ├── exponential_power.py
│ │ ├── double_gamma.py
│ │ ├── alpha.py
│ │ ├── __init__.py
│ │ ├── exponential_weibull.py
│ │ ├── power_normal.py
│ │ ├── generalized_half_logistic.py
│ │ ├── double_weibull.py
│ │ ├── cauchy.py
│ │ ├── reciprocal.py
│ │ ├── chi_squared.py
│ │ ├── log_uniform.py
│ │ ├── inverse_gamma.py
│ │ ├── folded_cauchy.py
│ │ └── fatigue_life.py
├── recurrence
│ ├── __init__.py
│ ├── jacobi.py
│ └── chebyshev.py
├── external
│ └── __init__.py
├── expansion
│ ├── laguerre.py
│ ├── __init__.py
│ ├── legendre.py
│ ├── jacobi.py
│ ├── gegenbauer.py
│ └── hermite.py
├── __init__.py
└── quadrature
│ ├── laguerre.py
│ ├── grid.py
│ ├── hermite.py
│ ├── gegenbauer.py
│ └── jacobi.py
├── polychaos.md
├── codecov.yml
├── .readthedocs.yaml
├── tests
├── test_monte_carlo_integration.py
├── poly
│ └── test_numpoly.py
├── distributions
│ ├── conftest.py
│ ├── collection
│ │ └── test_triangle.py
│ ├── operators
│ │ ├── test_operators.py
│ │ └── test_truncation.py
│ ├── test_approximation.py
│ ├── test_arithmetics.py
│ ├── test_baseclass.py
│ ├── test_1d_dependencies.py
│ └── test_2d_dependencies.py
├── test_point_collocation.py
├── recurrence
│ ├── conftest.py
│ ├── test_stieltjes_method.py
│ └── test_quadrature_creation.py
├── test_lagrange_polynomials.py
├── test_intrusive_galerkin.py
├── test_regression_models.py
├── test_pseudo_spectral_projection.py
└── test_orthogonal_expansion.py
├── .github
├── ISSUE_TEMPLATE
│ ├── usage_question.md
│ ├── bug_report.md
│ └── functionality_request.md
└── workflows
│ └── pipe.yml
├── conftest.py
├── LICENSE.txt
└── pyproject.toml
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include README.rst
2 | include LICENSE.txt
3 | global-exclude *.pyc
4 |
--------------------------------------------------------------------------------
/docs/user_guide/fundamentals/problem_formulation.py:
--------------------------------------------------------------------------------
1 | ../main_usage/problem_formulation.py
--------------------------------------------------------------------------------
/docs/user_guide/advanced_topics/problem_formulation.py:
--------------------------------------------------------------------------------
1 | ../main_usage/problem_formulation.py
--------------------------------------------------------------------------------
/docs/user_guide/zbibliography.rst:
--------------------------------------------------------------------------------
1 | Bibliography
2 | ============
3 |
4 | .. bibliography::
5 | :all:
6 |
--------------------------------------------------------------------------------
/docs/_static/chaospy_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonathf/chaospy/HEAD/docs/_static/chaospy_logo.png
--------------------------------------------------------------------------------
/docs/_static/chaospy_logo2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonathf/chaospy/HEAD/docs/_static/chaospy_logo2.png
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | # This file is used by Binder and Github graph
2 | chaospy
3 | numpoly
4 | numpy
5 | scipy
6 | matplotlib
7 | importlib-metadata
8 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .coverage
2 | __pycache__
3 | *.egg-info
4 | *.pyc
5 | .ipynb_checkpoints
6 | .mypy_cache
7 | .envrc
8 | docs/.build
9 | docs/api
10 | build
11 |
--------------------------------------------------------------------------------
/chaospy/descriptives/correlation/__init__.py:
--------------------------------------------------------------------------------
1 | """Correlation operators."""
2 | from .pearson import Corr
3 | from .auto_correlation import Acf
4 | from .spearman import Spearman
5 |
--------------------------------------------------------------------------------
/docs/reference/polynomial/index.rst:
--------------------------------------------------------------------------------
1 | .. _polynomials:
2 |
3 | Polynomial
4 | ==========
5 |
6 | .. toctree::
7 |
8 | constructor
9 | baseclass
10 | helper_function
11 |
--------------------------------------------------------------------------------
/polychaos.md:
--------------------------------------------------------------------------------
1 | **Note:** This package was previously named `polychaos` and hosted at
2 | [Bitbucket](https://bitbucket.org/jonathf/polychaos). That version is
3 | no longer maintained.
4 |
--------------------------------------------------------------------------------
/docs/reference/descriptive/index.rst:
--------------------------------------------------------------------------------
1 | Descriptive statistics
2 | ======================
3 |
4 | .. toctree::
5 |
6 | statistical_moment
7 | sensitivity_indices
8 | miscellaneous
9 |
--------------------------------------------------------------------------------
/docs/reference/distribution/index.rst:
--------------------------------------------------------------------------------
1 | Distribution
2 | ============
3 |
4 | .. toctree::
5 |
6 | baseclass
7 | collection
8 | operator
9 | advanced
10 | utils
11 |
--------------------------------------------------------------------------------
/docs/requirements.txt:
--------------------------------------------------------------------------------
1 | nbsphinx==0.8.5
2 | markupsafe==2.0.1
3 | pydata_sphinx_theme==0.6.3
4 | Sphinx<4.3
5 | sphinxcontrib-bibtex==2.2.0
6 |
7 | git+https://github.com/jonathf/numpoly.git@master
8 |
--------------------------------------------------------------------------------
/codecov.yml:
--------------------------------------------------------------------------------
1 | codecov:
2 | branch: master
3 | archive:
4 | uploads: no
5 |
6 | coverage:
7 | precision: 1
8 | range: 80...100
9 |
10 | comment:
11 | layout: "header, diff, changes"
12 |
--------------------------------------------------------------------------------
/docs/reference/descriptive/miscellaneous.rst:
--------------------------------------------------------------------------------
1 | Miscellaneous
2 | -------------
3 |
4 | .. currentmodule:: chaospy
5 |
6 | .. autosummary::
7 | :toctree: ../../api
8 |
9 | Perc
10 | QoI_Dist
11 |
--------------------------------------------------------------------------------
/docs/user_guide/fundamentals/index.rst:
--------------------------------------------------------------------------------
1 |
2 | Fundamentals
3 | ============
4 |
5 | .. toctree::
6 |
7 | probability_distributions
8 | quasi_random_samples
9 | quadrature_integration
10 | descriptive_statistics
11 |
--------------------------------------------------------------------------------
/docs/reference/descriptive/sensitivity_indices.rst:
--------------------------------------------------------------------------------
1 | Sensitivity Indices
2 | ===================
3 |
4 | .. currentmodule:: chaospy
5 |
6 | .. autosummary::
7 | :toctree: ../../api
8 |
9 | Sens_m
10 | Sens_m2
11 | Sens_t
12 |
--------------------------------------------------------------------------------
/docs/reference/distribution/utils.rst:
--------------------------------------------------------------------------------
1 | Utility
2 | -------
3 |
4 | .. currentmodule:: chaospy
5 |
6 | .. autosummary::
7 | :toctree: ../../api
8 |
9 | approximate_density
10 | approximate_inverse
11 | approximate_moment
12 |
--------------------------------------------------------------------------------
/docs/user_guide/main_usage/pseudo_spectral_projection.py:
--------------------------------------------------------------------------------
import chaospy
from problem_formulation import joint

# Gaussian quadrature rules of increasing order (1 through 7) for the joint
# distribution; each entry is whatever chaospy.generate_quadrature returns
# (presumably a nodes/weights pair -- confirm against the chaospy API).
gauss_quads = [
    chaospy.generate_quadrature(order, joint, rule="gaussian")
    for order in range(1, 8)
]
--------------------------------------------------------------------------------
/chaospy/descriptives/sensitivity/__init__.py:
--------------------------------------------------------------------------------
1 | """Variance based sensitivity decomposition using Sobol Indices."""
2 | from .main import Sens_m, FirstOrderSobol
3 | from .main2 import Sens_m2, SecondOrderSobol
4 | from .total import Sens_t, TotalOrderSobol
5 |
--------------------------------------------------------------------------------
/.readthedocs.yaml:
--------------------------------------------------------------------------------
1 | version: 2
2 | sphinx:
3 | builder: html
4 | configuration: docs/conf.py
5 | build:
6 | image: latest
7 | python:
8 | version: 3.8
9 | install:
10 | - requirements: docs/requirements.txt
11 | - method: pip
12 | path: .
13 |
--------------------------------------------------------------------------------
/docs/user_guide/main_usage/index.rst:
--------------------------------------------------------------------------------
1 | Main usage
2 | ==========
3 |
4 | .. toctree::
5 |
6 | problem_formulation
7 | monte_carlo_integration
8 | pseudo_spectral_projection
9 | point_collocation
10 | intrusive_galerkin
11 | lagrange_polynomials
12 |
--------------------------------------------------------------------------------
/docs/user_guide/main_usage/monte_carlo_integration.py:
--------------------------------------------------------------------------------
from problem_formulation import joint

# Draw 10000 samples with three different sampling schemes for comparison:
# a Sobol sequence, antithetic variates (seed fixed for reproducibility),
# and a Halton sequence.
sobol_samples = joint.sample(10000, rule="sobol")
antithetic_samples = joint.sample(10000, antithetic=True, seed=1234)
halton_samples = joint.sample(10000, rule="halton")
--------------------------------------------------------------------------------
/docs/reference/index.rst:
--------------------------------------------------------------------------------
1 | .. _reference:
2 |
3 | Reference
4 | =========
5 |
6 | .. toctree::
7 |
8 | high_level_interface
9 | descriptive/index.rst
10 | distribution/index.rst
11 | polynomial/index.rst
12 | sampling
13 | quadrature/index.rst
14 | recurrence
15 |
--------------------------------------------------------------------------------
/docs/user_guide/index.rst:
--------------------------------------------------------------------------------
1 | .. _user_guide:
2 |
3 | User guide
4 | ==========
5 |
6 | .. toctree::
7 |
8 | quick_tutorial
9 |
10 | fundamentals/index.rst
11 | polynomial/index.rst
12 | main_usage/index.rst
13 | advanced_topics/index.rst
14 |
15 | zbibliography
16 |
--------------------------------------------------------------------------------
/chaospy/distributions/sampler/__init__.py:
--------------------------------------------------------------------------------
1 | """Collection of variance reduction techniques."""
2 | from .generator import generate_samples
3 |
4 | from .sequences import * # pylint: disable=wildcard-import
5 | from .latin_hypercube import create_latin_hypercube_samples
6 | from .antithetic import create_antithetic_variates
7 |
--------------------------------------------------------------------------------
/docs/user_guide/polynomial/index.rst:
--------------------------------------------------------------------------------
1 | Polynomial
2 | ==========
3 |
4 | .. toctree::
5 | :maxdepth: 1
6 |
7 | introduction
8 | polynomial_evaluation
9 | polynomial_expansion
10 | numpy_functions
11 | orthogonality
12 | truncation_scheme
13 | comparison_operators
14 | polynomial_division
15 |
--------------------------------------------------------------------------------
/docs/user_guide/advanced_topics/index.rst:
--------------------------------------------------------------------------------
1 | Advanced topics
2 | ===============
3 |
4 | .. toctree::
5 |
6 | stochastic_dependencies
7 | generalized_polynomial_chaos
8 | advanced_regression_method
9 | kernel_density_estimation
10 | gaussian_mixture_model
11 | polynomial_chaos_kriging
12 | seir_model
13 |
--------------------------------------------------------------------------------
/docs/user_guide/fill_notebooks.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Execute every Jupyter notebook under the current directory in place so the
# rendered output is stored inside each .ipynb file. Checkpoint copies under
# .ipynb_checkpoints are skipped.

for notebook in $(find . -name '*.ipynb'); do
    # [[ ]] is required for glob pattern matching; the previous POSIX [ ]
    # compared the literal string, so the condition was always true and
    # checkpoint notebooks were re-executed as well.
    if [[ "$notebook" != *".ipynb_checkpoints"* ]]; then
        pushd "$(dirname "$notebook")"
        jupyter nbconvert --to notebook --inplace \
            --ExecutePreprocessor.timeout=-1 --execute "$(basename "$notebook")"
        popd
    fi
done
--------------------------------------------------------------------------------
/chaospy/recurrence/__init__.py:
--------------------------------------------------------------------------------
1 | """Collection of three terms recurrence algorithms."""
2 | from .frontend import construct_recurrence_coefficients, RECURRENCE_ALGORITHMS
3 | from .jacobi import coefficients_to_quadrature
4 |
5 | from .chebyshev import modified_chebyshev
6 | from .lanczos import lanczos
7 | from .stieltjes import stieltjes, discretized_stieltjes, analytical_stieltjes
8 |
--------------------------------------------------------------------------------
/chaospy/distributions/__init__.py:
--------------------------------------------------------------------------------
1 | # pylint: disable=wildcard-import
2 | """Module defining distributions."""
3 | from .baseclass import *
4 | from .sampler import *
5 | from .collection import *
6 | from .copulas import *
7 | from .operators import *
8 | from .approximation import *
9 | from .kernel import *
10 |
11 | from . import baseclass, sampler, approximation, copulas, collection, operators
12 |
--------------------------------------------------------------------------------
/tests/test_monte_carlo_integration.py:
--------------------------------------------------------------------------------
1 | import numpy
2 | import pytest
3 |
4 |
5 | def test_monte_carlo_mean(evaluations_large, true_mean):
6 | assert numpy.allclose(numpy.mean(evaluations_large, axis=0), true_mean, rtol=1e-3)
7 |
8 |
9 | def test_monte_carlo_variance(evaluations_large, true_variance):
10 | assert numpy.allclose(numpy.var(evaluations_large, axis=0), true_variance, rtol=2e-2)
11 |
--------------------------------------------------------------------------------
/chaospy/descriptives/__init__.py:
--------------------------------------------------------------------------------
1 | """Collection of descriptive statistics."""
2 | from .expected import E
3 | from .conditional import E_cond
4 | from .covariance import Cov
5 | from .kurtosis import Kurt
6 | from .percentile import Perc
7 | from .quantity_of_interest import QoI_Dist
8 | from .skewness import Skew
9 | from .standard_deviation import Std
10 | from .variance import Var
11 |
12 | from .correlation import *
13 | from .sensitivity import *
14 |
--------------------------------------------------------------------------------
/docs/reference/polynomial/baseclass.rst:
--------------------------------------------------------------------------------
1 | Baseclass
2 | =========
3 |
4 | .. currentmodule:: chaospy
5 |
6 | .. autosummary::
7 | :template: ndpoly.rst
8 | :toctree: ../../api
9 |
10 | ndpoly
11 |
12 | .. autosummary::
13 | :toctree: ../../api
14 |
15 | ndpoly.coefficients
16 | ndpoly.dtype
17 | ndpoly.exponents
18 | ndpoly.indeterminants
19 | ndpoly.keys
20 | ndpoly.names
21 | ndpoly.values
22 | ndpoly.KEY_OFFSET
23 |
--------------------------------------------------------------------------------
/docs/reference/recurrence.rst:
--------------------------------------------------------------------------------
1 | Three terms recurrence
2 | ======================
3 |
4 | .. currentmodule:: chaospy
5 |
6 | Recurrence algorithms
7 | ---------------------
8 |
9 | .. autosummary::
10 | :toctree: ../api
11 |
12 | modified_chebyshev
13 | lanczos
14 | stieltjes
15 | discretized_stieltjes
16 | analytical_stieltjes
17 |
18 | Utility functions
19 | -----------------
20 |
21 | .. autosummary::
22 | :toctree: ../api
23 |
24 | coefficients_to_quadrature
25 |
--------------------------------------------------------------------------------
/chaospy/distributions/sampler/sequences/__init__.py:
--------------------------------------------------------------------------------
1 | """Collection of low-discrepancy sequences."""
2 | from .additive_recursion import create_additive_recursion_samples
3 | from .chebyshev import create_chebyshev_samples, create_nested_chebyshev_samples
4 | from .grid import create_grid_samples, create_nested_grid_samples
5 | from .halton import create_halton_samples
6 | from .hammersley import create_hammersley_samples
7 | from .sobol import create_sobol_samples
8 | from .korobov import create_korobov_samples
9 |
--------------------------------------------------------------------------------
/docs/reference/distribution/operator.rst:
--------------------------------------------------------------------------------
1 | Operator
2 | ========
3 |
4 | .. currentmodule:: chaospy
5 |
6 | Multivariate
7 | ------------
8 |
9 | .. autosummary::
10 | :template: distribution.rst
11 | :toctree: ../../api
12 |
13 | J
14 | Iid
15 |
16 | Mathematical
17 | ------------
18 |
19 | .. autosummary::
20 | :template: distribution.rst
21 | :toctree: ../../api
22 |
23 | Add
24 | Multiply
25 | Negative
26 | Power
27 | Trunc
28 | Log
29 | Log10
30 | Logn
31 |
--------------------------------------------------------------------------------
/tests/poly/test_numpoly.py:
--------------------------------------------------------------------------------
1 | """Test interface against numpoly."""
2 | import chaospy
3 |
4 |
def test_constant_expected():
    """Test if polynomial constant behave as expected."""
    # Constants are invariant under expectation and have zero variance,
    # regardless of the distribution they are evaluated against.
    joint = chaospy.J(
        chaospy.Uniform(-1.2, 1.2),
        chaospy.Uniform(-2.0, 2.0),
    )
    constant = chaospy.polynomial(7.)
    assert chaospy.E(constant, joint[0]) == constant
    assert chaospy.E(constant, joint) == constant
    assert chaospy.Var(constant, joint) == 0.
--------------------------------------------------------------------------------
/chaospy/distributions/baseclass/__init__.py:
--------------------------------------------------------------------------------
1 | """Collection of distribution baseclasses."""
2 | from .utils import *
3 | from .distribution import Distribution
4 | from .slice_ import ItemDistribution
5 | from .simple import SimpleDistribution
6 | from .copula import CopulaDistribution
7 | from .mean_covariance import MeanCovarianceDistribution
8 | from .shift_scale import ShiftScaleDistribution
9 | from .lower_upper import LowerUpperDistribution
10 | from .operator import OperatorDistribution
11 | from .user import UserDistribution
12 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/usage_question.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Usage question
3 | about: Get help solving a problem with Chaospy
4 | title: ''
5 | labels: question
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Describe your problem**
11 | A short description of the problem you are trying to solve.
12 |
13 | **Initial implementation**
14 | Assuming you have already made an effort to solve the problem on your own,
15 | please provide a minimal code example.
16 |
17 | **Additional context**
18 | Add any other context about your question here.
19 |
--------------------------------------------------------------------------------
/chaospy/external/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | ``chaospy`` is by no means a package that provides absolutely all functionality
3 | for all problems. If another project provides functionality that can work well
4 | in tandem, the best approach, if possible, is to use both at the same time. To
5 | make such an approach more feasible, some compatibility wrappers exists
6 | allowing for using components from other projects as part of ``chaospy``.
7 | """
8 | from .openturns_ import openturns_dist, OpenTURNSDist
9 | from .scipy_stats import ScipyStatsDist
10 |
--------------------------------------------------------------------------------
/chaospy/distributions/operators/__init__.py:
--------------------------------------------------------------------------------
1 | """Collection of mathematical operators."""
2 | from .addition import Add
3 | from .multiply import Multiply
4 | from .negative import Negative
5 | from .power import Power
6 | from .truncation import Trunc
7 | from .logarithm import Log, Log10, Logn
8 |
9 | from .joint import J
10 | from .iid import Iid
11 |
12 | __all__ = (
13 | "Add",
14 | "Multiply",
15 | "Negative",
16 | "Power",
17 | "Trunc",
18 | "Log",
19 | "Log10",
20 | "Logn",
21 | "J",
22 | "Iid",
23 | )
24 |
--------------------------------------------------------------------------------
/tests/distributions/conftest.py:
--------------------------------------------------------------------------------
1 | """Test configuration for chaospy.distributions."""
2 | import pytest
3 |
4 | from chaospy.distributions import collection, Distribution
5 |
6 | DISTRIBUTIONS = []
7 | for attr in vars(collection).values():
8 | try:
9 | if issubclass(attr, Distribution) and len(attr()) == 1:
10 | DISTRIBUTIONS.append(attr)
11 | except TypeError:
12 | pass
13 |
14 |
15 | @pytest.fixture(params=DISTRIBUTIONS)
16 | def distribution(request):
17 | """Parameterization of distribution."""
18 | return request.param
19 |
--------------------------------------------------------------------------------
/docs/reference/distribution/advanced.rst:
--------------------------------------------------------------------------------
1 | Advanced
2 | ========
3 |
4 | .. currentmodule:: chaospy
5 |
6 | User-defined distribution
7 | -------------------------
8 |
9 | .. autosummary::
10 | :template: distribution.rst
11 | :toctree: ../../api
12 |
13 | UserDistribution
14 |
15 | Kernel Estimation
16 | -----------------
17 |
18 | .. autosummary::
19 | :template: distribution.rst
20 | :toctree: ../../api
21 |
22 | GaussianKDE
23 |
24 | Mixtures
25 | --------
26 |
27 | .. autosummary::
28 | :template: distribution.rst
29 | :toctree: ../../api
30 |
31 | GaussianMixture
32 |
--------------------------------------------------------------------------------
/tests/test_point_collocation.py:
--------------------------------------------------------------------------------
1 | import chaospy
2 | import numpy
3 | import pytest
4 |
5 |
@pytest.fixture
def collocation_model(expansion_small, samples_small, evaluations_small):
    """Polynomial chaos model fitted with point collocation (regression)."""
    return chaospy.fit_regression(expansion_small, samples_small, evaluations_small)


def test_collocation_mean(collocation_model, joint, true_mean):
    """Mean of the collocation model matches the analytical mean."""
    assert numpy.allclose(chaospy.E(collocation_model, joint), true_mean, rtol=1e-6)


# Renamed from test_regression_variance: this file tests point collocation,
# and the sibling test is named test_collocation_mean.
def test_collocation_variance(collocation_model, joint, true_variance):
    """Variance of the collocation model matches the analytical variance."""
    assert numpy.allclose(chaospy.Var(collocation_model, joint), true_variance, rtol=1e-5)
17 |
--------------------------------------------------------------------------------
/docs/reference/distribution/baseclass.rst:
--------------------------------------------------------------------------------
1 | .. _baseclass:
2 |
3 | Baseclass
4 | ---------
5 |
6 | .. currentmodule:: chaospy
7 |
8 | .. autosummary::
9 | :template: distribution.rst
10 | :toctree: ../../api
11 |
12 | Distribution
13 |
14 | .. autosummary::
15 | :toctree: ../../api
16 |
17 | Distribution.pdf
18 | Distribution.cdf
19 | Distribution.ppf
20 | Distribution.fwd
21 | Distribution.inv
22 | Distribution.sample
23 | Distribution.mom
24 | Distribution.ttr
25 |
26 | Distribution.interpret_as_integer
27 | Distribution.stochastic_dependent
28 | Distribution.lower
29 | Distribution.upper
30 |
--------------------------------------------------------------------------------
/tests/distributions/collection/test_triangle.py:
--------------------------------------------------------------------------------
1 | """Test for Triangle distribution."""
2 | import pytest
3 | import chaospy as cp
4 |
def test_triangle_init():
    """Assert that initialization checks lower and upper bounds."""
    # A well-formed dependent triangle (peak strictly between bounds)
    # should construct and join without error.
    uniform = cp.Uniform(0., 1.)
    triangle = cp.Triangle(uniform - 1., uniform, uniform + 1.)
    cp.J(uniform, triangle)

    # Bounds that can overlap the mid-point must be rejected.
    with pytest.raises(ValueError):
        cp.Triangle(uniform - 0.5, uniform, uniform + 1.)
    with pytest.raises(ValueError):
        cp.Triangle(uniform - 1., uniform, uniform + 0.5)

    # A constant mid-point between the stochastic bounds is accepted.
    cp.Triangle(uniform - 1., 0., uniform + 0.5)
20 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = sphinx-build
7 | SOURCEDIR = .
8 | BUILDDIR = .build
9 |
10 | # Put it first so that "make" without argument is like "make help".
11 | help:
12 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
13 |
14 | .PHONY: help Makefile
15 |
16 | # Catch-all target: route all unknown targets to Sphinx using the new
17 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
18 | %: Makefile
19 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
--------------------------------------------------------------------------------
/docs/reference/descriptive/statistical_moment.rst:
--------------------------------------------------------------------------------
1 | Statistical moment
2 | ==================
3 |
4 | .. currentmodule:: chaospy
5 |
6 | First Order Moments
7 | -------------------
8 |
9 | .. autosummary::
10 | :toctree: ../../api
11 |
12 | E
13 | E_cond
14 |
15 | Second Order Moments
16 | --------------------
17 |
18 | .. autosummary::
19 | :toctree: ../../api
20 |
21 | Std
22 | Var
23 | Cov
24 |
25 | Higher Order Moments
26 | --------------------
27 |
28 | .. autosummary::
29 | :toctree: ../../api
30 |
31 | Skew
32 | Kurt
33 |
34 | Correlation Functions
35 | ---------------------
36 |
37 | .. autosummary::
38 | :toctree: ../../api
39 |
40 | Corr
41 | Acf
42 | Spearman
43 |
--------------------------------------------------------------------------------
/docs/reference/high_level_interface.rst:
--------------------------------------------------------------------------------
1 | High-level interfaces
2 | =====================
3 |
4 | .. currentmodule:: chaospy
5 |
6 | Orthogonal expansion
7 | --------------------
8 |
9 | .. autosummary::
10 | :toctree: ../api
11 |
12 | generate_expansion
13 |
14 | Quadrature
15 | ----------
16 |
17 | .. autosummary::
18 | :toctree: ../api
19 |
20 | generate_quadrature
21 |
22 | Fit chaos expansion
23 | -------------------
24 |
25 | .. autosummary::
26 | :toctree: ../api
27 |
28 | fit_regression
29 | fit_quadrature
30 |
31 | Three terms recurrence coefficients
32 | -----------------------------------
33 |
34 | .. autosummary::
35 | :toctree: ../api
36 |
37 | construct_recurrence_coefficients
38 |
--------------------------------------------------------------------------------
/conftest.py:
--------------------------------------------------------------------------------
1 | """Global configuration."""
2 | import os
3 |
4 | import pytest
5 |
6 | import numpy
7 | import scipy
8 | import sklearn.linear_model
9 |
10 |
@pytest.fixture(autouse=True)
def global_setup(doctest_namespace, monkeypatch):
    """Global configuration setup."""
    # set debug mode during testing; a copy is patched via monkeypatch so
    # the real os.environ is restored automatically after each test.
    environ = os.environ.copy()
    environ["NUMPOLY_DEBUG"] = "1"
    environ["CHAOSPY_DEBUG"] = "1"
    monkeypatch.setattr("os.environ", environ)

    # NOTE(review): chaospy is imported after the environ patch --
    # presumably so the debug flags are visible at import time; confirm.
    import chaospy
    # Names made available to every doctest run under this configuration.
    doctest_namespace["numpy"] = numpy
    doctest_namespace["scipy"] = scipy
    doctest_namespace["chaospy"] = chaospy
    doctest_namespace["sklearn"] = sklearn

    # fix random seeds:
    numpy.random.seed(1000)
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a bug report of something that is currently not working
4 | title: ''
5 | labels: bug
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Describe the bug**
11 | A clear and concise description of what the bug is.
12 |
13 | **To Reproduce**
14 | Provide a minimal code example that reproduces the error
15 |
16 | **Expected behavior**
17 | A clear and concise description of what you expected to happen. If an exception is raised, please provide the exception traceback.
18 |
19 | **Desktop (please complete the following information):**
20 | - OS: [e.g. Windows, MacOS]
21 | - Python version: [e.g. 3.8.1]
22 | - Chaospy version [e.g. 4.0.1]
23 |
24 | **Additional context**
25 | Add any other context about the problem here.
26 |
--------------------------------------------------------------------------------
/chaospy/distributions/copulas/__init__.py:
--------------------------------------------------------------------------------
1 | r"""
2 | Copulas are a type of dependency structure imposed on independent variables to
3 | achieve more complex problems without adding too much complexity.
4 |
5 | To construct a copula one needs a copula transformation and the
6 | Copula wrapper::
7 |
8 | >>> dist = chaospy.Iid(chaospy.Uniform(), 2)
9 | >>> copula = chaospy.Gumbel(dist, theta=1.5)
10 |
11 | The resulting copula is then ready for use::
12 |
13 | >>> copula.sample(5).round(4)
14 | array([[0.6536, 0.115 , 0.9503, 0.4822, 0.8725],
15 | [0.6286, 0.0654, 0.96 , 0.5073, 0.9705]])
16 |
17 | """
18 | from .archimedean import Archimedean
19 |
20 | from .gumbel import Gumbel
21 | from .clayton import Clayton
22 | from .joe import Joe
23 | from .nataf import Nataf
24 | from .t_copula import TCopula
25 |
--------------------------------------------------------------------------------
/docs/_templates/distribution.rst:
--------------------------------------------------------------------------------
1 | {{ fullname | escape | underline}}
2 |
3 | .. currentmodule:: {{ module }}
4 |
5 | .. autoclass:: {{ objname }}
6 |
7 | {% block methods %}
8 | .. automethod:: __init__
9 |
10 | .. rubric:: Methods
11 |
12 | .. autosummary::
13 |
14 | ~Distribution.pdf
15 | ~Distribution.cdf
16 | ~Distribution.fwd
17 | ~Distribution.inv
18 | ~Distribution.sample
19 | ~Distribution.mom
20 | ~Distribution.ttr
21 |
22 | {% endblock %}
23 |
24 | {% block attributes %}
25 | {% if attributes %}
26 | .. rubric:: Attributes
27 |
28 | .. autosummary::
29 |
30 | ~Distribution.interpret_as_integer
31 | ~Distribution.lower
32 | ~Distribution.stochastic_dependent
33 | ~Distribution.upper
34 |
35 | {% endif %}
36 | {% endblock %}
37 |
--------------------------------------------------------------------------------
/docs/_templates/ndpoly.rst:
--------------------------------------------------------------------------------
1 | {{ fullname | escape | underline}}
2 |
3 | .. currentmodule:: {{ module }}
4 |
5 | .. autoclass:: {{ objname }}
6 |
7 | {% block methods %}
8 | .. automethod:: __init__
9 | .. automethod:: __call__
10 |
11 | .. rubric:: Methods
12 |
13 | .. autosummary::
14 |
15 | ~ndpoly.from_attributes
16 | ~ndpoly.isconstant
17 | ~ndpoly.todict
18 | ~ndpoly.tonumpy
19 |
20 | {% endblock %}
21 |
22 | {% block attributes %}
23 | {% if attributes %}
24 | .. rubric:: Attributes
25 |
26 | .. autosummary::
27 |
28 | ~ndpoly.coefficients
29 | ~ndpoly.dtype
30 | ~ndpoly.exponents
31 | ~ndpoly.indeterminants
32 | ~ndpoly.keys
33 | ~ndpoly.names
34 | ~ndpoly.values
35 | ~ndpoly.KEY_OFFSET
36 |
37 | {% endif %}
38 | {% endblock %}
39 |
--------------------------------------------------------------------------------
/docs/reference/sampling.rst:
--------------------------------------------------------------------------------
1 | .. _sampling_collection:
2 |
3 | Random and low-discrepancy samples
4 | ==================================
5 |
6 | .. currentmodule:: chaospy
7 |
8 | Generator function
9 | ------------------
10 |
11 | .. autosummary::
12 | :toctree: ../api
13 |
14 | generate_samples
15 |
16 | .. _low_discrepancy_sequences:
17 |
18 | Low-discrepancy sequences
19 | -------------------------
20 |
21 | .. autosummary::
22 | :toctree: ../api
23 |
24 | create_halton_samples
25 | create_hammersley_samples
26 | create_korobov_samples
27 | create_sobol_samples
28 | create_latin_hypercube_samples
29 | create_grid_samples
30 | create_chebyshev_samples
31 |
32 | Antithetic Variates
33 | -------------------
34 |
35 | .. autosummary::
36 | :toctree: ../api
37 |
38 | create_antithetic_variates
39 |
--------------------------------------------------------------------------------
/tests/recurrence/conftest.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | import chaospy
4 | from chaospy.recurrence import RECURRENCE_ALGORITHMS
5 |
6 | ANALYTICAL_DISTRIBUTIONS = {
7 | "beta": chaospy.Beta(4, 2),
8 | "expon": chaospy.Exponential(1),
9 | "gamma": chaospy.Gamma(2, 2),
10 | "lognorm": chaospy.LogNormal(-10, 0.1),
11 | "normal": chaospy.Normal(2, 3),
12 | "student": chaospy.StudentT(df=25, mu=0.5),
13 | "uniform": chaospy.Uniform(-1, 2),
14 | }
15 |
16 |
17 | @pytest.fixture(params=RECURRENCE_ALGORITHMS)
18 | def recurrence_algorithm(request):
19 | """Parameterization of name of recurrence algorithms."""
20 | yield request.param
21 |
22 |
23 | @pytest.fixture(params=ANALYTICAL_DISTRIBUTIONS.keys())
24 | def analytical_distribution(request):
25 | """Parameterization of distribution with analytical TTR methods."""
26 | return ANALYTICAL_DISTRIBUTIONS[request.param]
27 |
--------------------------------------------------------------------------------
/tests/distributions/operators/test_operators.py:
--------------------------------------------------------------------------------
1 | """Tests for the various base-operators."""
2 | import numpy
3 | import chaospy
4 |
5 |
6 | def test_operator_slicing():
7 | dists = [
8 | chaospy.J(chaospy.Normal(2), chaospy.Normal(2), chaospy.Normal(3)), # ShiftScale
9 | chaospy.J(chaospy.Uniform(1, 3), chaospy.Uniform(1, 3), chaospy.Uniform(1, 5)), # LowerUpper
10 | chaospy.MvNormal([2, 2, 3], numpy.eye(3)), # MeanCovariance
11 | ]
12 | for dist in dists:
13 |
14 | assert numpy.allclose(dist.inv([0.5, 0.5, 0.5]), [2, 2, 3])
15 | assert numpy.allclose(dist.fwd([2, 2, 3]), [0.5, 0.5, 0.5])
16 | density = dist.pdf([2, 2, 2], decompose=True)
17 | assert numpy.isclose(density[0], density[1]), dist
18 | assert not numpy.isclose(density[0], density[2]), dist
19 | assert numpy.isclose(dist[0].inv([0.5]), 2)
20 | assert numpy.isclose(dist[0].fwd([2]), 0.5)
21 |
--------------------------------------------------------------------------------
/tests/test_lagrange_polynomials.py:
--------------------------------------------------------------------------------
1 | import chaospy
2 | import numpy
3 | import pytest
4 |
5 |
6 | @pytest.fixture
7 | def samples(joint):
8 | return joint.sample(10, rule="sobol")
9 |
10 |
11 | @pytest.fixture
12 | def evaluations(model_solver, samples):
13 | return numpy.array([model_solver(sample) for sample in samples.T])
14 |
15 |
16 | @pytest.fixture
17 | def expansion(samples):
18 | return chaospy.lagrange_polynomial(samples)
19 |
20 |
21 | @pytest.fixture
22 | def lagrange_approximation(evaluations, expansion):
23 | return chaospy.sum(evaluations.T*expansion, axis=-1).T
24 |
25 |
26 | def test_lagrange_mean(lagrange_approximation, joint, true_mean):
27 | assert numpy.allclose(chaospy.E(lagrange_approximation, joint), true_mean, rtol=1e-3)
28 |
29 |
30 | def test_lagrange_variance(lagrange_approximation, joint, true_variance):
31 | assert numpy.allclose(chaospy.Var(lagrange_approximation, joint), true_variance, rtol=1e-2)
32 |
--------------------------------------------------------------------------------
/docs/reference/polynomial/constructor.rst:
--------------------------------------------------------------------------------
1 | Constructor
2 | ===========
3 |
4 | .. currentmodule:: chaospy
5 |
6 | Variable
7 | --------
8 |
9 | .. autosummary::
10 | :toctree: ../../api
11 |
12 | variable
13 | polynomial
14 | aspolynomial
15 | symbols
16 | polynomial_from_attributes
17 | ndpoly.from_attributes
18 | polynomial_from_roots
19 |
20 | Expansion
21 | ---------
22 |
23 | .. autosummary::
24 | :toctree: ../../api
25 |
26 | monomial
27 | expansion.lagrange
28 |
29 | Orthogonal
30 | ----------
31 |
32 | .. autosummary::
33 | :toctree: ../../api
34 |
35 | expansion.stieltjes
36 | expansion.cholesky
37 | expansion.gram_schmidt
38 |
39 | Pre-defined orthogonal
40 | ~~~~~~~~~~~~~~~~~~~~~~
41 |
42 | .. autosummary::
43 | :toctree: ../../api
44 |
45 | expansion.chebyshev_1
46 | expansion.chebyshev_2
47 | expansion.gegenbauer
48 | expansion.hermite
49 | expansion.jacobi
50 | expansion.laguerre
51 | expansion.legendre
52 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/functionality_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Functionality request
3 | about: Propose something new that you would like to see be implemented.
4 | title: ''
5 | labels: enhancement
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Is your functionality related to a problem? Please describe.**
11 | A clear and concise description of the problem you are trying to solve with the help of Chaospy.
12 |
13 | **Relevant resources about the topic**
14 | Beyond the trivial cases, some context is often needed to understand a functionality. So if possible, provide at least one of the following (from least to most relevant):
15 |
16 | * Some other software/script that has already implemented the functionality. Preferably something that has available source code.
17 | * Presentation slides/video tutorial that outlines the functionality.
18 | * Research paper outlining the functionality.
19 | * Blogpost/Wikipedia article.
20 |
21 | **Additional context**
22 | Add any other context about the feature request here.
23 |
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 | Copyright 2018 Jonathan Feinberg
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
4 |
5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
6 |
7 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
8 |
--------------------------------------------------------------------------------
/chaospy/distributions/sampler/latin_hypercube.py:
--------------------------------------------------------------------------------
1 | """
2 | Create Latin Hyper-cube samples.
3 |
4 | Example usage
5 | -------------
6 |
7 | Normal usage::
8 | >>> chaospy.create_latin_hypercube_samples(order=4, dim=2).round(4)
9 | array([[0.6634, 0.2788, 0.9876, 0.1205],
10 | [0.4681, 0.0531, 0.5102, 0.8493]])
11 | """
12 | import numpy
13 |
14 |
15 | def create_latin_hypercube_samples(order, dim=1):
16 | """
17 | Latin Hypercube sampling.
18 |
19 | Args:
20 | order (int):
21 | The order of the latin hyper-cube. Defines the number of samples.
22 | dim (int):
23 | The number of dimensions in the latin hyper-cube.
24 |
25 | Returns (numpy.ndarray):
26 | Latin hyper-cube with ``shape == (dim, order)``.
27 | """
28 | randoms = numpy.random.random(order * dim).reshape((dim, order))
29 | for dim_ in range(dim):
30 | perm = numpy.random.permutation(order) # pylint: disable=no-member
31 | randoms[dim_] = (perm + randoms[dim_]) / order
32 | return randoms
33 |
--------------------------------------------------------------------------------
/chaospy/descriptives/standard_deviation.py:
--------------------------------------------------------------------------------
1 | """Standard deviation."""
2 | import numpy
3 |
4 | from .variance import Var
5 |
6 |
7 | def Std(poly, dist=None, **kws):
8 | """
9 | Standard deviation.
10 |
11 | Args:
12 | poly (numpoly.ndpoly, Distribution):
13 | Input to take variance on.
14 | dist (Distribution):
15 | Defines the space the variance is taken on. It is ignored if
16 | ``poly`` is a distribution.
17 |
18 | Returns:
19 | (numpy.ndarray):
20 | Element for element variance along ``poly``, where
21 | ``variation.shape == poly.shape``.
22 |
23 | Examples:
24 | >>> dist = chaospy.J(chaospy.Gamma(1, 1), chaospy.Normal(0, 2))
25 | >>> chaospy.Std(dist)
26 | array([1., 2.])
27 | >>> q0, q1 = chaospy.variable(2)
28 | >>> poly = chaospy.polynomial([1, q0, q1, 10*q0*q1-1])
29 | >>> chaospy.Std(poly, dist)
30 | array([ 0. , 1. , 2. , 28.28427125])
31 |
32 | """
33 | return numpy.sqrt(Var(poly, dist=dist, **kws))
34 |
--------------------------------------------------------------------------------
/tests/test_intrusive_galerkin.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import numpy
3 | from scipy.integrate import odeint
4 | import chaospy
5 |
6 |
7 | @pytest.fixture
8 | def galerkin_approx(coordinates, joint, expansion_small, norms_small):
9 | alpha, beta = chaospy.variable(2)
10 |
11 | e_alpha_phi = chaospy.E(alpha*expansion_small, joint)
12 | initial_condition = e_alpha_phi/norms_small
13 |
14 | phi_phi = chaospy.outer(expansion_small, expansion_small)
15 | e_beta_phi_phi = chaospy.E(beta*phi_phi, joint)
16 |
17 | def right_hand_side(c, t):
18 | return -numpy.sum(c*e_beta_phi_phi, -1)/norms_small
19 |
20 | coefficients = odeint(
21 | func=right_hand_side,
22 | y0=initial_condition,
23 | t=coordinates,
24 | )
25 | return chaospy.sum(expansion_small*coefficients, -1)
26 |
27 |
28 | def test_galerkin_mean(galerkin_approx, joint, true_mean):
29 | assert numpy.allclose(chaospy.E(galerkin_approx, joint), true_mean, rtol=1e-12)
30 |
31 |
32 | def test_galerkin_variance(galerkin_approx, joint, true_variance):
33 | assert numpy.allclose(chaospy.Var(galerkin_approx, joint), true_variance, rtol=1e-12)
34 |
--------------------------------------------------------------------------------
/docs/reference/quadrature/index.rst:
--------------------------------------------------------------------------------
1 | .. _quadrature_collection:
2 |
3 | Quadrature integration
4 | ======================
5 |
6 | .. currentmodule:: chaospy.quadrature
7 |
8 | Standard library
9 | ----------------
10 |
11 | .. autosummary::
12 | :toctree: ../../api
13 |
14 | clenshaw_curtis
15 | fejer_1
16 | fejer_2
17 | gaussian
18 | grid
19 | legendre_proxy
20 | leja
21 | newton_cotes
22 |
23 | Discrete densities
24 | ------------------
25 |
26 | .. autosummary::
27 | :toctree: ../../api
28 |
29 | discrete
30 |
31 | Gaussian extensions
32 | -------------------
33 |
34 | .. autosummary::
35 | :toctree: ../../api
36 |
37 | genz_keister_16
38 | genz_keister_18
39 | genz_keister_22
40 | genz_keister_24
41 | legendre
42 | kronrod
43 | lobatto
44 | patterson
45 | radau
46 |
47 | Predefined Gaussian
48 | -------------------
49 |
50 | .. autosummary::
51 | :toctree: ../../api
52 |
53 | chebyshev_1
54 | chebyshev_2
55 | gegenbauer
56 | hermite
57 | jacobi
58 | legendre
59 | laguerre
60 |
61 | Helper functions
62 | ----------------
63 |
64 | .. autosummary::
65 | :toctree: ../../api
66 |
67 | sparse_grid
68 | kronrod_jacobi
69 |
--------------------------------------------------------------------------------
/chaospy/descriptives/correlation/auto_correlation.py:
--------------------------------------------------------------------------------
1 | """Auto correlation function."""
2 | import numpy
3 |
4 | from .pearson import Corr
5 |
6 |
7 | def Acf(poly, dist, n_steps=None, **kws):
8 | """
9 | Auto-correlation function.
10 |
11 | Args:
12 | poly (numpoly.ndpoly):
13 | Polynomial of interest. Must have ``len(poly) > n_steps``.
14 | dist (Distribution):
15 | Defines the space the correlation is taken on.
16 | n_steps (int):
17 | The number of time steps apart included. If omitted set to
18 | ``len(poly)/2+1``.
19 |
20 | Returns:
21 | (numpy.ndarray) :
22 | Auto-correlation of ``poly`` with shape ``(n_steps,)``. Note that
23 | by definition ``Q[0]=1``.
24 |
25 | Examples:
26 | >>> poly = chaospy.monomial(1, 10)
27 | >>> dist = chaospy.Uniform()
28 | >>> chaospy.Acf(poly, dist).round(4)
29 | array([1. , 0.9915, 0.9722, 0.9457, 0.9127])
30 |
31 | """
32 | n_steps = int(len(poly) / 2 + 1) if n_steps is None else n_steps
33 | correlation = Corr(poly, dist, **kws)
34 | return numpy.array(
35 | [numpy.mean(correlation.diagonal(idx), axis=0) for idx in range(n_steps)]
36 | )
37 |
--------------------------------------------------------------------------------
/chaospy/expansion/laguerre.py:
--------------------------------------------------------------------------------
1 | import numpy
2 | import chaospy
3 |
4 |
5 | def laguerre(
6 | order,
7 | alpha=0.0,
8 | physicist=False,
9 | normed=False,
10 | retall=False,
11 | ):
12 | """
13 | Examples:
14 | >>> polynomials, norms = chaospy.expansion.laguerre(3, retall=True)
15 | >>> polynomials
16 | polynomial([1.0, q0-1.0, q0**2-4.0*q0+2.0, q0**3-9.0*q0**2+18.0*q0-6.0])
17 | >>> norms
18 | array([ 1., 1., 4., 36.])
19 | >>> chaospy.expansion.laguerre(3, physicist=True).round(5)
20 | polynomial([1.0, -q0+1.0, 0.5*q0**2-2.0*q0+2.0,
21 | -0.16667*q0**3+1.5*q0**2-5.33333*q0+4.66667])
22 | >>> chaospy.expansion.laguerre(3, alpha=2, normed=True).round(3)
23 | polynomial([1.0, 0.577*q0-1.732, 0.204*q0**2-1.633*q0+2.449,
24 | 0.053*q0**3-0.791*q0**2+3.162*q0-3.162])
25 |
26 | """
27 | multiplier = -1.0 / numpy.arange(1, order + 1) if physicist else 1.0
28 | _, [polynomials], [norms] = chaospy.recurrence.analytical_stieltjes(
29 | order, chaospy.Gamma(alpha + 1), multiplier=multiplier
30 | )
31 | if normed:
32 | polynomials = chaospy.true_divide(polynomials, numpy.sqrt(norms))
33 | norms[:] = 1.0
34 | return (polynomials, norms) if retall else polynomials
35 |
--------------------------------------------------------------------------------
/chaospy/distributions/sampler/sequences/primes.py:
--------------------------------------------------------------------------------
1 | """
2 | Create all primes below a certain threshold.
3 |
4 | Examples::
5 |
6 | >>> create_primes(1)
7 | []
8 | >>> create_primes(2)
9 | [2]
10 | >>> create_primes(3)
11 | [2, 3]
12 | >>> create_primes(20)
13 | [2, 3, 5, 7, 11, 13, 17, 19]
14 | """
15 |
16 |
17 | def create_primes(threshold):
18 | """
19 | Generate prime values using sieve of Eratosthenes method.
20 |
21 | Args:
22 | threshold (int):
23 | The upper bound for the size of the prime values.
24 |
25 | Returns (List[int]):
26 | All primes from 2 and up to ``threshold``.
27 | """
28 | if threshold == 2:
29 | return [2]
30 |
31 | elif threshold < 2:
32 | return []
33 |
34 | numbers = list(range(3, threshold + 1, 2))
35 | root_of_threshold = threshold**0.5
36 | half = int((threshold + 1) / 2 - 1)
37 | idx = 0
38 | counter = 3
39 | while counter <= root_of_threshold:
40 | if numbers[idx]:
41 | idy = int((counter * counter - 3) / 2)
42 | numbers[idy] = 0
43 | while idy < half:
44 | numbers[idy] = 0
45 | idy += counter
46 | idx += 1
47 | counter = 2 * idx + 3
48 | return [2] + [number for number in numbers if number]
49 |
--------------------------------------------------------------------------------
/chaospy/distributions/kernel/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | In some cases one needs a constructed distribution that is first and foremost
3 | data driven. In such scenarios it makes sense to make use of
4 | `kernel density estimation`_ (KDE). In ``chaospy`` KDE can be accessed through
5 | the :func:`GaussianKDE` constructor.
6 |
7 | Basic usage of the :func:`GaussianKDE` constructor involves just passing the
8 | data as input argument::
9 |
10 | >>> data = [3, 4, 5, 5]
11 | >>> distribution = chaospy.GaussianKDE(data)
12 |
13 | This distribution can be used as any other distributions::
14 |
15 | >>> distribution.cdf([3, 3.5, 4, 4.5, 5]).round(4)
16 | array([0.1393, 0.2542, 0.3889, 0.5512, 0.7359])
17 | >>> distribution.mom(1).round(4)
18 | np.float64(4.25)
19 | >>> distribution.sample(4).round(4)
20 | array([4.7784, 2.8769, 5.8109, 4.2995])
21 |
22 | In addition, multivariate distributions are supported::
23 |
24 | >>> data = [[1, 2, 2, 3], [5, 5, 4, 3]]
25 | >>> distribution = chaospy.GaussianKDE(data)
26 | >>> distribution.sample(4).round(4)
27 | array([[2.081 , 3.0304, 3.0882, 0.4872],
28 | [3.2878, 2.5473, 2.2699, 5.3412]])
29 |
30 | .. _kernel density estimation: \
31 | https://en.wikipedia.org/wiki/Kernel_density_estimation
32 | """
33 | from .gaussian import GaussianKDE
34 | from .mixture import GaussianMixture
35 |
--------------------------------------------------------------------------------
/chaospy/expansion/__init__.py:
--------------------------------------------------------------------------------
1 | r"""Collection of polynomial expansion constructors."""
2 | import logging
3 | from functools import wraps
4 |
5 | from .chebyshev import chebyshev_1, chebyshev_2
6 | from .cholesky import cholesky
7 | from .frontend import generate_expansion
8 | from .gegenbauer import gegenbauer
9 | from .gram_schmidt import gram_schmidt
10 | from .hermite import hermite
11 | from .jacobi import jacobi
12 | from .stieltjes import stieltjes
13 | from .lagrange import lagrange
14 | from .laguerre import laguerre
15 | from .legendre import legendre
16 |
17 | __all__ = ["generate_expansion"]
18 |
19 |
20 | def expansion_deprecation_warning(name, func):
21 | @wraps(func)
22 | def wrapped(*args, **kwargs):
23 | """Function wrapper adds warnings."""
24 | logger = logging.getLogger(__name__)
25 | logger.warning(
26 | "chaospy.%s name is to be deprecated; " "Use chaospy.expansion.%s instead",
27 | name,
28 | func.__name__,
29 | )
30 | return func(*args, **kwargs)
31 |
32 | globals()[name] = wrapped
33 | __all__.append(name)
34 |
35 |
36 | expansion_deprecation_warning("orth_ttr", stieltjes)
37 | expansion_deprecation_warning("orth_chol", cholesky)
38 | expansion_deprecation_warning("orth_gs", gram_schmidt)
39 | expansion_deprecation_warning("lagrange_polynomial", lagrange)
40 |
--------------------------------------------------------------------------------
/tests/recurrence/test_stieltjes_method.py:
--------------------------------------------------------------------------------
1 | """Check if Stieltjes method, both analytical and discretized works as expected."""
2 | import numpy
3 | import numpoly
4 | import chaospy
5 |
6 |
7 | def test_analytical_stieltjes(analytical_distribution):
8 | """Assert that Analytical Stieltjes produces orthogonality."""
9 | coeffs, [orth], norms = chaospy.analytical_stieltjes(
10 | order=4, dist=analytical_distribution)
11 | assert orth[0] == 1
12 | assert numpy.allclose(chaospy.E(orth[1:], analytical_distribution), 0)
13 | covariance = chaospy.E(
14 | numpoly.outer(orth[1:], orth[1:]), analytical_distribution)
15 | assert numpy.allclose(numpy.diag(numpy.diag(covariance)), covariance)
16 | assert numpy.allclose(numpoly.lead_coefficient(orth), 1)
17 |
18 |
19 | def test_stieltjes_compared(analytical_distribution):
20 | """Assert that discretized and analytical approach are equivalent."""
21 | (alpha0, beta0), [orth0], norms0 = chaospy.analytical_stieltjes(
22 | order=3, dist=analytical_distribution)
23 | (alpha1, beta1), [orth1], norms1 = chaospy.discretized_stieltjes(
24 | order=3, dist=analytical_distribution)
25 | assert numpy.allclose(alpha0, alpha1)
26 | assert numpy.allclose(beta0, beta1)
27 | assert numpy.allclose(orth0.coefficients, orth1.coefficients)
28 | assert numpy.allclose(norms0, norms1)
29 |
--------------------------------------------------------------------------------
/chaospy/expansion/legendre.py:
--------------------------------------------------------------------------------
1 | import numpy
2 | import chaospy
3 |
4 |
5 | def legendre(
6 | order,
7 | lower=-1,
8 | upper=1,
9 | physicist=False,
10 | normed=False,
11 | retall=False,
12 | ):
13 | """
14 | Examples:
15 | >>> polynomials, norms = chaospy.expansion.legendre(3, retall=True)
16 | >>> polynomials.round(5)
17 | polynomial([1.0, q0, q0**2-0.33333, q0**3-0.6*q0])
18 | >>> norms
19 | array([1. , 0.33333333, 0.08888889, 0.02285714])
20 | >>> chaospy.expansion.legendre(3, physicist=True).round(3)
21 | polynomial([1.0, 1.5*q0, 2.5*q0**2-0.556, 4.375*q0**3-1.672*q0])
22 | >>> chaospy.expansion.legendre(3, lower=0, upper=1, normed=True).round(3)
23 | polynomial([1.0, 3.464*q0-1.732, 13.416*q0**2-13.416*q0+2.236,
24 | 52.915*q0**3-79.373*q0**2+31.749*q0-2.646])
25 |
26 | """
27 | multiplier = 1.0
28 | if physicist:
29 | multiplier = numpy.arange(1, order + 1)
30 | multiplier = (2 * multiplier + 1) / (multiplier + 1)
31 | _, [polynomials], [norms] = chaospy.recurrence.analytical_stieltjes(
32 | order, chaospy.Uniform(lower, upper), multiplier=multiplier
33 | )
34 | if normed:
35 | polynomials = chaospy.true_divide(polynomials, numpy.sqrt(norms))
36 | norms[:] = 1.0
37 | return (polynomials, norms) if retall else polynomials
38 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["setuptools>=45"]
3 | build-backend = "setuptools.build_meta"
4 |
5 | [project]
6 | name = "chaospy"
7 | version = "4.3.21"
8 | license = {text = "MIT license"}
9 | description = "Numerical tool for performing uncertainty quantification"
10 | readme = "README.rst"
11 | classifiers = [
12 | "Development Status :: 5 - Production/Stable",
13 | "Intended Audience :: Science/Research",
14 | "Topic :: Scientific/Engineering :: Mathematics",
15 | "Natural Language :: English",
16 | ]
17 |
18 | requires-python = ">=3.9"
19 | dependencies = [
20 | "numpy >=1.20",
21 | "numpoly >=1.2.12",
22 | "scipy",
23 | "setuptools >=40.9.0",
24 | "importlib-metadata; python_version<'3.10'",
25 | ]
26 |
27 | [project.optional-dependencies]
28 | dev = [
29 | "openturns==1.23",
30 | "scikit-learn",
31 | "gstools",
32 | "jupyter",
33 | "matplotlib",
34 | "nbsphinx",
35 | "sphinxcontrib-bibtex",
36 | "black ==22.10.0",
37 | "pandoc",
38 | "pylint ==2.15.5",
39 | "pydocstyle ==3.0.0",
40 | "pydata_sphinx_theme ==0.11.0",
41 | "sympy",
42 | "pytest",
43 | "pytest-cov",
44 | "codecov",
45 | "coverage >=5.0",
46 | "Sphinx",
47 | "sphinx_automodapi",
48 | "build",
49 | "twine",
50 | ]
51 |
52 | [tool.pytest.ini_options]
53 | filterwarnings = [
54 | "error::DeprecationWarning",
55 | "ignore:elementwise:DeprecationWarning"
56 | ]
57 |
--------------------------------------------------------------------------------
/chaospy/descriptives/variance.py:
--------------------------------------------------------------------------------
1 | """Variance operator."""
2 | import numpy
3 | import numpoly
4 |
5 | from .expected import E
6 |
7 |
8 | def Var(poly, dist=None, **kws):
9 | """
10 | The variance of a distribution or polynomial.
11 |
12 | Args:
13 | poly (numpoly.ndpoly, Distribution):
14 | Input to take variance on.
15 | dist (Distribution):
16 | Defines the space the variance is taken on. It is ignored if
17 | ``poly`` is a distribution.
18 |
19 | Returns:
20 | (numpy.ndarray):
21 | Element for element variance along ``poly``, where
22 | ``variation.shape == poly.shape``.
23 |
24 | Examples:
25 | >>> dist = chaospy.J(chaospy.Gamma(1, 1), chaospy.Normal(0, 2))
26 | >>> chaospy.Var(dist)
27 | array([1., 4.])
28 | >>> q0, q1 = chaospy.variable(2)
29 | >>> poly = chaospy.polynomial([1, q0, q1, 10*q0*q1])
30 | >>> chaospy.Var(poly, dist)
31 | array([ 0., 1., 4., 800.])
32 | >>> chaospy.Var(2., dist)
33 | array(0.)
34 |
35 | """
36 | if dist is None:
37 | dist, poly = poly, numpoly.variable(len(poly))
38 | poly = numpoly.set_dimensions(poly, len(dist))
39 | if poly.isconstant():
40 | return numpy.zeros(poly.shape)
41 | poly = poly - E(poly, dist, **kws)
42 | poly = numpoly.square(poly)
43 | return E(poly, dist, **kws)
44 |
--------------------------------------------------------------------------------
/chaospy/distributions/sampler/sequences/korobov.py:
--------------------------------------------------------------------------------
1 | """
2 | Create samples from a Korobov lattice.
3 |
4 | Example usage
5 | --------------
6 |
7 | Normal usage::
8 |
9 | >>> distribution = chaospy.J(chaospy.Uniform(0, 1), chaospy.Uniform(0, 1))
10 | >>> samples = distribution.sample(4, rule="korobov")
11 | >>> samples.round(4)
12 | array([[0.2, 0.4, 0.6, 0.8],
13 | [0.4, 0.8, 0.2, 0.6]])
14 | >>> samples = distribution.sample(6, rule="korobov")
15 | >>> samples.round(4)
16 | array([[0.1429, 0.2857, 0.4286, 0.5714, 0.7143, 0.8571],
17 | [0.4286, 0.8571, 0.2857, 0.7143, 0.1429, 0.5714]])
18 |
19 | """
20 | import numpy
21 |
22 |
23 | def create_korobov_samples(order, dim, base=17797):
24 | """
25 | Create Korobov lattice samples.
26 |
27 | Args:
28 | order (int):
29 | The order of the Korobov latice. Defines the number of
30 | samples.
31 | dim (int):
32 | The number of dimensions in the output.
33 | base (int):
34 | The number based used to calculate the distribution of values.
35 |
36 | Returns (numpy.ndarray):
37 | Korobov lattice with ``shape == (dim, order)``
38 | """
39 | values = numpy.empty(dim)
40 | values[0] = 1
41 | for idx in range(1, dim):
42 | values[idx] = base * values[idx - 1] % (order + 1)
43 |
44 | grid = numpy.mgrid[:dim, : order + 1]
45 | out = values[grid[0]] * (grid[1] + 1) / (order + 1.0) % 1.0
46 | return out[:, :order]
47 |
--------------------------------------------------------------------------------
/tests/test_regression_models.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from sklearn import linear_model
3 | import numpy
4 | import chaospy
5 |
6 | LINEAR_MODELS = {
7 | "none": None,
8 | "linear": linear_model.LinearRegression(fit_intercept=False),
9 | "elastic_net": linear_model.MultiTaskElasticNet(alpha=0.0001, fit_intercept=False),
10 | "lasso": linear_model.MultiTaskLasso(alpha=0.001, fit_intercept=False),
11 | "lasso_lars": linear_model.LassoLars(alpha=0.0001, fit_intercept=False),
12 | "lars": linear_model.Lars(n_nonzero_coefs=10, fit_intercept=False),
13 | "matching_pursuit": linear_model.OrthogonalMatchingPursuit(n_nonzero_coefs=10, fit_intercept=False),
14 | "ridge": linear_model.Ridge(alpha=0.1, fit_intercept=False),
15 | }
16 |
17 |
18 | @pytest.fixture
19 | def samples(joint):
20 | return joint.sample(1000, rule="sobol")
21 |
22 |
23 | @pytest.fixture
24 | def evaluations(model_solver, samples):
25 | return numpy.array([model_solver(sample) for sample in samples.T])
26 |
27 |
28 | @pytest.fixture(params=LINEAR_MODELS)
29 | def linear_model(request, expansion_small, samples, evaluations):
30 | return chaospy.fit_regression(
31 | expansion_small, samples, evaluations, model=LINEAR_MODELS[request.param])
32 |
33 |
34 | def test_regression_mean(linear_model, joint, true_mean):
35 | assert numpy.allclose(chaospy.E(linear_model, joint), true_mean, rtol=1e-2)
36 |
37 |
38 | def test_regression_variance(linear_model, joint, true_variance):
39 | assert numpy.allclose(chaospy.Var(linear_model, joint), true_variance, rtol=3e-1)
40 |
--------------------------------------------------------------------------------
/chaospy/distributions/sampler/antithetic.py:
--------------------------------------------------------------------------------
1 | """Generate antithetic variables."""
2 | import numpy
3 |
4 |
def create_antithetic_variates(samples, axes=()):
    """
    Generate antithetic variables.

    Args:
        samples (numpy.ndarray):
            The samples, assumed to be on the [0, 1]^D hyper-cube, to be
            reflected.
        axes (tuple):
            Boolean array of which axes to reflect. Use this to limit the
            number of points created in higher dimensions by reflecting all
            axes at once.

    Returns (numpy.ndarray):
        Same as ``samples``, but with samples internally reflected. Roughly
        equivalent to ``numpy.vstack([samples, 1-samples])`` in one dimension.
    """
    samples = numpy.asarray(samples, dtype=float)
    assert numpy.all((samples >= 0) & (samples <= 1)), "all samples assumed on interval [0, 1]."
    if samples.ndim == 1:
        samples = samples.reshape(1, -1)
    dims = len(samples)
    mirrored = 1 - samples

    if not len(axes):
        axes = (True,)
    axes = numpy.asarray(axes, dtype=bool).flatten()

    # Every unique reflection mask: each corner of {0, 1}^dims, restricted to
    # the axes flagged for reflection.
    masks = {tuple(axes * combo) for combo in numpy.ndindex((2,) * dims)}
    # Stable double sort: lexicographically descending, then by number of
    # reflected axes, so output ordering is deterministic.
    masks = sorted(sorted(masks, reverse=True), key=sum)
    stacks = [numpy.where(mask, mirrored.T, samples.T).T for mask in masks]
    return numpy.dstack(stacks).reshape(dims, -1)
41 |
--------------------------------------------------------------------------------
/tests/test_pseudo_spectral_projection.py:
--------------------------------------------------------------------------------
1 | import chaospy
2 | import numpy
3 | import pytest
4 |
# Quadrature configurations exercised by the `nodes_and_weights` fixture.
# The "sparse*" entries combine two rules via Smolyak sparse grids.
QUADRATURE_RULES = {
    "gaussian": {"rule": "gaussian"},
    "sparse1": {"rule": ["genz_keister_16", "clenshaw_curtis"], "sparse": True},
    "sparse2": {"rule": ["genz_keister_18", "fejer_1"], "sparse": True},
    "sparse3": {"rule": ["genz_keister_22", "newton_cotes"], "sparse": True},
    "sparse4": {"rule": ["genz_keister_24", "legendre"], "sparse": True},
    "lobatto": {"rule": "lobatto"},
    "radau": {"rule": "radau"},
}
14 |
15 |
@pytest.fixture(params=QUADRATURE_RULES)
def nodes_and_weights(joint, request):
    """Order-4 quadrature nodes and weights for each configured rule."""
    rule_kwargs = QUADRATURE_RULES[request.param]
    return chaospy.generate_quadrature(4, joint, **rule_kwargs)
19 |
20 |
@pytest.fixture
def nodes(nodes_and_weights):
    """Quadrature abscissas only."""
    abscissas, _ = nodes_and_weights
    return abscissas
24 |
25 |
@pytest.fixture
def weights(nodes_and_weights):
    """Quadrature weights only."""
    _, quadrature_weights = nodes_and_weights
    return quadrature_weights
29 |
30 |
@pytest.fixture
def evaluations(nodes, model_solver):
    """Model response evaluated at every quadrature node."""
    responses = []
    for node in nodes.T:
        responses.append(model_solver(node))
    return numpy.array(responses)
34 |
35 |
@pytest.fixture
def spectral_approx(expansion_small, nodes, weights, evaluations):
    """Chaos expansion fitted via pseudo-spectral projection."""
    approximation = chaospy.fit_quadrature(
        expansion_small, nodes, weights, evaluations)
    return approximation
39 |
40 |
def test_spectral_mean(spectral_approx, joint, true_mean):
    """The spectral approximation's mean should match the analytical mean."""
    estimated_mean = chaospy.E(spectral_approx, joint)
    assert numpy.allclose(estimated_mean, true_mean)
43 |
44 |
def test_spectral_variance(spectral_approx, joint, true_variance):
    """The spectral approximation's variance should match the analytical one."""
    estimated_variance = chaospy.Var(spectral_approx, joint)
    assert numpy.allclose(estimated_variance, true_variance)
47 |
--------------------------------------------------------------------------------
/chaospy/descriptives/skewness.py:
--------------------------------------------------------------------------------
1 | """Skewness operator."""
2 | import numpy
3 | import numpoly
4 |
5 | from .expected import E
6 | from .standard_deviation import Std
7 |
8 |
def Skew(poly, dist=None, **kws):
    """
    The third order statistical moment skewness.

    Element by element 3rd order statistics of a distribution or polynomial.

    Args:
        poly (numpoly.ndpoly, Distribution):
            Input to take skewness on.
        dist (Distribution):
            Defines the space the skewness is taken on. It is ignored if
            ``poly`` is a distribution.

    Returns:
        (numpy.ndarray):
            Element for element skewness along ``poly``, where
            ``skewness.shape == poly.shape``.

    Examples:
        >>> dist = chaospy.J(chaospy.Gamma(1, 1), chaospy.Normal(0, 2))
        >>> chaospy.Skew(dist)
        array([2., 0.])
        >>> q0, q1 = chaospy.variable(2)
        >>> poly = chaospy.polynomial([1, q0, q1, 10*q0*q1-1])
        >>> chaospy.Skew(poly, dist)
        array([nan, 2., 0., 0.])
        >>> chaospy.Skew(2., dist)
        array(nan)

    """
    if dist is None:
        # Skewness of a distribution itself: use its own variables as poly.
        dist, poly = poly, numpoly.variable(len(poly))
    poly = numpoly.set_dimensions(poly, len(dist))
    if poly.isconstant():
        # Constants carry no variation, so skewness is undefined (NaN).
        return numpy.full(poly.shape, numpy.nan)

    # Standardize (subtract mean, divide by standard deviation); the third
    # raw moment of the standardized polynomial is the skewness.
    poly = poly - E(poly, dist, **kws)
    poly = numpoly.true_divide(poly, Std(poly, dist, **kws))
    return E(poly**3, dist, **kws)
48 |
--------------------------------------------------------------------------------
/chaospy/expansion/jacobi.py:
--------------------------------------------------------------------------------
1 | import numpy
2 | import chaospy
3 |
4 |
def jacobi(
    order,
    alpha,
    beta,
    lower=-1,
    upper=1,
    physicist=False,
    normed=False,
    retall=False,
):
    """
    Jacobi polynomial expansion.

    Args:
        order (int):
            The polynomial order.
        alpha (float):
            First Jacobi shape parameter.
        beta (float):
            Second Jacobi shape parameter.
        lower (float):
            Lower bound for the integration interval.
        upper (float):
            Upper bound for the integration interval.
        physicist (bool):
            Use physicist weights instead of probabilist.
        normed (bool):
            If True, divide the polynomials by their norms so the returned
            norms become 1.
        retall (bool):
            If True, return the norms alongside the polynomials.

    Examples:
        >>> polynomials, norms = chaospy.expansion.jacobi(4, 0.5, 0.5, retall=True)
        >>> polynomials
        polynomial([1.0, q0, q0**2-0.5, q0**3-0.75*q0, q0**4-q0**2+0.125])
        >>> norms
        array([1.       , 0.5      , 0.125    , 0.03125  , 0.0078125])
        >>> chaospy.expansion.jacobi(3, 0.5, 0.5, physicist=True).round(4)
        polynomial([1.0, 1.5*q0, 2.5*q0**2-0.8333, 4.375*q0**3-2.1146*q0])
        >>> chaospy.expansion.jacobi(3, 1.5, 0.5, normed=True)
        polynomial([1.0, 2.0*q0, 4.0*q0**2-1.0, 8.0*q0**3-4.0*q0])

    """
    multiplier = 1
    if physicist:
        # Physicist convention rescales each recurrence step.
        multiplier = numpy.arange(1, order + 1)
        multiplier = (
            (2 * multiplier + alpha + beta - 1)
            * (2 * multiplier + alpha + beta)
            / (2 * multiplier * (multiplier + alpha + beta))
        )
    _, [polynomials], [norms] = chaospy.recurrence.analytical_stieltjes(
        order,
        chaospy.Beta(alpha, beta, lower=lower, upper=upper),
        multiplier=multiplier,
    )
    if normed:
        polynomials = chaospy.true_divide(polynomials, numpy.sqrt(norms))
        norms[:] = 1.0
    return (polynomials, norms) if retall else polynomials
47 |
--------------------------------------------------------------------------------
/chaospy/descriptives/correlation/pearson.py:
--------------------------------------------------------------------------------
1 | """Pearson's correlation matrix."""
2 | import numpy
3 | from scipy.stats import spearmanr
4 | import numpoly
5 | import chaospy
6 |
7 |
def Corr(poly, dist=None, **kws):
    """
    Correlation matrix of a distribution or polynomial.

    Args:
        poly (numpoly.ndpoly, Distribution):
            Input to take correlation on. Must have ``len(poly)>=2``.
        dist (Distribution):
            Defines the space the correlation is taken on. It is ignored if
            ``poly`` is a distribution.

    Returns:
        (numpy.ndarray):
            Correlation matrix with
            ``correlation.shape == poly.shape+poly.shape``.

    Examples:
        >>> distribution = chaospy.MvNormal(
        ...     [3, 4], [[2, .5], [.5, 1]])
        >>> chaospy.Corr(distribution).round(4)
        array([[1.    , 0.3536],
               [0.3536, 1.    ]])
        >>> q0 = chaospy.variable()
        >>> poly = chaospy.polynomial([q0, q0**2])
        >>> distribution = chaospy.Normal()
        >>> chaospy.Corr(poly, distribution).round(4)
        array([[1., 0.],
               [0., 1.]])

    """
    if isinstance(poly, chaospy.Distribution):
        poly, dist = numpoly.variable(len(poly)), poly
    else:
        poly = numpoly.polynomial(poly)

    cov = chaospy.Cov(poly, dist, **kws)
    var = numpy.diag(cov)
    vvar = numpy.sqrt(numpy.outer(var, var))
    # Guard the division: zero-variance entries would otherwise be divided
    # by zero (emitting runtime warnings and NaN/inf) before being masked.
    safe_vvar = numpy.where(vvar > 0, vvar, 1)
    return numpy.where(vvar > 0, cov / safe_vvar, 0)
47 |
--------------------------------------------------------------------------------
/tests/distributions/test_approximation.py:
--------------------------------------------------------------------------------
1 | """Test if approximation functions works as expected."""
2 | from pytest import raises
3 | import numpy
4 | import chaospy
5 | from chaospy.distributions.collection.gamma import gamma
6 |
# Shared gamma(1) test distribution and a fixed batch of samples reused by
# the approximation tests below.
DIST = gamma(1)
SAMPLES = DIST.sample(100)
9 |
10 |
def test_approximate_density(monkeypatch):
    """Assert that approximate density is doing its job."""
    # Reference densities computed with the analytical `_pdf` still in place.
    ref_density = DIST.pdf(SAMPLES)
    # Replace the analytical `_pdf` with the abstract baseclass version so
    # only the numerical approximation path remains available.
    monkeypatch.setattr(DIST, "_pdf",
                        lambda x, **_: chaospy.Distribution._pdf(DIST, x))
    with raises(chaospy.UnsupportedFeature):
        DIST.pdf(SAMPLES, allow_approx=False)
    # The approximation should reproduce the analytical reference values.
    assert numpy.allclose(DIST.pdf(SAMPLES, allow_approx=True), ref_density)
19 |
20 |
def test_approximate_inverse(monkeypatch):
    """Assert that approximate inverse is doing its job."""
    # Forward-map the samples while the analytical `_ppf` is still in place.
    u_samples = DIST.fwd(SAMPLES)
    # Replace the analytical `_ppf` with the abstract baseclass version so
    # the inverse must be approximated numerically.
    monkeypatch.setattr(DIST, "_ppf",
                        lambda u, **_: chaospy.Distribution._ppf(DIST, u))
    # The numerical inverse should round-trip back to the original samples.
    assert numpy.allclose(DIST.inv(u_samples), SAMPLES)
27 |
28 |
def test_approximate_moment(monkeypatch):
    """Assert that approximate moments is doing its job."""
    # Reference moments computed with the analytical `_mom` still in place.
    ref_moments = DIST.mom([1, 2, 3, 4])
    # Replace the analytical `_mom` with the abstract baseclass version so
    # only quadrature-based approximation remains available.
    monkeypatch.setattr(DIST, "_mom",
                        lambda k, **_: chaospy.Distribution._mom(DIST, k))
    monkeypatch.delitem(DIST._mom_cache, (1,))  # value is cached
    with raises(chaospy.UnsupportedFeature):
        DIST.mom([1, 2, 3, 4], allow_approx=False)
    assert numpy.allclose(DIST.mom([1, 2, 3, 4], allow_approx=True), ref_moments)
38 |
--------------------------------------------------------------------------------
/chaospy/descriptives/expected.py:
--------------------------------------------------------------------------------
1 | """Expected value."""
2 | import numpy
3 | import numpoly
4 |
5 |
def E(poly, dist=None, **kws):
    """
    The expected value of a distribution or polynomial.

    1st order statistics of a probability distribution or polynomial on a given
    probability space.

    Args:
        poly (numpoly.ndpoly, Distribution):
            Input to take expected value on.
        dist (Distribution):
            Defines the space the expected value is taken on. It is ignored if
            ``poly`` is a distribution.

    Returns:
        (numpy.ndarray):
            The expected value of the polynomial or distribution, where
            ``expected.shape == poly.shape``.

    Examples:
        >>> dist = chaospy.J(chaospy.Gamma(1, 1), chaospy.Normal(0, 2))
        >>> chaospy.E(dist)
        array([1., 0.])
        >>> q0, q1 = chaospy.variable(2)
        >>> poly = chaospy.polynomial([1, q0, q1, 10*q0*q1-1])
        >>> chaospy.E(poly, dist)
        array([ 1., 1., 0., -1.])

    """
    if dist is None:
        # Expectation of a distribution itself: use its variables as poly.
        dist, poly = poly, numpoly.variable(len(poly))

    poly = numpoly.set_dimensions(poly, len(dist))
    if poly.isconstant():
        return poly.tonumpy()

    # Raw moments for every monomial exponent present in the polynomial.
    moments = dist.mom(poly.exponents.T, **kws)
    if len(dist) == 1:
        moments = moments[0]

    # Linearity of expectation: E[sum c_k x^k] = sum c_k E[x^k].
    expected = numpy.zeros(poly.shape)
    for key, moment in zip(poly.keys, moments):
        expected = expected + poly.values[key] * moment
    return expected
50 |
--------------------------------------------------------------------------------
/.github/workflows/pipe.yml:
--------------------------------------------------------------------------------
# Single CI pipeline: docs build, tests with coverage, formatting check,
# and (for tagged commits) wheel build + PyPI publish.
name: pipeline

on:
  pull_request:
    branches:
      - master
  push:
    branches:
      - master
    tags:
      - '*'

jobs:
  test:
    name: pipe
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2

      - name: Set up Python 3.12
        uses: actions/setup-python@v5
        with:
          python-version: '3.12'

      # pandoc is needed for the notebook-based documentation build.
      - name: Install dependencies
        run: |
          sudo apt update -y && sudo apt install -y pandoc
          python -m pip install --upgrade pip
          pip install -e .[dev]

      # -W turns Sphinx warnings into errors so docs regressions fail CI.
      - name: Check documentation
        run: |
          sphinx-build docs/ docs/.build -b html -v --color -T -W

      # NOTE(review): `coverage xml` writes coverage.xml by default and takes
      # its output path via `-o`; the positional `coverage.xml` argument below
      # looks unintended -- confirm against the coverage.py CLI docs.
      - name: "Run Tests"
        run: |
          coverage run -m pytest --doctest-modules chaospy/ tests/ README.rst
          coverage xml coverage.xml

      - name: "Check formatting"
        run: |
          black --check chaospy/

      - name: "Upload python coverage"
        uses: codecov/codecov-action@v2
        with:
          token: ${{ secrets.codecov_token }}
          files: coverage.xml
          flags: python
          fail_ci_if_error: true

      - name: "Build Wheels"
        run: python -m build

      # Publish to PyPI only when the ref is a tag.
      - uses: pypa/gh-action-pypi-publish@v1.4.2
        if: startsWith(github.ref, 'refs/tags/')
        with:
          user: __token__
          password: ${{ secrets.pypi_password }}

      # Fail if the build mutated any tracked files.
      - name: Verify clean directory
        run: git diff --exit-code
63 |
--------------------------------------------------------------------------------
/chaospy/descriptives/covariance.py:
--------------------------------------------------------------------------------
1 | """Covariance matrix."""
2 | import numpy
3 | import numpoly
4 |
5 | from .expected import E
6 | from .. import distributions
7 |
8 |
def Cov(poly, dist=None, **kws):
    """
    Variance/Covariance matrix of a distribution or polynomial array.

    Args:
        poly (numpoly.ndpoly, Distribution) :
            Input to take covariance on. Must have `len(poly)>=2`.
        dist (Distribution) :
            Defines the space the covariance is taken on. It is ignored if
            `poly` is a distribution.

    Returns:
        (numpy.ndarray):
            Covariance matrix with shape ``poly.shape+poly.shape``.

    Examples:
        >>> dist = chaospy.MvNormal([0, 0], [[2, .5], [.5, 1]])
        >>> chaospy.Cov(dist)
        array([[2. , 0.5],
               [0.5, 1. ]])
        >>> q0, q1 = chaospy.variable(2)
        >>> poly = chaospy.polynomial([1, q0, q1, 10*q0*q1-1])
        >>> chaospy.Cov(poly, dist)
        array([[  0. ,   0. ,   0. ,   0. ],
               [  0. ,   2. ,   0.5,   0. ],
               [  0. ,   0.5,   1. ,   0. ],
               [  0. ,   0. ,   0. , 225. ]])
        >>> chaospy.Cov([1, 2, 3], dist)
        array([[0., 0., 0.],
               [0., 0., 0.],
               [0., 0., 0.]])

    """
    if dist is None:
        # Covariance of a distribution itself: use its variables as poly.
        dist, poly = poly, numpoly.variable(len(poly))
    poly = numpoly.set_dimensions(poly, len(dist))
    if poly.isconstant():
        # Constants do not co-vary with anything.
        return numpy.zeros((len(poly), len(poly)))
    # Center the polynomial and take the expectation of the outer product:
    # Cov = E[(p - E[p]) (p - E[p])^T].
    # Forward `**kws` to `E` for consistency with `Skew`/`Kurt`; previously
    # the keyword arguments were accepted but silently dropped.
    poly = poly - E(poly, dist, **kws)
    poly = numpoly.outer(poly, poly)
    return E(poly, dist, **kws)
50 |
--------------------------------------------------------------------------------
/chaospy/descriptives/quantity_of_interest.py:
--------------------------------------------------------------------------------
1 | """Constructs distributions for the quantity of interests."""
2 | from __future__ import division
3 | import numpy
4 | import chaospy
5 |
6 |
def QoI_Dist(poly, dist, sample=10000, **kws):
    """
    Constructs distributions for the quantity of interests.

    The function constructs a kernel density estimator (KDE) for each
    polynomial (poly) by sampling it. With the KDEs, distributions (Dists) are
    constructed. The Dists can be used for e.g. plotting probability density
    functions (PDF), or to make a second uncertainty quantification simulation
    with that newly generated Dists.

    Args:
        poly (numpoly.ndpoly):
            Polynomial of interest.
        dist (Distribution):
            Defines the space where the samples for the KDE is taken from the
            poly.
        sample (int):
            Number of samples used in estimation to construct the KDE.

    Returns:
        (Distribution):
            The constructed quantity of interest (QoI) distributions, where
            ``qoi_dists.shape==poly.shape``.
            NOTE(review): the implementation returns a single joint
            ``GaussianKDE`` built from the raveled polynomial -- confirm the
            shape claim above against ``GaussianKDE`` semantics.

    Examples:
        >>> dist = chaospy.Normal(0, 1)
        >>> x = chaospy.variable(1)
        >>> poly = chaospy.polynomial([x])
        >>> qoi_dist = chaospy.QoI_Dist(poly, dist)
        >>> values = qoi_dist.pdf([-0.75, 0., 0.75])
        >>> values.round(8)
        array([0.29143989, 0.39939823, 0.29531414])

    """
    # Flatten so the KDE is fitted over a 1-D sequence of polynomials.
    poly = chaospy.aspolynomial(poly).ravel()
    # Draw input-space samples; extra keywords are forwarded to `dist.sample`.
    samples = numpy.atleast_2d(dist.sample(sample, **kws))
    # Evaluate every polynomial at the samples and fit one Gaussian kernel
    # density estimate to the evaluations.
    qoi_dist = chaospy.GaussianKDE(poly(*samples))
    return qoi_dist
45 |
--------------------------------------------------------------------------------
/chaospy/distributions/collection/anglit.py:
--------------------------------------------------------------------------------
1 | """Anglit distribution."""
2 | import numpy
3 |
4 | from ..baseclass import SimpleDistribution, ShiftScaleDistribution
5 |
6 |
class anglit(SimpleDistribution):
    """Anglit distribution (unit form; see `Anglit` for the shifted/scaled wrapper)."""

    def __init__(self):
        super(anglit, self).__init__()

    def _pdf(self, x):
        # Density: f(x) = cos(2*x) on the support [-pi/4, pi/4].
        return numpy.cos(2 * x)

    def _cdf(self, x):
        # Cumulative: F(x) = sin(x + pi/4)**2.
        return numpy.sin(x + numpy.pi / 4) ** 2.0

    def _ppf(self, q):
        # Inverse of `_cdf`: x = arcsin(sqrt(q)) - pi/4.
        return numpy.arcsin(numpy.sqrt(q)) - numpy.pi / 4

    def _lower(self):
        # Left end of the support.
        return -numpy.pi / 4

    def _upper(self):
        # Right end of the support.
        return numpy.pi / 4
27 |
28 |
class Anglit(ShiftScaleDistribution):
    """
    Anglit distribution.

    Args:
        scale (float, Distribution):
            Scaling parameter
        shift (float, Distribution):
            Location parameter

    Examples:
        >>> distribution = chaospy.Anglit()
        >>> distribution
        Anglit()
        >>> uloc = numpy.linspace(0, 1, 6)
        >>> uloc
        array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
        >>> xloc = distribution.inv(uloc)
        >>> xloc.round(3)
        array([-0.785, -0.322, -0.101, 0.101, 0.322, 0.785])
        >>> numpy.allclose(distribution.fwd(xloc), uloc)
        True
        >>> distribution.pdf(xloc).round(3)
        array([0. , 0.8 , 0.98, 0.98, 0.8 , 0. ])
        >>> distribution.sample(4).round(3)
        array([ 0.156, -0.439, 0.561, -0.018])

    """

    def __init__(self, scale=1, shift=0):
        super(Anglit, self).__init__(dist=anglit(), scale=scale, shift=shift)
60 |
--------------------------------------------------------------------------------
/chaospy/distributions/sampler/sequences/hammersley.py:
--------------------------------------------------------------------------------
1 | """Create samples from the Hammersley set."""
2 | import numpy
3 |
4 | from .halton import create_halton_samples
5 |
6 |
def create_hammersley_samples(order, dim=1, burnin=-1, primes=()):
    """
    Create samples from the Hammersley set.

    The Hammersley set is equivalent to the Halton sequence, except for one
    dimension is replaced with a regular grid.

    Args:
        order (int):
            The order of the Hammersley sequence. Defines the number of samples.
        dim (int):
            The number of dimensions in the Hammersley sequence.
        burnin (int):
            Skip the first ``burnin`` samples. If negative, the maximum of
            ``primes`` is used.
        primes (tuple):
            The (non-)prime base to calculate values along each axis. If
            empty, growing prime values starting from 2 will be used.

    Returns:
        (numpy.ndarray):
            Hammersley set with ``shape == (dim, order)``.

    Examples:
        >>> distribution = chaospy.J(chaospy.Uniform(0, 1), chaospy.Uniform(0, 1))
        >>> samples = distribution.sample(3, rule="hammersley")
        >>> samples.round(4)
        array([[0.75 , 0.125, 0.625],
               [0.25 , 0.5 , 0.75 ]])
        >>> samples = distribution.sample(4, rule="hammersley")
        >>> samples.round(4)
        array([[0.75 , 0.125, 0.625, 0.375],
               [0.2 , 0.4 , 0.6 , 0.8 ]])

    """
    # All but the last dimension come from the Halton sequence; in the
    # one-dimensional case the Halton sequence is used as-is.
    halton_dims = max(dim - 1, 1)
    out = numpy.empty((dim, order), dtype=float)
    out[:halton_dims] = create_halton_samples(
        order=order, dim=halton_dims, burnin=burnin, primes=primes
    )
    if dim > 1:
        # The final dimension is a regular open grid on (0, 1).
        out[-1] = numpy.linspace(0, 1, order + 2)[1:-1]
    return out
49 |
--------------------------------------------------------------------------------
/tests/distributions/test_arithmetics.py:
--------------------------------------------------------------------------------
1 | """Testing basic distributions and their operations."""
2 | import numpy
3 | import chaospy
4 |
5 |
def test_dependent_density():
    """Assert that a manually created dependency structure holds."""
    upper_bound = chaospy.Exponential(1)
    conditional = chaospy.Uniform(lower=0, upper=upper_bound)
    joint = chaospy.J(upper_bound, conditional)
    # Second coordinate above the first is outside the support.
    assert joint.pdf([0.5, 0.6]) == 0
    assert joint.pdf([0.5, 0.4]) > 0
13 |
14 |
def test_distribution_addition(distribution):
    """Assert adding."""
    shifted_right = chaospy.E(distribution() + 2.0)
    shifted_left = chaospy.E(2.0 + distribution())
    expected = chaospy.E(distribution()) + 2.0
    numpy.testing.assert_allclose(shifted_right, shifted_left, rtol=1e-05, atol=1e-08)
    numpy.testing.assert_allclose(shifted_left, expected, rtol=1e-05, atol=1e-08)
22 |
23 |
def test_distribution_subtraction(distribution):
    """Test distribution subtraction."""
    shifted_right = chaospy.E(distribution() - 3.0)
    shifted_left = chaospy.E(3.0 - distribution())
    expected = chaospy.E(distribution()) - 3.0
    numpy.testing.assert_allclose(shifted_right, -shifted_left, rtol=1e-05, atol=1e-08)
    numpy.testing.assert_allclose(shifted_left, -expected, rtol=1e-05, atol=1e-08)
31 |
32 |
def test_distribution_inverse_bounds(distribution):
    """Assert the inverse transformation spans out inside the bounds."""
    dist = distribution()
    near_lower = dist.inv(0.001)
    near_upper = dist.inv(0.999)
    assert dist.lower < near_lower
    assert near_lower < near_upper
    assert near_upper < dist.upper
41 |
--------------------------------------------------------------------------------
/chaospy/distributions/collection/log_weibull.py:
--------------------------------------------------------------------------------
1 | """Gumbel or Log-Weibull distribution."""
2 | import numpy
3 |
4 | from ..baseclass import SimpleDistribution, ShiftScaleDistribution
5 |
6 |
class log_weibull(SimpleDistribution):
    """Gumbel or Log-Weibull distribution (unit form)."""

    def __init__(self):
        super(log_weibull, self).__init__()

    def _pdf(self, x):
        # Density: f(x) = exp(-x) * exp(-exp(-x)).
        ex = numpy.exp(-x)
        return ex * numpy.exp(-ex)

    def _cdf(self, x):
        # Cumulative: F(x) = exp(-exp(-x)).
        return numpy.exp(-numpy.exp(-x))

    def _ppf(self, q):
        # Inverse of `_cdf`: x = -log(-log(q)).
        return -numpy.log(-numpy.log(q))

    def _lower(self):
        # Truncated lower bound: F(-3.5) = exp(-exp(3.5)) ~ 4e-15, so the
        # probability mass cut off is numerically negligible.
        return -3.5

    def _upper(self):
        # Truncated upper bound: 1 - F(35) = 1 - exp(-exp(-35)) ~ 6e-16.
        return 35
28 |
29 |
class LogWeibull(ShiftScaleDistribution):
    """
    Gumbel or Log-Weibull distribution.

    Args:
        scale (float, Distribution):
            Scaling parameter
        shift (float, Distribution):
            Location parameter

    Examples:
        >>> distribution = chaospy.LogWeibull()
        >>> distribution
        LogWeibull()
        >>> uloc = numpy.linspace(0, 1, 6)
        >>> uloc
        array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
        >>> xloc = distribution.inv(uloc)
        >>> xloc.round(3)
        array([-3.5 , -0.476, 0.087, 0.672, 1.5 , 35. ])
        >>> numpy.allclose(distribution.fwd(xloc), uloc)
        True
        >>> distribution.pdf(xloc).round(3)
        array([0. , 0.322, 0.367, 0.306, 0.179, 0. ])
        >>> distribution.sample(4).round(3)
        array([ 0.855, -0.771, 2.976, 0.316])

    """

    def __init__(self, scale=1, shift=0):
        super(LogWeibull, self).__init__(
            dist=log_weibull(),
            scale=scale,
            shift=shift,
        )
65 |
--------------------------------------------------------------------------------
/chaospy/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Uncertainty Quantification Toolbox
3 | ==================================
4 |
5 | This module contains tools for performing uncertainty quantification of models.
6 | """
7 | import logging
8 | import os
9 |
10 | try: # Python < 3.10
11 | from importlib_metadata import version
12 | except ImportError:
13 | from importlib.metadata import version
14 |
15 | from numpoly import *
16 |
17 | import chaospy.descriptives
18 | import chaospy.distributions
19 | import chaospy.expansion
20 | import chaospy.spectral
21 | import chaospy.quadrature
22 | import chaospy.saltelli
23 | import chaospy.regression
24 | import chaospy.recurrence
25 |
26 | from chaospy.distributions import *
27 | from chaospy.expansion import *
28 | from chaospy.spectral import *
29 | from chaospy.quadrature import *
30 | from chaospy.saltelli import *
31 | from chaospy.descriptives import *
32 | from chaospy.regression import *
33 | from chaospy.external import *
34 | from chaospy.recurrence import *
35 |
36 | __version__ = version("chaospy")
37 |
38 |
def configure_logging():
    """Set up file- and stream-based logging for chaospy and numpoly."""
    # The full debug log goes to a file: CHAOSPY_LOGPATH if set, otherwise
    # the null device (discarded).
    logging.basicConfig(
        level=logging.DEBUG,
        filename=os.environ.get("CHAOSPY_LOGPATH", os.devnull),
        filemode="w",
    )
    # The console handler only shows warnings unless CHAOSPY_DEBUG=1.
    debug_enabled = os.environ.get("CHAOSPY_DEBUG", "") == "1"
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(logging.DEBUG if debug_enabled else logging.WARNING)

    for logger_name in ("chaospy", "numpoly"):
        logging.getLogger(logger_name).addHandler(stream_handler)


configure_logging()
56 |
57 |
class StochasticallyDependentError(ValueError):
    """Error related to invalid handling of stochastically dependent variables."""
60 |
61 |
class UnsupportedFeature(NotImplementedError):
    """Error raised when a requested feature is not supported."""
64 |
--------------------------------------------------------------------------------
/chaospy/expansion/gegenbauer.py:
--------------------------------------------------------------------------------
1 | import numpy
2 | import chaospy
3 |
4 |
def gegenbauer(
    order,
    alpha,
    lower=-1,
    upper=1,
    physicist=False,
    normed=False,
    retall=False,
):
    """
    Gegenbauer polynomials.

    Args:
        order (int):
            The polynomial order.
        alpha (float):
            Gegenbauer shape parameter.
        lower (float):
            Lower bound for the integration interval.
        upper (float):
            Upper bound for the integration interval.
        physicist (bool):
            Use physicist weights instead of probabilist.
        normed (bool):
            If True, divide the polynomials by their norms so the returned
            norms become 1.
        retall (bool):
            If True, return the norms alongside the polynomials.

    Examples:
        >>> polynomials, norms = chaospy.expansion.gegenbauer(4, 1, retall=True)
        >>> polynomials
        polynomial([1.0, q0, q0**2-0.25, q0**3-0.5*q0, q0**4-0.75*q0**2+0.0625])
        >>> norms
        array([1.        , 0.25      , 0.0625    , 0.015625  , 0.00390625])
        >>> chaospy.expansion.gegenbauer(3, 1, physicist=True)
        polynomial([1.0, 2.0*q0, 4.0*q0**2-0.5, 8.0*q0**3-2.0*q0])
        >>> chaospy.expansion.gegenbauer(3, 1, lower=0.5, upper=1.5, normed=True).round(3)
        polynomial([1.0, 4.0*q0-4.0, 16.0*q0**2-32.0*q0+15.0,
                    64.0*q0**3-192.0*q0**2+184.0*q0-56.0])

    """
    multiplier = 1
    if physicist:
        # Physicist convention rescales each recurrence step.
        multiplier = numpy.arange(1, order + 1)
        multiplier = 2 * (multiplier + alpha - 1) / multiplier
    _, [polynomials], [norms] = chaospy.recurrence.analytical_stieltjes(
        order,
        chaospy.Beta(alpha + 0.5, alpha + 0.5, lower, upper),
        multiplier=multiplier,
    )
    if normed:
        polynomials = chaospy.true_divide(polynomials, numpy.sqrt(norms))
        norms[:] = 1.0
    return (polynomials, norms) if retall else polynomials
55 |
--------------------------------------------------------------------------------
/tests/distributions/test_baseclass.py:
--------------------------------------------------------------------------------
1 | """Test basic properties with the Dist baseclass."""
2 | import pytest
3 | import chaospy
4 |
5 |
def test_distribution_exclusion():
    """
    Check if illegal reuse of dependencies raises errors correctly.

    Dependencies transformed in a non-bijective way can not be reused. For
    example, here a truncation of a distribution can not be use together with
    said distribution without truncation.
    """
    base = chaospy.Uniform(-1, 1)
    truncated = chaospy.Trunc(base, 0)
    with pytest.raises(chaospy.StochasticallyDependentError):
        chaospy.J(base, truncated)
18 |
19 |
def test_incomplete_stochastic_dependency():
    """
    Check if dangling stochastic dependency raises errors correctly.

    Many operators requires that the number of underlying distributions
    is the same as the length of the stochastic vector.
    """
    parent = chaospy.Uniform(0, 1)
    child = chaospy.Normal(parent, 1)
    for operation, argument in [
        (child.pdf, 0),
        (child.fwd, 0),
        (child.cdf, 0),
        (child.inv, 0.5),
    ]:
        with pytest.raises(chaospy.StochasticallyDependentError):
            operation(argument)
37 |
38 |
def test_underdefined_distribution():
    """
    Check if under-defined probability distributions raises errors correctly.

    The number of underlying distribution components in a stochastic vector
    must always be at least as large as the length of the vector.
    """
    with pytest.raises(chaospy.StochasticallyDependentError):
        chaospy.Add(2, 2)
    uniform = chaospy.Uniform(-1, 1)
    with pytest.raises(chaospy.StochasticallyDependentError):
        chaospy.J(uniform, uniform)
51 |
--------------------------------------------------------------------------------
/chaospy/descriptives/kurtosis.py:
--------------------------------------------------------------------------------
1 | """Kurtosis operator."""
2 | import numpy
3 | import numpoly
4 |
5 | from .. import distributions
6 | from .expected import E
7 | from .standard_deviation import Std
8 |
9 |
def Kurt(poly, dist=None, fisher=True, **kws):
    """
    The fourth order statistical moment Kurtosis.

    Element by element 4th order statistics of a distribution or polynomial.

    Args:
        poly (numpoly.ndpoly, Distribution):
            Input to take kurtosis on.
        dist (Distribution):
            Defines the space the kurtosis is taken on. It is ignored if
            ``poly`` is a distribution.
        fisher (bool):
            If True, Fisher's definition is used (Normal -> 0.0). If False,
            Pearson's definition is used (normal -> 3.0)

    Returns:
        (numpy.ndarray):
            Element for element kurtosis along ``poly``, where
            ``kurtosis.shape==poly.shape``.

    Examples:
        >>> dist = chaospy.J(chaospy.Gamma(1, 1), chaospy.Normal(0, 2))
        >>> chaospy.Kurt(dist).round(4)
        array([6., 0.])
        >>> chaospy.Kurt(dist, fisher=False).round(4)
        array([9., 3.])
        >>> q0, q1 = chaospy.variable(2)
        >>> poly = chaospy.polynomial([1, q0, q1, 10*q0*q1-1])
        >>> chaospy.Kurt(poly, dist).round(4)
        array([nan, 6., 0., 15.])
        >>> chaospy.Kurt(4., dist)
        array(nan)

    """
    # Fisher's definition subtracts the normal-distribution baseline of 3.
    adjust = 3 if fisher else 0

    if dist is None:
        # Kurtosis of a distribution itself: use its variables as poly.
        dist, poly = poly, numpoly.variable(len(poly))
    poly = numpoly.set_dimensions(poly, len(dist))
    if poly.isconstant():
        # Constants carry no variation, so kurtosis is undefined (NaN).
        return numpy.full(poly.shape, numpy.nan)

    # Standardize, then take the fourth raw moment of the result.
    poly = poly - E(poly, dist, **kws)
    poly = numpoly.true_divide(poly, Std(poly, dist, **kws))
    return numpy.asarray(E(poly**4, dist, **kws) - adjust)
56 |
--------------------------------------------------------------------------------
/chaospy/distributions/sampler/sequences/van_der_corput.py:
--------------------------------------------------------------------------------
1 | """Create Van Der Corput low discrepancy sequence samples."""
2 | from __future__ import division
3 | import numpy
4 |
5 |
def create_van_der_corput_samples(idx, number_base=2):
    """
    Create Van Der Corput low discrepancy sequence samples.

    A van der Corput sequence is an example of the simplest one-dimensional
    low-discrepancy sequence over the unit interval; it was first described in
    1935 by the Dutch mathematician J. G. van der Corput. It is constructed by
    reversing the base-n representation of the sequence of natural numbers
    :math:`(1, 2, 3, ...)`.

    In practice, use Halton sequence instead of Van Der Corput, as it is the
    same, but generalized to work in multiple dimensions.

    Args:
        idx (int, numpy.ndarray):
            The index of the sequence. If array is provided, all values in
            array is returned.
        number_base (int):
            The numerical base from where to create the samples from.

    Returns:
        (numpy.ndarray):
            Van der Corput samples.

    Examples:
        #>>> chaospy.create_van_der_corput_samples(range(11), number_base=10)
        #array([0.1 , 0.2 , 0.3 , 0.4 , 0.5 , 0.6 , 0.7 , 0.8 , 0.9 , 0.01, 0.11])
        #>>> chaospy.create_van_der_corput_samples(range(8), number_base=2)
        #array([0.5 , 0.25 , 0.75 , 0.125 , 0.625 , 0.375 , 0.875 , 0.0625])

    """
    assert number_base > 1

    # The sequence is 1-indexed: sample k is built from the digits of k+1.
    indices = numpy.asarray(idx).flatten() + 1
    samples = numpy.zeros(len(indices), dtype=float)

    # Peel off base-`number_base` digits, least significant first, and place
    # each behind the radix point (digit reversal).
    denominator = float(number_base)
    remaining = numpy.ones(len(indices), dtype=bool)
    while numpy.any(remaining):
        samples[remaining] += (indices[remaining] % number_base) / denominator
        indices //= number_base
        denominator *= number_base
        remaining = indices > 0
    return samples
50 |
--------------------------------------------------------------------------------
/chaospy/distributions/collection/fisk.py:
--------------------------------------------------------------------------------
1 | """Fisk or Log-logistic distribution."""
2 | from ..baseclass import SimpleDistribution, ShiftScaleDistribution
3 |
4 |
class fisk(SimpleDistribution):
    """Fisk or Log-logistic distribution."""

    def __init__(self, c=1):
        super(fisk, self).__init__(dict(c=c))

    def _pdf(self, x, c):
        # f(x) = c*x^(c-1) / (1+x^c)^2
        numerator = c * x ** (c - 1.0)
        return numerator / (1 + x**c) ** 2

    def _cdf(self, x, c):
        # F(x) = 1 / (1+x^-c)
        odds = x**-c
        return 1.0 / (1 + odds)

    def _ppf(self, q, c):
        # Invert F: x = (1/q - 1)^(-1/c)
        inverse_q = q ** (-1.0)
        return (inverse_q - 1) ** (-1.0 / c)

    def _lower(self, c):
        return 0.0

    def _upper(self, c):
        # Quantile where the survival function reaches ~1e-12.
        return 1e12 ** (1.0 / c)
25 |
26 |
class Fisk(ShiftScaleDistribution):
    """
    Fisk or Log-logistic distribution.

    Wraps the standard log-logistic variate with an affine
    shift-and-scale transformation.

    Args:
        shape (float, Distribution):
            Shape parameter
        scale (float, Distribution):
            Scaling parameter
        shift (float, Distribution):
            Location parameter

    Examples:
        >>> distribution = chaospy.Fisk(5)
        >>> distribution
        Fisk(5)
        >>> uloc = numpy.linspace(0, 1, 6)
        >>> uloc
        array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
        >>> xloc = distribution.inv(uloc)
        >>> xloc.round(3)
        array([ 0. , 0.758, 0.922, 1.084, 1.32 , 251.189])
        >>> numpy.allclose(distribution.fwd(xloc), uloc)
        True
        >>> distribution.pdf(xloc).round(3)
        array([0. , 1.056, 1.301, 1.107, 0.606, 0. ])
        >>> distribution.sample(4).round(3)
        array([1.135, 0.665, 1.804, 0.986])

    """

    def __init__(self, shape=1, scale=1, shift=0):
        super(Fisk, self).__init__(
            dist=fisk(shape),
            repr_args=[shape],
            scale=scale,
            shift=shift,
        )
65 |
--------------------------------------------------------------------------------
/chaospy/recurrence/jacobi.py:
--------------------------------------------------------------------------------
1 | """Convert three terms recurrence coefficients into quadrature rules."""
2 | import numpy
3 | import scipy.linalg
4 |
5 |
def coefficients_to_quadrature(coeffs):
    """
    Construct Gaussian quadrature abscissas and weights from three terms
    recurrence coefficients.

    Examples:
        >>> distribution = chaospy.Normal(0, 1)
        >>> coeffs, = chaospy.construct_recurrence_coefficients(4, distribution)
        >>> coeffs
        array([[0., 0., 0., 0., 0.],
               [1., 1., 2., 3., 4.]])
        >>> (abscissas,), (weights,) = chaospy.coefficients_to_quadrature(coeffs)
        >>> abscissas.round(4)
        array([-2.857 , -1.3556, -0.    ,  1.3556,  2.857 ])
        >>> weights.round(4)
        array([0.0113, 0.2221, 0.5333, 0.2221, 0.0113])
    """
    coeffs = numpy.asarray(coeffs, dtype=float)
    # A single 2xN coefficient pair is promoted to a one-element batch.
    if coeffs.ndim == 2:
        coeffs = coeffs.reshape(1, 2, -1)
    assert coeffs.ndim == 3, "shape %s not allowed" % coeffs.shape
    assert coeffs.shape[-1] >= 1

    abscissas = []
    weights = []
    for coeff in coeffs:

        # Negative betas or NaNs make the Jacobi matrix ill-defined.
        if numpy.any(numpy.isnan(coeff)) or numpy.any(coeff[1] < 0):
            raise numpy.linalg.LinAlgError(
                "Invalid recurrence coefficients can not be used for "
                "constructing Gaussian quadrature rule"
            )

        # Golub-Welsch: eigen-decompose the symmetric tridiagonal Jacobi
        # matrix stored in lower banded form.
        order = len(coeff[0])
        bands = numpy.zeros((2, order))
        bands[0] = coeff[0, :order]
        bands[1, :-1] = numpy.sqrt(coeff[1, 1:order])
        eigvals, eigvecs = scipy.linalg.eig_banded(bands, lower=True)

        # Nodes are the eigenvalues; weights the squared first components
        # of the eigenvectors, sorted by node location.
        ordering = numpy.argsort(eigvals.real)
        abscissas.append(eigvals.real[ordering])
        weights.append((eigvecs[0, :] ** 2)[ordering])

    return abscissas, weights
52 |
--------------------------------------------------------------------------------
/chaospy/distributions/collection/levy.py:
--------------------------------------------------------------------------------
1 | """Levy distribution."""
2 | import numpy
3 | from scipy import special
4 |
5 | from ..baseclass import SimpleDistribution, ShiftScaleDistribution
6 |
7 |
class levy(SimpleDistribution):
    """Levy distribution."""

    def __init__(self):
        super(levy, self).__init__()

    def _pdf(self, x):
        # Standard Levy density; the limiting value at x=0 is zero.
        density = 1 / numpy.sqrt(2 * numpy.pi * x) / x * numpy.exp(-1 / (2 * x))
        density[x == 0] = 0
        return density

    def _cdf(self, x):
        # F(x) = 2*(1 - Phi(1/sqrt(x))) where Phi is the normal CDF.
        tail = special.ndtr(1 / numpy.sqrt(x))
        return 2 * (1 - tail)

    def _ppf(self, q):
        # Invert via the normal quantile function.
        quantile = special.ndtri(1 - q / 2.0)
        return 1.0 / (quantile * quantile)

    def _lower(self):
        return 0.0

    def _upper(self):
        # Hard truncation point for the heavy upper tail.
        return 1e12
31 |
32 |
class Levy(ShiftScaleDistribution):
    """
    Levy distribution.

    Affine shift-and-scale wrapper around the standard Levy variate.

    Args:
        scale (float, Distribution):
            Scaling parameter
        shift (float, Distribution):
            Location parameter

    Examples:
        >>> distribution = chaospy.Levy()
        >>> distribution
        Levy()
        >>> uloc = numpy.linspace(0, 1, 6)
        >>> uloc
        array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
        >>> xloc = distribution.inv(uloc)
        >>> xloc[:5].round(3)
        array([ 0. , 0.609, 1.412, 3.636, 15.58 ])
        >>> distribution.upper
        array([1.e+12])
        >>> numpy.allclose(distribution.fwd(xloc), uloc)
        True
        >>> distribution.pdf(xloc).round(3)
        array([0. , 0.369, 0.167, 0.05 , 0.006, 0. ])
        >>> distribution.sample(4).round(3)
        array([ 4.965, 0.403, 257.22 , 2.025])

    """

    def __init__(self, scale=1, shift=0):
        super(Levy, self).__init__(dist=levy(), shift=shift, scale=scale)
70 |
--------------------------------------------------------------------------------
/chaospy/recurrence/chebyshev.py:
--------------------------------------------------------------------------------
1 | """Modified Chebyshev algorithm."""
2 | from __future__ import division
3 |
4 | import numpy
5 |
6 |
def modified_chebyshev(moments):
    r"""
    Given the first 2N raw statistical moments, this method uses the modified
    Chebyshev algorithm for computing the associated recurrence coefficients.

    Args:
        moments (numpy.ndarray):
            Raw statistical moments from calculating the integrals
            :math:`\int x^k p(x) dx` for :math:`k=0,\dots,2N`.

    Examples:
        >>> dist = chaospy.Normal()
        >>> modified_chebyshev(dist.mom(numpy.arange(8)))
        array([[0., 0., 0., 0.],
               [1., 1., 2., 3.]])
        >>> dist = chaospy.Uniform(-1, 1)
        >>> modified_chebyshev(dist.mom(numpy.arange(8)))
        array([[0.        , 0.        , 0.        , 0.        ],
               [1.        , 0.33333333, 0.26666667, 0.25714286]])
    """
    moments = numpy.asarray(moments, dtype=float).flatten()
    order = len(moments)
    # The algorithm consumes moments in pairs: one (alpha, beta) per pair.
    assert order % 2 == 0

    # Three-row rolling buffer for the sigma recursion.
    sigma = numpy.zeros((3, order))
    sigma[0] = moments
    coeffs = [(sigma[0, 1] / sigma[0, 0], sigma[0, 0])]

    for k in range(1, order // 2):
        row = sigma[k % 3]
        prev = sigma[(k - 1) % 3]
        prev2 = sigma[(k - 2) % 3]
        alpha, beta = coeffs[k - 1]
        # Advance the sigma table one level using the previous coefficients.
        row[k : order - k] = (
            prev[k + 1 : order - k + 1]
            - alpha * prev[k : order - k]
            - beta * prev2[k : order - k]
        )
        coeffs.append(
            (
                row[k + 1] / row[k] - prev[k] / prev[k - 1],
                row[k] / prev[k - 1],
            )
        )

    # Stack as a 2xN array: first row alphas, second row betas.
    return numpy.array(coeffs).reshape(-1, 2).T
53 |
--------------------------------------------------------------------------------
/chaospy/expansion/hermite.py:
--------------------------------------------------------------------------------
1 | """Hermite orthogonal polynomial expansion."""
2 | import numpy
3 | import chaospy
4 |
5 |
def hermite(
    order,
    mu=0.0,
    sigma=1.0,
    physicist=False,
    normed=False,
    retall=False,
):
    """
    Hermite orthogonal polynomial expansion.

    Args:
        order (int):
            The quadrature order.
        mu (float):
            Non-centrality parameter.
        sigma (float):
            Scale parameter.
        physicist (bool):
            Use physicist weights instead of probabilist variant.
        normed (bool):
            If True orthonormal polynomials will be used.
        retall (bool):
            If true return numerical stabilized norms as well. Roughly the same
            as ``cp.E(orth**2, dist)``.

    Returns:
        (numpoly.ndpoly, numpy.ndarray):
            Hermite polynomial expansion. Norms of the orthogonal
            expansion on the form ``E(orth**2, dist)``.

    Examples:
        >>> polynomials, norms = chaospy.expansion.hermite(4, retall=True)
        >>> polynomials
        polynomial([1.0, q0, q0**2-1.0, q0**3-3.0*q0, q0**4-6.0*q0**2+3.0])
        >>> norms
        array([ 1., 1., 2., 6., 24.])
        >>> chaospy.expansion.hermite(3, physicist=True)
        polynomial([1.0, 2.0*q0, 4.0*q0**2-2.0, 8.0*q0**3-12.0*q0])
        >>> chaospy.expansion.hermite(3, sigma=2.5, normed=True).round(3)
        polynomial([1.0, 0.4*q0, 0.113*q0**2-0.707, 0.026*q0**3-0.49*q0])

    """
    # The physicist convention doubles the recurrence multiplier.
    _, [polynomials], [norms] = chaospy.recurrence.analytical_stieltjes(
        order, chaospy.Normal(mu, sigma), multiplier=2 if physicist else 1
    )
    if normed:
        # Rescale so every term has unit norm under the weight measure.
        polynomials = chaospy.true_divide(polynomials, numpy.sqrt(norms))
        norms[:] = 1.0
    if retall:
        return polynomials, norms
    return polynomials
57 |
--------------------------------------------------------------------------------
/chaospy/distributions/collection/bradford.py:
--------------------------------------------------------------------------------
1 | """Bradford distribution."""
2 | import numpy
3 |
4 | from ..baseclass import SimpleDistribution, LowerUpperDistribution
5 |
6 |
class bradford(SimpleDistribution):
    """Standard Bradford distribution."""

    def __init__(self, c=1):
        super(bradford, self).__init__(dict(c=c))

    def _pdf(self, x, c):
        # f(x) = c / ((c*x+1) * log(1+c)) on the unit interval.
        density = c / (c * x + 1.0)
        return density / numpy.log(1.0 + c)

    def _cdf(self, x, c):
        # F(x) = log(1+c*x) / log(1+c)
        numerator = numpy.log(1.0 + c * x)
        return numerator / numpy.log(c + 1.0)

    def _ppf(self, q, c):
        # Invert F: x = ((1+c)^q - 1) / c
        growth = (1.0 + c) ** q
        return (growth - 1) / c

    def _lower(self, c):
        return 0.0

    def _upper(self, c):
        return 1.0
27 |
28 |
class Bradford(LowerUpperDistribution):
    """
    Bradford distribution.

    The standard Bradford variate mapped onto the interval
    ``[lower, upper]``.

    Args:
        shape (float, Distribution):
            Shape parameter
        lower (float, Distribution):
            Location of lower threshold
        upper (float, Distribution):
            Location of upper threshold

    Examples:
        >>> distribution = chaospy.Bradford(0.8, 4, 6)
        >>> distribution
        Bradford(0.8, lower=4, upper=6)
        >>> uloc = numpy.linspace(0, 1, 6)
        >>> uloc
        array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
        >>> xloc = distribution.inv(uloc)
        >>> xloc.round(3)
        array([4. , 4.312, 4.663, 5.057, 5.501, 6. ])
        >>> numpy.allclose(distribution.fwd(xloc), uloc)
        True
        >>> distribution.pdf(xloc).round(3)
        array([0.681, 0.605, 0.538, 0.478, 0.425, 0.378])
        >>> distribution.sample(4).round(3)
        array([5.171, 4.175, 5.87 , 4.819])

    """

    def __init__(self, shape=1, lower=0, upper=1):
        super(Bradford, self).__init__(
            dist=bradford(shape),
            repr_args=[shape],
            lower=lower,
            upper=upper,
        )
67 |
--------------------------------------------------------------------------------
/chaospy/distributions/collection/pareto1.py:
--------------------------------------------------------------------------------
1 | """Pareto type 1 distribution."""
2 | import numpy
3 | from scipy import special
4 |
5 | from ..baseclass import SimpleDistribution, ShiftScaleDistribution
6 |
7 |
class pareto1(SimpleDistribution):
    """Pareto type 1 distribution."""

    def __init__(self, b):
        super(pareto1, self).__init__(dict(b=b))

    def _pdf(self, x, b):
        # f(x) = b * x^(-b-1) for x >= 1.
        return b * x ** (-b - 1)

    def _cdf(self, x, b):
        # F(x) = 1 - x^-b
        return 1 - x**-b

    def _ppf(self, q, b):
        # Invert the survival function: x = (1-q)^(-1/b).
        return (1 - q) ** (-1.0 / b)

    def _lower(self, b):
        return 1.0

    def _upper(self, b):
        # Quantile where the survival function reaches ~1e-12.
        return 1e12 ** (1.0 / b)
28 |
29 |
class Pareto1(ShiftScaleDistribution):
    """
    Pareto type 1 distribution.

    Lower threshold at scale+loc and survival: x^-shape

    Args:
        shape (float, Distribution):
            Tail index parameter
        scale (float, Distribution):
            Scaling parameter
        shift (float, Distribution):
            Location parameter

    Examples:
        >>> distribution = chaospy.Pareto1(15)
        >>> distribution
        Pareto1(15)
        >>> uloc = numpy.linspace(0, 1, 6)
        >>> uloc
        array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
        >>> xloc = distribution.inv(uloc)
        >>> xloc.round(3)
        array([1. , 1.015, 1.035, 1.063, 1.113, 6.31 ])
        >>> numpy.allclose(distribution.fwd(xloc), uloc)
        True
        >>> distribution.pdf(xloc).round(3)
        array([15. , 11.823, 8.699, 5.644, 2.695, 0. ])
        >>> distribution.sample(4).round(3)
        array([1.073, 1.008, 1.222, 1.045])

    """

    def __init__(self, shape=1, scale=1, shift=0):
        super(Pareto1, self).__init__(
            dist=pareto1(shape),
            repr_args=[shape],
            scale=scale,
            shift=shift,
        )
70 |
--------------------------------------------------------------------------------
/chaospy/distributions/collection/gompertz.py:
--------------------------------------------------------------------------------
1 | """Gompertz distribution."""
2 | import numpy
3 | from scipy import special
4 |
5 | from ..baseclass import SimpleDistribution, ShiftScaleDistribution
6 |
7 |
class gompertz(SimpleDistribution):
    """Gompertz distribution."""

    def __init__(self, c):
        super(gompertz, self).__init__(dict(c=c))

    def _pdf(self, x, c):
        # f(x) = c * e^x * exp(-c*(e^x - 1))
        growth = numpy.exp(x)
        return c * growth * numpy.exp(-c * (growth - 1))

    def _cdf(self, x, c):
        # F(x) = 1 - exp(-c*(e^x - 1))
        return 1.0 - numpy.exp(-c * (numpy.exp(x) - 1))

    def _ppf(self, q, c):
        # Invert F: x = log(1 - log(1-q)/c)
        scaled_log = 1.0 / c * numpy.log(1 - q)
        return numpy.log(1 - scaled_log)

    def _lower(self, c):
        return 0.0

    def _upper(self, c):
        # Point where the CDF is numerically indistinguishable from one.
        return numpy.log(1 + 27.7 / c)
29 |
30 |
class Gompertz(ShiftScaleDistribution):
    """
    Gompertz distribution.

    Affine shift-and-scale wrapper around the standard Gompertz variate.

    Args:
        shape (float, Distribution):
            Shape parameter
        scale (float, Distribution):
            Scaling parameter
        shift (float, Distribution):
            Location parameter

    Examples:
        >>> distribution = chaospy.Gompertz(1.5)
        >>> distribution
        Gompertz(1.5)
        >>> uloc = numpy.linspace(0, 1, 6)
        >>> uloc
        array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
        >>> xloc = distribution.inv(uloc)
        >>> xloc.round(3)
        array([0. , 0.139, 0.293, 0.477, 0.729, 2.969])
        >>> numpy.allclose(distribution.fwd(xloc), uloc)
        True
        >>> distribution.pdf(xloc).round(3)
        array([1.5 , 1.379, 1.206, 0.967, 0.622, 0. ])
        >>> distribution.sample(4).round(3)
        array([0.535, 0.078, 1.099, 0.364])

    """

    def __init__(self, shape, scale=1, shift=0):
        super(Gompertz, self).__init__(
            dist=gompertz(shape),
            repr_args=[shape],
            scale=scale,
            shift=shift,
        )
69 |
--------------------------------------------------------------------------------
/chaospy/distributions/collection/pareto2.py:
--------------------------------------------------------------------------------
1 | """Pareto type 2 distribution."""
2 | from ..baseclass import SimpleDistribution, ShiftScaleDistribution
3 |
4 |
class pareto2(SimpleDistribution):
    """Pareto type 2 distribution."""

    def __init__(self, c):
        super(pareto2, self).__init__(dict(c=c))

    def _pdf(self, x, c):
        # f(x) = c / (1+x)^(c+1) for x >= 0.
        base = 1.0 + x
        return c * 1.0 / base ** (c + 1.0)

    def _cdf(self, x, c):
        # F(x) = 1 - (1+x)^-c
        survival = 1.0 / (1.0 + x) ** c
        return 1.0 - survival

    def _ppf(self, q, c):
        # Invert the survival function: x = (1-q)^(-1/c) - 1.
        return (1.0 - q) ** (-1.0 / c) - 1

    def _lower(self, c):
        return 0.0

    def _upper(self, c):
        # Quantile where the survival function reaches ~1e-12.
        return 1e-12 ** (-1.0 / c) - 1
25 |
26 |
class Pareto2(ShiftScaleDistribution):
    """
    Pareto type 2 distribution.

    Also known as Lomax distribution (for shift=0).

    Lower threshold at shift and survival: (1+x)^-shape.

    Args:
        shape (float, Distribution):
            Shape parameter
        scale (float, Distribution):
            Scaling parameter
        shift (float, Distribution):
            Location parameter

    Examples:
        >>> distribution = chaospy.Pareto2(15)
        >>> distribution
        Pareto2(15)
        >>> uloc = numpy.linspace(0, 1, 6)
        >>> uloc
        array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
        >>> xloc = distribution.inv(uloc)
        >>> xloc.round(3)
        array([0. , 0.015, 0.035, 0.063, 0.113, 5.31 ])
        >>> numpy.allclose(distribution.fwd(xloc), uloc)
        True
        >>> distribution.pdf(xloc).round(3)
        array([15. , 11.823, 8.699, 5.644, 2.695, 0. ])
        >>> distribution.sample(4).round(3)
        array([0.073, 0.008, 0.222, 0.045])

    """

    def __init__(self, shape=1, scale=1, shift=0):
        super(Pareto2, self).__init__(
            dist=pareto2(shape),
            scale=scale,
            shift=shift,
            repr_args=[shape],
        )
69 |
--------------------------------------------------------------------------------
/chaospy/distributions/collection/log_gamma.py:
--------------------------------------------------------------------------------
1 | """Log-gamma distribution."""
2 | import numpy
3 | from scipy import special
4 |
5 | from ..baseclass import SimpleDistribution, ShiftScaleDistribution
6 |
7 |
class log_gamma(SimpleDistribution):
    """Log-gamma distribution."""

    def __init__(self, c):
        super(log_gamma, self).__init__(dict(c=c))

    def _pdf(self, x, c):
        # f(x) = exp(c*x - e^x) / Gamma(c), written in log-space for stability.
        log_density = c * x - numpy.exp(x) - special.gammaln(c)
        return numpy.exp(log_density)

    def _cdf(self, x, c):
        # Regularized lower incomplete gamma of the exponentiated variate.
        return special.gammainc(c, numpy.exp(x))

    def _ppf(self, q, c):
        # Quantile of the underlying gamma variate, mapped through log.
        return numpy.log(special.gammaincinv(c, q))

    def _lower(self, c):
        # Quantile at probability 1e-15 serves as the numerical lower bound.
        return numpy.log(special.gammaincinv(c, 1e-15))

    def _upper(self, c):
        # Quantile at probability 1-1e-15 serves as the numerical upper bound.
        return numpy.log(special.gammaincinv(c, 1 - 1e-15))
28 |
29 |
class LogGamma(ShiftScaleDistribution):
    """
    Log-gamma distribution.

    Affine shift-and-scale wrapper around the standard log-gamma variate.

    Args:
        shape (float, Distribution):
            Shape parameter
        scale (float, Distribution):
            Scaling parameter
        shift (float, Distribution):
            Location parameter

    Examples:
        >>> distribution = chaospy.LogGamma(1.5)
        >>> distribution
        LogGamma(1.5)
        >>> uloc = numpy.linspace(0, 1, 6)
        >>> uloc
        array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
        >>> xloc = distribution.inv(uloc)
        >>> xloc.round(3)
        array([-22.836, -0.688, -0.068, 0.387, 0.842, 3.597])
        >>> numpy.allclose(distribution.fwd(xloc), uloc)
        True
        >>> distribution.pdf(xloc).round(3)
        array([0. , 0.243, 0.4 , 0.462, 0.392, 0. ])
        >>> distribution.sample(4).round(3)
        array([ 0.503, -1.125, 1.364, 0.128])

    """

    def __init__(self, shape=1, scale=1, shift=0):
        super(LogGamma, self).__init__(
            dist=log_gamma(shape),
            repr_args=[shape],
            scale=scale,
            shift=shift,
        )
68 |
--------------------------------------------------------------------------------
/chaospy/distributions/collection/logistic.py:
--------------------------------------------------------------------------------
1 | """Generalized logistic type 1 distribution."""
2 | import numpy
3 |
4 | from ..baseclass import SimpleDistribution, ShiftScaleDistribution
5 |
6 |
class logistic(SimpleDistribution):
    """Generalized logistic type 1 distribution."""

    def __init__(self, c=1):
        super(logistic, self).__init__(dict(c=c))

    def _pdf(self, x, c):
        # f(x) = e^-x / (1+e^-x)^(c+1)
        decay = numpy.e**-x
        return decay / (1 + decay) ** (c + 1)

    def _cdf(self, x, c):
        # F(x) = (1+e^-x)^-c
        return (1 + numpy.e**-x) ** -c

    def _ppf(self, q, c):
        # Invert F: x = -log(q^(-1/c) - 1)
        inverse_power = q ** (-1.0 / c)
        return -numpy.log(inverse_power - 1)

    def _lower(self, c):
        # Quantile at probability 1e-12 serves as the numerical lower bound.
        return -numpy.log(1e-12 ** (-1.0 / c) - 1)

    def _upper(self, c):
        # Quantile at probability 1-1e-12 serves as the numerical upper bound.
        return -numpy.log((1 - 1e-12) ** (-1.0 / c) - 1)
27 |
28 |
class Logistic(ShiftScaleDistribution):
    """
    Generalized logistic type 1 distribution.
    Sech squared distribution.

    Args:
        skew (float, Distribution):
            Shape parameter
        shift (float, Distribution):
            Location parameter
        scale (float, Distribution):
            Scale parameter

    Examples:
        >>> distribution = chaospy.Logistic(15)
        >>> distribution
        Logistic(15)
        >>> uloc = numpy.linspace(0, 1, 6)
        >>> uloc
        array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
        >>> xloc = distribution.inv(uloc)
        >>> xloc.round(3)
        array([-1.67 , 2.178, 2.765, 3.363, 4.201, 30.34 ])
        >>> numpy.allclose(distribution.fwd(xloc), uloc)
        True
        >>> distribution.pdf(xloc).round(3)
        array([0. , 0.02 , 0.024, 0.02 , 0.012, 0. ])
        >>> distribution.sample(4).round(3)
        array([3.549, 1.864, 5.682, 2.999])

    """

    def __init__(self, skew=1, shift=0, scale=1):
        super(Logistic, self).__init__(
            dist=logistic(skew),
            repr_args=[skew],
            scale=scale,
            shift=shift,
        )
68 |
--------------------------------------------------------------------------------
/chaospy/quadrature/laguerre.py:
--------------------------------------------------------------------------------
1 | """Generalized Gauss-Laguerre quadrature rule."""
2 | import numpy
3 | from scipy.special import gamma
4 | import chaospy
5 |
6 | from .hypercube import hypercube_quadrature
7 |
8 |
def laguerre(order, alpha=0.0, physicist=False):
    r"""
    Generalized Gauss-Laguerre quadrature rule.

    Compute the sample points and weights for Gauss-Laguerre quadrature. The
    sample points are the roots of the nth degree Laguerre polynomial. These
    sample points and weights correctly integrate polynomials of degree
    :math:`2N-1` or less.

    Gaussian quadrature comes in two variants: physicist and probabilist. For
    Gauss-Laguerre physicist means a weight function :math:`x^\alpha e^{-x}`
    and weights that sum to :math:`\Gamma(\alpha+1)`, and probabilist means a
    weight function is :math:`x^\alpha e^{-x}` and sum to 1.

    Args:
        order (int):
            The quadrature order.
        alpha (float):
            Shape parameter. Defaults to non-generalized Laguerre if 0.
        physicist (bool):
            Use physicist weights instead of probabilist.

    Returns:
        abscissas (numpy.ndarray):
            The ``order+1`` quadrature points for where to evaluate the model
            function with.
        weights (numpy.ndarray):
            The quadrature weights associated with each abscissas.

    Examples:
        >>> abscissas, weights = chaospy.quadrature.laguerre(2)
        >>> abscissas
        array([[0.41577456, 2.29428036, 6.28994508]])
        >>> weights
        array([0.71109301, 0.27851773, 0.01038926])

    See also:
        :func:`chaospy.quadrature.gaussian`

    """
    order = int(order)
    # The probabilist rule is the Gaussian quadrature of the Gamma(alpha+1)
    # distribution, whose recurrence coefficients are known analytically.
    coefficients = chaospy.construct_recurrence_coefficients(
        order=order, dist=chaospy.Gamma(alpha + 1)
    )
    [abscissas], [weights] = chaospy.coefficients_to_quadrature(coefficients)
    if physicist:
        # Undo the probability normalization so weights sum to Gamma(alpha+1).
        weights = weights * gamma(alpha + 1)
    return abscissas[numpy.newaxis], weights
56 |
--------------------------------------------------------------------------------
/chaospy/quadrature/grid.py:
--------------------------------------------------------------------------------
1 | """
2 | Generate the quadrature abscissas and weights for simple grid.
3 |
4 | Mostly available to ensure that discrete distributions works along side
5 | continuous ones.
6 | """
7 | import numpy
8 | import chaospy
9 |
10 | from .hypercube import hypercube_quadrature
11 |
12 |
def grid(order, domain=(0, 1), growth=False, segments=1):
    """
    Generate the quadrature abscissas and weights for simple grid.

    Args:
        order (int, numpy.ndarray):
            Quadrature order.
        domain (chaospy.distributions.baseclass.Distribution, numpy.ndarray):
            Either distribution or bounding of interval to integrate over.
        growth (bool):
            If True, map each positive order to ``3**order-1`` so grid sizes
            grow exponentially with the order.
        segments (int):
            Number of segments; passed through to the hypercube backend.

    Returns:
        (numpy.ndarray, numpy.ndarray):
            The quadrature points and weights. The points are
            equi-spaced grid on the interior of the domain bounds.
            The weights are all equal to `1/len(weights[0])`.

    Example:
        >>> abscissas, weights = chaospy.quadrature.grid(4, chaospy.Uniform(-1, 1))
        >>> abscissas.round(4)
        array([[-0.8, -0.4, 0. , 0.4, 0.8]])
        >>> weights.round(4)
        array([0.2, 0.2, 0.2, 0.2, 0.2])
        >>> abscissas, weights = chaospy.quadrature.grid([1, 1])
        >>> abscissas.round(4)
        array([[0.25, 0.25, 0.75, 0.75],
               [0.25, 0.75, 0.25, 0.75]])
        >>> weights.round(4)
        array([0.25, 0.25, 0.25, 0.25])

    """
    order = numpy.asarray(order)
    # Exponential growth rule: order -> 3**order-1, with order zero kept as
    # a single point.
    grown = numpy.where(order > 0, 3**order - 1, 0)
    order = numpy.where(growth, grown, order)
    return hypercube_quadrature(
        quad_func=grid_simple,
        order=order,
        domain=domain,
        segments=segments,
    )
51 |
52 |
def grid_simple(order):
    """
    Backend for grid quadrature.

    Use :func:`chaospy.quadrature.grid` instead.
    """
    order = int(order)
    count = order + 1
    # Midpoints of `count` equal-width cells on the unit interval,
    # each carrying the same weight.
    abscissas = numpy.linspace(0, 1, 2 * count + 1)[1::2]
    weights = numpy.full(count, 1.0 / count)
    return abscissas, weights
63 |
--------------------------------------------------------------------------------
/chaospy/distributions/collection/folded_normal.py:
--------------------------------------------------------------------------------
1 | """Folded normal distribution."""
2 | import numpy
3 | from scipy import special
4 |
5 | from ..baseclass import SimpleDistribution, ShiftScaleDistribution
6 |
7 |
class folded_normal(SimpleDistribution):
    """Folded normal distribution."""

    def __init__(self, c=1):
        super(folded_normal, self).__init__(dict(c=c))

    def _pdf(self, x, c):
        # f(x) = sqrt(2/pi) * cosh(c*x) * exp(-(x^2+c^2)/2)
        envelope = numpy.exp(-(x * x + c * c) / 2.0)
        return numpy.sqrt(2.0 / numpy.pi) * numpy.cosh(c * x) * envelope

    def _cdf(self, x, c):
        # Sum of the two normal tails folded onto the positive axis.
        return special.ndtr(x - c) + special.ndtr(x + c) - 1.0

    def _lower(self, c):
        return 0.0

    def _upper(self, c):
        # Eight standard units above the fold parameter; the CDF is
        # numerically one well before this point.
        return 8 + c
29 |
30 |
class FoldedNormal(ShiftScaleDistribution):
    """
    Folded normal distribution.

    Args:
        mu (float, Distribution):
            Location parameter in normal distribution.
        scale (float, Distribution):
            Scaling parameter (in both normal and fold).
        shift (float, Distribution):
            Location of fold.

    Examples:
        >>> distribution = chaospy.FoldedNormal(1.5)
        >>> distribution
        FoldedNormal(1.5)
        >>> uloc = numpy.linspace(0, 1, 6)
        >>> uloc
        array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
        >>> xloc = distribution.inv(uloc)
        >>> xloc.round(3)
        array([0. , 0.706, 1.254, 1.755, 2.342, 9.5 ])
        >>> numpy.allclose(distribution.fwd(xloc), uloc)
        True
        >>> distribution.pdf(xloc).round(3)
        array([0.259, 0.326, 0.396, 0.388, 0.28 , 0. ])
        >>> distribution.sample(4).round(3)
        array([1.906, 2.225, 1.638, 2.701])

    """

    def __init__(self, mu=0, scale=1, shift=0):
        # The underlying location is expressed relative to the fold point,
        # hence `mu - shift`; the repr still shows the user-provided `mu`.
        super(FoldedNormal, self).__init__(
            dist=folded_normal(mu - shift),
            repr_args=[mu],
            scale=scale,
            shift=shift,
        )
69 |
--------------------------------------------------------------------------------
/tests/recurrence/test_quadrature_creation.py:
--------------------------------------------------------------------------------
1 | """
2 | Check the creation of quadrature nodes.
3 |
4 | Create Gaussian quadrature nodes using various distributions and algorithms and
5 | check if the nodes correctly can be used to estimate raw statistical nodes up
6 | to 2N-1. Check for both 1 and 3 dimensions.
7 | """
8 | import pytest
9 | import numpy
10 |
11 | import chaospy
12 |
13 |
def test_1d_quadrature_creation(
        analytical_distribution, recurrence_algorithm):
    """Check 1-D quadrature rule."""
    abscissas, weights = chaospy.quadrature.gaussian(
        order=8,
        dist=analytical_distribution,
        recurrence_algorithm=recurrence_algorithm,
    )
    assert abscissas.shape == (1, 9)
    assert weights.shape == (9,)
    # The rule must reproduce the first raw moments exactly.
    for exponent in (1, 2):
        estimate = numpy.sum(abscissas**exponent*weights, -1)
        assert numpy.allclose(
            estimate, analytical_distribution.mom(exponent))
    # lanczos not working as well as the others for heavy tails:
    rtol = 1e-3 if recurrence_algorithm == "lanczos" else 1e-5
    assert numpy.allclose(numpy.sum(abscissas**15*weights, -1),
                          analytical_distribution.mom(15), rtol=rtol)
32 |
33 |
def test_3d_quadrature_creation(
        analytical_distribution, recurrence_algorithm):
    """Check 3-D quadrature rule."""
    distribution = chaospy.Iid(analytical_distribution, 3)
    abscissas, weights = chaospy.quadrature.gaussian(
        order=3,
        dist=distribution,
        recurrence_algorithm=recurrence_algorithm,
    )
    assert abscissas.shape == (3, 4**3)
    assert weights.shape == (4**3,)
    # Check marginal raw moments along each axis via one-hot exponents.
    kloc = numpy.eye(3, dtype=int)
    for exponent in (1, 2, 5):
        estimate = numpy.sum(abscissas**exponent*weights, -1)
        assert numpy.allclose(estimate, distribution.mom(exponent*kloc))
52 |
--------------------------------------------------------------------------------
/chaospy/distributions/collection/frechet.py:
--------------------------------------------------------------------------------
1 | """Frechet or Extreme value distribution type 2."""
2 | import numpy
3 | from scipy import special
4 |
5 | from ..baseclass import SimpleDistribution, ShiftScaleDistribution
6 |
7 |
class frechet(SimpleDistribution):
    """Frechet or Extreme value distribution type 2."""

    def __init__(self, c=1):
        super(frechet, self).__init__(dict(c=c))

    def _pdf(self, x, c):
        # f(x) = c * x^(c-1) * exp(-x^c)
        return c * x ** (c - 1) * numpy.exp(-(x**c))

    def _cdf(self, x, c):
        # F(x) = 1 - exp(-x^c), via expm1 for small-argument accuracy.
        return -numpy.expm1(-(x**c))

    def _ppf(self, q, c):
        # Invert F: x = (-log(1-q))^(1/c)
        return (-numpy.log1p(-q)) ** (1.0 / c)

    def _mom(self, k, c):
        # Raw moments: E[x^k] = Gamma(1 - k/c).
        return special.gamma(1 - k * 1.0 / c)

    def _lower(self, c):
        return 0.0

    def _upper(self, c):
        # Truncation point far into the upper tail.
        return 35 ** (1.0 / c)
31 |
32 |
class Frechet(ShiftScaleDistribution):
    """
    Frechet or Extreme value distribution type 2.

    Affine shift-and-scale wrapper around the standard Frechet variate.

    Args:
        shape (float, Distribution):
            Shape parameter.
        scale (float, Distribution):
            Scaling parameter.
        shift (float, Distribution):
            Location parameter.

    Examples:
        >>> distribution = chaospy.Frechet(3)
        >>> distribution
        Frechet(3)
        >>> uloc = numpy.linspace(0, 1, 6)
        >>> uloc
        array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
        >>> xloc = distribution.inv(uloc)
        >>> xloc.round(3)
        array([0. , 0.607, 0.799, 0.971, 1.172, 3.271])
        >>> numpy.allclose(distribution.fwd(xloc), uloc)
        True
        >>> distribution.pdf(xloc).round(3)
        array([0. , 0.883, 1.15 , 1.132, 0.824, 0. ])
        >>> distribution.sample(4).round(3)
        array([1.02 , 0.496, 1.442, 0.87 ])

    """

    def __init__(self, shape=1, scale=1, shift=0):
        super(Frechet, self).__init__(
            dist=frechet(shape),
            repr_args=[shape],
            scale=scale,
            shift=shift,
        )
71 |
--------------------------------------------------------------------------------
/chaospy/distributions/collection/log_laplace.py:
--------------------------------------------------------------------------------
1 | """Log-laplace distribution."""
2 | import numpy
3 |
4 | from ..baseclass import SimpleDistribution, ShiftScaleDistribution
5 |
6 |
class log_laplace(SimpleDistribution):
    """Log-laplace distribution (unit shape kernel)."""

    def __init__(self, c):
        super().__init__(dict(c=c))

    def _pdf(self, x, c):
        half_c = c / 2.0
        # The exponent flips sign at the mode x=1: rising branch below,
        # decaying branch above.
        exponent = numpy.where(x < 1, c, -c)
        return half_c * x ** (exponent - 1)

    def _cdf(self, x, c):
        below = 0.5 * x**c
        above = 1 - 0.5 * x ** (-c)
        return numpy.where(x < 1, below, above)

    def _ppf(self, q, c):
        lower_branch = (2.0 * q) ** (1.0 / c)
        upper_branch = (2 * (1.0 - q)) ** (-1.0 / c)
        return numpy.where(q < 0.5, lower_branch, upper_branch)

    def _lower(self, c):
        return 0.0

    def _upper(self, c):
        # Truncation point with negligible tail mass beyond it.
        return 2e12 ** (1.0 / c)
31 |
32 |
class LogLaplace(ShiftScaleDistribution):
    """
    Log-laplace distribution

    Args:
        shape (float, Distribution):
            Shape parameter
        scale (float, Distribution):
            Scaling parameter
        shift (float, Distribution):
            Location parameter

    Examples:
        >>> distribution = chaospy.LogLaplace(5)
        >>> distribution
        LogLaplace(5)
        >>> uloc = numpy.linspace(0, 1, 6)
        >>> uloc
        array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
        >>> xloc = distribution.inv(uloc)
        >>> xloc.round(3)
        array([  0.   ,   0.833,   0.956,   1.046,   1.201, 288.54 ])
        >>> numpy.allclose(distribution.fwd(xloc), uloc)
        True
        >>> distribution.pdf(xloc).round(3)
        array([0.   , 1.201, 2.091, 1.913, 0.833, 0.   ])
        >>> distribution.sample(4).round(3)
        array([1.076, 0.745, 1.587, 0.993])

    """

    def __init__(self, shape=1, scale=1, shift=0):
        # Delegate scaling/shifting to the generic wrapper.
        super().__init__(
            dist=log_laplace(shape),
            scale=scale,
            shift=shift,
            repr_args=[shape],
        )
71 |
--------------------------------------------------------------------------------
/chaospy/distributions/collection/trunc_exponential.py:
--------------------------------------------------------------------------------
1 | """Truncated exponential distribution."""
2 | import numpy
3 | from scipy import special
4 |
5 | from ..baseclass import SimpleDistribution, ShiftScaleDistribution
6 |
7 |
class truncexpon(SimpleDistribution):
    """Truncated exponential distribution (unit rate, truncated at ``b``)."""

    def __init__(self, b):
        super(truncexpon, self).__init__(dict(b=b))

    def _pdf(self, x, b):
        # Density exp(-x) renormalized over [0, b]; -expm1(-b) == 1-exp(-b)
        # but stays accurate when b is small.
        return numpy.exp(-x) / -numpy.expm1(-b)

    def _cdf(self, x, b):
        # expm1(-x)/expm1(-b) == (1-exp(-x))/(1-exp(-b)); using expm1 avoids
        # catastrophic cancellation for small x or b.
        return numpy.expm1(-x) / numpy.expm1(-b)

    def _ppf(self, q, b):
        # Inverse CDF: 1 - q + q*exp(-b) == 1 + q*expm1(-b), so log1p keeps
        # precision where the argument is close to zero.
        return -numpy.log1p(q * numpy.expm1(-b))

    def _lower(self, b):
        return 0.0

    def _upper(self, b):
        # Hard truncation point.
        return b
28 |
29 |
class TruncExponential(ShiftScaleDistribution):
    """
    Truncated exponential distribution.

    Args:
        upper (float, Distribution):
            Location of upper threshold
        scale (float, Distribution):
            Scaling parameter in the exponential distribution
        shift (float, Distribution):
            Location parameter

    Examples:
        >>> distribution = chaospy.TruncExponential(1.5)
        >>> distribution
        TruncExponential(1.5)
        >>> uloc = numpy.linspace(0, 1, 6)
        >>> uloc
        array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
        >>> xloc = distribution.inv(uloc)
        >>> xloc.round(3)
        array([0.   , 0.169, 0.372, 0.628, 0.972, 1.5  ])
        >>> numpy.allclose(distribution.fwd(xloc), uloc)
        True
        >>> distribution.pdf(xloc).round(3)
        array([1.287, 1.087, 0.887, 0.687, 0.487, 0.287])
        >>> distribution.sample(4).round(3)
        array([0.709, 0.094, 1.34 , 0.469])

    """

    def __init__(self, upper=1, scale=1, shift=0):
        # Express the truncation point in the kernel's standardized
        # coordinates before delegating to the shift-scale wrapper.
        standardized_upper = (upper - shift) * 1.0 / scale
        super().__init__(
            dist=truncexpon(standardized_upper),
            scale=scale,
            shift=shift,
            repr_args=[upper],
        )
68 |
--------------------------------------------------------------------------------
/chaospy/distributions/collection/hyperbolic_secant.py:
--------------------------------------------------------------------------------
1 | """Hyperbolic secant distribution."""
2 | import numpy
3 | from scipy import special
4 |
5 | from ..baseclass import SimpleDistribution, ShiftScaleDistribution
6 |
7 |
class hyperbolic_secant(SimpleDistribution):
    """Hyperbolic secant distribution (standardized kernel)."""

    def __init__(self):
        super().__init__()

    def _pdf(self, x):
        # f(x) = sech(pi*x/2)/2
        return 0.5 / numpy.cosh(numpy.pi * x / 2.0)

    def _cdf(self, x):
        # F(x) = (2/pi) * arctan(exp(pi*x/2))
        return 2 / numpy.pi * numpy.arctan(numpy.e ** (numpy.pi * x / 2.0))

    def _ppf(self, q):
        # Inverse of the arctan/exp composition above.
        return 2 / numpy.pi * numpy.log(numpy.tan(numpy.pi * q / 2.0))

    def _lower(self):
        # Symmetric truncation point with negligible tail mass.
        return -21.7

    def _upper(self):
        return 21.7

    def _mom(self, k):
        # Raw moments are |Euler numbers|; k arrives as a 0-d array here,
        # hence the .item() conversion.
        return numpy.abs(special.euler(k.item())[-1])
31 |
32 |
class HyperbolicSecant(ShiftScaleDistribution):
    """
    Hyperbolic secant distribution

    Args:
        scale (float, Distribution):
            Scale parameter
        shift (float, Distribution):
            Location parameter

    Examples:
        >>> distribution = chaospy.HyperbolicSecant()
        >>> distribution
        HyperbolicSecant()
        >>> uloc = numpy.linspace(0, 1, 6)
        >>> uloc
        array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
        >>> xloc = distribution.inv(uloc)
        >>> xloc.round(3)
        array([-21.7  ,  -0.716,  -0.203,   0.203,   0.716,  21.7  ])
        >>> numpy.allclose(distribution.fwd(xloc), uloc)
        True
        >>> distribution.pdf(xloc).round(3)
        array([0.   , 0.294, 0.476, 0.476, 0.294, 0.   ])
        >>> distribution.sample(4).round(3)
        array([ 0.32 , -1.082,  1.622, -0.036])
        >>> distribution.mom(2).round(3)
        np.float64(1.0)

    """

    def __init__(self, scale=1, shift=0):
        # Plain shift-scale wrapper around the standardized kernel; no
        # shape parameters, so no repr_args are needed.
        super().__init__(
            dist=hyperbolic_secant(),
            scale=scale,
            shift=shift,
        )
70 |
--------------------------------------------------------------------------------
/chaospy/distributions/collection/nakagami.py:
--------------------------------------------------------------------------------
1 | """Nakagami-m distribution."""
2 | import numpy
3 | from scipy import special
4 |
5 | from ..baseclass import SimpleDistribution, ShiftScaleDistribution
6 |
7 |
class nakagami(SimpleDistribution):
    """Nakagami-m distribution (standardized kernel)."""

    def __init__(self, nu):
        super().__init__(dict(nu=nu))

    def _pdf(self, x, nu):
        # f(x) = 2*nu^nu/Gamma(nu) * x^(2nu-1) * exp(-nu*x^2)
        normalizer = 2 * nu**nu / special.gamma(nu)
        return normalizer * (x ** (2 * nu - 1.0)) * numpy.exp(-nu * x * x)

    def _cdf(self, x, nu):
        # CDF is the regularized lower incomplete gamma in nu*x^2.
        return special.gammainc(nu, nu * x * x)

    def _ppf(self, q, nu):
        # Invert the incomplete gamma, then undo the nu*x^2 substitution.
        return numpy.sqrt(1.0 / nu * special.gammaincinv(nu, q))

    def _lower(self, nu):
        return 0.0

    def _upper(self, nu):
        # Quantile at probability 1-1e-16, i.e. effectively the upper tail.
        return numpy.sqrt(1.0 / nu * special.gammaincinv(nu, 1 - 1e-16))
34 |
35 |
class Nakagami(ShiftScaleDistribution):
    """
    Nakagami-m distribution.

    Args:
        shape (float, Distribution):
            Shape parameter
        scale (float, Distribution):
            Scaling parameter
        shift (float, Distribution):
            Location parameter

    Examples:
        >>> distribution = chaospy.Nakagami(1.5)
        >>> distribution
        Nakagami(1.5)
        >>> uloc = numpy.linspace(0, 1, 6)
        >>> uloc
        array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
        >>> xloc = distribution.inv(uloc)
        >>> xloc.round(3)
        array([0.   , 0.579, 0.789, 0.991, 1.244, 5.079])
        >>> numpy.allclose(distribution.fwd(xloc), uloc)
        True
        >>> distribution.pdf(xloc).round(3)
        array([0.   , 0.84 , 1.015, 0.933, 0.63 , 0.   ])
        >>> distribution.sample(4).round(3)
        array([1.05 , 0.465, 1.615, 0.87 ])

    """

    def __init__(self, shape=1, scale=1, shift=0):
        # Standardized kernel plus generic shift/scale handling.
        super().__init__(
            dist=nakagami(shape),
            scale=scale,
            shift=shift,
            repr_args=[shape],
        )
74 |
--------------------------------------------------------------------------------
/chaospy/descriptives/percentile.py:
--------------------------------------------------------------------------------
1 | """Percentile function."""
2 | import numpy
3 | import chaospy
4 |
5 |
def Perc(poly, q, dist, sample=10000, **kws):
    """
    Percentile function.

    Note that this function is an empirical function that operates using Monte
    Carlo sampling.

    Args:
        poly (numpoly.ndpoly):
            Polynomial of interest.
        q (numpy.ndarray):
            positions where percentiles are taken. Must be a number or an
            array, where all values are on the interval ``[0, 100]``.
        dist (Distribution):
            Defines the space where percentile is taken.
        sample (int):
            Number of samples used in estimation.
        **kws:
            Extra keyword arguments forwarded to ``dist.sample``.

    Returns:
        (numpy.ndarray):
            Percentiles of ``poly`` with ``Q.shape=q.shape+poly.shape``.

    Examples:
        >>> dist = chaospy.J(chaospy.Gamma(1, 1), chaospy.Normal(0, 2))
        >>> q0, q1 = chaospy.variable(2)
        >>> poly = chaospy.polynomial([0.05*q0, 0.2*q1, 0.01*q0*q1])
        >>> chaospy.Perc(poly, [0, 5, 50, 95, 100], dist).round(2)
        array([[ 0.  , -3.29, -5.3 ],
               [ 0.  , -0.64, -0.04],
               [ 0.03, -0.01, -0.  ],
               [ 0.15,  0.66,  0.04],
               [ 1.61,  3.29,  5.3 ]])

    """
    poly = chaospy.aspolynomial(poly)
    shape = poly.shape
    poly = poly.ravel()

    # Convert percentile positions (0-100) to fractions (0-1).
    q = numpy.asarray(q).ravel() / 100.0
    dim = len(dist)

    # Interior: evaluate the polynomial on Monte Carlo samples of the space.
    Z = dist.sample(sample, **kws).reshape(len(dist), sample)
    poly1 = poly(*Z)

    # Min/max: also evaluate every corner of the domain (all 2**dim
    # lower/upper combinations) so the 0th/100th percentiles can reach the
    # bounds; corners producing NaN (e.g. unbounded directions) are dropped.
    ext = numpy.mgrid[(slice(0, 2, 1),) * dim].reshape(dim, 2**dim).T
    ext = numpy.where(ext, dist.lower, dist.upper).T
    poly2 = poly(*ext)
    poly2 = numpy.array([_ for _ in poly2.T if not numpy.any(numpy.isnan(_))]).T

    # Finish: pool interior and corner evaluations, sort along the sample
    # axis, and pick empirical quantiles by index.
    if numpy.sum(poly2.shape) > 0:
        poly1 = numpy.concatenate([poly1, poly2], -1)
    samples = poly1.shape[-1]
    poly1.sort()
    out = poly1.T[numpy.asarray(q * (samples - 1), dtype=int)]
    out = out.reshape(q.shape + shape)

    return out
66 |
--------------------------------------------------------------------------------
/chaospy/distributions/collection/uniform.py:
--------------------------------------------------------------------------------
1 | """Uniform probability distribution."""
2 | from ..baseclass import SimpleDistribution, LowerUpperDistribution
3 |
4 |
class uniform(SimpleDistribution):
    """Uniform distribution fixed on the [-1, 1] interval."""

    def __init__(self):
        super().__init__()

    def _pdf(self, x):
        # Constant density over the support.
        return 0.5

    def _cdf(self, x):
        return 0.5 * x + 0.5

    def _ppf(self, q):
        return 2 * q - 1

    def _lower(self):
        return -1.0

    def _upper(self):
        return 1.0

    def _mom(self, k):
        # Odd raw moments vanish by symmetry; even ones are 1/(k+1).
        return (k % 2 == 0) * (1.0 / (k + 1))

    def _ttr(self, n):
        # Three-term recurrence coefficients for Legendre-type polynomials.
        n_sq = n * n
        return 0.0, n_sq / (4.0 * n_sq - 1)
31 |
32 |
class Uniform(LowerUpperDistribution):
    r"""
    Uniform probability distribution.

    Args:
        lower (float, Distribution):
            Lower threshold of distribution. Must be smaller than ``upper``.
        upper (float, Distribution):
            Upper threshold of distribution.

    Examples:
        >>> distribution = chaospy.Uniform(2, 4)
        >>> distribution
        Uniform(lower=2, upper=4)
        >>> uloc = numpy.linspace(0, 1, 6)
        >>> uloc
        array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
        >>> xloc = distribution.inv(uloc)
        >>> xloc.round(3)
        array([2. , 2.4, 2.8, 3.2, 3.6, 4. ])
        >>> numpy.allclose(distribution.fwd(xloc), uloc)
        True
        >>> distribution.pdf(xloc).round(3)
        array([0.5, 0.5, 0.5, 0.5, 0.5, 0.5])
        >>> distribution.sample(4).round(3)
        array([3.307, 2.23 , 3.901, 2.964])
        >>> distribution.mom(1).round(4)
        np.float64(3.0)
        >>> distribution.ttr([0, 1, 2, 3]).round(4)
        array([[ 3.    ,  3.    ,  3.    ,  3.    ],
               [-0.    ,  0.3333,  0.2667,  0.2571]])

    """

    def __init__(self, lower=0.0, upper=1.0):
        # The [-1, 1] kernel is mapped onto [lower, upper] by the wrapper.
        super().__init__(dist=uniform(), lower=lower, upper=upper)
73 |
--------------------------------------------------------------------------------
/chaospy/distributions/collection/laplace.py:
--------------------------------------------------------------------------------
1 | """Laplace Probability Distribution."""
2 | import numpy
3 | from scipy import special
4 | import chaospy
5 |
6 | from ..baseclass import SimpleDistribution, ShiftScaleDistribution
7 |
8 |
class laplace(SimpleDistribution):
    """Laplace Probability Distribution (standardized kernel)."""

    def __init__(self):
        super().__init__()

    def _pdf(self, x):
        # f(x) = exp(-|x|)/2
        return numpy.e ** -numpy.abs(x) / 2

    def _cdf(self, x):
        # Symmetric CDF built from the one-sided exponential tail.
        tail = 1 - numpy.e ** -abs(x)
        return (1 + numpy.sign(x) * tail) / 2

    def _mom(self, k):
        # Odd moments vanish by symmetry; even moments equal k!.
        return ((k + 1) % 2) * special.factorial(k)

    def _ppf(self, x):
        upper_branch = -numpy.log(2 * (1 - x))
        lower_branch = numpy.log(2 * x)
        return numpy.where(x > 0.5, upper_branch, lower_branch)

    def _lower(self):
        # Truncation points with negligible tail mass.
        return -20.0

    def _upper(self):
        return 20.0
32 |
33 |
class Laplace(ShiftScaleDistribution):
    R"""
    Laplace Probability Distribution

    Args:
        mu (float, Distribution):
            Mean of the distribution.
        scale (float, Distribution):
            Scaling parameter. scale > 0.

    Examples:
        >>> distribution = chaospy.Laplace()
        >>> distribution
        Laplace()
        >>> uloc = numpy.linspace(0, 1, 6)
        >>> uloc
        array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
        >>> xloc = distribution.inv(uloc)
        >>> xloc.round(3)
        array([-20.   ,  -0.916,  -0.223,   0.223,   0.916,  20.   ])
        >>> numpy.allclose(distribution.fwd(xloc), uloc)
        True
        >>> distribution.pdf(xloc).round(3)
        array([0. , 0.2, 0.4, 0.4, 0.2, 0. ])
        >>> distribution.sample(4).round(3)
        array([ 0.367, -1.47 ,  2.308, -0.036])
        >>> distribution.mom(1).round(3)
        np.float64(0.0)

    """

    def __init__(self, mu=0, sigma=1):
        super().__init__(
            dist=laplace(),
            scale=sigma,
            shift=mu,
        )
        # Only show parameters that differ from their defaults in the repr.
        repr_args = chaospy.format_repr_kwargs(mu=(mu, 0))
        repr_args += chaospy.format_repr_kwargs(sigma=(sigma, 1))
        self._repr_args = repr_args
74 |
--------------------------------------------------------------------------------
/chaospy/distributions/collection/weibull.py:
--------------------------------------------------------------------------------
1 | """Weibull Distribution."""
2 | import numpy
3 | from scipy import special
4 |
5 | from ..baseclass import SimpleDistribution, ShiftScaleDistribution
6 |
7 |
class weibull(SimpleDistribution):
    """Weibull Distribution (standardized kernel)."""

    def __init__(self, a=1):
        super().__init__(dict(a=a))

    def _pdf(self, x, a):
        # f(x) = a*x^(a-1)*exp(-x^a)
        return a * x ** (a - 1) * numpy.e ** (-(x**a))

    def _cdf(self, x, a):
        return 1 - numpy.e ** (-(x**a))

    def _ppf(self, q, a):
        # Guard q == 1 to avoid log(0); that branch is pinned to the
        # practical upper bound used by _upper.
        at_one = q == 1
        regular = (-numpy.log(1 - q + 1 * at_one)) ** (1.0 / a)
        return regular * (q != 1) + 30.0 ** (1.0 / a) * at_one

    def _mom(self, k, a):
        # Raw moment E[X^k] = Gamma(1 + k/a).
        return special.gamma(1.0 + k * 1.0 / a)

    def _lower(self, a):
        return 0.0

    def _upper(self, a):
        # Quantile where exp(-x^a) ~ exp(-30): negligible tail mass.
        return 30.0 ** (1.0 / a)
33 |
34 |
class Weibull(ShiftScaleDistribution):
    """
    Weibull Distribution

    Args:
        shape (float, Distribution):
            Shape parameter.
        scale (float, Distribution):
            Scale parameter.
        shift (float, Distribution):
            Location of lower bound.

    Examples:
        >>> distribution = chaospy.Weibull(2)
        >>> distribution
        Weibull(2)
        >>> uloc = numpy.linspace(0, 1, 6)
        >>> uloc
        array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
        >>> xloc = distribution.inv(uloc)
        >>> xloc.round(3)
        array([0.   , 0.472, 0.715, 0.957, 1.269, 5.477])
        >>> numpy.allclose(distribution.fwd(xloc), uloc)
        True
        >>> distribution.pdf(xloc).round(3)
        array([0.   , 0.756, 0.858, 0.766, 0.507, 0.   ])
        >>> distribution.sample(4).round(3)
        array([1.03 , 0.35 , 1.732, 0.811])
        >>> distribution.mom(1).round(4)
        np.float64(0.8862)

    """

    def __init__(self, shape=1, scale=1, shift=0):
        # Standardized kernel wrapped in generic shift/scale handling.
        super().__init__(
            dist=weibull(shape),
            scale=scale,
            shift=shift,
            repr_args=[shape],
        )
75 |
--------------------------------------------------------------------------------
/chaospy/distributions/collection/mv_student_t.py:
--------------------------------------------------------------------------------
1 | """Multivariate Student-T Distribution."""
2 | import numpy
3 | from scipy import special
4 | import chaospy
5 |
6 | from .student_t import student_t
7 | from ..baseclass import MeanCovarianceDistribution
8 |
9 |
class MvStudentT(MeanCovarianceDistribution):
    """
    Multivariate Student-T Distribution.

    Args:
        df (float, Distribution):
            Degree of freedom
        mu (numpy.ndarray, Distribution):
            Location parameter
        sigma (numpy.ndarray):
            Covariance matrix.

    Examples:
        >>> distribution = chaospy.MvStudentT(40, [1, 2], [[1, 0.6], [0.6, 1]])
        >>> distribution
        MvStudentT(df=40, mu=[1, 2], sigma=[[1, 0.6], [0.6, 1]])
        >>> chaospy.Cov(distribution).round(4)
        array([[1.0526, 0.6316],
               [0.6316, 1.0526]])
        >>> mesh = numpy.mgrid[0.25:0.75:3j, 0.25:0.75:2j].reshape(2, -1)
        >>> mesh.round(4)
        array([[0.25, 0.25, 0.5 , 0.5 , 0.75, 0.75],
               [0.25, 0.75, 0.25, 0.75, 0.25, 0.75]])
        >>> inverse_map = distribution.inv(mesh)
        >>> inverse_map.round(4)
        array([[0.3193, 0.3193, 1.    , 1.    , 1.6807, 1.6807],
               [1.0471, 2.1361, 1.4555, 2.5445, 1.8639, 2.9529]])
        >>> numpy.allclose(distribution.fwd(inverse_map), mesh)
        True
        >>> distribution.pdf(inverse_map).round(4)
        array([0.1225, 0.1225, 0.1552, 0.1552, 0.1225, 0.1225])
        >>> distribution.sample(4).round(4)
        array([[ 1.3979, -0.2189,  2.6868,  0.9551],
               [ 3.1625,  0.6234,  1.582 ,  1.7631]])

    """

    def __init__(
        self,
        df,
        mu,
        sigma=None,
        rotation=None,
    ):
        # Build the repr incrementally; non-default values only.
        repr_args = chaospy.format_repr_kwargs(df=(df, None))
        repr_args += chaospy.format_repr_kwargs(mu=(mu, None))
        repr_args += chaospy.format_repr_kwargs(sigma=(sigma, None))
        super().__init__(
            dist=student_t(df),
            mean=mu,
            covariance=sigma,
            rotation=rotation,
            repr_args=repr_args,
        )
63 |
--------------------------------------------------------------------------------
/chaospy/distributions/collection/normal.py:
--------------------------------------------------------------------------------
1 | """Normal (Gaussian) probability distribution."""
2 | import numpy
3 | from scipy import special
4 |
5 | from ..baseclass import SimpleDistribution, ShiftScaleDistribution
6 |
7 |
class normal(SimpleDistribution):
    """Standard normal distribution."""

    def __init__(self):
        super().__init__()

    def _lower(self):
        # Symmetric truncation points with negligible tail mass.
        return -8.22

    def _upper(self):
        return 8.22

    def _pdf(self, x):
        # Standard Gaussian density.
        normalizer = (2 * numpy.pi) ** (-0.5)
        return normalizer * numpy.e ** (-(x**2) / 2.0)

    def _cdf(self, x):
        # scipy's ndtr is the standard normal CDF.
        return special.ndtr(x)

    def _ppf(self, x):
        # ndtri is the inverse of ndtr.
        return special.ndtri(x)

    def _mom(self, k):
        # Raw moments: 0 for odd k, (k-1)!! for even k.
        return 0.5 * special.factorial2(k - 1) * (1 + (-1) ** k)

    def _ttr(self, n):
        # Hermite three-term recurrence coefficients.
        return 0.0, 1.0 * n
34 |
35 |
class Normal(ShiftScaleDistribution):
    R"""
    Normal (Gaussian) distribution

    Args:
        mu (float, Distribution):
            Mean of the distribution.
        sigma (float, Distribution):
            Standard deviation. sigma > 0

    Examples:
        >>> distribution = chaospy.Normal(2, 3)
        >>> distribution
        Normal(mu=2, sigma=3)
        >>> uloc = numpy.linspace(0, 1, 6)
        >>> uloc
        array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
        >>> xloc = distribution.inv(uloc)
        >>> xloc.round(3)
        array([-22.66 ,  -0.525,   1.24 ,   2.76 ,   4.525,  26.66 ])
        >>> numpy.allclose(distribution.fwd(xloc), uloc)
        True
        >>> distribution.pdf(xloc).round(3)
        array([0.   , 0.093, 0.129, 0.129, 0.093, 0.   ])
        >>> distribution.sample(4).round(3)
        array([ 3.185, -1.601,  6.943,  1.866])
        >>> distribution.mom(1).round(3)
        np.float64(2.0)
        >>> distribution.ttr([0, 1, 2, 3]).round(3)
        array([[ 2.,  2.,  2.,  2.],
               [ 0.,  9., 18., 27.]])

    """

    def __init__(self, mu=0, sigma=1):
        super().__init__(dist=normal(), scale=sigma, shift=mu)
        # Always list both parameters in the repr, even at their defaults.
        self._repr_args = ["%s=%s" % pair for pair in (("mu", mu), ("sigma", sigma))]
73 |
--------------------------------------------------------------------------------
/chaospy/descriptives/correlation/spearman.py:
--------------------------------------------------------------------------------
1 | """Spearman's correlation coefficient."""
2 | import numpy
3 | from scipy.stats import spearmanr
4 |
5 | import numpoly
6 | import chaospy
7 |
8 |
def Spearman(poly, dist=None, sample=10000, retall=False, **kws):
    """
    Calculate Spearman's rank-order correlation coefficient.

    Args:
        poly (numpoly.ndpoly):
            Polynomial of interest.
        dist (Distribution):
            Defines the space where correlation is taken.
        sample (int):
            Number of samples used in estimation.
        retall (bool):
            If true, return p-value as well.
        **kws:
            Extra keyword arguments forwarded to ``dist.sample``.

    Returns:
        (float, numpy.ndarray):
            Correlation output ``rho``. Of type float if two-dimensional problem.
            Correlation matrix if larger.
        (float, numpy.ndarray):
            The two-sided p-value for a hypothesis test whose null hypothesis
            is that two sets of data are uncorrelated, has same dimension as
            ``rho``.

    Examples:
        >>> distribution = chaospy.MvNormal(
        ...     [3, 4], [[2, .5], [.5, 1]])
        >>> corr, pvalue = chaospy.Spearman(distribution, sample=50, retall=True)
        >>> corr.round(4)
        array([[1.   , 0.603],
               [0.603, 1.   ]])
        >>> pvalue.round(8)
        array([[0.00e+00, 3.58e-06],
               [3.58e-06, 0.00e+00]])

    """
    if isinstance(poly, chaospy.Distribution):
        # A bare distribution means: correlate its own variables.
        poly, dist = numpoly.variable(len(poly)), poly
    else:
        poly = numpoly.polynomial(poly)
    samples = dist.sample(sample, **kws)
    corr = numpy.eye(len(poly))
    pval = numpy.zeros((len(poly), len(poly)))
    evals = poly.ravel()(*samples)
    assert len(poly) == len(evals)
    # Only the strict upper triangle is computed (idy > idx always); the
    # symmetric counterpart is filled by mirrored assignment, and the
    # diagonal keeps its initialized values (corr=1, pval=0).
    for idx in range(len(poly)):
        for idy in range(idx + 1, len(poly)):
            spear = spearmanr(evals[idx], evals[idy])
            pval[idx, idy] = pval[idy, idx] = spear.pvalue
            corr[idx, idy] = corr[idy, idx] = spear.correlation
    if retall:
        return corr, pval
    return corr
63 |
--------------------------------------------------------------------------------
/chaospy/quadrature/hermite.py:
--------------------------------------------------------------------------------
1 | """Gauss-Hermite quadrature rule."""
2 | import numpy
3 | import chaospy
4 |
5 | from .hypercube import hypercube_quadrature
6 |
7 |
def hermite(order, mu=0.0, sigma=1.0, physicist=False):
    r"""
    Gauss-Hermite quadrature rule.

    Compute the sample points and weights for Gauss-Hermite quadrature. The
    sample points are the roots of the nth degree Hermite polynomial. These
    sample points and weights correctly integrate polynomials of degree
    :math:`2N-1` or less.

    Gaussian quadrature come in two variants: physicist and probabilist. For
    Gauss-Hermite physicist means a weight function :math:`e^{-x^2}` and
    weights that sum to :math:`\sqrt{\pi}`, and probabilist means a weight
    function is :math:`e^{-x^2/2}` and sum to 1.

    Args:
        order (int):
            The quadrature order.
        mu (float):
            Non-centrality parameter.
        sigma (float):
            Scale parameter.
        physicist (bool):
            Use physicist weights instead of probabilist variant.

    Returns:
        abscissas (numpy.ndarray):
            The ``order+1`` quadrature points for where to evaluate the model
            function with.
        weights (numpy.ndarray):
            The quadrature weights associated with each abscissas.

    Examples:
        >>> abscissas, weights = chaospy.quadrature.hermite(3)
        >>> abscissas
        array([[-2.33441422, -0.74196378,  0.74196378,  2.33441422]])
        >>> weights
        array([0.04587585, 0.45412415, 0.45412415, 0.04587585])

    See also:
        :func:`chaospy.quadrature.gaussian`

    """
    order = int(order)
    # The physicist variant is the probabilist one with sigma scaled by
    # 1/sqrt(2) and weights scaled by sqrt(pi).
    if physicist:
        sigma = float(sigma * 2**-0.5)
    else:
        sigma = float(sigma)
    coefficients = chaospy.construct_recurrence_coefficients(
        order=order, dist=chaospy.Normal(0, sigma)
    )
    [abscissas], [weights] = chaospy.coefficients_to_quadrature(coefficients)
    if physicist:
        weights = weights * numpy.pi**0.5
    if order % 2 == 0:
        # Even order gives an odd node count; pin the middle node to exactly
        # zero to enforce symmetry against round-off.
        abscissas[len(abscissas) // 2] = 0
    abscissas += mu
    return abscissas[numpy.newaxis], weights
61 |
--------------------------------------------------------------------------------
/chaospy/distributions/collection/mielke.py:
--------------------------------------------------------------------------------
1 | """Mielke's beta-kappa distribution."""
2 | import numpy
3 | from scipy import special
4 |
5 | from ..baseclass import SimpleDistribution, ShiftScaleDistribution
6 |
7 |
class mielke(SimpleDistribution):
    """Mielke's beta-kappa distribution (standardized kernel)."""

    def __init__(self, k, s):
        super().__init__(dict(k=k, s=s))

    def _pdf(self, x, k, s):
        numerator = k * x ** (k - 1.0)
        denominator = (1.0 + x**s) ** (1.0 + k * 1.0 / s)
        return numerator / denominator

    def _cdf(self, x, k, s):
        tail_factor = (1.0 + x**s) ** (k * 1.0 / s)
        return x**k / tail_factor

    def _ppf(self, q, k, s):
        # Invert F(x) = x^k/(1+x^s)^(k/s) via the substitution u = q^(s/k).
        unit = q ** (s * 1.0 / k)
        return (unit / (1.0 - unit)) ** (1.0 / s)

    def _lower(self, k, s):
        return 0.0

    def _upper(self, k, s):
        # Quantile at probability 1-1e-10: effectively the upper tail.
        unit = (1 - 1e-10) ** (s * 1.0 / k)
        return (unit / (1.0 - unit)) ** (1.0 / s)
30 |
31 |
class Mielke(ShiftScaleDistribution):
    """
    Mielke's beta-kappa distribution

    Args:
        kappa (float, Distribution):
            First shape parameter
        expo (float, Distribution):
            Second shape parameter
        scale (float, Distribution):
            Scaling parameter
        shift (float, Distribution):
            Location parameter

    Examples:
        >>> distribution = chaospy.Mielke(kappa=1.5, expo=15)
        >>> distribution
        Mielke(1.5, 15)
        >>> uloc = numpy.linspace(0, 1, 6)
        >>> uloc
        array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
        >>> xloc = distribution.inv(uloc)
        >>> xloc.round(3)
        array([0.   , 0.342, 0.543, 0.712, 0.868, 3.981])
        >>> numpy.allclose(distribution.fwd(xloc), uloc)
        True
        >>> distribution.pdf(xloc).round(3)
        array([0.   , 0.877, 1.105, 1.257, 1.234, 0.   ])
        >>> distribution.sample(4).round(3)
        array([0.754, 0.236, 1.028, 0.615])

    """

    def __init__(self, kappa=1, expo=1, scale=1, shift=0):
        # Two-shape kernel plus generic shift/scale handling.
        super().__init__(
            dist=mielke(kappa, expo),
            scale=scale,
            shift=shift,
            repr_args=[kappa, expo],
        )
72 |
--------------------------------------------------------------------------------
/chaospy/distributions/collection/exponential_power.py:
--------------------------------------------------------------------------------
1 | """Exponential power distribution."""
2 | import numpy
3 |
4 | from ..baseclass import SimpleDistribution, ShiftScaleDistribution
5 |
6 |
class exponential_power(SimpleDistribution):
    """Exponential power distribution (standardized kernel)."""

    def __init__(self, b=1):
        super().__init__(dict(b=b))

    def _pdf(self, x, b):
        # Split x^b into x^(b-1)*x so the first factor can be reused.
        x_pow = x ** (b - 1.0)
        xb = x_pow * x
        return numpy.exp(1) * b * x_pow * numpy.exp(xb - numpy.exp(xb))

    def _cdf(self, x, b):
        # F(x) = 1 - exp(1 - exp(x^b)), written with expm1 for stability.
        return -numpy.expm1(-numpy.expm1(x**b))

    def _ppf(self, q, b):
        # Inverse of the doubly-exponentiated CDF; log1p mirrors the expm1.
        return numpy.log1p(-numpy.log1p(-q)) ** (1.0 / b)

    def _lower(self, b):
        # Support starts at zero regardless of the shape parameter.
        return 0.0

    def _upper(self, b):
        # Practical truncation point with negligible tail mass.
        return 3.6 ** (1.0 / b)
30 |
31 |
class ExponentialPower(ShiftScaleDistribution):
    """
    Exponential power distribution.

    Also known as Generalized error distribution and Generalized normal
    distribution version 1.

    Args:
        shape (float, Distribution):
            Shape parameter
        scale (float, Distribution):
            Scaling parameter
        shift (float, Distribution):
            Location parameter

    Examples:
        >>> distribution = chaospy.ExponentialPower(1.5)
        >>> distribution
        ExponentialPower(1.5)
        >>> uloc = numpy.linspace(0, 1, 6)
        >>> uloc
        array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
        >>> xloc = distribution.inv(uloc)
        >>> xloc.round(3)
        array([0.   , 0.344, 0.554, 0.751, 0.973, 2.349])
        >>> numpy.allclose(distribution.fwd(xloc), uloc)
        True
        >>> distribution.pdf(xloc).round(3)
        array([0.   , 0.86 , 1.012, 0.996, 0.772, 0.   ])
        >>> distribution.sample(4).round(3)
        array([0.805, 0.237, 1.243, 0.635])

    """

    def __init__(self, shape=1, scale=1, shift=0):
        # Standardized kernel plus generic shift/scale handling.
        super().__init__(
            dist=exponential_power(shape),
            scale=scale,
            shift=shift,
            repr_args=[shape],
        )
73 |
--------------------------------------------------------------------------------
/tests/distributions/test_1d_dependencies.py:
--------------------------------------------------------------------------------
1 | """Test dependent distributions with 1-D components."""
2 | from pytest import raises
3 | import numpy
4 | import chaospy
5 |
# Shared fixtures: DIST2 is parameterized by DIST1 and is therefore
# stochastically dependent on it; the two joint distributions place the
# dependent component last (JOINT1) and first (JOINT2) respectively.
DIST1 = chaospy.Uniform(1, 2)
DIST2 = chaospy.Gamma(DIST1)
JOINT1 = chaospy.J(DIST1, DIST2)
JOINT2 = chaospy.J(DIST2, DIST1)
10 |
11 |
def test_1d_stochastic_dependencies():
    """Ensure ``stochastic_dependent`` behaves as expected for dependent 1-D distributions."""
    # Only the independent base distribution should report no dependencies.
    assert not DIST1.stochastic_dependent
    for dependent_dist in (DIST2, JOINT1, JOINT2):
        assert dependent_dist.stochastic_dependent
18 |
19 |
def test_1d_dependent_bounds():
    """Ensure lower and upper bounds works for dependent 1-D distributions."""
    # Upper bound of Gamma(Uniform(1, 2)) evaluated at its widest shape.
    gamma_upper = 35.84367486
    assert numpy.isclose(DIST2.lower, 0)
    assert numpy.isclose(DIST2.upper, gamma_upper)
    assert numpy.allclose(JOINT1.lower, [1, 0])
    assert numpy.allclose(JOINT1.upper, [2, gamma_upper])
    assert numpy.allclose(JOINT2.lower, [0, 1])
    assert numpy.allclose(JOINT2.upper, [gamma_upper, 2])
28 |
29 |
def test_1d_dependent_mapping():
    """Ensure inverse and forward behaves as expected for dependent 1-D distributions."""
    unit_corners = numpy.array([[0, 0, 1, 1], [0, 1, 0, 1]])
    # Expected physical coordinates for each joint ordering.
    expected1 = numpy.array([[1, 1, 2, 2], [0, 32.2369909, 0, 35.84367486]])
    expected2 = numpy.array([[0, 0, 32.2369909, 35.84367486], [1, 2, 1, 2]])

    # inv maps unit cube -> physical space; fwd must round-trip it back.
    assert numpy.allclose(JOINT1.inv(unit_corners), expected1)
    assert numpy.allclose(JOINT2.inv(unit_corners), expected2)
    assert numpy.allclose(JOINT1.fwd(expected1), unit_corners)
    assert numpy.allclose(JOINT2.fwd(expected2), unit_corners)
40 |
41 |
def test_1d_dependent_density():
    """Ensure probability density function behaves as expected for dependent 1-D distributions."""
    uniform_x = numpy.array([0.8, 1.8, 1.2, 1.8])
    gamma_x = numpy.array([2, 4, 6, 8])
    uniform_density = numpy.array([0, 1, 1, 1])
    gamma_density = numpy.array([0.1011967, 0.05961306, 0.00386314, 0.00190102])

    # Densities decomposed per component, with approximation disabled.
    density1 = JOINT1.pdf([uniform_x, gamma_x], decompose=True, allow_approx=False)
    assert numpy.allclose(density1, [uniform_density, gamma_density])
    density2 = JOINT2.pdf([gamma_x, uniform_x], decompose=True, allow_approx=False)
    assert numpy.allclose(density2, [gamma_density, uniform_density])
56 |
--------------------------------------------------------------------------------
/chaospy/distributions/collection/double_gamma.py:
--------------------------------------------------------------------------------
1 | """Double gamma distribution."""
2 | import numpy
3 | from scipy import special
4 |
5 | from ..baseclass import SimpleDistribution, ShiftScaleDistribution
6 |
7 |
class double_gamma(SimpleDistribution):
    """Standard double (reflected) gamma distribution."""

    def __init__(self, a):
        super(double_gamma, self).__init__(dict(a=a))

    def _pdf(self, x, a):
        # Symmetric around zero: half the gamma density evaluated at |x|.
        abs_x = numpy.abs(x)
        return 1.0 / (2 * special.gamma(a)) * abs_x ** (a - 1.0) * numpy.exp(-abs_x)

    def _cdf(self, x, a):
        # Half the regularized lower incomplete gamma carries the one-sided mass.
        half_mass = 0.5 * special.gammainc(a, numpy.abs(x))
        return numpy.where(x > 0, 0.5 + half_mass, 0.5 - half_mass)

    def _ppf(self, q, a):
        # Fold the quantile around the median and invert the one-sided CDF.
        magnitude = special.gammainccinv(a, 1 - numpy.abs(2 * q - 1))
        return numpy.where(q > 0.5, magnitude, -magnitude)

    def _lower(self, a):
        # Truncated stand-in for the unbounded lower tail (mass ~1e-15 cut).
        return -special.gammainccinv(a, 2e-15)

    def _upper(self, a):
        return special.gammainccinv(a, 2e-15)
32 |
33 |
class DoubleGamma(ShiftScaleDistribution):
    """
    Double gamma distribution.

    A gamma distribution reflected around its location, giving a symmetric
    density on both sides of the shift point.

    Args:
        shape (float, Distribution):
            Shape parameter.
        scale (float, Distribution):
            Scaling parameter.
        shift (float, Distribution):
            Location parameter.

    Examples:
        >>> distribution = chaospy.DoubleGamma(shape=1.5)
        >>> distribution
        DoubleGamma(1.5)
        >>> uloc = numpy.linspace(0, 1, 6)
        >>> uloc
        array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
        >>> xloc = distribution.inv(uloc)
        >>> xloc.round(3)
        array([-35.769, -1.473, -0.503, 0.503, 1.473, 35.769])
        >>> numpy.allclose(distribution.fwd(xloc), uloc)
        True
        >>> distribution.pdf(xloc).round(3)
        array([0. , 0.157, 0.242, 0.242, 0.157, 0. ])
        >>> distribution.sample(4).round(3)
        array([ 0.727, -2.154, 3.132, -0.138])

    """

    def __init__(self, shape=1, scale=1, shift=0):
        core = double_gamma(shape)
        super(DoubleGamma, self).__init__(
            dist=core,
            repr_args=[shape],
            scale=scale,
            shift=shift,
        )
72 |
--------------------------------------------------------------------------------
/docs/index.rst:
--------------------------------------------------------------------------------
1 | Chaospy
2 | =======
3 |
4 | .. toctree::
5 | :hidden:
6 |
7 | user_guide/index
8 | reference/index
9 | about_us
10 |
Chaospy is a numerical toolbox for performing uncertainty quantification using
polynomial chaos expansions and advanced Monte Carlo methods, implemented in
Python. It also includes a full suite of tools for doing low-discrepancy
sampling, quadrature creation, polynomial manipulations, and a lot more.
15 |
The philosophy behind ``chaospy`` is not to be a single tool that solves every
uncertainty quantification problem, but instead to be a collection of specific
tools that help users solve problems themselves. This includes both well
established problems, and also serving as a foundry for experimenting with
new, less established problems. To do this, emphasis is put on the following:
21 |
22 | * Focus on an easy-to-use interface that embraces the `pythonic code style
23 | `_.
24 | * Make sure the code is "composable", such a way that changing one part of the
25 | code with something user defined should be easy and encouraged.
* Try to support a broad range of methods for doing uncertainty
  quantification wherever it makes sense to involve ``chaospy``.
28 | * Make sure that ``chaospy`` plays nice with a large set of other
29 | similar projects. This includes `numpy `_, `scipy
30 | `_, `scikit-learn `_,
31 | `statsmodels `_, `openturns
32 | `_, and `gstools `_
33 | to mention a few.
34 | * Contribute all code to the community open source.
35 |
36 | .. _installation:
37 |
38 | Installation
39 | ------------
40 |
41 | Installation should be straight forward from `pip `_:
42 |
43 | .. code-block:: bash
44 |
45 | pip install chaospy
46 |
47 | Or if `Conda `_ is more to your liking:
48 |
49 | .. code-block:: bash
50 |
51 | conda install -c conda-forge chaospy
52 |
53 | For developer installation, go to the `chaospy repository
54 | `_. Otherwise, check out the `user
55 | guide `_ to see how to
56 | use the toolbox.
57 |
--------------------------------------------------------------------------------
/chaospy/distributions/collection/alpha.py:
--------------------------------------------------------------------------------
1 | """Alpha probability distribution."""
2 | import numpy
3 | from scipy import special
4 |
5 | import chaospy
6 | from ..baseclass import SimpleDistribution, ShiftScaleDistribution
7 |
8 |
class alpha(SimpleDistribution):
    """Standard Alpha distribution (unit scale, zero shift)."""

    def __init__(self, a=1):
        super(alpha, self).__init__(dict(a=a))

    def _cdf(self, x, a):
        # Normalized by ndtr(a) so the CDF reaches 1 at the upper tail.
        return special.ndtr(a - 1.0 / x) / special.ndtr(a)

    def _ppf(self, q, a):
        quantile = 1.0 / (a - special.ndtri(q * special.ndtr(a)))
        # The closed form is fragile at q == 1; clip to the truncated bound.
        return numpy.where(q == 1, self._upper(a), quantile)

    def _pdf(self, x, a):
        density = (
            numpy.e ** (-0.5 * (a - 1.0 / x) ** 2)
            / (numpy.sqrt(2 * numpy.pi) * x**2 * special.ndtr(a))
        )
        # The density formula divides by x; pin the x == 0 edge to zero.
        return numpy.where(x == 0, 0, density)

    def _lower(self, a):
        return 0.0

    def _upper(self, a):
        # Quantile at 1 - 1e-10 stands in for the unbounded upper tail.
        return 1.0 / (a - special.ndtri((1 - 1e-10) * special.ndtr(a)))
35 |
36 |
class Alpha(ShiftScaleDistribution):
    """
    Alpha distribution.

    Args:
        shape (float, Distribution):
            Shape parameter.
        scale (float, Distribution):
            Scale parameter.
        shift (float, Distribution):
            Location of the lower threshold.

    Examples:
        >>> distribution = chaospy.Alpha(6)
        >>> distribution
        Alpha(6)
        >>> uloc = numpy.linspace(0, 1, 6)
        >>> uloc
        array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
        >>> xloc = distribution.inv(uloc)
        >>> xloc.round(3)
        array([ 0.   ,  0.146,  0.16 ,  0.174,  0.194, 63.709])
        >>> numpy.allclose(distribution.fwd(xloc), uloc)
        True
        >>> distribution.pdf(xloc).round(3)
        array([ 0.   , 13.104, 15.108, 12.759,  7.449,  0.   ])
        >>> distribution.sample(4).round(3)
        array([0.178, 0.139, 0.23 , 0.165])

    """

    def __init__(self, shape=1, scale=1, shift=0):
        core = alpha(shape)
        super(Alpha, self).__init__(
            dist=core,
            repr_args=[shape],
            scale=scale,
            shift=shift,
        )
75 |
--------------------------------------------------------------------------------
/chaospy/distributions/collection/__init__.py:
--------------------------------------------------------------------------------
1 | """Collection of distributions."""
2 | from .alpha import Alpha
3 | from .anglit import Anglit
4 | from .beta import Beta, ArcSinus, PowerLaw, Wigner, PERT
5 | from .binomial import Binomial
6 | from .bradford import Bradford
7 | from .burr import Burr
8 | from .cauchy import Cauchy
9 | from .chi import Chi, Maxwell, Rayleigh
10 | from .chi_squared import ChiSquared
11 | from .discrete_uniform import DiscreteUniform
12 | from .double_gamma import DoubleGamma
13 | from .double_weibull import DoubleWeibull
14 | from .exponential_power import ExponentialPower
15 | from .exponential_weibull import ExponentialWeibull
16 | from .f import F
17 | from .fatigue_life import FatigueLife
18 | from .fisk import Fisk
19 | from .folded_cauchy import FoldedCauchy
20 | from .folded_normal import FoldedNormal
21 | from .frechet import Frechet
22 | from .gamma import Gamma, Exponential
23 | from .generalized_exponential import GeneralizedExponential
24 | from .generalized_extreme import GeneralizedExtreme
25 | from .generalized_gamma import GeneralizedGamma
26 | from .generalized_half_logistic import GeneralizedHalfLogistic
27 | from .gompertz import Gompertz
28 | from .hyperbolic_secant import HyperbolicSecant
29 | from .inverse_gamma import InverseGamma
30 | from .kumaraswamy import Kumaraswamy
31 | from .laplace import Laplace
32 | from .levy import Levy
33 | from .log_gamma import LogGamma
34 | from .log_laplace import LogLaplace
35 | from .log_normal import LogNormal, Gilbrat
36 | from .log_uniform import LogUniform
37 | from .log_weibull import LogWeibull
38 | from .logistic import Logistic
39 | from .mielke import Mielke
40 | from .mv_log_normal import MvLogNormal
41 | from .mv_normal import MvNormal
42 | from .mv_student_t import MvStudentT
43 | from .nakagami import Nakagami
44 | from .normal import Normal
45 | from .pareto1 import Pareto1
46 | from .pareto2 import Pareto2
47 | from .power_log_normal import PowerLogNormal
48 | from .power_normal import PowerNormal
49 | from .reciprocal import Reciprocal
50 | from .student_t import StudentT
51 | from .triangle import Triangle
52 | from .trunc_exponential import TruncExponential
53 | from .trunc_normal import TruncNormal
54 | from .tukey_lambda import TukeyLambda
55 | from .uniform import Uniform
56 | from .wald import Wald
57 | from .weibull import Weibull
58 | from .wrapped_cauchy import WrappedCauchy
59 |
--------------------------------------------------------------------------------
/chaospy/distributions/baseclass/slice_.py:
--------------------------------------------------------------------------------
1 | """Baseclass for all conditional distributions."""
2 | import numpy
3 | import chaospy
4 |
5 | from .distribution import Distribution
6 |
7 |
class ItemDistribution(Distribution):
    """Single-component view into a multivariate distribution."""

    def __init__(self, index, parent):
        super(ItemDistribution, self).__init__(
            parameters=dict(index=index, parent=parent),
            dependencies=[parent._dependencies[index].copy()],
            rotation=[0],
            repr_args=[index, parent],
        )

    def get_parameters(self, idx, cache, assert_numerical=True):
        """Resolve parameters by delegating to the wrapped parent component."""
        assert idx == 0 or idx is None, "Indexes only have a single component"
        if idx == 0:
            # Translate our only axis into the parent's component index.
            idx = int(self._parameters["index"])
        parent = self._parameters["parent"]
        resolved = parent.get_parameters(
            idx, cache, assert_numerical=assert_numerical
        )
        return dict(parent=parent, parameters=resolved)

    def __repr__(self):
        return "ItemDistribution(%(index)d, %(parent)s)" % self._parameters

    # The distribution kernels below forward straight to the parent using the
    # parameters resolved in ``get_parameters``.

    def _lower(self, parent, parameters):
        return parent._lower(**parameters)

    def _upper(self, parent, parameters):
        return parent._upper(**parameters)

    def _ppf(self, xloc, parent, parameters):
        return parent._ppf(xloc, **parameters)

    def _cdf(self, xloc, parent, parameters):
        return parent._cdf(xloc, **parameters)

    def _pdf(self, xloc, parent, parameters):
        return parent._pdf(xloc, **parameters)

    def _mom(self, kloc, parent, parameters):
        # Expand the scalar order into a multi-index hitting only our axis.
        component = int(self._parameters["index"])
        multi_index = kloc * numpy.eye(len(parent), dtype=int)[component]
        return parent._mom(multi_index, **parameters)

    def _ttr(self, kloc, parent, parameters):
        raise chaospy.StochasticallyDependentError("TTR not supported")

    def _cache(self, idx, cache, get):
        """Return the cached value; the view itself when no index is given."""
        if idx is None:
            return self
        assert idx == 0
        component = int(self._parameters["index"])
        parent = self._parameters["parent"]
        return parent._get_cache(component, cache, get)
63 |
--------------------------------------------------------------------------------
/chaospy/distributions/collection/exponential_weibull.py:
--------------------------------------------------------------------------------
1 | """Exponential Weibull distribution."""
2 | import numpy
3 |
4 | from ..baseclass import SimpleDistribution, ShiftScaleDistribution
5 |
6 |
class exponential_weibull(SimpleDistribution):
    """Standard exponentiated Weibull distribution."""

    def __init__(self, a=1, c=1):
        super(exponential_weibull, self).__init__(dict(a=a, c=c))

    def _pdf(self, x, a, c):
        # Weibull survival factor exp(-x^c), exponentiated by shape ``a``.
        survival = numpy.exp(-(x**c))
        return a * c * (1 - survival) ** (a - 1) * survival * x ** (c - 1)

    def _cdf(self, x, a, c):
        # -expm1(-x^c) == 1 - exp(-x^c), accurate for small x^c.
        weibull_cdf = -numpy.expm1(-(x**c))
        return weibull_cdf**a

    def _ppf(self, q, a, c):
        return (-numpy.log1p(-(q ** (1.0 / a)))) ** (1.0 / c)

    def _lower(self, a, c):
        return 0.0

    def _upper(self, a, c):
        # Quantile at 1 - 1e-15 stands in for the unbounded upper tail.
        return (-numpy.log1p(-((1 - 1e-15) ** (1.0 / a)))) ** (1.0 / c)
29 |
30 |
class ExponentialWeibull(ShiftScaleDistribution):
    """
    Exponential Weibull distribution.

    Args:
        alpha (float, Distribution):
            First shape parameter.
        kappa (float, Distribution):
            Second shape parameter.
        scale (float, Distribution):
            Scaling parameter.
        shift (float, Distribution):
            Location parameter.

    Examples:
        >>> distribution = chaospy.ExponentialWeibull(alpha=2, kappa=3)
        >>> distribution
        ExponentialWeibull(2, 3)
        >>> uloc = numpy.linspace(0, 1, 6)
        >>> uloc
        array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
        >>> xloc = distribution.inv(uloc)
        >>> xloc.round(2)
        array([0.  , 0.84, 1.  , 1.14, 1.31, 3.28])
        >>> numpy.allclose(distribution.fwd(xloc), uloc)
        True
        >>> distribution.pdf(xloc).round(3)
        array([0.   , 1.047, 1.396, 1.367, 0.972, 0.   ])
        >>> distribution.sample(4).round(3)
        array([1.182, 0.745, 1.544, 1.058])

    """

    def __init__(self, alpha=1, kappa=1, scale=1, shift=0):
        core = exponential_weibull(alpha, kappa)
        super(ExponentialWeibull, self).__init__(
            dist=core,
            repr_args=[alpha, kappa],
            scale=scale,
            shift=shift,
        )
71 |
--------------------------------------------------------------------------------
/chaospy/distributions/baseclass/copula.py:
--------------------------------------------------------------------------------
1 | """Baseclass for all Copulas."""
2 | import numpy
3 | import chaospy
4 |
5 | from .distribution import Distribution
6 |
7 |
class CopulaDistribution(Distribution):
    """Baseclass for all Copulas."""

    def __init__(self, dist, trans, rotation=None, repr_args=None):
        r"""
        Args:
            dist (Distribution):
                Distribution to wrap the copula around.
            trans (Distribution):
                The copula wrapper `[0,1]^D \into [0,1]^D`.

        """
        assert len(dist) == len(trans), "Copula length missmatch"
        # Build per-component dependency sets. Components are visited in
        # order of increasing dependency-set size of the transform, and each
        # visited component's dependencies are folded into a running union,
        # so later-visited components inherit the dependencies of
        # earlier-visited ones. NOTE(review): presumably this encodes that
        # the copula couples all components together — confirm against
        # Distribution's dependency semantics.
        accumulant = set()
        dependencies = [deps.copy() for deps in dist._dependencies]
        for idx, _ in sorted(enumerate(trans._dependencies), key=lambda x: len(x[1])):
            accumulant.update(dist._dependencies[idx])
            dependencies[idx] = accumulant.copy()

        super(CopulaDistribution, self).__init__(
            parameters=dict(dist=dist, trans=trans),
            dependencies=dependencies,
            rotation=rotation,
            repr_args=repr_args,
        )

    def get_parameters(self, idx, cache, assert_numerical=True):
        """Get distribution parameters, dropping ``idx`` for full-vector calls."""
        parameters = super(CopulaDistribution, self).get_parameters(
            idx, cache, assert_numerical=assert_numerical
        )
        if idx is None:
            del parameters["idx"]
        return parameters

    def _lower(self, idx, dist, trans, cache):
        # Bounds delegate to the wrapped distribution only; the copula
        # transform acts on the unit hypercube and is not consulted here.
        return dist._get_lower(idx, cache=cache)

    def _upper(self, idx, dist, trans, cache):
        return dist._get_upper(idx, cache=cache)

    def _cdf(self, xloc, idx, dist, trans, cache):
        # Forward map: x -> u through the marginal, then through the copula.
        output = dist._get_fwd(xloc, idx, cache=cache)
        output = trans._get_fwd(output, idx, cache=cache)
        return output

    def _ppf(self, qloc, idx, dist, trans, cache):
        # Inverse map applies the two transforms in the reverse order of _cdf.
        qloc = trans._get_inv(qloc, idx, cache=cache)
        xloc = dist._get_inv(qloc, idx, cache=cache)
        return xloc

    def _pdf(self, xloc, idx, dist, trans, cache):
        # Chain rule: marginal density times the copula density evaluated at
        # the marginal CDF value. The marginal pdf uses a cache copy so its
        # evaluation does not pollute the cache used by the forward pass.
        density = dist._get_pdf(xloc, idx, cache=cache.copy())
        return (
            trans._get_pdf(dist._get_fwd(xloc, idx, cache=cache), idx, cache=cache)
            * density
        )
64 |
--------------------------------------------------------------------------------
/docs/reference/distribution/collection.rst:
--------------------------------------------------------------------------------
1 | .. _distribution_collection:
2 |
3 | Collection
4 | ==========
5 |
6 | .. currentmodule:: chaospy
7 |
8 | Unbound distributions
9 | ---------------------
10 |
11 | .. autosummary::
12 | :template: distribution.rst
13 | :toctree: ../../api
14 |
15 | Cauchy
16 | DoubleGamma
17 | DoubleWeibull
18 | GeneralizedExtreme
19 | HyperbolicSecant
20 | Laplace
21 | LogGamma
22 | Logistic
23 | Normal
24 | PowerNormal
25 | StudentT
26 |
27 | Partially bound distributions
28 | -----------------------------
29 |
30 | .. autosummary::
31 | :template: distribution.rst
32 | :toctree: ../../api
33 |
34 | Alpha
35 | Burr
36 | Chi
37 | ChiSquared
38 | Maxwell
39 | Exponential
40 | ExponentialPower
41 | ExponentialWeibull
42 | F
43 | Fisk
44 | FoldedCauchy
45 | FoldedNormal
46 | Frechet
47 | Gamma
48 | GeneralizedExponential
49 | GeneralizedGamma
50 | GeneralizedHalfLogistic
51 | Gompertz
52 | InverseGamma
53 | Levy
54 | LogLaplace
55 | LogNormal
56 | LogWeibull
57 | Mielke
58 | Nakagami
59 | Pareto1
60 | Pareto2
61 | PowerLogNormal
62 | Wald
63 | Weibull
64 | WrappedCauchy
65 |
66 | Bound distributions
67 | -------------------
68 |
69 | .. autosummary::
70 | :template: distribution.rst
71 | :toctree: ../../api
72 |
73 | Anglit
74 | ArcSinus
75 | Beta
76 | Bradford
77 | FatigueLife
78 | PowerLaw
79 | Wigner
80 | PERT
81 | Kumaraswamy
82 | LogUniform
83 | Reciprocal
84 | Triangle
85 | TruncExponential
86 | TruncNormal
87 | TukeyLambda
88 | Uniform
89 |
90 | .. _multivariate_distributions:
91 |
92 | Multivariate distributions
93 | --------------------------
94 |
95 | .. autosummary::
96 | :template: distribution.rst
97 | :toctree: ../../api
98 |
99 | MvLogNormal
100 | MvNormal
101 | MvStudentT
102 |
103 | Discrete distributions
104 | ----------------------
105 |
106 | .. autosummary::
107 | :template: distribution.rst
108 | :toctree: ../../api
109 |
110 | Binomial
111 | DiscreteUniform
112 |
113 | Copulas
114 | -------
115 |
116 | .. autosummary::
117 | :template: distribution.rst
118 | :toctree: ../../api
119 |
120 | Clayton
121 | Gumbel
122 | Joe
123 | Nataf
124 | TCopula
125 |
--------------------------------------------------------------------------------
/chaospy/quadrature/gegenbauer.py:
--------------------------------------------------------------------------------
1 | """Gauss-Gegenbauer quadrature rule."""
2 | import numpy
3 | import chaospy
4 |
5 | from .hypercube import hypercube_quadrature
6 |
7 |
def gegenbauer(order, alpha, lower=-1, upper=1, physicist=False):
    r"""
    Gauss-Gegenbauer quadrature rule.

    Compute the sample points and weights for Gauss-Gegenbauer quadrature. The
    sample points are the roots of the nth degree Gegenbauer polynomial. These
    sample points and weights correctly integrate polynomials of degree
    :math:`2N-1` or less.

    Gaussian quadrature come in two variants: physicist and probabilist. For
    Gauss-Gegenbauer physicist means a weight function
    :math:`(1-x^2)^{\alpha-0.5}` and weights that sum to :math:`2^{2\alpha-1}`,
    and probabilist means a weight function is
    :math:`B(\alpha+0.5, \alpha+0.5) (x-x^2)^{\alpha-1/2}` (where :math:`B` is
    the beta normalizing constant) which sum to 1.

    Args:
        order (int):
            The quadrature order.
        alpha (float):
            Gegenbauer shape parameter.
        lower (float):
            Lower bound for the integration interval.
        upper (float):
            Upper bound for the integration interval.
        physicist (bool):
            Use physicist weights instead of probabilist.

    Returns:
        abscissas (numpy.ndarray):
            The ``order+1`` quadrature points for where to evaluate the model
            function with.
        weights (numpy.ndarray):
            The quadrature weights associated with each abscissas.

    Examples:
        >>> abscissas, weights = chaospy.quadrature.gegenbauer(3, alpha=2)
        >>> abscissas
        array([[-0.72741239, -0.26621648,  0.26621648,  0.72741239]])
        >>> weights
        array([0.10452141, 0.39547859, 0.39547859, 0.10452141])

    See also:
        :func:`chaospy.quadrature.gaussian`

    """
    order = int(order)
    # The Gegenbauer weight on [lower, upper] is a symmetric beta weight, so
    # the recurrence coefficients come from the matching Beta distribution.
    coefficients = chaospy.construct_recurrence_coefficients(
        order=order, dist=chaospy.Beta(alpha + 0.5, alpha + 0.5, lower, upper)
    )
    [abscissas], [weights] = chaospy.coefficients_to_quadrature(coefficients)
    if physicist:
        # Rescale probabilist weights (summing to 1) to the physicist
        # normalization (summing to 2**(2*alpha-1)).
        weights = weights * 2 ** (2 * alpha - 1)
    return abscissas[numpy.newaxis], weights
61 |
--------------------------------------------------------------------------------
/chaospy/distributions/collection/power_normal.py:
--------------------------------------------------------------------------------
1 | """Power normal or Box-Cox distribution."""
2 | import numpy
3 | from scipy import special
4 | import chaospy
5 |
6 | from ..baseclass import SimpleDistribution, ShiftScaleDistribution
7 |
8 |
class power_normal(SimpleDistribution):
    """Standard power normal (Box-Cox) distribution."""

    def __init__(self, c):
        super(power_normal, self).__init__(dict(c=c))

    def _pdf(self, x, c):
        # Gaussian density times the chain-rule factor from raising the
        # upper-tail probability to the power ``c``.
        gauss = (2 * numpy.pi) ** -0.5 * numpy.exp(-(x**2) / 2.0)
        return c * gauss * special.ndtr(-x) ** (c - 1.0)

    def _cdf(self, x, c):
        return 1.0 - special.ndtr(-x) ** c

    def _ppf(self, q, c):
        return -special.ndtri(pow(1 - q, 1.0 / c))

    def _lower(self, c):
        # Quantile at 1e-15 stands in for the unbounded lower tail.
        return -special.ndtri(pow(1 - 1e-15, 1.0 / c))

    def _upper(self, c):
        return -special.ndtri(pow(1e-15, 1.0 / c))
30 |
31 |
class PowerNormal(ShiftScaleDistribution):
    """
    Power normal or Box-Cox distribution.

    Args:
        shape (float, Distribution):
            Shape parameter
        mu (float, Distribution):
            Mean of the normal distribution
        sigma (float, Distribution):
            Standard deviation of the normal distribution

    Examples:
        >>> distribution = chaospy.PowerNormal(1)
        >>> distribution
        PowerNormal(1)
        >>> uloc = numpy.linspace(0, 1, 6)
        >>> uloc
        array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
        >>> xloc = distribution.inv(uloc)
        >>> xloc.round(3)
        array([-7.941, -0.842, -0.253, 0.253, 0.842, 7.941])
        >>> numpy.allclose(distribution.fwd(xloc), uloc)
        True
        >>> distribution.pdf(xloc).round(3)
        array([0. , 0.28 , 0.386, 0.386, 0.28 , 0. ])
        >>> distribution.sample(4).round(3)
        array([ 0.395, -1.2 , 1.648, -0.045])

    """

    def __init__(self, shape=1, mu=0, sigma=1):
        # Fix: the docstring previously documented ``scale`` although the
        # actual keyword parameter is ``sigma`` (mapped to the scale below).
        super(PowerNormal, self).__init__(
            dist=power_normal(shape),
            scale=sigma,
            shift=mu,
        )
        # Build repr args manually so default-valued keywords are omitted.
        self._repr_args = [shape]
        self._repr_args += chaospy.format_repr_kwargs(mu=(mu, 0))
        self._repr_args += chaospy.format_repr_kwargs(sigma=(sigma, 1))
72 |
--------------------------------------------------------------------------------
/chaospy/distributions/collection/generalized_half_logistic.py:
--------------------------------------------------------------------------------
1 | """Generalized half-logistic distribution."""
2 | import numpy
3 | from scipy import special
4 |
5 | from ..baseclass import SimpleDistribution, ShiftScaleDistribution
6 |
7 |
class generalized_half_logistic(SimpleDistribution):
    """Standard generalized half-logistic distribution."""

    def __init__(self, c=1):
        super(generalized_half_logistic, self).__init__(dict(c=c))

    def _pdf(self, x, c):
        inv_c = 1.0 / c
        base = 1 - c * x
        partial_pow = base ** (inv_c - 1)
        full_pow = partial_pow * base
        return 2 * partial_pow / (1 + full_pow) ** 2

    def _cdf(self, x, c):
        inv_c = 1.0 / c
        transformed = (1 - c * x) ** (inv_c)
        return (1.0 - transformed) / (1 + transformed)

    def _ppf(self, q, c):
        return 1.0 / c * (1 - ((1.0 - q) / (1.0 + q)) ** c)

    def _lower(self, c):
        return 0.0

    def _upper(self, c):
        # The support ends at 1/c; clamp tiny shapes to avoid overflow.
        return 1 / numpy.where(c < 10**-10, 10**-10, c)
35 |
36 |
class GeneralizedHalfLogistic(ShiftScaleDistribution):
    """
    Generalized half-logistic distribution.

    Args:
        shape (float, Distribution):
            Shape parameter.
        scale (float, Distribution):
            Scaling parameter.
        shift (float, Distribution):
            Location parameter.

    Examples:
        >>> distribution = chaospy.GeneralizedHalfLogistic(0.5)
        >>> distribution
        GeneralizedHalfLogistic(0.5)
        >>> uloc = numpy.linspace(0, 1, 6)
        >>> uloc
        array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
        >>> xloc = distribution.inv(uloc)
        >>> xloc.round(3)
        array([0.   , 0.367, 0.691, 1.   , 1.333, 2.   ])
        >>> numpy.allclose(distribution.fwd(xloc), uloc)
        True
        >>> distribution.pdf(xloc).round(3)
        array([0.5  , 0.588, 0.642, 0.64 , 0.54 , 0.   ])
        >>> distribution.sample(4).round(3)
        array([1.085, 0.218, 1.681, 0.818])

    """

    def __init__(self, shape, scale=1, shift=0):
        core = generalized_half_logistic(shape)
        super(GeneralizedHalfLogistic, self).__init__(
            dist=core,
            repr_args=[shape],
            scale=scale,
            shift=shift,
        )
75 |
--------------------------------------------------------------------------------
/chaospy/distributions/collection/double_weibull.py:
--------------------------------------------------------------------------------
1 | """Double Weibull distribution."""
2 | import numpy
3 | from scipy import special
4 |
5 | from ..baseclass import SimpleDistribution, ShiftScaleDistribution
6 |
7 |
class double_weibull(SimpleDistribution):
    """Double weibull distribution."""

    def __init__(self, c):
        super(double_weibull, self).__init__(dict(c=c))

    def _pdf(self, x, c):
        # Symmetric around zero: half the Weibull density at |x|.
        ax = numpy.abs(x)
        Px = c / 2.0 * ax ** (c - 1.0) * numpy.exp(-(ax**c))
        return Px

    def _cdf(self, x, c):
        Cx1 = 0.5 * numpy.exp(-abs(x) ** c)
        return numpy.where(x > 0, 1 - Cx1, Cx1)

    def _ppf(self, q, c):
        # Fold the quantile around the median; the distribution is symmetric.
        q_ = numpy.where(q > 0.5, 1 - q, q)
        # Extreme quantiles (folded q == 0) clip to the truncated bound.
        Cq1 = numpy.where(q_ == 0, self._upper(c), 1)
        # BUG FIX: guard on the *folded* quantile ``q_`` rather than ``q``.
        # The old test ``q != 0`` let q == 1 (where q_ == 0) evaluate
        # (-log(0))**(1/c) == inf and override the upper-bound clip above,
        # so inv(1) returned inf instead of the documented finite bound.
        Cq1 = numpy.where((q_ != 0) & (c != 0), (-numpy.log(2 * q_)) ** (1.0 / c), Cq1)
        return numpy.where(q > 0.5, Cq1, -Cq1)

    def _lower(self, c):
        # Truncated stand-in for the unbounded lower tail (mass ~1e-10 cut).
        return -((-numpy.log(2e-10)) ** (1.0 / c))

    def _upper(self, c):
        return (-numpy.log(2e-10)) ** (1.0 / c)
34 |
35 |
class DoubleWeibull(ShiftScaleDistribution):
    """
    Double Weibull distribution.

    Args:
        shape (float, Distribution):
            Shape parameter.
        scale (float, Distribution):
            Scaling parameter.
        shift (float, Distribution):
            Location parameter.

    Examples:
        >>> distribution = chaospy.DoubleWeibull(1.5)
        >>> distribution
        DoubleWeibull(1.5)
        >>> uloc = numpy.linspace(0, 1, 6)
        >>> uloc
        array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
        >>> xloc = distribution.inv(uloc)
        >>> xloc.round(3)
        array([-7.93 , -0.943, -0.368, 0.368, 0.943, 7.93 ])
        >>> numpy.allclose(distribution.fwd(xloc), uloc)
        True
        >>> distribution.pdf(xloc).round(3)
        array([0. , 0.291, 0.364, 0.364, 0.291, 0. ])
        >>> distribution.sample(4).round(3)
        array([ 0.513, -1.293, 1.747, -0.11 ])

    """

    def __init__(self, shape=1, scale=1, shift=0):
        core = double_weibull(shape)
        super(DoubleWeibull, self).__init__(
            dist=core,
            repr_args=[shape],
            scale=scale,
            shift=shift,
        )
74 |
--------------------------------------------------------------------------------
/tests/distributions/operators/test_truncation.py:
--------------------------------------------------------------------------------
1 | """Tests for truncation operator."""
2 | import numpy
3 | import chaospy
4 |
5 |
def test_truncation_lower_as_dist():
    """Ensure lower bound as a distribution is supported."""
    bound = chaospy.Normal()
    truncated = chaospy.Trunc(chaospy.Normal(), lower=bound)
    joint = chaospy.J(bound, truncated)
    # Conditional probability of the truncated component at x == 0,
    # given the bound landed at -1.
    expected = (0.5 - bound.fwd(-1)) / (1 - bound.fwd(-1))
    evaluated = joint.fwd([[-1, 0, 1], [0, 0, 0]])
    assert numpy.allclose(evaluated, [bound.fwd([-1, 0, 1]), [expected, 0, 0]])
14 |
15 |
def test_truncation_upper_as_dist():
    """Ensure upper bound as a distribution is supported."""
    bound = chaospy.Normal()
    truncated = chaospy.Trunc(chaospy.Normal(), upper=bound)
    joint = chaospy.J(bound, truncated)
    # Conditional probability at x == 0 when the upper bound landed at 1.
    expected = 0.5 / bound.fwd(1)
    evaluated = joint.fwd([[-1, 0, 1], [0, 0, 0]])
    assert numpy.allclose(evaluated, [bound.fwd([-1, 0, 1]), [1, 1, expected]])
24 |
25 |
def test_truncation_both_as_dist():
    """Ensure that lower and upper bound combo is supported."""
    low_bound = chaospy.Normal()
    high_bound = chaospy.Normal()
    truncated = chaospy.Trunc(
        chaospy.Normal(), lower=low_bound, upper=high_bound)
    joint = chaospy.J(low_bound, high_bound, truncated)
    # Conditional probability at x == 0 for bounds at (-1, 1).
    ref = (0.5 - low_bound.fwd(-1)) / (1 - low_bound.fwd(-1)) / high_bound.fwd(1)
    samples = [[-1, -1, 1, 1],
               [-1, 1, -1, 1],
               [0, 0, 0, 0]]
    expected = [low_bound.fwd([-1, -1, 1, 1]),
                high_bound.fwd([-1, 1, -1, 1]),
                [1, ref, 1, 0]]
    assert numpy.allclose(joint.fwd(samples), expected)
39 |
40 |
def test_trucation_multivariate():
    """Ensure that multivariate bounds works as expected."""
    bound = chaospy.Iid(chaospy.Normal(), 2)
    truncated = chaospy.Trunc(
        chaospy.Iid(chaospy.Normal(), 2), lower=bound, upper=[1, 1])
    joint = chaospy.J(bound, truncated)
    samples = [[-1, -1, -1, -1],
               [-1, -1, -1, -1],
               [0, 0, -2, 2],
               [-2, 2, 0, 0]]
    expected = [[0.15865525] * 4,
                [0.15865525] * 4,
                [0.48222003, 0.48222003, 0.0, 1.0],
                [0.0, 1.0, 0.48222003, 0.48222003]]
    assert numpy.allclose(joint.fwd(samples), expected)
57 |
58 |
--------------------------------------------------------------------------------
/chaospy/distributions/collection/cauchy.py:
--------------------------------------------------------------------------------
1 | """Cauchy distribution."""
2 | import numpy
3 |
4 | from ..baseclass import SimpleDistribution, ShiftScaleDistribution
5 |
6 |
class cauchy(SimpleDistribution):
    """Standard Cauchy distribution."""

    def __init__(self):
        super(cauchy, self).__init__()

    def _pdf(self, x):
        # Lorentzian curve 1/(pi*(1+x^2)).
        return 1.0 / numpy.pi / (1.0 + x * x)

    def _cdf(self, x):
        return 0.5 + 1.0 / numpy.pi * numpy.arctan(x)

    def _ppf(self, q):
        # Map the unit interval onto (-pi/2, pi/2) and take the tangent.
        angle = numpy.pi * q - numpy.pi / 2.0
        return numpy.tan(angle)

    def _lower(self):
        # The true support is unbounded; this huge finite cutoff keeps the
        # numerics usable despite the heavy tails.
        return -3e13

    def _upper(self):
        return 3e13
27 |
28 |
class Cauchy(ShiftScaleDistribution):
    """
    Cauchy distribution.

    Also known as Lorentz distribution, Cachy-Lorentz distribution, and
    Breit-Wigner distribution.

    Args:
        scale (float, Distribution):
            Scaling parameter.
        shift (float, Distribution):
            Location parameter.

    Examples:
        >>> distribution = chaospy.Cauchy()
        >>> distribution
        Cauchy()
        >>> uloc = numpy.linspace(0.1, 0.9, 5)
        >>> uloc
        array([0.1, 0.3, 0.5, 0.7, 0.9])
        >>> xloc = distribution.inv(uloc)
        >>> xloc.round(3)
        array([-3.078, -0.727, 0. , 0.727, 3.078])
        >>> numpy.allclose(distribution.fwd(xloc), uloc)
        True
        >>> distribution.pdf(xloc).round(3)
        array([0.03 , 0.208, 0.318, 0.208, 0.03 ])
        >>> distribution.sample(4).round(3)
        array([ 0.524, -2.646, 6.35 , -0.056])

    Notes:
        The Cauchy distribution is what is known as a "pathological"
        distribution. It is not only infinitely bound, but heavy tailed
        enough that approximate bounds is also infinite for any reasonable
        approximation. This makes both bounds and moments results in
        non-sensibel results. E.g.::

            >>> distribution.lower < -1e10
            array([ True])
            >>> distribution.upper > 1e10
            array([ True])

    """

    def __init__(self, scale=1, shift=0):
        core = cauchy()
        super(Cauchy, self).__init__(
            dist=core,
            scale=scale,
            shift=shift,
        )
79 |
--------------------------------------------------------------------------------
/chaospy/distributions/collection/reciprocal.py:
--------------------------------------------------------------------------------
1 | """Reciprocal distribution."""
2 | import numpy
3 |
4 | from ..baseclass import SimpleDistribution, ShiftScaleDistribution
5 |
6 |
class reciprocal(SimpleDistribution):
    """Reciprocal distribution on the interval ``[lower, upper]``."""

    def __init__(self, lower=1, upper=2):
        super(reciprocal, self).__init__(dict(lower=lower, upper=upper))

    def _pdf(self, x, lower, upper):
        # Density proportional to 1/x, normalized by log(upper/lower).
        normalization = numpy.log(upper / lower)
        return 1.0 / (x * normalization)

    def _cdf(self, x, lower, upper):
        normalization = numpy.log(upper / lower)
        return numpy.log(x / lower) / normalization

    def _ppf(self, q, lower, upper):
        # Invert the CDF: x = exp(q*log(upper/lower) + log(lower)).
        exponent = q * numpy.log(upper / lower) + numpy.log(lower)
        return numpy.e**exponent

    def _lower(self, lower, upper):
        return lower

    def _upper(self, lower, upper):
        return upper

    def _mom(self, kloc, lower, upper):
        # Closed-form raw moment of order ``kloc``.
        numerator = upper**kloc - lower**kloc
        return numerator / (kloc * numpy.log(upper / lower))
28 |
29 |
class Reciprocal(ShiftScaleDistribution):
    """
    Reciprocal distribution.

    Args:
        lower (float, Distribution):
            Lower threshold of distribution. Must be smaller than ``upper``.
        upper (float, Distribution):
            Upper threshold of distribution.
        scale (float, Distribution):
            Scaling parameter
        shift (float, Distribution):
            Location parameter

    Examples:
        >>> distribution = chaospy.Reciprocal(2, 4)
        >>> distribution
        Reciprocal(2, 4)
        >>> uloc = numpy.linspace(0, 1, 6)
        >>> uloc
        array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
        >>> xloc = distribution.inv(uloc)
        >>> xloc.round(3)
        array([2. , 2.297, 2.639, 3.031, 3.482, 4. ])
        >>> numpy.allclose(distribution.fwd(xloc), uloc)
        True
        >>> distribution.pdf(xloc).round(3)
        array([0.721, 0.628, 0.547, 0.476, 0.414, 0.361])
        >>> distribution.sample(4).round(3)
        array([3.146, 2.166, 3.865, 2.794])
        >>> distribution.mom(1).round(4)
        np.float64(2.8854)

    """

    def __init__(self, lower, upper, shift=0, scale=1):
        # Raw reciprocal core on [lower, upper]; shift/scale applied on top.
        core = reciprocal(lower, upper)
        super(Reciprocal, self).__init__(
            dist=core, shift=shift, scale=scale, repr_args=[lower, upper]
        )
73 |
--------------------------------------------------------------------------------
/chaospy/quadrature/jacobi.py:
--------------------------------------------------------------------------------
1 | """Gauss-Jakobi quadrature rule."""
2 | import numpy
3 | import chaospy
4 |
5 | from .hypercube import hypercube_quadrature
6 |
7 |
def jacobi(order, alpha, beta, lower=-1, upper=1, physicist=False):
    r"""
    Gauss-Jacobi quadrature rule.

    Compute the sample points and weights for Gauss-Jacobi quadrature. The
    sample points are the roots of the nth degree Jacobi polynomial. These
    sample points and weights correctly integrate polynomials of degree
    :math:`2N-1` or less.

    Gaussian quadrature come in two variants: physicist and probabilist. For
    Gauss-Jacobi physicist means a weight function
    :math:`(1-x)^\alpha (1+x)^\beta` and
    weights that sum to :math:`2^{\alpha+\beta}`, and probabilist means a weight
    function is :math:`B(\alpha, \beta) x^{\alpha-1}(1-x)^{\beta-1}` (where
    :math:`B` is the beta normalizing constant) which sum to 1.

    Args:
        order (int):
            The quadrature order.
        alpha (float):
            First Jacobi shape parameter.
        beta (float):
            Second Jacobi shape parameter.
        lower (float):
            Lower bound for the integration interval.
        upper (float):
            Upper bound for the integration interval.
        physicist (bool):
            Use physicist weights instead of probabilist.

    Returns:
        abscissas (numpy.ndarray):
            The ``order+1`` quadrature points for where to evaluate the model
            function with.
        weights (numpy.ndarray):
            The quadrature weights associated with each abscissas.

    Examples:
        >>> abscissas, weights = chaospy.quadrature.jacobi(3, alpha=2, beta=2)
        >>> abscissas
        array([[-0.69474659, -0.25056281, 0.25056281, 0.69474659]])
        >>> weights
        array([0.09535261, 0.40464739, 0.40464739, 0.09535261])

    See also:
        :func:`chaospy.quadrature.gaussian`

    """
    order = int(order)
    # The Jacobi weight corresponds to a (shifted) Beta(alpha+1, beta+1) law.
    coefficients = chaospy.construct_recurrence_coefficients(
        order=order, dist=chaospy.Beta(alpha + 1, beta + 1, lower, upper)
    )
    [abscissas], [weights] = chaospy.coefficients_to_quadrature(coefficients)
    # Probabilist weights sum to 1; rescale for the physicist convention.
    weights *= 2 ** (alpha + beta) if physicist else 1
    return abscissas[numpy.newaxis], weights
63 |
--------------------------------------------------------------------------------
/chaospy/distributions/collection/chi_squared.py:
--------------------------------------------------------------------------------
1 | """Non-central Chi-squared distribution."""
2 | import numpy
3 | from scipy import special
4 | import chaospy
5 |
6 | from ..baseclass import SimpleDistribution, ShiftScaleDistribution
7 |
8 |
class chi_squared(SimpleDistribution):
    """Non-central Chi-squared distribution (raw form, no shift/scale)."""

    def __init__(self, df, nc):
        # df: degrees of freedom; nc: non-centrality parameter.
        super(chi_squared, self).__init__(dict(df=df, nc=nc))

    def _pdf(self, x, df, nc):
        # Non-central chi-squared density, expressed via the modified
        # Bessel function of the first kind (scipy.special.iv).
        output = 0.5 * numpy.e ** (-0.5 * (x + nc))
        output *= (x / nc) ** (0.25 * df - 0.5)
        output *= special.iv(0.5 * df - 1, (nc * x) ** 0.5)
        return output

    def _cdf(self, x, df, nc):
        # Delegate to scipy's non-central chi-squared CDF.
        return special.chndtr(x, df, nc)

    def _ppf(self, qloc, df, nc):
        # Clip q == 1 away to avoid an infinite quantile.
        qloc = numpy.clip(qloc, None, 1 - 1e-12)
        return special.chndtrix(qloc, df, nc)

    def _lower(self, df, nc):
        return 0.0

    def _upper(self, df, nc):
        # Finite stand-in for the infinite upper bound: the 1-1e-12 quantile.
        return special.chndtrix(1 - 1e-12, df, nc)
33 |
34 |
class ChiSquared(ShiftScaleDistribution):
    """
    (Non-central) Chi-squared distribution.

    Args:
        df (float, Distribution):
            Degrees of freedom
        nc (float, Distribution):
            Non-centrality parameter
        scale (float, Distribution):
            Scaling parameter
        shift (float, Distribution):
            Location parameter

    Examples:
        >>> distribution = chaospy.ChiSquared(df=15)
        >>> distribution
        ChiSquared(15)
        >>> uloc = numpy.linspace(0, 1, 6)
        >>> uloc
        array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
        >>> xloc = distribution.inv(uloc)
        >>> xloc.round(3)
        array([ 0. , 11.003, 13.905, 16.784, 20.592, 95.358])
        >>> numpy.allclose(distribution.fwd(xloc), uloc)
        True
        >>> distribution.pdf(xloc).round(3)
        array([0. , 0.062, 0.072, 0.064, 0.041, 0. ])
        >>> distribution.sample(4).round(3)
        array([17.655, 9.454, 26.66 , 15.047])

    """

    def __init__(self, df=1, nc=1, scale=1, shift=0):
        # Suppress `nc` from the repr when it is left at its default of 1.
        repr_args = [df] + chaospy.format_repr_kwargs(nc=(nc, 1))
        super(ChiSquared, self).__init__(
            dist=chi_squared(df, nc),
            scale=scale,
            shift=shift,
            repr_args=repr_args,
        )
75 |
--------------------------------------------------------------------------------
/chaospy/distributions/collection/log_uniform.py:
--------------------------------------------------------------------------------
1 | """Log-uniform distribution."""
2 | import numpy
3 |
4 | from ..baseclass import SimpleDistribution, ShiftScaleDistribution
5 |
6 |
class log_uniform(SimpleDistribution):
    """Log-uniform distribution: the logarithm is uniform on ``[lo, up]``."""

    def __init__(self, lo=0, up=1):
        super(log_uniform, self).__init__(dict(lo=lo, up=up))

    def _pdf(self, x, lo, up):
        width = up - lo
        return 1.0 / (x * width)

    def _cdf(self, x, lo, up):
        width = up - lo
        return (numpy.log(x) - lo) / width

    def _ppf(self, q, lo, up):
        # Map the quantile back through the exponential.
        exponent = q * (up - lo) + lo
        return numpy.e**exponent

    def _lower(self, lo, up):
        return numpy.e**lo

    def _upper(self, lo, up):
        return numpy.e**up

    def _mom(self, k, lo, up):
        # Closed-form raw moment. The ``k == 0`` term guards the division
        # for the zeroth moment, and the outer ``** (k != 0)`` pins it to 1.
        denominator = (up - lo) * (k + (k == 0))
        ratio = (numpy.e ** (up * k) - numpy.e ** (lo * k)) / denominator
        return ratio ** (k != 0)
32 |
33 |
class LogUniform(ShiftScaleDistribution):
    """
    Log-uniform distribution

    Args:
        lower (float, Distribution):
            Location of lower threshold of uniform distribution.
        upper (float, Distribution):
            Location of upper threshold of uniform distribution.
        scale (float, Distribution):
            Scaling parameter
        shift (float, Distribution):
            Location parameter

    Examples:
        >>> distribution = chaospy.LogUniform(0.5, 1.5)
        >>> distribution
        LogUniform(0.5, 1.5)
        >>> uloc = numpy.linspace(0, 1, 6)
        >>> uloc
        array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
        >>> xloc = distribution.inv(uloc)
        >>> xloc.round(3)
        array([1.649, 2.014, 2.46 , 3.004, 3.669, 4.482])
        >>> numpy.allclose(distribution.fwd(xloc), uloc)
        True
        >>> distribution.pdf(xloc).round(3)
        array([0.607, 0.497, 0.407, 0.333, 0.273, 0.223])
        >>> distribution.sample(4).round(3)
        array([3.17 , 1.85 , 4.264, 2.67 ])
        >>> distribution.mom(1).round(3)
        np.float64(2.833)

    """

    def __init__(self, lower=0, upper=1, scale=1, shift=0):
        # Raw log-uniform core; shift/scale applied by the base class.
        core = log_uniform(lower, upper)
        super(LogUniform, self).__init__(
            dist=core, scale=scale, shift=shift, repr_args=[lower, upper]
        )
76 |
--------------------------------------------------------------------------------
/chaospy/distributions/collection/inverse_gamma.py:
--------------------------------------------------------------------------------
1 | """Inverse Gamma distribution."""
2 | import numpy
3 | from scipy import special
4 |
5 | from ..baseclass import SimpleDistribution, ShiftScaleDistribution
6 |
7 |
class inverse_gamma(SimpleDistribution):
    """Inverse-Gamma distribution (raw form, shape parameter only)."""

    def __init__(self, a):
        # a: shape parameter, assumed positive.
        super(inverse_gamma, self).__init__(dict(a=a))

    def _lower(self, a):
        return 0.0

    def _upper(self, a):
        # Finite stand-in for the infinite upper bound: the 1-1e-16 quantile.
        return 1.0 / special.gammainccinv(a, 1 - 1e-16)

    def _pdf(self, x, a):
        # Substitute 1 at x == 0 so the power/exponential do not divide by
        # zero; the outer where() restores the density value 0 at x == 0.
        x_ = numpy.where(x, x, 1)
        return numpy.where(
            x, x_ ** (-a - 1) * numpy.exp(-1.0 / x_) / special.gamma(a), 0
        )

    def _cdf(self, x, a):
        # Same x == 0 guard as in _pdf; CDF is gammaincc(a, 1/x).
        return numpy.where(x, special.gammaincc(a, 1.0 / numpy.where(x, x, 1)), 0)

    def _ppf(self, q, a):
        return 1.0 / special.gammainccinv(a, q)

    def _mom(self, k, a):
        # Raw moments only exist for k < a; for larger orders the finite
        # upper-bound stand-in is returned instead of a divergent value.
        # NOTE(review): the guard is ``k > a``, so ``k == a`` (also
        # divergent) falls through to the product formula -- confirm intended.
        if k > a:
            return self._upper(a)
        # E[X**k] = 1 / ((a-1)(a-2)...(a-k)) for the unit-scale inverse gamma.
        return 1.0 / numpy.prod(a - numpy.arange(1, k.item() + 1))
34 |
35 |
class InverseGamma(ShiftScaleDistribution):
    """
    Inverse-Gamma distribution.

    Args:
        shape (float, Distribution):
            Shape parameter. a>0.
        scale (float, Distribution):
            Scale parameter. scale!=0
        shift (float, Distribution):
            Location of the lower bound.

    Examples:
        >>> distribution = chaospy.InverseGamma(shape=10)
        >>> distribution
        InverseGamma(10)
        >>> uloc = numpy.linspace(0, 1, 6)
        >>> uloc
        array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
        >>> xloc = distribution.inv(uloc)
        >>> xloc.round(3)
        array([0. , 0.08 , 0.095, 0.112, 0.137, 8.608])
        >>> numpy.allclose(distribution.fwd(xloc), uloc)
        True
        >>> distribution.pdf(xloc).round(3)
        array([ 0. , 11.928, 12.963, 10.441, 5.808, 0. ])
        >>> distribution.sample(4).round(3)
        array([0.118, 0.072, 0.185, 0.102])
        >>> distribution.mom([1, 2, 3]).round(3)
        array([0.111, 0.014, 0.002])

    """

    def __init__(self, shape, scale=1, shift=0):
        # Raw inverse-gamma core; scale/shift handled by the base class.
        core = inverse_gamma(shape)
        super(InverseGamma, self).__init__(
            dist=core, scale=scale, shift=shift, repr_args=[shape]
        )
76 |
--------------------------------------------------------------------------------
/chaospy/distributions/collection/folded_cauchy.py:
--------------------------------------------------------------------------------
1 | """Folded Cauchy distribution."""
2 | import numpy
3 |
4 | from ..baseclass import SimpleDistribution, ShiftScaleDistribution
5 |
6 |
class folded_cauchy(SimpleDistribution):
    """Folded Cauchy distribution."""

    def __init__(self, c=0):
        super(folded_cauchy, self).__init__(dict(c=c))

    def _pdf(self, x, c):
        # Sum of the two Cauchy density branches folded onto [0, inf).
        branch1 = 1.0 / (numpy.pi * (1 + (x - c) ** 2))
        branch2 = 1 / (numpy.pi * (1 + (x + c) ** 2))
        return branch1 + branch2

    def _cdf(self, x, c):
        # Corresponding folded CDF expressed through arctan.
        return (numpy.arctan(x - c) + numpy.arctan(x + c)) / numpy.pi

    def _lower(self, c):
        return 0.0

    def _upper(self, c):
        # Finite stand-in for the (actually infinite) upper bound.
        return 1e16
26 |
27 |
class FoldedCauchy(ShiftScaleDistribution):
    """
    Folded Cauchy distribution.

    Args:
        shape (float, Distribution):
            Shape parameter
        scale (float, Distribution):
            Scaling parameter
        shift (float, Distribution):
            Location parameter

    Examples:
        >>> distribution = chaospy.FoldedCauchy(1.5)
        >>> distribution
        FoldedCauchy(1.5)
        >>> uloc = numpy.linspace(0.1, 0.9, 5)
        >>> uloc
        array([0.1, 0.3, 0.5, 0.7, 0.9])
        >>> xloc = distribution.inv(uloc)
        >>> xloc.round(3)
        array([0.489, 1.217, 1.803, 2.67 , 6.644])
        >>> numpy.allclose(distribution.fwd(xloc), uloc)
        True
        >>> distribution.pdf(xloc).round(3)
        array([0.222, 0.333, 0.318, 0.152, 0.016])
        >>> distribution.sample(4).round(3)
        array([1.929, 8.542, 0.311, 1.414])

    Notes:
        The Cauchy distribution is what is known as a "pathological"
        distribution. It is not only infinitely bound, but heavy tailed
        enough that approximate bounds is also infinite for any reasonable
        approximation. This makes both bounds and moments results in
        non-sensible results. In the case of folded-Cauchy distribution::

            >>> distribution.upper > 1e10
            array([ True])

    """

    def __init__(self, shape=0, scale=1, shift=0):
        # Raw folded-Cauchy core; scale/shift handled by the base class.
        core = folded_cauchy(shape)
        super(FoldedCauchy, self).__init__(
            dist=core, scale=scale, shift=shift, repr_args=[shape]
        )
76 |
--------------------------------------------------------------------------------
/chaospy/distributions/sampler/sequences/grid.py:
--------------------------------------------------------------------------------
1 | """
2 | Generate samples from a regular grid.
3 |
4 | Example usage
5 | -------------
6 |
7 | Basic usage::
8 |
9 | >>> distribution = chaospy.Uniform(0, 1)
10 | >>> samples = distribution.sample(2, rule="grid")
11 | >>> samples.round(4)
12 | array([0.3333, 0.6667])
13 | >>> samples = distribution.sample(5, rule="grid")
14 | >>> samples.round(4)
15 | array([0.1667, 0.3333, 0.5 , 0.6667, 0.8333])
16 |
17 | Certain orders are nested::
18 |
19 | >>> samples = distribution.sample(3, rule="grid")
20 | >>> samples.round(4)
21 | array([0.25, 0.5 , 0.75])
22 | >>> samples = distribution.sample(7, rule="grid")
23 | >>> samples.round(4)
24 | array([0.125, 0.25 , 0.375, 0.5 , 0.625, 0.75 , 0.875])
25 |
26 | Create nested samples directly with the dedicated function::
27 |
28 | >>> samples = distribution.sample(2, rule="nested_grid")
29 | >>> samples.round(4)
30 | array([0.25, 0.5 , 0.75])
31 | >>> samples = distribution.sample(3, rule="nested_grid")
32 | >>> samples.round(4)
33 | array([0.125, 0.25 , 0.375, 0.5 , 0.625, 0.75 , 0.875])
34 |
35 | Multivariate usage::
36 |
37 | >>> distribution = chaospy.J(chaospy.Uniform(0, 1), chaospy.Uniform(0, 1))
38 | >>> samples = distribution.sample(2, rule="grid")
39 | >>> samples.round(4)
40 | array([[0.3333, 0.3333, 0.6667, 0.6667],
41 | [0.3333, 0.6667, 0.3333, 0.6667]])
42 |
43 | """
44 | import numpy
45 |
46 | import chaospy
47 | from chaospy.quadrature import utils
48 |
49 |
def create_grid_samples(order, dim=1):
    """
    Create samples from a regular grid.

    Args:
        order (int):
            The order of the grid. Defines the number of samples.
        dim (int):
            The number of dimensions in the grid

    Returns (numpy.ndarray):
        Regular grid with ``shape == (dim, order)``.
    """
    # `order` equidistant interior points of the unit interval.
    points = numpy.arange(1, order + 1) / (order + 1.0)
    # Tensor-product combination across all dimensions, axes first.
    return utils.combine([points] * dim).T
66 |
67 |
def create_nested_grid_samples(order, dim=1):
    """
    Create samples from a nested grid.

    Args:
        order (int):
            The order of the grid. Defines the number of samples.
        dim (int):
            The number of dimensions in the grid

    Returns (numpy.ndarray):
        Regular grid with ``shape == (dim, 2**order-1)``.
    """
    # Grids with 2**order - 1 points nest inside the next order's grid.
    sample_count = 2**order - 1
    return create_grid_samples(order=sample_count, dim=dim)
82 |
--------------------------------------------------------------------------------
/tests/distributions/test_2d_dependencies.py:
--------------------------------------------------------------------------------
1 | """Test dependent distributions with 2-D components."""
2 | from pytest import raises
3 | import numpy
4 | import chaospy
5 |
# DIST1 joins two independent uniforms; DIST2 feeds DIST1's components in
# as Gamma parameters, which makes it (and both joints) stochastically
# dependent.  JOINT1/JOINT2 differ only in component ordering.
DIST1 = chaospy.J(chaospy.Uniform(1, 2), chaospy.Uniform(2, 4))
DIST2 = chaospy.J(chaospy.Gamma(DIST1[0]), chaospy.Gamma(DIST1[1]))
JOINT1 = chaospy.J(DIST1, DIST2)
JOINT2 = chaospy.J(DIST2, DIST1)
10 |
11 |
def test_2d_stochastic_dependencies():
    """Ensure ``stochastic_dependent`` is set correctly for 2-D joints."""
    # Only DIST1 is built purely from independent components.
    assert not DIST1.stochastic_dependent
    for dependent in (DIST2, JOINT1, JOINT2):
        assert dependent.stochastic_dependent
18 |
19 |
def test_2d_dependencies():
    """Ensure 2-D dependencies behaves as expected."""
    # Unit-hypercube evaluation points; axes pinned at 1 evaluate at their
    # upper quantile, conditioned on the remaining axes.
    grid1 = numpy.array([[0, 0, 1, 1], [0, 1, 0, 1], [1, 1, 1, 1], [1, 1, 1, 1]])
    grid2 = numpy.array([[1, 1, 1, 1], [1, 1, 1, 1], [0, 0, 1, 1], [0, 1, 0, 1]])
    # Expected inverse transforms (regression values).
    inv_map1 = numpy.array([[1, 1, 2, 2],
                            [2, 4, 2, 4],
                            [32.2369909, 32.2369909, 35.84367486, 35.84367486],
                            [35.84367486, 41.71021463, 35.84367486, 41.71021463]])
    inv_map2 = numpy.array([[32.2369909, 32.2369909, 35.84367486, 35.84367486],
                            [35.84367486, 41.71021463, 35.84367486, 41.71021463],
                            [1, 1, 2, 2],
                            [2, 4, 2, 4]])
    assert numpy.allclose(JOINT1.inv(grid1), inv_map1)
    assert numpy.allclose(JOINT2.inv(grid2), inv_map2)
34 |
35 |
def test_2d_dependent_density():
    """Ensure probability density function behaves as expected for dependent 2-D distributions."""
    # Evaluation points: x_loc1/x_loc2 target the uniform axes,
    # x_loc3/x_loc4 the gamma axes.
    x_loc1 = numpy.array([0.8, 1.8, 1.2, 1.8])
    x_loc2 = numpy.array([1.8, 3.8, 3.2, 3.8])
    x_loc3 = numpy.array([2, 4, 6, 8])
    x_loc4 = numpy.array([2, 4, 6, 8])

    # Expected per-axis densities (regression values).
    y_loc1 = numpy.array([0, 1, 1, 1])
    y_loc2 = numpy.array([0, 0.5, 0.5, 0.5])
    y_loc3 = numpy.array([0.1011967, 0.05961306, 0.00386314, 0.00190102])
    y_loc4 = numpy.array([0.25299175, 0.1892478, 0.05267923, 0.02413998])

    # decompose=True yields one density row per axis; approximation is
    # disallowed so the analytical evaluation path is exercised.
    assert numpy.allclose(
        JOINT1.pdf([x_loc1, x_loc2, x_loc3, x_loc4], decompose=True, allow_approx=False),
        [y_loc1, y_loc2, y_loc3, y_loc4]
    )
    assert numpy.allclose(
        JOINT2.pdf([x_loc3, x_loc4, x_loc1, x_loc2], decompose=True, allow_approx=False),
        [y_loc3, y_loc4, y_loc1, y_loc2]
    )
56 |
--------------------------------------------------------------------------------
/chaospy/distributions/collection/fatigue_life.py:
--------------------------------------------------------------------------------
1 | """Fatigue-life distribution."""
2 | import numpy
3 | from scipy import special
4 |
5 | from ..baseclass import SimpleDistribution, ShiftScaleDistribution
6 |
7 |
8 | class fatigue_life(SimpleDistribution):
9 | """Fatigue-life distribution."""
10 |
11 | def __init__(self, c=0):
12 | super(fatigue_life, self).__init__(dict(c=c))
13 |
14 | def _pdf(self, x, c):
15 | output = (x + 1) / (2 * c * numpy.sqrt(2 * numpy.pi * x**3))
16 | output *= numpy.exp(-((x - 1) ** 2) / (2.0 * x * c**2))
17 | output[(x == 0) & numpy.isnan(output)] = 0
18 | return output
19 |
20 | def _cdf(self, x, c):
21 | out = special.ndtr(1.0 / c * (numpy.sqrt(x) - 1.0 / numpy.sqrt(x)))
22 | out = numpy.where(x == 0, 0, out)
23 | return out
24 |
25 | def _ppf(self, q, c):
26 | tmp = c * special.ndtri(q)
27 | out = numpy.where(
28 | numpy.isfinite(tmp), 0.25 * (tmp + numpy.sqrt(tmp**2 + 4)) ** 2, tmp
29 | )
30 | return out
31 |
32 | def _lower(self, c):
33 | return (-4 * c + numpy.sqrt(16 * c**2 + 1)) ** 2
34 |
35 | def _upper(self, c):
36 | return (4 * c + numpy.sqrt(16 * c**2 + 1)) ** 2
37 |
38 |
39 | class FatigueLife(ShiftScaleDistribution):
40 | """
41 | Fatigue-Life or Birmbaum-Sanders distribution
42 |
43 | Args:
44 | shape (float, Distribution):
45 | Shape parameter
46 | scale (float, Distribution):
47 | Scaling parameter
48 | shift (float, Distribution):
49 | Location parameter
50 |
51 | Examples:
52 | >>> distribution = chaospy.FatigueLife(0.5)
53 | >>> distribution
54 | FatigueLife(0.5)
55 | >>> uloc = numpy.linspace(0, 1, 6)
56 | >>> uloc
57 | array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
58 | >>> xloc = distribution.inv(uloc)
59 | >>> xloc.round(3)
60 | array([ 0.056, 0.659, 0.881, 1.135, 1.519, 17.944])
61 | >>> numpy.allclose(distribution.fwd(xloc), uloc)
62 | True
63 | >>> distribution.pdf(xloc).round(3)
64 | array([0. , 0.869, 0.879, 0.682, 0.377, 0. ])
65 | >>> distribution.sample(4).round(3)
66 | array([1.218, 0.553, 2.23 , 0.978])
67 |
68 | """
69 |
70 | def __init__(self, shape=1, scale=1, shift=0):
71 | super(FatigueLife, self).__init__(
72 | dist=fatigue_life(shape),
73 | scale=scale,
74 | shift=shift,
75 | repr_args=[shape],
76 | )
77 |
--------------------------------------------------------------------------------
/chaospy/distributions/operators/iid.py:
--------------------------------------------------------------------------------
1 | """Independent identical distributed vector of random variables."""
2 | from copy import deepcopy
3 | import numpy
4 | import chaospy
5 |
6 | from ..baseclass import Distribution
7 | from .joint import J
8 |
9 |
class Iid(J):
    """
    Opaque method for creating independent identical distributed random
    variables from an univariate variable.

    Args:
        dist (Distribution):
            Distribution to make into i.i.d. vector. The
            distribution will be copied so to not become a part of
            the dependency graph.
        length (int):
            The length of new distribution.

    Examples:
        >>> distribution = chaospy.Iid(chaospy.Normal(0, 1), 2)
        >>> distribution
        Iid(Normal(mu=0, sigma=1), 2)
        >>> chaospy.Cov(distribution)
        array([[1., 0.],
               [0., 1.]])
        >>> mesh = numpy.mgrid[0.25:0.75:3j, 0.25:0.75:3j].reshape(2, -1)
        >>> mapped_mesh = distribution.inv(mesh)
        >>> mapped_mesh.round(2)
        array([[-0.67, -0.67, -0.67, 0.  , 0.  , 0.  , 0.67, 0.67, 0.67],
               [-0.67, 0.  , 0.67, -0.67, 0.  , 0.67, -0.67, 0.  , 0.67]])
        >>> distribution.fwd(mapped_mesh).round(2)
        array([[0.25, 0.25, 0.25, 0.5 , 0.5 , 0.5 , 0.75, 0.75, 0.75],
               [0.25, 0.5 , 0.75, 0.25, 0.5 , 0.75, 0.25, 0.5 , 0.75]])
        >>> distribution.pdf(mapped_mesh).round(3)
        array([0.101, 0.127, 0.101, 0.127, 0.159, 0.127, 0.101, 0.127, 0.101])
        >>> distribution.sample(4, rule="halton").round(3)
        array([[-1.15 , 0.319, -0.319, 1.15 ],
               [-0.14 , 0.765, -0.765, 0.14 ]])
        >>> distribution.mom([[1, 2, 2], [2, 1, 2]]).round(12)
        array([0., 0., 1.])

    """

    def __init__(self, dist, length, rotation=None):
        # Only univariate source distributions can be replicated.
        assert isinstance(dist, Distribution)
        assert len(dist) == 1 and length >= 1
        assert len(dist._dependencies[0]) == 1
        exclusion = dist._dependencies[0].copy()
        # Deep-copy so the replicas share no dependency state with `dist`.
        dists = [deepcopy(dist) for _ in range(length)]
        # NOTE(review): this loop rebinds `dist`, shadowing the argument;
        # after the loop `dist` refers to the *last copy*, not the original.
        for dist in dists:
            # Give every copy its own fresh dependency identity.
            dist._dependencies = chaospy.init_dependencies(
                dist, rotation=[0], dependency_type="iid"
            )
        super(Iid, self).__init__(*dists, rotation=rotation)
        self._exclusion.update(exclusion)
        # Used for display only; holds the last copy (see NOTE above).
        self._repr_args = [dist, length]
61 |
--------------------------------------------------------------------------------
/docs/conf.py:
--------------------------------------------------------------------------------
"""Sphinx configuration for the chaospy documentation build."""
import time
import chaospy

# Project metadata; version/release are derived from the installed package.
project = 'chaospy'
author = 'Jonathan Feinberg'
copyright = '%d, Jonathan Feinberg' % time.gmtime().tm_year
version = ".".join(chaospy.__version__.split(".")[:2])
release = chaospy.__version__
master_doc = 'index'

# Sphinx extensions: autodoc/autosummary build the API reference,
# nbsphinx renders notebooks, sphinxcontrib-bibtex the bibliography.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.coverage',
    'sphinx.ext.mathjax',
    'sphinx.ext.viewcode',
    'sphinx.ext.autosummary',
    'nbsphinx',
    'sphinxcontrib.bibtex',
]

# Bibliography source and citation style.
bibtex_bibfiles = ['bibliography.bib']
bibtex_default_style = 'unsrt'

templates_path = ['_templates']
exclude_patterns = ['.build']

# Prepended to every RST source file (currently empty).
rst_prolog = """
"""
language = "en"

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "tango"

# Execute content of Jupyter notebooks:
# "always", "never", "auto" (on empty cell only)
nbsphinx_execute = "never"

# Create stubs automatically for all auto-summaries:
autosummary_generate = True

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "pydata_sphinx_theme"
html_theme_options = {
    "collapse_navigation": True,
    "external_links":
        [{"name": "Github", "url": "https://github.com/jonathf/chaospy"}],
    "footer_items": ["sphinx-version.html"],
    "navbar_align": "left",
    "navbar_end": ["search-field.html"],
    "navigation_depth": 2,
    "show_prev_next": False,
    "logo": {
        "image_light": "chaospy_logo2.svg",
        "image_dark": "chaospy_logo2.svg",  # Use the same logo or a different one for dark mode
    },
}
html_short_title = "chaospy"
html_context = {
    "doc_path": "docs",
}
html_logo = "_static/chaospy_logo2.svg"
html_static_path = ['_static']
html_sidebars = {
    "**": ["sidebar-nav-bs.html"],
}

htmlhelp_basename = 'chaospy'
html_show_sourcelink = True

# Cross-references into external projects' documentation.
intersphinx_mapping = {
    'python': ('https://docs.python.org/3/', None),
    'numpy': ('https://numpy.org/doc/stable/', None),
    'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
    'numpoly': ('https://numpoly.readthedocs.io/en/master/', None),
    'openturns': ('https://openturns.github.io/openturns/master/', None),
    'scikit-learn': ('https://scikit-learn.org/stable/', None),
}
81 |
--------------------------------------------------------------------------------
/tests/test_orthogonal_expansion.py:
--------------------------------------------------------------------------------
1 | """Testing polynomial related to distributions."""
2 | import chaospy
3 | import numpy
4 | import pytest
5 |
# One representative distribution per family exercised by the expansion
# builders; "custom" routes through UserDistribution with explicit hooks.
DISTRIBUTIONS = {
    "discrete": chaospy.DiscreteUniform(-10, 10),
    "normal": chaospy.Normal(0, 1),
    "uniform": chaospy.Uniform(-1, 1),
    "exponential": chaospy.Exponential(1),
    "gamma": chaospy.Gamma(1),
    "beta": chaospy.Beta(3, 3, lower=-1, upper=1),
    "mvnormal": chaospy.MvNormal([0], [1]),
    "custom": chaospy.UserDistribution(
        cdf=lambda x: (x+1)/2,
        pdf=lambda x: 1/2.,
        lower=lambda: -1,
        upper=lambda: 1,
        ppf=lambda q: 2*q-1,
        mom=lambda k: ((k+1.)%2)/(k+1),
        ttr=lambda k: (0., k*k/(4.*k*k-1)),
    ),
}
# Orthogonal-expansion construction methods under test.
# NOTE(review): gram_schmidt is deliberately commented out -- confirm why.
BUILDERS = {
    "stieltjes": chaospy.expansion.stieltjes,
    "cholesky": chaospy.expansion.cholesky,
    # "gram_schmidt": chaospy.expansion.gram_schmidt,
}
29 |
30 |
@pytest.fixture(params=DISTRIBUTIONS)
def distribution(request):
    """Yield each distribution under test in turn."""
    key = request.param
    return DISTRIBUTIONS[key]
34 |
35 |
@pytest.fixture(params=BUILDERS)
def builder(request):
    """Yield each expansion builder under test in turn."""
    key = request.param
    return BUILDERS[key]
39 |
40 |
@pytest.fixture
def expansion_small(builder, distribution):
    """Normed order-4 expansion for the current builder/distribution pair."""
    order = 4
    return builder(order, distribution, normed=True)
44 |
@pytest.fixture
def expansion_large(builder, distribution):
    """Normed order-7 expansion for the current builder/distribution pair."""
    order = 7
    return builder(order, distribution, normed=True)
48 |
49 |
@pytest.fixture
def expansion_approx(builder, distribution):
    """Normed order-4 expansion built via the approximation fallback.

    The distribution's ``_ttr`` and ``_mom`` hooks are replaced so the
    builder must fall back on its approximation path.  The hooks are
    patched on a deep copy: the objects in ``DISTRIBUTIONS`` are shared
    module-level state, and mutating them in place would leak the patch
    into every other test (including the ``expansion_small`` fixture this
    one is compared against in ``test_approx_expansion``).
    """
    from copy import deepcopy

    distribution = deepcopy(distribution)

    def not_implemented(*args, **kwargs):
        raise chaospy.UnsupportedFeature()

    distribution._ttr = not_implemented
    distribution._mom = not_implemented
    return builder(4, distribution, normed=True)
58 |
59 |
def test_orthogonality_small(expansion_small, distribution):
    """Order-4 expansions are orthonormal under the weight distribution."""
    gram = chaospy.E(chaospy.outer(expansion_small, expansion_small), distribution)
    identity = numpy.eye(len(gram))
    assert numpy.allclose(gram, identity, rtol=1e-8)
63 |
64 |
def test_orthogonality_large(expansion_large, distribution):
    """Order-7 expansions stay orthonormal, within looser tolerances."""
    gram = chaospy.E(chaospy.outer(expansion_large, expansion_large), distribution)
    identity = numpy.eye(len(gram))
    assert numpy.allclose(gram, identity, rtol=1e-4, atol=1e-6)
68 |
69 |
def test_approx_expansion(expansion_approx, expansion_small, distribution):
    """Approximation fallback reproduces the exact expansion's Gram matrix."""
    gram_exact = chaospy.E(chaospy.outer(expansion_small, expansion_small), distribution)
    gram_approx = chaospy.E(chaospy.outer(expansion_approx, expansion_approx), distribution)
    assert numpy.allclose(gram_exact, gram_approx, rtol=1e-12)
74 |
--------------------------------------------------------------------------------