1 | // Hide empty <a> tag within highlighted CodeBlock for screen reader accessibility (see https://github.com/jgm/pandoc/issues/6352#issuecomment-626106786)
2 | // v0.0.1
3 | // Written by JooYoung Seo (jooyoung@psu.edu) and Atsushi Yasumoto on June 1st, 2020.
4 |
5 | document.addEventListener('DOMContentLoaded', function() {
6 | const codeList = document.getElementsByClassName("sourceCode");
7 | for (var i = 0; i < codeList.length; i++) {
8 | var linkList = codeList[i].getElementsByTagName('a');
9 | for (var j = 0; j < linkList.length; j++) {
10 | if (linkList[j].innerHTML === "") {
11 | linkList[j].setAttribute('aria-hidden', 'true');
12 | }
13 | }
14 | }
15 | });
16 |
--------------------------------------------------------------------------------
/docs/articles/layer_dense_variational_files/anchor-sections-1.0/anchor-sections.css:
--------------------------------------------------------------------------------
1 | /* Styles for section anchors */
2 | a.anchor-section {margin-left: 10px; visibility: hidden; color: inherit;}
3 | a.anchor-section::before {content: '#';}
4 | .hasAnchor:hover a.anchor-section {visibility: visible;}
5 |
--------------------------------------------------------------------------------
/docs/articles/layer_dense_variational_files/anchor-sections-1.0/anchor-sections.js:
--------------------------------------------------------------------------------
1 | // Anchor sections v1.0 written by Atsushi Yasumoto on Oct 3rd, 2020.
2 | document.addEventListener('DOMContentLoaded', function() {
3 | // Do nothing if AnchorJS is used
4 | if (typeof window.anchors === 'object' && anchors.hasOwnProperty('hasAnchorJSLink')) {
5 | return;
6 | }
7 |
8 | const h = document.querySelectorAll('h1, h2, h3, h4, h5, h6');
9 |
10 | // Do nothing if sections are already anchored
11 | if (Array.from(h).some(x => x.classList.contains('hasAnchor'))) {
12 | return null;
13 | }
14 |
15 | // Use section id when pandoc runs with --section-divs
16 | const section_id = function(x) {
17 | return ((x.classList.contains('section') || (x.tagName === 'SECTION'))
18 | ? x.id : '');
19 | };
20 |
21 | // Add anchors
22 | h.forEach(function(x) {
23 | const id = x.id || section_id(x.parentElement);
24 | if (id === '') {
25 | return null;
26 | }
27 | let anchor = document.createElement('a');
28 | anchor.href = '#' + id;
29 | anchor.classList = ['anchor-section'];
30 | x.classList.add('hasAnchor');
31 | x.appendChild(anchor);
32 | });
33 | });
34 |
--------------------------------------------------------------------------------
/docs/articles/layer_dense_variational_files/header-attrs-2.1/header-attrs.js:
--------------------------------------------------------------------------------
1 | // Pandoc 2.9 adds attributes on both header and div. We remove the former (to
2 | // be compatible with the behavior of Pandoc < 2.8).
3 | document.addEventListener('DOMContentLoaded', function(e) {
4 | var hs = document.querySelectorAll("div.section[class*='level'] > :first-child");
5 | var i, h, a;
6 | for (i = 0; i < hs.length; i++) {
7 | h = hs[i];
8 | if (!/^h[1-6]$/i.test(h.tagName)) continue; // it should be a header h1-h6
9 | a = h.attributes;
10 | while (a.length > 0) h.removeAttribute(a[0].name);
11 | }
12 | });
13 |
--------------------------------------------------------------------------------
/docs/articles/layer_dense_variational_files/header-attrs-2.3/header-attrs.js:
--------------------------------------------------------------------------------
1 | // Pandoc 2.9 adds attributes on both header and div. We remove the former (to
2 | // be compatible with the behavior of Pandoc < 2.8).
3 | document.addEventListener('DOMContentLoaded', function(e) {
4 | var hs = document.querySelectorAll("div.section[class*='level'] > :first-child");
5 | var i, h, a;
6 | for (i = 0; i < hs.length; i++) {
7 | h = hs[i];
8 | if (!/^h[1-6]$/i.test(h.tagName)) continue; // it should be a header h1-h6
9 | a = h.attributes;
10 | while (a.length > 0) h.removeAttribute(a[0].name);
11 | }
12 | });
13 |
--------------------------------------------------------------------------------
/docs/articles/layer_dense_variational_files/header-attrs-2.5/header-attrs.js:
--------------------------------------------------------------------------------
1 | // Pandoc 2.9 adds attributes on both header and div. We remove the former (to
2 | // be compatible with the behavior of Pandoc < 2.8).
3 | document.addEventListener('DOMContentLoaded', function(e) {
4 | var hs = document.querySelectorAll("div.section[class*='level'] > :first-child");
5 | var i, h, a;
6 | for (i = 0; i < hs.length; i++) {
7 | h = hs[i];
8 | if (!/^h[1-6]$/i.test(h.tagName)) continue; // it should be a header h1-h6
9 | a = h.attributes;
10 | while (a.length > 0) h.removeAttribute(a[0].name);
11 | }
12 | });
13 |
--------------------------------------------------------------------------------
/docs/bootstrap-toc.css:
--------------------------------------------------------------------------------
1 | /*!
2 | * Bootstrap Table of Contents v0.4.1 (http://afeld.github.io/bootstrap-toc/)
3 | * Copyright 2015 Aidan Feldman
4 | * Licensed under MIT (https://github.com/afeld/bootstrap-toc/blob/gh-pages/LICENSE.md) */
5 |
6 | /* modified from https://github.com/twbs/bootstrap/blob/94b4076dd2efba9af71f0b18d4ee4b163aa9e0dd/docs/assets/css/src/docs.css#L548-L601 */
7 |
8 | /* All levels of nav */
9 | nav[data-toggle='toc'] .nav > li > a {
10 | display: block;
11 | padding: 4px 20px;
12 | font-size: 13px;
13 | font-weight: 500;
14 | color: #767676;
15 | }
16 | nav[data-toggle='toc'] .nav > li > a:hover,
17 | nav[data-toggle='toc'] .nav > li > a:focus {
18 | padding-left: 19px;
19 | color: #563d7c;
20 | text-decoration: none;
21 | background-color: transparent;
22 | border-left: 1px solid #563d7c;
23 | }
24 | nav[data-toggle='toc'] .nav > .active > a,
25 | nav[data-toggle='toc'] .nav > .active:hover > a,
26 | nav[data-toggle='toc'] .nav > .active:focus > a {
27 | padding-left: 18px;
28 | font-weight: bold;
29 | color: #563d7c;
30 | background-color: transparent;
31 | border-left: 2px solid #563d7c;
32 | }
33 |
34 | /* Nav: second level (shown on .active) */
35 | nav[data-toggle='toc'] .nav .nav {
36 | display: none; /* Hide by default, but at >768px, show it */
37 | padding-bottom: 10px;
38 | }
39 | nav[data-toggle='toc'] .nav .nav > li > a {
40 | padding-top: 1px;
41 | padding-bottom: 1px;
42 | padding-left: 30px;
43 | font-size: 12px;
44 | font-weight: normal;
45 | }
46 | nav[data-toggle='toc'] .nav .nav > li > a:hover,
47 | nav[data-toggle='toc'] .nav .nav > li > a:focus {
48 | padding-left: 29px;
49 | }
50 | nav[data-toggle='toc'] .nav .nav > .active > a,
51 | nav[data-toggle='toc'] .nav .nav > .active:hover > a,
52 | nav[data-toggle='toc'] .nav .nav > .active:focus > a {
53 | padding-left: 28px;
54 | font-weight: 500;
55 | }
56 |
57 | /* from https://github.com/twbs/bootstrap/blob/e38f066d8c203c3e032da0ff23cd2d6098ee2dd6/docs/assets/css/src/docs.css#L631-L634 */
58 | nav[data-toggle='toc'] .nav > .active > ul {
59 | display: block;
60 | }
61 |
--------------------------------------------------------------------------------
/docs/docsearch.js:
--------------------------------------------------------------------------------
1 | $(function() {
2 |
3 | // register a handler to move the focus to the search bar
4 | // upon pressing shift + "/" (i.e. "?")
5 | $(document).on('keydown', function(e) {
6 | if (e.shiftKey && e.keyCode == 191) {
7 | e.preventDefault();
8 | $("#search-input").focus();
9 | }
10 | });
11 |
12 | $(document).ready(function() {
13 | // do keyword highlighting
14 | /* modified from https://jsfiddle.net/julmot/bL6bb5oo/ */
15 | var mark = function() {
16 |
17 | var referrer = document.URL ;
18 | var paramKey = "q" ;
19 |
20 | if (referrer.indexOf("?") !== -1) {
21 | var qs = referrer.substr(referrer.indexOf('?') + 1);
22 | var qs_noanchor = qs.split('#')[0];
23 | var qsa = qs_noanchor.split('&');
24 | var keyword = "";
25 |
26 | for (var i = 0; i < qsa.length; i++) {
27 | var currentParam = qsa[i].split('=');
28 |
29 | if (currentParam.length !== 2) {
30 | continue;
31 | }
32 |
33 | if (currentParam[0] == paramKey) {
34 | keyword = decodeURIComponent(currentParam[1].replace(/\+/g, "%20"));
35 | }
36 | }
37 |
38 | if (keyword !== "") {
39 | $(".contents").unmark({
40 | done: function() {
41 | $(".contents").mark(keyword);
42 | }
43 | });
44 | }
45 | }
46 | };
47 |
48 | mark();
49 | });
50 | });
51 |
52 | /* Search term highlighting ------------------------------*/
53 |
54 | function matchedWords(hit) {
55 | var words = [];
56 |
57 | var hierarchy = hit._highlightResult.hierarchy;
58 | // loop to fetch from lvl0, lvl1, etc.
59 | for (var idx in hierarchy) {
60 | words = words.concat(hierarchy[idx].matchedWords);
61 | }
62 |
63 | var content = hit._highlightResult.content;
64 | if (content) {
65 | words = words.concat(content.matchedWords);
66 | }
67 |
68 | // return unique words
69 | var words_uniq = [...new Set(words)];
70 | return words_uniq;
71 | }
72 |
73 | function updateHitURL(hit) {
74 |
75 | var words = matchedWords(hit);
76 | var url = "";
77 |
78 | if (hit.anchor) {
79 | url = hit.url_without_anchor + '?q=' + escape(words.join(" ")) + '#' + hit.anchor;
80 | } else {
81 | url = hit.url + '?q=' + escape(words.join(" "));
82 | }
83 |
84 | return url;
85 | }
86 |
--------------------------------------------------------------------------------
/docs/link.svg:
--------------------------------------------------------------------------------
 1 | <!-- SVG markup not recoverable (stripped during extraction); original is pkgdown's link icon -->
--------------------------------------------------------------------------------
/docs/pkgdown.yml:
--------------------------------------------------------------------------------
1 | pandoc: 2.11.4
2 | pkgdown: 1.6.1
3 | pkgdown_sha: ~
4 | articles:
5 | dynamic_linear_models: dynamic_linear_models.html
6 | hamiltonian_monte_carlo: hamiltonian_monte_carlo.html
7 | layer_dense_variational: layer_dense_variational.html
8 | last_built: 2021-05-20T13:35Z
9 |
10 |
--------------------------------------------------------------------------------
/man/glm_families.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/glm.R
3 | \name{glm_families}
4 | \alias{glm_families}
5 | \title{GLM families}
6 | \value{
7 | A list of models that can be used as the \code{model} argument in \code{\link[=glm_fit]{glm_fit()}}.
8 | }
9 | \description{
10 | A list of models that can be used as the \code{model} argument in \code{\link[=glm_fit]{glm_fit()}}:
11 | }
12 | \details{
13 | \itemize{
14 | \item \code{Bernoulli}: \code{Bernoulli(probs=mean)} where \code{mean = sigmoid(matmul(X, weights))}.
15 | \item \code{BernoulliNormalCDF}: \code{Bernoulli(probs=mean)} where \code{mean = Normal(0, 1).cdf(matmul(X, weights))}.
16 | \item \code{GammaExp}: \code{Gamma(concentration=1, rate=1 / mean)} where \code{mean = exp(matmul(X, weights))}.
17 | \item \code{GammaSoftplus}: \code{Gamma(concentration=1, rate=1 / mean)} where \code{mean = softplus(matmul(X, weights))}.
18 | \item \code{LogNormal}: \code{LogNormal(loc=log(mean) - log(2) / 2, scale=sqrt(log(2)))} where
19 | \code{mean = exp(matmul(X, weights))}.
20 | \item \code{LogNormalSoftplus}: \code{LogNormal(loc=log(mean) - log(2) / 2, scale=sqrt(log(2)))} where
21 | \code{mean = softplus(matmul(X, weights))}.
22 | \item \code{Normal}: \code{Normal(loc=mean, scale=1)} where \code{mean = matmul(X, weights)}.
23 | \item \code{NormalReciprocal}: \code{Normal(loc=mean, scale=1)} where \code{mean = 1 / matmul(X, weights)}.
24 | \item \code{Poisson}: \code{Poisson(rate=mean)} where \code{mean = exp(matmul(X, weights))}.
25 | \item \code{PoissonSoftplus}: \code{Poisson(rate=mean)} where \code{mean = softplus(matmul(X, weights))}.
26 | }
27 | }
28 | \seealso{
29 | Other glm_fit:
30 | \code{\link{glm_fit.tensorflow.tensor}()},
31 | \code{\link{glm_fit_one_step.tensorflow.tensor}()}
32 | }
33 | \concept{glm_fit}
34 |
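35 | \section{Example (sketch)}{
36 |
37 | A minimal, hypothetical sketch of selecting a family by name; the simulated
38 | data and coefficients below are illustrative, not from the package docs:
39 |
40 | \preformatted{library(tensorflow)
41 | library(tfprobability)
42 |
43 | x_r <- matrix(rnorm(50 * 2), ncol = 2)
44 | # Poisson regression: mean = exp(matmul(x, weights))
45 | y_r <- rpois(50, lambda = exp(x_r \%*\% c(0.3, -0.2)))
46 |
47 | x <- tf$constant(x_r, dtype = tf$float32)
48 | y <- tf$constant(y_r, dtype = tf$float32)
49 | fit <- glm_fit(x, response = y, model = "Poisson")
50 | }
51 | }
52 |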
--------------------------------------------------------------------------------
/man/glm_fit.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/glm.R
3 | \name{glm_fit}
4 | \alias{glm_fit}
5 | \title{Runs multiple Fisher scoring steps}
6 | \usage{
7 | glm_fit(x, ...)
8 | }
9 | \arguments{
10 | \item{x}{float-like, matrix-shaped Tensor where each row represents a sample's
11 | features.}
12 |
13 | \item{...}{other arguments passed to specific methods.}
14 | }
15 | \value{
16 | A \code{glm_fit} object with parameter estimates, number of iterations,
17 | etc.
18 | }
19 | \description{
20 | Runs multiple Fisher scoring steps
21 | }
22 | \seealso{
23 | \code{\link[=glm_fit.tensorflow.tensor]{glm_fit.tensorflow.tensor()}}
24 | }
25 |
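26 | \section{Example (sketch)}{
27 |
28 | A minimal sketch, assuming tensor inputs so dispatch goes to
29 | \code{glm_fit.tensorflow.tensor()}; the data are simulated and illustrative:
30 |
31 | \preformatted{library(tensorflow)
32 | library(tfprobability)
33 |
34 | x_r <- matrix(rnorm(100 * 3), ncol = 3)
35 | y_r <- x_r \%*\% c(1, -2, 0.5) + rnorm(100)
36 |
37 | x <- tf$constant(x_r, dtype = tf$float32)
38 | y <- tf$constant(as.numeric(y_r), dtype = tf$float32)
39 |
40 | fit <- glm_fit(x, response = y, model = "Normal")
41 | }
42 | }
43 |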
--------------------------------------------------------------------------------
/man/glm_fit_one_step.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/glm.R
3 | \name{glm_fit_one_step}
4 | \alias{glm_fit_one_step}
5 | \title{Runs one Fisher scoring step}
6 | \usage{
7 | glm_fit_one_step(x, ...)
8 | }
9 | \arguments{
10 | \item{x}{float-like, matrix-shaped Tensor where each row represents a sample's
11 | features.}
12 |
13 | \item{...}{other arguments passed to specific methods.}
14 | }
15 | \value{
16 | A \code{glm_fit} object with parameter estimates, number of iterations,
17 | etc.
18 | }
19 | \description{
20 | Runs one Fisher scoring step
21 | }
22 | \seealso{
23 | \code{\link[=glm_fit_one_step.tensorflow.tensor]{glm_fit_one_step.tensorflow.tensor()}}
24 | }
25 |
--------------------------------------------------------------------------------
/man/glm_fit_one_step.tensorflow.tensor.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/glm.R
3 | \name{glm_fit_one_step.tensorflow.tensor}
4 | \alias{glm_fit_one_step.tensorflow.tensor}
5 | \title{Runs one Fisher Scoring step}
6 | \usage{
7 | \method{glm_fit_one_step}{tensorflow.tensor}(
8 | x,
9 | response,
10 | model,
11 | model_coefficients_start = NULL,
12 | predicted_linear_response_start = NULL,
13 | l2_regularizer = NULL,
14 | dispersion = NULL,
15 | offset = NULL,
16 | learning_rate = NULL,
17 | fast_unsafe_numerics = TRUE,
18 | name = NULL,
19 | ...
20 | )
21 | }
22 | \arguments{
23 | \item{x}{float-like, matrix-shaped Tensor where each row represents a sample's
24 | features.}
25 |
26 | \item{response}{vector-shaped Tensor where each element represents a sample's
27 | observed response (to the corresponding row of features). Must have same \code{dtype}
28 | as \code{x}.}
29 |
30 | \item{model}{a string naming the model (see \link{glm_families}) or a \code{tfp$glm$ExponentialFamily}-like
31 | instance which implicitly characterizes a negative log-likelihood loss by specifying
32 | the distribution's mean, gradient_mean, and variance.}
33 |
34 | \item{model_coefficients_start}{Optional (batch of) vector-shaped Tensor representing
35 | the initial model coefficients, one for each column in \code{x}. Must have same \code{dtype}
36 | as model_matrix. Default value: Zeros.}
37 |
38 | \item{predicted_linear_response_start}{Optional Tensor with shape, \code{dtype} matching
39 | \code{response}; represents offset-shifted initial linear predictions based on
40 | \code{model_coefficients_start}. Default value: \code{offset} if \code{model_coefficients_start} is \code{NULL},
41 | and \code{tf$linalg$matvec(x, model_coefficients_start) + offset} otherwise.}
42 |
43 | \item{l2_regularizer}{Optional scalar Tensor representing L2 regularization penalty.
44 | Default: \code{NULL}, i.e., no regularization.}
45 |
46 | \item{dispersion}{Optional (batch of) Tensor representing response dispersion.}
47 |
48 | \item{offset}{Optional Tensor representing constant shift applied to \code{predicted_linear_response}.}
49 |
50 | \item{learning_rate}{Optional (batch of) scalar Tensor used to dampen iterative progress.
51 | Typically only needed if optimization diverges, should be no larger than 1 and typically
52 | very close to 1. Default value: \code{NULL} (i.e., 1).}
53 |
54 | \item{fast_unsafe_numerics}{Optional logical indicating if faster, less numerically
55 | accurate methods can be employed for computing the weighted least-squares solution. Default
56 | value: TRUE (i.e., "fast but possibly diminished accuracy").}
57 |
58 | \item{name}{used as a name prefix for ops created by this function. Default value: "fit".}
59 |
60 | \item{...}{other arguments passed to specific methods.}
61 | }
62 | \value{
63 | A \code{glm_fit} object with parameter estimates and the
64 | number of required steps.
65 | }
66 | \description{
67 | Runs one Fisher Scoring step
68 | }
69 | \seealso{
70 | Other glm_fit:
71 | \code{\link{glm_families}},
72 | \code{\link{glm_fit.tensorflow.tensor}()}
73 | }
74 | \concept{glm_fit}
75 |
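76 | \section{Example (sketch)}{
77 |
78 | A hypothetical single-step call; the data are simulated and the penalty
79 | value is illustrative:
80 |
81 | \preformatted{library(tensorflow)
82 | library(tfprobability)
83 |
84 | x <- tf$constant(matrix(rnorm(100 * 3), ncol = 3), dtype = tf$float32)
85 | y <- tf$linalg$matvec(x, tf$constant(c(1, -2, 0.5), dtype = tf$float32)) +
86 |   tf$random$normal(shape(100))
87 |
88 | step <- glm_fit_one_step(x, response = y, model = "Normal",
89 |                          l2_regularizer = 0.01)
90 | }
91 | }
92 |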
--------------------------------------------------------------------------------
/man/initializer_blockwise.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/initializers.R
3 | \name{initializer_blockwise}
4 | \alias{initializer_blockwise}
5 | \title{Blockwise Initializer}
6 | \usage{
7 | initializer_blockwise(initializers, sizes, validate_args = FALSE)
8 | }
9 | \arguments{
10 | \item{initializers}{list of Keras initializers, e.g., \code{\link[keras:initializer_glorot_uniform]{keras::initializer_glorot_uniform()}}
11 | or \code{\link[=initializer_constant]{initializer_constant()}}.}
12 |
13 | \item{sizes}{list of integer scalars representing the number of elements associated
14 | with each initializer in \code{initializers}.}
15 |
16 | \item{validate_args}{bool indicating we should do (possibly expensive) graph-time
17 | assertions, if necessary.}
18 | }
19 | \value{
20 | Initializer which concatenates other initializers.
21 | }
22 | \description{
23 | Initializer which concatenates other initializers.
24 | }
25 |
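26 | \section{Example (sketch)}{
27 |
28 | A minimal construction sketch; the sizes are illustrative and must sum to
29 | the number of elements being initialized:
30 |
31 | \preformatted{library(keras)
32 | library(tfprobability)
33 |
34 | init <- initializer_blockwise(
35 |   initializers = list(
36 |     initializer_glorot_uniform(),
37 |     initializer_constant(0)
38 |   ),
39 |   sizes = list(10L, 5L)
40 | )
41 | }
42 | }
43 |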
--------------------------------------------------------------------------------
/man/install_tfprobability.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/install.R
3 | \name{install_tfprobability}
4 | \alias{install_tfprobability}
5 | \title{Installs TensorFlow Probability}
6 | \usage{
7 | install_tfprobability(
8 | method = c("auto", "virtualenv", "conda"),
9 | conda = "auto",
10 | version = "default",
11 | tensorflow = "default",
12 | extra_packages = NULL,
13 | ...,
14 | pip_ignore_installed = TRUE
15 | )
16 | }
17 | \arguments{
18 | \item{method}{Installation method. By default, "auto" automatically finds a
19 | method that will work in the local environment. Change the default to force
20 | a specific installation method. Note that the "virtualenv" method is not
21 | available on Windows.}
22 |
23 | \item{conda}{The path to a \code{conda} executable. Use \code{"auto"} to allow
24 | \code{reticulate} to automatically find an appropriate \code{conda} binary.
25 | See \strong{Finding Conda} and \code{\link[reticulate:conda_binary]{conda_binary()}} for more details.}
26 |
27 | \item{version}{TensorFlow version to install. Valid values include:
28 | \itemize{
29 | \item \code{"default"} installs 2.9
30 | \item \code{"release"} installs the latest release version of tensorflow (which may
31 | be incompatible with the current version of the R package)
32 | \item A version specification like \code{"2.4"} or \code{"2.4.0"}. Note that if the patch
33 | version is not supplied, the latest patch release is installed (e.g.,
34 | \code{"2.4"} today installs version "2.4.2")
35 | \item \code{"nightly"} for the latest available nightly build.
36 | \item To any specification, you can append "-cpu" to install the CPU-only version
37 | of the package (e.g., \code{"2.4-cpu"}).
38 | \item The full URL or path to an installer binary or python *.whl file.
39 | }}
40 |
41 | \item{tensorflow}{Synonym for \code{version}. Maintained for backwards compatibility.}
42 |
43 | \item{extra_packages}{Additional Python packages to install along with
44 | TensorFlow.}
45 |
46 | \item{...}{other arguments passed to \code{\link[reticulate:conda-tools]{reticulate::conda_install()}} or
47 | \code{\link[reticulate:virtualenv-tools]{reticulate::virtualenv_install()}}, depending on the \code{method} used.}
48 |
49 | \item{pip_ignore_installed}{Whether pip should ignore already-installed Python
50 | packages and reinstall them. This defaults
51 | to \code{TRUE}, to ensure that TensorFlow dependencies like NumPy are compatible
52 | with the prebuilt TensorFlow binaries.}
53 | }
54 | \value{
55 | invisible
56 | }
57 | \description{
58 | Installs TensorFlow Probability
59 | }
60 |
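61 | \section{Example (sketch)}{
62 |
63 | Typical calls; the pinned version numbers are illustrative:
64 |
65 | \preformatted{library(tfprobability)
66 |
67 | # default installation
68 | install_tfprobability()
69 |
70 | # pin versions; appending "-cpu" installs CPU-only TensorFlow
71 | install_tfprobability(version = "0.12.2", tensorflow = "2.4-cpu")
72 | }
73 | }
74 |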
--------------------------------------------------------------------------------
/man/layer_autoregressive_transform.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/layers.R
3 | \name{layer_autoregressive_transform}
4 | \alias{layer_autoregressive_transform}
5 | \title{An autoregressive normalizing flow layer, given a \code{layer_autoregressive}.}
6 | \usage{
7 | layer_autoregressive_transform(object, made, ...)
8 | }
9 | \arguments{
10 | \item{object}{What to compose the new \code{Layer} instance with. Typically a
11 | Sequential model or a Tensor (e.g., as returned by \code{layer_input()}).
12 | The return value depends on \code{object}. If \code{object} is:
13 | \itemize{
14 | \item missing or \code{NULL}, the \code{Layer} instance is returned.
15 | \item a \code{Sequential} model, the model with an additional layer is returned.
16 | \item a Tensor, the output tensor from \code{layer_instance(object)} is returned.
17 | }}
18 |
19 | \item{made}{A \code{Made} layer, which must output two parameters for each input.}
20 |
21 | \item{...}{Additional parameters passed to Keras Layer.}
22 | }
23 | \value{
24 | a Keras layer
25 | }
26 | \description{
27 | Following \href{https://arxiv.org/abs/1705.07057}{Papamakarios et al. (2017)}, given
28 | an autoregressive model \eqn{p(x)} with conditional distributions in the location-scale
29 | family, we can construct a normalizing flow for \eqn{p(x)}.
30 | }
31 | \details{
32 | Specifically, suppose \code{made} is a \code{\link[=layer_autoregressive]{layer_autoregressive()}} -- a layer implementing
33 | a Masked Autoencoder for Distribution Estimation (MADE) -- that computes location
34 | and log-scale parameters \eqn{made(x)[i]} for each input \eqn{x[i]}. Then we can represent
35 | the autoregressive model \eqn{p(x)} as \eqn{x = f(u)} where \eqn{u} is drawn
36 | from some base distribution and where \eqn{f} is an invertible and
37 | differentiable function (i.e., a Bijector) and \eqn{f^{-1}(x)} is defined by:
38 |
39 | \if{html}{\out{<div class="sourceCode r">}}\preformatted{library(tensorflow)
40 | library(zeallot)
41 | f_inverse <- function(x) \{
42 | c(shift, log_scale) \%<-\% tf$unstack(made(x), 2, axis = -1L)
43 | (x - shift) * tf$math$exp(-log_scale)
44 | \}
45 | }\if{html}{\out{</div>}}
46 |
47 | Given a \code{\link[=layer_autoregressive]{layer_autoregressive()}} made, a \code{\link[=layer_autoregressive_transform]{layer_autoregressive_transform()}}
48 | transforms an input \verb{tfd_*} \eqn{p(u)} to an output \verb{tfd_*} \eqn{p(x)} where
49 | \eqn{x = f(u)}.
50 | }
51 | \references{
52 | \href{https://arxiv.org/abs/1705.07057}{Papamakarios et al. (2017)}
53 | }
54 | \seealso{
55 | \code{\link[=tfb_masked_autoregressive_flow]{tfb_masked_autoregressive_flow()}} and \code{\link[=layer_autoregressive]{layer_autoregressive()}}
56 | }
57 |
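58 | \section{Example (sketch)}{
59 |
60 | A hypothetical sketch of constructing the pieces; the shapes and hidden
61 | units are illustrative:
62 |
63 | \preformatted{library(keras)
64 | library(tfprobability)
65 |
66 | # MADE network emitting two params (shift, log scale) per input dimension
67 | made <- layer_autoregressive(params = 2L, event_shape = 2L,
68 |                              hidden_units = c(10L, 10L),
69 |                              activation = "relu")
70 |
71 | # called without `object`, this returns the Layer instance, ready to be
72 | # composed with a distribution-valued input (e.g., from
73 | # layer_distribution_lambda())
74 | flow <- layer_autoregressive_transform(made = made)
75 | }
76 | }
77 |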
--------------------------------------------------------------------------------
/man/layer_dense_variational.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/layers.R
3 | \name{layer_dense_variational}
4 | \alias{layer_dense_variational}
5 | \title{Dense Variational Layer}
6 | \usage{
7 | layer_dense_variational(
8 | object,
9 | units,
10 | make_posterior_fn,
11 | make_prior_fn,
12 | kl_weight = NULL,
13 | kl_use_exact = FALSE,
14 | activation = NULL,
15 | use_bias = TRUE,
16 | ...
17 | )
18 | }
19 | \arguments{
20 | \item{object}{What to compose the new \code{Layer} instance with. Typically a
21 | Sequential model or a Tensor (e.g., as returned by \code{layer_input()}).
22 | The return value depends on \code{object}. If \code{object} is:
23 | \itemize{
24 | \item missing or \code{NULL}, the \code{Layer} instance is returned.
25 | \item a \code{Sequential} model, the model with an additional layer is returned.
26 | \item a Tensor, the output tensor from \code{layer_instance(object)} is returned.
27 | }}
28 |
29 | \item{units}{Positive integer, dimensionality of the output space.}
30 |
31 | \item{make_posterior_fn}{function taking \code{tf$size(kernel)},
32 | \code{tf$size(bias)}, \code{dtype} and returns another callable which takes an
33 | input and produces a \code{tfd$Distribution} instance.}
34 |
35 | \item{make_prior_fn}{function taking \code{tf$size(kernel)}, \code{tf$size(bias)},
36 | \code{dtype} and returns another callable which takes an input and produces a
37 | \code{tfd$Distribution} instance.}
38 |
39 | \item{kl_weight}{Amount by which to scale the KL divergence loss between prior
40 | and posterior.}
41 |
42 | \item{kl_use_exact}{Logical indicating that the analytical KL divergence
43 | should be used rather than a Monte Carlo approximation.}
44 |
45 | \item{activation}{An activation function. See \code{keras::layer_dense}. Default: \code{NULL}.}
46 |
47 | \item{use_bias}{Whether or not the dense layers constructed in this layer
48 | should have a bias term. See \code{keras::layer_dense}. Default: \code{TRUE}.}
49 |
50 | \item{...}{Additional keyword arguments passed to the \code{keras::layer_dense} constructed by this layer.}
51 | }
52 | \value{
53 | a Keras layer
54 | }
55 | \description{
56 | This layer uses variational inference to fit a "surrogate" posterior to the
57 | distribution over both the \code{kernel} matrix and the \code{bias} terms which are
58 | otherwise used in a manner similar to \code{layer_dense()}.
59 | This layer fits the "weights posterior" according to the following generative
60 | process:
61 |
62 | \if{html}{\out{<div class="sourceCode">}}\preformatted{[K, b] ~ Prior()
63 | M = matmul(X, K) + b
64 | Y ~ Likelihood(M)
65 | }\if{html}{\out{</div>}}
66 | }
67 | \seealso{
68 | Other layers:
69 | \code{\link{layer_autoregressive}()},
70 | \code{\link{layer_conv_1d_flipout}()},
71 | \code{\link{layer_conv_1d_reparameterization}()},
72 | \code{\link{layer_conv_2d_flipout}()},
73 | \code{\link{layer_conv_2d_reparameterization}()},
74 | \code{\link{layer_conv_3d_flipout}()},
75 | \code{\link{layer_conv_3d_reparameterization}()},
76 | \code{\link{layer_dense_flipout}()},
77 | \code{\link{layer_dense_local_reparameterization}()},
78 | \code{\link{layer_dense_reparameterization}()},
79 | \code{\link{layer_variable}()}
80 | }
81 | \concept{layers}
82 |
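83 | \section{Example (sketch)}{
84 |
85 | A sketch of the mean-field posterior / trainable prior pattern; the softplus
86 | scale transform, the \code{1e-5} floor, and \code{kl_weight} are illustrative
87 | choices, not package defaults:
88 |
89 | \preformatted{library(keras)
90 | library(tensorflow)
91 | library(tfprobability)
92 |
93 | posterior_mean_field <- function(kernel_size, bias_size = 0L, dtype = NULL) \{
94 |   n <- kernel_size + bias_size
95 |   keras_model_sequential(list(
96 |     layer_variable(shape = 2 * n, dtype = dtype),
97 |     layer_distribution_lambda(function(t) \{
98 |       tfd_independent(
99 |         tfd_normal(loc = t[1:n],
100 |                    scale = 1e-5 + tf$nn$softplus(t[(n + 1):(2 * n)])),
101 |         reinterpreted_batch_ndims = 1L
102 |       )
103 |     \})
104 |   ))
105 | \}
106 |
107 | prior_trainable <- function(kernel_size, bias_size = 0L, dtype = NULL) \{
108 |   n <- kernel_size + bias_size
109 |   keras_model_sequential(list(
110 |     layer_variable(shape = n, dtype = dtype),
111 |     layer_distribution_lambda(function(t) \{
112 |       tfd_independent(tfd_normal(loc = t, scale = 1),
113 |                       reinterpreted_batch_ndims = 1L)
114 |     \})
115 |   ))
116 | \}
117 |
118 | model <- keras_model_sequential(list(
119 |   layer_dense_variational(units = 1L,
120 |                           make_posterior_fn = posterior_mean_field,
121 |                           make_prior_fn = prior_trainable,
122 |                           kl_weight = 1 / 100)
123 | ))
124 | }
125 | }
126 |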
--------------------------------------------------------------------------------
/man/layer_distribution_lambda.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/distribution-layers.R
3 | \name{layer_distribution_lambda}
4 | \alias{layer_distribution_lambda}
5 | \title{Keras layer enabling plumbing TFP distributions through Keras models}
6 | \usage{
7 | layer_distribution_lambda(
8 | object,
9 | make_distribution_fn,
10 | convert_to_tensor_fn = tfp$distributions$Distribution$sample,
11 | ...
12 | )
13 | }
14 | \arguments{
15 | \item{object}{What to compose the new \code{Layer} instance with. Typically a
16 | Sequential model or a Tensor (e.g., as returned by \code{layer_input()}).
17 | The return value depends on \code{object}. If \code{object} is:
18 | \itemize{
19 | \item missing or \code{NULL}, the \code{Layer} instance is returned.
20 | \item a \code{Sequential} model, the model with an additional layer is returned.
21 | \item a Tensor, the output tensor from \code{layer_instance(object)} is returned.
22 | }}
23 |
24 | \item{make_distribution_fn}{A callable that takes previous layer outputs and returns a \code{tfp$distributions$Distribution} instance.}
25 |
26 | \item{convert_to_tensor_fn}{A callable that takes a \code{tfp$distributions$Distribution} instance and returns a
27 | \code{tf$Tensor}-like object. Default value: \code{tfp$distributions$Distribution$sample}.}
28 |
29 | \item{...}{Additional arguments passed to \code{args} of \code{keras::create_layer}.}
30 | }
31 | \value{
32 | a Keras layer
33 | }
34 | \description{
35 | Keras layer enabling plumbing TFP distributions through Keras models
36 | }
37 | \seealso{
38 | For an example how to use in a Keras model, see \code{\link[=layer_independent_normal]{layer_independent_normal()}}.
39 |
40 | Other distribution_layers:
41 | \code{\link{layer_categorical_mixture_of_one_hot_categorical}()},
42 | \code{\link{layer_independent_bernoulli}()},
43 | \code{\link{layer_independent_logistic}()},
44 | \code{\link{layer_independent_normal}()},
45 | \code{\link{layer_independent_poisson}()},
46 | \code{\link{layer_kl_divergence_add_loss}()},
47 | \code{\link{layer_kl_divergence_regularizer}()},
48 | \code{\link{layer_mixture_logistic}()},
49 | \code{\link{layer_mixture_normal}()},
50 | \code{\link{layer_mixture_same_family}()},
51 | \code{\link{layer_multivariate_normal_tri_l}()},
52 | \code{\link{layer_one_hot_categorical}()}
53 | }
54 | \concept{distribution_layers}
55 |
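56 | \section{Example (sketch)}{
57 |
58 | A sketch of a probabilistic regression output layer; the architecture and
59 | the \code{1e-3} scale floor are illustrative:
60 |
61 | \preformatted{library(keras)
62 | library(tensorflow)
63 | library(tfprobability)
64 |
65 | model <- keras_model_sequential(list(
66 |   layer_dense(units = 2L, input_shape = 1L),
67 |   layer_distribution_lambda(function(t) \{
68 |     tfd_normal(loc = t[, 1, drop = FALSE],
69 |                scale = 1e-3 + tf$math$softplus(t[, 2, drop = FALSE]))
70 |   \})
71 | ))
72 |
73 | # the model output is a distribution; train by maximum likelihood
74 | negloglik <- function(y, d) -tfd_log_prob(d, y)
75 | compile(model, optimizer = "adam", loss = negloglik)
76 | }
77 | }
78 |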
--------------------------------------------------------------------------------
/man/layer_independent_bernoulli.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/distribution-layers.R
3 | \name{layer_independent_bernoulli}
4 | \alias{layer_independent_bernoulli}
5 | \title{An Independent-Bernoulli Keras layer from prod(event_shape) params}
6 | \usage{
7 | layer_independent_bernoulli(
8 | object,
9 | event_shape,
10 | convert_to_tensor_fn = tfp$distributions$Distribution$sample,
11 | sample_dtype = NULL,
12 | validate_args = FALSE,
13 | ...
14 | )
15 | }
16 | \arguments{
17 | \item{object}{What to compose the new \code{Layer} instance with. Typically a
18 | Sequential model or a Tensor (e.g., as returned by \code{layer_input()}).
19 | The return value depends on \code{object}. If \code{object} is:
20 | \itemize{
21 | \item missing or \code{NULL}, the \code{Layer} instance is returned.
22 | \item a \code{Sequential} model, the model with an additional layer is returned.
23 | \item a Tensor, the output tensor from \code{layer_instance(object)} is returned.
24 | }}
25 |
26 | \item{event_shape}{Scalar integer representing the size of a single draw from this distribution.}
27 |
28 | \item{convert_to_tensor_fn}{A callable that takes a \code{tfp$distributions$Distribution} instance and returns a
29 | \code{tf$Tensor}-like object. Default value: \code{tfp$distributions$Distribution$sample}.}
30 |
31 | \item{sample_dtype}{dtype of samples produced by this distribution.
32 | Default value: NULL (i.e., previous layer's dtype).}
33 |
34 | \item{validate_args}{Logical, default FALSE. When TRUE distribution parameters are checked
35 | for validity despite possibly degrading runtime performance. When FALSE invalid inputs may
36 | silently render incorrect outputs. Default value: FALSE.}
37 |
38 |
39 | \item{...}{Additional arguments passed to \code{args} of \code{keras::create_layer}.}
40 | }
41 | \value{
42 | a Keras layer
43 | }
44 | \description{
45 | An Independent-Bernoulli Keras layer from prod(event_shape) params
46 | }
47 | \seealso{
48 | For an example how to use in a Keras model, see \code{\link[=layer_independent_normal]{layer_independent_normal()}}.
49 |
50 | Other distribution_layers:
51 | \code{\link{layer_categorical_mixture_of_one_hot_categorical}()},
52 | \code{\link{layer_distribution_lambda}()},
53 | \code{\link{layer_independent_logistic}()},
54 | \code{\link{layer_independent_normal}()},
55 | \code{\link{layer_independent_poisson}()},
56 | \code{\link{layer_kl_divergence_add_loss}()},
57 | \code{\link{layer_kl_divergence_regularizer}()},
58 | \code{\link{layer_mixture_logistic}()},
59 | \code{\link{layer_mixture_normal}()},
60 | \code{\link{layer_mixture_same_family}()},
61 | \code{\link{layer_multivariate_normal_tri_l}()},
62 | \code{\link{layer_one_hot_categorical}()}
63 | }
64 | \concept{distribution_layers}
65 |
--------------------------------------------------------------------------------
/man/layer_independent_logistic.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/distribution-layers.R
3 | \name{layer_independent_logistic}
4 | \alias{layer_independent_logistic}
5 | \title{An independent Logistic Keras layer.}
6 | \usage{
7 | layer_independent_logistic(
8 | object,
9 | event_shape,
10 | convert_to_tensor_fn = tfp$distributions$Distribution$sample,
11 | validate_args = FALSE,
12 | ...
13 | )
14 | }
15 | \arguments{
16 | \item{object}{What to compose the new \code{Layer} instance with. Typically a
17 | Sequential model or a Tensor (e.g., as returned by \code{layer_input()}).
18 | The return value depends on \code{object}. If \code{object} is:
19 | \itemize{
20 | \item missing or \code{NULL}, the \code{Layer} instance is returned.
21 | \item a \code{Sequential} model, the model with an additional layer is returned.
22 | \item a Tensor, the output tensor from \code{layer_instance(object)} is returned.
23 | }}
24 |
25 | \item{event_shape}{Scalar integer representing the size of a single draw from this distribution.}
26 |
27 | \item{convert_to_tensor_fn}{A callable that takes a \code{tfp$distributions$Distribution} instance and returns a
28 | \code{tf$Tensor}-like object. Default value: \code{tfp$distributions$Distribution$sample}.}
29 |
30 | \item{validate_args}{Logical, default FALSE. When TRUE distribution parameters are checked
31 | for validity despite possibly degrading runtime performance. When FALSE invalid inputs may
32 | silently render incorrect outputs. Default value: FALSE.}
33 |
34 |
35 | \item{...}{Additional arguments passed to \code{args} of \code{keras::create_layer}.}
36 | }
37 | \value{
38 | a Keras layer
39 | }
40 | \description{
41 | An independent Logistic Keras layer.
42 | }
43 | \seealso{
44 | For an example how to use in a Keras model, see \code{\link[=layer_independent_normal]{layer_independent_normal()}}.
45 |
46 | Other distribution_layers:
47 | \code{\link{layer_categorical_mixture_of_one_hot_categorical}()},
48 | \code{\link{layer_distribution_lambda}()},
49 | \code{\link{layer_independent_bernoulli}()},
50 | \code{\link{layer_independent_normal}()},
51 | \code{\link{layer_independent_poisson}()},
52 | \code{\link{layer_kl_divergence_add_loss}()},
53 | \code{\link{layer_kl_divergence_regularizer}()},
54 | \code{\link{layer_mixture_logistic}()},
55 | \code{\link{layer_mixture_normal}()},
56 | \code{\link{layer_mixture_same_family}()},
57 | \code{\link{layer_multivariate_normal_tri_l}()},
58 | \code{\link{layer_one_hot_categorical}()}
59 | }
60 | \concept{distribution_layers}
61 |
--------------------------------------------------------------------------------
/man/layer_independent_normal.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/distribution-layers.R
3 | \name{layer_independent_normal}
4 | \alias{layer_independent_normal}
5 | \title{An independent Normal Keras layer.}
6 | \usage{
7 | layer_independent_normal(
8 | object,
9 | event_shape,
10 | convert_to_tensor_fn = tfp$distributions$Distribution$sample,
11 | validate_args = FALSE,
12 | ...
13 | )
14 | }
15 | \arguments{
16 | \item{object}{What to compose the new \code{Layer} instance with. Typically a
17 | Sequential model or a Tensor (e.g., as returned by \code{layer_input()}).
18 | The return value depends on \code{object}. If \code{object} is:
19 | \itemize{
20 | \item missing or \code{NULL}, the \code{Layer} instance is returned.
21 | \item a \code{Sequential} model, the model with an additional layer is returned.
22 | \item a Tensor, the output tensor from \code{layer_instance(object)} is returned.
23 | }}
24 |
25 | \item{event_shape}{Scalar integer representing the size of a single draw from this distribution.}
26 |
27 | \item{convert_to_tensor_fn}{A callable that takes a \code{tfp$distributions$Distribution} instance and returns a
28 | \code{tf$Tensor}-like object. Default value: \code{tfp$distributions$Distribution$sample}.}
29 |
30 | \item{validate_args}{Logical, default FALSE. When TRUE distribution parameters are checked
31 | for validity despite possibly degrading runtime performance. When FALSE invalid inputs may
32 | silently render incorrect outputs. Default value: FALSE.}
33 |
34 |
35 | \item{...}{Additional arguments passed to \code{args} of \code{keras::create_layer}.}
36 | }
37 | \value{
38 | a Keras layer
39 | }
40 | \description{
41 | An independent Normal Keras layer.
42 | }
43 | \examples{
44 | \donttest{
45 | library(keras)
46 | input_shape <- c(28, 28, 1)
47 | encoded_shape <- 2
48 | n <- 2
49 | model <- keras_model_sequential(
50 | list(
51 | layer_input(shape = input_shape),
52 | layer_flatten(),
53 | layer_dense(units = n),
54 | layer_dense(units = params_size_independent_normal(encoded_shape)),
55 | layer_independent_normal(event_shape = encoded_shape)
56 | )
57 | )
58 | }
59 | }
60 | \seealso{
61 | Other distribution_layers:
62 | \code{\link{layer_categorical_mixture_of_one_hot_categorical}()},
63 | \code{\link{layer_distribution_lambda}()},
64 | \code{\link{layer_independent_bernoulli}()},
65 | \code{\link{layer_independent_logistic}()},
66 | \code{\link{layer_independent_poisson}()},
67 | \code{\link{layer_kl_divergence_add_loss}()},
68 | \code{\link{layer_kl_divergence_regularizer}()},
69 | \code{\link{layer_mixture_logistic}()},
70 | \code{\link{layer_mixture_normal}()},
71 | \code{\link{layer_mixture_same_family}()},
72 | \code{\link{layer_multivariate_normal_tri_l}()},
73 | \code{\link{layer_one_hot_categorical}()}
74 | }
75 | \concept{distribution_layers}
76 |
--------------------------------------------------------------------------------
/man/layer_independent_poisson.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/distribution-layers.R
3 | \name{layer_independent_poisson}
4 | \alias{layer_independent_poisson}
5 | \title{An independent Poisson Keras layer.}
6 | \usage{
7 | layer_independent_poisson(
8 | object,
9 | event_shape,
10 | convert_to_tensor_fn = tfp$distributions$Distribution$sample,
11 | validate_args = FALSE,
12 | ...
13 | )
14 | }
15 | \arguments{
16 | \item{object}{What to compose the new \code{Layer} instance with. Typically a
17 | Sequential model or a Tensor (e.g., as returned by \code{layer_input()}).
18 | The return value depends on \code{object}. If \code{object} is:
19 | \itemize{
20 | \item missing or \code{NULL}, the \code{Layer} instance is returned.
21 | \item a \code{Sequential} model, the model with an additional layer is returned.
22 | \item a Tensor, the output tensor from \code{layer_instance(object)} is returned.
23 | }}
24 |
25 | \item{event_shape}{Scalar integer representing the size of a single draw from this distribution.}
26 |
27 | \item{convert_to_tensor_fn}{A callable that takes a \code{tfp$distributions$Distribution} instance and returns a
28 | \code{tf$Tensor}-like object. Default value: \code{tfp$distributions$Distribution$sample}.}
29 |
30 | \item{validate_args}{Logical, default FALSE. When TRUE distribution parameters are checked
31 | for validity despite possibly degrading runtime performance. When FALSE invalid inputs may
32 | silently render incorrect outputs. Default value: FALSE.}
33 |
34 |
35 | \item{...}{Additional arguments passed to \code{args} of \code{keras::create_layer}.}
36 | }
37 | \value{
38 | a Keras layer
39 | }
40 | \description{
41 | An independent Poisson Keras layer.
42 | }
43 | \seealso{
44 | For an example how to use in a Keras model, see \code{\link[=layer_independent_normal]{layer_independent_normal()}}.
45 |
46 | Other distribution_layers:
47 | \code{\link{layer_categorical_mixture_of_one_hot_categorical}()},
48 | \code{\link{layer_distribution_lambda}()},
49 | \code{\link{layer_independent_bernoulli}()},
50 | \code{\link{layer_independent_logistic}()},
51 | \code{\link{layer_independent_normal}()},
52 | \code{\link{layer_kl_divergence_add_loss}()},
53 | \code{\link{layer_kl_divergence_regularizer}()},
54 | \code{\link{layer_mixture_logistic}()},
55 | \code{\link{layer_mixture_normal}()},
56 | \code{\link{layer_mixture_same_family}()},
57 | \code{\link{layer_multivariate_normal_tri_l}()},
58 | \code{\link{layer_one_hot_categorical}()}
59 | }
60 | \concept{distribution_layers}
61 |
--------------------------------------------------------------------------------
/man/layer_kl_divergence_add_loss.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/distribution-layers.R
3 | \name{layer_kl_divergence_add_loss}
4 | \alias{layer_kl_divergence_add_loss}
5 | \title{Pass-through layer that adds a KL divergence penalty to the model loss}
6 | \usage{
7 | layer_kl_divergence_add_loss(
8 | object,
9 | distribution_b,
10 | use_exact_kl = FALSE,
11 | test_points_reduce_axis = NULL,
12 | test_points_fn = tf$convert_to_tensor,
13 | weight = NULL,
14 | ...
15 | )
16 | }
17 | \arguments{
18 | \item{object}{What to compose the new \code{Layer} instance with. Typically a
19 | Sequential model or a Tensor (e.g., as returned by \code{layer_input()}).
20 | The return value depends on \code{object}. If \code{object} is:
21 | \itemize{
22 | \item missing or \code{NULL}, the \code{Layer} instance is returned.
23 | \item a \code{Sequential} model, the model with an additional layer is returned.
24 | \item a Tensor, the output tensor from \code{layer_instance(object)} is returned.
25 | }}
26 |
27 | \item{distribution_b}{Distribution instance corresponding to \code{b} as in \code{KL[a, b]}.
28 | The previous layer's output is presumed to be a Distribution instance and corresponds to \code{a}.}
29 |
30 | \item{use_exact_kl}{Logical indicating if KL divergence should be
31 | calculated exactly via \code{tfp$distributions$kl_divergence} or via Monte Carlo approximation.
32 | Default value: FALSE.}
33 |
34 | \item{test_points_reduce_axis}{Integer vector or scalar representing dimensions
35 | over which to reduce_mean while calculating the Monte Carlo approximation of the KL divergence.
36 | As with all \code{tf$reduce_*} ops, \code{NULL} means reduce over all dimensions;
37 | \code{()} means reduce over none of them. Default value: \code{()} (i.e., no reduction).}
38 |
39 | \item{test_points_fn}{A callable taking a \code{tfp$distributions$Distribution} instance and returning a tensor
40 | used for random test points to approximate the KL divergence.
41 | Default value: tf$convert_to_tensor.}
42 |
43 | \item{weight}{Multiplier applied to the calculated KL divergence for each Keras batch member.
44 | Default value: NULL (i.e., do not weight each batch member).}
45 |
46 | \item{...}{Additional arguments passed to \code{args} of \code{keras::create_layer}.}
47 | }
48 | \value{
49 | a Keras layer
50 | }
51 | \description{
52 | Pass-through layer that adds a KL divergence penalty to the model loss
53 | }
54 | \seealso{
55 | For an example how to use in a Keras model, see \code{\link[=layer_independent_normal]{layer_independent_normal()}}.
56 |
57 | Other distribution_layers:
58 | \code{\link{layer_categorical_mixture_of_one_hot_categorical}()},
59 | \code{\link{layer_distribution_lambda}()},
60 | \code{\link{layer_independent_bernoulli}()},
61 | \code{\link{layer_independent_logistic}()},
62 | \code{\link{layer_independent_normal}()},
63 | \code{\link{layer_independent_poisson}()},
64 | \code{\link{layer_kl_divergence_regularizer}()},
65 | \code{\link{layer_mixture_logistic}()},
66 | \code{\link{layer_mixture_normal}()},
67 | \code{\link{layer_mixture_same_family}()},
68 | \code{\link{layer_multivariate_normal_tri_l}()},
69 | \code{\link{layer_one_hot_categorical}()}
70 | }
71 | \concept{distribution_layers}
72 |
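73 | \section{Example (sketch)}{
74 |
75 | A sketch of penalizing an encoder's output distribution against a fixed
76 | prior; the shapes are illustrative:
77 |
78 | \preformatted{library(keras)
79 | library(tfprobability)
80 |
81 | encoded_size <- 2L
82 | prior <- tfd_independent(
83 |   tfd_normal(loc = rep(0, encoded_size), scale = 1),
84 |   reinterpreted_batch_ndims = 1L
85 | )
86 |
87 | encoder <- keras_model_sequential(list(
88 |   layer_dense(units = params_size_independent_normal(encoded_size),
89 |               input_shape = 10L),
90 |   layer_independent_normal(event_shape = encoded_size),
91 |   # adds KL(a = encoder output, b = prior) to the model loss
92 |   layer_kl_divergence_add_loss(distribution_b = prior)
93 | ))
94 | }
95 | }
96 |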
--------------------------------------------------------------------------------
/man/layer_kl_divergence_regularizer.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/distribution-layers.R
3 | \name{layer_kl_divergence_regularizer}
4 | \alias{layer_kl_divergence_regularizer}
5 | \title{Regularizer that adds a KL divergence penalty to the model loss}
6 | \usage{
7 | layer_kl_divergence_regularizer(
8 | object,
9 | distribution_b,
10 | use_exact_kl = FALSE,
11 | test_points_reduce_axis = NULL,
12 | test_points_fn = tf$convert_to_tensor,
13 | weight = NULL,
14 | ...
15 | )
16 | }
17 | \arguments{
18 | \item{object}{What to compose the new \code{Layer} instance with. Typically a
19 | Sequential model or a Tensor (e.g., as returned by \code{layer_input()}).
20 | The return value depends on \code{object}. If \code{object} is:
21 | \itemize{
22 | \item missing or \code{NULL}, the \code{Layer} instance is returned.
23 | \item a \code{Sequential} model, the model with an additional layer is returned.
24 | \item a Tensor, the output tensor from \code{layer_instance(object)} is returned.
25 | }}
26 |
27 | \item{distribution_b}{Distribution instance corresponding to \code{b} as in \code{KL[a, b]}.
28 | The previous layer's output is presumed to be a Distribution instance and corresponds to \code{a}.}
29 |
30 | \item{use_exact_kl}{Logical indicating if KL divergence should be
31 | calculated exactly via \code{tfp$distributions$kl_divergence} or via Monte Carlo approximation.
32 | Default value: FALSE.}
33 |
34 | \item{test_points_reduce_axis}{Integer vector or scalar representing dimensions
35 | over which to reduce_mean while calculating the Monte Carlo approximation of the KL divergence.
36 | As with all \code{tf$reduce_*} ops, \code{NULL} means reduce over all dimensions;
37 | \code{()} means reduce over none of them. Default value: \code{()} (i.e., no reduction).}
38 |
39 | \item{test_points_fn}{A callable taking a \code{tfp$distributions$Distribution} instance and returning a tensor
40 | used for random test points to approximate the KL divergence.
41 | Default value: tf$convert_to_tensor.}
42 |
43 | \item{weight}{Multiplier applied to the calculated KL divergence for each Keras batch member.
44 | Default value: NULL (i.e., do not weight each batch member).}
45 |
46 | \item{...}{Additional arguments passed to \code{args} of \code{keras::create_layer}.}
47 | }
48 | \value{
49 | a Keras layer
50 | }
51 | \description{
52 | When using the Monte Carlo approximation (i.e., \code{use_exact_kl = FALSE}), it is presumed that the input
53 | distribution's concretization (i.e., \code{tf$convert_to_tensor(distribution)}) corresponds to a random
54 | sample. To override this behavior, set \code{test_points_fn}.
55 | }
56 | \seealso{
57 | For an example how to use in a Keras model, see \code{\link[=layer_independent_normal]{layer_independent_normal()}}.
58 |
59 | Other distribution_layers:
60 | \code{\link{layer_categorical_mixture_of_one_hot_categorical}()},
61 | \code{\link{layer_distribution_lambda}()},
62 | \code{\link{layer_independent_bernoulli}()},
63 | \code{\link{layer_independent_logistic}()},
64 | \code{\link{layer_independent_normal}()},
65 | \code{\link{layer_independent_poisson}()},
66 | \code{\link{layer_kl_divergence_add_loss}()},
67 | \code{\link{layer_mixture_logistic}()},
68 | \code{\link{layer_mixture_normal}()},
69 | \code{\link{layer_mixture_same_family}()},
70 | \code{\link{layer_multivariate_normal_tri_l}()},
71 | \code{\link{layer_one_hot_categorical}()}
72 | }
73 | \concept{distribution_layers}
74 |
--------------------------------------------------------------------------------
/man/layer_mixture_logistic.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/distribution-layers.R
3 | \name{layer_mixture_logistic}
4 | \alias{layer_mixture_logistic}
5 | \title{A mixture distribution Keras layer, with independent logistic components.}
6 | \usage{
7 | layer_mixture_logistic(
8 | object,
9 | num_components,
10 | event_shape = list(),
11 | convert_to_tensor_fn = tfp$distributions$Distribution$sample,
12 | validate_args = FALSE,
13 | ...
14 | )
15 | }
16 | \arguments{
17 | \item{object}{What to compose the new \code{Layer} instance with. Typically a
18 | Sequential model or a Tensor (e.g., as returned by \code{layer_input()}).
19 | The return value depends on \code{object}. If \code{object} is:
20 | \itemize{
21 | \item missing or \code{NULL}, the \code{Layer} instance is returned.
22 | \item a \code{Sequential} model, the model with an additional layer is returned.
23 | \item a Tensor, the output tensor from \code{layer_instance(object)} is returned.
24 | }}
25 |
26 | \item{num_components}{Number of component distributions in the mixture distribution.}
27 |
28 | \item{event_shape}{integer vector \code{Tensor} representing the shape of a single
29 | draw from this distribution.}
30 |
31 | \item{convert_to_tensor_fn}{A callable that takes a \code{tfp$distributions$Distribution} instance and returns a
32 | \code{tf$Tensor}-like object. Default value: \code{tfp$distributions$Distribution$sample}.}
33 |
34 | \item{validate_args}{Logical, default FALSE. When TRUE distribution parameters are checked
35 | for validity despite possibly degrading runtime performance. When FALSE invalid inputs may
36 | silently render incorrect outputs. Default value: FALSE.}
37 |
38 |
39 | \item{...}{Additional arguments passed to \code{args} of \code{keras::create_layer}.}
40 | }
41 | \value{
42 | a Keras layer
43 | }
44 | \description{
45 | A mixture distribution Keras layer, with independent logistic components.
46 | }
47 | \seealso{
48 | For an example how to use in a Keras model, see \code{\link[=layer_independent_normal]{layer_independent_normal()}}.
49 |
50 | Other distribution_layers:
51 | \code{\link{layer_categorical_mixture_of_one_hot_categorical}()},
52 | \code{\link{layer_distribution_lambda}()},
53 | \code{\link{layer_independent_bernoulli}()},
54 | \code{\link{layer_independent_logistic}()},
55 | \code{\link{layer_independent_normal}()},
56 | \code{\link{layer_independent_poisson}()},
57 | \code{\link{layer_kl_divergence_add_loss}()},
58 | \code{\link{layer_kl_divergence_regularizer}()},
59 | \code{\link{layer_mixture_normal}()},
60 | \code{\link{layer_mixture_same_family}()},
61 | \code{\link{layer_multivariate_normal_tri_l}()},
62 | \code{\link{layer_one_hot_categorical}()}
63 | }
64 | \concept{distribution_layers}
65 |
--------------------------------------------------------------------------------
/man/layer_mixture_normal.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/distribution-layers.R
3 | \name{layer_mixture_normal}
4 | \alias{layer_mixture_normal}
5 | \title{A mixture distribution Keras layer, with independent normal components.}
6 | \usage{
7 | layer_mixture_normal(
8 | object,
9 | num_components,
10 | event_shape = list(),
11 | convert_to_tensor_fn = tfp$distributions$Distribution$sample,
12 | validate_args = FALSE,
13 | ...
14 | )
15 | }
16 | \arguments{
17 | \item{object}{What to compose the new \code{Layer} instance with. Typically a
18 | Sequential model or a Tensor (e.g., as returned by \code{layer_input()}).
19 | The return value depends on \code{object}. If \code{object} is:
20 | \itemize{
21 | \item missing or \code{NULL}, the \code{Layer} instance is returned.
22 | \item a \code{Sequential} model, the model with an additional layer is returned.
23 | \item a Tensor, the output tensor from \code{layer_instance(object)} is returned.
24 | }}
25 |
26 | \item{num_components}{Number of component distributions in the mixture distribution.}
27 |
28 | \item{event_shape}{integer vector \code{Tensor} representing the shape of a single
29 | draw from this distribution.}
30 |
31 | \item{convert_to_tensor_fn}{A callable that takes a \code{tfp$distributions$Distribution} instance and returns a
32 | \code{tf$Tensor}-like object. Default value: \code{tfp$distributions$Distribution$sample}.}
33 |
34 | \item{validate_args}{Logical, default FALSE. When TRUE distribution parameters are checked
35 | for validity despite possibly degrading runtime performance. When FALSE invalid inputs may
36 | silently render incorrect outputs. Default value: FALSE.}
37 |
38 |
39 | \item{...}{Additional arguments passed to \code{args} of \code{keras::create_layer}.}
40 | }
41 | \value{
42 | a Keras layer
43 | }
44 | \description{
45 | A mixture distribution Keras layer, with independent normal components.
46 | }
47 | \seealso{
48 | For an example how to use in a Keras model, see \code{\link[=layer_independent_normal]{layer_independent_normal()}}.
49 |
50 | Other distribution_layers:
51 | \code{\link{layer_categorical_mixture_of_one_hot_categorical}()},
52 | \code{\link{layer_distribution_lambda}()},
53 | \code{\link{layer_independent_bernoulli}()},
54 | \code{\link{layer_independent_logistic}()},
55 | \code{\link{layer_independent_normal}()},
56 | \code{\link{layer_independent_poisson}()},
57 | \code{\link{layer_kl_divergence_add_loss}()},
58 | \code{\link{layer_kl_divergence_regularizer}()},
59 | \code{\link{layer_mixture_logistic}()},
60 | \code{\link{layer_mixture_same_family}()},
61 | \code{\link{layer_multivariate_normal_tri_l}()},
62 | \code{\link{layer_one_hot_categorical}()}
63 | }
64 | \concept{distribution_layers}
65 |
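66 | \section{Example (sketch)}{
67 |
68 | A sketch wiring the required parameter count into a dense layer; the sizes
69 | are illustrative, and \code{params_size_mixture_normal()} is assumed to be
70 | the matching params-size helper:
71 |
72 | \preformatted{library(keras)
73 | library(tfprobability)
74 |
75 | num_components <- 3L
76 | event_shape <- 1L
77 |
78 | model <- keras_model_sequential(list(
79 |   layer_dense(units = params_size_mixture_normal(num_components, event_shape),
80 |               input_shape = 4L),
81 |   layer_mixture_normal(num_components = num_components,
82 |                        event_shape = event_shape)
83 | ))
84 | }
85 | }
86 |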
--------------------------------------------------------------------------------
/man/layer_mixture_same_family.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/distribution-layers.R
3 | \name{layer_mixture_same_family}
4 | \alias{layer_mixture_same_family}
5 | \title{A mixture (same-family) Keras layer.}
6 | \usage{
7 | layer_mixture_same_family(
8 | object,
9 | num_components,
10 | component_layer,
11 | convert_to_tensor_fn = tfp$distributions$Distribution$sample,
12 | validate_args = FALSE,
13 | ...
14 | )
15 | }
16 | \arguments{
17 | \item{object}{What to compose the new \code{Layer} instance with. Typically a
18 | Sequential model or a Tensor (e.g., as returned by \code{layer_input()}).
19 | The return value depends on \code{object}. If \code{object} is:
20 | \itemize{
21 | \item missing or \code{NULL}, the \code{Layer} instance is returned.
22 | \item a \code{Sequential} model, the model with an additional layer is returned.
23 | \item a Tensor, the output tensor from \code{layer_instance(object)} is returned.
24 | }}
25 |
26 | \item{num_components}{Number of component distributions in the mixture distribution.}
27 |
28 | \item{component_layer}{Function that, given a tensor of shape
29 | \verb{batch_shape + [num_components, component_params_size]}, returns a
30 | \code{tfd$Distribution}-like instance that implements the component
31 | distribution (with batch shape \verb{batch_shape + [num_components]}) --
32 | e.g., a TFP distribution layer.}
33 |
34 | \item{convert_to_tensor_fn}{A callable that takes a \code{tfp$distributions$Distribution} instance and returns a
35 | \code{tf$Tensor}-like object. Default value: \code{tfp$distributions$Distribution$sample}.}
36 |
37 | \item{validate_args}{Logical, default FALSE. When TRUE distribution parameters are checked
38 | for validity despite possibly degrading runtime performance. When FALSE invalid inputs may
39 | silently render incorrect outputs. Default value: FALSE.}
40 |
41 |
42 | \item{...}{Additional arguments passed to \code{args} of \code{keras::create_layer}.}
43 | }
44 | \value{
45 | a Keras layer
46 | }
47 | \description{
48 | A mixture (same-family) Keras layer.
49 | }
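% A minimal usage sketch (not from the package sources): shapes are assumed,
% and the component distribution is built from layer_independent_normal().
\examples{
\donttest{
library(keras)
num_components <- 3
event_shape <- 2
params_size <- params_size_mixture_same_family(
  num_components,
  component_params_size = params_size_independent_normal(event_shape)
)
model <- keras_model_sequential() \%>\%
  layer_dense(units = params_size, input_shape = 4) \%>\%
  layer_mixture_same_family(
    num_components = num_components,
    # component_layer maps the parameter tensor to the component distribution
    component_layer = function(x) x \%>\% layer_independent_normal(event_shape)
  )
}
}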
50 | \seealso{
51 | For an example how to use in a Keras model, see \code{\link[=layer_independent_normal]{layer_independent_normal()}}.
52 |
53 | Other distribution_layers:
54 | \code{\link{layer_categorical_mixture_of_one_hot_categorical}()},
55 | \code{\link{layer_distribution_lambda}()},
56 | \code{\link{layer_independent_bernoulli}()},
57 | \code{\link{layer_independent_logistic}()},
58 | \code{\link{layer_independent_normal}()},
59 | \code{\link{layer_independent_poisson}()},
60 | \code{\link{layer_kl_divergence_add_loss}()},
61 | \code{\link{layer_kl_divergence_regularizer}()},
62 | \code{\link{layer_mixture_logistic}()},
63 | \code{\link{layer_mixture_normal}()},
64 | \code{\link{layer_multivariate_normal_tri_l}()},
65 | \code{\link{layer_one_hot_categorical}()}
66 | }
67 | \concept{distribution_layers}
68 |
--------------------------------------------------------------------------------
/man/layer_multivariate_normal_tri_l.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/distribution-layers.R
3 | \name{layer_multivariate_normal_tri_l}
4 | \alias{layer_multivariate_normal_tri_l}
5 | \title{A d-variate Multivariate Normal TriL Keras layer from \code{d + d * (d + 1) / 2} params}
6 | \usage{
7 | layer_multivariate_normal_tri_l(
8 | object,
9 | event_size,
10 | convert_to_tensor_fn = tfp$distributions$Distribution$sample,
11 | validate_args = FALSE,
12 | ...
13 | )
14 | }
15 | \arguments{
16 | \item{object}{What to compose the new \code{Layer} instance with. Typically a
17 | Sequential model or a Tensor (e.g., as returned by \code{layer_input()}).
18 | The return value depends on \code{object}. If \code{object} is:
19 | \itemize{
20 | \item missing or \code{NULL}, the \code{Layer} instance is returned.
21 | \item a \code{Sequential} model, the model with an additional layer is returned.
22 | \item a Tensor, the output tensor from \code{layer_instance(object)} is returned.
23 | }}
24 |
25 | \item{event_size}{Integer vector tensor representing the shape of a single draw from this distribution.}
26 |
27 | \item{convert_to_tensor_fn}{A callable that takes a tfd$Distribution instance and returns a
28 | tf$Tensor-like object. Default value: \code{tfp$distributions$Distribution$sample}.}
29 |
30 | \item{validate_args}{Logical, default FALSE. When TRUE, distribution parameters are checked
31 | for validity despite possibly degrading runtime performance. When FALSE, invalid inputs may
32 | silently render incorrect outputs. Default value: FALSE.}
33 |
34 | \item{...}{Additional arguments passed to \code{args} of \code{keras::create_layer}.}
35 | }
36 | \value{
37 | a Keras layer
38 | }
39 | \description{
40 | A d-variate Multivariate Normal TriL Keras layer from \code{d + d * (d + 1) / 2} params
41 | }
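% A minimal usage sketch (assumed shapes): for d = 2 the layer consumes
% d + d * (d + 1) / 2 = 5 parameters per event.
\examples{
\donttest{
library(keras)
d <- 2
model <- keras_model_sequential() \%>\%
  layer_dense(units = params_size_multivariate_normal_tri_l(d), input_shape = 3) \%>\%
  layer_multivariate_normal_tri_l(event_size = d)
}
}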
42 | \seealso{
43 | For an example how to use in a Keras model, see \code{\link[=layer_independent_normal]{layer_independent_normal()}}.
44 |
45 | Other distribution_layers:
46 | \code{\link{layer_categorical_mixture_of_one_hot_categorical}()},
47 | \code{\link{layer_distribution_lambda}()},
48 | \code{\link{layer_independent_bernoulli}()},
49 | \code{\link{layer_independent_logistic}()},
50 | \code{\link{layer_independent_normal}()},
51 | \code{\link{layer_independent_poisson}()},
52 | \code{\link{layer_kl_divergence_add_loss}()},
53 | \code{\link{layer_kl_divergence_regularizer}()},
54 | \code{\link{layer_mixture_logistic}()},
55 | \code{\link{layer_mixture_normal}()},
56 | \code{\link{layer_mixture_same_family}()},
57 | \code{\link{layer_one_hot_categorical}()}
58 | }
59 | \concept{distribution_layers}
60 |
--------------------------------------------------------------------------------
/man/layer_one_hot_categorical.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/distribution-layers.R
3 | \name{layer_one_hot_categorical}
4 | \alias{layer_one_hot_categorical}
5 | \title{A \code{d}-variate OneHotCategorical Keras layer from \code{d} params.}
6 | \usage{
7 | layer_one_hot_categorical(
8 | object,
9 | event_size,
10 | convert_to_tensor_fn = tfp$distributions$Distribution$sample,
11 | sample_dtype = NULL,
12 | validate_args = FALSE,
13 | ...
14 | )
15 | }
16 | \arguments{
17 | \item{object}{What to compose the new \code{Layer} instance with. Typically a
18 | Sequential model or a Tensor (e.g., as returned by \code{layer_input()}).
19 | The return value depends on \code{object}. If \code{object} is:
20 | \itemize{
21 | \item missing or \code{NULL}, the \code{Layer} instance is returned.
22 | \item a \code{Sequential} model, the model with an additional layer is returned.
23 | \item a Tensor, the output tensor from \code{layer_instance(object)} is returned.
24 | }}
25 |
26 | \item{event_size}{Scalar \code{integer} representing the size of a single draw from this distribution.}
27 |
28 | \item{convert_to_tensor_fn}{A callable that takes a tfd$Distribution instance and returns a
29 | tf$Tensor-like object. Default value: \code{tfp$distributions$Distribution$sample}.}
30 |
31 | \item{sample_dtype}{\code{dtype} of samples produced by this distribution.
32 | Default value: \code{NULL} (i.e., previous layer's \code{dtype}).}
33 |
34 | \item{validate_args}{Logical, default FALSE. When TRUE, distribution parameters are checked
35 | for validity despite possibly degrading runtime performance. When FALSE, invalid inputs may
36 | silently render incorrect outputs. Default value: FALSE.}
37 |
38 | \item{...}{Additional arguments passed to \code{args} of \code{keras::create_layer}.}
39 | }
40 | \value{
41 | a Keras layer
42 | }
43 | \description{
44 | Typical choices for \code{convert_to_tensor_fn} include:
45 | \itemize{
46 | \item \code{tfp$distributions$Distribution$sample}
47 | \item \code{tfp$distributions$Distribution$mean}
48 | \item \code{tfp$distributions$Distribution$mode}
49 | \item \code{tfp$distributions$OneHotCategorical$logits}
50 | }
51 | }
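% A minimal usage sketch (assumed class count); convert_to_tensor_fn is set to
% the distribution mode, one of the typical choices listed above.
\examples{
\donttest{
library(keras)
k <- 10  # assumed number of classes
model <- keras_model_sequential() \%>\%
  layer_dense(units = params_size_one_hot_categorical(k), input_shape = 5) \%>\%
  layer_one_hot_categorical(
    event_size = k,
    convert_to_tensor_fn = tfp$distributions$Distribution$mode
  )
}
}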
52 | \seealso{
53 | For an example how to use in a Keras model, see \code{\link[=layer_independent_normal]{layer_independent_normal()}}.
54 |
55 | Other distribution_layers:
56 | \code{\link{layer_categorical_mixture_of_one_hot_categorical}()},
57 | \code{\link{layer_distribution_lambda}()},
58 | \code{\link{layer_independent_bernoulli}()},
59 | \code{\link{layer_independent_logistic}()},
60 | \code{\link{layer_independent_normal}()},
61 | \code{\link{layer_independent_poisson}()},
62 | \code{\link{layer_kl_divergence_add_loss}()},
63 | \code{\link{layer_kl_divergence_regularizer}()},
64 | \code{\link{layer_mixture_logistic}()},
65 | \code{\link{layer_mixture_normal}()},
66 | \code{\link{layer_mixture_same_family}()},
67 | \code{\link{layer_multivariate_normal_tri_l}()}
68 | }
69 | \concept{distribution_layers}
70 |
--------------------------------------------------------------------------------
/man/layer_variable.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/layers.R
3 | \name{layer_variable}
4 | \alias{layer_variable}
5 | \title{Variable Layer}
6 | \usage{
7 | layer_variable(
8 | object,
9 | shape,
10 | dtype = NULL,
11 | activation = NULL,
12 | initializer = "zeros",
13 | regularizer = NULL,
14 | constraint = NULL,
15 | ...
16 | )
17 | }
18 | \arguments{
19 | \item{object}{What to compose the new \code{Layer} instance with. Typically a
20 | Sequential model or a Tensor (e.g., as returned by \code{layer_input()}).
21 | The return value depends on \code{object}. If \code{object} is:
22 | \itemize{
23 | \item missing or \code{NULL}, the \code{Layer} instance is returned.
24 | \item a \code{Sequential} model, the model with an additional layer is returned.
25 | \item a Tensor, the output tensor from \code{layer_instance(object)} is returned.
26 | }}
27 |
28 | \item{shape}{integer or integer vector specifying the shape of the output of this layer.}
29 |
30 | \item{dtype}{TensorFlow \code{dtype} of the variable created by this layer.}
31 |
32 | \item{activation}{An activation function. See \code{keras::layer_dense}. Default: \code{NULL}.}
33 |
34 | \item{initializer}{Initializer for the \code{constant} vector.}
35 |
36 | \item{regularizer}{Regularizer function applied to the \code{constant} vector.}
37 |
38 | \item{constraint}{Constraint function applied to the \code{constant} vector.}
39 |
40 | \item{...}{Additional keyword arguments passed to the \code{keras::layer_dense} constructed by this layer.}
41 | }
42 | \value{
43 | a Keras layer
44 | }
45 | \description{
46 | Simply returns a (trainable) variable, regardless of input.
47 | This layer implements the mathematical function \code{f(x) = c} where \code{c} is a
48 | constant, i.e., unchanged for all \code{x}. Like other Keras layers, the constant
49 | is \code{trainable}. This layer can also be interpreted as the special case of
50 | \code{layer_dense()} when the \code{kernel} is forced to be the zero matrix
51 | (\code{tf$zeros}).
52 | }
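% A minimal usage sketch (assumed shapes): the layer emits a trainable
% length-3 vector regardless of its input.
\examples{
\donttest{
library(keras)
model <- keras_model_sequential() \%>\%
  layer_variable(shape = 3, input_shape = 1)  # input_shape = 1 is an illustrative assumption
}
}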
53 | \seealso{
54 | Other layers:
55 | \code{\link{layer_autoregressive}()},
56 | \code{\link{layer_conv_1d_flipout}()},
57 | \code{\link{layer_conv_1d_reparameterization}()},
58 | \code{\link{layer_conv_2d_flipout}()},
59 | \code{\link{layer_conv_2d_reparameterization}()},
60 | \code{\link{layer_conv_3d_flipout}()},
61 | \code{\link{layer_conv_3d_reparameterization}()},
62 | \code{\link{layer_dense_flipout}()},
63 | \code{\link{layer_dense_local_reparameterization}()},
64 | \code{\link{layer_dense_reparameterization}()},
65 | \code{\link{layer_dense_variational}()}
66 | }
67 | \concept{layers}
68 |
--------------------------------------------------------------------------------
/man/mcmc_effective_sample_size.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mcmc-functions.R
3 | \name{mcmc_effective_sample_size}
4 | \alias{mcmc_effective_sample_size}
5 | \title{Estimate a lower bound on effective sample size for each independent chain.}
6 | \usage{
7 | mcmc_effective_sample_size(
8 | states,
9 | filter_threshold = 0,
10 | filter_beyond_lag = NULL,
11 | name = NULL
12 | )
13 | }
14 | \arguments{
15 | \item{states}{\code{Tensor} or list of \code{Tensor} objects. Dimension zero should index
16 | identically distributed states.}
17 |
18 | \item{filter_threshold}{\code{Tensor} or list of \code{Tensor} objects.
19 | Must broadcast with \code{states}. The auto-correlation sequence is truncated
20 | after the first appearance of a term less than \code{filter_threshold}.
21 | Setting to \code{NULL} means we use no threshold filter. Since \verb{|R_k| <= 1},
22 | setting to any number less than \code{-1} has the same effect.}
23 |
24 | \item{filter_beyond_lag}{\code{Tensor} or list of \code{Tensor} objects. Must be
25 | \code{int}-like and scalar valued. The auto-correlation sequence is truncated
26 | to this length. Setting to \code{NULL} means we do not filter based on number of lags.}
27 |
28 | \item{name}{name to prepend to created ops.}
29 | }
30 | \value{
31 | \code{Tensor} or list of \code{Tensor} objects. The effective sample size of
32 | each component of \code{states}. Shape will be \verb{states$shape[1:]}.
33 | }
34 | \description{
35 | Roughly speaking, "effective sample size" (ESS) is the size of an iid sample
36 | with the same variance as \code{states}.
37 | }
38 | \details{
39 | More precisely, given a stationary sequence of possibly correlated random
40 | variables \verb{X_1, X_2, ..., X_N}, each identically distributed, ESS is the number
41 | such that
42 | \verb{Variance\{ N**-1 * Sum\{X_i\} \} = ESS**-1 * Variance\{ X_1 \}.}
43 |
44 | If the sequence is uncorrelated, \code{ESS = N}. In general, one should expect
45 | \code{ESS <= N}, with more highly correlated sequences having smaller \code{ESS}.
46 | }
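% A minimal sketch (assumed toy chain): for i.i.d. draws the estimate
% should be close to the number of draws.
\examples{
\donttest{
states <- tfd_normal(0, 1) \%>\% tfd_sample(1000)
ess <- mcmc_effective_sample_size(states)
}
}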
47 | \seealso{
48 | Other mcmc_functions:
49 | \code{\link{mcmc_potential_scale_reduction}()},
50 | \code{\link{mcmc_sample_annealed_importance_chain}()},
51 | \code{\link{mcmc_sample_chain}()},
52 | \code{\link{mcmc_sample_halton_sequence}()}
53 | }
54 | \concept{mcmc_functions}
55 |
--------------------------------------------------------------------------------
/man/mcmc_metropolis_hastings.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mcmc-kernels.R
3 | \name{mcmc_metropolis_hastings}
4 | \alias{mcmc_metropolis_hastings}
5 | \title{Runs one step of the Metropolis-Hastings algorithm.}
6 | \usage{
7 | mcmc_metropolis_hastings(inner_kernel, seed = NULL, name = NULL)
8 | }
9 | \arguments{
10 | \item{inner_kernel}{\code{TransitionKernel}-like object which has \code{collections$namedtuple}
11 | \code{kernel_results} and which contains a \code{target_log_prob} member and optionally a \code{log_acceptance_correction} member.}
12 |
13 | \item{seed}{integer to seed the random number generator.}
14 |
15 | \item{name}{string prefixed to Ops created by this function. Default value: \code{NULL} (i.e., "mh_kernel").}
16 | }
17 | \value{
18 | a Monte Carlo sampling kernel
19 | }
20 | \description{
21 | The Metropolis-Hastings algorithm is a Markov chain Monte Carlo (MCMC) technique which uses a proposal distribution
22 | to eventually sample from a target distribution.
23 | }
24 | \details{
25 | Note: \code{inner_kernel$one_step} must return \code{kernel_results} as a \code{collections$namedtuple} which must:
26 | \itemize{
27 | \item have a \code{target_log_prob} field,
28 | \item optionally have a \code{log_acceptance_correction} field, and,
29 | \item have only fields which are \code{Tensor}-valued.
30 | }
31 |
32 | The Metropolis-Hastings log acceptance-probability is computed as:
33 |
34 | \if{html}{\out{}}\preformatted{log_accept_ratio = (current_kernel_results.target_log_prob
35 | - previous_kernel_results.target_log_prob
36 | + current_kernel_results.log_acceptance_correction)
37 | }\if{html}{\out{
}}
38 |
39 | If \code{current_kernel_results$log_acceptance_correction} does not exist, it is
40 | presumed \code{0} (i.e., that the proposal distribution is symmetric).
41 | The most common use-case for \code{log_acceptance_correction} is in the
42 | Metropolis-Hastings algorithm, i.e.,
43 |
44 | \if{html}{\out{}}\preformatted{accept_prob(x' | x) = p(x') / p(x) (g(x|x') / g(x'|x))
45 | where,
46 | p represents the target distribution,
47 | g represents the proposal (conditional) distribution,
48 | x' is the proposed state, and,
49 | x is current state
50 | }\if{html}{\out{
}}
51 |
52 | The log of the parenthetical term is the \code{log_acceptance_correction}.
53 | The \code{log_acceptance_correction} may not necessarily correspond to the ratio of
54 | proposal distributions, e.g, \code{log_acceptance_correction} has a different
55 | interpretation in Hamiltonian Monte Carlo.
56 | }
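% A minimal sketch: wrapping an uncalibrated kernel (here, an uncalibrated
% random walk over a standard normal target) yields a convergent sampler.
\examples{
\donttest{
kernel <- mcmc_uncalibrated_random_walk(
  target_log_prob_fn = function(x) tfd_normal(0, 1) \%>\% tfd_log_prob(x)
) \%>\%
  mcmc_metropolis_hastings()
}
}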
57 | \seealso{
58 | Other mcmc_kernels:
59 | \code{\link{mcmc_dual_averaging_step_size_adaptation}()},
60 | \code{\link{mcmc_hamiltonian_monte_carlo}()},
61 | \code{\link{mcmc_metropolis_adjusted_langevin_algorithm}()},
62 | \code{\link{mcmc_no_u_turn_sampler}()},
63 | \code{\link{mcmc_random_walk_metropolis}()},
64 | \code{\link{mcmc_replica_exchange_mc}()},
65 | \code{\link{mcmc_simple_step_size_adaptation}()},
66 | \code{\link{mcmc_slice_sampler}()},
67 | \code{\link{mcmc_transformed_transition_kernel}()},
68 | \code{\link{mcmc_uncalibrated_hamiltonian_monte_carlo}()},
69 | \code{\link{mcmc_uncalibrated_langevin}()},
70 | \code{\link{mcmc_uncalibrated_random_walk}()}
71 | }
72 | \concept{mcmc_kernels}
73 |
--------------------------------------------------------------------------------
/man/mcmc_random_walk_metropolis.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mcmc-kernels.R
3 | \name{mcmc_random_walk_metropolis}
4 | \alias{mcmc_random_walk_metropolis}
5 | \title{Runs one step of the RWM algorithm with symmetric proposal.}
6 | \usage{
7 | mcmc_random_walk_metropolis(
8 | target_log_prob_fn,
9 | new_state_fn = NULL,
10 | seed = NULL,
11 | name = NULL
12 | )
13 | }
14 | \arguments{
15 | \item{target_log_prob_fn}{Function which takes an argument like
16 | \code{current_state} (if it's a list, \code{current_state} will be unpacked) and returns its
17 | (possibly unnormalized) log-density under the target distribution.}
18 |
19 | \item{new_state_fn}{Function which takes a list of state parts and a
20 | seed; returns a same-type \code{list} of \code{Tensor}s, each being a perturbation
21 | of the input state parts. The perturbation distribution is assumed to be
22 | a symmetric distribution centered at the input state part.
23 | Default value: \code{NULL} which is mapped to \code{tfp$mcmc$random_walk_normal_fn()}.}
24 |
25 | \item{seed}{integer to seed the random number generator.}
26 |
27 | \item{name}{String name prefixed to Ops created by this function.
28 | Default value: \code{NULL} (i.e., 'rwm_kernel').}
29 | }
30 | \value{
31 | a Monte Carlo sampling kernel
32 | }
33 | \description{
34 | Random Walk Metropolis is a gradient-free Markov chain Monte Carlo
35 | (MCMC) algorithm. The algorithm involves a proposal-generating step,
36 | \code{proposal_state = current_state + perturb}, where \code{perturb} is a random
37 | perturbation, followed by a Metropolis-Hastings accept/reject step. For more
38 | details see Section 2.1 of Roberts and Rosenthal (2004).
39 | }
40 | \details{
41 | The current class implements RWM for normal and uniform proposals. Alternatively,
42 | the user can supply any custom proposal generating function.
43 | The function \code{one_step} can update multiple chains in parallel. It assumes
44 | that all leftmost dimensions of \code{current_state} index independent chain states
45 | (and are therefore updated independently). The output of
46 | \code{target_log_prob_fn(current_state)} should sum log-probabilities across all
47 | event dimensions. Slices along the rightmost dimensions may have different
48 | target distributions; for example, \verb{current_state[0, :]} could have a
49 | different target distribution from \verb{current_state[1, :]}. These semantics
50 | are governed by \code{target_log_prob_fn(current_state)}. (The number of
51 | independent chains is \code{tf$size(target_log_prob_fn(current_state))}.)
52 | }
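% A minimal sketch (chain settings are illustrative assumptions): sampling a
% standard normal target with RWM.
\examples{
\donttest{
kernel <- mcmc_random_walk_metropolis(
  target_log_prob_fn = function(x) tfd_normal(0, 1) \%>\% tfd_log_prob(x)
)
samples <- kernel \%>\% mcmc_sample_chain(
  num_results = 500,
  num_burnin_steps = 100,
  current_state = 0
)
}
}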
53 | \seealso{
54 | Other mcmc_kernels:
55 | \code{\link{mcmc_dual_averaging_step_size_adaptation}()},
56 | \code{\link{mcmc_hamiltonian_monte_carlo}()},
57 | \code{\link{mcmc_metropolis_adjusted_langevin_algorithm}()},
58 | \code{\link{mcmc_metropolis_hastings}()},
59 | \code{\link{mcmc_no_u_turn_sampler}()},
60 | \code{\link{mcmc_replica_exchange_mc}()},
61 | \code{\link{mcmc_simple_step_size_adaptation}()},
62 | \code{\link{mcmc_slice_sampler}()},
63 | \code{\link{mcmc_transformed_transition_kernel}()},
64 | \code{\link{mcmc_uncalibrated_hamiltonian_monte_carlo}()},
65 | \code{\link{mcmc_uncalibrated_langevin}()},
66 | \code{\link{mcmc_uncalibrated_random_walk}()}
67 | }
68 | \concept{mcmc_kernels}
69 |
--------------------------------------------------------------------------------
/man/mcmc_transformed_transition_kernel.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mcmc-kernels.R
3 | \name{mcmc_transformed_transition_kernel}
4 | \alias{mcmc_transformed_transition_kernel}
5 | \title{Applies a bijector to the MCMC's state space}
6 | \usage{
7 | mcmc_transformed_transition_kernel(inner_kernel, bijector, name = NULL)
8 | }
9 | \arguments{
10 | \item{inner_kernel}{\code{TransitionKernel}-like object which has a \code{target_log_prob_fn} argument.}
11 |
12 | \item{bijector}{bijector or list of bijectors. These bijectors use \code{forward} to map the
13 | \code{inner_kernel} state space to the state expected by \code{inner_kernel$target_log_prob_fn}.}
14 |
15 | \item{name}{string prefixed to Ops created by this function.
16 | Default value: \code{NULL} (i.e., "transformed_kernel").}
17 | }
18 | \value{
19 | a Monte Carlo sampling kernel
20 | }
21 | \description{
22 | The transformed transition kernel enables fitting
23 | a bijector which serves to decorrelate the Markov chain Monte Carlo (MCMC)
24 | event dimensions thus making the chain mix faster. This is
25 | particularly useful when the geometry of the target distribution is
26 | unfavorable. In such cases it may take many evaluations of the
27 | \code{target_log_prob_fn} for the chain to mix between faraway states.
28 | }
29 | \details{
30 | The idea of training an affine function to decorrelate chain event dims was
31 | presented in Parno and Marzouk (2014). Used in conjunction with the
32 | Hamiltonian Monte Carlo transition kernel, the Parno and Marzouk (2014)
33 | idea is an instance of Riemannian manifold HMC (Girolami and Calderhead, 2011).
34 |
35 | The transformed transition kernel enables arbitrary bijective transformations
36 | of arbitrary transition kernels, e.g., one could use bijectors
37 | \code{tfb_affine}, \code{tfb_real_nvp}, etc.
38 | with transition kernels \code{mcmc_hamiltonian_monte_carlo}, \code{mcmc_random_walk_metropolis}, etc.
39 | }
40 | \section{References}{
41 |
42 | \itemize{
43 | \item \href{https://arxiv.org/abs/1412.5492}{Matthew Parno and Youssef Marzouk. Transport map accelerated Markov chain Monte Carlo. \emph{arXiv preprint arXiv:1412.5492}, 2014.}
44 | \item \href{http://people.ee.duke.edu/~lcarin/Girolami2011.pdf}{Mark Girolami and Ben Calderhead. Riemann manifold Langevin and Hamiltonian Monte Carlo methods. \emph{Journal of the Royal Statistical Society}, 2011.}
45 | }
46 | }
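% A minimal sketch: transforming HMC with tfb_exp() to sample a
% positive-constrained target in unconstrained space (step settings are assumptions).
\examples{
\donttest{
kernel <- mcmc_hamiltonian_monte_carlo(
  target_log_prob_fn = function(x) tfd_gamma(2, 2) \%>\% tfd_log_prob(x),
  step_size = 0.1,
  num_leapfrog_steps = 3
) \%>\%
  mcmc_transformed_transition_kernel(bijector = tfb_exp())
}
}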
47 |
48 | \seealso{
49 | Other mcmc_kernels:
50 | \code{\link{mcmc_dual_averaging_step_size_adaptation}()},
51 | \code{\link{mcmc_hamiltonian_monte_carlo}()},
52 | \code{\link{mcmc_metropolis_adjusted_langevin_algorithm}()},
53 | \code{\link{mcmc_metropolis_hastings}()},
54 | \code{\link{mcmc_no_u_turn_sampler}()},
55 | \code{\link{mcmc_random_walk_metropolis}()},
56 | \code{\link{mcmc_replica_exchange_mc}()},
57 | \code{\link{mcmc_simple_step_size_adaptation}()},
58 | \code{\link{mcmc_slice_sampler}()},
59 | \code{\link{mcmc_uncalibrated_hamiltonian_monte_carlo}()},
60 | \code{\link{mcmc_uncalibrated_langevin}()},
61 | \code{\link{mcmc_uncalibrated_random_walk}()}
62 | }
63 | \concept{mcmc_kernels}
64 |
--------------------------------------------------------------------------------
/man/mcmc_uncalibrated_random_walk.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mcmc-kernels.R
3 | \name{mcmc_uncalibrated_random_walk}
4 | \alias{mcmc_uncalibrated_random_walk}
5 | \title{Generate proposal for the Random Walk Metropolis algorithm.}
6 | \usage{
7 | mcmc_uncalibrated_random_walk(
8 | target_log_prob_fn,
9 | new_state_fn = NULL,
10 | seed = NULL,
11 | name = NULL
12 | )
13 | }
14 | \arguments{
15 | \item{target_log_prob_fn}{Function which takes an argument like
16 | \code{current_state} (if it's a list, \code{current_state} will be unpacked) and returns its
17 | (possibly unnormalized) log-density under the target distribution.}
18 |
19 | \item{new_state_fn}{Function which takes a list of state parts and a
20 | seed; returns a same-type \code{list} of \code{Tensor}s, each being a perturbation
21 | of the input state parts. The perturbation distribution is assumed to be
22 | a symmetric distribution centered at the input state part.
23 | Default value: \code{NULL} which is mapped to \code{tfp$mcmc$random_walk_normal_fn()}.}
24 |
25 | \item{seed}{integer to seed the random number generator.}
26 |
27 | \item{name}{String name prefixed to Ops created by this function.
28 | Default value: \code{NULL} (i.e., 'rwm_kernel').}
29 | }
30 | \value{
31 | a Monte Carlo sampling kernel
32 | }
33 | \description{
34 | Warning: this kernel will not result in a chain which converges to the
35 | \code{target_log_prob}. To get a convergent MCMC, use
36 | \code{mcmc_random_walk_metropolis(...)} or
37 | \code{mcmc_metropolis_hastings(mcmc_uncalibrated_random_walk(...))}.
38 | }
39 | \seealso{
40 | Other mcmc_kernels:
41 | \code{\link{mcmc_dual_averaging_step_size_adaptation}()},
42 | \code{\link{mcmc_hamiltonian_monte_carlo}()},
43 | \code{\link{mcmc_metropolis_adjusted_langevin_algorithm}()},
44 | \code{\link{mcmc_metropolis_hastings}()},
45 | \code{\link{mcmc_no_u_turn_sampler}()},
46 | \code{\link{mcmc_random_walk_metropolis}()},
47 | \code{\link{mcmc_replica_exchange_mc}()},
48 | \code{\link{mcmc_simple_step_size_adaptation}()},
49 | \code{\link{mcmc_slice_sampler}()},
50 | \code{\link{mcmc_transformed_transition_kernel}()},
51 | \code{\link{mcmc_uncalibrated_hamiltonian_monte_carlo}()},
52 | \code{\link{mcmc_uncalibrated_langevin}()}
53 | }
54 | \concept{mcmc_kernels}
55 |
--------------------------------------------------------------------------------
/man/params_size_categorical_mixture_of_one_hot_categorical.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/distribution-static-methods.R
3 | \name{params_size_categorical_mixture_of_one_hot_categorical}
4 | \alias{params_size_categorical_mixture_of_one_hot_categorical}
5 | \title{number of \code{params} needed to create a CategoricalMixtureOfOneHotCategorical distribution}
6 | \usage{
7 | params_size_categorical_mixture_of_one_hot_categorical(
8 | event_size,
9 | num_components
10 | )
11 | }
12 | \arguments{
13 | \item{event_size}{event size of this distribution}
14 |
15 | \item{num_components}{number of components in the mixture}
16 | }
17 | \value{
18 | a scalar
19 | }
20 | \description{
21 | number of \code{params} needed to create a CategoricalMixtureOfOneHotCategorical distribution
22 | }
23 |
--------------------------------------------------------------------------------
/man/params_size_independent_bernoulli.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/distribution-static-methods.R
3 | \name{params_size_independent_bernoulli}
4 | \alias{params_size_independent_bernoulli}
5 | \title{number of \code{params} needed to create an IndependentBernoulli distribution}
6 | \usage{
7 | params_size_independent_bernoulli(event_size)
8 | }
9 | \arguments{
10 | \item{event_size}{event size of this distribution}
11 | }
12 | \value{
13 | a scalar
14 | }
15 | \description{
16 | number of \code{params} needed to create an IndependentBernoulli distribution
17 | }
18 |
--------------------------------------------------------------------------------
/man/params_size_independent_logistic.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/distribution-static-methods.R
3 | \name{params_size_independent_logistic}
4 | \alias{params_size_independent_logistic}
5 | \title{number of \code{params} needed to create an IndependentLogistic distribution}
6 | \usage{
7 | params_size_independent_logistic(event_size)
8 | }
9 | \arguments{
10 | \item{event_size}{event size of this distribution}
11 | }
12 | \value{
13 | a scalar
14 | }
15 | \description{
16 | number of \code{params} needed to create an IndependentLogistic distribution
17 | }
18 |
--------------------------------------------------------------------------------
/man/params_size_independent_normal.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/distribution-static-methods.R
3 | \name{params_size_independent_normal}
4 | \alias{params_size_independent_normal}
5 | \title{number of \code{params} needed to create an IndependentNormal distribution}
6 | \usage{
7 | params_size_independent_normal(event_size)
8 | }
9 | \arguments{
10 | \item{event_size}{event size of this distribution}
11 | }
12 | \value{
13 | a scalar
14 | }
15 | \description{
16 | number of \code{params} needed to create an IndependentNormal distribution
17 | }
18 |
--------------------------------------------------------------------------------
/man/params_size_independent_poisson.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/distribution-static-methods.R
3 | \name{params_size_independent_poisson}
4 | \alias{params_size_independent_poisson}
5 | \title{number of \code{params} needed to create an IndependentPoisson distribution}
6 | \usage{
7 | params_size_independent_poisson(event_size)
8 | }
9 | \arguments{
10 | \item{event_size}{event size of this distribution}
11 | }
12 | \value{
13 | a scalar
14 | }
15 | \description{
16 | number of \code{params} needed to create an IndependentPoisson distribution
17 | }
18 |
--------------------------------------------------------------------------------
/man/params_size_mixture_logistic.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/distribution-static-methods.R
3 | \name{params_size_mixture_logistic}
4 | \alias{params_size_mixture_logistic}
5 | \title{number of \code{params} needed to create a MixtureLogistic distribution}
6 | \usage{
7 | params_size_mixture_logistic(num_components, event_shape)
8 | }
9 | \arguments{
10 | \item{num_components}{Number of component distributions in the mixture distribution.}
11 |
12 | \item{event_shape}{Number of parameters needed to create a single component distribution.}
13 | }
14 | \value{
15 | a scalar
16 | }
17 | \description{
18 | number of \code{params} needed to create a MixtureLogistic distribution
19 | }
20 |
--------------------------------------------------------------------------------
/man/params_size_mixture_normal.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/distribution-static-methods.R
3 | \name{params_size_mixture_normal}
4 | \alias{params_size_mixture_normal}
5 | \title{number of \code{params} needed to create a MixtureNormal distribution}
6 | \usage{
7 | params_size_mixture_normal(num_components, event_shape)
8 | }
9 | \arguments{
10 | \item{num_components}{Number of component distributions in the mixture distribution.}
11 |
12 | \item{event_shape}{Number of parameters needed to create a single component distribution.}
13 | }
14 | \value{
15 | a scalar
16 | }
17 | \description{
18 | number of \code{params} needed to create a MixtureNormal distribution
19 | }
20 |
--------------------------------------------------------------------------------
/man/params_size_mixture_same_family.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/distribution-static-methods.R
3 | \name{params_size_mixture_same_family}
4 | \alias{params_size_mixture_same_family}
5 | \title{number of \code{params} needed to create a MixtureSameFamily distribution}
6 | \usage{
7 | params_size_mixture_same_family(num_components, component_params_size)
8 | }
9 | \arguments{
10 | \item{num_components}{Number of component distributions in the mixture distribution.}
11 |
12 | \item{component_params_size}{Number of parameters needed to create a single component distribution.}
13 | }
14 | \value{
15 | a scalar
16 | }
17 | \description{
18 | number of \code{params} needed to create a MixtureSameFamily distribution
19 | }
20 |
--------------------------------------------------------------------------------
/man/params_size_multivariate_normal_tri_l.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/distribution-static-methods.R
3 | \name{params_size_multivariate_normal_tri_l}
4 | \alias{params_size_multivariate_normal_tri_l}
5 | \title{number of \code{params} needed to create a MultivariateNormalTriL distribution}
6 | \usage{
7 | params_size_multivariate_normal_tri_l(event_size)
8 | }
9 | \arguments{
10 | \item{event_size}{event size of this distribution}
11 | }
12 | \value{
13 | a scalar
14 | }
15 | \description{
16 | number of \code{params} needed to create a MultivariateNormalTriL distribution
17 | }
18 |
--------------------------------------------------------------------------------
/man/params_size_one_hot_categorical.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/distribution-static-methods.R
3 | \name{params_size_one_hot_categorical}
4 | \alias{params_size_one_hot_categorical}
5 | \title{number of \code{params} needed to create a OneHotCategorical distribution}
6 | \usage{
7 | params_size_one_hot_categorical(event_size)
8 | }
9 | \arguments{
10 | \item{event_size}{event size of this distribution}
11 | }
12 | \value{
13 | a scalar
14 | }
15 | \description{
16 | number of \code{params} needed to create a OneHotCategorical distribution
17 | }
18 |
--------------------------------------------------------------------------------
/man/reexports.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/reexports.R
3 | \docType{import}
4 | \name{reexports}
5 | \alias{reexports}
6 | \alias{tf}
7 | \alias{shape}
8 | \alias{tf_config}
9 | \alias{\%>\%}
10 | \title{Objects exported from other packages}
11 | \value{
12 | an alias for tensorflow::tf
13 |
14 | an alias for tensorflow::shape
15 |
16 | an alias for tensorflow::tf_config
17 |
18 | an alias for magrittr::\verb{\%>\%}
19 | }
20 | \keyword{internal}
21 | \description{
22 | These objects are imported from other packages. Follow the links
23 | below to see their documentation.
24 |
25 | \describe{
26 | \item{magrittr}{\code{\link[magrittr:pipe]{\%>\%}}}
27 |
28 | \item{tensorflow}{\code{\link[tensorflow]{shape}}, \code{\link[tensorflow]{tf}}, \code{\link[tensorflow]{tf_config}}}
29 | }}
30 |
31 |
--------------------------------------------------------------------------------
/man/sts_build_factored_surrogate_posterior.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/sts-functions.R
3 | \name{sts_build_factored_surrogate_posterior}
4 | \alias{sts_build_factored_surrogate_posterior}
5 | \title{Build a variational posterior that factors over model parameters.}
6 | \usage{
7 | sts_build_factored_surrogate_posterior(
8 | model,
9 | batch_shape = list(),
10 | seed = NULL,
11 | name = NULL
12 | )
13 | }
14 | \arguments{
15 | \item{model}{An instance of \code{StructuralTimeSeries} representing a
16 | time-series model. This represents a joint distribution over
17 | time-series and their parameters with batch shape \verb{[b1, ..., bN]}.}
18 |
19 | \item{batch_shape}{Batch shape (\code{list}, or \code{integer}) of initial
20 | states to optimize in parallel.
21 | Default value: \code{list()} (i.e., just run a single optimization).}
22 |
23 | \item{seed}{integer to seed the random number generator.}
24 |
25 | \item{name}{string prefixed to ops created by this function.
26 | Default value: \code{NULL} (i.e., 'build_factored_surrogate_posterior').}
27 | }
28 | \value{
29 | variational_posterior \code{tfd_joint_distribution_named} defining a trainable
30 | surrogate posterior over model parameters. Samples from this
31 | distribution are named lists with \code{character} parameter names as keys.
32 | }
33 | \description{
34 | The surrogate posterior consists of independent Normal distributions for
35 | each parameter with trainable \code{loc} and \code{scale}, transformed using the
36 | parameter's \code{bijector} to the appropriate support space for that parameter.
37 | }
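% A minimal sketch (toy observations assumed): build a surrogate posterior
% for a local level model.
\examples{
\donttest{
observed <- rnorm(100)
model <- sts_local_level(observed_time_series = observed)
surrogate_posterior <- model \%>\% sts_build_factored_surrogate_posterior()
}
}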
38 | \seealso{
39 | Other sts-functions:
40 | \code{\link{sts_build_factored_variational_loss}()},
41 | \code{\link{sts_decompose_by_component}()},
42 | \code{\link{sts_decompose_forecast_by_component}()},
43 | \code{\link{sts_fit_with_hmc}()},
44 | \code{\link{sts_forecast}()},
45 | \code{\link{sts_one_step_predictive}()},
46 | \code{\link{sts_sample_uniform_initial_state}()}
47 | }
48 | \concept{sts-functions}
49 |
--------------------------------------------------------------------------------
/man/sts_decompose_forecast_by_component.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/sts-functions.R
3 | \name{sts_decompose_forecast_by_component}
4 | \alias{sts_decompose_forecast_by_component}
5 | \title{Decompose a forecast distribution into contributions from each component.}
6 | \usage{
7 | sts_decompose_forecast_by_component(model, forecast_dist, parameter_samples)
8 | }
9 | \arguments{
10 | \item{model}{An instance of \code{sts_sum} representing a structural time series model.}
11 |
12 | \item{forecast_dist}{A \code{Distribution} instance returned by \code{sts_forecast()}.
13 | (specifically, must be a \code{tfd.MixtureSameFamily} over a
14 | \code{tfd_linear_gaussian_state_space_model} parameterized by posterior samples).}
15 |
16 | \item{parameter_samples}{\code{list} of \code{tensors} representing posterior samples
17 | of model parameters, with shapes
18 | \verb{list(tf$concat(list(list(num_posterior_draws), param<1>$prior$batch_shape, param<1>$prior$event_shape), list(list(num_posterior_draws), param<2>$prior$batch_shape, param<2>$prior$event_shape), ... ) )}
19 | for all model parameters.
20 | This may optionally also be a named list mapping parameter names to \code{tensor} values.}
21 | }
22 | \value{
23 | component_dists A named list mapping
24 | component StructuralTimeSeries instances (elements of \code{model$components})
25 | to \code{Distribution} instances representing the marginal forecast for each component.
26 | Each distribution has batch shape matching \code{forecast_dist} (specifically,
27 | the event shape is \verb{[num_steps_forecast]}).
28 | }
29 | \description{
30 | Decompose a forecast distribution into contributions from each component.
31 | }
32 | \seealso{
33 | Other sts-functions:
34 | \code{\link{sts_build_factored_surrogate_posterior}()},
35 | \code{\link{sts_build_factored_variational_loss}()},
36 | \code{\link{sts_decompose_by_component}()},
37 | \code{\link{sts_fit_with_hmc}()},
38 | \code{\link{sts_forecast}()},
39 | \code{\link{sts_one_step_predictive}()},
40 | \code{\link{sts_sample_uniform_initial_state}()}
41 | }
42 | \concept{sts-functions}
43 |
--------------------------------------------------------------------------------
/man/sts_local_level.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/sts.R
3 | \name{sts_local_level}
4 | \alias{sts_local_level}
5 | \title{Formal representation of a local level model}
6 | \usage{
7 | sts_local_level(
8 | observed_time_series = NULL,
9 | level_scale_prior = NULL,
10 | initial_level_prior = NULL,
11 | name = NULL
12 | )
13 | }
14 | \arguments{
15 | \item{observed_time_series}{optional \code{float} \code{tensor} of shape
16 | \verb{batch_shape + [T, 1]} (omitting the trailing unit dimension is also
17 | supported when \code{T > 1}), specifying an observed time series.
18 | Any priors not explicitly set will be given default values according to
19 | the scale of the observed time series (or batch of time series). May
20 | optionally be an instance of \code{sts_masked_time_series}, which includes
21 | a mask \code{tensor} to specify timesteps with missing observations.
22 | Default value: \code{NULL}.}
23 |
24 | \item{level_scale_prior}{optional \code{tfp$distribution} instance specifying a prior
25 | on the \code{level_scale} parameter. If \code{NULL}, a heuristic default prior is
26 | constructed based on the provided \code{observed_time_series}.
27 | Default value: \code{NULL}.}
28 |
29 | \item{initial_level_prior}{optional \code{tfp$distribution} instance specifying a
30 | prior on the initial level. If \code{NULL}, a heuristic default prior is
31 | constructed based on the provided \code{observed_time_series}.
32 | Default value: \code{NULL}.}
33 |
34 | \item{name}{the name of this model component. Default value: 'LocalLevel'.}
35 | }
36 | \value{
37 | an instance of \code{StructuralTimeSeries}.
38 | }
39 | \description{
40 | The local level model posits a \code{level} evolving via a Gaussian random walk:
41 |
42 | \if{html}{\out{}}\preformatted{level[t] = level[t-1] + Normal(0., level_scale)
43 | }\if{html}{\out{
}}
44 | }
45 | \details{
46 | The latent state is \verb{[level]}. We observe a noisy realization of the current
47 | level: \code{f[t] = level[t] + Normal(0., observation_noise_scale)} at each timestep.
48 | }
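% A minimal sketch (toy observations assumed): priors default to heuristics
% derived from the scale of the observed series.
\examples{
\donttest{
observed <- rnorm(50)
model <- sts_local_level(observed_time_series = observed)
}
}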
49 | \seealso{
50 | For usage examples see \code{\link[=sts_fit_with_hmc]{sts_fit_with_hmc()}}, \code{\link[=sts_forecast]{sts_forecast()}}, \code{\link[=sts_decompose_by_component]{sts_decompose_by_component()}}.
51 |
52 | Other sts:
53 | \code{\link{sts_additive_state_space_model}()},
54 | \code{\link{sts_autoregressive_state_space_model}()},
55 | \code{\link{sts_autoregressive}()},
56 | \code{\link{sts_constrained_seasonal_state_space_model}()},
57 | \code{\link{sts_dynamic_linear_regression_state_space_model}()},
58 | \code{\link{sts_dynamic_linear_regression}()},
59 | \code{\link{sts_linear_regression}()},
60 | \code{\link{sts_local_level_state_space_model}()},
61 | \code{\link{sts_local_linear_trend_state_space_model}()},
62 | \code{\link{sts_local_linear_trend}()},
63 | \code{\link{sts_seasonal_state_space_model}()},
64 | \code{\link{sts_seasonal}()},
65 | \code{\link{sts_semi_local_linear_trend_state_space_model}()},
66 | \code{\link{sts_semi_local_linear_trend}()},
67 | \code{\link{sts_smooth_seasonal_state_space_model}()},
68 | \code{\link{sts_smooth_seasonal}()},
69 | \code{\link{sts_sparse_linear_regression}()},
70 | \code{\link{sts_sum}()}
71 | }
72 | \concept{sts}
73 |
--------------------------------------------------------------------------------
/man/sts_one_step_predictive.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/sts-functions.R
3 | \name{sts_one_step_predictive}
4 | \alias{sts_one_step_predictive}
5 | \title{Compute one-step-ahead predictive distributions for all timesteps}
6 | \usage{
7 | sts_one_step_predictive(
8 | observed_time_series,
9 | model,
10 | parameter_samples,
11 | timesteps_are_event_shape = TRUE
12 | )
13 | }
14 | \arguments{
15 | \item{observed_time_series}{\code{float} \code{tensor} of shape
16 | \verb{concat([sample_shape, model.batch_shape, [num_timesteps, 1]])} where
17 | \code{sample_shape} corresponds to i.i.d. observations, and the trailing \verb{[1]}
18 | dimension may (optionally) be omitted if \code{num_timesteps > 1}. May
19 | optionally be an instance of \code{sts_masked_time_series}, which includes
20 | a mask \code{tensor} to specify timesteps with missing observations.}
21 |
22 | \item{model}{An instance of \code{StructuralTimeSeries} representing a
23 | time-series model. This represents a joint distribution over
24 | time-series and their parameters with batch shape \verb{[b1, ..., bN]}.}
25 |
26 | \item{parameter_samples}{\code{list} of \code{tensors} representing posterior samples
27 | of model parameters, with shapes
28 | \verb{list(tf$concat(list(list(num_posterior_draws), param<1>$prior$batch_shape, param<1>$prior$event_shape), list(list(num_posterior_draws), param<2>$prior$batch_shape, param<2>$prior$event_shape), ... ) )}
29 | for all model parameters.
30 | This may optionally also be a named list mapping parameter names to \code{tensor} values.}
31 |
32 | \item{timesteps_are_event_shape}{Deprecated, retained for backwards compatibility only. If \code{FALSE}, the predictive distribution will return per-timestep probabilities. Default value: \code{TRUE}.}
33 | }
34 | \value{
35 | forecast_dist a \code{tfd_mixture_same_family} instance with event shape
36 | \code{list(num_timesteps)} and batch shape \code{tf$concat(list(sample_shape, model$batch_shape))}, with
37 | \code{num_posterior_draws} mixture components. The \code{t}th step represents the
38 | forecast distribution \code{p(observed_time_series[t] | observed_time_series[0:t-1], parameter_samples)}.
39 | }
40 | \description{
41 | Given samples from the posterior over parameters, return the predictive
42 | distribution over observations at each time \code{T}, given observations up
43 | through time \code{T-1}.
44 | }
45 | \seealso{
46 | Other sts-functions:
47 | \code{\link{sts_build_factored_surrogate_posterior}()},
48 | \code{\link{sts_build_factored_variational_loss}()},
49 | \code{\link{sts_decompose_by_component}()},
50 | \code{\link{sts_decompose_forecast_by_component}()},
51 | \code{\link{sts_fit_with_hmc}()},
52 | \code{\link{sts_forecast}()},
53 | \code{\link{sts_sample_uniform_initial_state}()}
54 | }
55 | \concept{sts-functions}
56 |
--------------------------------------------------------------------------------
/man/sts_sample_uniform_initial_state.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/sts-functions.R
3 | \name{sts_sample_uniform_initial_state}
4 | \alias{sts_sample_uniform_initial_state}
5 | \title{Initialize from a uniform \verb{[-2, 2]} distribution in unconstrained space.}
6 | \usage{
7 | sts_sample_uniform_initial_state(
8 | parameter,
9 | return_constrained = TRUE,
10 | init_sample_shape = list(),
11 | seed = NULL
12 | )
13 | }
14 | \arguments{
15 | \item{parameter}{\code{sts$Parameter} named tuple instance.}
16 |
17 | \item{return_constrained}{if \code{TRUE}, re-applies the constraining bijector
18 | to return initializations in the original domain. Otherwise, returns
19 | initializations in the unconstrained space.
20 | Default value: \code{TRUE}.}
21 |
22 | \item{init_sample_shape}{\code{sample_shape} of the sampled initializations.
23 | Default value: \code{list()}.}
24 |
25 | \item{seed}{integer to seed the random number generator.}
26 | }
27 | \value{
28 | uniform_initializer \code{Tensor} of shape
29 | \verb{concat([init_sample_shape, parameter.prior.batch_shape, transformed_event_shape])}, where
30 | \code{transformed_event_shape} is \code{parameter.prior.event_shape}, if
31 | \code{return_constrained=TRUE}, and otherwise it is
32 | \code{parameter$bijector$inverse_event_shape(parameter$prior$event_shape)}.
33 | }
34 | \description{
35 | Initialize from a uniform \verb{[-2, 2]} distribution in unconstrained space.
36 | }
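% A minimal sketch (toy model assumed): draw 5 initializations per parameter.
% Accessing parameters via model$parameters is an assumption about the model object.
\examples{
\donttest{
model <- sts_local_level(observed_time_series = rnorm(50))
inits <- lapply(model$parameters, sts_sample_uniform_initial_state,
                init_sample_shape = list(5))
}
}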
37 | \seealso{
38 | Other sts-functions:
39 | \code{\link{sts_build_factored_surrogate_posterior}()},
40 | \code{\link{sts_build_factored_variational_loss}()},
41 | \code{\link{sts_decompose_by_component}()},
42 | \code{\link{sts_decompose_forecast_by_component}()},
43 | \code{\link{sts_fit_with_hmc}()},
44 | \code{\link{sts_forecast}()},
45 | \code{\link{sts_one_step_predictive}()}
46 | }
47 | \concept{sts-functions}
48 |
--------------------------------------------------------------------------------
/man/tfb_exp.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/bijectors.R
3 | \name{tfb_exp}
4 | \alias{tfb_exp}
5 | \title{Computes \code{Y = g(X) = exp(X)}}
6 | \usage{
7 | tfb_exp(validate_args = FALSE, name = "exp")
8 | }
9 | \arguments{
10 | \item{validate_args}{Logical, default FALSE. Whether to validate input with asserts. If \code{validate_args} is
11 | FALSE and the inputs are invalid, correct behavior is not guaranteed.}
12 |
13 | \item{name}{name prefixed to Ops created by this class.}
14 | }
15 | \value{
16 | a bijector instance.
17 | }
18 | \description{
19 | Computes \code{Y = g(X) = exp(X)}
20 | }
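% A minimal sketch following the style of the tfb_forward() examples.
\examples{
\donttest{
b <- tfb_exp()
b \%>\% tfb_forward(1)       # exp(1)
b \%>\% tfb_inverse(exp(1))  # 1
}
}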
21 | \seealso{
22 | For usage examples see \code{\link[=tfb_forward]{tfb_forward()}}, \code{\link[=tfb_inverse]{tfb_inverse()}}, \code{\link[=tfb_inverse_log_det_jacobian]{tfb_inverse_log_det_jacobian()}}.
23 |
24 | Other bijectors:
25 | \code{\link{tfb_absolute_value}()},
26 | \code{\link{tfb_affine_linear_operator}()},
27 | \code{\link{tfb_affine_scalar}()},
28 | \code{\link{tfb_affine}()},
29 | \code{\link{tfb_ascending}()},
30 | \code{\link{tfb_batch_normalization}()},
31 | \code{\link{tfb_blockwise}()},
32 | \code{\link{tfb_chain}()},
33 | \code{\link{tfb_cholesky_outer_product}()},
34 | \code{\link{tfb_cholesky_to_inv_cholesky}()},
35 | \code{\link{tfb_correlation_cholesky}()},
36 | \code{\link{tfb_cumsum}()},
37 | \code{\link{tfb_discrete_cosine_transform}()},
38 | \code{\link{tfb_expm1}()},
39 | \code{\link{tfb_ffjord}()},
40 | \code{\link{tfb_fill_scale_tri_l}()},
41 | \code{\link{tfb_fill_triangular}()},
42 | \code{\link{tfb_glow}()},
43 | \code{\link{tfb_gompertz_cdf}()},
44 | \code{\link{tfb_gumbel_cdf}()},
45 | \code{\link{tfb_gumbel}()},
46 | \code{\link{tfb_identity}()},
47 | \code{\link{tfb_inline}()},
48 | \code{\link{tfb_invert}()},
49 | \code{\link{tfb_iterated_sigmoid_centered}()},
50 | \code{\link{tfb_kumaraswamy_cdf}()},
51 | \code{\link{tfb_kumaraswamy}()},
52 | \code{\link{tfb_lambert_w_tail}()},
53 | \code{\link{tfb_masked_autoregressive_default_template}()},
54 | \code{\link{tfb_masked_autoregressive_flow}()},
55 | \code{\link{tfb_masked_dense}()},
56 | \code{\link{tfb_matrix_inverse_tri_l}()},
57 | \code{\link{tfb_matvec_lu}()},
58 | \code{\link{tfb_normal_cdf}()},
59 | \code{\link{tfb_ordered}()},
60 | \code{\link{tfb_pad}()},
61 | \code{\link{tfb_permute}()},
62 | \code{\link{tfb_power_transform}()},
63 | \code{\link{tfb_rational_quadratic_spline}()},
64 | \code{\link{tfb_rayleigh_cdf}()},
65 | \code{\link{tfb_real_nvp_default_template}()},
66 | \code{\link{tfb_real_nvp}()},
67 | \code{\link{tfb_reciprocal}()},
68 | \code{\link{tfb_reshape}()},
69 | \code{\link{tfb_scale_matvec_diag}()},
70 | \code{\link{tfb_scale_matvec_linear_operator}()},
71 | \code{\link{tfb_scale_matvec_lu}()},
72 | \code{\link{tfb_scale_matvec_tri_l}()},
73 | \code{\link{tfb_scale_tri_l}()},
74 | \code{\link{tfb_scale}()},
75 | \code{\link{tfb_shifted_gompertz_cdf}()},
76 | \code{\link{tfb_shift}()},
77 | \code{\link{tfb_sigmoid}()},
78 | \code{\link{tfb_sinh_arcsinh}()},
79 | \code{\link{tfb_sinh}()},
80 | \code{\link{tfb_softmax_centered}()},
81 | \code{\link{tfb_softplus}()},
82 | \code{\link{tfb_softsign}()},
83 | \code{\link{tfb_split}()},
84 | \code{\link{tfb_square}()},
85 | \code{\link{tfb_tanh}()},
86 | \code{\link{tfb_transform_diagonal}()},
87 | \code{\link{tfb_transpose}()},
88 | \code{\link{tfb_weibull_cdf}()},
89 | \code{\link{tfb_weibull}()}
90 | }
91 | \concept{bijectors}
92 |
--------------------------------------------------------------------------------
/man/tfb_forward.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/bijector-methods.R
3 | \name{tfb_forward}
4 | \alias{tfb_forward}
5 | \title{Returns the forward Bijector evaluation, i.e., \code{X = g(Y)}.}
6 | \usage{
7 | tfb_forward(bijector, x, name = "forward")
8 | }
9 | \arguments{
10 | \item{bijector}{The bijector to apply}
11 |
12 | \item{x}{Tensor. The input to the "forward" evaluation.}
13 |
14 | \item{name}{name of the operation}
15 | }
16 | \value{
17 | a tensor
18 | }
19 | \description{
20 | Returns the forward Bijector evaluation, i.e., \code{X = g(Y)}.
21 | }
22 | \examples{
23 | \donttest{
24 | b <- tfb_affine_scalar(shift = 1, scale = 2)
25 | x <- 10
26 | b \%>\% tfb_forward(x)
27 | }
28 | }
29 | \seealso{
30 | Other bijector_methods:
31 | \code{\link{tfb_forward_log_det_jacobian}()},
32 | \code{\link{tfb_inverse_log_det_jacobian}()},
33 | \code{\link{tfb_inverse}()}
34 | }
35 | \concept{bijector_methods}
36 |
--------------------------------------------------------------------------------
/man/tfb_forward_log_det_jacobian.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/bijector-methods.R
3 | \name{tfb_forward_log_det_jacobian}
4 | \alias{tfb_forward_log_det_jacobian}
5 | \title{Returns the result of the forward evaluation of the log determinant of the Jacobian}
6 | \usage{
7 | tfb_forward_log_det_jacobian(
8 | bijector,
9 | x,
10 | event_ndims,
11 | name = "forward_log_det_jacobian"
12 | )
13 | }
14 | \arguments{
15 | \item{bijector}{The bijector to apply}
16 |
17 | \item{x}{Tensor. The input to the "forward" Jacobian determinant evaluation.}
18 |
19 | \item{event_ndims}{Number of dimensions in the probabilistic events being transformed.
20 | Must be greater than or equal to \code{bijector$forward_min_event_ndims}. The result is summed over the final
21 | dimensions to produce a scalar Jacobian determinant for each event, i.e., it has
22 | \code{x$shape$ndims - event_ndims} dimensions.}
23 |
24 | \item{name}{name of the operation}
25 | }
26 | \value{
27 | a tensor
28 | }
29 | \description{
30 | Returns the result of the forward evaluation of the log determinant of the Jacobian
31 | }
32 | \examples{
33 | \donttest{
34 | b <- tfb_affine_scalar(shift = 1, scale = 2)
35 | x <- 10
36 | b \%>\% tfb_forward_log_det_jacobian(x, event_ndims = 0)
37 | }
38 | }
39 | \seealso{
40 | Other bijector_methods:
41 | \code{\link{tfb_forward}()},
42 | \code{\link{tfb_inverse_log_det_jacobian}()},
43 | \code{\link{tfb_inverse}()}
44 | }
45 | \concept{bijector_methods}
46 |
--------------------------------------------------------------------------------
/man/tfb_identity.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/bijectors.R
3 | \name{tfb_identity}
4 | \alias{tfb_identity}
5 | \title{Computes \code{Y = g(X) = X}}
6 | \usage{
7 | tfb_identity(validate_args = FALSE, name = "identity")
8 | }
9 | \arguments{
10 | \item{validate_args}{Logical, default FALSE. Whether to validate input with asserts. If \code{validate_args} is
11 | FALSE and the inputs are invalid, correct behavior is not guaranteed.}
12 |
13 | \item{name}{name prefixed to Ops created by this class.}
14 | }
15 | \value{
16 | a bijector instance.
17 | }
18 | \description{
19 | Computes \code{Y = g(X) = X}
20 | }
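% A minimal sketch: the identity bijector returns its input unchanged.
\examples{
\donttest{
b <- tfb_identity()
b \%>\% tfb_forward(10)  # 10
}
}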
21 | \seealso{
22 | For usage examples see \code{\link[=tfb_forward]{tfb_forward()}}, \code{\link[=tfb_inverse]{tfb_inverse()}}, \code{\link[=tfb_inverse_log_det_jacobian]{tfb_inverse_log_det_jacobian()}}.
23 |
24 | Other bijectors:
25 | \code{\link{tfb_absolute_value}()},
26 | \code{\link{tfb_affine_linear_operator}()},
27 | \code{\link{tfb_affine_scalar}()},
28 | \code{\link{tfb_affine}()},
29 | \code{\link{tfb_ascending}()},
30 | \code{\link{tfb_batch_normalization}()},
31 | \code{\link{tfb_blockwise}()},
32 | \code{\link{tfb_chain}()},
33 | \code{\link{tfb_cholesky_outer_product}()},
34 | \code{\link{tfb_cholesky_to_inv_cholesky}()},
35 | \code{\link{tfb_correlation_cholesky}()},
36 | \code{\link{tfb_cumsum}()},
37 | \code{\link{tfb_discrete_cosine_transform}()},
38 | \code{\link{tfb_expm1}()},
39 | \code{\link{tfb_exp}()},
40 | \code{\link{tfb_ffjord}()},
41 | \code{\link{tfb_fill_scale_tri_l}()},
42 | \code{\link{tfb_fill_triangular}()},
43 | \code{\link{tfb_glow}()},
44 | \code{\link{tfb_gompertz_cdf}()},
45 | \code{\link{tfb_gumbel_cdf}()},
46 | \code{\link{tfb_gumbel}()},
47 | \code{\link{tfb_inline}()},
48 | \code{\link{tfb_invert}()},
49 | \code{\link{tfb_iterated_sigmoid_centered}()},
50 | \code{\link{tfb_kumaraswamy_cdf}()},
51 | \code{\link{tfb_kumaraswamy}()},
52 | \code{\link{tfb_lambert_w_tail}()},
53 | \code{\link{tfb_masked_autoregressive_default_template}()},
54 | \code{\link{tfb_masked_autoregressive_flow}()},
55 | \code{\link{tfb_masked_dense}()},
56 | \code{\link{tfb_matrix_inverse_tri_l}()},
57 | \code{\link{tfb_matvec_lu}()},
58 | \code{\link{tfb_normal_cdf}()},
59 | \code{\link{tfb_ordered}()},
60 | \code{\link{tfb_pad}()},
61 | \code{\link{tfb_permute}()},
62 | \code{\link{tfb_power_transform}()},
63 | \code{\link{tfb_rational_quadratic_spline}()},
64 | \code{\link{tfb_rayleigh_cdf}()},
65 | \code{\link{tfb_real_nvp_default_template}()},
66 | \code{\link{tfb_real_nvp}()},
67 | \code{\link{tfb_reciprocal}()},
68 | \code{\link{tfb_reshape}()},
69 | \code{\link{tfb_scale_matvec_diag}()},
70 | \code{\link{tfb_scale_matvec_linear_operator}()},
71 | \code{\link{tfb_scale_matvec_lu}()},
72 | \code{\link{tfb_scale_matvec_tri_l}()},
73 | \code{\link{tfb_scale_tri_l}()},
74 | \code{\link{tfb_scale}()},
75 | \code{\link{tfb_shifted_gompertz_cdf}()},
76 | \code{\link{tfb_shift}()},
77 | \code{\link{tfb_sigmoid}()},
78 | \code{\link{tfb_sinh_arcsinh}()},
79 | \code{\link{tfb_sinh}()},
80 | \code{\link{tfb_softmax_centered}()},
81 | \code{\link{tfb_softplus}()},
82 | \code{\link{tfb_softsign}()},
83 | \code{\link{tfb_split}()},
84 | \code{\link{tfb_square}()},
85 | \code{\link{tfb_tanh}()},
86 | \code{\link{tfb_transform_diagonal}()},
87 | \code{\link{tfb_transpose}()},
88 | \code{\link{tfb_weibull_cdf}()},
89 | \code{\link{tfb_weibull}()}
90 | }
91 | \concept{bijectors}
92 |
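93 | \examples{
94 | \donttest{
95 | # A minimal sketch added for illustration (not from the original docs):
96 | # the identity bijector maps inputs to themselves, so forward and
97 | # inverse are both no-ops.
98 | b <- tfb_identity()
99 | b \%>\% tfb_forward(c(1, 2, 3))  # returns 1, 2, 3 unchanged
100 | b \%>\% tfb_inverse(c(1, 2, 3))  # likewise the identity
101 | }
102 | }
103 | 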
--------------------------------------------------------------------------------
/man/tfb_inverse.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/bijector-methods.R
3 | \name{tfb_inverse}
4 | \alias{tfb_inverse}
5 | \title{Returns the inverse Bijector evaluation, i.e., \code{X = g^{-1}(Y)}.}
6 | \usage{
7 | tfb_inverse(bijector, y, name = "inverse")
8 | }
9 | \arguments{
10 | \item{bijector}{The bijector to apply}
11 |
12 | \item{y}{Tensor. The input to the "inverse" evaluation.}
13 |
14 | \item{name}{name of the operation}
15 | }
16 | \value{
17 | a tensor
18 | }
19 | \description{
20 | Returns the inverse Bijector evaluation, i.e., \code{X = g^{-1}(Y)}.
21 | }
22 | \examples{
23 | \donttest{
24 | b <- tfb_affine_scalar(shift = 1, scale = 2)
25 | x <- 10
26 | y <- b \%>\% tfb_forward(x)
27 | b \%>\% tfb_inverse(y)
28 | }
29 | }
30 | \seealso{
31 | Other bijector_methods:
32 | \code{\link{tfb_forward_log_det_jacobian}()},
33 | \code{\link{tfb_forward}()},
34 | \code{\link{tfb_inverse_log_det_jacobian}()}
35 | }
36 | \concept{bijector_methods}
37 |
--------------------------------------------------------------------------------
/man/tfb_inverse_log_det_jacobian.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/bijector-methods.R
3 | \name{tfb_inverse_log_det_jacobian}
4 | \alias{tfb_inverse_log_det_jacobian}
5 | \title{Returns the result of the inverse evaluation of the log determinant of the Jacobian}
6 | \usage{
7 | tfb_inverse_log_det_jacobian(
8 | bijector,
9 | y,
10 | event_ndims,
11 | name = "inverse_log_det_jacobian"
12 | )
13 | }
14 | \arguments{
15 | \item{bijector}{The bijector to apply}
16 |
17 | \item{y}{Tensor. The input to the "inverse" Jacobian determinant evaluation.}
18 |
19 | \item{event_ndims}{Number of dimensions in the probabilistic events being transformed.
20 | Must be greater than or equal to \code{bijector$inverse_min_event_ndims}. The result is summed over the final
21 | dimensions to produce a scalar Jacobian determinant for each event, i.e., it has shape
22 | \code{y$shape$ndims - event_ndims} dimensions.}
23 |
24 | \item{name}{name of the operation}
25 | }
26 | \value{
27 | a tensor
28 | }
29 | \description{
30 | Returns the result of the inverse evaluation of the log determinant of the Jacobian
31 | }
32 | \examples{
33 | \donttest{
34 | b <- tfb_affine_scalar(shift = 1, scale = 2)
35 | x <- 10
36 | y <- b \%>\% tfb_forward(x)
37 | b \%>\% tfb_inverse_log_det_jacobian(y, event_ndims = 0)
38 | }
39 | }
40 | \seealso{
41 | Other bijector_methods:
42 | \code{\link{tfb_forward_log_det_jacobian}()},
43 | \code{\link{tfb_forward}()},
44 | \code{\link{tfb_inverse}()}
45 | }
46 | \concept{bijector_methods}
47 |
--------------------------------------------------------------------------------
/man/tfb_sinh.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/bijectors.R
3 | \name{tfb_sinh}
4 | \alias{tfb_sinh}
5 | \title{Bijector that computes \code{Y = sinh(X)}.}
6 | \usage{
7 | tfb_sinh(validate_args = FALSE, name = "sinh")
8 | }
9 | \arguments{
10 | \item{validate_args}{Logical, default FALSE. Whether to validate input with asserts. If validate_args is
11 | FALSE, and the inputs are invalid, correct behavior is not guaranteed.}
12 |
13 | \item{name}{name prefixed to Ops created by this class.}
14 | }
15 | \value{
16 | a bijector instance.
17 | }
18 | \description{
19 | Bijector that computes \code{Y = sinh(X)}.
20 | }
21 | \seealso{
22 | For usage examples see \code{\link[=tfb_forward]{tfb_forward()}}, \code{\link[=tfb_inverse]{tfb_inverse()}}, \code{\link[=tfb_inverse_log_det_jacobian]{tfb_inverse_log_det_jacobian()}}.
23 |
24 | Other bijectors:
25 | \code{\link{tfb_absolute_value}()},
26 | \code{\link{tfb_affine_linear_operator}()},
27 | \code{\link{tfb_affine_scalar}()},
28 | \code{\link{tfb_affine}()},
29 | \code{\link{tfb_ascending}()},
30 | \code{\link{tfb_batch_normalization}()},
31 | \code{\link{tfb_blockwise}()},
32 | \code{\link{tfb_chain}()},
33 | \code{\link{tfb_cholesky_outer_product}()},
34 | \code{\link{tfb_cholesky_to_inv_cholesky}()},
35 | \code{\link{tfb_correlation_cholesky}()},
36 | \code{\link{tfb_cumsum}()},
37 | \code{\link{tfb_discrete_cosine_transform}()},
38 | \code{\link{tfb_expm1}()},
39 | \code{\link{tfb_exp}()},
40 | \code{\link{tfb_ffjord}()},
41 | \code{\link{tfb_fill_scale_tri_l}()},
42 | \code{\link{tfb_fill_triangular}()},
43 | \code{\link{tfb_glow}()},
44 | \code{\link{tfb_gompertz_cdf}()},
45 | \code{\link{tfb_gumbel_cdf}()},
46 | \code{\link{tfb_gumbel}()},
47 | \code{\link{tfb_identity}()},
48 | \code{\link{tfb_inline}()},
49 | \code{\link{tfb_invert}()},
50 | \code{\link{tfb_iterated_sigmoid_centered}()},
51 | \code{\link{tfb_kumaraswamy_cdf}()},
52 | \code{\link{tfb_kumaraswamy}()},
53 | \code{\link{tfb_lambert_w_tail}()},
54 | \code{\link{tfb_masked_autoregressive_default_template}()},
55 | \code{\link{tfb_masked_autoregressive_flow}()},
56 | \code{\link{tfb_masked_dense}()},
57 | \code{\link{tfb_matrix_inverse_tri_l}()},
58 | \code{\link{tfb_matvec_lu}()},
59 | \code{\link{tfb_normal_cdf}()},
60 | \code{\link{tfb_ordered}()},
61 | \code{\link{tfb_pad}()},
62 | \code{\link{tfb_permute}()},
63 | \code{\link{tfb_power_transform}()},
64 | \code{\link{tfb_rational_quadratic_spline}()},
65 | \code{\link{tfb_rayleigh_cdf}()},
66 | \code{\link{tfb_real_nvp_default_template}()},
67 | \code{\link{tfb_real_nvp}()},
68 | \code{\link{tfb_reciprocal}()},
69 | \code{\link{tfb_reshape}()},
70 | \code{\link{tfb_scale_matvec_diag}()},
71 | \code{\link{tfb_scale_matvec_linear_operator}()},
72 | \code{\link{tfb_scale_matvec_lu}()},
73 | \code{\link{tfb_scale_matvec_tri_l}()},
74 | \code{\link{tfb_scale_tri_l}()},
75 | \code{\link{tfb_scale}()},
76 | \code{\link{tfb_shifted_gompertz_cdf}()},
77 | \code{\link{tfb_shift}()},
78 | \code{\link{tfb_sigmoid}()},
79 | \code{\link{tfb_sinh_arcsinh}()},
80 | \code{\link{tfb_softmax_centered}()},
81 | \code{\link{tfb_softplus}()},
82 | \code{\link{tfb_softsign}()},
83 | \code{\link{tfb_split}()},
84 | \code{\link{tfb_square}()},
85 | \code{\link{tfb_tanh}()},
86 | \code{\link{tfb_transform_diagonal}()},
87 | \code{\link{tfb_transpose}()},
88 | \code{\link{tfb_weibull_cdf}()},
89 | \code{\link{tfb_weibull}()}
90 | }
91 | \concept{bijectors}
92 |
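93 | \examples{
94 | \donttest{
95 | # A minimal sketch added for illustration (not from the original docs):
96 | # forward computes sinh(x); inverse recovers x via asinh.
97 | b <- tfb_sinh()
98 | y <- b \%>\% tfb_forward(0.5)  # sinh(0.5), approx. 0.521
99 | b \%>\% tfb_inverse(y)         # approx. 0.5
100 | }
101 | }
102 | 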
--------------------------------------------------------------------------------
/man/tfd_blockwise.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/distributions.R
3 | \name{tfd_blockwise}
4 | \alias{tfd_blockwise}
5 | \title{Blockwise distribution}
6 | \usage{
7 | tfd_blockwise(
8 | distributions,
9 | dtype_override = NULL,
10 | validate_args = FALSE,
11 | allow_nan_stats = FALSE,
12 | name = "Blockwise"
13 | )
14 | }
15 | \arguments{
16 | \item{distributions}{list of Distribution instances. All distribution instances
17 | must have the same \code{batch_shape} and all must have \code{event_ndims == 1}, i.e., be
18 | vector-variate distributions.}
19 |
20 | \item{dtype_override}{samples of distributions will be cast to this dtype. If
21 | unspecified, all distributions must have the same dtype. Default value:
22 | \code{NULL} (i.e., do not cast).}
23 |
24 | \item{validate_args}{Logical, default FALSE. When TRUE distribution parameters are checked
25 | for validity despite possibly degrading runtime performance. When FALSE invalid inputs may
26 | silently render incorrect outputs. Default value: FALSE.}
27 |
28 | \item{allow_nan_stats}{Logical, default FALSE. When TRUE, statistics (e.g., mean, mode, variance)
29 | use the value NaN to indicate the result is undefined. When FALSE, an exception is raised if
30 | one or more of the statistic's batch members are undefined.}
31 |
32 | \item{name}{name prefixed to Ops created by this class.}
33 | }
34 | \value{
35 | a distribution instance.
36 | }
37 | \description{
38 | Blockwise distribution
39 | }
40 | \seealso{
41 | For usage examples see e.g. \code{\link[=tfd_sample]{tfd_sample()}}, \code{\link[=tfd_log_prob]{tfd_log_prob()}}, \code{\link[=tfd_mean]{tfd_mean()}}.
42 | }
43 |
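44 | \examples{
45 | \donttest{
46 | # A hypothetical sketch added for illustration: concatenate two
47 | # vector-variate distributions (both have event_ndims == 1) from this
48 | # package into a single length-5 event.
49 | d <- tfd_blockwise(list(
50 |   tfd_multivariate_normal_diag(loc = c(0, 0), scale_diag = c(1, 1)),
51 |   tfd_dirichlet(concentration = c(1, 2, 3))
52 | ))
53 | d \%>\% tfd_sample()  # 2 normal components followed by 3 simplex components
54 | }
55 | }
56 | 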
--------------------------------------------------------------------------------
/man/tfd_cdf.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/distribution-methods.R
3 | \name{tfd_cdf}
4 | \alias{tfd_cdf}
5 | \title{Cumulative distribution function.
6 | Given random variable X, the cumulative distribution function cdf is:
7 | \code{cdf(x) := P[X <= x]}}
8 | \usage{
9 | tfd_cdf(distribution, value, ...)
10 | }
11 | \arguments{
12 | \item{distribution}{The distribution being used.}
13 |
14 | \item{value}{float or double Tensor.}
15 |
16 | \item{...}{Additional parameters passed to Python.}
17 | }
18 | \value{
19 | a Tensor of shape \code{sample_shape(x) + self$batch_shape} with values of type \code{self$dtype}.
20 | }
21 | \description{
22 | Cumulative distribution function.
23 | Given random variable X, the cumulative distribution function cdf is:
24 | \code{cdf(x) := P[X <= x]}
25 | }
26 | \examples{
27 | \donttest{
28 | d <- tfd_normal(loc = c(1, 2), scale = c(1, 0.5))
29 | x <- d \%>\% tfd_sample()
30 | d \%>\% tfd_cdf(x)
31 | }
32 | }
33 | \seealso{
34 | Other distribution_methods:
35 | \code{\link{tfd_covariance}()},
36 | \code{\link{tfd_cross_entropy}()},
37 | \code{\link{tfd_entropy}()},
38 | \code{\link{tfd_kl_divergence}()},
39 | \code{\link{tfd_log_cdf}()},
40 | \code{\link{tfd_log_prob}()},
41 | \code{\link{tfd_log_survival_function}()},
42 | \code{\link{tfd_mean}()},
43 | \code{\link{tfd_mode}()},
44 | \code{\link{tfd_prob}()},
45 | \code{\link{tfd_quantile}()},
46 | \code{\link{tfd_sample}()},
47 | \code{\link{tfd_stddev}()},
48 | \code{\link{tfd_survival_function}()},
49 | \code{\link{tfd_variance}()}
50 | }
51 | \concept{distribution_methods}
52 |
--------------------------------------------------------------------------------
/man/tfd_covariance.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/distribution-methods.R
3 | \name{tfd_covariance}
4 | \alias{tfd_covariance}
5 | \title{Covariance.}
6 | \usage{
7 | tfd_covariance(distribution, ...)
8 | }
9 | \arguments{
10 | \item{distribution}{The distribution being used.}
11 |
12 | \item{...}{Additional parameters passed to Python.}
13 | }
14 | \value{
15 | Floating-point Tensor with shape \verb{[B1, ..., Bn, k, k]} where the first n dimensions
16 | are batch coordinates and \code{k = reduce_prod(self.event_shape)}.
17 | }
18 | \description{
19 | Covariance is (possibly) defined only for non-scalar-event distributions.
20 | For example, for a length-k, vector-valued distribution, it is calculated as,
21 | \code{Cov[i, j] = Covariance(X_i, X_j) = E[(X_i - E[X_i]) (X_j - E[X_j])]}
22 | where Cov is a (batch of) k x k matrix, 0 <= (i, j) < k, and E denotes expectation.
23 | }
24 | \details{
25 | Alternatively, for non-vector, multivariate distributions (e.g., matrix-valued, Wishart),
26 | Covariance shall return a (batch of) matrices under some vectorization of the events, i.e.,
27 | \verb{Cov[i, j] = Covariance(Vec(X)_i, Vec(X)_j) = [as above]}
28 | where Cov is a (batch of) k x k matrices, 0 <= (i, j) < k = reduce_prod(event_shape),
29 | and Vec is some function mapping indices of this distribution's event dimensions to indices of a
30 | length-k vector.
31 | }
32 | \examples{
33 | \donttest{
34 | d <- tfd_multivariate_normal_diag(loc = c(1, 2), scale_diag = c(1, 0.5))
35 | d \%>\% tfd_covariance()
36 | }
37 | }
38 | \seealso{
39 | Other distribution_methods:
40 | \code{\link{tfd_cdf}()},
41 | \code{\link{tfd_cross_entropy}()},
42 | \code{\link{tfd_entropy}()},
43 | \code{\link{tfd_kl_divergence}()},
44 | \code{\link{tfd_log_cdf}()},
45 | \code{\link{tfd_log_prob}()},
46 | \code{\link{tfd_log_survival_function}()},
47 | \code{\link{tfd_mean}()},
48 | \code{\link{tfd_mode}()},
49 | \code{\link{tfd_prob}()},
50 | \code{\link{tfd_quantile}()},
51 | \code{\link{tfd_sample}()},
52 | \code{\link{tfd_stddev}()},
53 | \code{\link{tfd_survival_function}()},
54 | \code{\link{tfd_variance}()}
55 | }
56 | \concept{distribution_methods}
57 |
--------------------------------------------------------------------------------
/man/tfd_cross_entropy.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/distribution-methods.R
3 | \name{tfd_cross_entropy}
4 | \alias{tfd_cross_entropy}
5 | \title{Computes the (Shannon) cross entropy.}
6 | \usage{
7 | tfd_cross_entropy(distribution, other, name = "cross_entropy")
8 | }
9 | \arguments{
10 | \item{distribution}{The distribution being used.}
11 |
12 | \item{other}{\code{tfp$distributions$Distribution} instance.}
13 |
14 | \item{name}{String prepended to names of ops created by this function.}
15 | }
16 | \value{
17 | cross_entropy: self.dtype Tensor with shape \verb{[B1, ..., Bn]} representing n different calculations of (Shannon) cross entropy.
18 | }
19 | \description{
20 | Denote this distribution (self) by P and the other distribution by Q.
21 | Assuming P, Q are absolutely continuous with respect to one another and permit densities
22 | p(x) dr(x) and q(x) dr(x), (Shannon) cross entropy is defined as:
23 | \verb{H[P, Q] = E_p[-log q(X)] = -int_F p(x) log q(x) dr(x)}
24 | where F denotes the support of the random variable \code{X ~ P}.
25 | }
26 | \examples{
27 | \donttest{
28 | d1 <- tfd_normal(loc = 1, scale = 1)
29 | d2 <- tfd_normal(loc = 2, scale = 1)
30 | d1 \%>\% tfd_cross_entropy(d2)
31 | }
32 | }
33 | \seealso{
34 | Other distribution_methods:
35 | \code{\link{tfd_cdf}()},
36 | \code{\link{tfd_covariance}()},
37 | \code{\link{tfd_entropy}()},
38 | \code{\link{tfd_kl_divergence}()},
39 | \code{\link{tfd_log_cdf}()},
40 | \code{\link{tfd_log_prob}()},
41 | \code{\link{tfd_log_survival_function}()},
42 | \code{\link{tfd_mean}()},
43 | \code{\link{tfd_mode}()},
44 | \code{\link{tfd_prob}()},
45 | \code{\link{tfd_quantile}()},
46 | \code{\link{tfd_sample}()},
47 | \code{\link{tfd_stddev}()},
48 | \code{\link{tfd_survival_function}()},
49 | \code{\link{tfd_variance}()}
50 | }
51 | \concept{distribution_methods}
52 |
--------------------------------------------------------------------------------
/man/tfd_entropy.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/distribution-methods.R
3 | \name{tfd_entropy}
4 | \alias{tfd_entropy}
5 | \title{Shannon entropy in nats.}
6 | \usage{
7 | tfd_entropy(distribution, ...)
8 | }
9 | \arguments{
10 | \item{distribution}{The distribution being used.}
11 |
12 | \item{...}{Additional parameters passed to Python.}
13 | }
14 | \value{
15 | a Tensor of shape \code{sample_shape(x) + self$batch_shape} with values of type \code{self$dtype}.
16 | }
17 | \description{
18 | Shannon entropy in nats.
19 | }
20 | \examples{
21 | \donttest{
22 | d <- tfd_normal(loc = c(1, 2), scale = c(1, 0.5))
23 | d \%>\% tfd_entropy()
24 | }
25 | }
26 | \seealso{
27 | Other distribution_methods:
28 | \code{\link{tfd_cdf}()},
29 | \code{\link{tfd_covariance}()},
30 | \code{\link{tfd_cross_entropy}()},
31 | \code{\link{tfd_kl_divergence}()},
32 | \code{\link{tfd_log_cdf}()},
33 | \code{\link{tfd_log_prob}()},
34 | \code{\link{tfd_log_survival_function}()},
35 | \code{\link{tfd_mean}()},
36 | \code{\link{tfd_mode}()},
37 | \code{\link{tfd_prob}()},
38 | \code{\link{tfd_quantile}()},
39 | \code{\link{tfd_sample}()},
40 | \code{\link{tfd_stddev}()},
41 | \code{\link{tfd_survival_function}()},
42 | \code{\link{tfd_variance}()}
43 | }
44 | \concept{distribution_methods}
45 |
--------------------------------------------------------------------------------
/man/tfd_exp_relaxed_one_hot_categorical.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/distributions.R
3 | \name{tfd_exp_relaxed_one_hot_categorical}
4 | \alias{tfd_exp_relaxed_one_hot_categorical}
5 | \title{ExpRelaxedOneHotCategorical distribution with temperature and logits.}
6 | \usage{
7 | tfd_exp_relaxed_one_hot_categorical(
8 | temperature,
9 | logits = NULL,
10 | probs = NULL,
11 | validate_args = FALSE,
12 | allow_nan_stats = TRUE,
13 | name = "ExpRelaxedOneHotCategorical"
14 | )
15 | }
16 | \arguments{
17 | \item{temperature}{A 0-D Tensor, representing the temperature of a set of
18 | ExpRelaxedCategorical distributions. The temperature should be positive.}
19 |
20 | \item{logits}{An N-D Tensor, N >= 1, representing the log probabilities of a
21 | set of ExpRelaxedCategorical distributions. The first N - 1 dimensions index
22 | into a batch of independent distributions and the last dimension represents a
23 | vector of logits for each class. Only one of logits or probs should be passed
24 | in.}
25 |
26 | \item{probs}{An N-D Tensor, N >= 1, representing the probabilities of a set of
27 | ExpRelaxedCategorical distributions. The first N - 1 dimensions index into a
28 | batch of independent distributions and the last dimension represents a vector
29 | of probabilities for each class. Only one of logits or probs should be passed
30 | in.}
31 |
32 | \item{validate_args}{Logical, default FALSE. When TRUE distribution parameters are checked
33 | for validity despite possibly degrading runtime performance. When FALSE invalid inputs may
34 | silently render incorrect outputs. Default value: FALSE.}
35 |
36 | \item{allow_nan_stats}{Logical, default TRUE. When TRUE, statistics (e.g., mean, mode, variance)
37 | use the value NaN to indicate the result is undefined. When FALSE, an exception is raised if
38 | one or more of the statistic's batch members are undefined.}
39 |
40 | \item{name}{name prefixed to Ops created by this class.}
41 | }
42 | \value{
43 | a distribution instance.
44 | }
45 | \description{
46 | ExpRelaxedOneHotCategorical distribution with temperature and logits.
47 | }
48 | \seealso{
49 | For usage examples see e.g. \code{\link[=tfd_sample]{tfd_sample()}}, \code{\link[=tfd_log_prob]{tfd_log_prob()}}, \code{\link[=tfd_mean]{tfd_mean()}}.
50 | }
51 |
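52 | \examples{
53 | \donttest{
54 | # A minimal sketch added for illustration: samples live in log-space,
55 | # so exponentiating a sample yields a point on the probability simplex.
56 | d <- tfd_exp_relaxed_one_hot_categorical(temperature = 0.5, logits = c(1, 2, 3))
57 | x <- d \%>\% tfd_sample()
58 | exp(x)  # sums to 1
59 | }
60 | }
61 | 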
--------------------------------------------------------------------------------
/man/tfd_finite_discrete.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/distributions.R
3 | \name{tfd_finite_discrete}
4 | \alias{tfd_finite_discrete}
5 | \title{The finite discrete distribution.}
6 | \usage{
7 | tfd_finite_discrete(
8 | outcomes,
9 | logits = NULL,
10 | probs = NULL,
11 | rtol = NULL,
12 | atol = NULL,
13 | validate_args = FALSE,
14 | allow_nan_stats = TRUE,
15 | name = "FiniteDiscrete"
16 | )
17 | }
18 | \arguments{
19 | \item{outcomes}{A 1-D floating or integer \code{Tensor}, representing a list of
20 | possible outcomes in strictly ascending order.}
21 |
22 | \item{logits}{A floating N-D \code{Tensor}, \code{N >= 1}, representing the log
23 | probabilities of a set of FiniteDiscrete distributions. The first \code{N - 1}
24 | dimensions index into a batch of independent distributions and the
25 | last dimension represents a vector of logits for each discrete value.
26 | Only one of \code{logits} or \code{probs} should be passed in.}
27 |
28 | \item{probs}{A floating N-D \code{Tensor}, \code{N >= 1}, representing the probabilities
29 | of a set of FiniteDiscrete distributions. The first \code{N - 1} dimensions
30 | index into a batch of independent distributions and the last dimension
31 | represents a vector of probabilities for each discrete value. Only one
32 | of \code{logits} or \code{probs} should be passed in.}
33 |
34 | \item{rtol}{\code{Tensor} with same \code{dtype} as \code{outcomes}. The relative tolerance for
35 | floating-point number comparison. Only effective when \code{outcomes} is a floating-point
36 | \code{Tensor}. Default is \code{10 * eps}.}
37 |
38 | \item{atol}{\code{Tensor} with same \code{dtype} as \code{outcomes}. The absolute tolerance for
39 | floating-point number comparison. Only effective when \code{outcomes} is a floating-point
40 | \code{Tensor}. Default is \code{10 * eps}.}
41 |
42 | \item{validate_args}{Logical, default FALSE. When TRUE distribution parameters are checked
43 | for validity despite possibly degrading runtime performance. When FALSE invalid inputs may
44 | silently render incorrect outputs. Default value: FALSE.}
45 |
46 | \item{allow_nan_stats}{Logical, default TRUE. When TRUE, statistics (e.g., mean, mode, variance)
47 | use the value NaN to indicate the result is undefined. When FALSE, an exception is raised if
48 | one or more of the statistic's batch members are undefined.}
49 |
50 | \item{name}{string prefixed to Ops created by this class.}
51 | }
52 | \value{
53 | a distribution instance.
54 | }
55 | \description{
56 | The FiniteDiscrete distribution is parameterized by either probabilities or
57 | log-probabilities of a set of \code{K} possible outcomes, which is defined by
58 | a strictly ascending list of \code{K} values.
59 | }
60 | \details{
61 | Note: log_prob, prob, cdf, mode, and entropy are differentiable with respect
62 | to \code{logits} or \code{probs} but not with respect to \code{outcomes}.
63 |
64 | Mathematical Details
65 |
66 | The probability mass function (pmf) is,
67 |
68 | \verb{pmf(x; pi, qi) = prod_j pi_j**[x == qi_j]}
69 | }
70 | \seealso{
71 | For usage examples see e.g. \code{\link[=tfd_sample]{tfd_sample()}}, \code{\link[=tfd_log_prob]{tfd_log_prob()}}, \code{\link[=tfd_mean]{tfd_mean()}}.
72 | }
73 |
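74 | \examples{
75 | \donttest{
76 | # A minimal sketch added for illustration: four outcomes with
77 | # explicit probabilities.
78 | d <- tfd_finite_discrete(outcomes = c(1, 2, 4, 8), probs = c(0.1, 0.2, 0.3, 0.4))
79 | d \%>\% tfd_sample(5)
80 | d \%>\% tfd_prob(4)  # 0.3
81 | }
82 | }
83 | 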
--------------------------------------------------------------------------------
/man/tfd_kl_divergence.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/distribution-methods.R
3 | \name{tfd_kl_divergence}
4 | \alias{tfd_kl_divergence}
5 | \title{Computes the Kullback--Leibler divergence.}
6 | \usage{
7 | tfd_kl_divergence(distribution, other, name = "kl_divergence")
8 | }
9 | \arguments{
10 | \item{distribution}{The distribution being used.}
11 |
12 | \item{other}{\code{tfp$distributions$Distribution} instance.}
13 |
14 | \item{name}{String prepended to names of ops created by this function.}
15 | }
16 | \value{
17 | self$dtype Tensor with shape \verb{[B1, ..., Bn]} representing n different calculations
18 | of the Kullback-Leibler divergence.
19 | }
20 | \description{
21 | Denote this distribution by p and the other distribution by q.
22 | Assuming p, q are absolutely continuous with respect to reference measure r,
23 | the KL divergence is defined as:
24 | \verb{KL[p, q] = E_p[log(p(X)/q(X))] = -int_F p(x) log q(x) dr(x) + int_F p(x) log p(x) dr(x) = H[p, q] - H[p]}
25 | where F denotes the support of the random variable \code{X ~ p}, \code{H[., .]}
26 | denotes (Shannon) cross entropy, and \code{H[.]} denotes (Shannon) entropy.
27 | }
28 | \examples{
29 | \donttest{
30 | d1 <- tfd_normal(loc = c(1, 2), scale = c(1, 0.5))
31 | d2 <- tfd_normal(loc = c(1.5, 2), scale = c(1, 0.5))
32 | d1 \%>\% tfd_kl_divergence(d2)
33 | }
34 | }
35 | \seealso{
36 | Other distribution_methods:
37 | \code{\link{tfd_cdf}()},
38 | \code{\link{tfd_covariance}()},
39 | \code{\link{tfd_cross_entropy}()},
40 | \code{\link{tfd_entropy}()},
41 | \code{\link{tfd_log_cdf}()},
42 | \code{\link{tfd_log_prob}()},
43 | \code{\link{tfd_log_survival_function}()},
44 | \code{\link{tfd_mean}()},
45 | \code{\link{tfd_mode}()},
46 | \code{\link{tfd_prob}()},
47 | \code{\link{tfd_quantile}()},
48 | \code{\link{tfd_sample}()},
49 | \code{\link{tfd_stddev}()},
50 | \code{\link{tfd_survival_function}()},
51 | \code{\link{tfd_variance}()}
52 | }
53 | \concept{distribution_methods}
54 |
--------------------------------------------------------------------------------
/man/tfd_log_cdf.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/distribution-methods.R
3 | \name{tfd_log_cdf}
4 | \alias{tfd_log_cdf}
5 | \title{Log cumulative distribution function.}
6 | \usage{
7 | tfd_log_cdf(distribution, value, ...)
8 | }
9 | \arguments{
10 | \item{distribution}{The distribution being used.}
11 |
12 | \item{value}{float or double Tensor.}
13 |
14 | \item{...}{Additional parameters passed to Python.}
15 | }
16 | \value{
17 | a Tensor of shape \code{sample_shape(x) + self$batch_shape} with values of type \code{self$dtype}.
18 | }
19 | \description{
20 | Given random variable X, the cumulative distribution function cdf is:
21 | \code{tfd_log_cdf(x) := Log[ P[X <= x] ]}
22 | Often, a numerical approximation can be used for \code{tfd_log_cdf(x)} that yields
23 | a more accurate answer than simply taking the logarithm of the cdf when x << -1.
24 | }
25 | \examples{
26 | \donttest{
27 | d <- tfd_normal(loc = c(1, 2), scale = c(1, 0.5))
28 | x <- d \%>\% tfd_sample()
29 | d \%>\% tfd_log_cdf(x)
30 | }
31 | }
32 | \seealso{
33 | Other distribution_methods:
34 | \code{\link{tfd_cdf}()},
35 | \code{\link{tfd_covariance}()},
36 | \code{\link{tfd_cross_entropy}()},
37 | \code{\link{tfd_entropy}()},
38 | \code{\link{tfd_kl_divergence}()},
39 | \code{\link{tfd_log_prob}()},
40 | \code{\link{tfd_log_survival_function}()},
41 | \code{\link{tfd_mean}()},
42 | \code{\link{tfd_mode}()},
43 | \code{\link{tfd_prob}()},
44 | \code{\link{tfd_quantile}()},
45 | \code{\link{tfd_sample}()},
46 | \code{\link{tfd_stddev}()},
47 | \code{\link{tfd_survival_function}()},
48 | \code{\link{tfd_variance}()}
49 | }
50 | \concept{distribution_methods}
51 |
--------------------------------------------------------------------------------
/man/tfd_log_prob.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/distribution-methods.R
3 | \name{tfd_log_prob}
4 | \alias{tfd_log_prob}
5 | \title{Log probability density/mass function.}
6 | \usage{
7 | tfd_log_prob(distribution, value, ...)
8 | }
9 | \arguments{
10 | \item{distribution}{The distribution being used.}
11 |
12 | \item{value}{float or double Tensor.}
13 |
14 | \item{...}{Additional parameters passed to Python.}
15 | }
16 | \value{
17 | a Tensor of shape \code{sample_shape(x) + self$batch_shape} with values of type \code{self$dtype}.
18 | }
19 | \description{
20 | Log probability density/mass function.
21 | }
22 | \examples{
23 | \donttest{
24 | d <- tfd_normal(loc = c(1, 2), scale = c(1, 0.5))
25 | x <- d \%>\% tfd_sample()
26 | d \%>\% tfd_log_prob(x)
27 | }
28 | }
29 | \seealso{
30 | Other distribution_methods:
31 | \code{\link{tfd_cdf}()},
32 | \code{\link{tfd_covariance}()},
33 | \code{\link{tfd_cross_entropy}()},
34 | \code{\link{tfd_entropy}()},
35 | \code{\link{tfd_kl_divergence}()},
36 | \code{\link{tfd_log_cdf}()},
37 | \code{\link{tfd_log_survival_function}()},
38 | \code{\link{tfd_mean}()},
39 | \code{\link{tfd_mode}()},
40 | \code{\link{tfd_prob}()},
41 | \code{\link{tfd_quantile}()},
42 | \code{\link{tfd_sample}()},
43 | \code{\link{tfd_stddev}()},
44 | \code{\link{tfd_survival_function}()},
45 | \code{\link{tfd_variance}()}
46 | }
47 | \concept{distribution_methods}
48 |
--------------------------------------------------------------------------------
/man/tfd_log_survival_function.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/distribution-methods.R
3 | \name{tfd_log_survival_function}
4 | \alias{tfd_log_survival_function}
5 | \title{Log survival function.}
6 | \usage{
7 | tfd_log_survival_function(distribution, value, ...)
8 | }
9 | \arguments{
10 | \item{distribution}{The distribution being used.}
11 |
12 | \item{value}{float or double Tensor.}
13 |
14 | \item{...}{Additional parameters passed to Python.}
15 | }
16 | \value{
17 | a Tensor of shape \code{sample_shape(x) + self$batch_shape} with values of type \code{self$dtype}.
18 | }
19 | \description{
20 | Given random variable X, the survival function is defined:
21 | \code{tfd_log_survival_function(x) = Log[ P[X > x] ] = Log[ 1 - P[X <= x] ] = Log[ 1 - cdf(x) ]}
22 | }
23 | \details{
24 | Typically, different numerical approximations can be used for the log survival function,
25 | which are more accurate than \code{log(1 - cdf(x))} when \verb{x >> 1}.
26 | }
27 | \examples{
28 | \donttest{
29 | d <- tfd_normal(loc = c(1, 2), scale = c(1, 0.5))
30 | x <- d \%>\% tfd_sample()
31 | d \%>\% tfd_log_survival_function(x)
32 | }
33 | }
34 | \seealso{
35 | Other distribution_methods:
36 | \code{\link{tfd_cdf}()},
37 | \code{\link{tfd_covariance}()},
38 | \code{\link{tfd_cross_entropy}()},
39 | \code{\link{tfd_entropy}()},
40 | \code{\link{tfd_kl_divergence}()},
41 | \code{\link{tfd_log_cdf}()},
42 | \code{\link{tfd_log_prob}()},
43 | \code{\link{tfd_mean}()},
44 | \code{\link{tfd_mode}()},
45 | \code{\link{tfd_prob}()},
46 | \code{\link{tfd_quantile}()},
47 | \code{\link{tfd_sample}()},
48 | \code{\link{tfd_stddev}()},
49 | \code{\link{tfd_survival_function}()},
50 | \code{\link{tfd_variance}()}
51 | }
52 | \concept{distribution_methods}
53 |
--------------------------------------------------------------------------------
/man/tfd_logit_normal.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/distributions.R
3 | \name{tfd_logit_normal}
4 | \alias{tfd_logit_normal}
5 | \title{The Logit-Normal distribution}
6 | \usage{
7 | tfd_logit_normal(
8 | loc,
9 | scale,
10 | validate_args = FALSE,
11 | allow_nan_stats = TRUE,
12 | name = "LogitNormal"
13 | )
14 | }
15 | \arguments{
16 | \item{loc}{Floating point tensor; the means of the distribution(s).}
17 |
18 | \item{scale}{Floating point tensor; the stddevs of the distribution(s).
19 | Must contain only positive values.}
20 |
21 | \item{validate_args}{Logical, default FALSE. When TRUE distribution parameters are checked
22 | for validity despite possibly degrading runtime performance. When FALSE invalid inputs may
23 | silently render incorrect outputs. Default value: FALSE.}
24 |
25 | \item{allow_nan_stats}{Logical, default TRUE. When TRUE, statistics (e.g., mean, mode, variance)
26 | use the value NaN to indicate the result is undefined. When FALSE, an exception is raised if
27 | one or more of the statistic's batch members are undefined.}
28 |
29 | \item{name}{name prefixed to Ops created by this class.}
30 | }
31 | \value{
32 | a distribution instance.
33 | }
34 | \description{
35 | The Logit-Normal distribution models random variables taking values in
36 | \verb{(0, 1)} whose logit (i.e., sigmoid_inverse, i.e., \code{log(p) - log1p(-p)}) is
37 | normally distributed with mean \code{loc} and standard deviation \code{scale}. It is
38 | constructed as the sigmoid transformation (i.e., \code{1 / (1 + exp(-x))}) of a
39 | Normal distribution.
40 | }
41 | \seealso{
42 | For usage examples see e.g. \code{\link[=tfd_sample]{tfd_sample()}}, \code{\link[=tfd_log_prob]{tfd_log_prob()}}, \code{\link[=tfd_mean]{tfd_mean()}}.
43 | }
44 |
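45 | \examples{
46 | \donttest{
47 | # A minimal sketch added for illustration: samples lie in (0, 1),
48 | # and their logits are distributed as Normal(loc, scale).
49 | d <- tfd_logit_normal(loc = 0, scale = 1)
50 | x <- d \%>\% tfd_sample(3)
51 | d \%>\% tfd_log_prob(x)
52 | }
53 | }
54 | 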
--------------------------------------------------------------------------------
/man/tfd_mean.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/distribution-methods.R
3 | \name{tfd_mean}
4 | \alias{tfd_mean}
5 | \title{Mean.}
6 | \usage{
7 | tfd_mean(distribution, ...)
8 | }
9 | \arguments{
10 | \item{distribution}{The distribution being used.}
11 |
12 | \item{...}{Additional parameters passed to Python.}
13 | }
14 | \value{
15 | a Tensor of shape \code{sample_shape(x) + self$batch_shape} with values of type \code{self$dtype}.
16 | }
17 | \description{
18 | Mean.
19 | }
20 | \examples{
21 | \donttest{
22 | d <- tfd_normal(loc = c(1, 2), scale = c(1, 0.5))
23 | d \%>\% tfd_mean()
24 | }
25 | }
26 | \seealso{
27 | Other distribution_methods:
28 | \code{\link{tfd_cdf}()},
29 | \code{\link{tfd_covariance}()},
30 | \code{\link{tfd_cross_entropy}()},
31 | \code{\link{tfd_entropy}()},
32 | \code{\link{tfd_kl_divergence}()},
33 | \code{\link{tfd_log_cdf}()},
34 | \code{\link{tfd_log_prob}()},
35 | \code{\link{tfd_log_survival_function}()},
36 | \code{\link{tfd_mode}()},
37 | \code{\link{tfd_prob}()},
38 | \code{\link{tfd_quantile}()},
39 | \code{\link{tfd_sample}()},
40 | \code{\link{tfd_stddev}()},
41 | \code{\link{tfd_survival_function}()},
42 | \code{\link{tfd_variance}()}
43 | }
44 | \concept{distribution_methods}
45 |
--------------------------------------------------------------------------------
/man/tfd_mode.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/distribution-methods.R
3 | \name{tfd_mode}
4 | \alias{tfd_mode}
5 | \title{Mode.}
6 | \usage{
7 | tfd_mode(distribution, ...)
8 | }
9 | \arguments{
10 | \item{distribution}{The distribution being used.}
11 |
12 | \item{...}{Additional parameters passed to Python.}
13 | }
14 | \value{
15 | a Tensor of shape \code{sample_shape(x) + self$batch_shape} with values of type \code{self$dtype}.
16 | }
17 | \description{
18 | Mode.
19 | }
20 | \examples{
21 | \donttest{
22 | d <- tfd_normal(loc = c(1, 2), scale = c(1, 0.5))
23 | d \%>\% tfd_mode()
24 | }
25 | }
26 | \seealso{
27 | Other distribution_methods:
28 | \code{\link{tfd_cdf}()},
29 | \code{\link{tfd_covariance}()},
30 | \code{\link{tfd_cross_entropy}()},
31 | \code{\link{tfd_entropy}()},
32 | \code{\link{tfd_kl_divergence}()},
33 | \code{\link{tfd_log_cdf}()},
34 | \code{\link{tfd_log_prob}()},
35 | \code{\link{tfd_log_survival_function}()},
36 | \code{\link{tfd_mean}()},
37 | \code{\link{tfd_prob}()},
38 | \code{\link{tfd_quantile}()},
39 | \code{\link{tfd_sample}()},
40 | \code{\link{tfd_stddev}()},
41 | \code{\link{tfd_survival_function}()},
42 | \code{\link{tfd_variance}()}
43 | }
44 | \concept{distribution_methods}
45 |
--------------------------------------------------------------------------------
/man/tfd_pert.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/distributions.R
3 | \name{tfd_pert}
4 | \alias{tfd_pert}
5 | \title{Modified PERT distribution for modeling expert predictions.}
6 | \usage{
7 | tfd_pert(
8 | low,
9 | peak,
10 | high,
11 | temperature = 4,
12 | validate_args = FALSE,
13 | allow_nan_stats = FALSE,
14 | name = "Pert"
15 | )
16 | }
17 | \arguments{
18 | \item{low}{lower bound}
19 |
20 | \item{peak}{most frequent value}
21 |
22 | \item{high}{upper bound}
23 |
24 | \item{temperature}{controls the shape of the distribution}
25 |
26 | \item{validate_args}{Logical, default FALSE. When TRUE distribution parameters are checked
27 | for validity despite possibly degrading runtime performance. When FALSE invalid inputs may
28 | silently render incorrect outputs. Default value: FALSE.}
29 |
30 | \item{allow_nan_stats}{Logical, default FALSE. When TRUE, statistics (e.g., mean, mode, variance)
31 | use the value NaN to indicate the result is undefined. When FALSE, an exception is raised if
32 | one or more of the statistic's batch members are undefined.}
33 |
34 | \item{name}{name prefixed to Ops created by this class.}
35 | }
36 | \value{
37 | a distribution instance.
38 | }
39 | \description{
40 | The PERT distribution is a loc-scale family of Beta distributions
41 | fit onto a real interval between \code{low} and \code{high} values set by the user,
42 | along with a \code{peak} to indicate the expert's most frequent prediction,
43 | and \code{temperature} to control how sharp the peak is.
44 | }
45 | \details{
46 | The distribution is similar to a \href{https://en.wikipedia.org/wiki/Triangular_distribution}{Triangular distribution}
47 | (i.e. \code{tfd.Triangular}) but with a smooth peak.
48 |
49 | Mathematical Details
50 |
51 | In terms of a Beta distribution, PERT can be expressed as
52 |
53 | \if{html}{\out{}}\preformatted{PERT ~ loc + scale * Beta(concentration1, concentration0)
54 | }\if{html}{\out{
}}
55 |
56 | where
57 |
58 | \if{html}{\out{}}\preformatted{loc = low
59 | scale = high - low
60 | concentration1 = 1 + temperature * (peak - low)/(high - low)
61 | concentration0 = 1 + temperature * (high - peak)/(high - low)
62 | temperature > 0
63 | }\if{html}{\out{
}}
64 |
65 | The support is \verb{[low, high]}. The \code{peak} must fit in that interval:
66 | \verb{low < peak < high}. The \code{temperature} is a positive parameter that
67 | controls the shape of the distribution. Higher values yield a sharper peak.
68 | The standard PERT distribution is obtained when \code{temperature = 4}.
69 | }
70 | \seealso{
71 | For usage examples see e.g. \code{\link[=tfd_sample]{tfd_sample()}}, \code{\link[=tfd_log_prob]{tfd_log_prob()}}, \code{\link[=tfd_mean]{tfd_mean()}}.
72 | }
73 |
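74 | \examples{
75 | \donttest{
76 | # A minimal sketch added for illustration: the standard PERT with
77 | # the default temperature = 4 has mean (low + 4 * peak + high) / 6.
78 | d <- tfd_pert(low = 1, peak = 7, high = 11)
79 | d \%>\% tfd_sample(3)  # values in [1, 11], concentrated around the peak
80 | d \%>\% tfd_mean()     # (1 + 28 + 11) / 6, approx. 6.67
81 | }
82 | }
83 | 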
--------------------------------------------------------------------------------
/man/tfd_prob.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/distribution-methods.R
3 | \name{tfd_prob}
4 | \alias{tfd_prob}
5 | \title{Probability density/mass function.}
6 | \usage{
7 | tfd_prob(distribution, value, ...)
8 | }
9 | \arguments{
10 | \item{distribution}{The distribution being used.}
11 |
12 | \item{value}{float or double Tensor.}
13 |
14 | \item{...}{Additional parameters passed to Python.}
15 | }
16 | \value{
17 | a Tensor of shape \code{sample_shape(x) + self$batch_shape} with values of type \code{self$dtype}.
18 | }
19 | \description{
20 | Probability density/mass function.
21 | }
22 | \examples{
23 | \donttest{
24 | d <- tfd_normal(loc = c(1, 2), scale = c(1, 0.5))
25 | x <- d \%>\% tfd_sample()
26 | d \%>\% tfd_prob(x)
27 | }
28 | }
29 | \seealso{
30 | Other distribution_methods:
31 | \code{\link{tfd_cdf}()},
32 | \code{\link{tfd_covariance}()},
33 | \code{\link{tfd_cross_entropy}()},
34 | \code{\link{tfd_entropy}()},
35 | \code{\link{tfd_kl_divergence}()},
36 | \code{\link{tfd_log_cdf}()},
37 | \code{\link{tfd_log_prob}()},
38 | \code{\link{tfd_log_survival_function}()},
39 | \code{\link{tfd_mean}()},
40 | \code{\link{tfd_mode}()},
41 | \code{\link{tfd_quantile}()},
42 | \code{\link{tfd_sample}()},
43 | \code{\link{tfd_stddev}()},
44 | \code{\link{tfd_survival_function}()},
45 | \code{\link{tfd_variance}()}
46 | }
47 | \concept{distribution_methods}
48 |
--------------------------------------------------------------------------------
/man/tfd_quantile.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/distribution-methods.R
3 | \name{tfd_quantile}
4 | \alias{tfd_quantile}
5 | \title{Quantile function. Aka "inverse cdf" or "percent point function".}
6 | \usage{
7 | tfd_quantile(distribution, value, ...)
8 | }
9 | \arguments{
10 | \item{distribution}{The distribution being used.}
11 |
12 | \item{value}{float or double Tensor.}
13 |
14 | \item{...}{Additional parameters passed to Python.}
15 | }
16 | \value{
17 | a Tensor of shape \code{sample_shape(x) + self$batch_shape} with values of type \code{self$dtype}.
18 | }
19 | \description{
20 | Given random variable X and p in \verb{[0, 1]}, the quantile is:
21 | \code{tfd_quantile(p) := x} such that \code{P[X <= x] == p}
22 | }
23 | \examples{
24 | \donttest{
25 | d <- tfd_normal(loc = c(1, 2), scale = c(1, 0.5))
26 | d \%>\% tfd_quantile(0.5)
27 | }
28 | }
29 | \seealso{
30 | Other distribution_methods:
31 | \code{\link{tfd_cdf}()},
32 | \code{\link{tfd_covariance}()},
33 | \code{\link{tfd_cross_entropy}()},
34 | \code{\link{tfd_entropy}()},
35 | \code{\link{tfd_kl_divergence}()},
36 | \code{\link{tfd_log_cdf}()},
37 | \code{\link{tfd_log_prob}()},
38 | \code{\link{tfd_log_survival_function}()},
39 | \code{\link{tfd_mean}()},
40 | \code{\link{tfd_mode}()},
41 | \code{\link{tfd_prob}()},
42 | \code{\link{tfd_sample}()},
43 | \code{\link{tfd_stddev}()},
44 | \code{\link{tfd_survival_function}()},
45 | \code{\link{tfd_variance}()}
46 | }
47 | \concept{distribution_methods}
48 |
--------------------------------------------------------------------------------
/man/tfd_sample.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/distribution-methods.R
3 | \name{tfd_sample}
4 | \alias{tfd_sample}
5 | \title{Generate samples of the specified shape.}
6 | \usage{
7 | tfd_sample(distribution, sample_shape = list(), ...)
8 | }
9 | \arguments{
10 | \item{distribution}{The distribution being used.}
11 |
12 | \item{sample_shape}{0D or 1D int32 Tensor. Shape of the generated samples.}
13 |
14 | \item{...}{Additional parameters passed to Python.}
15 | }
16 | \value{
17 | a Tensor with prepended dimensions sample_shape.
18 | }
19 | \description{
20 | Note that a call to \code{tfd_sample()} without arguments will generate a single sample.
21 | }
22 | \examples{
23 | \donttest{
24 | d <- tfd_normal(loc = c(1, 2), scale = c(1, 0.5))
25 | d \%>\% tfd_sample()
26 | }
27 | }
28 | \seealso{
29 | Other distribution_methods:
30 | \code{\link{tfd_cdf}()},
31 | \code{\link{tfd_covariance}()},
32 | \code{\link{tfd_cross_entropy}()},
33 | \code{\link{tfd_entropy}()},
34 | \code{\link{tfd_kl_divergence}()},
35 | \code{\link{tfd_log_cdf}()},
36 | \code{\link{tfd_log_prob}()},
37 | \code{\link{tfd_log_survival_function}()},
38 | \code{\link{tfd_mean}()},
39 | \code{\link{tfd_mode}()},
40 | \code{\link{tfd_prob}()},
41 | \code{\link{tfd_quantile}()},
42 | \code{\link{tfd_stddev}()},
43 | \code{\link{tfd_survival_function}()},
44 | \code{\link{tfd_variance}()}
45 | }
46 | \concept{distribution_methods}
47 |
--------------------------------------------------------------------------------
/man/tfd_stddev.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/distribution-methods.R
3 | \name{tfd_stddev}
4 | \alias{tfd_stddev}
5 | \title{Standard deviation.}
6 | \usage{
7 | tfd_stddev(distribution, ...)
8 | }
9 | \arguments{
10 | \item{distribution}{The distribution being used.}
11 |
12 | \item{...}{Additional parameters passed to Python.}
13 | }
14 | \value{
15 | a Tensor of shape \code{sample_shape(x) + self$batch_shape} with values of type \code{self$dtype}.
16 | }
17 | \description{
18 | Standard deviation is defined as \code{stddev = E[(X - E[X])**2]**0.5},
19 | where X is the random variable associated with this distribution, E denotes expectation,
20 | and \code{stddev$shape = batch_shape + event_shape}.
21 | }
22 | \examples{
23 | \donttest{
24 | d <- tfd_normal(loc = c(1, 2), scale = c(1, 0.5))
25 | d \%>\% tfd_stddev()
26 | }
27 | }
28 | \seealso{
29 | Other distribution_methods:
30 | \code{\link{tfd_cdf}()},
31 | \code{\link{tfd_covariance}()},
32 | \code{\link{tfd_cross_entropy}()},
33 | \code{\link{tfd_entropy}()},
34 | \code{\link{tfd_kl_divergence}()},
35 | \code{\link{tfd_log_cdf}()},
36 | \code{\link{tfd_log_prob}()},
37 | \code{\link{tfd_log_survival_function}()},
38 | \code{\link{tfd_mean}()},
39 | \code{\link{tfd_mode}()},
40 | \code{\link{tfd_prob}()},
41 | \code{\link{tfd_quantile}()},
42 | \code{\link{tfd_sample}()},
43 | \code{\link{tfd_survival_function}()},
44 | \code{\link{tfd_variance}()}
45 | }
46 | \concept{distribution_methods}
47 |
--------------------------------------------------------------------------------
/man/tfd_survival_function.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/distribution-methods.R
3 | \name{tfd_survival_function}
4 | \alias{tfd_survival_function}
5 | \title{Survival function.}
6 | \usage{
7 | tfd_survival_function(distribution, value, ...)
8 | }
9 | \arguments{
10 | \item{distribution}{The distribution being used.}
11 |
12 | \item{value}{float or double Tensor.}
13 |
14 | \item{...}{Additional parameters passed to Python.}
15 | }
16 | \value{
17 | a Tensor of shape \code{sample_shape(x) + self$batch_shape} with values of type \code{self$dtype}.
18 | }
19 | \description{
20 | Given random variable X, the survival function is defined:
21 | \code{tfd_survival_function(x) = P[X > x] = 1 - P[X <= x] = 1 - cdf(x)}.
22 | }
23 | \examples{
24 | \donttest{
25 | d <- tfd_normal(loc = c(1, 2), scale = c(1, 0.5))
26 | x <- d \%>\% tfd_sample()
27 | d \%>\% tfd_survival_function(x)
28 | }
29 | }
30 | \seealso{
31 | Other distribution_methods:
32 | \code{\link{tfd_cdf}()},
33 | \code{\link{tfd_covariance}()},
34 | \code{\link{tfd_cross_entropy}()},
35 | \code{\link{tfd_entropy}()},
36 | \code{\link{tfd_kl_divergence}()},
37 | \code{\link{tfd_log_cdf}()},
38 | \code{\link{tfd_log_prob}()},
39 | \code{\link{tfd_log_survival_function}()},
40 | \code{\link{tfd_mean}()},
41 | \code{\link{tfd_mode}()},
42 | \code{\link{tfd_prob}()},
43 | \code{\link{tfd_quantile}()},
44 | \code{\link{tfd_sample}()},
45 | \code{\link{tfd_stddev}()},
46 | \code{\link{tfd_variance}()}
47 | }
48 | \concept{distribution_methods}
49 |
--------------------------------------------------------------------------------
/man/tfd_variance.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/distribution-methods.R
3 | \name{tfd_variance}
4 | \alias{tfd_variance}
5 | \title{Variance.}
6 | \usage{
7 | tfd_variance(distribution, ...)
8 | }
9 | \arguments{
10 | \item{distribution}{The distribution being used.}
11 |
12 | \item{...}{Additional parameters passed to Python.}
13 | }
14 | \value{
15 | a Tensor of shape \code{sample_shape(x) + self$batch_shape} with values of type \code{self$dtype}.
16 | }
17 | \description{
18 | Variance is defined as \code{Var = E[(X - E[X])**2]},
19 | where X is the random variable associated with this distribution, E denotes expectation,
20 | and \code{Var$shape = batch_shape + event_shape}.
21 | }
22 | \examples{
23 | \donttest{
24 | d <- tfd_normal(loc = c(1, 2), scale = c(1, 0.5))
25 | d \%>\% tfd_variance()
26 | }
27 | }
28 | \seealso{
29 | Other distribution_methods:
30 | \code{\link{tfd_cdf}()},
31 | \code{\link{tfd_covariance}()},
32 | \code{\link{tfd_cross_entropy}()},
33 | \code{\link{tfd_entropy}()},
34 | \code{\link{tfd_kl_divergence}()},
35 | \code{\link{tfd_log_cdf}()},
36 | \code{\link{tfd_log_prob}()},
37 | \code{\link{tfd_log_survival_function}()},
38 | \code{\link{tfd_mean}()},
39 | \code{\link{tfd_mode}()},
40 | \code{\link{tfd_prob}()},
41 | \code{\link{tfd_quantile}()},
42 | \code{\link{tfd_sample}()},
43 | \code{\link{tfd_stddev}()},
44 | \code{\link{tfd_survival_function}()}
45 | }
46 | \concept{distribution_methods}
47 |
--------------------------------------------------------------------------------
/man/tfd_vector_deterministic.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/distributions.R
3 | \name{tfd_vector_deterministic}
4 | \alias{tfd_vector_deterministic}
5 | \title{Vector Deterministic Distribution}
6 | \usage{
7 | tfd_vector_deterministic(
8 | loc,
9 | atol = NULL,
10 | rtol = NULL,
11 | validate_args = FALSE,
12 | allow_nan_stats = TRUE,
13 | name = "VectorDeterministic"
14 | )
15 | }
16 | \arguments{
17 | \item{loc}{Numeric Tensor of shape \verb{[B1, ..., Bb, k]}, with \code{b >= 0}, \code{k >= 0}. The
18 | point (or batch of points) on which this distribution is supported.}
19 |
20 | \item{atol}{Non-negative Tensor of same dtype as loc and broadcastable shape.
21 | The absolute tolerance for comparing closeness to loc. Default is 0.}
22 |
23 | \item{rtol}{Non-negative Tensor of same dtype as loc and broadcastable shape.
24 | The relative tolerance for comparing closeness to loc. Default is 0.}
25 |
26 | \item{validate_args}{Logical, default FALSE. When TRUE distribution parameters are checked
27 | for validity despite possibly degrading runtime performance. When FALSE invalid inputs may
28 | silently render incorrect outputs. Default value: FALSE.}
29 |
30 | \item{allow_nan_stats}{Logical, default TRUE. When TRUE, statistics (e.g., mean, mode, variance)
31 | use the value NaN to indicate the result is undefined. When FALSE, an exception is raised if
32 | one or more of the statistic's batch members are undefined.}
33 |
34 | \item{name}{name prefixed to Ops created by this class.}
35 | }
36 | \value{
37 | a distribution instance.
38 | }
39 | \description{
40 | The VectorDeterministic distribution is parameterized by a batch point \code{loc} in \verb{R^k}.
41 | The distribution is supported at this point only, and corresponds to a random
42 | variable that is constant, equal to \code{loc}.
43 | }
44 | \details{
45 | See \href{https://en.wikipedia.org/wiki/Degenerate_distribution}{Degenerate rv}.
46 | }
47 | \seealso{
48 | For usage examples see e.g. \code{\link[=tfd_sample]{tfd_sample()}}, \code{\link[=tfd_log_prob]{tfd_log_prob()}}, \code{\link[=tfd_mean]{tfd_mean()}}.
49 | }
50 |
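51 | \examples{
52 | \donttest{
53 | # A minimal sketch added for illustration: all mass sits at loc,
54 | # so every sample reproduces loc exactly.
55 | d <- tfd_vector_deterministic(loc = c(0, 1, 2))
56 | d \%>\% tfd_sample(2)          # each row equals c(0, 1, 2)
57 | d \%>\% tfd_prob(c(0, 1, 2))   # 1
58 | }
59 | }
60 | 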
--------------------------------------------------------------------------------
/man/tfp.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/package.R
3 | \docType{data}
4 | \name{tfp}
5 | \alias{tfp}
6 | \title{Handle to the \code{tensorflow_probability} module}
7 | \format{
8 | An object of class \code{python.builtin.module} (inherits from \code{python.builtin.object}) of length 0.
9 | }
10 | \usage{
11 | tfp
12 | }
13 | \value{
14 | Module(tensorflow_probability)
15 | }
16 | \description{
17 | Handle to the \code{tensorflow_probability} module
18 | }
19 | \keyword{datasets}
20 |
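21 | \examples{
22 | \donttest{
23 | # A hypothetical sketch added for illustration: the handle exposes the
24 | # underlying Python module directly via reticulate, e.g. to reach APIs
25 | # that have no dedicated R wrapper.
26 | d <- tfp$distributions$Normal(loc = 0, scale = 1)
27 | d$sample()
28 | }
29 | }
30 | 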
--------------------------------------------------------------------------------
/man/tfp_version.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/package.R
3 | \name{tfp_version}
4 | \alias{tfp_version}
5 | \title{TensorFlow Probability Version}
6 | \usage{
7 | tfp_version()
8 | }
9 | \value{
10 | the Python TFP version
11 | }
12 | \description{
13 | TensorFlow Probability Version
14 | }
15 |
--------------------------------------------------------------------------------
/man/vi_amari_alpha.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/vi-functions.R
3 | \name{vi_amari_alpha}
4 | \alias{vi_amari_alpha}
5 | \title{The Amari-alpha Csiszar-function in log-space}
6 | \usage{
7 | vi_amari_alpha(logu, alpha = 1, self_normalized = FALSE, name = NULL)
8 | }
9 | \arguments{
10 | \item{logu}{\code{float}-like \code{Tensor} representing \code{log(u)} from above.}
11 |
12 | \item{alpha}{\code{float}-like scalar.}
13 |
14 | \item{self_normalized}{\code{logical} indicating whether \verb{f'(u=1)=0}. When
15 | \verb{f'(u=1)=0} the implied Csiszar f-Divergence remains non-negative even
16 | when \verb{p, q} are unnormalized measures.}
17 |
18 | \item{name}{name prefixed to Ops created by this function.}
19 | }
20 | \value{
21 | amari_alpha_of_u \code{float}-like \code{Tensor} of the Csiszar-function evaluated
22 | at \code{u = exp(logu)}.
23 | }
24 | \description{
25 | A Csiszar-function is a member of \verb{F = \{ f:R_+ to R : f convex \}}.
26 | }
27 | \details{
28 | When \code{self_normalized = TRUE}, the Amari-alpha Csiszar-function is:
29 |
30 | \if{html}{\out{}}\preformatted{f(u) = \{ -log(u) + (u - 1)\},                                   alpha = 0
31 |        \{ u log(u) - (u - 1)\},                                   alpha = 1
32 |        \{ (u^alpha - 1 - alpha (u - 1)) / (alpha (alpha - 1))\},  otherwise
33 | }\if{html}{\out{
}}
34 |
35 | When \code{self_normalized = FALSE} the \code{(u - 1)} terms are omitted.
36 |
37 | Warning: when \code{alpha != 0} and/or \code{self_normalized = TRUE} this function makes
38 | non-log-space calculations and may therefore be numerically unstable for
39 | \verb{|logu| >> 0}.
40 | }
41 | \section{References}{
42 |
43 | \itemize{
44 | \item A. Cichocki and S. Amari. "Families of Alpha- Beta- and Gamma-Divergences: Flexible and Robust Measures of Similarities." Entropy, vol. 12, no. 6, pp. 1532-1568, 2010.
45 | }
46 | }
47 |
48 | \seealso{
49 | Other vi-functions:
50 | \code{\link{vi_arithmetic_geometric}()},
51 | \code{\link{vi_chi_square}()},
52 | \code{\link{vi_csiszar_vimco}()},
53 | \code{\link{vi_dual_csiszar_function}()},
54 | \code{\link{vi_fit_surrogate_posterior}()},
55 | \code{\link{vi_jeffreys}()},
56 | \code{\link{vi_jensen_shannon}()},
57 | \code{\link{vi_kl_forward}()},
58 | \code{\link{vi_kl_reverse}()},
59 | \code{\link{vi_log1p_abs}()},
60 | \code{\link{vi_modified_gan}()},
61 | \code{\link{vi_monte_carlo_variational_loss}()},
62 | \code{\link{vi_pearson}()},
63 | \code{\link{vi_squared_hellinger}()},
64 | \code{\link{vi_symmetrized_csiszar_function}()}
65 | }
66 | \concept{vi-functions}
67 |
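68 | \examples{
69 | \donttest{
70 | # A minimal sketch added for illustration: with alpha = 1 and the
71 | # default self_normalized = FALSE, f(u) = u * log(u), so at u = 2
72 | # the value is 2 * log(2), approx. 1.386.
73 | vi_amari_alpha(logu = log(2), alpha = 1)
74 | }
75 | }
76 | 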
--------------------------------------------------------------------------------
/man/vi_arithmetic_geometric.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/vi-functions.R
3 | \name{vi_arithmetic_geometric}
4 | \alias{vi_arithmetic_geometric}
5 | \title{The Arithmetic-Geometric Csiszar-function in log-space}
6 | \usage{
7 | vi_arithmetic_geometric(logu, self_normalized = FALSE, name = NULL)
8 | }
9 | \arguments{
10 | \item{logu}{\code{float}-like \code{Tensor} representing \code{log(u)} from above.}
11 |
12 | \item{self_normalized}{\code{logical} indicating whether \verb{f'(u=1)=0}. When
13 | \verb{f'(u=1)=0} the implied Csiszar f-Divergence remains non-negative even
14 | when \verb{p, q} are unnormalized measures.}
15 |
16 | \item{name}{name prefixed to Ops created by this function.}
17 | }
18 | \value{
19 | arithmetic_geometric_of_u: \code{float}-like \code{Tensor} of the
20 | Csiszar-function evaluated at \code{u = exp(logu)}.
21 | }
22 | \description{
23 | A Csiszar-function is a member of \verb{F = \{ f:R_+ to R : f convex \}}.
24 | }
25 | \details{
26 | When \code{self_normalized = TRUE} the Arithmetic-Geometric Csiszar-function is:
27 |
28 | \if{html}{\out{}}\preformatted{f(u) = (1 + u) log( (1 + u) / sqrt(u) ) - (1 + u) log(2)
29 | }\if{html}{\out{
}}
30 |
31 | When \code{self_normalized = FALSE} the \verb{(1 + u) log(2)} term is omitted.
32 |
33 | Observe that as an f-Divergence, this Csiszar-function implies:
34 |
35 | \if{html}{\out{}}\preformatted{D_f[p, q] = KL[m, p] + KL[m, q]
36 | m(x) = 0.5 p(x) + 0.5 q(x)
37 | }\if{html}{\out{
}}
38 |
39 | In a sense, this divergence is the "reverse" of the Jensen-Shannon
40 | f-Divergence.
41 | This Csiszar-function induces a symmetric f-Divergence, i.e.,
42 | \code{D_f[p, q] = D_f[q, p]}.
43 |
44 | Warning: when \code{self_normalized = TRUE} this function makes non-log-space calculations and may therefore be numerically unstable for \verb{|logu| >> 0}.
45 | }
46 | \seealso{
47 | Other vi-functions:
48 | \code{\link{vi_amari_alpha}()},
49 | \code{\link{vi_chi_square}()},
50 | \code{\link{vi_csiszar_vimco}()},
51 | \code{\link{vi_dual_csiszar_function}()},
52 | \code{\link{vi_fit_surrogate_posterior}()},
53 | \code{\link{vi_jeffreys}()},
54 | \code{\link{vi_jensen_shannon}()},
55 | \code{\link{vi_kl_forward}()},
56 | \code{\link{vi_kl_reverse}()},
57 | \code{\link{vi_log1p_abs}()},
58 | \code{\link{vi_modified_gan}()},
59 | \code{\link{vi_monte_carlo_variational_loss}()},
60 | \code{\link{vi_pearson}()},
61 | \code{\link{vi_squared_hellinger}()},
62 | \code{\link{vi_symmetrized_csiszar_function}()}
63 | }
64 | \concept{vi-functions}
65 |
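66 | \examples{
67 | \dontrun{
68 | # Illustrative sketch only, assuming a working TensorFlow Probability
69 | # backend. At u = 1 (logu = 0) the self-normalized function is exactly 0,
70 | # since (1 + u) log((1 + u) / sqrt(u)) equals (1 + u) log(2) there.
71 | vi_arithmetic_geometric(0, self_normalized = TRUE)
72 | }
73 | }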
--------------------------------------------------------------------------------
/man/vi_chi_square.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/vi-functions.R
3 | \name{vi_chi_square}
4 | \alias{vi_chi_square}
5 | \title{The chi-square Csiszar-function in log-space}
6 | \usage{
7 | vi_chi_square(logu, name = NULL)
8 | }
9 | \arguments{
10 | \item{logu}{\code{float}-like \code{Tensor} representing \code{log(u)} from above.}
11 |
12 | \item{name}{name prefixed to Ops created by this function.}
13 | }
14 | \value{
15 | chi_square_of_u: \code{float}-like \code{Tensor} of the Csiszar-function
16 | evaluated at \code{u = exp(logu)}.
17 | }
18 | \description{
19 | A Csiszar-function is a member of \verb{F = \{ f:R_+ to R : f convex \}}.
20 | }
21 | \details{
22 | The Chi-square Csiszar-function is:
23 |
24 | \if{html}{\out{}}\preformatted{f(u) = u**2 - 1
25 | }\if{html}{\out{
}}
26 |
27 | Warning: this function makes non-log-space calculations and may
28 | therefore be numerically unstable for \verb{|logu| >> 0}.
29 | }
30 | \seealso{
31 | Other vi-functions:
32 | \code{\link{vi_amari_alpha}()},
33 | \code{\link{vi_arithmetic_geometric}()},
34 | \code{\link{vi_csiszar_vimco}()},
35 | \code{\link{vi_dual_csiszar_function}()},
36 | \code{\link{vi_fit_surrogate_posterior}()},
37 | \code{\link{vi_jeffreys}()},
38 | \code{\link{vi_jensen_shannon}()},
39 | \code{\link{vi_kl_forward}()},
40 | \code{\link{vi_kl_reverse}()},
41 | \code{\link{vi_log1p_abs}()},
42 | \code{\link{vi_modified_gan}()},
43 | \code{\link{vi_monte_carlo_variational_loss}()},
44 | \code{\link{vi_pearson}()},
45 | \code{\link{vi_squared_hellinger}()},
46 | \code{\link{vi_symmetrized_csiszar_function}()}
47 | }
48 | \concept{vi-functions}
49 |
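50 | \examples{
51 | \dontrun{
52 | # Illustrative sketch only, assuming a working TensorFlow Probability
53 | # backend. Since f(u) = u^2 - 1, logu = log(2) should evaluate to 3.
54 | vi_chi_square(log(2))
55 | }
56 | }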
--------------------------------------------------------------------------------
/man/vi_dual_csiszar_function.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/vi-functions.R
3 | \name{vi_dual_csiszar_function}
4 | \alias{vi_dual_csiszar_function}
5 | \title{Calculates the dual Csiszar-function in log-space}
6 | \usage{
7 | vi_dual_csiszar_function(logu, csiszar_function, name = NULL)
8 | }
9 | \arguments{
10 | \item{logu}{\code{float}-like \code{Tensor} representing \code{log(u)} from above.}
11 |
12 | \item{csiszar_function}{function representing a Csiszar-function over log-domain.}
13 |
14 | \item{name}{name prefixed to Ops created by this function.}
15 | }
16 | \value{
17 | dual_f_of_u: \code{float}-like \code{Tensor} of the result of calculating the dual of
18 | \code{f} at \code{u = exp(logu)}.
19 | }
20 | \description{
21 | A Csiszar-function is a member of \verb{F = \{ f:R_+ to R : f convex \}}.
22 | }
23 | \details{
24 | The Csiszar-dual is defined as:
25 |
26 | \if{html}{\out{}}\preformatted{f^*(u) = u f(1 / u)
27 | }\if{html}{\out{
}}
28 |
29 | where \code{f} is some other Csiszar-function.
30 | For example, the dual of \code{kl_reverse} is \code{kl_forward}, i.e.,
31 |
32 | \if{html}{\out{}}\preformatted{f(u) = -log(u)
33 | f^*(u) = u f(1 / u) = -u log(1 / u) = u log(u)
34 | }\if{html}{\out{
}}
35 |
36 | The dual of the dual is the original function:
37 |
38 | \if{html}{\out{}}\preformatted{f^**(u) = \{u f(1/u)\}^*(u) = u (1/u) f(1/(1/u)) = f(u)
39 | }\if{html}{\out{
}}
40 |
41 | Warning: this function makes non-log-space calculations and may therefore be
42 | numerically unstable for \verb{|logu| >> 0}.
43 | }
44 | \seealso{
45 | Other vi-functions:
46 | \code{\link{vi_amari_alpha}()},
47 | \code{\link{vi_arithmetic_geometric}()},
48 | \code{\link{vi_chi_square}()},
49 | \code{\link{vi_csiszar_vimco}()},
50 | \code{\link{vi_fit_surrogate_posterior}()},
51 | \code{\link{vi_jeffreys}()},
52 | \code{\link{vi_jensen_shannon}()},
53 | \code{\link{vi_kl_forward}()},
54 | \code{\link{vi_kl_reverse}()},
55 | \code{\link{vi_log1p_abs}()},
56 | \code{\link{vi_modified_gan}()},
57 | \code{\link{vi_monte_carlo_variational_loss}()},
58 | \code{\link{vi_pearson}()},
59 | \code{\link{vi_squared_hellinger}()},
60 | \code{\link{vi_symmetrized_csiszar_function}()}
61 | }
62 | \concept{vi-functions}
63 |
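64 | \examples{
65 | \dontrun{
66 | # Illustrative sketch only, assuming a working TensorFlow Probability
67 | # backend and that an R function can be passed as csiszar_function. The
68 | # dual of kl_reverse is kl_forward, so (with the default
69 | # self_normalized = FALSE) these two calls should agree at u = 2.
70 | vi_dual_csiszar_function(log(2), vi_kl_reverse)
71 | vi_kl_forward(log(2))
72 | }
73 | }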
--------------------------------------------------------------------------------
/man/vi_jeffreys.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/vi-functions.R
3 | \name{vi_jeffreys}
4 | \alias{vi_jeffreys}
5 | \title{The Jeffreys Csiszar-function in log-space}
6 | \usage{
7 | vi_jeffreys(logu, name = NULL)
8 | }
9 | \arguments{
10 | \item{logu}{\code{float}-like \code{Tensor} representing \code{log(u)} from above.}
11 |
12 | \item{name}{name prefixed to Ops created by this function.}
13 | }
14 | \value{
15 | jeffreys_of_u: \code{float}-like \code{Tensor} of the Csiszar-function
16 | evaluated at \code{u = exp(logu)}.
17 | }
18 | \description{
19 | A Csiszar-function is a member of \verb{F = \{ f:R_+ to R : f convex \}}.
20 | }
21 | \details{
22 | The Jeffreys Csiszar-function is:
23 |
24 | \if{html}{\out{}}\preformatted{f(u) = 0.5 ( u log(u) - log(u))
25 | = 0.5 kl_forward + 0.5 kl_reverse
26 | = symmetrized_csiszar_function(kl_reverse)
27 | = symmetrized_csiszar_function(kl_forward)
28 | }\if{html}{\out{
}}
29 |
30 | This Csiszar-function induces a symmetric f-Divergence, i.e.,
31 | \code{D_f[p, q] = D_f[q, p]}.
32 |
33 | Warning: this function makes non-log-space calculations and may
34 | therefore be numerically unstable for \verb{|logu| >> 0}.
35 | }
36 | \seealso{
37 | Other vi-functions:
38 | \code{\link{vi_amari_alpha}()},
39 | \code{\link{vi_arithmetic_geometric}()},
40 | \code{\link{vi_chi_square}()},
41 | \code{\link{vi_csiszar_vimco}()},
42 | \code{\link{vi_dual_csiszar_function}()},
43 | \code{\link{vi_fit_surrogate_posterior}()},
44 | \code{\link{vi_jensen_shannon}()},
45 | \code{\link{vi_kl_forward}()},
46 | \code{\link{vi_kl_reverse}()},
47 | \code{\link{vi_log1p_abs}()},
48 | \code{\link{vi_modified_gan}()},
49 | \code{\link{vi_monte_carlo_variational_loss}()},
50 | \code{\link{vi_pearson}()},
51 | \code{\link{vi_squared_hellinger}()},
52 | \code{\link{vi_symmetrized_csiszar_function}()}
53 | }
54 | \concept{vi-functions}
55 |
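56 | \examples{
57 | \dontrun{
58 | # Illustrative sketch only, assuming a working TensorFlow Probability
59 | # backend. At logu = 1 (u = e), f(u) = 0.5 (u log(u) - log(u)) = 0.5 (e - 1).
60 | vi_jeffreys(1)
61 | }
62 | }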
--------------------------------------------------------------------------------
/man/vi_jensen_shannon.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/vi-functions.R
3 | \name{vi_jensen_shannon}
4 | \alias{vi_jensen_shannon}
5 | \title{The Jensen-Shannon Csiszar-function in log-space}
6 | \usage{
7 | vi_jensen_shannon(logu, self_normalized = FALSE, name = NULL)
8 | }
9 | \arguments{
10 | \item{logu}{\code{float}-like \code{Tensor} representing \code{log(u)} from above.}
11 |
12 | \item{self_normalized}{\code{logical} indicating whether \verb{f'(u=1)=0}. When
13 | \verb{f'(u=1)=0} the implied Csiszar f-Divergence remains non-negative even
14 | when \verb{p, q} are unnormalized measures.}
15 |
16 | \item{name}{name prefixed to Ops created by this function.}
17 | }
18 | \value{
19 | jensen_shannon_of_u: \code{float}-like \code{Tensor} of the Csiszar-function
20 | evaluated at \code{u = exp(logu)}.
21 | }
22 | \description{
23 | A Csiszar-function is a member of \verb{F = \{ f:R_+ to R : f convex \}}.
24 | }
25 | \details{
26 | When \code{self_normalized = TRUE}, the Jensen-Shannon Csiszar-function is:
27 |
28 | \if{html}{\out{}}\preformatted{f(u) = u log(u) - (1 + u) log(1 + u) + (u + 1) log(2)
29 | }\if{html}{\out{
}}
30 |
31 | When \code{self_normalized = FALSE} the \verb{(u + 1) log(2)} term is omitted.
32 |
33 | Observe that as an f-Divergence, this Csiszar-function implies:
34 |
35 | \if{html}{\out{}}\preformatted{D_f[p, q] = KL[p, m] + KL[q, m]
36 | m(x) = 0.5 p(x) + 0.5 q(x)
37 | }\if{html}{\out{
}}
38 |
39 | In a sense, this divergence is the "reverse" of the Arithmetic-Geometric
40 | f-Divergence.
41 |
42 | This Csiszar-function induces a symmetric f-Divergence, i.e.,
43 | \code{D_f[p, q] = D_f[q, p]}.
44 |
45 | Warning: this function makes non-log-space calculations and may therefore be
46 | numerically unstable for \verb{|logu| >> 0}.
47 | }
48 | \section{References}{
49 |
50 | \itemize{
51 | \item Lin, J. "Divergence measures based on the Shannon entropy." IEEE Trans.
52 | Inf. Th., 37, 145-151, 1991.
53 | }
54 | }
55 |
56 | \seealso{
57 | Other vi-functions:
58 | \code{\link{vi_amari_alpha}()},
59 | \code{\link{vi_arithmetic_geometric}()},
60 | \code{\link{vi_chi_square}()},
61 | \code{\link{vi_csiszar_vimco}()},
62 | \code{\link{vi_dual_csiszar_function}()},
63 | \code{\link{vi_fit_surrogate_posterior}()},
64 | \code{\link{vi_jeffreys}()},
65 | \code{\link{vi_kl_forward}()},
66 | \code{\link{vi_kl_reverse}()},
67 | \code{\link{vi_log1p_abs}()},
68 | \code{\link{vi_modified_gan}()},
69 | \code{\link{vi_monte_carlo_variational_loss}()},
70 | \code{\link{vi_pearson}()},
71 | \code{\link{vi_squared_hellinger}()},
72 | \code{\link{vi_symmetrized_csiszar_function}()}
73 | }
74 | \concept{vi-functions}
75 |
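76 | \examples{
77 | \dontrun{
78 | # Illustrative sketch only, assuming a working TensorFlow Probability
79 | # backend. With self_normalized = TRUE, f(1) = 0: both KL terms of the
80 | # implied divergence vanish when p = q.
81 | vi_jensen_shannon(0, self_normalized = TRUE)
82 | }
83 | }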
--------------------------------------------------------------------------------
/man/vi_kl_forward.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/vi-functions.R
3 | \name{vi_kl_forward}
4 | \alias{vi_kl_forward}
5 | \title{The forward Kullback-Leibler Csiszar-function in log-space}
6 | \usage{
7 | vi_kl_forward(logu, self_normalized = FALSE, name = NULL)
8 | }
9 | \arguments{
10 | \item{logu}{\code{float}-like \code{Tensor} representing \code{log(u)} from above.}
11 |
12 | \item{self_normalized}{\code{logical} indicating whether \verb{f'(u=1)=0}. When
13 | \verb{f'(u=1)=0} the implied Csiszar f-Divergence remains non-negative even
14 | when \verb{p, q} are unnormalized measures.}
15 |
16 | \item{name}{name prefixed to Ops created by this function.}
17 | }
18 | \value{
19 | kl_forward_of_u: \code{float}-like \code{Tensor} of the Csiszar-function evaluated at
20 | \code{u = exp(logu)}.
21 | }
22 | \description{
23 | A Csiszar-function is a member of \verb{F = \{ f:R_+ to R : f convex \}}.
24 | }
25 | \details{
26 | When \code{self_normalized = TRUE}, the KL-forward Csiszar-function is \verb{f(u) = u log(u) - (u - 1)}.
27 | When \code{self_normalized = FALSE} the \code{(u - 1)} term is omitted.
28 | Observe that as an f-Divergence, this Csiszar-function implies: \code{D_f[p, q] = KL[p, q]}.
29 | 
30 | The KL is "forward" because in maximum likelihood we think of minimizing \code{q} as in \code{KL[p, q]}.
31 |
32 | Warning: when \code{self_normalized = TRUE} this function makes non-log-space calculations and may therefore be numerically unstable for \verb{|logu| >> 0}.
33 | }
34 | \seealso{
35 | Other vi-functions:
36 | \code{\link{vi_amari_alpha}()},
37 | \code{\link{vi_arithmetic_geometric}()},
38 | \code{\link{vi_chi_square}()},
39 | \code{\link{vi_csiszar_vimco}()},
40 | \code{\link{vi_dual_csiszar_function}()},
41 | \code{\link{vi_fit_surrogate_posterior}()},
42 | \code{\link{vi_jeffreys}()},
43 | \code{\link{vi_jensen_shannon}()},
44 | \code{\link{vi_kl_reverse}()},
45 | \code{\link{vi_log1p_abs}()},
46 | \code{\link{vi_modified_gan}()},
47 | \code{\link{vi_monte_carlo_variational_loss}()},
48 | \code{\link{vi_pearson}()},
49 | \code{\link{vi_squared_hellinger}()},
50 | \code{\link{vi_symmetrized_csiszar_function}()}
51 | }
52 | \concept{vi-functions}
53 |
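54 | \examples{
55 | \dontrun{
56 | # Illustrative sketch only, assuming a working TensorFlow Probability
57 | # backend. With the default self_normalized = FALSE, f(u) = u log(u),
58 | # so logu = log(2) should give 2 log(2).
59 | vi_kl_forward(log(2))
60 | }
61 | }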
--------------------------------------------------------------------------------
/man/vi_kl_reverse.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/vi-functions.R
3 | \name{vi_kl_reverse}
4 | \alias{vi_kl_reverse}
5 | \title{The reverse Kullback-Leibler Csiszar-function in log-space}
6 | \usage{
7 | vi_kl_reverse(logu, self_normalized = FALSE, name = NULL)
8 | }
9 | \arguments{
10 | \item{logu}{\code{float}-like \code{Tensor} representing \code{log(u)} from above.}
11 |
12 | \item{self_normalized}{\code{logical} indicating whether \verb{f'(u=1)=0}. When
13 | \verb{f'(u=1)=0} the implied Csiszar f-Divergence remains non-negative even
14 | when \verb{p, q} are unnormalized measures.}
15 |
16 | \item{name}{name prefixed to Ops created by this function.}
17 | }
18 | \value{
19 | kl_reverse_of_u: \code{float}-like \code{Tensor} of the Csiszar-function evaluated at
20 | \code{u = exp(logu)}.
21 | }
22 | \description{
23 | A Csiszar-function is a member of \verb{F = \{ f:R_+ to R : f convex \}}.
24 | }
25 | \details{
26 | When \code{self_normalized = TRUE}, the KL-reverse Csiszar-function is \code{f(u) = -log(u) + (u - 1)}.
27 | When \code{self_normalized = FALSE} the \code{(u - 1)} term is omitted.
28 | Observe that as an f-Divergence, this Csiszar-function implies: \code{D_f[p, q] = KL[q, p]}.
29 |
30 | The KL is "reverse" because in maximum likelihood we think of minimizing \code{q} as in \code{KL[p, q]}.
31 |
32 | Warning: when \code{self_normalized = TRUE} this function makes non-log-space calculations and may therefore be numerically unstable for \verb{|logu| >> 0}.
33 | }
34 | \seealso{
35 | Other vi-functions:
36 | \code{\link{vi_amari_alpha}()},
37 | \code{\link{vi_arithmetic_geometric}()},
38 | \code{\link{vi_chi_square}()},
39 | \code{\link{vi_csiszar_vimco}()},
40 | \code{\link{vi_dual_csiszar_function}()},
41 | \code{\link{vi_fit_surrogate_posterior}()},
42 | \code{\link{vi_jeffreys}()},
43 | \code{\link{vi_jensen_shannon}()},
44 | \code{\link{vi_kl_forward}()},
45 | \code{\link{vi_log1p_abs}()},
46 | \code{\link{vi_modified_gan}()},
47 | \code{\link{vi_monte_carlo_variational_loss}()},
48 | \code{\link{vi_pearson}()},
49 | \code{\link{vi_squared_hellinger}()},
50 | \code{\link{vi_symmetrized_csiszar_function}()}
51 | }
52 | \concept{vi-functions}
53 |
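54 | \examples{
55 | \dontrun{
56 | # Illustrative sketch only, assuming a working TensorFlow Probability
57 | # backend. With the default self_normalized = FALSE, f(u) = -log(u),
58 | # so the result is simply the negation of logu.
59 | vi_kl_reverse(log(2))
60 | }
61 | }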
--------------------------------------------------------------------------------
/man/vi_log1p_abs.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/vi-functions.R
3 | \name{vi_log1p_abs}
4 | \alias{vi_log1p_abs}
5 | \title{The log1p-abs Csiszar-function in log-space}
6 | \usage{
7 | vi_log1p_abs(logu, name = NULL)
8 | }
9 | \arguments{
10 | \item{logu}{\code{float}-like \code{Tensor} representing \code{log(u)} from above.}
11 |
12 | \item{name}{name prefixed to Ops created by this function.}
13 | }
14 | \value{
15 | log1p_abs_of_u: \code{float}-like \code{Tensor} of the Csiszar-function
16 | evaluated at \code{u = exp(logu)}.
17 | }
18 | \description{
19 | A Csiszar-function is a member of \verb{F = \{ f:R_+ to R : f convex \}}.
20 | }
21 | \details{
22 | The Log1p-Abs Csiszar-function is:
23 |
24 | \if{html}{\out{}}\preformatted{f(u) = u**(sign(u-1)) - 1
25 | }\if{html}{\out{
}}
26 |
27 | This function is so-named because it was invented from the following recipe.
28 | Choose a convex function g such that g(0)=0 and solve for f:
29 |
30 | \if{html}{\out{}}\preformatted{log(1 + f(u)) = g(log(u)).
31 | <=>
32 | f(u) = exp(g(log(u))) - 1
33 | }\if{html}{\out{
}}
34 |
35 | That is, the graph is identically \code{g} when y-axis is \code{log1p}-domain and x-axis
36 | is \code{log}-domain.
37 |
38 | Warning: this function makes non-log-space calculations and may
39 | therefore be numerically unstable for \verb{|logu| >> 0}.
40 | }
41 | \seealso{
42 | Other vi-functions:
43 | \code{\link{vi_amari_alpha}()},
44 | \code{\link{vi_arithmetic_geometric}()},
45 | \code{\link{vi_chi_square}()},
46 | \code{\link{vi_csiszar_vimco}()},
47 | \code{\link{vi_dual_csiszar_function}()},
48 | \code{\link{vi_fit_surrogate_posterior}()},
49 | \code{\link{vi_jeffreys}()},
50 | \code{\link{vi_jensen_shannon}()},
51 | \code{\link{vi_kl_forward}()},
52 | \code{\link{vi_kl_reverse}()},
53 | \code{\link{vi_modified_gan}()},
54 | \code{\link{vi_monte_carlo_variational_loss}()},
55 | \code{\link{vi_pearson}()},
56 | \code{\link{vi_squared_hellinger}()},
57 | \code{\link{vi_symmetrized_csiszar_function}()}
58 | }
59 | \concept{vi-functions}
60 |
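61 | \examples{
62 | \dontrun{
63 | # Illustrative sketch only, assuming a working TensorFlow Probability
64 | # backend. f(u) = u^(sign(u - 1)) - 1 gives 1 at both u = 2 and u = 0.5.
65 | vi_log1p_abs(log(2))
66 | vi_log1p_abs(log(0.5))
67 | }
68 | }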
--------------------------------------------------------------------------------
/man/vi_modified_gan.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/vi-functions.R
3 | \name{vi_modified_gan}
4 | \alias{vi_modified_gan}
5 | \title{The Modified-GAN Csiszar-function in log-space}
6 | \usage{
7 | vi_modified_gan(logu, self_normalized = FALSE, name = NULL)
8 | }
9 | \arguments{
10 | \item{logu}{\code{float}-like \code{Tensor} representing \code{log(u)} from above.}
11 |
12 | \item{self_normalized}{\code{logical} indicating whether \verb{f'(u=1)=0}. When
13 | \verb{f'(u=1)=0} the implied Csiszar f-Divergence remains non-negative even
14 | when \verb{p, q} are unnormalized measures.}
15 |
16 | \item{name}{name prefixed to Ops created by this function.}
17 | }
18 | \value{
19 | modified_gan_of_u: \code{float}-like \code{Tensor} of the Csiszar-function
20 | evaluated at \code{u = exp(logu)}.
21 | }
22 | \description{
23 | A Csiszar-function is a member of \verb{F = \{ f:R_+ to R : f convex \}}.
24 | }
25 | \details{
26 | When \code{self_normalized = TRUE} the modified-GAN (Generative Adversarial
27 | Network) Csiszar-function is:
28 |
29 | \if{html}{\out{}}\preformatted{f(u) = log(1 + u) - log(u) + 0.5 (u - 1)
30 | }\if{html}{\out{
}}
31 |
32 | When \code{self_normalized = FALSE} the \code{0.5 (u - 1)} term is omitted.
33 | 
34 | The unmodified GAN Csiszar-function is identical to Jensen-Shannon (with
35 | \code{self_normalized = FALSE}).
36 |
37 | Warning: this function makes non-log-space calculations and may therefore be
38 | numerically unstable for \verb{|logu| >> 0}.
39 | }
40 | \seealso{
41 | Other vi-functions:
42 | \code{\link{vi_amari_alpha}()},
43 | \code{\link{vi_arithmetic_geometric}()},
44 | \code{\link{vi_chi_square}()},
45 | \code{\link{vi_csiszar_vimco}()},
46 | \code{\link{vi_dual_csiszar_function}()},
47 | \code{\link{vi_fit_surrogate_posterior}()},
48 | \code{\link{vi_jeffreys}()},
49 | \code{\link{vi_jensen_shannon}()},
50 | \code{\link{vi_kl_forward}()},
51 | \code{\link{vi_kl_reverse}()},
52 | \code{\link{vi_log1p_abs}()},
53 | \code{\link{vi_monte_carlo_variational_loss}()},
54 | \code{\link{vi_pearson}()},
55 | \code{\link{vi_squared_hellinger}()},
56 | \code{\link{vi_symmetrized_csiszar_function}()}
57 | }
58 | \concept{vi-functions}
59 |
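60 | \examples{
61 | \dontrun{
62 | # Illustrative sketch only, assuming a working TensorFlow Probability
63 | # backend. With the default self_normalized = FALSE,
64 | # f(u) = log(1 + u) - log(u), so u = 1 (logu = 0) gives log(2).
65 | vi_modified_gan(0)
66 | }
67 | }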
--------------------------------------------------------------------------------
/man/vi_pearson.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/vi-functions.R
3 | \name{vi_pearson}
4 | \alias{vi_pearson}
5 | \title{The Pearson Csiszar-function in log-space}
6 | \usage{
7 | vi_pearson(logu, name = NULL)
8 | }
9 | \arguments{
10 | \item{logu}{\code{float}-like \code{Tensor} representing \code{log(u)} from above.}
11 |
12 | \item{name}{name prefixed to Ops created by this function.}
13 | }
14 | \value{
15 | pearson_of_u: \code{float}-like \code{Tensor} of the Csiszar-function
16 | evaluated at \code{u = exp(logu)}.
17 | }
18 | \description{
19 | A Csiszar-function is a member of \verb{F = \{ f:R_+ to R : f convex \}}.
20 | }
21 | \details{
22 | The Pearson Csiszar-function is:
23 |
24 | \if{html}{\out{}}\preformatted{f(u) = (u - 1)**2
25 | }\if{html}{\out{
}}
26 |
27 | Warning: this function makes non-log-space calculations and may therefore be
28 | numerically unstable for \verb{|logu| >> 0}.
29 | }
30 | \seealso{
31 | Other vi-functions:
32 | \code{\link{vi_amari_alpha}()},
33 | \code{\link{vi_arithmetic_geometric}()},
34 | \code{\link{vi_chi_square}()},
35 | \code{\link{vi_csiszar_vimco}()},
36 | \code{\link{vi_dual_csiszar_function}()},
37 | \code{\link{vi_fit_surrogate_posterior}()},
38 | \code{\link{vi_jeffreys}()},
39 | \code{\link{vi_jensen_shannon}()},
40 | \code{\link{vi_kl_forward}()},
41 | \code{\link{vi_kl_reverse}()},
42 | \code{\link{vi_log1p_abs}()},
43 | \code{\link{vi_modified_gan}()},
44 | \code{\link{vi_monte_carlo_variational_loss}()},
45 | \code{\link{vi_squared_hellinger}()},
46 | \code{\link{vi_symmetrized_csiszar_function}()}
47 | }
48 | \concept{vi-functions}
49 |
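50 | \examples{
51 | \dontrun{
52 | # Illustrative sketch only, assuming a working TensorFlow Probability
53 | # backend. Since f(u) = (u - 1)^2, logu = log(3) should evaluate to 4.
54 | vi_pearson(log(3))
55 | }
56 | }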
--------------------------------------------------------------------------------
/man/vi_squared_hellinger.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/vi-functions.R
3 | \name{vi_squared_hellinger}
4 | \alias{vi_squared_hellinger}
5 | \title{The Squared-Hellinger Csiszar-function in log-space}
6 | \usage{
7 | vi_squared_hellinger(logu, name = NULL)
8 | }
9 | \arguments{
10 | \item{logu}{\code{float}-like \code{Tensor} representing \code{log(u)} from above.}
11 |
12 | \item{name}{name prefixed to Ops created by this function.}
13 | }
14 | \value{
15 | squared_hellinger_of_u: \code{float}-like \code{Tensor} of the Csiszar-function
16 | evaluated at \code{u = exp(logu)}.
17 | }
18 | \description{
19 | A Csiszar-function is a member of \verb{F = \{ f:R_+ to R : f convex \}}.
20 | }
21 | \details{
22 | The Squared-Hellinger Csiszar-function is:
23 |
24 | \if{html}{\out{}}\preformatted{f(u) = (sqrt(u) - 1)**2
25 | }\if{html}{\out{
}}
26 |
27 | This Csiszar-function induces a symmetric f-Divergence, i.e.,
28 | \code{D_f[p, q] = D_f[q, p]}.
29 |
30 | Warning: this function makes non-log-space calculations and may
31 | therefore be numerically unstable for \verb{|logu| >> 0}.
32 | }
33 | \seealso{
34 | Other vi-functions:
35 | \code{\link{vi_amari_alpha}()},
36 | \code{\link{vi_arithmetic_geometric}()},
37 | \code{\link{vi_chi_square}()},
38 | \code{\link{vi_csiszar_vimco}()},
39 | \code{\link{vi_dual_csiszar_function}()},
40 | \code{\link{vi_fit_surrogate_posterior}()},
41 | \code{\link{vi_jeffreys}()},
42 | \code{\link{vi_jensen_shannon}()},
43 | \code{\link{vi_kl_forward}()},
44 | \code{\link{vi_kl_reverse}()},
45 | \code{\link{vi_log1p_abs}()},
46 | \code{\link{vi_modified_gan}()},
47 | \code{\link{vi_monte_carlo_variational_loss}()},
48 | \code{\link{vi_pearson}()},
49 | \code{\link{vi_symmetrized_csiszar_function}()}
50 | }
51 | \concept{vi-functions}
52 |
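53 | \examples{
54 | \dontrun{
55 | # Illustrative sketch only, assuming a working TensorFlow Probability
56 | # backend. Since f(u) = (sqrt(u) - 1)^2, logu = log(4) should evaluate to 1.
57 | vi_squared_hellinger(log(4))
58 | }
59 | }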
--------------------------------------------------------------------------------
/man/vi_symmetrized_csiszar_function.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/vi-functions.R
3 | \name{vi_symmetrized_csiszar_function}
4 | \alias{vi_symmetrized_csiszar_function}
5 | \title{Symmetrizes a Csiszar-function in log-space}
6 | \usage{
7 | vi_symmetrized_csiszar_function(logu, csiszar_function, name = NULL)
8 | }
9 | \arguments{
10 | \item{logu}{\code{float}-like \code{Tensor} representing \code{log(u)} from above.}
11 |
12 | \item{csiszar_function}{function representing a Csiszar-function over log-domain.}
13 |
14 | \item{name}{name prefixed to Ops created by this function.}
15 | }
16 | \value{
17 | symmetrized_g_of_u: \code{float}-like \code{Tensor} of the result of applying the
18 | symmetrization of \code{g} evaluated at \code{u = exp(logu)}.
19 | }
20 | \description{
21 | A Csiszar-function is a member of \verb{F = \{ f:R_+ to R : f convex \}}.
22 | }
23 | \details{
24 | The symmetrized Csiszar-function is defined as:
25 |
26 | \if{html}{\out{}}\preformatted{f_g(u) = 0.5 g(u) + 0.5 u g (1 / u)
27 | }\if{html}{\out{
}}
28 |
29 | where \code{g} is some other Csiszar-function.
30 | We say the function is "symmetrized" because:
31 |
32 | \if{html}{\out{}}\preformatted{D_\{f_g\}[p, q] = D_\{f_g\}[q, p]
33 | }\if{html}{\out{
}}
34 |
35 | for all \verb{p << >> q} (i.e., \code{support(p) = support(q)}).
36 |
37 | There exist alternatives for symmetrizing a Csiszar-function. For example,
38 |
39 | \if{html}{\out{}}\preformatted{f_g(u) = max(f(u), f^*(u)),
40 | }\if{html}{\out{
}}
41 |
42 | where \verb{f^*} is the dual Csiszar-function, also implies a symmetric
43 | f-Divergence.
44 |
45 | Example:
46 | When either of the following functions is symmetrized, we obtain the
47 | Jensen-Shannon Csiszar-function, i.e.,
48 |
49 | \if{html}{\out{}}\preformatted{g(u) = -log(u) - (1 + u) log((1 + u) / 2) + u - 1
50 | h(u) = log(4) + 2 u log(u / (1 + u))
51 | }\if{html}{\out{
}}
52 |
53 | implies,
54 |
55 | \if{html}{\out{}}\preformatted{f_g(u) = f_h(u) = u log(u) - (1 + u) log((1 + u) / 2)
56 | = jensen_shannon(log(u)).
57 | }\if{html}{\out{
}}
58 |
59 | Warning: this function makes non-log-space calculations and may therefore be
60 | numerically unstable for \verb{|logu| >> 0}.
61 | }
62 | \seealso{
63 | Other vi-functions:
64 | \code{\link{vi_amari_alpha}()},
65 | \code{\link{vi_arithmetic_geometric}()},
66 | \code{\link{vi_chi_square}()},
67 | \code{\link{vi_csiszar_vimco}()},
68 | \code{\link{vi_dual_csiszar_function}()},
69 | \code{\link{vi_fit_surrogate_posterior}()},
70 | \code{\link{vi_jeffreys}()},
71 | \code{\link{vi_jensen_shannon}()},
72 | \code{\link{vi_kl_forward}()},
73 | \code{\link{vi_kl_reverse}()},
74 | \code{\link{vi_log1p_abs}()},
75 | \code{\link{vi_modified_gan}()},
76 | \code{\link{vi_monte_carlo_variational_loss}()},
77 | \code{\link{vi_pearson}()},
78 | \code{\link{vi_squared_hellinger}()}
79 | }
80 | \concept{vi-functions}
81 |
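82 | \examples{
83 | \dontrun{
84 | # Illustrative sketch only, assuming a working TensorFlow Probability
85 | # backend and that an R function can be passed as csiszar_function.
86 | # Symmetrizing forward KL yields the Jeffreys Csiszar-function, so these
87 | # two calls should agree at logu = 1.
88 | vi_symmetrized_csiszar_function(1, vi_kl_forward)
89 | vi_jeffreys(1)
90 | }
91 | }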
--------------------------------------------------------------------------------
/man/vi_t_power.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/vi-functions.R
3 | \name{vi_t_power}
4 | \alias{vi_t_power}
5 | \title{The T-Power Csiszar-function in log-space}
6 | \usage{
7 | vi_t_power(logu, t, self_normalized = FALSE, name = NULL)
8 | }
9 | \arguments{
10 | \item{logu}{\code{float}-like \code{Tensor} representing \code{log(u)} from above.}
11 |
12 | \item{t}{\code{Tensor} of same \code{dtype} as \code{logu} and broadcastable shape.}
13 |
14 | \item{self_normalized}{\code{logical} indicating whether \verb{f'(u=1)=0}. When
15 | \verb{f'(u=1)=0} the implied Csiszar f-Divergence remains non-negative even
16 | when \verb{p, q} are unnormalized measures.}
17 |
18 | \item{name}{name prefixed to Ops created by this function.}
19 | }
20 | \value{
21 | t_power_of_u: \code{float}-like \code{Tensor} of the Csiszar-function
22 | evaluated at \code{u = exp(logu)}.
23 | }
24 | \description{
25 | A Csiszar-function is a member of \verb{F = \{ f:R_+ to R : f convex \}}.
26 | }
27 | \details{
28 | When \code{self_normalized = TRUE} the T-Power Csiszar-function is:
29 |
30 | \if{html}{\out{}}\preformatted{f(u) = s [ u**t - 1 - t(u - 1) ]
31 | s = \{ -1  if 0 < t < 1 \}
32 | \{ +1  otherwise \}
33 | }\if{html}{\out{
}}
34 |
35 | When \code{self_normalized = FALSE} the \code{- t(u - 1)} term is omitted.
36 |
37 | This is similar to the \code{amari_alpha} Csiszar-function, with the associated
38 | divergence being the same up to factors depending only on \code{t}.
39 |
40 | Warning: when \code{self_normalized = TRUE} this function makes non-log-space calculations and may therefore be numerically unstable for \verb{|logu| >> 0}.
41 | }
42 | \seealso{
43 | Other vi-functions:
44 | \code{\link{vi_total_variation}()},
45 | \code{\link{vi_triangular}()}
46 | }
47 | \concept{vi-functions}
48 |
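49 | \examples{
50 | \dontrun{
51 | # Illustrative sketch only, assuming a working TensorFlow Probability
52 | # backend. With t = 2 and the default self_normalized = FALSE,
53 | # f(u) = u^2 - 1, so logu = log(2) should evaluate to 3.
54 | vi_t_power(log(2), t = 2)
55 | }
56 | }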
--------------------------------------------------------------------------------
/man/vi_total_variation.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/vi-functions.R
3 | \name{vi_total_variation}
4 | \alias{vi_total_variation}
5 | \title{The Total Variation Csiszar-function in log-space}
6 | \usage{
7 | vi_total_variation(logu, name = NULL)
8 | }
9 | \arguments{
10 | \item{logu}{\code{float}-like \code{Tensor} representing \code{log(u)} from above.}
11 |
12 | \item{name}{name prefixed to Ops created by this function.}
13 | }
14 | \value{
15 | total_variation_of_u: \code{float}-like \code{Tensor} of the Csiszar-function
16 | evaluated at \code{u = exp(logu)}.
17 | }
18 | \description{
19 | A Csiszar-function is a member of \verb{F = \{ f:R_+ to R : f convex \}}.
20 | }
21 | \details{
22 | The Total-Variation Csiszar-function is:
23 |
24 | \if{html}{\out{}}\preformatted{f(u) = 0.5 |u - 1|
25 | }\if{html}{\out{
}}
26 |
27 | Warning: this function makes non-log-space calculations and may therefore be
28 | numerically unstable for \verb{|logu| >> 0}.
29 | }
30 | \seealso{
31 | Other vi-functions:
32 | \code{\link{vi_t_power}()},
33 | \code{\link{vi_triangular}()}
34 | }
35 | \concept{vi-functions}
36 |
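37 | \examples{
38 | \dontrun{
39 | # Illustrative sketch only, assuming a working TensorFlow Probability
40 | # backend. Since f(u) = 0.5 |u - 1|, logu = log(3) should evaluate to 1.
41 | vi_total_variation(log(3))
42 | }
43 | }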
--------------------------------------------------------------------------------
/man/vi_triangular.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/vi-functions.R
3 | \name{vi_triangular}
4 | \alias{vi_triangular}
5 | \title{The Triangular Csiszar-function in log-space}
6 | \usage{
7 | vi_triangular(logu, name = NULL)
8 | }
9 | \arguments{
10 | \item{logu}{\code{float}-like \code{Tensor} representing \code{log(u)} from above.}
11 |
12 | \item{name}{name prefixed to Ops created by this function.}
13 | }
14 | \value{
15 | triangular_of_u: \code{float}-like \code{Tensor} of the Csiszar-function
16 | evaluated at \code{u = exp(logu)}.
17 | }
18 | \description{
19 | The Triangular Csiszar-function is:
20 | }
21 | \details{
22 | \if{html}{\out{}}\preformatted{f(u) = (u - 1)**2 / (1 + u)
23 | }\if{html}{\out{
}}
24 |
25 | Warning: this function makes non-log-space calculations and may
26 | therefore be numerically unstable for \verb{|logu| >> 0}.
27 | }
28 | \seealso{
29 | Other vi-functions:
30 | \code{\link{vi_t_power}()},
31 | \code{\link{vi_total_variation}()}
32 | }
33 | \concept{vi-functions}
34 |
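35 | \examples{
36 | \dontrun{
37 | # Illustrative sketch only, assuming a working TensorFlow Probability
38 | # backend. Since f(u) = (u - 1)^2 / (1 + u), logu = log(3) should give 1.
39 | vi_triangular(log(3))
40 | }
41 | }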
--------------------------------------------------------------------------------
/pkgdown/_pkgdown.yml:
--------------------------------------------------------------------------------
1 | template:
2 | params:
3 | bootswatch: flatly
4 |
5 | reference:
6 |
7 | - title: "Distributions"
8 | contents:
9 | - has_concept("distributions")
10 | - title: "Distribution methods"
11 | contents:
12 | - has_concept("distribution_methods")
13 | - title: "Keras layers: Distribution layers"
14 | contents:
15 | - has_concept("distribution_layers")
16 | - title: "Keras layers: Other"
17 | contents:
18 | - has_concept("layers")
19 | - title: "Bijectors"
20 | contents:
21 | - has_concept("bijectors")
22 | - title: "Bijector methods"
23 | contents:
24 | - has_concept("bijector_methods")
25 | - title: "Variational inference"
26 | contents:
27 | - has_concept("vi-functions")
28 | - title: "MCMC kernels"
29 | contents:
30 | - has_concept("mcmc_kernels")
31 | - title: "MCMC functions"
32 | contents:
33 | - has_concept("mcmc_functions")
34 | - title: "Structural time series models"
35 | contents:
36 | - has_concept("sts")
37 | - title: "Structural time series modeling functions"
38 | contents:
39 | - has_concept("sts-functions")
40 | - title: "Generalized Linear Models"
41 | contents:
42 | - has_concept("glm_fit")
43 |
44 |
45 |
--------------------------------------------------------------------------------
/tests/testthat.R:
--------------------------------------------------------------------------------
1 | library(testthat)
2 | library(tfprobability)
3 |
4 | if (identical(Sys.getenv("NOT_CRAN"), "true")) {
5 | test_check("tfprobability")
6 | }
7 |
--------------------------------------------------------------------------------
/tests/testthat/helper-utils.R:
--------------------------------------------------------------------------------
1 |
2 | Sys.setenv("CUDA_VISIBLE_DEVICES" = "")
3 |
4 | options(testthat.progress.max_fails = Inf)
5 | tensorflow::as_tensor(1) # touch the TF runtime once so it is initialized before tests run
6 |
7 | have_tfp <- function() {
8 | reticulate::py_module_available("tensorflow_probability")
9 | }
10 |
11 | skip_if_no_tfp <- function() {
12 | if (!have_tfp())
13 | skip("TensorFlow Probability not available for testing")
14 | }
15 |
16 | skip_if_tfp_below <- function(version) {
17 | if (tfprobability:::tfp_version() < version) {
18 | skip(paste0("Skipped since this test requires TensorFlow Probability >= ", version))
19 | }
20 | }
21 |
22 | skip_if_tfp_above <- function(version) {
23 | if (tfprobability:::tfp_version() > version) {
24 | skip(paste0("Skipped since this test requires TensorFlow Probability <= ", version))
25 | }
26 | }
27 |
28 | skip_if_tf_below <- function(version) {
29 | if (tensorflow:::tf_version() < version) {
30 | skip(paste0("Skipped since this test requires TensorFlow >= ", version))
31 | }
32 | }
33 |
34 | skip_if_tf_above <- function(version) {
35 | if (tensorflow:::tf_version() > version) {
36 | skip(paste0("Skipped since this test requires TensorFlow <= ", version))
37 | }
38 | }
39 |
40 | skip_if_not_eager <- function() {
41 | if (!tf$executing_eagerly())
42 | skip("This test requires eager execution")
43 | }
44 |
45 | skip_if_eager <- function() {
46 | if (tf$executing_eagerly())
47 | skip("This test requires graph execution")
48 | }
49 |
50 | test_succeeds <- function(desc, expr) {
51 | test_that(desc, {
52 | skip_if_no_tfp()
53 | expect_error(force(expr), NA)
54 | })
55 | }
56 |
57 | tensor_value <- function(tensor) {
58 | if (tf$executing_eagerly()) {
59 | as.array(tensor)
60 | } else {
61 | sess <- tf$compat$v1$Session()
62 | sess$run(tf$global_variables_initializer())
63 | sess$run(tensor)
64 | }
65 | }
66 |
67 | as_tensors <- tfprobability:::as_tensors
68 |
--------------------------------------------------------------------------------
/tests/testthat/test-distribution-staticmethods.R:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rstudio/tfprobability/917025f48b800e497470449c1589cb48f8ad1fe0/tests/testthat/test-distribution-staticmethods.R
--------------------------------------------------------------------------------
/tests/testthat/test-glm.R:
--------------------------------------------------------------------------------
1 |
2 | test_succeeds("glm_fit.tensorflow.tensor works", {
3 |
4 | skip_if_tfp_below("0.8")
5 |
6 | x <- matrix(runif(100), ncol = 2)
7 | y <- rnorm(50, mean = rowSums(x), sd = 0.2)
8 |
9 | model <- glm_fit(x, y, model = tfp$glm$Normal())
10 | model_r <- glm(y ~ 0 + x[,1] + x[,2])
11 |
12 | expect_equivalent(as.numeric(model[[1]]), model_r$coefficients)
13 | expect_s3_class(model, "glm_fit")
14 |
15 |
16 | model <- glm_fit(x, y, model = "Normal")
17 | model_r <- glm(y ~ 0 + x[,1] + x[,2])
18 |
19 | expect_equivalent(as.numeric(model[[1]]), model_r$coefficients)
20 | expect_s3_class(model, "glm_fit")
21 | })
22 |
23 | test_succeeds("glm_fit_one_step.tensorflow.tensor works", {
24 |
25 | skip_if_tfp_below("0.8")
26 |
27 | x <- matrix(runif(100), ncol = 2)
28 | y <- rnorm(50, mean = rowSums(x), sd = 0.2)
29 |
30 | model <- glm_fit_one_step(x, y, model = tfp$glm$Normal())
31 | model_r <- glm(y ~ 0 + x[,1] + x[,2])
32 |
33 | expect_equivalent(as.numeric(model[[1]]), model_r$coefficients)
34 | expect_s3_class(model, "glm_fit")
35 |
36 |
37 | model <- glm_fit_one_step(x, y, model = "Normal")
38 | model_r <- glm(y ~ 0 + x[,1] + x[,2])
39 |
40 | expect_equivalent(as.numeric(model[[1]]), model_r$coefficients)
41 | expect_s3_class(model, "glm_fit")
42 | })
43 |
44 |
45 |
--------------------------------------------------------------------------------
/tests/testthat/test-initializers.R:
--------------------------------------------------------------------------------
1 | context("tensorflow probability keras initializers")
2 |
3 | test_succeeds("initializer_blockwise works", {
4 |
5 | init <- initializer_blockwise(
6 | initializers = lapply(1:5, keras::initializer_constant),
7 | sizes = rep(1, 5)
8 | )
9 |
10 | layer <- keras::layer_dense(units = 5, input_shape = 1, kernel_initializer = init)
11 | layer$build(input_shape = 1L)
12 |
13 | expect_equivalent(as.numeric(keras::get_weights(layer)[[1]]), 1:5)
14 | })
15 |
--------------------------------------------------------------------------------
/tfprobability.Rproj:
--------------------------------------------------------------------------------
1 | Version: 1.0
2 |
3 | RestoreWorkspace: No
4 | SaveWorkspace: No
5 | AlwaysSaveHistory: Default
6 |
7 | EnableCodeIndexing: Yes
8 | UseSpacesForTab: Yes
9 | NumSpacesForTab: 2
10 | Encoding: UTF-8
11 |
12 | RnwWeave: Sweave
13 | LaTeX: pdfLaTeX
14 |
15 | AutoAppendNewline: Yes
16 | StripTrailingWhitespace: Yes
17 |
18 | BuildType: Package
19 | PackageUseDevtools: Yes
20 | PackageInstallArgs: --no-multiarch --with-keep.source
21 | PackageCheckArgs: --as-cran
22 | PackageRoxygenize: rd,collate,namespace,vignette
23 |
--------------------------------------------------------------------------------
/vignettes/images/capm_filtered.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rstudio/tfprobability/917025f48b800e497470449c1589cb48f8ad1fe0/vignettes/images/capm_filtered.png
--------------------------------------------------------------------------------
/vignettes/images/capm_forecast.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rstudio/tfprobability/917025f48b800e497470449c1589cb48f8ad1fe0/vignettes/images/capm_forecast.png
--------------------------------------------------------------------------------
/vignettes/images/capm_smoothed.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rstudio/tfprobability/917025f48b800e497470449c1589cb48f8ad1fe0/vignettes/images/capm_smoothed.png
--------------------------------------------------------------------------------
/vignettes/images/thumb.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rstudio/tfprobability/917025f48b800e497470449c1589cb48f8ad1fe0/vignettes/images/thumb.png
--------------------------------------------------------------------------------
/vignettes/images/uncertainty.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rstudio/tfprobability/917025f48b800e497470449c1589cb48f8ad1fe0/vignettes/images/uncertainty.png
--------------------------------------------------------------------------------
/vignettes/images/uncertainty_data.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rstudio/tfprobability/917025f48b800e497470449c1589cb48f8ad1fe0/vignettes/images/uncertainty_data.png
--------------------------------------------------------------------------------