├── .Rbuildignore ├── .github ├── .gitignore └── workflows │ └── R-CMD-check.yaml ├── .gitignore ├── .travis.yml ├── CONDUCT.md ├── DESCRIPTION ├── NAMESPACE ├── NEWS ├── R ├── bf.R ├── bridge_methods.R ├── bridge_sampler.R ├── bridge_sampler_internals.R ├── bridge_sampler_normal.R ├── bridge_sampler_tools.R ├── bridge_sampler_warp3.R ├── error_measures.R ├── ier-data.R ├── logml.R ├── post_prob.R └── turtles-data.R ├── README.md ├── bridgesampling.Rproj ├── data ├── ier.rda └── turtles.rda ├── development.R ├── doc ├── bridgesampling_example_jags.R ├── bridgesampling_example_jags.Rmd ├── bridgesampling_example_jags.html ├── bridgesampling_example_nimble.R ├── bridgesampling_example_nimble.Rmd ├── bridgesampling_example_nimble.html ├── bridgesampling_example_stan.R ├── bridgesampling_example_stan.Rmd ├── bridgesampling_example_stan.html ├── bridgesampling_paper.R ├── bridgesampling_paper.pdf ├── bridgesampling_paper.pdf.asis ├── bridgesampling_paper_extended.R ├── bridgesampling_paper_extended.pdf ├── bridgesampling_paper_extended.pdf.asis ├── bridgesampling_stan_ttest.R ├── bridgesampling_stan_ttest.Rmd ├── bridgesampling_stan_ttest.html ├── bridgesampling_tutorial.R ├── bridgesampling_tutorial.pdf └── bridgesampling_tutorial.pdf.asis ├── examples ├── example.bridge_sampler.R ├── example.ier.R ├── example.post_prob.R └── example.turtles.R ├── inst ├── CITATION └── extdata │ ├── vignette_example_jags.RData │ ├── vignette_example_nimble.RData │ ├── vignette_example_stan.RData │ └── vignette_stan_ttest.RData ├── man ├── bf.Rd ├── bridge-methods.Rd ├── bridge_sampler.Rd ├── error_measures.Rd ├── ier.Rd ├── logml.Rd ├── post_prob.Rd └── turtles.Rd ├── revdep ├── .gitignore ├── README.md ├── email.yml ├── failures.md └── problems.md ├── tests ├── testthat.R └── testthat │ ├── test-bf.R │ ├── test-bridge_sampler.R │ ├── test-bridge_sampler_Rcpp.R │ ├── test-bridge_sampler_Rcpp_parallel.R │ ├── test-bridge_sampler_mcmc.list.R │ ├── test-bridge_sampler_parallel.R │ ├── 
test-bridge_sampler_print_method.R │ ├── test-bridge_sampler_summary_method.R │ ├── test-nimble_bridge_sampler.R │ ├── test-post_prob.R │ ├── test-stan_bridge_sampler_basic.R │ ├── test-stan_bridge_sampler_bugs.R │ ├── test-stanreg_bridge_sampler_basic.R │ ├── test-vignette_example_jags.R │ ├── test-vignette_example_nimble.R │ ├── test-vignette_example_stan.R │ ├── test-vignette_stan_ttest.R │ ├── test_dat.txt │ ├── unnormalized_normal_density.cpp │ └── unnormalized_normal_density_mu.cpp └── vignettes ├── bridgesampling_example_jags.Rmd ├── bridgesampling_example_nimble.Rmd ├── bridgesampling_example_simplex_or_circular_parameter_spaces.Rmd ├── bridgesampling_example_stan.Rmd ├── bridgesampling_paper.pdf ├── bridgesampling_paper.pdf.asis ├── bridgesampling_paper_extended.pdf ├── bridgesampling_paper_extended.pdf.asis ├── bridgesampling_stan_ttest.Rmd ├── bridgesampling_tutorial.pdf └── bridgesampling_tutorial.pdf.asis /.Rbuildignore: -------------------------------------------------------------------------------- 1 | ^.*\.Rproj$ 2 | ^\.Rproj\.user$ 3 | development.R 4 | examples 5 | ^\.travis\.yml$ 6 | README.md 7 | CONDUCT.md 8 | ^vignettes/bridgesampling_example_simplex_or_circular_parameter_spaces\.Rmd$ 9 | ^doc$ 10 | ^Meta$ 11 | 12 | ^revdep$ 13 | ^\.github$ 14 | -------------------------------------------------------------------------------- /.github/.gitignore: -------------------------------------------------------------------------------- 1 | *.html 2 | -------------------------------------------------------------------------------- /.github/workflows/R-CMD-check.yaml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | branches: 4 | - master 5 | pull_request: 6 | branches: 7 | - master 8 | 9 | name: R-CMD-check 10 | 11 | jobs: 12 | R-CMD-check: 13 | runs-on: ${{ matrix.config.os }} 14 | 15 | name: ${{ matrix.config.os }} (${{ matrix.config.r }}) 16 | 17 | strategy: 18 | fail-fast: false 19 | matrix: 20 | 
config: 21 | - {os: windows-latest, r: 'release'} 22 | - {os: macOS-latest, r: 'release'} 23 | - {os: macOS-latest, r: 'devel'} 24 | - {os: ubuntu-16.04, r: 'release', rspm: "https://packagemanager.rstudio.com/cran/__linux__/xenial/latest"} 25 | 26 | env: 27 | R_REMOTES_NO_ERRORS_FROM_WARNINGS: true 28 | RSPM: ${{ matrix.config.rspm }} 29 | 30 | steps: 31 | - uses: actions/checkout@v2 32 | 33 | - uses: r-lib/actions/setup-r@master 34 | with: 35 | r-version: ${{ matrix.config.r }} 36 | 37 | - uses: r-lib/actions/setup-pandoc@master 38 | 39 | - name: Query dependencies 40 | run: | 41 | install.packages('remotes') 42 | saveRDS(remotes::dev_package_deps(dependencies = TRUE), ".github/depends.Rds", version = 2) 43 | writeLines(sprintf("R-%i.%i", getRversion()$major, getRversion()$minor), ".github/R-version") 44 | shell: Rscript {0} 45 | 46 | - name: Cache R packages 47 | if: runner.os != 'Windows' 48 | uses: actions/cache@v1 49 | with: 50 | path: ${{ env.R_LIBS_USER }} 51 | key: ${{ runner.os }}-${{ hashFiles('.github/R-version') }}-1-${{ hashFiles('.github/depends.Rds') }} 52 | restore-keys: ${{ runner.os }}-${{ hashFiles('.github/R-version') }}-1- 53 | 54 | - name: Install system dependencies 55 | if: runner.os == 'Linux' 56 | env: 57 | RHUB_PLATFORM: linux-x86_64-ubuntu-gcc 58 | run: | 59 | Rscript -e "remotes::install_github('r-hub/sysreqs')" 60 | sysreqs=$(Rscript -e "cat(sysreqs::sysreq_commands('DESCRIPTION'))") 61 | sudo -s eval "$sysreqs" 62 | 63 | - name: Install dependencies 64 | run: | 65 | remotes::install_deps(dependencies = TRUE) 66 | remove.packages(c("StanHeaders", "rstan", "BH", "RcppParallel", "RcppEigen")) 67 | install.packages("BH", type = "source") 68 | install.packages("RcppEigen", type = "source") 69 | install.packages("RcppParallel", type = "source") 70 | install.packages("StanHeaders", type = "source") 71 | install.packages("rstan", type = "source") 72 | remotes::install_cran("rcmdcheck") 73 | shell: Rscript {0} 74 | 75 | - name: Check 76 | 
env: 77 | _R_CHECK_CRAN_INCOMING_REMOTE_: false 78 | run: | 79 | if (R.version$major < 4 && isTRUE(.Platform$OS.type == "windows")) { 80 | dotR <- file.path(Sys.getenv("HOME"), ".R") 81 | if (!file.exists(dotR)) dir.create(dotR) 82 | path_makevars <- ifelse(.Platform$OS.type == "windows", "Makevars.win", "Makevars") 83 | M <- file.path(dotR, path_makevars) 84 | if (!file.exists(M)) file.create(M) 85 | cat("\nCXX14FLAGS=-O3", 86 | "CXX14 = $(BINPREF)g++ -m$(WIN) -std=c++1y", 87 | "CXX11FLAGS=-O3", 88 | file = M, sep = "\n", append = TRUE) 89 | } 90 | rcmdcheck::rcmdcheck(args = c("--no-manual", "--as-cran"), error_on = "warning", check_dir = "check") 91 | shell: Rscript {0} 92 | 93 | - name: Upload check results 94 | if: failure() 95 | uses: actions/upload-artifact@master 96 | with: 97 | name: ${{ runner.os }}-r${{ matrix.config.r }}-results 98 | path: check 99 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .Rproj.user/ 2 | .Rproj.user 3 | .Rhistory 4 | Meta 5 | 6 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | # R for travis: see documentation at https://docs.travis-ci.com/user/languages/r 2 | 3 | language: r 4 | r: devel 5 | sudo: FALSE 6 | compiler: clang 7 | 8 | cache: 9 | apt: true 10 | packages: true 11 | ccache: true 12 | 13 | warnings_are_errors: TRUE 14 | 15 | r_build_args: '--no-build-vignettes' 16 | r_check_args: '--ignore-vignettes' 17 | 18 | addons: 19 | apt: 20 | packages: 21 | - jags 22 | r_binary_packages: 23 | - rstanarm 24 | - Matrix 25 | - coda 26 | - testthat 27 | - rmarkdown 28 | - knitr 29 | - stringr 30 | - mvtnorm 31 | 32 | -------------------------------------------------------------------------------- /CONDUCT.md: 
-------------------------------------------------------------------------------- 1 | # Contributor Code of Conduct 2 | 3 | As contributors and maintainers of this project, we pledge to respect all people who 4 | contribute through reporting issues, posting feature requests, updating documentation, 5 | submitting pull requests or patches, and other activities. 6 | 7 | We are committed to making participation in this project a harassment-free experience for 8 | everyone, regardless of level of experience, gender, gender identity and expression, 9 | sexual orientation, disability, personal appearance, body size, race, ethnicity, age, or religion. 10 | 11 | Examples of unacceptable behavior by participants include the use of sexual language or 12 | imagery, derogatory comments or personal attacks, trolling, public or private harassment, 13 | insults, or other unprofessional conduct. 14 | 15 | Project maintainers have the right and responsibility to remove, edit, or reject comments, 16 | commits, code, wiki edits, issues, and other contributions that are not aligned to this 17 | Code of Conduct. Project maintainers who do not follow the Code of Conduct may be removed 18 | from the project team. 19 | 20 | Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by 21 | opening an issue or contacting one or more of the project maintainers. 
22 | 23 | This Code of Conduct is adapted from the Contributor Covenant 24 | (http://contributor-covenant.org), version 1.0.0, available at 25 | http://contributor-covenant.org/version/1/0/0/ 26 | -------------------------------------------------------------------------------- /DESCRIPTION: -------------------------------------------------------------------------------- 1 | Package: bridgesampling 2 | Type: Package 3 | Title: Bridge Sampling for Marginal Likelihoods and Bayes Factors 4 | Version: 1.1-5 5 | Authors@R: c(person(given="Quentin F.", family="Gronau", role=c("aut", "cre"), 6 | email="Quentin.F.Gronau@gmail.com", 7 | comment=c(ORCID="0000-0001-5510-6943")), 8 | person(given="Henrik", family="Singmann", role="aut", 9 | comment=c(ORCID="0000-0002-4842-3657")), 10 | person(given="Jonathan J.", family="Forster", role="ctb"), 11 | person(given="Eric-Jan", family="Wagenmakers", role="ths"), 12 | person(family="The JASP Team", role="ctb"), 13 | person("Jiqiang", "Guo", role = "ctb"), 14 | person("Jonah", "Gabry", role = "ctb"), 15 | person("Ben", "Goodrich", role = c("ctb")), 16 | person("Kees", "Mulder", role = c("ctb")), 17 | person("Perry", "de Valpine", role = c("ctb")) 18 | ) 19 | Depends: 20 | R (>= 3.0.0) 21 | Imports: 22 | mvtnorm, 23 | Matrix, 24 | Brobdingnag, 25 | stringr, 26 | coda, 27 | parallel, 28 | scales, 29 | utils, 30 | methods 31 | Suggests: 32 | testthat, 33 | Rcpp, 34 | RcppEigen, 35 | R2jags, 36 | rjags, 37 | runjags, 38 | knitr, 39 | rmarkdown, 40 | R.rsp, 41 | BayesFactor, 42 | rstan, 43 | rstanarm, 44 | nimble, 45 | MCMCpack 46 | Description: Provides functions for estimating marginal likelihoods, Bayes 47 | factors, posterior model probabilities, and normalizing constants in general, 48 | via different versions of bridge sampling (Meng & Wong, 1996, 49 | <https://www.jstor.org/stable/24306045>). 50 | Gronau, Singmann, & Wagenmakers (2020) <doi:10.18637/jss.v092.i10>. 
51 | License: GPL (>=2) 52 | LazyData: true 53 | RoxygenNote: 7.2.3 54 | VignetteBuilder: knitr, R.rsp 55 | URL: https://github.com/quentingronau/bridgesampling 56 | -------------------------------------------------------------------------------- /NAMESPACE: -------------------------------------------------------------------------------- 1 | # Generated by roxygen2: do not edit by hand 2 | 3 | S3method(bayes_factor,default) 4 | S3method(bf,bridge) 5 | S3method(bf,bridge_list) 6 | S3method(bf,default) 7 | S3method(bridge_sampler,MCMC_refClass) 8 | S3method(bridge_sampler,matrix) 9 | S3method(bridge_sampler,mcmc) 10 | S3method(bridge_sampler,mcmc.list) 11 | S3method(bridge_sampler,rjags) 12 | S3method(bridge_sampler,runjags) 13 | S3method(bridge_sampler,stanfit) 14 | S3method(bridge_sampler,stanreg) 15 | S3method(error_measures,bridge) 16 | S3method(error_measures,bridge_list) 17 | S3method(logml,bridge) 18 | S3method(logml,bridge_list) 19 | S3method(post_prob,bridge) 20 | S3method(post_prob,bridge_list) 21 | S3method(post_prob,default) 22 | S3method(print,bf_bridge) 23 | S3method(print,bf_bridge_list) 24 | S3method(print,bf_default) 25 | S3method(print,bridge) 26 | S3method(print,bridge_list) 27 | S3method(print,summary.bridge) 28 | S3method(print,summary.bridge_list) 29 | S3method(summary,bridge) 30 | S3method(summary,bridge_list) 31 | export(bayes_factor) 32 | export(bf) 33 | export(bridge_sampler) 34 | export(error_measures) 35 | export(logml) 36 | export(post_prob) 37 | import(Brobdingnag) 38 | importFrom(Matrix,nearPD) 39 | importFrom(coda,spectrum0.ar) 40 | importFrom(methods,is) 41 | importFrom(mvtnorm,dmvnorm) 42 | importFrom(mvtnorm,rmvnorm) 43 | importFrom(stats,cov) 44 | importFrom(stats,dnorm) 45 | importFrom(stats,median) 46 | importFrom(stats,pnorm) 47 | importFrom(stats,qnorm) 48 | importFrom(stats,var) 49 | importFrom(stringr,str_sub) 50 | importFrom(utils,read.csv) 51 | -------------------------------------------------------------------------------- 
/R/bf.R: -------------------------------------------------------------------------------- 1 | #' Generic function that computes Bayes factor(s) from marginal likelihoods. \code{bayes_factor()} is simply an (S3 generic) alias for \code{bf()}. 2 | #' @export 3 | #' @title Bayes Factor(s) from Marginal Likelihoods 4 | #' @param x1 Object of class \code{"bridge"} or \code{"bridge_list"} as returned from \code{\link{bridge_sampler}}. Additionally, the default method assumes that \code{x1} is a single numeric log marginal likelihood (e.g., from \code{\link{logml}}) and will throw an error otherwise. 5 | #' @param x2 Object of class \code{"bridge"} or \code{"bridge_list"} as returned from \code{\link{bridge_sampler}}. Additionally, the default method assumes that \code{x2} is a single numeric log marginal likelihood (e.g., from \code{\link{logml}}) and will throw an error otherwise. 6 | #' @param log Boolean. If \code{TRUE}, the function returns the log of the Bayes factor. Default is \code{FALSE}. 7 | #' @param ... currently not used here, but can be used by other methods. 8 | #' @details Computes the Bayes factor (Kass & Raftery, 1995) in favor of the model associated with \code{x1} over the model associated with \code{x2}. 9 | #' @return For the default method returns a list of class \code{"bf_default"} with components: 10 | #' \itemize{ 11 | #' \item \code{bf}: (scalar) value of the Bayes factor in favor of the model associated with \code{x1} over the model associated with \code{x2}. 12 | #' \item \code{log}: Boolean which indicates whether \code{bf} corresponds to the log Bayes factor. 13 | #' } 14 | #' 15 | #' 16 | #' For the method for \code{"bridge"} objects returns a list of class \code{"bf_bridge"} with components: 17 | #' \itemize{ 18 | #' \item \code{bf}: (scalar) value of the Bayes factor in favor of the model associated with \code{x1} over the model associated with \code{x2}. 
19 | #' \item \code{log}: Boolean which indicates whether \code{bf} corresponds to the log Bayes factor. 20 | #' } 21 | #' 22 | #' 23 | #' For the method for \code{"bridge_list"} objects returns a list of class \code{"bf_bridge_list"} with components: 24 | #' \itemize{ 25 | #' \item \code{bf}: a numeric vector consisting of Bayes factors where each element gives the Bayes factor for one set of logmls in favor of the model associated with \code{x1} over the model associated with \code{x2}. The length of this vector is given by the \code{"bridge_list"} element with the most \code{repetitions}. Elements with fewer repetitions will be recycled (with warning). 26 | #' \item \code{bf_median_based}: (scalar) value of the Bayes factor in favor of the model associated with \code{x1} over the model associated with \code{x2} that is based on the median values of the logml estimates. 27 | #' \item \code{log}: Boolean which indicates whether \code{bf} corresponds to the log Bayes factor. 28 | #' } 29 | #' @author Quentin F. Gronau 30 | #' @note For examples, see \code{\link{bridge_sampler}} and the accompanying vignettes: \cr \code{vignette("bridgesampling_example_jags")} \cr \code{vignette("bridgesampling_example_stan")} 31 | #' @references 32 | #' Kass, R. E., & Raftery, A. E. (1995). Bayes factors. \emph{Journal of the American Statistical Association, 90(430)}, 773-795. \doi{10.1080/01621459.1995.10476572} 33 | #' @importFrom methods is 34 | bf <- function(x1, x2, log = FALSE, ...) { 35 | UseMethod("bf", x1) 36 | } 37 | 38 | #' @rdname bf 39 | #' @export 40 | bayes_factor <- function(x1, x2, log = FALSE, ...) { 41 | UseMethod("bayes_factor", x1) 42 | } 43 | 44 | 45 | #' @rdname bf 46 | #' @export 47 | bayes_factor.default <- function(x1, x2, log = FALSE, ...) { 48 | bf(x1, x2, log = log, ...) 49 | } 50 | 51 | 52 | .bf_calc <- function(logml1, logml2, log) { 53 | bf <- logml1 - logml2 54 | if (! 
log) 55 | bf <- exp(bf) 56 | return(bf) 57 | } 58 | 59 | #' @rdname bf 60 | #' @export 61 | bf.bridge <- function(x1, x2, log = FALSE, ...) { 62 | if (!inherits(x2, c("bridge", "bridge_list"))) 63 | stop("x2 needs to be of class 'bridge' or 'bridge_list'.", call. = FALSE) 64 | bf <- .bf_calc(logml(x1), logml(x2), log = log) 65 | out <- list(bf = bf, log = log) 66 | class(out) <- "bf_bridge" 67 | try({ 68 | mc <- match.call() 69 | name1 <- deparse(mc[["x1"]]) 70 | name2 <- deparse(mc[["x2"]]) 71 | attr(out, "model_names") <- c(name1, name2) 72 | }, silent = TRUE) 73 | return(out) 74 | } 75 | 76 | 77 | #' @rdname bf 78 | #' @export 79 | bf.bridge_list <- function(x1, x2, log = FALSE, ...) { 80 | if (!inherits(x2, c("bridge", "bridge_list"))) 81 | stop("x2 needs to be of class 'bridge' or 'bridge_list'.", call. = FALSE) 82 | logml1 <- x1$logml 83 | logml2 <- x2$logml 84 | median1 <- median(logml1, na.rm = TRUE) 85 | median2 <- median(logml2, na.rm = TRUE) 86 | len1 <- length(logml1) 87 | len2 <- length(logml2) 88 | max_len <- max(c(len1, len2)) 89 | if (!all(c(len1, len2) == max_len)) { 90 | warning("Not all objects provide ", max_len, 91 | " logmls. Some values are recycled.", call. = FALSE) 92 | logml1 <- rep(logml1, length.out = max_len) 93 | logml2 <- rep(logml2, length.out = max_len) 94 | } 95 | bf <- .bf_calc(logml1, logml2, log = log) 96 | bf_median_based <- .bf_calc(median1, median2, log = log) 97 | out <- list(bf = bf, bf_median_based = bf_median_based, log = log) 98 | class(out) <- "bf_bridge_list" 99 | try({ 100 | mc <- match.call() 101 | name1 <- deparse(mc[["x1"]]) 102 | name2 <- deparse(mc[["x2"]]) 103 | attr(out, "model_names") <- c(name1, name2) 104 | }, silent = TRUE) 105 | return(out) 106 | } 107 | 108 | #' @rdname bf 109 | #' @export 110 | bf.default <- function(x1, x2, log = FALSE, ...) { 111 | if (!is.numeric(c(x1, x2))) { 112 | stop("logml values need to be numeric", call. 
= FALSE) 113 | } 114 | if (length(x1) > 1 || length(x2) > 1) { 115 | stop("Both logmls need to be scalar values.", call. = FALSE) 116 | } 117 | bf <- .bf_calc(x1, x2, log = log) 118 | out <- list(bf = bf, log = log) 119 | class(out) <- "bf_default" 120 | try({ 121 | mc <- match.call() 122 | name1 <- deparse(mc[["x1"]]) 123 | name2 <- deparse(mc[["x2"]]) 124 | attr(out, "model_names") <- c(name1, name2) 125 | }, silent = TRUE) 126 | return(out) 127 | } 128 | 129 | ######## Methods for bf objects: 130 | 131 | #' @method print bf_bridge 132 | #' @export 133 | print.bf_bridge <- function(x, ...) { 134 | if(!is.null(attr(x, "model_names"))) { 135 | model_names <- attr(x, "model_names") 136 | } else { 137 | model_names <- c("x1", "x2") 138 | } 139 | cat("Estimated ", if (x$log) "log " else NULL , 140 | "Bayes factor in favor of ", model_names[1], 141 | " over ", model_names[2], ": ", 142 | formatC(x$bf, digits = 5, format = "f"), 143 | "\n", sep = "") 144 | } 145 | 146 | #' @method print bf_bridge_list 147 | #' @export 148 | print.bf_bridge_list <- function(x, na.rm = TRUE,...) { 149 | if(!is.null(attr(x, "model_names"))) { 150 | model_names <- attr(x, "model_names") 151 | } else { 152 | model_names <- c("x1", "x2") 153 | } 154 | cat("Estimated ", if (x$log) "log " else NULL , 155 | "Bayes factor (based on medians of log marginal likelihood estimates)\n", 156 | " in favor of ", model_names[1], " over ", model_names[2], ": ", 157 | formatC(x$bf_median_based, digits = 5, format = "f"), 158 | "\nRange of estimates: ", 159 | formatC(range(x$bf, na.rm=na.rm)[1], digits = 5, format = "f"), " to ", 160 | formatC(range(x$bf, na.rm = na.rm)[2], digits = 5, format = "f"), 161 | "\nInterquartile range: ", 162 | formatC(stats::IQR(x$bf, na.rm = na.rm), digits = 5, format = "f"), 163 | "\n", sep = "") 164 | if (any(is.na(x$bf))) warning(sum(is.na(x$bf)), 165 | " log Bayes factor estimate(s) are NAs.", 166 | call. 
= FALSE) 167 | } 168 | 169 | #' @method print bf_default 170 | #' @export 171 | print.bf_default <- function(x, ...) { 172 | if(!is.null(attr(x, "model_names"))) { 173 | model_names <- attr(x, "model_names") 174 | } else { 175 | model_names <- c("Model 1", "Model 2") 176 | } 177 | cat(if (x$log) "Log " else NULL , 178 | "Bayes factor in favor of ", model_names[1], 179 | " over ", model_names[2], ": ", 180 | formatC(x$bf, digits = 5, format = "f"), 181 | "\n", sep = "") 182 | } 183 | -------------------------------------------------------------------------------- /R/bridge_methods.R: -------------------------------------------------------------------------------- 1 | #' Methods for bridge and bridge_list objects 2 | #' 3 | #' Methods defined for objects returned from the generic \code{\link{bridge_sampler}} function. 4 | #' 5 | #' @param object,x object of class \code{bridge} or \code{bridge_list} as returned from \code{\link{bridge_sampler}}. 6 | #' @param na.rm logical. Should NA estimates in \code{bridge_list} objects be removed? Passed to \code{\link{error_measures}}. 7 | #' @param ... further arguments, currently ignored. 8 | #' 9 | #' @return 10 | #' The \code{summary} methods return a \code{data.frame} which contains the log marginal likelihood plus the result returned from invoking \code{\link{error_measures}}. 11 | #' 12 | #' The \code{print} methods simply print and return nothing. 13 | #' 14 | #' 15 | #' @name bridge-methods 16 | NULL 17 | 18 | 19 | # summary methods 20 | 21 | #' @rdname bridge-methods 22 | #' @method summary bridge 23 | #' @export 24 | summary.bridge <- function(object, na.rm = TRUE, ...) { 25 | 26 | if( ! (object$method %in% c("normal", "warp3"))) { 27 | stop('object$method needs to be either "normal" or "warp3".', call. 
= FALSE) 28 | } 29 | 30 | if (object$method == "normal") { 31 | 32 | em <- error_measures(object) 33 | out <- data.frame("Logml_Estimate" = object$logml, 34 | "Relative_Mean_Squared_Error" = em$re2, 35 | "Coefficient_of_Variation" = em$cv, 36 | "Percentage_Error" = em$percentage, 37 | "Method" = object$method, 38 | "Repetitions" = 1, 39 | stringsAsFactors = FALSE) 40 | 41 | } else if (object$method == "warp3") { 42 | 43 | out <- data.frame("Logml_Estimate" = object$logml, 44 | "Method" = object$method, 45 | "Repetitions" = 1) 46 | 47 | } 48 | 49 | class(out) <- c("summary.bridge", "data.frame") 50 | return(out) 51 | 52 | } 53 | 54 | #' @rdname bridge-methods 55 | #' @method summary bridge_list 56 | #' @export 57 | summary.bridge_list <- function(object, na.rm = TRUE, ...) { 58 | 59 | if( ! (object$method %in% c("normal", "warp3"))) { 60 | stop('object$method needs to be either "normal" or "warp3".', call. = FALSE) 61 | } 62 | 63 | em <- error_measures(object, na.rm = na.rm) 64 | out <- data.frame("Logml_Estimate" = median(object$logml, na.rm = na.rm), 65 | "Min" = em$min, 66 | "Max" = em$max, 67 | "Interquartile_Range" = em$IQR, 68 | "Method" = object$method, 69 | "Repetitions" = object$repetitions) 70 | 71 | class(out) <- c("summary.bridge_list", "data.frame") 72 | return(out) 73 | 74 | } 75 | 76 | # print summary methods 77 | 78 | #' @rdname bridge-methods 79 | #' @method print summary.bridge 80 | #' @export 81 | print.summary.bridge <- function(x, ...) 
{ 82 | 83 | if (x[["Method"]] == "normal") { 84 | 85 | cat('\nBridge sampling log marginal likelihood estimate \n(method = "', 86 | as.character(x[["Method"]]), 87 | '", repetitions = ', x[["Repetitions"]], '):\n\n ', 88 | x[["Logml_Estimate"]], 89 | '\n\nError Measures:\n\n Relative Mean-Squared Error: ', 90 | x[["Relative_Mean_Squared_Error"]], 91 | '\n Coefficient of Variation: ', x[["Coefficient_of_Variation"]], 92 | '\n Percentage Error: ', x[["Percentage_Error"]], 93 | '\n\nNote:\nAll error measures are approximate.\n\n', sep = "") 94 | 95 | } else if (x[["Method"]] == "warp3") { 96 | 97 | cat('\nBridge sampling log marginal likelihood estimate \n(method = "', 98 | as.character(x[["Method"]]), 99 | '", repetitions = ', x[["Repetitions"]], '):\n\n ', 100 | x[["Logml_Estimate"]], 101 | '\n\nNote:\nNo error measures are available for method = "warp3"', 102 | '\nwith repetitions = 1.', 103 | '\nWe recommend to run the warp3 procedure multiple times to', 104 | '\nassess the uncertainty of the estimate.\n\n', sep = "") 105 | 106 | } 107 | 108 | } 109 | 110 | #' @rdname bridge-methods 111 | #' @method print summary.bridge_list 112 | #' @export 113 | print.summary.bridge_list <- function(x, ...) { 114 | cat('\nBridge sampling log marginal likelihood estimate \n(method = "', 115 | as.character(x[["Method"]]), '", repetitions = ', x[["Repetitions"]], 116 | '):\n\n ', x[["Logml_Estimate"]], 117 | '\n\nError Measures:\n\n Min: ', x[["Min"]], 118 | '\n Max: ', x[["Max"]], 119 | '\n Interquartile Range: ', x[["Interquartile_Range"]], 120 | '\n\nNote:\nAll error measures are based on ', x[["Repetitions"]], 121 | ' estimates.\n\n', sep = "") 122 | 123 | } 124 | 125 | # print methods 126 | 127 | #' @rdname bridge-methods 128 | #' @method print bridge 129 | #' @export 130 | print.bridge <- function(x, ...) 
{ 131 | 132 | cat("Bridge sampling estimate of the log marginal likelihood: ", 133 | round(x$logml, 5), "\nEstimate obtained in ", x$niter, 134 | " iteration(s) via method \"", x$method, "\".\n", sep = "") 135 | } 136 | 137 | #' @rdname bridge-methods 138 | #' @method print bridge_list 139 | #' @export 140 | print.bridge_list <- function(x, na.rm = TRUE, ...) { 141 | 142 | cat("Median of ", x$repetitions, " bridge sampling estimates\nof the log marginal likelihood: ", 143 | round(median(x$logml, na.rm = na.rm), 5), "\nRange of estimates: ", round(range(x$logml, na.rm = na.rm)[1], 5), " to ", 144 | round(range(x$logml, na.rm = na.rm)[2], 5), 145 | "\nInterquartile range: ", round(stats::IQR(x$logml, na.rm = na.rm), 5), "\nMethod: ", x$method, "\n", sep = "") 146 | if (any(is.na(x$logml))) warning(sum(is.na(x$logml))," bridge sampling estimate(s) are NAs.", call. = FALSE) 147 | } 148 | 149 | 150 | -------------------------------------------------------------------------------- /R/bridge_sampler_tools.R: -------------------------------------------------------------------------------- 1 | 2 | #-------------------------------------------------------------------------- 3 | # functions for Stan support via rstan 4 | #-------------------------------------------------------------------------- 5 | 6 | # taken from rstan: 7 | .rstan_relist <- function (x, skeleton) { 8 | lst <- utils::relist(x, skeleton) 9 | for (i in seq_along(skeleton)) dim(lst[[i]]) <- dim(skeleton[[i]]) 10 | lst 11 | } 12 | 13 | # taken from rstan: 14 | .create_skeleton <- function (pars, dims) { 15 | lst <- lapply(seq_along(pars), function(i) { 16 | len_dims <- length(dims[[i]]) 17 | if (len_dims < 1) 18 | return(0) 19 | return(array(0, dim = dims[[i]])) 20 | }) 21 | names(lst) <- pars 22 | lst 23 | } 24 | 25 | .stan_log_posterior <- function(s.row, data) { 26 | out <- tryCatch(rstan::log_prob(object = data$stanfit, upars = s.row), error = function(e) -Inf) 27 | if (is.na(out)) out <- -Inf 28 | 
return(out) 29 | } 30 | 31 | 32 | -------------------------------------------------------------------------------- /R/error_measures.R: -------------------------------------------------------------------------------- 1 | #' Computes error measures for estimated marginal likelihood. 2 | #' @export 3 | #' @title Error Measures for Estimated Marginal Likelihood 4 | #' @param bridge_object an object of class \code{"bridge"} or \code{"bridge_list"} as returned from \code{\link{bridge_sampler}}. 5 | #' @param na.rm a logical indicating whether missing values in logml estimates should be removed. Ignored for the \code{bridge} method. 6 | #' @param ... additional arguments (currently ignored). 7 | #' @details Computes error measures for marginal likelihood bridge sampling estimates. The approximate errors for a \code{bridge_object} of class \code{"bridge"} that has been obtained with \code{method = "normal"} and \code{repetitions = 1} are based on Fruehwirth-Schnatter (2004). 8 | #' Not applicable in case the object of class \code{"bridge"} has been obtained with \code{method = "warp3"} and \code{repetitions = 1}. 9 | #' To assess the uncertainty of the estimate in this case, it is recommended to run the \code{"warp3"} procedure multiple times. 10 | #' @return If \code{bridge_object} is of class \code{"bridge"} and has been obtained with \code{method = "normal"} and \code{repetitions = 1}, returns a list with components: 11 | #' \itemize{ 12 | #' \item \code{re2}: approximate relative mean-squared error for marginal likelihood estimate. 13 | #' \item \code{cv}: approximate coefficient of variation for marginal likelihood estimate (assumes that bridge estimate is unbiased). 14 | #' \item \code{percentage}: approximate percentage error of marginal likelihood estimate. 15 | #' } 16 | #' If \code{bridge_object} is of class \code{"bridge_list"}, returns a list with components: 17 | #' \itemize{ 18 | #' \item \code{min}: minimum of the log marginal likelihood estimates. 
19 | #' \item \code{max}: maximum of the log marginal likelihood estimates. 20 | #' \item \code{IQR}: interquartile range of the log marginal likelihood estimates. 21 | #' } 22 | #' @author Quentin F. Gronau 23 | #' @note For examples, see \code{\link{bridge_sampler}} and the accompanying vignettes: \cr \code{vignette("bridgesampling_example_jags")} \cr \code{vignette("bridgesampling_example_stan")} 24 | #' 25 | #' @seealso The \code{summary} methods for \code{bridge} and \code{bridge_list} objects automatically invoke this function, see \code{\link{bridge-methods}}. 26 | #' 27 | #' @references 28 | #' Fruehwirth-Schnatter, S. (2004). Estimating marginal likelihoods for mixture and Markov switching models using bridge sampling techniques. \emph{The Econometrics Journal, 7}, 143-167. \doi{10.1111/j.1368-423X.2004.00125.x} 29 | #' @import Brobdingnag 30 | #' @importFrom coda spectrum0.ar 31 | #' @export 32 | error_measures <- function (bridge_object, ...) { 33 | UseMethod("error_measures", bridge_object) 34 | } 35 | 36 | #' @rdname error_measures 37 | #' @export 38 | error_measures.bridge <- function(bridge_object,...) 
{ 39 | 40 | if (bridge_object$method == "warp3") { 41 | stop(paste0("error_measures not implemented for warp3 method with", 42 | "\n repetitions = 1.", 43 | "\n We recommend to run the warp3 procedure multiple times", 44 | "\n to assess the uncertainty of the estimate.")) 45 | } 46 | 47 | e <- as.brob( exp(1) ) 48 | 49 | ml <- e^(bridge_object$logml) 50 | g_p <- e^(bridge_object$q12) 51 | g_g <- e^(bridge_object$q22) 52 | priorTimesLik_p <- e^(bridge_object$q11) 53 | priorTimesLik_g <- e^(bridge_object$q21) 54 | p_p <- priorTimesLik_p/ml 55 | p_g <- priorTimesLik_g/ml 56 | 57 | N1 <- length(p_p) 58 | N2 <- length(g_g) 59 | s1 <- N1/(N1 + N2) 60 | s2 <- N2/(N1 + N2) 61 | 62 | f1 <- as.numeric( p_g/(s1*p_g + s2*g_g) ) 63 | f2 <- as.numeric( g_p/(s1*p_p + s2*g_p) ) 64 | rho_f2 <- spectrum0.ar( f2 )$spec 65 | 66 | term1 <- 1/N2 * var( f1 ) / mean( f1 )^2 67 | term2 <- rho_f2/N1 * var( f2 ) / mean( f2 )^2 68 | 69 | re2 <- term1 + term2 70 | 71 | # convert to coefficient of variation (assumes that bridge estimate is unbiased) 72 | cv <- sqrt(re2) 73 | 74 | # convert to percentage error 75 | percentage <- scales::percent(cv) 76 | return(list(re2 = re2, cv = cv, percentage = percentage)) 77 | 78 | } 79 | 80 | #' @rdname error_measures 81 | #' @export 82 | error_measures.bridge_list <- function(bridge_object, na.rm = TRUE, ...) { 83 | 84 | return(list(min = min(bridge_object$logml, na.rm = na.rm), 85 | max = max(bridge_object$logml, na.rm = na.rm), 86 | IQR = stats::IQR(bridge_object$logml, na.rm = na.rm))) 87 | 88 | } 89 | -------------------------------------------------------------------------------- /R/ier-data.R: -------------------------------------------------------------------------------- 1 | #' Standardized International Exchange Rate Changes from 1975 to 1986 2 | #' 3 | #' This data set contains the changes in monthly international exchange rates for pounds sterling from January 1975 to December 1986 obtained from West and Harrison (1997, pp. 612-615). 
Currencies tracked are US Dollar (column \code{us_dollar}), Canadian Dollar (column \code{canadian_dollar}), Japanese Yen (column \code{yen}), French Franc (column \code{franc}), Italian Lira (column \code{lira}), and the (West) German Mark (column \code{mark}). Each series has been standardized with respect to its sample mean and standard deviation. 4 | #' 5 | #' @docType data 6 | #' @keywords dataset 7 | #' @name ier 8 | #' @usage ier 9 | #' @format A matrix with 143 rows and 6 columns. 10 | #' @source West, M., Harrison, J. (1997). \emph{Bayesian forecasting and dynamic models} (2nd ed.). Springer-Verlag, New York. 11 | #' 12 | #' Lopes, H. F., West, M. (2004). Bayesian model assessment in factor analysis. \emph{Statistica Sinica, 14}, 41-67. 13 | #' @encoding UTF-8 14 | #' 15 | #' @example examples/example.ier.R 16 | NULL 17 | -------------------------------------------------------------------------------- /R/logml.R: -------------------------------------------------------------------------------- 1 | #' Generic function that returns log marginal likelihood from bridge objects. For objects of class \code{"bridge_list"}, which contains multiple log marginal likelihoods, \code{fun} is performed on the vector and its result returned. 2 | #' @title Log Marginal Likelihoods from Bridge Objects 3 | #' @param x Object of class \code{"bridge"} or \code{"bridge_list"} as returned from \code{\link{bridge_sampler}}. 4 | #' @param fun Function which returns a scalar value and is applied to the \code{logml} vector of \code{"bridge_list"} objects. Default is \code{\link{median}}. 5 | #' @param ... Further arguments passed to \code{fun}. 6 | #' @return scalar numeric 7 | #' @export 8 | logml <- function (x, ...) { 9 | UseMethod("logml", x) 10 | } 11 | 12 | 13 | #' @rdname logml 14 | #' @export 15 | logml.bridge <- function (x, ...) { 16 | x$logml 17 | } 18 | 19 | #' @rdname logml 20 | #' @export 21 | logml.bridge_list <- function (x, fun = median, ...) 
{ 22 | out <- fun(x$logml, ...) 23 | if (length(out) != 1) { 24 | warning("fun returns results of length != 1, only first used.") 25 | out <- out[1] 26 | } 27 | out 28 | } 29 | 30 | -------------------------------------------------------------------------------- /R/post_prob.R: -------------------------------------------------------------------------------- 1 | #' Generic function that computes posterior model probabilities from marginal 2 | #' likelihoods. 3 | #' @export 4 | #' @title Posterior Model Probabilities from Marginal Likelihoods 5 | #' @param x Object of class \code{"bridge"} or \code{"bridge_list"} as returned 6 | #' from \code{\link{bridge_sampler}}. Additionally, the default method assumes 7 | #' that all passed objects are numeric log marginal likelihoods (e.g., from 8 | #' \code{\link{logml}}) and will throw an error otherwise. 9 | #' @param ... further objects of class \code{"bridge"} or \code{"bridge_list"} 10 | #' as returned from \code{\link{bridge_sampler}}. Or numeric values for the 11 | #' default method. 12 | #' @param prior_prob numeric vector with prior model probabilities. If omitted, 13 | #' a uniform prior is used (i.e., all models are equally likely a priori). The 14 | #' default \code{NULL} corresponds to equal prior model weights. 15 | #' @param model_names If \code{NULL} (the default) will use model names derived 16 | #' from deparsing the call. Otherwise will use the passed values as model 17 | #' names. 18 | #' 19 | #' @return For the default method and the method for \code{"bridge"} objects, a 20 | #' named numeric vector with posterior model probabilities (i.e., which sum to 21 | #' one). 22 | #' 23 | #' For the method for \code{"bridge_list"} objects, a matrix consisting of 24 | #' posterior model probabilities where each row sums to one and gives the 25 | #' model probabilities for one set of logmls. 
The (named) columns correspond 26 | #' to the models and the number of rows is given by the \code{"bridge_list"} 27 | #' element with the most \code{repetitions}. Elements with fewer repetitions 28 | #' will be recycled (with warning). 29 | #' @author Quentin F. Gronau and Henrik Singmann 30 | #' @note For realistic examples, see \code{\link{bridge_sampler}} and the 31 | #' accompanying vignettes: \cr \code{vignette("bridgesampling_example_jags")} 32 | #' \cr \code{vignette("bridgesampling_example_stan")} 33 | #' @example examples/example.post_prob.R 34 | #' @importFrom methods is 35 | post_prob <- function (x, ..., prior_prob = NULL, model_names = NULL) { 36 | UseMethod("post_prob", x) 37 | } 38 | 39 | #' @rdname post_prob 40 | #' @export 41 | post_prob.bridge <- function(x, ..., prior_prob = NULL, model_names = NULL) { 42 | dots <- list(...) 43 | mc <- match.call() 44 | modb <- vapply(dots, inherits, NA, what = c("bridge", "bridge_list")) 45 | if (is.null(model_names)) 46 | model_names <- c(deparse(mc[["x"]]), vapply(which(modb), function(x) deparse(mc[[x+2]]), "")) 47 | if (sum(modb) == 0) 48 | stop("Only one object of class 'bridge' or 'bridge_list' passed.", call. = FALSE) 49 | if (sum(modb) != length(dots)) 50 | warning("Objects not of class 'bridge' or 'bridge_list' are ignored.", call. = FALSE) 51 | 52 | logml <- vapply(c(list(x), dots[modb]), logml, FUN.VALUE = 0) 53 | 54 | .post_prob_calc(logml=logml, model_names = model_names, prior_prob=prior_prob) 55 | } 56 | 57 | 58 | #' @rdname post_prob 59 | #' @export 60 | post_prob.bridge_list <- function(x, ..., prior_prob = NULL, model_names = NULL) { 61 | dots <- list(...) 62 | mc <- match.call() 63 | modb <- vapply(dots, inherits, NA, what = c("bridge", "bridge_list")) 64 | if (is.null(model_names)) 65 | model_names <- c(deparse(mc[["x"]]), vapply(which(modb), function(x) deparse(mc[[x+2]]), "")) 66 | if (sum(modb) == 0) 67 | stop("Only one object of class 'bridge' or 'bridge_list' passed.", call. 
= FALSE) 68 | if (sum(modb) != length(dots)) 69 | warning("Objects not of class 'bridge' or 'bridge_list' are ignored.", call. = FALSE) 70 | 71 | logml <- lapply(c(list(x), dots[modb]), "[[", i = "logml") 72 | len <- vapply(logml, length, FUN.VALUE = 0) 73 | if (!all(len == max(len))) { 74 | warning("Not all objects provide ", max(len), " logmls. Some values are recycled.", call. = FALSE) 75 | logml <- lapply(logml, function(x) rep(x, length.out = max(len))) 76 | } 77 | t(apply(as.data.frame(logml), 1, .post_prob_calc, 78 | model_names = model_names, prior_prob=prior_prob)) 79 | } 80 | 81 | #' @rdname post_prob 82 | #' @export 83 | post_prob.default <- function(x, ..., prior_prob = NULL, model_names = NULL) { 84 | dots <- list(...) 85 | mc <- match.call() 86 | if (is.null(model_names)) 87 | model_names <- c(rep(deparse(mc[["x"]]), length(x)), 88 | rep(vapply(seq_along(dots), function(x) deparse(mc[[x+2]]), ""), 89 | times = vapply(dots, length, 0))) 90 | logml <- c(x, unlist(dots)) 91 | if (!is.numeric(logml)) { 92 | stop("logml values need to be numeric", call. = FALSE) 93 | } 94 | .post_prob_calc(logml=logml, model_names = model_names, prior_prob=prior_prob) 95 | 96 | } 97 | 98 | .post_prob_calc <- function(logml, model_names, prior_prob) { 99 | e <- as.brob(exp(1)) 100 | 101 | if(is.null(prior_prob)) 102 | prior_prob <- rep(1/length(logml), length(logml)) 103 | 104 | if(!isTRUE(all.equal(sum(prior_prob), 1))) 105 | stop("Prior model probabilities do not sum to one.", call. = FALSE) 106 | 107 | if(length(logml) != length(prior_prob)) 108 | stop("Number of objects/logml-values needs to match number of elements in prior_prob.", call. = FALSE) 109 | 110 | if(any(is.na(logml))) { 111 | post_prob <- rep(NA_real_, length(logml)) 112 | warning("NAs in logml values. No posterior probabilities calculated.", call. 
= FALSE) 113 | } else { 114 | post_prob <- as.numeric(e^logml*prior_prob / sum(e^logml*prior_prob)) 115 | if(!isTRUE(all.equal(sum(post_prob), 1))) 116 | warning("Posterior model probabilities do not sum to one.", call. = FALSE) 117 | } 118 | names(post_prob) <- make.unique(as.character(model_names)) 119 | 120 | return(post_prob) 121 | 122 | } 123 | 124 | -------------------------------------------------------------------------------- /R/turtles-data.R: -------------------------------------------------------------------------------- 1 | #' Turtles Data from Janzen, Tucker, and Paukstis (2000) 2 | #' 3 | #' This data set contains information about 244 newborn turtles from 31 4 | #' different clutches. For each turtle, the data set includes information about 5 | #' survival status (column \code{y}; 0 = died, 1 = survived), birth weight in 6 | #' grams (column \code{x}), and clutch (family) membership (column 7 | #' \code{clutch}; an integer between one and 31). The clutches have been ordered 8 | #' according to mean birth weight. 9 | #' 10 | #' @docType data 11 | #' @keywords dataset 12 | #' @name turtles 13 | #' @usage turtles 14 | #' @format A data.frame with 244 rows and 3 variables. 15 | #' @source Janzen, F. J., Tucker, J. K., & Paukstis, G. L. (2000). Experimental 16 | #' analysis of an early life-history stage: Selection on size of hatchling 17 | #' turtles. \emph{Ecology, 81(8)}, 2290-2304. 18 | #' \doi{10.2307/177115} 19 | #' 20 | #' Overstall, A. M., & Forster, J. J. (2010). Default Bayesian model 21 | #' determination methods for generalised linear mixed models. 22 | #' \emph{Computational Statistics & Data Analysis, 54}, 3269-3288. 23 | #' \doi{10.1016/j.csda.2010.03.008} 24 | #' 25 | #' Sinharay, S., & Stern, H. S. (2005). An empirical comparison of methods for 26 | #' computing Bayes factors in generalized linear mixed models. \emph{Journal 27 | #' of Computational and Graphical Statistics, 14(2)}, 415-435. 
28 | #' \doi{10.1198/106186005X47471} 29 | #' @encoding UTF-8 30 | #' 31 | #' @example examples/example.turtles.R 32 | NULL 33 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | [![R build status](https://github.com/quentingronau/bridgesampling/workflows/R-CMD-check/badge.svg)](https://github.com/quentingronau/bridgesampling/actions) 3 | [![Travis-CI Build Status](https://travis-ci.org/quentingronau/bridgesampling.svg?branch=master)](https://travis-ci.org/quentingronau/bridgesampling) 4 | 5 | 6 | bridgesampling: Bridge Sampling for Marginal Likelihoods and Bayes Factors 7 | ==== 8 | 9 | `bridgesampling` is an R package for conducting Bayesian model comparisons using bridge sampling (Meng & Wong, 1996). 10 | Specifically, it allows one to compute marginal likelihoods, Bayes factors, and posterior model probabilities. 11 | 12 | Meng, X.-L., & Wong, W. H. (1996). Simulating ratios of normalizing constants via a simple identity: A theoretical exploration. *Statistica Sinica*, 6, 831-860. 13 | 14 | For additional information, see the vignettes with a hierarchical normal example implemented in Jags ([link](https://htmlpreview.github.io/?https://github.com/quentingronau/bridgesampling/blob/master/inst/doc/bridgesampling_example_jags.html)) and in Stan ([link](https://htmlpreview.github.io/?https://github.com/quentingronau/bridgesampling/blob/master/inst/doc/bridgesampling_example_stan.html)), and a Bayesian t-test example implemented in Stan ([link](https://htmlpreview.github.io/?https://github.com/quentingronau/bridgesampling/blob/master/inst/doc/bridgesampling_stan_ttest.html)). 
15 | 16 | 17 | ## Installation 18 | 19 | - `bridgesampling` is [available from CRAN](https://cloud.r-project.org/package=bridgesampling) so the current stable version can be installed directly via: `install.packages("bridgesampling")` 20 | 21 | - To install the latest development version you will need the [`devtools`](https://github.com/hadley/devtools) package: 22 | `devtools::install_github("quentingronau/bridgesampling@master")` 23 | 24 | - For building, use `--compact-vignettes="gs+qpdf"` 25 | 26 | ---- 27 | Please note that this project is released with a [Contributor Code of Conduct](CONDUCT.md). By participating in this project you agree to abide by its terms. 28 | -------------------------------------------------------------------------------- /bridgesampling.Rproj: -------------------------------------------------------------------------------- 1 | Version: 1.0 2 | 3 | RestoreWorkspace: Default 4 | SaveWorkspace: Default 5 | AlwaysSaveHistory: Default 6 | 7 | EnableCodeIndexing: Yes 8 | UseSpacesForTab: Yes 9 | NumSpacesForTab: 2 10 | Encoding: UTF-8 11 | 12 | RnwWeave: Sweave 13 | LaTeX: pdfLaTeX 14 | 15 | AutoAppendNewline: Yes 16 | StripTrailingWhitespace: Yes 17 | 18 | BuildType: Package 19 | PackageUseDevtools: Yes 20 | PackageInstallArgs: --no-multiarch --with-keep.source 21 | PackageBuildArgs: --compact-vignettes=gs+qpdf 22 | PackageCheckArgs: --as-cran 23 | PackageRoxygenize: rd,collate,namespace 24 | -------------------------------------------------------------------------------- /data/ier.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quentingronau/bridgesampling/6793386ffe30205d649d2fe5ce9597c0bff83142/data/ier.rda -------------------------------------------------------------------------------- /data/turtles.rda: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/quentingronau/bridgesampling/6793386ffe30205d649d2fe5ce9597c0bff83142/data/turtles.rda -------------------------------------------------------------------------------- /development.R: -------------------------------------------------------------------------------- 1 | require(devtools) 2 | require(testthat) 3 | options(error = NULL) 4 | 5 | load_all() 6 | devtools::test() 7 | 8 | devtools::document() 9 | 10 | build_vignettes() 11 | devtools::build(args = '--compact-vignettes=gs+qpdf') 12 | 13 | Sys.setenv(`_R_CHECK_FORCE_SUGGESTS_` = "false") 14 | Sys.setenv(NOT_CRAN = "false") 15 | devtools::check() 16 | 17 | ### check reverse dependencies: 18 | 19 | usethis::use_revdep() 20 | revdepcheck::revdep_check(num_workers = 4) 21 | -------------------------------------------------------------------------------- /doc/bridgesampling_example_jags.R: -------------------------------------------------------------------------------- 1 | ## ------------------------------------------------------------------------ 2 | library(bridgesampling) 3 | 4 | ### generate data ### 5 | set.seed(12345) 6 | 7 | mu <- 0 8 | tau2 <- 0.5 9 | sigma2 <- 1 10 | 11 | n <- 20 12 | theta <- rnorm(n, mu, sqrt(tau2)) 13 | y <- rnorm(n, theta, sqrt(sigma2)) 14 | 15 | 16 | ## ----eval=FALSE---------------------------------------------------------- 17 | # ### set prior parameters ### 18 | # mu0 <- 0 19 | # tau20 <- 1 20 | # alpha <- 1 21 | # beta <- 1 22 | 23 | ## ---- eval=FALSE--------------------------------------------------------- 24 | # library(R2jags) 25 | # 26 | # ### functions to get posterior samples ### 27 | # 28 | # # H0: mu = 0 29 | # getSamplesModelH0 <- function(data, niter = 52000, nburnin = 2000, nchains = 3) { 30 | # 31 | # model <- " 32 | # model { 33 | # for (i in 1:n) { 34 | # theta[i] ~ dnorm(0, invTau2) 35 | # y[i] ~ dnorm(theta[i], 1/sigma2) 36 | # } 37 | # invTau2 ~ dgamma(alpha, beta) 38 | # tau2 <- 1/invTau2 39 | # }" 40 | # 41 | # s <- jags(data, 
parameters.to.save = c("theta", "invTau2"), 42 | # model.file = textConnection(model), 43 | # n.chains = nchains, n.iter = niter, 44 | # n.burnin = nburnin, n.thin = 1) 45 | # 46 | # return(s) 47 | # 48 | # } 49 | # 50 | # # H1: mu != 0 51 | # getSamplesModelH1 <- function(data, niter = 52000, nburnin = 2000, 52 | # nchains = 3) { 53 | # 54 | # model <- " 55 | # model { 56 | # for (i in 1:n) { 57 | # theta[i] ~ dnorm(mu, invTau2) 58 | # y[i] ~ dnorm(theta[i], 1/sigma2) 59 | # } 60 | # mu ~ dnorm(mu0, 1/tau20) 61 | # invTau2 ~ dgamma(alpha, beta) 62 | # tau2 <- 1/invTau2 63 | # }" 64 | # 65 | # s <- jags(data, parameters.to.save = c("theta", "mu", "invTau2"), 66 | # model.file = textConnection(model), 67 | # n.chains = nchains, n.iter = niter, 68 | # n.burnin = nburnin, n.thin = 1) 69 | # 70 | # return(s) 71 | # 72 | # } 73 | # 74 | # ### get posterior samples ### 75 | # 76 | # # create data lists for JAGS 77 | # data_H0 <- list(y = y, n = length(y), alpha = alpha, beta = beta, sigma2 = sigma2) 78 | # data_H1 <- list(y = y, n = length(y), mu0 = mu0, tau20 = tau20, alpha = alpha, 79 | # beta = beta, sigma2 = sigma2) 80 | # 81 | # # fit models 82 | # samples_H0 <- getSamplesModelH0(data_H0) 83 | # samples_H1 <- getSamplesModelH1(data_H1) 84 | # 85 | 86 | ## ----eval=FALSE---------------------------------------------------------- 87 | # ### functions for evaluating the unnormalized posteriors on log scale ### 88 | # 89 | # log_posterior_H0 <- function(samples.row, data) { 90 | # 91 | # mu <- 0 92 | # invTau2 <- samples.row[[ "invTau2" ]] 93 | # theta <- samples.row[ paste0("theta[", seq_along(data$y), "]") ] 94 | # 95 | # sum(dnorm(data$y, theta, data$sigma2, log = TRUE)) + 96 | # sum(dnorm(theta, mu, 1/sqrt(invTau2), log = TRUE)) + 97 | # dgamma(invTau2, data$alpha, data$beta, log = TRUE) 98 | # 99 | # } 100 | # 101 | # log_posterior_H1 <- function(samples.row, data) { 102 | # 103 | # mu <- samples.row[[ "mu" ]] 104 | # invTau2 <- samples.row[[ "invTau2" ]] 105 | # 
theta <- samples.row[ paste0("theta[", seq_along(data$y), "]") ] 106 | # 107 | # sum(dnorm(data$y, theta, data$sigma2, log = TRUE)) + 108 | # sum(dnorm(theta, mu, 1/sqrt(invTau2), log = TRUE)) + 109 | # dnorm(mu, data$mu0, sqrt(data$tau20), log = TRUE) + 110 | # dgamma(invTau2, data$alpha, data$beta, log = TRUE) 111 | # 112 | # } 113 | # 114 | 115 | ## ----eval=FALSE---------------------------------------------------------- 116 | # # specify parameter bounds H0 117 | # cn <- colnames(samples_H0$BUGSoutput$sims.matrix) 118 | # cn <- cn[cn != "deviance"] 119 | # lb_H0 <- rep(-Inf, length(cn)) 120 | # ub_H0 <- rep(Inf, length(cn)) 121 | # names(lb_H0) <- names(ub_H0) <- cn 122 | # lb_H0[[ "invTau2" ]] <- 0 123 | # 124 | # # specify parameter bounds H1 125 | # cn <- colnames(samples_H1$BUGSoutput$sims.matrix) 126 | # cn <- cn[cn != "deviance"] 127 | # lb_H1 <- rep(-Inf, length(cn)) 128 | # ub_H1 <- rep(Inf, length(cn)) 129 | # names(lb_H1) <- names(ub_H1) <- cn 130 | # lb_H1[[ "invTau2" ]] <- 0 131 | 132 | ## ---- echo=FALSE--------------------------------------------------------- 133 | load(system.file("extdata/", "vignette_example_jags.RData", 134 | package = "bridgesampling")) 135 | 136 | ## ----eval=FALSE---------------------------------------------------------- 137 | # # compute log marginal likelihood via bridge sampling for H0 138 | # H0.bridge <- bridge_sampler(samples = samples_H0, data = data_H0, 139 | # log_posterior = log_posterior_H0, lb = lb_H0, 140 | # ub = ub_H0, silent = TRUE) 141 | # 142 | # # compute log marginal likelihood via bridge sampling for H1 143 | # H1.bridge <- bridge_sampler(samples = samples_H1, data = data_H1, 144 | # log_posterior = log_posterior_H1, lb = lb_H1, 145 | # ub = ub_H1, silent = TRUE) 146 | 147 | ## ------------------------------------------------------------------------ 148 | print(H0.bridge) 149 | print(H1.bridge) 150 | 151 | ## ----eval=FALSE---------------------------------------------------------- 152 | # # compute 
percentage errors 153 | # H0.error <- error_measures(H0.bridge)$percentage 154 | # H1.error <- error_measures(H1.bridge)$percentage 155 | 156 | ## ------------------------------------------------------------------------ 157 | print(H0.error) 158 | print(H1.error) 159 | 160 | ## ------------------------------------------------------------------------ 161 | # compute Bayes factor 162 | BF01 <- bf(H0.bridge, H1.bridge) 163 | print(BF01) 164 | 165 | ## ------------------------------------------------------------------------ 166 | # compute posterior model probabilities (assuming equal prior model probabilities) 167 | post1 <- post_prob(H0.bridge, H1.bridge) 168 | print(post1) 169 | 170 | ## ------------------------------------------------------------------------ 171 | # compute posterior model probabilities (using user-specified prior model probabilities) 172 | post2 <- post_prob(H0.bridge, H1.bridge, prior_prob = c(.6, .4)) 173 | print(post2) 174 | 175 | -------------------------------------------------------------------------------- /doc/bridgesampling_example_nimble.R: -------------------------------------------------------------------------------- 1 | ## ------------------------------------------------------------------------ 2 | library(bridgesampling) 3 | 4 | ### generate data ### 5 | set.seed(12345) 6 | 7 | mu <- 0 8 | tau2 <- 0.5 9 | sigma2 <- 1 10 | 11 | n <- 20 12 | theta <- rnorm(n, mu, sqrt(tau2)) 13 | y <- rnorm(n, theta, sqrt(sigma2)) 14 | 15 | 16 | ## ----eval=FALSE---------------------------------------------------------- 17 | # ### set prior parameters ### 18 | # mu0 <- 0 19 | # tau20 <- 1 20 | # alpha <- 1 21 | # beta <- 1 22 | 23 | ## ---- eval=FALSE--------------------------------------------------------- 24 | # library("nimble") 25 | # 26 | # # models 27 | # codeH0 <- nimbleCode({ 28 | # invTau2 ~ dgamma(1, 1) 29 | # tau2 <- 1/invTau2 30 | # for (i in 1:20) { 31 | # theta[i] ~ dnorm(0, sd = sqrt(tau2)) 32 | # y[i] ~ dnorm(theta[i], sd = 1) 33 | # } 
34 | # }) 35 | # codeH1 <- nimbleCode({ 36 | # mu ~ dnorm(0, sd = 1) 37 | # invTau2 ~ dgamma(1, 1) 38 | # tau2 <- 1/invTau2 39 | # for (i in 1:20) { 40 | # theta[i] ~ dnorm(mu, sd = sqrt(tau2)) 41 | # y[i] ~ dnorm(theta[i], sd = 1) 42 | # } 43 | # }) 44 | # 45 | # ## steps for H0: 46 | # modelH0 <- nimbleModel(codeH0) 47 | # modelH0$setData(y = y) # set data 48 | # cmodelH0 <- compileNimble(modelH0) # make compiled version from generated C++ 49 | # 50 | # ## steps for H1: 51 | # modelH1 <- nimbleModel(codeH1) 52 | # modelH1$setData(y = y) # set data 53 | # cmodelH1 <- compileNimble(modelH1) # make compiled version from generated C++ 54 | # 55 | 56 | ## ---- eval=FALSE--------------------------------------------------------- 57 | # 58 | # # build MCMC functions, skipping customization of the configuration. 59 | # mcmcH0 <- buildMCMC(modelH0, 60 | # monitors = modelH0$getNodeNames(stochOnly = TRUE, 61 | # includeData = FALSE)) 62 | # mcmcH1 <- buildMCMC(modelH1, 63 | # monitors = modelH1$getNodeNames(stochOnly = TRUE, 64 | # includeData = FALSE)) 65 | # # compile the MCMC function via generated C++ 66 | # cmcmcH0 <- compileNimble(mcmcH0, project = modelH0) 67 | # cmcmcH1 <- compileNimble(mcmcH1, project = modelH1) 68 | # 69 | # # run the MCMC. This is a wrapper for cmcmc$run() and extraction of samples. 
70 | # # the object samplesH1 is actually not needed as the samples are also in cmcmcH1 71 | # samplesH0 <- runMCMC(cmcmcH0, niter = 1e5, nburnin = 1000, nchains = 2, 72 | # progressBar = FALSE) 73 | # samplesH1 <- runMCMC(cmcmcH1, niter = 1e5, nburnin = 1000, nchains = 2, 74 | # progressBar = FALSE) 75 | 76 | ## ---- echo=FALSE--------------------------------------------------------- 77 | load(system.file("extdata/", "vignette_example_nimble.RData", 78 | package = "bridgesampling")) 79 | 80 | ## ----eval=FALSE---------------------------------------------------------- 81 | # # compute log marginal likelihood via bridge sampling for H0 82 | # H0.bridge <- bridge_sampler(cmcmcH0, silent = TRUE) 83 | # 84 | # # compute log marginal likelihood via bridge sampling for H1 85 | # H1.bridge <- bridge_sampler(cmcmcH1, silent = TRUE) 86 | 87 | ## ------------------------------------------------------------------------ 88 | print(H0.bridge) 89 | print(H1.bridge) 90 | 91 | ## ----eval=FALSE---------------------------------------------------------- 92 | # # compute percentage errors 93 | # H0.error <- error_measures(H0.bridge)$percentage 94 | # H1.error <- error_measures(H1.bridge)$percentage 95 | 96 | ## ------------------------------------------------------------------------ 97 | print(H0.error) 98 | print(H1.error) 99 | 100 | ## ------------------------------------------------------------------------ 101 | # compute Bayes factor 102 | BF01 <- bf(H0.bridge, H1.bridge) 103 | print(BF01) 104 | 105 | ## ------------------------------------------------------------------------ 106 | # compute posterior model probabilities (assuming equal prior model probabilities) 107 | post1 <- post_prob(H0.bridge, H1.bridge) 108 | print(post1) 109 | 110 | ## ------------------------------------------------------------------------ 111 | # compute posterior model probabilities (using user-specified prior model probabilities) 112 | post2 <- post_prob(H0.bridge, H1.bridge, prior_prob = c(.6, .4)) 
113 | print(post2) 114 | 115 | -------------------------------------------------------------------------------- /doc/bridgesampling_example_nimble.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Hierarchical Normal Example (nimble)" 3 | author: "Quentin F. Gronau, Henrik Singmann & Perry de Valpine" 4 | date: "`r Sys.Date()`" 5 | show_toc: true 6 | output: 7 | knitr:::html_vignette: 8 | toc: yes 9 | vignette: > 10 | %\VignetteIndexEntry{Hierarchical Normal Example Nimble} 11 | %\VignetteEngine{knitr::rmarkdown} 12 | %\VignetteEncoding{UTF-8} 13 | --- 14 | 15 | In this vignette, we explain how one can compute marginal likelihoods, Bayes factors, and posterior model probabilities using a simple hierarchical normal model implemented in `nimble`. The [`nimble` documentation](https://r-nimble.org/html_manual/cha-welcome-nimble.html) provides a comprehensive overview. This vignette uses the same models and data as the [`Stan` vignette](bridgesampling_example_stan.html) and [`Jags` vignette](bridgesampling_example_jags.html). 16 | 17 | ## Model and Data 18 | The model that we will use assumes that each of the $n$ observations $y_i$ (where $i$ indexes the observation, $i = 1,2,...,n$) is normally distributed with corresponding mean $\theta_i$ and a common known variance $\sigma^2$: $y_i \sim \mathcal{N}(\theta_i, \sigma^2)$. Each $\theta_i$ is drawn from a normal group-level distribution with mean $\mu$ and variance $\tau^2$: $\theta_i \sim \mathcal{N}(\mu, \tau^2)$. For the group-level mean $\mu$, we use a normal prior distribution of the form $\mathcal{N}(\mu_0, \tau^2_0)$. For the group-level variance $\tau^2$, we use an inverse-gamma prior of the form $\text{Inv-Gamma}(\alpha, \beta)$. 19 | 20 | In this example, we are interested in comparing the null model $\mathcal{H}_0$, which posits that the group-level mean $\mu = 0$, to the alternative model $\mathcal{H}_1$, which allows $\mu$ to be different from zero. 
First, we generate some data from the null model: 21 | 22 | ```{r} 23 | library(bridgesampling) 24 | 25 | ### generate data ### 26 | set.seed(12345) 27 | 28 | mu <- 0 29 | tau2 <- 0.5 30 | sigma2 <- 1 31 | 32 | n <- 20 33 | theta <- rnorm(n, mu, sqrt(tau2)) 34 | y <- rnorm(n, theta, sqrt(sigma2)) 35 | 36 | ``` 37 | 38 | Next, we specify the prior parameters $\mu_0$, $\tau^2_0$, $\alpha$, and $\beta$: 39 | 40 | ```{r,eval=FALSE} 41 | ### set prior parameters ### 42 | mu0 <- 0 43 | tau20 <- 1 44 | alpha <- 1 45 | beta <- 1 46 | ``` 47 | 48 | ## Specifying the Models 49 | Next, we implement the models in `nimble`. This requires to first transform the code into a `nimbleModel`, then we need to set the data, and then we can compile the model. Given that `nimble` is build on BUGS, the similarity between the `nimble` code and the [`Jags` code](bridgesampling_example_jags.html) is not too surprising. 50 | 51 | ```{r, eval=FALSE} 52 | library("nimble") 53 | 54 | # models 55 | codeH0 <- nimbleCode({ 56 | invTau2 ~ dgamma(1, 1) 57 | tau2 <- 1/invTau2 58 | for (i in 1:20) { 59 | theta[i] ~ dnorm(0, sd = sqrt(tau2)) 60 | y[i] ~ dnorm(theta[i], sd = 1) 61 | } 62 | }) 63 | codeH1 <- nimbleCode({ 64 | mu ~ dnorm(0, sd = 1) 65 | invTau2 ~ dgamma(1, 1) 66 | tau2 <- 1/invTau2 67 | for (i in 1:20) { 68 | theta[i] ~ dnorm(mu, sd = sqrt(tau2)) 69 | y[i] ~ dnorm(theta[i], sd = 1) 70 | } 71 | }) 72 | 73 | ## steps for H0: 74 | modelH0 <- nimbleModel(codeH0) 75 | modelH0$setData(y = y) # set data 76 | cmodelH0 <- compileNimble(modelH0) # make compiled version from generated C++ 77 | 78 | ## steps for H1: 79 | modelH1 <- nimbleModel(codeH1) 80 | modelH1$setData(y = y) # set data 81 | cmodelH1 <- compileNimble(modelH1) # make compiled version from generated C++ 82 | 83 | ``` 84 | ## Fitting the Models 85 | Fitting a model with `nimble` requires one to first create an MCMC function from the (compiled or uncompiled) model. This function then needs to be compiled again. 
With this object we can then create the samples. Note that nimble uses a reference object semantic so we do not actually need the samples object, as the samples will be saved in the MCMC function objects. But as `runMCMC` returns them anyway, we nevertheless save them. 86 | 87 | One usually requires a larger number of posterior samples for estimating the marginal likelihood than for simply estimating the model parameters. This is the reason for using a comparatively large number of samples for these simple models. 88 | 89 | ```{r, eval=FALSE} 90 | 91 | # build MCMC functions, skipping customization of the configuration. 92 | mcmcH0 <- buildMCMC(modelH0, 93 | monitors = modelH0$getNodeNames(stochOnly = TRUE, 94 | includeData = FALSE)) 95 | mcmcH1 <- buildMCMC(modelH1, 96 | monitors = modelH1$getNodeNames(stochOnly = TRUE, 97 | includeData = FALSE)) 98 | # compile the MCMC function via generated C++ 99 | cmcmcH0 <- compileNimble(mcmcH0, project = modelH0) 100 | cmcmcH1 <- compileNimble(mcmcH1, project = modelH1) 101 | 102 | # run the MCMC. This is a wrapper for cmcmc$run() and extraction of samples. 103 | # the object samplesH1 is actually not needed as the samples are also in cmcmcH1 104 | samplesH0 <- runMCMC(cmcmcH0, niter = 1e5, nburnin = 1000, nchains = 2, 105 | progressBar = FALSE) 106 | samplesH1 <- runMCMC(cmcmcH1, niter = 1e5, nburnin = 1000, nchains = 2, 107 | progressBar = FALSE) 108 | ``` 109 | 110 | ## Computing the (Log) Marginal Likelihoods 111 | Computing the (log) marginal likelihoods via the `bridge_sampler` function is now easy: we only need to pass the compiled MCMC function objects (of class `"MCMC_refClass"`) which contain all information necessary. 
We use `silent = TRUE` to suppress printing the number of iterations to the console: 112 | ```{r, echo=FALSE} 113 | load(system.file("extdata/", "vignette_example_nimble.RData", 114 | package = "bridgesampling")) 115 | ``` 116 | 117 | ```{r,eval=FALSE} 118 | # compute log marginal likelihood via bridge sampling for H0 119 | H0.bridge <- bridge_sampler(cmcmcH0, silent = TRUE) 120 | 121 | # compute log marginal likelihood via bridge sampling for H1 122 | H1.bridge <- bridge_sampler(cmcmcH1, silent = TRUE) 123 | ``` 124 | We obtain: 125 | ```{r} 126 | print(H0.bridge) 127 | print(H1.bridge) 128 | ``` 129 | We can use the `error_measures` function to compute an approximate percentage error of the estimates: 130 | ```{r,eval=FALSE} 131 | # compute percentage errors 132 | H0.error <- error_measures(H0.bridge)$percentage 133 | H1.error <- error_measures(H1.bridge)$percentage 134 | ``` 135 | 136 | We obtain: 137 | ```{r} 138 | print(H0.error) 139 | print(H1.error) 140 | ``` 141 | 142 | ## Bayesian Model Comparison 143 | To compare the null model and the alternative model, we can compute the Bayes factor by using the `bf` function. 144 | In our case, we compute $\text{BF}_{01}$, that is, the Bayes factor which quantifies how much more likely the data are under the null versus the alternative model: 145 | ```{r} 146 | # compute Bayes factor 147 | BF01 <- bf(H0.bridge, H1.bridge) 148 | print(BF01) 149 | ``` 150 | In this case, the Bayes factor is close to one, indicating that there is not much evidence for either model. We can also compute posterior model probabilities by using the `post_prob` function: 151 | ```{r} 152 | # compute posterior model probabilities (assuming equal prior model probabilities) 153 | post1 <- post_prob(H0.bridge, H1.bridge) 154 | print(post1) 155 | ``` 156 | When the argument `prior_prob` is not specified, as is the case here, the prior model probabilities of all models under consideration are set equal (i.e., in this case with two models to 0.5). 
However, if we had prior knowledge about how likely both models are, we could use the `prior_prob` argument to specify different prior model probabilities: 157 | ```{r} 158 | # compute posterior model probabilities (using user-specified prior model probabilities) 159 | post2 <- post_prob(H0.bridge, H1.bridge, prior_prob = c(.6, .4)) 160 | print(post2) 161 | ``` 162 | -------------------------------------------------------------------------------- /doc/bridgesampling_example_stan.R: -------------------------------------------------------------------------------- 1 | ## ------------------------------------------------------------------------ 2 | library(bridgesampling) 3 | 4 | ### generate data ### 5 | set.seed(12345) 6 | 7 | mu <- 0 8 | tau2 <- 0.5 9 | sigma2 <- 1 10 | 11 | n <- 20 12 | theta <- rnorm(n, mu, sqrt(tau2)) 13 | y <- rnorm(n, theta, sqrt(sigma2)) 14 | 15 | 16 | ## ----eval=FALSE---------------------------------------------------------- 17 | # ### set prior parameters ### 18 | # mu0 <- 0 19 | # tau20 <- 1 20 | # alpha <- 1 21 | # beta <- 1 22 | 23 | ## ---- eval=FALSE--------------------------------------------------------- 24 | # library(rstan) 25 | # 26 | # # models 27 | # stancodeH0 <- 'data { 28 | # int n; // number of observations 29 | # vector[n] y; // observations 30 | # real alpha; 31 | # real beta; 32 | # real sigma2; 33 | # } 34 | # parameters { 35 | # real tau2; // group-level variance 36 | # vector[n] theta; // participant effects 37 | # } 38 | # model { 39 | # target += inv_gamma_lpdf(tau2 | alpha, beta); 40 | # target += normal_lpdf(theta | 0, sqrt(tau2)); 41 | # target += normal_lpdf(y | theta, sqrt(sigma2)); 42 | # } 43 | # ' 44 | # stancodeH1 <- 'data { 45 | # int n; // number of observations 46 | # vector[n] y; // observations 47 | # real mu0; 48 | # real tau20; 49 | # real alpha; 50 | # real beta; 51 | # real sigma2; 52 | # } 53 | # parameters { 54 | # real mu; 55 | # real tau2; // group-level variance 56 | # vector[n] theta; // 
participant effects 57 | # } 58 | # model { 59 | # target += normal_lpdf(mu | mu0, sqrt(tau20)); 60 | # target += inv_gamma_lpdf(tau2 | alpha, beta); 61 | # target += normal_lpdf(theta | mu, sqrt(tau2)); 62 | # target += normal_lpdf(y | theta, sqrt(sigma2)); 63 | # } 64 | # ' 65 | # # compile models 66 | # stanmodelH0 <- stan_model(model_code = stancodeH0, model_name="stanmodel") 67 | # stanmodelH1 <- stan_model(model_code = stancodeH1, model_name="stanmodel") 68 | 69 | ## ---- eval=FALSE--------------------------------------------------------- 70 | # # fit models 71 | # stanfitH0 <- sampling(stanmodelH0, data = list(y = y, n = n, 72 | # alpha = alpha, 73 | # beta = beta, 74 | # sigma2 = sigma2), 75 | # iter = 50000, warmup = 1000, chains = 3, cores = 1) 76 | # stanfitH1 <- sampling(stanmodelH1, data = list(y = y, n = n, 77 | # mu0 = mu0, 78 | # tau20 = tau20, 79 | # alpha = alpha, 80 | # beta = beta, 81 | # sigma2 = sigma2), 82 | # iter = 50000, warmup = 1000, chains = 3, cores = 1) 83 | 84 | ## ---- echo=FALSE--------------------------------------------------------- 85 | load(system.file("extdata/", "vignette_example_stan.RData", 86 | package = "bridgesampling")) 87 | 88 | ## ----eval=FALSE---------------------------------------------------------- 89 | # # compute log marginal likelihood via bridge sampling for H0 90 | # H0.bridge <- bridge_sampler(stanfitH0, silent = TRUE) 91 | # 92 | # # compute log marginal likelihood via bridge sampling for H1 93 | # H1.bridge <- bridge_sampler(stanfitH1, silent = TRUE) 94 | 95 | ## ------------------------------------------------------------------------ 96 | print(H0.bridge) 97 | print(H1.bridge) 98 | 99 | ## ----eval=FALSE---------------------------------------------------------- 100 | # # compute percentage errors 101 | # H0.error <- error_measures(H0.bridge)$percentage 102 | # H1.error <- error_measures(H1.bridge)$percentage 103 | 104 | ## ------------------------------------------------------------------------ 105 | 
print(H0.error) 106 | print(H1.error) 107 | 108 | ## ------------------------------------------------------------------------ 109 | # compute Bayes factor 110 | BF01 <- bf(H0.bridge, H1.bridge) 111 | print(BF01) 112 | 113 | ## ------------------------------------------------------------------------ 114 | # compute posterior model probabilities (assuming equal prior model probabilities) 115 | post1 <- post_prob(H0.bridge, H1.bridge) 116 | print(post1) 117 | 118 | ## ------------------------------------------------------------------------ 119 | # compute posterior model probabilities (using user-specified prior model probabilities) 120 | post2 <- post_prob(H0.bridge, H1.bridge, prior_prob = c(.6, .4)) 121 | print(post2) 122 | 123 | -------------------------------------------------------------------------------- /doc/bridgesampling_example_stan.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Hierarchical Normal Example (Stan)" 3 | author: "Quentin F. Gronau" 4 | date: "`r Sys.Date()`" 5 | show_toc: true 6 | output: 7 | knitr:::html_vignette: 8 | toc: yes 9 | vignette: > 10 | %\VignetteIndexEntry{Hierarchical Normal Example Stan} 11 | %\VignetteEngine{knitr::rmarkdown} 12 | %\VignetteEncoding{UTF-8} 13 | --- 14 | 15 | In this vignette, we explain how one can compute marginal likelihoods, Bayes factors, and posterior model probabilities using a simple hierarchical normal model implemented in `Stan`. This vignette uses the same models and data as the [`Jags` vignette](bridgesampling_example_jags.html). 16 | 17 | ## Model and Data 18 | The model that we will use assumes that each of the $n$ observations $y_i$ (where $i$ indexes the observation, $i = 1,2,...,n$) is normally distributed with corresponding mean $\theta_i$ and a common known variance $\sigma^2$: $y_i \sim \mathcal{N}(\theta_i, \sigma^2)$. 
Each $\theta_i$ is drawn from a normal group-level distribution with mean $\mu$ and variance $\tau^2$: $\theta_i \sim \mathcal{N}(\mu, \tau^2)$. For the group-level mean $\mu$, we use a normal prior distribution of the form $\mathcal{N}(\mu_0, \tau^2_0)$. For the group-level variance $\tau^2$, we use an inverse-gamma prior of the form $\text{Inv-Gamma}(\alpha, \beta)$. 19 | 20 | In this example, we are interested in comparing the null model $\mathcal{H}_0$, which posits that the group-level mean $\mu = 0$, to the alternative model $\mathcal{H}_1$, which allows $\mu$ to be different from zero. First, we generate some data from the null model: 21 | 22 | ```{r} 23 | library(bridgesampling) 24 | 25 | ### generate data ### 26 | set.seed(12345) 27 | 28 | mu <- 0 29 | tau2 <- 0.5 30 | sigma2 <- 1 31 | 32 | n <- 20 33 | theta <- rnorm(n, mu, sqrt(tau2)) 34 | y <- rnorm(n, theta, sqrt(sigma2)) 35 | 36 | ``` 37 | 38 | Next, we specify the prior parameters $\mu_0$, $\tau^2_0$, $\alpha$, and $\beta$: 39 | 40 | ```{r,eval=FALSE} 41 | ### set prior parameters ### 42 | mu0 <- 0 43 | tau20 <- 1 44 | alpha <- 1 45 | beta <- 1 46 | ``` 47 | 48 | ## Specifying the Models 49 | Next, we implement the models in `Stan`. Note that to compute the (log) marginal likelihood for a `Stan` model, we need to specify the model in a certain way. Instead of using `"~"` signs for specifying distributions, we need to directly use the (log) density functions. The reason for this is that when using the `"~"` sign, constant terms are dropped which are not needed for sampling from the posterior. However, for computing the marginal likelihood, these constants need to be retained. For instance, instead of writing `y ~ normal(mu, sigma)` we would need to write `target += normal_lpdf(y | mu, sigma)`. 
The models can then be specified and compiled as follows (note that it is necessary to install `rstan` for this): 50 | ```{r, eval=FALSE} 51 | library(rstan) 52 | 53 | # models 54 | stancodeH0 <- 'data { 55 | int n; // number of observations 56 | vector[n] y; // observations 57 | real alpha; 58 | real beta; 59 | real sigma2; 60 | } 61 | parameters { 62 | real tau2; // group-level variance 63 | vector[n] theta; // participant effects 64 | } 65 | model { 66 | target += inv_gamma_lpdf(tau2 | alpha, beta); 67 | target += normal_lpdf(theta | 0, sqrt(tau2)); 68 | target += normal_lpdf(y | theta, sqrt(sigma2)); 69 | } 70 | ' 71 | stancodeH1 <- 'data { 72 | int n; // number of observations 73 | vector[n] y; // observations 74 | real mu0; 75 | real tau20; 76 | real alpha; 77 | real beta; 78 | real sigma2; 79 | } 80 | parameters { 81 | real mu; 82 | real tau2; // group-level variance 83 | vector[n] theta; // participant effects 84 | } 85 | model { 86 | target += normal_lpdf(mu | mu0, sqrt(tau20)); 87 | target += inv_gamma_lpdf(tau2 | alpha, beta); 88 | target += normal_lpdf(theta | mu, sqrt(tau2)); 89 | target += normal_lpdf(y | theta, sqrt(sigma2)); 90 | } 91 | ' 92 | # compile models 93 | stanmodelH0 <- stan_model(model_code = stancodeH0, model_name="stanmodel") 94 | stanmodelH1 <- stan_model(model_code = stancodeH1, model_name="stanmodel") 95 | ``` 96 | ## Fitting the Models 97 | Now we can fit the null and the alternative model in `Stan`. One usually requires a larger number of posterior samples for estimating the marginal likelihood than for simply estimating the model parameters. This is the reason for using a comparatively large number of samples for these simple models. 
98 | ```{r, eval=FALSE} 99 | # fit models 100 | stanfitH0 <- sampling(stanmodelH0, data = list(y = y, n = n, 101 | alpha = alpha, 102 | beta = beta, 103 | sigma2 = sigma2), 104 | iter = 50000, warmup = 1000, chains = 3, cores = 1) 105 | stanfitH1 <- sampling(stanmodelH1, data = list(y = y, n = n, 106 | mu0 = mu0, 107 | tau20 = tau20, 108 | alpha = alpha, 109 | beta = beta, 110 | sigma2 = sigma2), 111 | iter = 50000, warmup = 1000, chains = 3, cores = 1) 112 | ``` 113 | 114 | ## Computing the (Log) Marginal Likelihoods 115 | Computing the (log) marginal likelihoods via the `bridge_sampler` function is now easy: we only need to pass the `stanfit` objects which contain all information necessary. We use `silent = TRUE` to suppress printing the number of iterations to the console: 116 | ```{r, echo=FALSE} 117 | load(system.file("extdata/", "vignette_example_stan.RData", 118 | package = "bridgesampling")) 119 | ``` 120 | 121 | ```{r,eval=FALSE} 122 | # compute log marginal likelihood via bridge sampling for H0 123 | H0.bridge <- bridge_sampler(stanfitH0, silent = TRUE) 124 | 125 | # compute log marginal likelihood via bridge sampling for H1 126 | H1.bridge <- bridge_sampler(stanfitH1, silent = TRUE) 127 | ``` 128 | We obtain: 129 | ```{r} 130 | print(H0.bridge) 131 | print(H1.bridge) 132 | ``` 133 | We can use the `error_measures` function to compute an approximate percentage error of the estimates: 134 | ```{r,eval=FALSE} 135 | # compute percentage errors 136 | H0.error <- error_measures(H0.bridge)$percentage 137 | H1.error <- error_measures(H1.bridge)$percentage 138 | ``` 139 | We obtain: 140 | ```{r} 141 | print(H0.error) 142 | print(H1.error) 143 | ``` 144 | 145 | ## Bayesian Model Comparison 146 | To compare the null model and the alternative model, we can compute the Bayes factor by using the `bf` function. 
147 | In our case, we compute $\text{BF}_{01}$, that is, the Bayes factor which quantifies how much more likely the data are under the null versus the alternative model: 148 | ```{r} 149 | # compute Bayes factor 150 | BF01 <- bf(H0.bridge, H1.bridge) 151 | print(BF01) 152 | ``` 153 | In this case, the Bayes factor is close to one, indicating that there is not much evidence for either model. We can also compute posterior model probabilities by using the `post_prob` function: 154 | ```{r} 155 | # compute posterior model probabilities (assuming equal prior model probabilities) 156 | post1 <- post_prob(H0.bridge, H1.bridge) 157 | print(post1) 158 | ``` 159 | When the argument `prior_prob` is not specified, as is the case here, the prior model probabilities of all models under consideration are set equal (i.e., in this case with two models to 0.5). However, if we had prior knowledge about how likely both models are, we could use the `prior_prob` argument to specify different prior model probabilities: 160 | ```{r} 161 | # compute posterior model probabilities (using user-specified prior model probabilities) 162 | post2 <- post_prob(H0.bridge, H1.bridge, prior_prob = c(.6, .4)) 163 | print(post2) 164 | ``` 165 | -------------------------------------------------------------------------------- /doc/bridgesampling_paper.R: -------------------------------------------------------------------------------- 1 | ### This is an R script tangled from 'bridgesampling_paper.pdf.asis' 2 | -------------------------------------------------------------------------------- /doc/bridgesampling_paper.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quentingronau/bridgesampling/6793386ffe30205d649d2fe5ce9597c0bff83142/doc/bridgesampling_paper.pdf -------------------------------------------------------------------------------- /doc/bridgesampling_paper.pdf.asis: 
-------------------------------------------------------------------------------- 1 | %\VignetteIndexEntry{bridgesampling: An R Package for Estimating Normalizing Constants (JSS version)} 2 | %\VignetteEngine{R.rsp::asis} 3 | -------------------------------------------------------------------------------- /doc/bridgesampling_paper_extended.R: -------------------------------------------------------------------------------- 1 | ### This is an R script tangled from 'bridgesampling_paper_extended.pdf.asis' 2 | -------------------------------------------------------------------------------- /doc/bridgesampling_paper_extended.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quentingronau/bridgesampling/6793386ffe30205d649d2fe5ce9597c0bff83142/doc/bridgesampling_paper_extended.pdf -------------------------------------------------------------------------------- /doc/bridgesampling_paper_extended.pdf.asis: -------------------------------------------------------------------------------- 1 | %\VignetteIndexEntry{bridgesampling: An R Package for Estimating Normalizing Constants (Extended)} 2 | %\VignetteEngine{R.rsp::asis} 3 | -------------------------------------------------------------------------------- /doc/bridgesampling_stan_ttest.R: -------------------------------------------------------------------------------- 1 | ## ------------------------------------------------------------------------ 2 | library(bridgesampling) 3 | 4 | set.seed(12345) 5 | 6 | # Sleep data from t.test example 7 | data(sleep) 8 | 9 | # compute difference scores 10 | y <- sleep$extra[sleep$group == 2] - sleep$extra[sleep$group == 1] 11 | n <- length(y) 12 | 13 | ## ---- eval=FALSE--------------------------------------------------------- 14 | # library(rstan) 15 | # 16 | # # models 17 | # stancodeH0 <- ' 18 | # data { 19 | # int n; // number of observations 20 | # vector[n] y; // observations 21 | # } 22 | # parameters { 23 | # 
real sigma2; // variance parameter 24 | # } 25 | # model { 26 | # target += log(1/sigma2); // Jeffreys prior on sigma2 27 | # target += normal_lpdf(y | 0, sqrt(sigma2)); // likelihood 28 | # } 29 | # ' 30 | # stancodeH1 <- ' 31 | # data { 32 | # int n; // number of observations 33 | # vector[n] y; // observations 34 | # real r; // Cauchy prior scale 35 | # } 36 | # parameters { 37 | # real delta; 38 | # real sigma2;// variance parameter 39 | # } 40 | # model { 41 | # target += cauchy_lpdf(delta | 0, r); // Cauchy prior on delta 42 | # target += log(1/sigma2); // Jeffreys prior on sigma2 43 | # target += normal_lpdf(y | delta*sqrt(sigma2), sqrt(sigma2)); // likelihood 44 | # } 45 | # ' 46 | # # compile models 47 | # stanmodelH0 <- stan_model(model_code = stancodeH0, model_name="stanmodel") 48 | # stanmodelH1 <- stan_model(model_code = stancodeH1, model_name="stanmodel") 49 | 50 | ## ---- eval=FALSE--------------------------------------------------------- 51 | # # fit models 52 | # stanfitH0 <- sampling(stanmodelH0, data = list(y = y, n = n), 53 | # iter = 20000, warmup = 1000, chains = 4, cores = 1, 54 | # control = list(adapt_delta = .99)) 55 | # stanfitH1 <- sampling(stanmodelH1, data = list(y = y, n = n, r = 1/sqrt(2)), 56 | # iter = 20000, warmup = 1000, chains = 4, cores = 1, 57 | # control = list(adapt_delta = .99)) 58 | 59 | ## ---- echo=FALSE--------------------------------------------------------- 60 | load(system.file("extdata/", "vignette_stan_ttest.RData", 61 | package = "bridgesampling")) 62 | 63 | ## ---- eval=FALSE--------------------------------------------------------- 64 | # H0 <- bridge_sampler(stanfitH0, silent = TRUE) 65 | # H1 <- bridge_sampler(stanfitH1, silent = TRUE) 66 | 67 | ## ------------------------------------------------------------------------ 68 | print(H0) 69 | print(H1) 70 | 71 | ## ----eval=FALSE---------------------------------------------------------- 72 | # # compute percentage errors 73 | # H0.error <- 
error_measures(H0)$percentage 74 | # H1.error <- error_measures(H1)$percentage 75 | 76 | ## ------------------------------------------------------------------------ 77 | print(H0.error) 78 | print(H1.error) 79 | 80 | ## ------------------------------------------------------------------------ 81 | # compute Bayes factor 82 | BF10 <- bf(H1, H0) 83 | print(BF10) 84 | 85 | ## ---- eval=FALSE--------------------------------------------------------- 86 | # library(BayesFactor) 87 | # BF10.BayesFactor <- extractBF(ttestBF(y), onlybf = TRUE) 88 | 89 | ## ---- message=FALSE------------------------------------------------------ 90 | print(BF10.BayesFactor) 91 | 92 | ## ---- eval=FALSE--------------------------------------------------------- 93 | # stancodeHplus <- ' 94 | # data { 95 | # int n; // number of observations 96 | # vector[n] y; // observations 97 | # real r; // Cauchy prior scale 98 | # } 99 | # parameters { 100 | # real delta; // constrained to be positive 101 | # real sigma2;// variance parameter 102 | # } 103 | # model { 104 | # target += cauchy_lpdf(delta | 0, r) - cauchy_lccdf(0 | 0, r); // Cauchy prior on delta 105 | # target += log(1/sigma2); // Jeffreys prior on sigma2 106 | # target += normal_lpdf(y | delta*sqrt(sigma2), sqrt(sigma2)); // likelihood 107 | # } 108 | # ' 109 | # # compile and fit model 110 | # stanmodelHplus <- stan_model(model_code = stancodeHplus, model_name="stanmodel") 111 | # stanfitHplus <- sampling(stanmodelHplus, data = list(y = y, n = n, r = 1/sqrt(2)), 112 | # iter = 30000, warmup = 1000, chains = 4, 113 | # control = list(adapt_delta = .99)) 114 | 115 | ## ----eval=FALSE---------------------------------------------------------- 116 | # Hplus <- bridge_sampler(stanfitHplus, silent = TRUE) 117 | 118 | ## ------------------------------------------------------------------------ 119 | print(Hplus) 120 | 121 | ## ----eval=FALSE---------------------------------------------------------- 122 | # Hplus.error <- 
error_measures(Hplus)$percentage 123 | 124 | ## ------------------------------------------------------------------------ 125 | print(Hplus.error) 126 | 127 | ## ------------------------------------------------------------------------ 128 | # compute Bayes factor 129 | BFplus0 <- bf(Hplus, H0) 130 | print(BFplus0) 131 | 132 | ## ---- eval=FALSE--------------------------------------------------------- 133 | # BFplus0.BayesFactor <- extractBF(ttestBF(y, nullInterval = c(0, Inf)), onlybf = TRUE)[1] 134 | 135 | ## ------------------------------------------------------------------------ 136 | print(BFplus0.BayesFactor) 137 | 138 | -------------------------------------------------------------------------------- /doc/bridgesampling_tutorial.R: -------------------------------------------------------------------------------- 1 | ### This is an R script tangled from 'bridgesampling_tutorial.pdf.asis' 2 | -------------------------------------------------------------------------------- /doc/bridgesampling_tutorial.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quentingronau/bridgesampling/6793386ffe30205d649d2fe5ce9597c0bff83142/doc/bridgesampling_tutorial.pdf -------------------------------------------------------------------------------- /doc/bridgesampling_tutorial.pdf.asis: -------------------------------------------------------------------------------- 1 | %\VignetteIndexEntry{A Tutorial on Bridge Sampling} 2 | %\VignetteEngine{R.rsp::asis} 3 | -------------------------------------------------------------------------------- /examples/example.bridge_sampler.R: -------------------------------------------------------------------------------- 1 | ## ------------------------------------------------------------------------ 2 | ## Example 1: Estimating the Normalizing Constant of a Two-Dimensional 3 | ## Standard Normal Distribution 4 | ## 
------------------------------------------------------------------------ 5 | 6 | library(bridgesampling) 7 | library(mvtnorm) 8 | 9 | samples <- rmvnorm(1e4, mean = rep(0, 2), sigma = diag(2)) 10 | colnames(samples) <- c("x1", "x2") 11 | log_density <- function(samples.row, data) { 12 | -.5*t(samples.row) %*% samples.row 13 | } 14 | 15 | lb <- rep(-Inf, 2) 16 | ub <- rep(Inf, 2) 17 | names(lb) <- names(ub) <- colnames(samples) 18 | bridge_result <- bridge_sampler(samples = samples, log_posterior = log_density, 19 | data = NULL, lb = lb, ub = ub, silent = TRUE) 20 | 21 | # compare to analytical value 22 | analytical <- log(2*pi) 23 | print(cbind(bridge_result$logml, analytical)) 24 | 25 | \dontrun{ 26 | 27 | ## ------------------------------------------------------------------------ 28 | ## Example 2: Hierarchical Normal Model 29 | ## ------------------------------------------------------------------------ 30 | 31 | # for a full description of the example, see 32 | vignette("bridgesampling_example_jags") 33 | 34 | library(R2jags) 35 | 36 | ### generate data ### 37 | 38 | set.seed(12345) 39 | 40 | mu <- 0 41 | tau2 <- 0.5 42 | sigma2 <- 1 43 | 44 | n <- 20 45 | theta <- rnorm(n, mu, sqrt(tau2)) 46 | y <- rnorm(n, theta, sqrt(sigma2)) 47 | 48 | 49 | ### set prior parameters 50 | alpha <- 1 51 | beta <- 1 52 | mu0 <- 0 53 | tau20 <- 1 54 | 55 | ### functions to get posterior samples ### 56 | 57 | ### H0: mu = 0 58 | 59 | getSamplesModelH0 <- function(data, niter = 52000, nburnin = 2000, nchains = 3) { 60 | 61 | model <- " 62 | model { 63 | for (i in 1:n) { 64 | theta[i] ~ dnorm(0, invTau2) 65 | y[i] ~ dnorm(theta[i], 1/sigma2) 66 | } 67 | invTau2 ~ dgamma(alpha, beta) 68 | tau2 <- 1/invTau2 69 | }" 70 | 71 | s <- jags(data, parameters.to.save = c("theta", "invTau2"), 72 | model.file = textConnection(model), 73 | n.chains = nchains, n.iter = niter, 74 | n.burnin = nburnin, n.thin = 1) 75 | 76 | return(s) 77 | 78 | } 79 | 80 | ### H1: mu != 0 81 | 82 | getSamplesModelH1 
<- function(data, niter = 52000, nburnin = 2000, 83 | nchains = 3) { 84 | 85 | model <- " 86 | model { 87 | for (i in 1:n) { 88 | theta[i] ~ dnorm(mu, invTau2) 89 | y[i] ~ dnorm(theta[i], 1/sigma2) 90 | } 91 | mu ~ dnorm(mu0, 1/tau20) 92 | invTau2 ~ dgamma(alpha, beta) 93 | tau2 <- 1/invTau2 94 | }" 95 | 96 | s <- jags(data, parameters.to.save = c("theta", "mu", "invTau2"), 97 | model.file = textConnection(model), 98 | n.chains = nchains, n.iter = niter, 99 | n.burnin = nburnin, n.thin = 1) 100 | 101 | return(s) 102 | 103 | } 104 | 105 | ### get posterior samples ### 106 | 107 | # create data lists for Jags 108 | data_H0 <- list(y = y, n = length(y), alpha = alpha, beta = beta, sigma2 = sigma2) 109 | data_H1 <- list(y = y, n = length(y), mu0 = mu0, tau20 = tau20, alpha = alpha, 110 | beta = beta, sigma2 = sigma2) 111 | 112 | # fit models 113 | samples_H0 <- getSamplesModelH0(data_H0) 114 | samples_H1 <- getSamplesModelH1(data_H1) 115 | 116 | 117 | ### functions for evaluating the unnormalized posteriors on log scale ### 118 | log_posterior_H0 <- function(samples.row, data) { 119 | 120 | mu <- 0 121 | invTau2 <- samples.row[[ "invTau2" ]] 122 | theta <- samples.row[ paste0("theta[", seq_along(data$y), "]") ] 123 | 124 | sum(dnorm(data$y, theta, data$sigma2, log = TRUE)) + 125 | sum(dnorm(theta, mu, 1/sqrt(invTau2), log = TRUE)) + 126 | dgamma(invTau2, data$alpha, data$beta, log = TRUE) 127 | 128 | } 129 | 130 | log_posterior_H1 <- function(samples.row, data) { 131 | 132 | mu <- samples.row[[ "mu" ]] 133 | invTau2 <- samples.row[[ "invTau2" ]] 134 | theta <- samples.row[ paste0("theta[", seq_along(data$y), "]") ] 135 | 136 | sum(dnorm(data$y, theta, data$sigma2, log = TRUE)) + 137 | sum(dnorm(theta, mu, 1/sqrt(invTau2), log = TRUE)) + 138 | dnorm(mu, data$mu0, sqrt(data$tau20), log = TRUE) + 139 | dgamma(invTau2, data$alpha, data$beta, log = TRUE) 140 | 141 | } 142 | 143 | # specify parameter bounds H0 144 | cn <- colnames(samples_H0$BUGSoutput$sims.matrix) 145 | cn 
<- cn[cn != "deviance"] 146 | lb_H0 <- rep(-Inf, length(cn)) 147 | ub_H0 <- rep(Inf, length(cn)) 148 | names(lb_H0) <- names(ub_H0) <- cn 149 | lb_H0[[ "invTau2" ]] <- 0 150 | 151 | # specify parameter bounds H1 152 | cn <- colnames(samples_H1$BUGSoutput$sims.matrix) 153 | cn <- cn[cn != "deviance"] 154 | lb_H1 <- rep(-Inf, length(cn)) 155 | ub_H1 <- rep(Inf, length(cn)) 156 | names(lb_H1) <- names(ub_H1) <- cn 157 | lb_H1[[ "invTau2" ]] <- 0 158 | 159 | 160 | # compute log marginal likelihood via bridge sampling for H0 161 | H0.bridge <- bridge_sampler(samples = samples_H0, data = data_H0, 162 | log_posterior = log_posterior_H0, lb = lb_H0, 163 | ub = ub_H0, silent = TRUE) 164 | print(H0.bridge) 165 | 166 | # compute log marginal likelihood via bridge sampling for H1 167 | H1.bridge <- bridge_sampler(samples = samples_H1, data = data_H1, 168 | log_posterior = log_posterior_H1, lb = lb_H1, 169 | ub = ub_H1, silent = TRUE) 170 | print(H1.bridge) 171 | 172 | # compute percentage error 173 | print(error_measures(H0.bridge)$percentage) 174 | print(error_measures(H1.bridge)$percentage) 175 | 176 | # compute Bayes factor 177 | BF01 <- bf(H0.bridge, H1.bridge) 178 | print(BF01) 179 | 180 | # compute posterior model probabilities (assuming equal prior model probabilities) 181 | post1 <- post_prob(H0.bridge, H1.bridge) 182 | print(post1) 183 | 184 | # compute posterior model probabilities (using user-specified prior model probabilities) 185 | post2 <- post_prob(H0.bridge, H1.bridge, prior_prob = c(.6, .4)) 186 | print(post2) 187 | 188 | } 189 | 190 | \dontrun{ 191 | 192 | ## ------------------------------------------------------------------------ 193 | ## Example 3: rstanarm 194 | ## ------------------------------------------------------------------------ 195 | library(rstanarm) 196 | 197 | # N.B.: remember to specify the diagnostic_file 198 | 199 | fit_1 <- stan_glm(mpg ~ wt + qsec + am, data = mtcars, 200 | chains = 2, cores = 2, iter = 5000, 201 | diagnostic_file = 
file.path(tempdir(), "df.csv")) 202 | bridge_1 <- bridge_sampler(fit_1) 203 | fit_2 <- update(fit_1, formula = . ~ . + cyl) 204 | bridge_2 <- bridge_sampler(fit_2, method = "warp3") 205 | bf(bridge_1, bridge_2) 206 | 207 | } 208 | 209 | -------------------------------------------------------------------------------- /examples/example.ier.R: -------------------------------------------------------------------------------- 1 | 2 | \dontrun{ 3 | 4 | ################################################################################ 5 | # BAYESIAN FACTOR ANALYSIS (AS PROPOSED BY LOPES & WEST, 2004) 6 | ################################################################################ 7 | 8 | library(bridgesampling) 9 | library(rstan) 10 | 11 | cores <- 4 12 | options(mc.cores = cores) 13 | 14 | data("ier") 15 | 16 | #------------------------------------------------------------------------------- 17 | # plot data 18 | #------------------------------------------------------------------------------- 19 | 20 | currency <- colnames(ier) 21 | label <- c("US Dollar", "Canadian Dollar", "Yen", "Franc", "Lira", "Mark") 22 | op <- par(mfrow = c(3, 2), mar = c(6, 6, 3, 3)) 23 | 24 | for (i in seq_along(currency)) { 25 | plot(ier[,currency[i]], type = "l", col = "darkblue", axes = FALSE, 26 | ylim = c(-4, 4), ylab = "", xlab = "", lwd = 2) 27 | axis(1, at = 0:12*12, labels = 1975:1987, cex.axis = 1.7) 28 | axis(2, at = pretty(c(-4, 4)), las = 1, cex.axis = 1.7) 29 | mtext("Year", 1, cex = 1.5, line = 3.2) 30 | mtext("Exchange Rate Changes", 2, cex = 1.4, line = 3.2) 31 | mtext(label[i], 3, cex = 1.6, line = .1) 32 | } 33 | 34 | par(op) 35 | 36 | #------------------------------------------------------------------------------- 37 | # stan model 38 | #------------------------------------------------------------------------------- 39 | 40 | model_code <- 41 | "data { 42 | int T; // number of observations 43 | int m; // number of variables 44 | int k; // number of factors 45 | matrix[T,m] Y; 
// data matrix 46 | } 47 | transformed data { 48 | int r; 49 | vector[m] zeros; 50 | r = m * k - k * (k - 1) / 2; // number of non-zero factor loadings 51 | zeros = rep_vector(0.0, m); 52 | } 53 | parameters { 54 | real beta_lower[r - k]; // lower-diagonal elements of beta 55 | real beta_diag [k]; // diagonal elements of beta 56 | vector[m] sigma2; // residual variances 57 | } 58 | transformed parameters { 59 | matrix[m,k] beta; 60 | cov_matrix[m] Omega; 61 | // construct lower-triangular factor loadings matrix 62 | { 63 | int index_lower = 1; 64 | for (j in 1:k) { 65 | for (i in 1:m) { 66 | if (i == j) { 67 | beta[j,j] = beta_diag[j]; 68 | } else if (i >= j) { 69 | beta[i,j] = beta_lower[index_lower]; 70 | index_lower = index_lower + 1; 71 | } else { 72 | beta[i,j] = 0.0; 73 | } 74 | } 75 | } 76 | } 77 | Omega = beta * beta' + diag_matrix(sigma2); 78 | } 79 | model { 80 | // priors 81 | target += normal_lpdf(beta_diag | 0, 1) - k * normal_lccdf(0 | 0, 1); 82 | target += normal_lpdf(beta_lower | 0, 1); 83 | target += inv_gamma_lpdf(sigma2 | 2.2 / 2.0, 0.1 / 2.0); 84 | 85 | // likelihood 86 | for(t in 1:T) { 87 | target += multi_normal_lpdf(Y[t] | zeros, Omega); 88 | } 89 | }" 90 | 91 | # compile model 92 | model <- stan_model(model_code = model_code) 93 | 94 | 95 | #------------------------------------------------------------------------------- 96 | # fit models and compute log marginal likelihoods 97 | #------------------------------------------------------------------------------- 98 | 99 | # function for generating starting values 100 | init_fun <- function(nchains, k, m) { 101 | r <- m * k - k * (k - 1) / 2 102 | out <- vector("list", nchains) 103 | for (i in seq_len(nchains)) { 104 | beta_lower <- array(runif(r - k, 0.05, 1), dim = r - k) 105 | beta_diag <- array(runif(k, .05, 1), dim = k) 106 | sigma2 <- array(runif(m, .05, 1.5), dim = m) 107 | out[[i]] <- list(beta_lower = beta_lower, 108 | beta_diag = beta_diag, 109 | sigma2 = sigma2) 110 | } 111 | 
return(out) 112 | } 113 | 114 | set.seed(1) 115 | stanfit <- bridge <- vector("list", 3) 116 | for (k in 1:3) { 117 | stanfit[[k]] <- sampling(model, 118 | data = list(Y = ier, T = nrow(ier), 119 | m = ncol(ier), k = k), 120 | iter = 11000, warmup = 1000, chains = 4, 121 | init = init_fun(nchains = 4, k = k, m = ncol(ier)), 122 | cores = cores, seed = 1) 123 | bridge[[k]] <- bridge_sampler(stanfit[[k]], method = "warp3", 124 | repetitions = 10, cores = cores) 125 | } 126 | 127 | # example output 128 | summary(bridge[[2]]) 129 | 130 | #------------------------------------------------------------------------------- 131 | # compute posterior model probabilities 132 | #------------------------------------------------------------------------------- 133 | 134 | pp <- post_prob(bridge[[1]], bridge[[2]], bridge[[3]], 135 | model_names = c("k = 1", "k = 2", "k = 3")) 136 | pp 137 | 138 | op <- par(mar = c(6, 6, 3, 3)) 139 | boxplot(pp, axes = FALSE, 140 | ylim = c(0, 1), ylab = "", 141 | xlab = "") 142 | axis(1, at = 1:3, labels = colnames(pp), cex.axis = 1.7) 143 | axis(2, cex.axis = 1.1) 144 | mtext("Posterior Model Probability", 2, cex = 1.5, line = 3.2) 145 | mtext("Number of Factors", 1, cex = 1.4, line = 3.2) 146 | par(op) 147 | 148 | } 149 | -------------------------------------------------------------------------------- /examples/example.post_prob.R: -------------------------------------------------------------------------------- 1 | 2 | H0 <- structure(list(logml = -20.8084543022433, niter = 4, method = "normal"), 3 | .Names = c("logml", "niter", "method"), class = "bridge") 4 | H1 <- structure(list(logml = -17.9623077558729, niter = 4, method = "normal"), 5 | .Names = c("logml", "niter", "method"), class = "bridge") 6 | H2 <- structure(list(logml = -19, niter = 4, method = "normal"), 7 | .Names = c("logml", "niter", "method"), class = "bridge") 8 | 9 | 10 | post_prob(H0, H1, H2) 11 | post_prob(H1, H0) 12 | 13 | ## all produce the same (only names differ): 14 | 
post_prob(H0, H1, H2) 15 | post_prob(H0$logml, H1$logml, H2$logml) 16 | post_prob(c(H0$logml, H1$logml, H2$logml)) 17 | post_prob(H0$logml, c(H1$logml, H2$logml)) 18 | post_prob(H0$logml, c(H1$logml, H2$logml), model_names = c("H0", "H1", "H2")) 19 | 20 | 21 | ### with bridge list elements: 22 | H0L <- structure(list(logml = c(-20.8088381186739, -20.8072772698116, 23 | -20.808454454621, -20.8083419072281, -20.8087870541247, -20.8084887398113, 24 | -20.8086023582344, -20.8079083169745, -20.8083048489095, -20.8090050811436 25 | ), niter = c(4, 4, 4, 4, 4, 4, 4, 4, 4, 4), method = "normal", 26 | repetitions = 10), .Names = c("logml", "niter", "method", 27 | "repetitions"), class = "bridge_list") 28 | 29 | H1L <- structure(list(logml = c(-17.961665507006, -17.9611290723151, 30 | -17.9607509604499, -17.9608629535992, -17.9602093576442, -17.9600223300432, 31 | -17.9610157118017, -17.9615557696561, -17.9608437034849, -17.9606743200309 32 | ), niter = c(4, 4, 4, 4, 4, 4, 4, 4, 3, 4), method = "normal", 33 | repetitions = 10), .Names = c("logml", "niter", "method", 34 | "repetitions"), class = "bridge_list") 35 | 36 | post_prob(H1L, H0L) 37 | post_prob(H1L, H0L, H0) # last element recycled with warning. 
38 | 39 | -------------------------------------------------------------------------------- /examples/example.turtles.R: -------------------------------------------------------------------------------- 1 | 2 | \dontrun{ 3 | 4 | ################################################################################ 5 | # BAYESIAN GENERALIZED LINEAR MIXED MODEL (PROBIT REGRESSION) 6 | ################################################################################ 7 | 8 | library(bridgesampling) 9 | library(rstan) 10 | 11 | data("turtles") 12 | 13 | #------------------------------------------------------------------------------- 14 | # plot data 15 | #------------------------------------------------------------------------------- 16 | 17 | # reproduce Figure 1 from Sinharay & Stern (2005) 18 | xticks <- pretty(turtles$clutch) 19 | yticks <- pretty(turtles$x) 20 | 21 | plot(1, type = "n", axes = FALSE, ylab = "", xlab = "", xlim = range(xticks), 22 | ylim = range(yticks)) 23 | points(turtles$clutch, turtles$x, pch = ifelse(turtles$y, 21, 4), cex = 1.3, 24 | col = ifelse(turtles$y, "black", "darkred"), bg = "grey", lwd = 1.3) 25 | axis(1, cex.axis = 1.4) 26 | mtext("Clutch Identifier", side = 1, line = 2.9, cex = 1.8) 27 | axis(2, las = 1, cex.axis = 1.4) 28 | mtext("Birth Weight (Grams)", side = 2, line = 2.6, cex = 1.8) 29 | 30 | #------------------------------------------------------------------------------- 31 | # Analysis: Natural Selection Study (compute same BF as Sinharay & Stern, 2005) 32 | #------------------------------------------------------------------------------- 33 | 34 | ### H0 (model without random intercepts) ### 35 | H0_code <- 36 | "data { 37 | int N; 38 | int y[N]; 39 | real x[N]; 40 | } 41 | parameters { 42 | real alpha0_raw; 43 | real alpha1_raw; 44 | } 45 | transformed parameters { 46 | real alpha0 = sqrt(10.0) * alpha0_raw; 47 | real alpha1 = sqrt(10.0) * alpha1_raw; 48 | } 49 | model { 50 | // priors 51 | target += normal_lpdf(alpha0_raw | 0, 1); 52 
| target += normal_lpdf(alpha1_raw | 0, 1); 53 | 54 | // likelihood 55 | for (i in 1:N) { 56 | target += bernoulli_lpmf(y[i] | Phi(alpha0 + alpha1 * x[i])); 57 | } 58 | }" 59 | 60 | ### H1 (model with random intercepts) ### 61 | H1_code <- 62 | "data { 63 | int N; 64 | int y[N]; 65 | real x[N]; 66 | int C; 67 | int clutch[N]; 68 | } 69 | parameters { 70 | real alpha0_raw; 71 | real alpha1_raw; 72 | vector[C] b_raw; 73 | real sigma2; 74 | } 75 | transformed parameters { 76 | vector[C] b; 77 | real sigma = sqrt(sigma2); 78 | real alpha0 = sqrt(10.0) * alpha0_raw; 79 | real alpha1 = sqrt(10.0) * alpha1_raw; 80 | b = sigma * b_raw; 81 | } 82 | model { 83 | // priors 84 | target += - 2 * log(1 + sigma2); // p(sigma2) = 1 / (1 + sigma2) ^ 2 85 | target += normal_lpdf(alpha0_raw | 0, 1); 86 | target += normal_lpdf(alpha1_raw | 0, 1); 87 | 88 | // random effects 89 | target += normal_lpdf(b_raw | 0, 1); 90 | 91 | // likelihood 92 | for (i in 1:N) { 93 | target += bernoulli_lpmf(y[i] | Phi(alpha0 + alpha1 * x[i] + b[clutch[i]])); 94 | } 95 | }" 96 | 97 | set.seed(1) 98 | ### fit models ### 99 | stanfit_H0 <- stan(model_code = H0_code, 100 | data = list(y = turtles$y, x = turtles$x, N = nrow(turtles)), 101 | iter = 15500, warmup = 500, chains = 4, seed = 1) 102 | stanfit_H1 <- stan(model_code = H1_code, 103 | data = list(y = turtles$y, x = turtles$x, N = nrow(turtles), 104 | C = max(turtles$clutch), clutch = turtles$clutch), 105 | iter = 15500, warmup = 500, chains = 4, seed = 1) 106 | 107 | set.seed(1) 108 | ### compute (log) marginal likelihoods ### 109 | bridge_H0 <- bridge_sampler(stanfit_H0) 110 | bridge_H1 <- bridge_sampler(stanfit_H1) 111 | 112 | ### compute approximate percentage errors ### 113 | error_measures(bridge_H0)$percentage 114 | error_measures(bridge_H1)$percentage 115 | 116 | ### summary ### 117 | summary(bridge_H0) 118 | summary(bridge_H1) 119 | 120 | ### compute Bayes factor ("true" value: BF01 = 1.273) ### 121 | bf(bridge_H0, bridge_H1) 122 | 123 | } 
124 | -------------------------------------------------------------------------------- /inst/CITATION: -------------------------------------------------------------------------------- 1 | bibentry(bibtype = "Article", 2 | title = "{bridgesampling}: An {R} Package for Estimating Normalizing Constants", 3 | author = c(person(given = c("Quentin", "F."), 4 | family = "Gronau", 5 | email = "Quentin.F.Gronau@gmail.com"), 6 | person(given = "Henrik", 7 | family = "Singmann", 8 | email = "Henrik.Singmann@warwick.ac.uk"), 9 | person(given = "Eric-Jan", 10 | family = "Wagenmakers", 11 | email = "EJ.Wagenmakers@gmail.com")), 12 | journal = "Journal of Statistical Software", 13 | year = "2020", 14 | volume = "92", 15 | number = "10", 16 | pages = "1--29", 17 | doi = "10.18637/jss.v092.i10", 18 | 19 | header = "To cite bridgesampling in publications use:" 20 | ) 21 | 22 | -------------------------------------------------------------------------------- /inst/extdata/vignette_example_jags.RData: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quentingronau/bridgesampling/6793386ffe30205d649d2fe5ce9597c0bff83142/inst/extdata/vignette_example_jags.RData -------------------------------------------------------------------------------- /inst/extdata/vignette_example_nimble.RData: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quentingronau/bridgesampling/6793386ffe30205d649d2fe5ce9597c0bff83142/inst/extdata/vignette_example_nimble.RData -------------------------------------------------------------------------------- /inst/extdata/vignette_example_stan.RData: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quentingronau/bridgesampling/6793386ffe30205d649d2fe5ce9597c0bff83142/inst/extdata/vignette_example_stan.RData 
-------------------------------------------------------------------------------- /inst/extdata/vignette_stan_ttest.RData: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quentingronau/bridgesampling/6793386ffe30205d649d2fe5ce9597c0bff83142/inst/extdata/vignette_stan_ttest.RData -------------------------------------------------------------------------------- /man/bf.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/bf.R 3 | \name{bf} 4 | \alias{bf} 5 | \alias{bayes_factor} 6 | \alias{bayes_factor.default} 7 | \alias{bf.bridge} 8 | \alias{bf.bridge_list} 9 | \alias{bf.default} 10 | \title{Bayes Factor(s) from Marginal Likelihoods} 11 | \usage{ 12 | bf(x1, x2, log = FALSE, ...) 13 | 14 | bayes_factor(x1, x2, log = FALSE, ...) 15 | 16 | \method{bayes_factor}{default}(x1, x2, log = FALSE, ...) 17 | 18 | \method{bf}{bridge}(x1, x2, log = FALSE, ...) 19 | 20 | \method{bf}{bridge_list}(x1, x2, log = FALSE, ...) 21 | 22 | \method{bf}{default}(x1, x2, log = FALSE, ...) 23 | } 24 | \arguments{ 25 | \item{x1}{Object of class \code{"bridge"} or \code{"bridge_list"} as returned from \code{\link{bridge_sampler}}. Additionally, the default method assumes that \code{x1} is a single numeric log marginal likelihood (e.g., from \code{\link{logml}}) and will throw an error otherwise.} 26 | 27 | \item{x2}{Object of class \code{"bridge"} or \code{"bridge_list"} as returned from \code{\link{bridge_sampler}}. Additionally, the default method assumes that \code{x2} is a single numeric log marginal likelihood (e.g., from \code{\link{logml}}) and will throw an error otherwise.} 28 | 29 | \item{log}{Boolean. If \code{TRUE}, the function returns the log of the Bayes factor. 
Default is \code{FALSE}.} 30 | 31 | \item{...}{currently not used here, but can be used by other methods.} 32 | } 33 | \value{ 34 | For the default method returns a list of class \code{"bf_default"} with components: 35 | \itemize{ 36 | \item \code{bf}: (scalar) value of the Bayes factor in favor of the model associated with \code{x1} over the model associated with \code{x2}. 37 | \item \code{log}: Boolean which indicates whether \code{bf} corresponds to the log Bayes factor. 38 | } 39 | 40 | 41 | For the method for \code{"bridge"} objects returns a list of class \code{"bf_bridge"} with components: 42 | \itemize{ 43 | \item \code{bf}: (scalar) value of the Bayes factor in favor of the model associated with \code{x1} over the model associated with \code{x2}. 44 | \item \code{log}: Boolean which indicates whether \code{bf} corresponds to the log Bayes factor. 45 | } 46 | 47 | 48 | For the method for \code{"bridge_list"} objects returns a list of class \code{"bf_bridge_list"} with components: 49 | \itemize{ 50 | \item \code{bf}: a numeric vector consisting of Bayes factors where each element gives the Bayes factor for one set of logmls in favor of the model associated with \code{x1} over the model associated with \code{x2}. The length of this vector is given by the \code{"bridge_list"} element with the most \code{repetitions}. Elements with fewer repetitions will be recycled (with warning). 51 | \item \code{bf_median_based}: (scalar) value of the Bayes factor in favor of the model associated with \code{x1} over the model associated with \code{x2} that is based on the median values of the logml estimates. 52 | \item \code{log}: Boolean which indicates whether \code{bf} corresponds to the log Bayes factor. 53 | } 54 | } 55 | \description{ 56 | Generic function that computes Bayes factor(s) from marginal likelihoods. \code{bayes_factor()} is simply an (S3 generic) alias for \code{bf()}. 
57 | } 58 | \details{ 59 | Computes the Bayes factor (Kass & Raftery, 1995) in favor of the model associated with \code{x1} over the model associated with \code{x2}. 60 | } 61 | \note{ 62 | For examples, see \code{\link{bridge_sampler}} and the accompanying vignettes: \cr \code{vignette("bridgesampling_example_jags")} \cr \code{vignette("bridgesampling_example_stan")} 63 | } 64 | \references{ 65 | Kass, R. E., & Raftery, A. E. (1995). Bayes factors. \emph{Journal of the American Statistical Association, 90(430)}, 773-795. \doi{10.1080/01621459.1995.10476572} 66 | } 67 | \author{ 68 | Quentin F. Gronau 69 | } 70 | -------------------------------------------------------------------------------- /man/bridge-methods.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/bridge_methods.R 3 | \name{bridge-methods} 4 | \alias{bridge-methods} 5 | \alias{summary.bridge} 6 | \alias{summary.bridge_list} 7 | \alias{print.summary.bridge} 8 | \alias{print.summary.bridge_list} 9 | \alias{print.bridge} 10 | \alias{print.bridge_list} 11 | \title{Methods for bridge and bridge_list objects} 12 | \usage{ 13 | \method{summary}{bridge}(object, na.rm = TRUE, ...) 14 | 15 | \method{summary}{bridge_list}(object, na.rm = TRUE, ...) 16 | 17 | \method{print}{summary.bridge}(x, ...) 18 | 19 | \method{print}{summary.bridge_list}(x, ...) 20 | 21 | \method{print}{bridge}(x, ...) 22 | 23 | \method{print}{bridge_list}(x, na.rm = TRUE, ...) 24 | } 25 | \arguments{ 26 | \item{object, x}{object of class \code{bridge} or \code{bridge_list} as returned from \code{\link{bridge_sampler}}.} 27 | 28 | \item{na.rm}{logical. Should NA estimates in \code{bridge_list} objects be removed? 
Passed to \code{\link{error_measures}}.} 29 | 30 | \item{...}{further arguments, currently ignored.} 31 | } 32 | \value{ 33 | The \code{summary} methods return a \code{data.frame} which contains the log marginal likelihood plus the result returned from invoking \code{\link{error_measures}}. 34 | 35 | The \code{print} methods simply print and return nothing. 36 | } 37 | \description{ 38 | Methods defined for objects returned from the generic \code{\link{bridge_sampler}} function. 39 | } 40 | -------------------------------------------------------------------------------- /man/error_measures.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/error_measures.R 3 | \name{error_measures} 4 | \alias{error_measures} 5 | \alias{error_measures.bridge} 6 | \alias{error_measures.bridge_list} 7 | \title{Error Measures for Estimated Marginal Likelihood} 8 | \usage{ 9 | error_measures(bridge_object, ...) 10 | 11 | \method{error_measures}{bridge}(bridge_object, ...) 12 | 13 | \method{error_measures}{bridge_list}(bridge_object, na.rm = TRUE, ...) 14 | } 15 | \arguments{ 16 | \item{bridge_object}{an object of class \code{"bridge"} or \code{"bridge_list"} as returned from \code{\link{bridge_sampler}}.} 17 | 18 | \item{...}{additional arguments (currently ignored).} 19 | 20 | \item{na.rm}{a logical indicating whether missing values in logml estimates should be removed. Ignored for the \code{bridge} method.} 21 | } 22 | \value{ 23 | If \code{bridge_object} is of class \code{"bridge"} and has been obtained with \code{method = "normal"} and \code{repetitions = 1}, returns a list with components: 24 | \itemize{ 25 | \item \code{re2}: approximate relative mean-squared error for marginal likelihood estimate. 26 | \item \code{cv}: approximate coefficient of variation for marginal likelihood estimate (assumes that bridge estimate is unbiased). 
27 | \item \code{percentage}: approximate percentage error of marginal likelihood estimate. 28 | } 29 | If \code{bridge_object} is of class \code{"bridge_list"}, returns a list with components: 30 | \itemize{ 31 | \item \code{min}: minimum of the log marginal likelihood estimates. 32 | \item \code{max}: maximum of the log marginal likelihood estimates. 33 | \item \code{IQR}: interquartile range of the log marginal likelihood estimates. 34 | } 35 | } 36 | \description{ 37 | Computes error measures for estimated marginal likelihood. 38 | } 39 | \details{ 40 | Computes error measures for marginal likelihood bridge sampling estimates. The approximate errors for a \code{bridge_object} of class \code{"bridge"} that has been obtained with \code{method = "normal"} and \code{repetitions = 1} are based on Fruehwirth-Schnatter (2004). 41 | Not applicable in case the object of class \code{"bridge"} has been obtained with \code{method = "warp3"} and \code{repetitions = 1}. 42 | To assess the uncertainty of the estimate in this case, it is recommended to run the \code{"warp3"} procedure multiple times. 43 | } 44 | \note{ 45 | For examples, see \code{\link{bridge_sampler}} and the accompanying vignettes: \cr \code{vignette("bridgesampling_example_jags")} \cr \code{vignette("bridgesampling_example_stan")} 46 | } 47 | \references{ 48 | Fruehwirth-Schnatter, S. (2004). Estimating marginal likelihoods for mixture and Markov switching models using bridge sampling techniques. \emph{The Econometrics Journal, 7}, 143-167. \doi{10.1111/j.1368-423X.2004.00125.x} 49 | } 50 | \seealso{ 51 | The \code{summary} methods for \code{bridge} and \code{bridge_list} objects automatically invoke this function, see \code{\link{bridge-methods}}. 52 | } 53 | \author{ 54 | Quentin F. 
Gronau 55 | } 56 | -------------------------------------------------------------------------------- /man/ier.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/ier-data.R 3 | \docType{data} 4 | \encoding{UTF-8} 5 | \name{ier} 6 | \alias{ier} 7 | \title{Standardized International Exchange Rate Changes from 1975 to 1986} 8 | \format{ 9 | A matrix with 143 rows and 6 columns. 10 | } 11 | \source{ 12 | West, M., Harrison, J. (1997). \emph{Bayesian forecasting and dynamic models} (2nd ed.). Springer-Verlag, New York. 13 | 14 | Lopes, H. F., West, M. (2004). Bayesian model assessment in factor analysis. \emph{Statistica Sinica, 14}, 41-67. 15 | } 16 | \usage{ 17 | ier 18 | } 19 | \description{ 20 | This data set contains the changes in monthly international exchange rates for pounds sterling from January 1975 to December 1986 obtained from West and Harrison (1997, pp. 612-615). Currencies tracked are US Dollar (column \code{us_dollar}), Canadian Dollar (column \code{canadian_dollar}), Japanese Yen (column \code{yen}), French Franc (column \code{franc}), Italian Lira (column \code{lira}), and the (West) German Mark (column \code{mark}). Each series has been standardized with respect to its sample mean and standard deviation. 
21 | } 22 | \examples{ 23 | 24 | \dontrun{ 25 | 26 | ################################################################################ 27 | # BAYESIAN FACTOR ANALYSIS (AS PROPOSED BY LOPES & WEST, 2004) 28 | ################################################################################ 29 | 30 | library(bridgesampling) 31 | library(rstan) 32 | 33 | cores <- 4 34 | options(mc.cores = cores) 35 | 36 | data("ier") 37 | 38 | #------------------------------------------------------------------------------- 39 | # plot data 40 | #------------------------------------------------------------------------------- 41 | 42 | currency <- colnames(ier) 43 | label <- c("US Dollar", "Canadian Dollar", "Yen", "Franc", "Lira", "Mark") 44 | op <- par(mfrow = c(3, 2), mar = c(6, 6, 3, 3)) 45 | 46 | for (i in seq_along(currency)) { 47 | plot(ier[,currency[i]], type = "l", col = "darkblue", axes = FALSE, 48 | ylim = c(-4, 4), ylab = "", xlab = "", lwd = 2) 49 | axis(1, at = 0:12*12, labels = 1975:1987, cex.axis = 1.7) 50 | axis(2, at = pretty(c(-4, 4)), las = 1, cex.axis = 1.7) 51 | mtext("Year", 1, cex = 1.5, line = 3.2) 52 | mtext("Exchange Rate Changes", 2, cex = 1.4, line = 3.2) 53 | mtext(label[i], 3, cex = 1.6, line = .1) 54 | } 55 | 56 | par(op) 57 | 58 | #------------------------------------------------------------------------------- 59 | # stan model 60 | #------------------------------------------------------------------------------- 61 | 62 | model_code <- 63 | "data { 64 | int T; // number of observations 65 | int m; // number of variables 66 | int k; // number of factors 67 | matrix[T,m] Y; // data matrix 68 | } 69 | transformed data { 70 | int r; 71 | vector[m] zeros; 72 | r = m * k - k * (k - 1) / 2; // number of non-zero factor loadings 73 | zeros = rep_vector(0.0, m); 74 | } 75 | parameters { 76 | real beta_lower[r - k]; // lower-diagonal elements of beta 77 | real beta_diag [k]; // diagonal elements of beta 78 | vector[m] sigma2; // residual variances 79 | } 80 | 
transformed parameters { 81 | matrix[m,k] beta; 82 | cov_matrix[m] Omega; 83 | // construct lower-triangular factor loadings matrix 84 | { 85 | int index_lower = 1; 86 | for (j in 1:k) { 87 | for (i in 1:m) { 88 | if (i == j) { 89 | beta[j,j] = beta_diag[j]; 90 | } else if (i >= j) { 91 | beta[i,j] = beta_lower[index_lower]; 92 | index_lower = index_lower + 1; 93 | } else { 94 | beta[i,j] = 0.0; 95 | } 96 | } 97 | } 98 | } 99 | Omega = beta * beta' + diag_matrix(sigma2); 100 | } 101 | model { 102 | // priors 103 | target += normal_lpdf(beta_diag | 0, 1) - k * normal_lccdf(0 | 0, 1); 104 | target += normal_lpdf(beta_lower | 0, 1); 105 | target += inv_gamma_lpdf(sigma2 | 2.2 / 2.0, 0.1 / 2.0); 106 | 107 | // likelihood 108 | for(t in 1:T) { 109 | target += multi_normal_lpdf(Y[t] | zeros, Omega); 110 | } 111 | }" 112 | 113 | # compile model 114 | model <- stan_model(model_code = model_code) 115 | 116 | 117 | #------------------------------------------------------------------------------- 118 | # fit models and compute log marginal likelihoods 119 | #------------------------------------------------------------------------------- 120 | 121 | # function for generating starting values 122 | init_fun <- function(nchains, k, m) { 123 | r <- m * k - k * (k - 1) / 2 124 | out <- vector("list", nchains) 125 | for (i in seq_len(nchains)) { 126 | beta_lower <- array(runif(r - k, 0.05, 1), dim = r - k) 127 | beta_diag <- array(runif(k, .05, 1), dim = k) 128 | sigma2 <- array(runif(m, .05, 1.5), dim = m) 129 | out[[i]] <- list(beta_lower = beta_lower, 130 | beta_diag = beta_diag, 131 | sigma2 = sigma2) 132 | } 133 | return(out) 134 | } 135 | 136 | set.seed(1) 137 | stanfit <- bridge <- vector("list", 3) 138 | for (k in 1:3) { 139 | stanfit[[k]] <- sampling(model, 140 | data = list(Y = ier, T = nrow(ier), 141 | m = ncol(ier), k = k), 142 | iter = 11000, warmup = 1000, chains = 4, 143 | init = init_fun(nchains = 4, k = k, m = ncol(ier)), 144 | cores = cores, seed = 1) 145 | 
bridge[[k]] <- bridge_sampler(stanfit[[k]], method = "warp3", 146 | repetitions = 10, cores = cores) 147 | } 148 | 149 | # example output 150 | summary(bridge[[2]]) 151 | 152 | #------------------------------------------------------------------------------- 153 | # compute posterior model probabilities 154 | #------------------------------------------------------------------------------- 155 | 156 | pp <- post_prob(bridge[[1]], bridge[[2]], bridge[[3]], 157 | model_names = c("k = 1", "k = 2", "k = 3")) 158 | pp 159 | 160 | op <- par(mar = c(6, 6, 3, 3)) 161 | boxplot(pp, axes = FALSE, 162 | ylim = c(0, 1), ylab = "", 163 | xlab = "") 164 | axis(1, at = 1:3, labels = colnames(pp), cex.axis = 1.7) 165 | axis(2, cex.axis = 1.1) 166 | mtext("Posterior Model Probability", 2, cex = 1.5, line = 3.2) 167 | mtext("Number of Factors", 1, cex = 1.4, line = 3.2) 168 | par(op) 169 | 170 | } 171 | } 172 | \keyword{dataset} 173 | -------------------------------------------------------------------------------- /man/logml.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/logml.R 3 | \name{logml} 4 | \alias{logml} 5 | \alias{logml.bridge} 6 | \alias{logml.bridge_list} 7 | \title{Log Marginal Likelihoods from Bridge Objects} 8 | \usage{ 9 | logml(x, ...) 10 | 11 | \method{logml}{bridge}(x, ...) 12 | 13 | \method{logml}{bridge_list}(x, fun = median, ...) 14 | } 15 | \arguments{ 16 | \item{x}{Object of class \code{"bridge"} or \code{"bridge_list"} as returned from \code{\link{bridge_sampler}}.} 17 | 18 | \item{...}{Further arguments passed to \code{fun}.} 19 | 20 | \item{fun}{Function which returns a scalar value and is applied to the \code{logml} vector of \code{"bridge_list"} objects. Default is \code{\link{median}}.} 21 | } 22 | \value{ 23 | scalar numeric 24 | } 25 | \description{ 26 | Generic function that returns log marginal likelihood from bridge objects. 
For objects of class \code{"bridge_list"}, which contains multiple log marginal likelihoods, \code{fun} is performed on the vector and its result returned. 27 | } 28 | -------------------------------------------------------------------------------- /man/post_prob.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/post_prob.R 3 | \name{post_prob} 4 | \alias{post_prob} 5 | \alias{post_prob.bridge} 6 | \alias{post_prob.bridge_list} 7 | \alias{post_prob.default} 8 | \title{Posterior Model Probabilities from Marginal Likelihoods} 9 | \usage{ 10 | post_prob(x, ..., prior_prob = NULL, model_names = NULL) 11 | 12 | \method{post_prob}{bridge}(x, ..., prior_prob = NULL, model_names = NULL) 13 | 14 | \method{post_prob}{bridge_list}(x, ..., prior_prob = NULL, model_names = NULL) 15 | 16 | \method{post_prob}{default}(x, ..., prior_prob = NULL, model_names = NULL) 17 | } 18 | \arguments{ 19 | \item{x}{Object of class \code{"bridge"} or \code{"bridge_list"} as returned 20 | from \code{\link{bridge_sampler}}. Additionally, the default method assumes 21 | that all passed objects are numeric log marginal likelihoods (e.g., from 22 | \code{\link{logml}}) and will throw an error otherwise.} 23 | 24 | \item{...}{further objects of class \code{"bridge"} or \code{"bridge_list"} 25 | as returned from \code{\link{bridge_sampler}}. Or numeric values for the 26 | default method.} 27 | 28 | \item{prior_prob}{numeric vector with prior model probabilities. If omitted, 29 | a uniform prior is used (i.e., all models are equally likely a priori). The 30 | default \code{NULL} corresponds to equal prior model weights.} 31 | 32 | \item{model_names}{If \code{NULL} (the default) will use model names derived 33 | from deparsing the call. 
Otherwise will use the passed values as model 34 | names.} 35 | } 36 | \value{ 37 | For the default method and the method for \code{"bridge"} objects, a 38 | named numeric vector with posterior model probabilities (i.e., which sum to 39 | one). 40 | 41 | For the method for \code{"bridge_list"} objects, a matrix consisting of 42 | posterior model probabilities where each row sums to one and gives the 43 | model probabilities for one set of logmls. The (named) columns correspond 44 | to the models and the number of rows is given by the \code{"bridge_list"} 45 | element with the most \code{repetitions}. Elements with fewer repetitions 46 | will be recycled (with warning). 47 | } 48 | \description{ 49 | Generic function that computes posterior model probabilities from marginal 50 | likelihoods. 51 | } 52 | \note{ 53 | For realistic examples, see \code{\link{bridge_sampler}} and the 54 | accompanying vignettes: \cr \code{vignette("bridgesampling_example_jags")} 55 | \cr \code{vignette("bridgesampling_example_stan")} 56 | } 57 | \examples{ 58 | 59 | H0 <- structure(list(logml = -20.8084543022433, niter = 4, method = "normal"), 60 | .Names = c("logml", "niter", "method"), class = "bridge") 61 | H1 <- structure(list(logml = -17.9623077558729, niter = 4, method = "normal"), 62 | .Names = c("logml", "niter", "method"), class = "bridge") 63 | H2 <- structure(list(logml = -19, niter = 4, method = "normal"), 64 | .Names = c("logml", "niter", "method"), class = "bridge") 65 | 66 | 67 | post_prob(H0, H1, H2) 68 | post_prob(H1, H0) 69 | 70 | ## all produce the same (only names differ): 71 | post_prob(H0, H1, H2) 72 | post_prob(H0$logml, H1$logml, H2$logml) 73 | post_prob(c(H0$logml, H1$logml, H2$logml)) 74 | post_prob(H0$logml, c(H1$logml, H2$logml)) 75 | post_prob(H0$logml, c(H1$logml, H2$logml), model_names = c("H0", "H1", "H2")) 76 | 77 | 78 | ### with bridge list elements: 79 | H0L <- structure(list(logml = c(-20.8088381186739, -20.8072772698116, 80 | -20.808454454621, 
-20.8083419072281, -20.8087870541247, -20.8084887398113, 81 | -20.8086023582344, -20.8079083169745, -20.8083048489095, -20.8090050811436 82 | ), niter = c(4, 4, 4, 4, 4, 4, 4, 4, 4, 4), method = "normal", 83 | repetitions = 10), .Names = c("logml", "niter", "method", 84 | "repetitions"), class = "bridge_list") 85 | 86 | H1L <- structure(list(logml = c(-17.961665507006, -17.9611290723151, 87 | -17.9607509604499, -17.9608629535992, -17.9602093576442, -17.9600223300432, 88 | -17.9610157118017, -17.9615557696561, -17.9608437034849, -17.9606743200309 89 | ), niter = c(4, 4, 4, 4, 4, 4, 4, 4, 3, 4), method = "normal", 90 | repetitions = 10), .Names = c("logml", "niter", "method", 91 | "repetitions"), class = "bridge_list") 92 | 93 | post_prob(H1L, H0L) 94 | post_prob(H1L, H0L, H0) # last element recycled with warning. 95 | 96 | } 97 | \author{ 98 | Quentin F. Gronau and Henrik Singmann 99 | } 100 | -------------------------------------------------------------------------------- /man/turtles.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/turtles-data.R 3 | \docType{data} 4 | \encoding{UTF-8} 5 | \name{turtles} 6 | \alias{turtles} 7 | \title{Turtles Data from Janzen, Tucker, and Paukstis (2000)} 8 | \format{ 9 | A data.frame with 244 rows and 3 variables. 10 | } 11 | \source{ 12 | Janzen, F. J., Tucker, J. K., & Paukstis, G. L. (2000). Experimental 13 | analysis of an early life-history stage: Selection on size of hatchling 14 | turtles. \emph{Ecology, 81(8)}, 2290-2304. 15 | \doi{10.2307/177115} 16 | 17 | Overstall, A. M., & Forster, J. J. (2010). Default Bayesian model 18 | determination methods for generalised linear mixed models. 19 | \emph{Computational Statistics & Data Analysis, 54}, 3269-3288. 20 | \doi{10.1016/j.csda.2010.03.008} 21 | 22 | Sinharay, S., & Stern, H. S. (2005). 
An empirical comparison of methods for 23 | computing Bayes factors in generalized linear mixed models. \emph{Journal 24 | of Computational and Graphical Statistics, 14(2)}, 415-435. 25 | \doi{10.1198/106186005X47471} 26 | } 27 | \usage{ 28 | turtles 29 | } 30 | \description{ 31 | This data set contains information about 244 newborn turtles from 31 32 | different clutches. For each turtle, the data set includes information about 33 | survival status (column \code{y}; 0 = died, 1 = survived), birth weight in 34 | grams (column \code{x}), and clutch (family) membership (column 35 | \code{clutch}; an integer between one and 31). The clutches have been ordered 36 | according to mean birth weight. 37 | } 38 | \examples{ 39 | 40 | \dontrun{ 41 | 42 | ################################################################################ 43 | # BAYESIAN GENERALIZED LINEAR MIXED MODEL (PROBIT REGRESSION) 44 | ################################################################################ 45 | 46 | library(bridgesampling) 47 | library(rstan) 48 | 49 | data("turtles") 50 | 51 | #------------------------------------------------------------------------------- 52 | # plot data 53 | #------------------------------------------------------------------------------- 54 | 55 | # reproduce Figure 1 from Sinharay & Stern (2005) 56 | xticks <- pretty(turtles$clutch) 57 | yticks <- pretty(turtles$x) 58 | 59 | plot(1, type = "n", axes = FALSE, ylab = "", xlab = "", xlim = range(xticks), 60 | ylim = range(yticks)) 61 | points(turtles$clutch, turtles$x, pch = ifelse(turtles$y, 21, 4), cex = 1.3, 62 | col = ifelse(turtles$y, "black", "darkred"), bg = "grey", lwd = 1.3) 63 | axis(1, cex.axis = 1.4) 64 | mtext("Clutch Identifier", side = 1, line = 2.9, cex = 1.8) 65 | axis(2, las = 1, cex.axis = 1.4) 66 | mtext("Birth Weight (Grams)", side = 2, line = 2.6, cex = 1.8) 67 | 68 | #------------------------------------------------------------------------------- 69 | # Analysis: Natural Selection Study 
(compute same BF as Sinharay & Stern, 2005) 70 | #------------------------------------------------------------------------------- 71 | 72 | ### H0 (model without random intercepts) ### 73 | H0_code <- 74 | "data { 75 | int N; 76 | int y[N]; 77 | real x[N]; 78 | } 79 | parameters { 80 | real alpha0_raw; 81 | real alpha1_raw; 82 | } 83 | transformed parameters { 84 | real alpha0 = sqrt(10.0) * alpha0_raw; 85 | real alpha1 = sqrt(10.0) * alpha1_raw; 86 | } 87 | model { 88 | // priors 89 | target += normal_lpdf(alpha0_raw | 0, 1); 90 | target += normal_lpdf(alpha1_raw | 0, 1); 91 | 92 | // likelihood 93 | for (i in 1:N) { 94 | target += bernoulli_lpmf(y[i] | Phi(alpha0 + alpha1 * x[i])); 95 | } 96 | }" 97 | 98 | ### H1 (model with random intercepts) ### 99 | H1_code <- 100 | "data { 101 | int N; 102 | int y[N]; 103 | real x[N]; 104 | int C; 105 | int clutch[N]; 106 | } 107 | parameters { 108 | real alpha0_raw; 109 | real alpha1_raw; 110 | vector[C] b_raw; 111 | real sigma2; 112 | } 113 | transformed parameters { 114 | vector[C] b; 115 | real sigma = sqrt(sigma2); 116 | real alpha0 = sqrt(10.0) * alpha0_raw; 117 | real alpha1 = sqrt(10.0) * alpha1_raw; 118 | b = sigma * b_raw; 119 | } 120 | model { 121 | // priors 122 | target += - 2 * log(1 + sigma2); // p(sigma2) = 1 / (1 + sigma2) ^ 2 123 | target += normal_lpdf(alpha0_raw | 0, 1); 124 | target += normal_lpdf(alpha1_raw | 0, 1); 125 | 126 | // random effects 127 | target += normal_lpdf(b_raw | 0, 1); 128 | 129 | // likelihood 130 | for (i in 1:N) { 131 | target += bernoulli_lpmf(y[i] | Phi(alpha0 + alpha1 * x[i] + b[clutch[i]])); 132 | } 133 | }" 134 | 135 | set.seed(1) 136 | ### fit models ### 137 | stanfit_H0 <- stan(model_code = H0_code, 138 | data = list(y = turtles$y, x = turtles$x, N = nrow(turtles)), 139 | iter = 15500, warmup = 500, chains = 4, seed = 1) 140 | stanfit_H1 <- stan(model_code = H1_code, 141 | data = list(y = turtles$y, x = turtles$x, N = nrow(turtles), 142 | C = max(turtles$clutch), clutch = 
turtles$clutch), 143 | iter = 15500, warmup = 500, chains = 4, seed = 1) 144 | 145 | set.seed(1) 146 | ### compute (log) marginal likelihoods ### 147 | bridge_H0 <- bridge_sampler(stanfit_H0) 148 | bridge_H1 <- bridge_sampler(stanfit_H1) 149 | 150 | ### compute approximate percentage errors ### 151 | error_measures(bridge_H0)$percentage 152 | error_measures(bridge_H1)$percentage 153 | 154 | ### summary ### 155 | summary(bridge_H0) 156 | summary(bridge_H1) 157 | 158 | ### compute Bayes factor ("true" value: BF01 = 1.273) ### 159 | bf(bridge_H0, bridge_H1) 160 | 161 | } 162 | } 163 | \keyword{dataset} 164 | -------------------------------------------------------------------------------- /revdep/.gitignore: -------------------------------------------------------------------------------- 1 | checks 2 | library 3 | checks.noindex 4 | library.noindex 5 | data.sqlite 6 | *.html 7 | -------------------------------------------------------------------------------- /revdep/README.md: -------------------------------------------------------------------------------- 1 | # Platform 2 | 3 | |field |value | 4 | |:--------|:----------------------------| 5 | |version |R version 3.6.1 (2019-07-05) | 6 | |os |Linux Mint 19 | 7 | |system |x86_64, linux-gnu | 8 | |ui |RStudio | 9 | |language |en_US | 10 | |collate |en_US.UTF-8 | 11 | |ctype |en_US.UTF-8 | 12 | |tz |Europe/Zurich | 13 | |date |2019-12-13 | 14 | 15 | # Dependencies 16 | 17 | |package |old |new |Δ | 18 | |:--------------|:-----------|:-------|:--| 19 | |bridgesampling |0.7-2 |0.8-0 |* | 20 | |BH |1.69.0-1 |NA |* | 21 | |Brobdingnag |1.2-6 |1.2-6 | | 22 | |coda |0.19-3 |0.19-3 | | 23 | |colorspace |1.4-1 |1.4-1 | | 24 | |farver |2.0.1 |2.0.1 | | 25 | |glue |1.3.1 |1.3.1 | | 26 | |labeling |0.3 |0.3 | | 27 | |lattice |0.20-38 |0.20-38 | | 28 | |lifecycle |0.1.0 |0.1.0 | | 29 | |magrittr |1.5 |1.5 | | 30 | |Matrix |1.2-18 |1.2-18 | | 31 | |munsell |0.5.0 |0.5.0 | | 32 | |mvnfast |0.2.5 |NA |* | 33 | |mvtnorm |NA |1.0-11 |* | 
34 | |R6 |2.4.1 |2.4.1 | | 35 | |RColorBrewer |1.1-2 |1.1-2 | | 36 | |Rcpp |1.0.3 |NA |* | 37 | |RcppArmadillo |0.9.800.3.0 |NA |* | 38 | |rlang |0.4.2 |0.4.2 | | 39 | |scales |1.1.0 |1.1.0 | | 40 | |stringi |1.4.3 |1.4.3 | | 41 | |stringr |1.4.0 |1.4.0 | | 42 | |viridisLite |0.3.0 |0.3.0 | | 43 | 44 | # Revdeps 45 | 46 | ## Failed to check (5) 47 | 48 | |package |version |error |warning |note | 49 | |:-------------|:-------|:-----|:-------|:----| 50 | |BayesianFROC |? | | | | 51 | |BayesianTools |? | | | | 52 | |bayestestR |? | | | | 53 | |brms |? | | | | 54 | |metaBMA |? | | | | 55 | 56 | -------------------------------------------------------------------------------- /revdep/email.yml: -------------------------------------------------------------------------------- 1 | release_date: ??? 2 | rel_release_date: ??? 3 | my_news_url: ??? 4 | release_version: ??? 5 | release_details: ??? 6 | -------------------------------------------------------------------------------- /revdep/failures.md: -------------------------------------------------------------------------------- 1 | # BayesianFROC 2 | 3 |
4 | 5 | * Version: 6 | * Source code: ??? 7 | * URL: https://github.com/quentingronau/bridgesampling 8 | * Number of recursive dependencies: 0 9 | 10 | Run `revdep_details(,"")` for more info 11 | 12 |
13 | 14 | ## Error before installation 15 | 16 | ### Devel 17 | 18 | ``` 19 | 20 | 21 | 22 | 23 | 24 | 25 | ``` 26 | ### CRAN 27 | 28 | ``` 29 | 30 | 31 | 32 | 33 | 34 | 35 | ``` 36 | # BayesianTools 37 | 38 |
39 | 40 | * Version: 41 | * Source code: ??? 42 | * URL: https://github.com/quentingronau/bridgesampling 43 | * Number of recursive dependencies: 0 44 | 45 | Run `revdep_details(,"")` for more info 46 | 47 |
48 | 49 | ## Error before installation 50 | 51 | ### Devel 52 | 53 | ``` 54 | 55 | 56 | 57 | 58 | 59 | 60 | ``` 61 | ### CRAN 62 | 63 | ``` 64 | 65 | 66 | 67 | 68 | 69 | 70 | ``` 71 | # bayestestR 72 | 73 |
74 | 75 | * Version: 76 | * Source code: ??? 77 | * URL: https://github.com/quentingronau/bridgesampling 78 | * Number of recursive dependencies: 0 79 | 80 | Run `revdep_details(,"")` for more info 81 | 82 |
83 | 84 | ## Error before installation 85 | 86 | ### Devel 87 | 88 | ``` 89 | 90 | 91 | 92 | 93 | 94 | 95 | ``` 96 | ### CRAN 97 | 98 | ``` 99 | 100 | 101 | 102 | 103 | 104 | 105 | ``` 106 | # brms 107 | 108 |
109 | 110 | * Version: 111 | * Source code: ??? 112 | * URL: https://github.com/quentingronau/bridgesampling 113 | * Number of recursive dependencies: 0 114 | 115 | Run `revdep_details(,"")` for more info 116 | 117 |
118 | 119 | ## Error before installation 120 | 121 | ### Devel 122 | 123 | ``` 124 | 125 | 126 | 127 | 128 | 129 | 130 | ``` 131 | ### CRAN 132 | 133 | ``` 134 | 135 | 136 | 137 | 138 | 139 | 140 | ``` 141 | # metaBMA 142 | 143 |
144 | 145 | * Version: 146 | * Source code: ??? 147 | * URL: https://github.com/quentingronau/bridgesampling 148 | * Number of recursive dependencies: 0 149 | 150 | Run `revdep_details(,"")` for more info 151 | 152 |
153 | 154 | ## Error before installation 155 | 156 | ### Devel 157 | 158 | ``` 159 | 160 | 161 | 162 | 163 | 164 | 165 | ``` 166 | ### CRAN 167 | 168 | ``` 169 | 170 | 171 | 172 | 173 | 174 | 175 | ``` 176 | -------------------------------------------------------------------------------- /revdep/problems.md: -------------------------------------------------------------------------------- 1 | *Wow, no problems at all. :)* -------------------------------------------------------------------------------- /tests/testthat.R: -------------------------------------------------------------------------------- 1 | Sys.setenv("R_TESTS" = "") 2 | library(testthat) 3 | 4 | test_check("bridgesampling") 5 | -------------------------------------------------------------------------------- /tests/testthat/test-bf.R: -------------------------------------------------------------------------------- 1 | 2 | context('bridge sampling bf function') 3 | 4 | test_that("bf various basic checks", { 5 | 6 | # library(bridgesampling) 7 | library(mvtnorm) 8 | 9 | x <- rmvnorm(1e4, mean = rep(0, 2), sigma = diag(2)) 10 | colnames(x) <- c("x1", "x2") 11 | log_density <- function(s, data) { 12 | -.5*t(s)%*%s 13 | } 14 | 15 | lb <- rep(-Inf, 2) 16 | ub <- rep(Inf, 2) 17 | names(lb) <- names(ub) <- colnames(x) 18 | 19 | # repetitions = 1 20 | bridge_normal <- bridge_sampler(samples = x, log_posterior = log_density, 21 | data = NULL, lb = lb, ub = ub, 22 | method = "normal", silent = TRUE) 23 | bridge_warp3 <- bridge_sampler(samples = x, log_posterior = log_density, 24 | data = NULL, lb = lb, ub = ub, 25 | method = "warp3", silent = TRUE) 26 | expect_error(bf(bridge_normal, 4), "class 'bridge' or 'bridge_list'") 27 | 28 | BF <- bf(bridge_normal, bridge_warp3) 29 | log_BF <- bf(bridge_normal, bridge_warp3, log = TRUE) 30 | 31 | expect_output(print(BF), "Estimated Bayes factor") 32 | expect_output(print(log_BF), "Estimated log Bayes factor") 33 | 34 | BF2 <- bayes_factor(bridge_normal, bridge_warp3) 35 | 
log_BF2 <- bayes_factor(bridge_normal, bridge_warp3, log = TRUE) 36 | 37 | expect_output(print(BF2), "Estimated Bayes factor") 38 | expect_output(print(log_BF2), "Estimated log Bayes factor") 39 | 40 | 41 | # repetitions > 1 42 | bridge_normal_mult <- bridge_sampler(samples = x, log_posterior = log_density, 43 | data = NULL, lb = lb, ub = ub, 44 | method = "normal", silent = TRUE, repetitions = 2) 45 | bridge_warp3_mult <- bridge_sampler(samples = x, log_posterior = log_density, 46 | data = NULL, lb = lb, ub = ub, 47 | method = "warp3", silent = TRUE, repetitions = 2) 48 | 49 | BF_mult <- bf(bridge_normal_mult, bridge_warp3_mult) 50 | log_BF_mult <- bf(bridge_normal_mult, bridge_warp3_mult, log = TRUE) 51 | 52 | expect_output(print(BF_mult), "based on medians") 53 | expect_output(print(log_BF_mult), "based on medians") 54 | 55 | ## bf with multi and singular objects 56 | expect_is(suppressWarnings(bf(bridge_normal_mult, bridge_normal)), "bf_bridge_list") 57 | expect_is(bf(bridge_normal, bridge_normal_mult), "bf_bridge") 58 | expect_error(bf(bridge_normal_mult, 4), "class 'bridge' or 'bridge_list'") 59 | 60 | # default 61 | BF <- bf(1, 2) 62 | log_BF <- bf(1, 2, log = TRUE) 63 | 64 | expect_output(print(BF), "Bayes factor") 65 | expect_output(print(log_BF), "Log Bayes factor") 66 | 67 | }) 68 | -------------------------------------------------------------------------------- /tests/testthat/test-bridge_sampler_Rcpp.R: -------------------------------------------------------------------------------- 1 | 2 | context('basic bridge sampling behavior normal Rcpp') 3 | 4 | test_that("bridge sampler matches anlytical value normal example", { 5 | 6 | testthat::skip_on_cran() 7 | testthat::skip_on_travis() 8 | 9 | # library(bridgesampling) 10 | library(mvtnorm) 11 | if(require(RcppEigen)) { 12 | 13 | x <- rmvnorm(1e4, mean = rep(0, 2), sigma = diag(2)) 14 | colnames(x) <- c("x1", "x2") 15 | 16 | lb <- rep(-Inf, 2) 17 | ub <- rep(Inf, 2) 18 | names(lb) <- names(ub) <- 
colnames(x) 19 | 20 | Rcpp::sourceCpp("unnormalized_normal_density.cpp") 21 | 22 | bridge_normal <- bridge_sampler(samples = x, log_posterior = log_densityRcpp, 23 | data = NULL, lb = lb, ub = ub, 24 | method = "normal", 25 | silent = TRUE) 26 | bridge_warp3 <- bridge_sampler(samples = x, log_posterior = log_densityRcpp, 27 | data = NULL, lb = lb, ub = ub, 28 | method = "warp3", 29 | silent = TRUE) 30 | 31 | expect_equal(bridge_normal$logml, expected = log(2*pi), tolerance = 0.01) 32 | expect_equal(bridge_warp3$logml, expected = log(2*pi), tolerance = 0.01) 33 | 34 | # test dots argument 35 | mu <- c(1, 2) 36 | x <- rmvnorm(1e4, mean = mu, sigma = diag(2)) 37 | colnames(x) <- c("x1", "x2") 38 | 39 | lb <- rep(-Inf, 2) 40 | ub <- rep(Inf, 2) 41 | names(lb) <- names(ub) <- colnames(x) 42 | 43 | Rcpp::sourceCpp("unnormalized_normal_density_mu.cpp") 44 | 45 | bridge_normal_dots <- bridge_sampler(samples = x, log_posterior = log_densityRcpp_mu, 46 | mu, data = NULL, lb = lb, ub = ub, 47 | method = "normal", 48 | silent = TRUE) 49 | bridge_warp3_dots <- bridge_sampler(samples = x, log_posterior = log_densityRcpp_mu, 50 | mu, data = NULL, lb = lb, ub = ub, 51 | method = "warp3", 52 | silent = TRUE) 53 | 54 | expect_equal(bridge_normal_dots$logml, expected = log(2*pi), tolerance = 0.01) 55 | expect_equal(bridge_warp3_dots$logml, expected = log(2*pi), tolerance = 0.01) 56 | 57 | } 58 | 59 | }) 60 | -------------------------------------------------------------------------------- /tests/testthat/test-bridge_sampler_Rcpp_parallel.R: -------------------------------------------------------------------------------- 1 | 2 | context('basic bridge sampling behavior normal Rcpp parallel') 3 | 4 | test_that("bridge sampler matches anlytical value normal example", { 5 | 6 | testthat::skip_on_cran() 7 | testthat::skip_on_travis() 8 | 9 | # library(bridgesampling) 10 | library(mvtnorm) 11 | if(require(RcppEigen)) { 12 | 13 | x <- rmvnorm(1e4, mean = rep(0, 2), sigma = diag(2)) 14 | 
colnames(x) <- c("x1", "x2") 15 | 16 | lb <- rep(-Inf, 2) 17 | ub <- rep(Inf, 2) 18 | names(lb) <- names(ub) <- colnames(x) 19 | Rcpp::sourceCpp(file = "unnormalized_normal_density.cpp") 20 | Rcpp::sourceCpp(file = "unnormalized_normal_density.cpp", env = .GlobalEnv) 21 | bridge_normal <- bridge_sampler(samples = x, log_posterior = "log_densityRcpp", 22 | data = NULL, lb = lb, ub = ub, 23 | method = "normal", packages = "RcppEigen", 24 | rcppFile = "unnormalized_normal_density.cpp", 25 | cores = 2, silent = TRUE) 26 | bridge_warp3 <- bridge_sampler(samples = x, log_posterior = "log_densityRcpp", 27 | data = NULL, lb = lb, ub = ub, 28 | method = "warp3", packages = "RcppEigen", 29 | rcppFile = "unnormalized_normal_density.cpp", 30 | cores = 2, silent = TRUE) 31 | 32 | expect_equal(bridge_normal$logml, expected = log(2*pi), tolerance = 0.01) 33 | expect_equal(bridge_warp3$logml, expected = log(2*pi), tolerance = 0.01) 34 | 35 | # test dots argument 36 | mu <- c(1, 2) 37 | x <- rmvnorm(1e4, mean = mu, sigma = diag(2)) 38 | colnames(x) <- c("x1", "x2") 39 | 40 | lb <- rep(-Inf, 2) 41 | ub <- rep(Inf, 2) 42 | names(lb) <- names(ub) <- colnames(x) 43 | Rcpp::sourceCpp(file = "unnormalized_normal_density_mu.cpp") 44 | Rcpp::sourceCpp(file = "unnormalized_normal_density_mu.cpp", env = .GlobalEnv) 45 | 46 | bridge_normal_dots <- bridge_sampler(samples = x, log_posterior = "log_densityRcpp_mu", 47 | mu, data = NULL, lb = lb, ub = ub, 48 | method = "normal", packages = "RcppEigen", 49 | rcppFile = "unnormalized_normal_density_mu.cpp", 50 | cores = 2, silent = TRUE) 51 | bridge_warp3_dots <- bridge_sampler(samples = x, log_posterior = "log_densityRcpp_mu", 52 | mu, data = NULL, lb = lb, ub = ub, 53 | method = "warp3", packages = "RcppEigen", 54 | rcppFile = "unnormalized_normal_density_mu.cpp", 55 | cores = 2, silent = TRUE) 56 | 57 | expect_equal(bridge_normal_dots$logml, expected = log(2*pi), tolerance = 0.01) 58 | expect_equal(bridge_warp3_dots$logml, expected = log(2*pi), 
tolerance = 0.01) 59 | 60 | } 61 | 62 | }) 63 | -------------------------------------------------------------------------------- /tests/testthat/test-bridge_sampler_parallel.R: -------------------------------------------------------------------------------- 1 | 2 | context('basic bridge sampling behavior normal parallel') 3 | 4 | test_that("bridge sampler matches anlytical value normal example", { 5 | 6 | testthat::skip_on_cran() 7 | testthat::skip_on_travis() 8 | 9 | # library(bridgesampling) 10 | library(mvtnorm) 11 | 12 | x <- rmvnorm(1e4, mean = rep(0, 2), sigma = diag(2)) 13 | colnames(x) <- c("x1", "x2") 14 | log_density <- function(s, data) { 15 | -.5*t(s)%*%s 16 | } 17 | assign("log_density", log_density, envir = .GlobalEnv) 18 | lb <- rep(-Inf, 2) 19 | ub <- rep(Inf, 2) 20 | names(lb) <- names(ub) <- colnames(x) 21 | bridge_normal <- bridge_sampler(samples = x, log_posterior = log_density, 22 | data = NULL, lb = lb, ub = ub, 23 | method = "normal", cores = 2, silent = TRUE) 24 | bridge_warp3 <- bridge_sampler(samples = x, log_posterior = log_density, 25 | data = NULL, lb = lb, ub = ub, 26 | method = "warp3", cores = 2, silent = TRUE) 27 | bridge_normal_c <- bridge_sampler(samples = x, log_posterior = "log_density", 28 | data = NULL, lb = lb, ub = ub, 29 | method = "normal", cores = 2, silent = TRUE, 30 | envir = sys.frame(sys.nframe())) 31 | bridge_warp3_c <- bridge_sampler(samples = x, log_posterior = "log_density", 32 | data = NULL, lb = lb, ub = ub, 33 | method = "warp3", cores = 2, silent = TRUE, 34 | envir = sys.frame(sys.nframe())) 35 | 36 | expect_equal(bridge_normal$logml, expected = log(2*pi), tolerance = 0.01) 37 | expect_equal(bridge_warp3$logml, expected = log(2*pi), tolerance = 0.01) 38 | expect_equal(bridge_normal_c$logml, expected = log(2*pi), tolerance = 0.01) 39 | expect_equal(bridge_warp3_c$logml, expected = log(2*pi), tolerance = 0.01) 40 | 41 | # test dots argument 42 | mu <- c(1, 2) 43 | x <- rmvnorm(1e4, mean = mu, sigma = diag(2)) 44 
| colnames(x) <- c("x1", "x2") 45 | log_density <- function(s, data, ...) { 46 | -.5*t(s - mu) %*% (s - mu) 47 | } 48 | assign("log_density", log_density, envir = .GlobalEnv) 49 | 50 | lb <- rep(-Inf, 2) 51 | ub <- rep(Inf, 2) 52 | names(lb) <- names(ub) <- colnames(x) 53 | 54 | bridge_normal_dots <- bridge_sampler(samples = x, log_posterior = log_density, 55 | mu, data = NULL, lb = lb, ub = ub, 56 | method = "normal", cores = 2, silent = TRUE) 57 | bridge_warp3_dots <- bridge_sampler(samples = x, log_posterior = log_density, 58 | mu, data = NULL, lb = lb, ub = ub, 59 | method = "warp3", cores = 2, silent = TRUE) 60 | bridge_normal_c_dots <- bridge_sampler(samples = x, log_posterior = "log_density", 61 | mu, data = NULL, lb = lb, ub = ub, 62 | method = "normal", cores = 2, silent = TRUE, 63 | envir = sys.frame(sys.nframe())) 64 | # ls.str(envir = sys.frame(sys.nframe())) 65 | bridge_warp3_c_dots <- bridge_sampler(samples = x, log_posterior = "log_density", 66 | mu, data = NULL, lb = lb, ub = ub, 67 | method = "warp3", cores = 2, silent = TRUE, 68 | envir = sys.frame(sys.nframe())) 69 | 70 | expect_equal(bridge_normal_dots$logml, expected = log(2*pi), tolerance = 0.01) 71 | expect_equal(bridge_warp3_dots$logml, expected = log(2*pi), tolerance = 0.01) 72 | expect_equal(bridge_normal_c_dots$logml, expected = log(2*pi), tolerance = 0.01) 73 | expect_equal(bridge_warp3_c_dots$logml, expected = log(2*pi), tolerance = 0.01) 74 | 75 | }) 76 | -------------------------------------------------------------------------------- /tests/testthat/test-bridge_sampler_print_method.R: -------------------------------------------------------------------------------- 1 | 2 | context('bridge sampling print method') 3 | 4 | test_that("bridge sampler print method correctly displayed", { 5 | 6 | # library(bridgesampling) 7 | library(mvtnorm) 8 | 9 | x <- rmvnorm(1e4, mean = rep(0, 2), sigma = diag(2)) 10 | colnames(x) <- c("x1", "x2") 11 | log_density <- function(s, data) { 12 | -.5*t(s)%*%s 
13 | } 14 | 15 | lb <- rep(-Inf, 2) 16 | ub <- rep(Inf, 2) 17 | names(lb) <- names(ub) <- colnames(x) 18 | 19 | # repetitions = 1 20 | bridge_normal <- bridge_sampler(samples = x, log_posterior = log_density, 21 | data = NULL, lb = lb, ub = ub, 22 | method = "normal", silent = TRUE) 23 | bridge_warp3 <- bridge_sampler(samples = x, log_posterior = log_density, 24 | data = NULL, lb = lb, ub = ub, 25 | method = "warp3", silent = TRUE) 26 | 27 | expect_output(print(bridge_normal), "Bridge sampling estimate of the log marginal likelihood") 28 | expect_output(print(bridge_warp3), "Bridge sampling estimate of the log marginal likelihood") 29 | 30 | # repetitions > 1 31 | bridge_normal <- bridge_sampler(samples = x, log_posterior = log_density, 32 | data = NULL, lb = lb, ub = ub, 33 | method = "normal", silent = TRUE, repetitions = 2) 34 | bridge_warp3 <- bridge_sampler(samples = x, log_posterior = log_density, 35 | data = NULL, lb = lb, ub = ub, 36 | method = "warp3", silent = TRUE, repetitions = 2) 37 | 38 | expect_output(print(bridge_normal), "Median of") 39 | expect_output(print(bridge_warp3), "Median of") 40 | 41 | }) 42 | 43 | test_that("prints with NAs with warning.", { 44 | bridge_o <- structure(list(logml = c(4291.14352476047, 4293.29076119542, 45 | 4291.96372581169, 4293.02187182362, NA, NA, 4290.9761730488, 46 | 4293.32075269401, 4293.5762219227, 4294.02761288449), niter = c(104, 47 | 16, 52, 8, 1000, 1000, 167, 16, 21, 44), method = "normal", repetitions = 10), .Names = c("logml", 48 | "niter", "method", "repetitions"), class = "bridge_list") 49 | 50 | expect_warning(print(bridge_o), "NA") 51 | }) 52 | -------------------------------------------------------------------------------- /tests/testthat/test-bridge_sampler_summary_method.R: -------------------------------------------------------------------------------- 1 | 2 | context('bridge sampling summary method') 3 | 4 | test_that("bridge sampler summary method correctly displayed", { 5 | 6 | # 
library(bridgesampling) 7 | library(mvtnorm) 8 | 9 | x <- rmvnorm(1e4, mean = rep(0, 2), sigma = diag(2)) 10 | colnames(x) <- c("x1", "x2") 11 | log_density <- function(s, data) { 12 | -.5*t(s)%*%s 13 | } 14 | 15 | lb <- rep(-Inf, 2) 16 | ub <- rep(Inf, 2) 17 | names(lb) <- names(ub) <- colnames(x) 18 | 19 | # repetitions = 1 20 | bridge_normal <- bridge_sampler(samples = x, log_posterior = log_density, 21 | data = NULL, lb = lb, ub = ub, 22 | method = "normal", silent = TRUE) 23 | bridge_warp3 <- bridge_sampler(samples = x, log_posterior = log_density, 24 | data = NULL, lb = lb, ub = ub, 25 | method = "warp3", silent = TRUE) 26 | s_normal <- summary(bridge_normal) 27 | s_warp3 <- summary(bridge_warp3) 28 | 29 | expect_equal(names(s_normal), c("Logml_Estimate", 30 | "Relative_Mean_Squared_Error", 31 | "Coefficient_of_Variation", 32 | "Percentage_Error", 33 | "Method", "Repetitions")) 34 | expect_equal(names(s_warp3), c("Logml_Estimate", "Method", "Repetitions")) 35 | 36 | expect_output(print(s_normal), 'All error measures are approximate.') 37 | expect_output(print(s_warp3), 'No error measures are available for method = "warp3"') 38 | 39 | 40 | # repetitions > 1 41 | bridge_normal_2 <- bridge_sampler(samples = x, log_posterior = log_density, 42 | data = NULL, lb = lb, ub = ub, 43 | method = "normal", silent = TRUE, 44 | repetitions = 2) 45 | bridge_warp3_2 <- bridge_sampler(samples = x, log_posterior = log_density, 46 | data = NULL, lb = lb, ub = ub, 47 | method = "warp3", silent = TRUE, 48 | repetitions = 2) 49 | s_normal_2 <- summary(bridge_normal_2) 50 | s_warp3_2 <- summary(bridge_warp3_2) 51 | 52 | expect_equal(names(s_normal_2), c("Logml_Estimate", 53 | "Min", 54 | "Max", 55 | "Interquartile_Range", 56 | "Method", "Repetitions")) 57 | expect_equal(names(s_warp3_2), c("Logml_Estimate", 58 | "Min", 59 | "Max", 60 | "Interquartile_Range", 61 | "Method", "Repetitions")) 62 | 63 | expect_output(print(s_normal_2), 'All error measures are based on 2 estimates.') 64 
| expect_output(print(s_warp3_2), 'All error measures are based on 2 estimates.') 65 | 66 | }) 67 | -------------------------------------------------------------------------------- /tests/testthat/test-nimble_bridge_sampler.R: -------------------------------------------------------------------------------- 1 | 2 | context('bridge_sampler.nimble works.') 3 | 4 | test_that("nimble support works", { 5 | testthat::skip_on_cran() 6 | testthat::skip_on_travis() 7 | 8 | testthat::skip_if_not_installed("nimble") 9 | if (require(nimble)) { 10 | set.seed(12345) 11 | mu <- 0 12 | tau2 <- 0.5 13 | sigma2 <- 1 14 | n <- 20 15 | theta <- rnorm(n, mu, sqrt(tau2)) 16 | y <- rnorm(n, theta, sqrt(sigma2)) 17 | # create model 18 | codeH1 <- nimbleCode({ 19 | mu ~ dnorm(0, sd = 1) 20 | invTau2 ~ dgamma(1, 1) 21 | tau2 <- 1/invTau2 22 | for (i in 1:20) { 23 | theta[i] ~ dnorm(mu, sd = sqrt(tau2)) 24 | y[i] ~ dnorm(theta[i], sd = 1) 25 | } 26 | }) 27 | 28 | modelH1 <- nimbleModel(codeH1) 29 | modelH1$setData(y = y) # set data 30 | 31 | # make compiled version from generated C++ 32 | cmodelH1 <- compileNimble(modelH1) 33 | 34 | # build an MCMC, skipping customization of the configuration. 35 | mcmcH1 <- buildMCMC(modelH1, 36 | monitors = modelH1$getNodeNames(stochOnly = TRUE, 37 | includeData = FALSE)) 38 | # compile the MCMC via generated C++ 39 | cmcmcH1 <- compileNimble(mcmcH1, project = modelH1) 40 | 41 | # run the MCMC. This is a wrapper for cmcmc$run() and extraction of samples. 
42 | # the object samplesH1 is actually not needed as the samples are also in cmcmcH1 43 | samplesH1 <- runMCMC(cmcmcH1, niter = 1e5, nburnin = 1000, nchains = 2, 44 | progressBar = FALSE) 45 | 46 | # bridge sampling 47 | bridge_H1 <- bridge_sampler(samples = cmcmcH1, 48 | cores = 1, 49 | method = "warp3", 50 | repetitions = 2) 51 | 52 | expect_equal(bridge_H1$logml, rep(-37.7983064265064, 2), tolerance = 0.01) 53 | 54 | } 55 | 56 | 57 | }) 58 | -------------------------------------------------------------------------------- /tests/testthat/test-post_prob.R: -------------------------------------------------------------------------------- 1 | 2 | context('post_prob with lists') 3 | 4 | 5 | test_that("post_prob works with lists and with NAs.", { 6 | bridge_o <- structure(list(logml = c(4291.14352476047, 4293.29076119542, 7 | 4291.96372581169, 4293.02187182362, NA, NA, 4290.9761730488, 8 | 4293.32075269401, 4293.5762219227, 4294.02761288449), niter = c(104, 9 | 16, 52, 8, 1000, 1000, 167, 16, 21, 44), method = "normal", repetitions = 10), .Names = c("logml", 10 | "niter", "method", "repetitions"), class = "bridge_list") 11 | 12 | H0L <- structure(list(logml = c(-20.8088381186739, -20.8072772698116, 13 | -20.808454454621, -20.8083419072281, -20.8087870541247, -20.8084887398113, 14 | -20.8086023582344, -20.8079083169745, -20.8083048489095, -20.8090050811436 15 | ), niter = c(4, 4, 4, 4, 4, 4, 4, 4, 4, 4), method = "normal", 16 | repetitions = 10), .Names = c("logml", "niter", "method", 17 | "repetitions"), class = "bridge_list") 18 | 19 | H1L <- structure(list(logml = c(-17.961665507006, -17.9611290723151, 20 | -17.9607509604499, -17.9608629535992, -17.9602093576442, -17.9600223300432, 21 | -17.9610157118017, -17.9615557696561, -17.9608437034849, -17.9606743200309 22 | ), niter = c(4, 4, 4, 4, 4, 4, 4, 4, 3, 4), method = "normal", 23 | repetitions = 10), .Names = c("logml", "niter", "method", 24 | "repetitions"), class = "bridge_list") 25 | 26 | H0 <- 
structure(list(logml = -20.8084543022433, niter = 4, method = "normal"), 27 | .Names = c("logml", "niter", "method"), class = "bridge") 28 | 29 | expect_is(post_prob(H1L, H0L), "matrix") 30 | expect_warning(post_prob(H1L, H0L, H0), "recycled") 31 | expect_warning(post_prob(H1L, H0L, 4), "ignored") 32 | expect_warning(post_prob(H0, H0L, 4), "ignored") 33 | expect_warning(post_prob(H1L, H0L, bridge_o), "NA") 34 | expect_error(post_prob(H1L, 4, 5, 6), "one object") 35 | expect_error(post_prob(H0, 4, 5, 6), "one object") 36 | }) 37 | -------------------------------------------------------------------------------- /tests/testthat/test-stan_bridge_sampler_basic.R: -------------------------------------------------------------------------------- 1 | 2 | context('bridge_sampler.stanfit works.') 3 | 4 | 5 | ### H0: mu = 0 6 | mH0 <- function(y, sigma2 = 1, alpha = 2, beta = 3, rel.tol = 10^(-10)) { 7 | n <- length(y) 8 | mH0integrand <- function(tau2, y, sigma2, alpha, beta) { 9 | (sigma2 + tau2)^(-n/2) * exp(-(n*mean(y)^2 + (n - 1)*sd(y)^2)/(2*(sigma2 + tau2))) * 10 | tau2^(-alpha - 1) * exp(-beta/tau2) 11 | } 12 | (2*pi)^(-n/2) * beta^alpha/gamma(alpha) * integrate(mH0integrand, 0, Inf, rel.tol = rel.tol, 13 | y = y, sigma2 = sigma2, alpha = alpha, 14 | beta = beta)$value 15 | } 16 | 17 | 18 | test_that("stan_bridge_sampler", { 19 | testthat::skip_on_os("windows") 20 | if (require(rstan)) { 21 | set.seed(12345) 22 | 23 | mu <- 0 24 | tau2 <- 0.5 25 | sigma2 <- 1 26 | 27 | n <- 20 28 | theta <- rnorm(n, mu, sqrt(tau2)) 29 | y <- rnorm(n, theta, sqrt(sigma2)) 30 | 31 | ### set prior parameters ### 32 | mu0 <- 0 33 | tau20 <- 1 34 | alpha <- 1 35 | beta <- 1 36 | 37 | # models 38 | stancodeH0 <- 'data { 39 | int n; // number of observations 40 | vector[n] y; // observations 41 | real alpha; 42 | real beta; 43 | real sigma2; 44 | } 45 | parameters { 46 | real tau2; // group-level variance 47 | vector[n] theta; // participant effects 48 | } 49 | model { 50 | target += 
inv_gamma_lpdf(tau2 | alpha, beta); 51 | target += normal_lpdf(theta | 0, sqrt(tau2)); 52 | target += normal_lpdf(y | theta, sqrt(sigma2)); 53 | } 54 | ' 55 | 56 | # compile models 57 | stanmodelH0 <- suppressWarnings( 58 | stan_model(model_code = stancodeH0, model_name="stanmodel") 59 | ) 60 | 61 | 62 | # fit models 63 | stanobjectH0 <- sampling(stanmodelH0, data = list(y = y, n = n, 64 | alpha = alpha, 65 | beta = beta, 66 | sigma2 = sigma2), 67 | iter = 3500, warmup = 500, chains = 4, 68 | show_messages = FALSE, refresh = 0) 69 | expect_is( 70 | H0_bridge_norm <- bridge_sampler(samples = stanobjectH0, 71 | method = "normal", 72 | silent = TRUE) 73 | , "bridge") 74 | 75 | expect_is( 76 | H0_bridge_norm_rep <-bridge_sampler(stanobjectH0, 77 | method = "normal", 78 | repetitions = 2, silent = TRUE) 79 | , "bridge_list") 80 | 81 | expect_is( 82 | H0_bridge_warp3 <- bridge_sampler(stanobjectH0, method = "warp3", 83 | silent = TRUE) 84 | , "bridge") 85 | 86 | expect_is( 87 | H0_bridge_warp3_rep <- bridge_sampler(stanobjectH0, method = "warp3", repetitions = 2, silent = TRUE) 88 | , "bridge_list") 89 | 90 | expect_equal( 91 | H0_bridge_norm$logml, 92 | log(mH0(y = y, sigma2 = sigma2, alpha = alpha, beta = beta)), 93 | tolerance = 0.1) 94 | expect_equal( 95 | H0_bridge_warp3$logml, 96 | log(mH0(y = y, sigma2 = sigma2, alpha = alpha, beta = beta)), 97 | tolerance = 0.1) 98 | 99 | expect_equal( 100 | H0_bridge_norm_rep$logml, 101 | rep(log(mH0(y = y, sigma2 = sigma2, alpha = alpha, beta = beta)), 2), 102 | tolerance = 0.1) 103 | expect_equal( 104 | H0_bridge_warp3_rep$logml, 105 | rep(log(mH0(y = y, sigma2 = sigma2, alpha = alpha, beta = beta)), 2), 106 | tolerance = 0.1) 107 | 108 | } 109 | }) 110 | 111 | 112 | test_that("stan_bridge_sampler in multicore", { 113 | testthat::skip_on_cran() 114 | testthat::skip_on_travis() 115 | testthat::skip_on_os("windows") 116 | if (require(rstan)) { 117 | set.seed(12345) 118 | 119 | mu <- 0 120 | tau2 <- 0.5 121 | sigma2 <- 1 122 | 
123 | n <- 20 124 | theta <- rnorm(n, mu, sqrt(tau2)) 125 | y <- rnorm(n, theta, sqrt(sigma2)) 126 | 127 | ### set prior parameters ### 128 | mu0 <- 0 129 | tau20 <- 1 130 | alpha <- 1 131 | beta <- 1 132 | 133 | # models 134 | stancodeH0 <- 'data { 135 | int n; // number of observations 136 | vector[n] y; // observations 137 | real alpha; 138 | real beta; 139 | real sigma2; 140 | } 141 | parameters { 142 | real tau2; // group-level variance 143 | vector[n] theta; // participant effects 144 | } 145 | model { 146 | target += inv_gamma_lpdf(tau2 | alpha, beta); 147 | target += normal_lpdf(theta | 0, sqrt(tau2)); 148 | target += normal_lpdf(y | theta, sqrt(sigma2)); 149 | } 150 | ' 151 | 152 | # compile models 153 | stanmodelH0 <- suppressWarnings( 154 | stan_model(model_code = stancodeH0, model_name="stanmodel") 155 | ) 156 | 157 | # fit models 158 | stanobjectH0 <- sampling(stanmodelH0, data = list(y = y, n = n, 159 | alpha = alpha, 160 | beta = beta, 161 | sigma2 = sigma2), 162 | iter = 2500, warmup = 500, chains = 4, 163 | show_messages = FALSE, refresh = 0) 164 | expect_is( 165 | H0_bridge_norm <- bridge_sampler(stanobjectH0, 166 | method = "normal", 167 | silent = TRUE, cores = 2) 168 | , "bridge") 169 | 170 | expect_is( 171 | H0_bridge_warp3 <- bridge_sampler(stanobjectH0, method = "warp3", 172 | silent = TRUE, cores = 2) 173 | , "bridge") 174 | 175 | expect_equal( 176 | H0_bridge_norm$logml, 177 | log(mH0(y = y, sigma2 = sigma2, alpha = alpha, beta = beta)), 178 | tolerance = 0.1) 179 | expect_equal( 180 | H0_bridge_warp3$logml, 181 | log(mH0(y = y, sigma2 = sigma2, alpha = alpha, beta = beta)), 182 | tolerance = 0.1) 183 | 184 | } 185 | }) 186 | -------------------------------------------------------------------------------- /tests/testthat/test-stan_bridge_sampler_bugs.R: -------------------------------------------------------------------------------- 1 | 2 | context('Stan Bridge Sampler Bugs') 3 | 4 | test_that("subscript out of bounds error", { 5 | ## 
https://github.com/quentingronau/bridgesampling/issues/26 6 | stan_mod = " 7 | data{ 8 | int M; 9 | int J; 10 | int T; 11 | int E; 12 | int G; 13 | int N[G]; 14 | int ii[M]; 15 | int jj[M]; 16 | int gg[M]; 17 | int g_all[sum(N)]; 18 | int y[M]; 19 | matrix[J,J] obs_corr[G]; 20 | } 21 | 22 | transformed data{ 23 | int N_all = sum(N); 24 | } 25 | 26 | parameters{ 27 | ordered[T] thresholds_raw[G,J]; 28 | matrix[E,J] lam[G]; 29 | matrix[N_all,E] eta; 30 | matrix[N_all,J] ystar_raw; 31 | } 32 | 33 | transformed parameters { 34 | ordered[T] thresholds[G,J]; 35 | 36 | for(g in 1:G) 37 | for(j in 1:J) 38 | thresholds[g,j] = thresholds_raw[g,j] * 5; 39 | } 40 | 41 | 42 | model{ 43 | matrix[N_all,J] ystar; 44 | int pos = 1; 45 | 46 | target += std_normal_lpdf(to_vector(ystar_raw)); 47 | target += std_normal_lpdf(to_vector(eta)); 48 | 49 | for(g in 1:G){ 50 | int g_ids[N[g]] = segment(g_all,pos,N[g]); 51 | target += normal_lpdf(to_vector(eta)| 0,5); 52 | 53 | for(j in 1:J) 54 | target += std_normal_lpdf(thresholds_raw[g,j]); 55 | 56 | ystar[g_ids,] = eta[g_ids,] * lam[g] + ystar_raw[g_ids,]; 57 | pos += N[g]; 58 | } 59 | 60 | for(m in 1:M) 61 | target += ordered_logistic_lpmf(y[m] | ystar[ii[m],jj[m]], 62 | thresholds[gg[m],jj[m]]); 63 | } 64 | " 65 | testthat::skip_on_cran() 66 | testthat::skip_on_travis() 67 | testthat::skip_if_not_installed("rstan") 68 | library("rstan") 69 | # source("tests/testthat/test_dat.txt") 70 | source("test_dat.txt") 71 | 72 | suppressWarnings( 73 | mod <- stan(model_code=stan_mod,data=test_dat, chains = 2, refresh = 0) 74 | ) 75 | 76 | expect_warning(object = bridge_sampler(mod, silent=TRUE), 77 | regexp = "Infinite value in iterative scheme, returning NA.") 78 | }) 79 | 80 | test_that("bridge_sampler.stanfit multicore works for one-parameter model.", { 81 | 82 | skip_on_cran() 83 | skip_on_travis() 84 | skip_on_os("windows") 85 | 86 | if (require(rstan)) { 87 | set.seed(12345) 88 | 89 | # compute difference scores 90 | n <- 10 91 | y <- 
rnorm(n) 92 | 93 | # models 94 | stancodeH0 <- ' 95 | data { 96 | int n; // number of observations 97 | vector[n] y; // observations 98 | } 99 | parameters { 100 | real sigma2; // variance parameter 101 | } 102 | model { 103 | target += log(1/sigma2); // Jeffreys prior on sigma2 104 | target += normal_lpdf(y | 0, sqrt(sigma2)); // likelihood 105 | } 106 | ' 107 | # compile models 108 | suppressWarnings( 109 | stanmodelH0 <- stan_model(model_code = stancodeH0, model_name="stanmodel") 110 | ) 111 | # fit models 112 | stanfitH0 <- sampling(stanmodelH0, data = list(y = y, n = n), 113 | iter = 10000, warmup = 1000, chains = 4, 114 | control = list(adapt_delta = 0.95), 115 | refresh = 0) 116 | 117 | ######### bridge sampling ########### 118 | suppressWarnings(H0 <- bridge_sampler(stanfitH0, cores = 2, silent = TRUE)) 119 | 120 | expect_s3_class(H0, "bridge") 121 | 122 | } 123 | }) 124 | 125 | test_that("turtle example",{ 126 | skip_on_cran() 127 | skip_on_travis() 128 | 129 | 130 | if (require(rstan)) { 131 | 132 | data("turtles") 133 | 134 | ### m1 (model with random intercepts) ### 135 | m1_code_nc <- 136 | "data { 137 | int nobs; 138 | int y[nobs]; 139 | real x[nobs]; 140 | int m; 141 | int clutch[nobs]; 142 | } 143 | parameters { 144 | real alpha0_raw; 145 | real alpha1_raw; 146 | vector[m] b_raw; 147 | real sigma2; 148 | } 149 | transformed parameters { 150 | vector[m] b; 151 | real sigma = sqrt(sigma2); 152 | real alpha0 = sqrt(10.0)*alpha0_raw; 153 | real alpha1 = sqrt(10.0)*alpha1_raw; 154 | b = b_raw*sigma; 155 | } 156 | model { 157 | // priors 158 | target += -2*log(1 + sigma2); // p(sigma2) = 1/(1 + sigma2)^2 159 | target += normal_lpdf(alpha0_raw | 0, 1); 160 | target += normal_lpdf(alpha1_raw | 0, 1); 161 | 162 | // random effects 163 | target += normal_lpdf(b_raw | 0, 1); 164 | 165 | // likelihood 166 | for (i in 1:nobs) 167 | target += bernoulli_lpmf(y[i] | Phi(alpha0 + alpha1*x[i] + b[clutch[i]])); 168 | }" 169 | 170 | suppressWarnings( 171 | 
stanobject_m1_nc <- stan(model_code = m1_code_nc, 172 | data = list(y = turtles$y, x = turtles$x, 173 | nobs = nrow(turtles), 174 | m = max(turtles$clutch), 175 | clutch = turtles$clutch), 176 | iter = 10500, warmup = 500, chains = 4, 177 | refresh = 0) 178 | ) 179 | bs_m1_nc <- bridge_sampler(stanobject_m1_nc, method = "warp3", 180 | repetitions = 25, silent=TRUE) 181 | 182 | m0_code_nc <- 183 | "data { 184 | int nobs; 185 | int y[nobs]; 186 | real x[nobs]; 187 | } 188 | parameters { 189 | real alpha0_raw; 190 | real alpha1_raw; 191 | } 192 | transformed parameters { 193 | real alpha0 = sqrt(10.0)*alpha0_raw; 194 | real alpha1 = sqrt(10.0)*alpha1_raw; 195 | } 196 | model { 197 | // priors 198 | target += normal_lpdf(alpha0_raw | 0, 1); 199 | target += normal_lpdf(alpha1_raw | 0, 1); 200 | 201 | // likelihood 202 | for (i in 1:nobs) 203 | target += bernoulli_lpmf(y[i] | Phi(alpha0 + alpha1*x[i])); 204 | }" 205 | 206 | suppressWarnings( 207 | stanobject_m0_nc <- stan(model_code = m0_code_nc, 208 | data = list(y = turtles$y, x = turtles$x, 209 | nobs = nrow(turtles), 210 | m = max(turtles$clutch), 211 | clutch = turtles$clucth), 212 | iter = 10500, warmup = 500, chains = 4, 213 | refresh = 0) 214 | ) 215 | 216 | bs_m0_nc <- bridge_sampler(stanobject_m0_nc, method = "warp3", 217 | repetitions = 25, silent=TRUE) 218 | expect_equal(bf(bs_m0_nc, bs_m1_nc)$bf, rep(1.27, 25), tolerance = 0.02) 219 | } 220 | }) 221 | -------------------------------------------------------------------------------- /tests/testthat/test-stanreg_bridge_sampler_basic.R: -------------------------------------------------------------------------------- 1 | 2 | context('bridge_sampler.stanreg works.') 3 | 4 | test_that("stan_bridge_sampler", { 5 | 6 | testthat::skip_on_cran() 7 | testthat::skip_on_travis() 8 | 9 | if (require(rstanarm)) { 10 | 11 | fit_1 <- stan_glm(mpg ~ wt + qsec + am, data = mtcars, 12 | chains = 2, cores = 2, iter = 5000, 13 | diagnostic_file = file.path(tempdir(), "df.csv")) 14 
| bridge_norm <- bridge_sampler(fit_1) 15 | 16 | fit_2 <- update(fit_1, formula = . ~ . + cyl) 17 | bridge_warp <- bridge_sampler(fit_2, method = "warp3") 18 | 19 | expect_true(bridge_norm$logml > bridge_warp$logml) 20 | } 21 | }) 22 | 23 | 24 | test_that("stan_bridge_sampler in multicore", { 25 | testthat::skip_on_cran() 26 | testthat::skip_on_travis() 27 | #testthat::skip_on_os("windows") 28 | if (require(rstanarm)) { 29 | fit_1 <- stan_glm(mpg ~ wt + qsec + am, data = mtcars, 30 | chains = 2, cores = 2, iter = 5000, 31 | diagnostic_file = file.path(tempdir(), "df.csv")) 32 | bridge_norm <- bridge_sampler(fit_1, cores = 2) 33 | 34 | fit_2 <- update(fit_1, formula = . ~ . + cyl) 35 | bridge_warp <- bridge_sampler(fit_2, method = "warp3", cores = 2) 36 | 37 | expect_true(bridge_norm$logml > bridge_warp$logml) 38 | } 39 | }) 40 | -------------------------------------------------------------------------------- /tests/testthat/test-vignette_example_jags.R: -------------------------------------------------------------------------------- 1 | 2 | context('test vignette bridgesampling_example_jags.Rmd') 3 | 4 | test_that("bridge sampler yields correct results", { 5 | 6 | testthat::skip_on_cran() 7 | testthat::skip_on_travis() 8 | 9 | # library(bridgesampling) 10 | if (require(R2jags)) { 11 | 12 | ### generate data ### 13 | set.seed(12345) 14 | 15 | mu <- 0 16 | tau2 <- 0.5 17 | sigma2 <- 1 18 | 19 | n <- 20 20 | theta <- rnorm(n, mu, sqrt(tau2)) 21 | y <- rnorm(n, theta, sqrt(sigma2)) 22 | 23 | ### set prior parameters ### 24 | mu0 <- 0 25 | tau20 <- 1 26 | alpha <- 1 27 | beta <- 1 28 | 29 | ### functions to get posterior samples ### 30 | 31 | # H0: mu = 0 32 | getSamplesModelH0 <- function(data, 33 | niter = 52000, 34 | nburnin = 2000, 35 | nchains = 3) { 36 | 37 | model <- " 38 | model { 39 | for (i in 1:n) { 40 | theta[i] ~ dnorm(0, invTau2) 41 | y[i] ~ dnorm(theta[i], 1/sigma2) 42 | } 43 | invTau2 ~ dgamma(alpha, beta) 44 | tau2 <- 1/invTau2 45 | }" 46 | 47 | s <- 
jags(data, parameters.to.save = c("theta", "invTau2"), 48 | model.file = textConnection(model), 49 | n.chains = nchains, n.iter = niter, 50 | n.burnin = nburnin, n.thin = 1) 51 | 52 | return(s) 53 | 54 | } 55 | 56 | # H1: mu != 0 57 | getSamplesModelH1 <- function(data, 58 | niter = 52000, 59 | nburnin = 2000, 60 | nchains = 3) { 61 | 62 | model <- " 63 | model { 64 | for (i in 1:n) { 65 | theta[i] ~ dnorm(mu, invTau2) 66 | y[i] ~ dnorm(theta[i], 1/sigma2) 67 | } 68 | mu ~ dnorm(mu0, 1/tau20) 69 | invTau2 ~ dgamma(alpha, beta) 70 | tau2 <- 1/invTau2 71 | }" 72 | 73 | s <- jags(data, parameters.to.save = c("theta", "mu", "invTau2"), 74 | model.file = textConnection(model), 75 | n.chains = nchains, n.iter = niter, 76 | n.burnin = nburnin, n.thin = 1) 77 | 78 | return(s) 79 | 80 | } 81 | 82 | ### get posterior samples ### 83 | 84 | # create data lists for JAGS 85 | data_H0 <- list(y = y, n = length(y), alpha = alpha, 86 | beta = beta, sigma2 = sigma2) 87 | data_H1 <- list(y = y, n = length(y), mu0 = mu0, 88 | tau20 = tau20, alpha = alpha, 89 | beta = beta, sigma2 = sigma2) 90 | 91 | # fit models 92 | samples_H0 <- getSamplesModelH0(data_H0) 93 | samples_H1 <- getSamplesModelH1(data_H1) 94 | 95 | ### functions for evaluating the unnormalized posteriors on log scale ### 96 | 97 | log_posterior_H0 <- function(samples.row, data) { 98 | 99 | mu <- 0 100 | invTau2 <- samples.row[[ "invTau2" ]] 101 | theta <- samples.row[ paste0("theta[", seq_along(data$y), "]") ] 102 | 103 | sum(dnorm(data$y, theta, data$sigma2, log = TRUE)) + 104 | sum(dnorm(theta, mu, 1/sqrt(invTau2), log = TRUE)) + 105 | dgamma(invTau2, data$alpha, data$beta, log = TRUE) 106 | 107 | } 108 | 109 | log_posterior_H1 <- function(samples.row, data) { 110 | 111 | mu <- samples.row[[ "mu" ]] 112 | invTau2 <- samples.row[[ "invTau2" ]] 113 | theta <- samples.row[ paste0("theta[", seq_along(data$y), "]") ] 114 | 115 | sum(dnorm(data$y, theta, data$sigma2, log = TRUE)) + 116 | sum(dnorm(theta, mu, 1/sqrt(invTau2), 
log = TRUE)) + 117 | dnorm(mu, data$mu0, sqrt(data$tau20), log = TRUE) + 118 | dgamma(invTau2, data$alpha, data$beta, log = TRUE) 119 | 120 | } 121 | 122 | # specify parameter bounds H0 123 | cn <- colnames(samples_H0$BUGSoutput$sims.matrix) 124 | cn <- cn[cn != "deviance"] 125 | lb_H0 <- rep(-Inf, length(cn)) 126 | ub_H0 <- rep(Inf, length(cn)) 127 | names(lb_H0) <- names(ub_H0) <- cn 128 | lb_H0[[ "invTau2" ]] <- 0 129 | 130 | # specify parameter bounds H1 131 | cn <- colnames(samples_H1$BUGSoutput$sims.matrix) 132 | cn <- cn[cn != "deviance"] 133 | lb_H1 <- rep(-Inf, length(cn)) 134 | ub_H1 <- rep(Inf, length(cn)) 135 | names(lb_H1) <- names(ub_H1) <- cn 136 | lb_H1[[ "invTau2" ]] <- 0 137 | 138 | # compute log marginal likelihood via bridge sampling for H0 139 | H0.bridge <- bridge_sampler(samples = samples_H0, data = data_H0, 140 | log_posterior = log_posterior_H0, lb = lb_H0, 141 | ub = ub_H0, silent = TRUE) 142 | 143 | # compute log marginal likelihood via bridge sampling for H1 144 | H1.bridge <- bridge_sampler(samples = samples_H1, data = data_H1, 145 | log_posterior = log_posterior_H1, lb = lb_H1, 146 | ub = ub_H1, silent = TRUE) 147 | 148 | # compute percentage errors 149 | H0.error <- error_measures(H0.bridge)$percentage 150 | H1.error <- error_measures(H1.bridge)$percentage 151 | 152 | # compute Bayes factor 153 | BF01 <- bf(H0.bridge, H1.bridge) 154 | 155 | # compute posterior model probabilities (assuming equal prior model probabilities) 156 | post1 <- post_prob(H0.bridge, H1.bridge) 157 | 158 | # compute posterior model probabilities (using user-specified prior model probabilities) 159 | post2 <- post_prob(H0.bridge, H1.bridge, prior_prob = c(.6, .4)) 160 | 161 | # "exact" ml H1 162 | mH1 <- function(data, rel.tol = 1e-10) { 163 | 164 | y <- data$y 165 | n <- data$n 166 | mu0 <- data$mu0 167 | tau20 <- data$tau20 168 | alpha <- data$alpha 169 | beta <- data$beta 170 | sigma2 <- data$sigma2 171 | 172 | mH1integrand <- function(tau2, y, sigma2, mu0, 
tau20, alpha, beta) { 173 | 174 | (sigma2 + tau2)^(-n/2) * 175 | exp(-1/2 * ((n*mean(y)^2 + (n - 1)*sd(y)^2)/(sigma2 + tau2) + 176 | mu0^2/tau20 - ((n*mean(y))/(sigma2 + tau2) + 177 | mu0/tau20)^2 / 178 | (n/(sigma2 + tau2) + 1/tau20))) * 179 | (n/(sigma2 + tau2) + 1/tau20)^(-1/2) * tau2^(-alpha - 1) * 180 | exp(-beta/tau2) 181 | 182 | } 183 | 184 | (2*pi)^(-n/2) * (tau20)^(-1/2) * beta^alpha/gamma(alpha) * 185 | integrate(mH1integrand, 0, Inf, 186 | rel.tol = rel.tol, y = y, 187 | sigma2 = sigma2, mu0 = mu0, 188 | tau20 = tau20, alpha = alpha, 189 | beta = beta)$value 190 | 191 | } 192 | 193 | exact_logmlH1 <- log(mH1(data_H1)) 194 | 195 | # "exact" ml H1 196 | mH0 <- function(data, rel.tol = 1e-10) { 197 | 198 | y <- data$y 199 | n <- data$n 200 | alpha <- data$alpha 201 | beta <- data$beta 202 | sigma2 <- data$sigma2 203 | 204 | mH0integrand <- function(tau2, y, sigma2, alpha, beta) { 205 | 206 | n <- length(y) 207 | 208 | (sigma2 + tau2)^(-n/2) * exp(-(n*mean(y)^2 + (n - 1)*sd(y)^2)/ 209 | (2*(sigma2 + tau2))) * 210 | tau2^(-alpha - 1) * exp(-beta/tau2) 211 | 212 | } 213 | 214 | (2*pi)^(-n/2) * beta^alpha/gamma(alpha) * 215 | integrate(mH0integrand, 0, Inf, rel.tol = rel.tol, 216 | y = y, sigma2 = sigma2, alpha = alpha, 217 | beta = beta)$value 218 | 219 | } 220 | 221 | exact_logmlH0 <- log(mH0(data_H0)) 222 | 223 | exact_BF01 <- exp(exact_logmlH0 - exact_logmlH1) 224 | 225 | H0.bridge.curr <- H0.bridge 226 | H1.bridge.curr <- H1.bridge 227 | BF01.curr <- BF01 228 | post1.curr <- post1 229 | post2.curr <- post2 230 | 231 | load(system.file("extdata/", "vignette_example_jags.RData", 232 | package = "bridgesampling")) 233 | 234 | expect_equal( 235 | H0.bridge.curr$logml, 236 | expected = exact_logmlH0, 237 | tolerance = 0.01 238 | ) 239 | expect_equal( 240 | H1.bridge.curr$logml, 241 | expected = exact_logmlH1, 242 | tolerance = 0.01 243 | ) 244 | expect_equal( 245 | BF01.curr$bf, 246 | expected = exact_BF01, 247 | tolerance = 0.01 248 | ) 249 | expect_equal( 250 
| H0.bridge.curr$logml, 251 | expected = H0.bridge$logml, 252 | tolerance = 0.01 253 | ) 254 | expect_equal( 255 | H1.bridge.curr$logml, 256 | expected = H1.bridge$logml, 257 | tolerance = 0.01 258 | ) 259 | expect_equal( 260 | BF01.curr$bf, 261 | expected = BF01$bf, 262 | tolerance = 0.01 263 | ) 264 | expect_equal( 265 | post1.curr, 266 | expected = post1, 267 | tolerance = 0.01 268 | ) 269 | expect_equal( 270 | post2.curr, 271 | expected = post2, 272 | tolerance = 0.01 273 | ) 274 | 275 | } 276 | 277 | }) 278 | -------------------------------------------------------------------------------- /tests/testthat/test-vignette_example_nimble.R: -------------------------------------------------------------------------------- 1 | 2 | context('test vignette bridgesampling_example_nimble.Rmd') 3 | 4 | test_that("bridge sampler yields correct results", { 5 | 6 | testthat::skip_on_cran() 7 | testthat::skip_on_travis() 8 | 9 | # library(bridgesampling) 10 | if (require(nimble)) { 11 | 12 | ### generate data ### 13 | set.seed(12345) 14 | 15 | mu <- 0 16 | tau2 <- 0.5 17 | sigma2 <- 1 18 | 19 | n <- 20 20 | theta <- rnorm(n, mu, sqrt(tau2)) 21 | y <- rnorm(n, theta, sqrt(sigma2)) 22 | 23 | ### set prior parameters ### 24 | mu0 <- 0 25 | tau20 <- 1 26 | alpha <- 1 27 | beta <- 1 28 | 29 | # models 30 | codeH0 <- nimbleCode({ 31 | invTau2 ~ dgamma(1, 1) 32 | tau2 <- 1/invTau2 33 | for (i in 1:20) { 34 | theta[i] ~ dnorm(0, sd = sqrt(tau2)) 35 | y[i] ~ dnorm(theta[i], sd = 1) 36 | } 37 | }) 38 | codeH1 <- nimbleCode({ 39 | mu ~ dnorm(0, sd = 1) 40 | invTau2 ~ dgamma(1, 1) 41 | tau2 <- 1/invTau2 42 | for (i in 1:20) { 43 | theta[i] ~ dnorm(mu, sd = sqrt(tau2)) 44 | y[i] ~ dnorm(theta[i], sd = 1) 45 | } 46 | }) 47 | 48 | ## steps for H0: 49 | modelH0 <- nimbleModel(codeH0) 50 | modelH0$setData(y = y) # set data 51 | cmodelH0 <- compileNimble(modelH0) # make compiled version from generated C++ 52 | 53 | ## steps for H1: 54 | modelH1 <- nimbleModel(codeH1) 55 | modelH1$setData(y = y) 
# set data 56 | cmodelH1 <- compileNimble(modelH1) # make compiled version from generated C++ 57 | 58 | # build MCMC functions, skipping customization of the configuration. 59 | mcmcH0 <- buildMCMC(modelH0, 60 | monitors = modelH0$getNodeNames(stochOnly = TRUE, 61 | includeData = FALSE)) 62 | mcmcH1 <- buildMCMC(modelH1, 63 | monitors = modelH1$getNodeNames(stochOnly = TRUE, 64 | includeData = FALSE)) 65 | # compile the MCMC function via generated C++ 66 | cmcmcH0 <- compileNimble(mcmcH0, project = modelH0) 67 | cmcmcH1 <- compileNimble(mcmcH1, project = modelH1) 68 | 69 | # run the MCMC. This is a wrapper for cmcmc$run() and extraction of samples. 70 | # the object samplesH1 is actually not needed as the samples are also in cmcmcH1 71 | samplesH0 <- runMCMC(cmcmcH0, niter = 1e5, nburnin = 1000, nchains = 2, 72 | progressBar = FALSE) 73 | samplesH1 <- runMCMC(cmcmcH1, niter = 1e5, nburnin = 1000, nchains = 2, 74 | progressBar = FALSE) 75 | 76 | # compute log marginal likelihood via bridge sampling for H0 77 | H0.bridge <- bridge_sampler(cmcmcH0, silent = TRUE) 78 | 79 | # compute log marginal likelihood via bridge sampling for H1 80 | H1.bridge <- bridge_sampler(cmcmcH1, silent = TRUE) 81 | 82 | # compute percentage errors 83 | H0.error <- error_measures(H0.bridge)$percentage 84 | H1.error <- error_measures(H1.bridge)$percentage 85 | 86 | # compute Bayes factor 87 | BF01 <- bf(H0.bridge, H1.bridge) 88 | 89 | # compute posterior model probabilities (assuming equal prior model probabilities) 90 | post1 <- post_prob(H0.bridge, H1.bridge) 91 | 92 | # compute posterior model probabilities (using user-specified prior model probabilities) 93 | post2 <- post_prob(H0.bridge, H1.bridge, prior_prob = c(.6, .4)) 94 | 95 | # "exact" ml H1 96 | mH1 <- function(data, rel.tol = 1e-10) { 97 | 98 | y <- data$y 99 | n <- data$n 100 | mu0 <- data$mu0 101 | tau20 <- data$tau20 102 | alpha <- data$alpha 103 | beta <- data$beta 104 | sigma2 <- data$sigma2 105 | 106 | mH1integrand <- 
function(tau2, y, sigma2, mu0, tau20, alpha, beta) { 107 | 108 | (sigma2 + tau2)^(-n/2) * 109 | exp(-1/2 * ((n*mean(y)^2 + (n - 1)*sd(y)^2)/(sigma2 + tau2) + 110 | mu0^2/tau20 - ((n*mean(y))/(sigma2 + tau2) + 111 | mu0/tau20)^2 / 112 | (n/(sigma2 + tau2) + 1/tau20))) * 113 | (n/(sigma2 + tau2) + 1/tau20)^(-1/2) * tau2^(-alpha - 1) * 114 | exp(-beta/tau2) 115 | 116 | } 117 | 118 | (2*pi)^(-n/2) * (tau20)^(-1/2) * beta^alpha/gamma(alpha) * 119 | integrate(mH1integrand, 0, Inf, 120 | rel.tol = rel.tol, y = y, 121 | sigma2 = sigma2, mu0 = mu0, 122 | tau20 = tau20, alpha = alpha, 123 | beta = beta)$value 124 | 125 | } 126 | 127 | exact_logmlH1 <- log(mH1(list(y = y, n = n, 128 | mu0 = mu0, 129 | tau20 = tau20, 130 | alpha = alpha, 131 | beta = beta, 132 | sigma2 = sigma2))) 133 | 134 | # "exact" ml H1 135 | mH0 <- function(data, rel.tol = 1e-10) { 136 | 137 | y <- data$y 138 | n <- data$n 139 | alpha <- data$alpha 140 | beta <- data$beta 141 | sigma2 <- data$sigma2 142 | 143 | mH0integrand <- function(tau2, y, sigma2, alpha, beta) { 144 | 145 | n <- length(y) 146 | 147 | (sigma2 + tau2)^(-n/2) * exp(-(n*mean(y)^2 + (n - 1)*sd(y)^2)/ 148 | (2*(sigma2 + tau2))) * 149 | tau2^(-alpha - 1) * exp(-beta/tau2) 150 | 151 | } 152 | 153 | (2*pi)^(-n/2) * beta^alpha/gamma(alpha) * 154 | integrate(mH0integrand, 0, Inf, rel.tol = rel.tol, 155 | y = y, sigma2 = sigma2, alpha = alpha, 156 | beta = beta)$value 157 | 158 | } 159 | 160 | exact_logmlH0 <- log(mH0(list(y = y, n = n, 161 | alpha = alpha, 162 | beta = beta, 163 | sigma2 = sigma2))) 164 | 165 | exact_BF01 <- exp(exact_logmlH0 - exact_logmlH1) 166 | 167 | H0.bridge.curr <- H0.bridge 168 | H1.bridge.curr <- H1.bridge 169 | BF01.curr <- BF01 170 | post1.curr <- post1 171 | post2.curr <- post2 172 | 173 | # load(system.file("extdata/", "vignette_example_nimble.RData", 174 | # package = "bridgesampling")) 175 | 176 | expect_equal( 177 | H0.bridge.curr$logml, 178 | expected = exact_logmlH0, 179 | tolerance = 0.01 180 | ) 181 | 
expect_equal( 182 | H1.bridge.curr$logml, 183 | expected = exact_logmlH1, 184 | tolerance = 0.01 185 | ) 186 | expect_equal( 187 | BF01.curr$bf, 188 | expected = exact_BF01, 189 | tolerance = 0.01 190 | ) 191 | expect_equal( 192 | H0.bridge.curr$logml, 193 | expected = H0.bridge$logml, 194 | tolerance = 0.01 195 | ) 196 | expect_equal( 197 | H1.bridge.curr$logml, 198 | expected = H1.bridge$logml, 199 | tolerance = 0.01 200 | ) 201 | expect_equal( 202 | BF01.curr$bf, 203 | expected = BF01$bf, 204 | tolerance = 0.01 205 | ) 206 | expect_equal( 207 | post1.curr, 208 | expected = post1, 209 | tolerance = 0.01 210 | ) 211 | expect_equal( 212 | post2.curr, 213 | expected = post2, 214 | tolerance = 0.01 215 | ) 216 | 217 | } 218 | 219 | }) 220 | -------------------------------------------------------------------------------- /tests/testthat/test-vignette_example_stan.R: -------------------------------------------------------------------------------- 1 | 2 | context('test vignette bridgesampling_example_stan.Rmd') 3 | 4 | test_that("bridge sampler yields correct results", { 5 | 6 | testthat::skip_on_cran() 7 | testthat::skip_on_travis() 8 | 9 | # library(bridgesampling) 10 | if (require(rstan)) { 11 | 12 | ### generate data ### 13 | set.seed(12345) 14 | 15 | mu <- 0 16 | tau2 <- 0.5 17 | sigma2 <- 1 18 | 19 | n <- 20 20 | theta <- rnorm(n, mu, sqrt(tau2)) 21 | y <- rnorm(n, theta, sqrt(sigma2)) 22 | 23 | ### set prior parameters ### 24 | mu0 <- 0 25 | tau20 <- 1 26 | alpha <- 1 27 | beta <- 1 28 | 29 | # models 30 | stancodeH0 <- 'data { 31 | int n; // number of observations 32 | vector[n] y; // observations 33 | real alpha; 34 | real beta; 35 | real sigma2; 36 | } 37 | parameters { 38 | real tau2; // group-level variance 39 | vector[n] theta; // participant effects 40 | } 41 | model { 42 | target += inv_gamma_lpdf(tau2 | alpha, beta); 43 | target += normal_lpdf(theta | 0, sqrt(tau2)); 44 | target += normal_lpdf(y | theta, sqrt(sigma2)); 45 | } 46 | ' 47 | stancodeH1 <- 
'data { 48 | int n; // number of observations 49 | vector[n] y; // observations 50 | real mu0; 51 | real tau20; 52 | real alpha; 53 | real beta; 54 | real sigma2; 55 | } 56 | parameters { 57 | real mu; 58 | real tau2; // group-level variance 59 | vector[n] theta; // participant effects 60 | } 61 | model { 62 | target += normal_lpdf(mu | mu0, sqrt(tau20)); 63 | target += inv_gamma_lpdf(tau2 | alpha, beta); 64 | target += normal_lpdf(theta | mu, sqrt(tau2)); 65 | target += normal_lpdf(y | theta, sqrt(sigma2)); 66 | } 67 | ' 68 | # compile models 69 | stanmodelH0 <- stan_model(model_code = stancodeH0, model_name="stanmodel") 70 | stanmodelH1 <- stan_model(model_code = stancodeH1, model_name="stanmodel") 71 | 72 | # fit models 73 | stanfitH0 <- sampling(stanmodelH0, data = list(y = y, n = n, 74 | alpha = alpha, 75 | beta = beta, 76 | sigma2 = sigma2), 77 | iter = 50000, warmup = 1000, chains = 3, cores = 1) 78 | stanfitH1 <- sampling(stanmodelH1, data = list(y = y, n = n, 79 | mu0 = mu0, 80 | tau20 = tau20, 81 | alpha = alpha, 82 | beta = beta, 83 | sigma2 = sigma2), 84 | iter = 50000, warmup = 1000, chains = 3, cores = 1) 85 | 86 | # compute log marginal likelihood via bridge sampling for H0 87 | H0.bridge <- bridge_sampler(stanfitH0, silent = TRUE) 88 | 89 | # compute log marginal likelihood via bridge sampling for H1 90 | H1.bridge <- bridge_sampler(stanfitH1, silent = TRUE) 91 | 92 | # compute percentage errors 93 | H0.error <- error_measures(H0.bridge)$percentage 94 | H1.error <- error_measures(H1.bridge)$percentage 95 | 96 | # compute Bayes factor 97 | BF01 <- bf(H0.bridge, H1.bridge) 98 | 99 | # compute posterior model probabilities (assuming equal prior model probabilities) 100 | post1 <- post_prob(H0.bridge, H1.bridge) 101 | 102 | # compute posterior model probabilities (using user-specified prior model probabilities) 103 | post2 <- post_prob(H0.bridge, H1.bridge, prior_prob = c(.6, .4)) 104 | 105 | # "exact" ml H1 106 | mH1 <- function(data, rel.tol = 1e-10) 
{ 107 | 108 | y <- data$y 109 | n <- data$n 110 | mu0 <- data$mu0 111 | tau20 <- data$tau20 112 | alpha <- data$alpha 113 | beta <- data$beta 114 | sigma2 <- data$sigma2 115 | 116 | mH1integrand <- function(tau2, y, sigma2, mu0, tau20, alpha, beta) { 117 | 118 | (sigma2 + tau2)^(-n/2) * 119 | exp(-1/2 * ((n*mean(y)^2 + (n - 1)*sd(y)^2)/(sigma2 + tau2) + 120 | mu0^2/tau20 - ((n*mean(y))/(sigma2 + tau2) + 121 | mu0/tau20)^2 / 122 | (n/(sigma2 + tau2) + 1/tau20))) * 123 | (n/(sigma2 + tau2) + 1/tau20)^(-1/2) * tau2^(-alpha - 1) * 124 | exp(-beta/tau2) 125 | 126 | } 127 | 128 | (2*pi)^(-n/2) * (tau20)^(-1/2) * beta^alpha/gamma(alpha) * 129 | integrate(mH1integrand, 0, Inf, 130 | rel.tol = rel.tol, y = y, 131 | sigma2 = sigma2, mu0 = mu0, 132 | tau20 = tau20, alpha = alpha, 133 | beta = beta)$value 134 | 135 | } 136 | 137 | exact_logmlH1 <- log(mH1(list(y = y, n = n, 138 | mu0 = mu0, 139 | tau20 = tau20, 140 | alpha = alpha, 141 | beta = beta, 142 | sigma2 = sigma2))) 143 | 144 | # "exact" ml H1 145 | mH0 <- function(data, rel.tol = 1e-10) { 146 | 147 | y <- data$y 148 | n <- data$n 149 | alpha <- data$alpha 150 | beta <- data$beta 151 | sigma2 <- data$sigma2 152 | 153 | mH0integrand <- function(tau2, y, sigma2, alpha, beta) { 154 | 155 | n <- length(y) 156 | 157 | (sigma2 + tau2)^(-n/2) * exp(-(n*mean(y)^2 + (n - 1)*sd(y)^2)/ 158 | (2*(sigma2 + tau2))) * 159 | tau2^(-alpha - 1) * exp(-beta/tau2) 160 | 161 | } 162 | 163 | (2*pi)^(-n/2) * beta^alpha/gamma(alpha) * 164 | integrate(mH0integrand, 0, Inf, rel.tol = rel.tol, 165 | y = y, sigma2 = sigma2, alpha = alpha, 166 | beta = beta)$value 167 | 168 | } 169 | 170 | exact_logmlH0 <- log(mH0(list(y = y, n = n, 171 | alpha = alpha, 172 | beta = beta, 173 | sigma2 = sigma2))) 174 | 175 | exact_BF01 <- exp(exact_logmlH0 - exact_logmlH1) 176 | 177 | H0.bridge.curr <- H0.bridge 178 | H1.bridge.curr <- H1.bridge 179 | BF01.curr <- BF01 180 | post1.curr <- post1 181 | post2.curr <- post2 182 | 183 | load(system.file("extdata/", 
"vignette_example_stan.RData", 184 | package = "bridgesampling")) 185 | 186 | expect_equal( 187 | H0.bridge.curr$logml, 188 | expected = exact_logmlH0, 189 | tolerance = 0.01 190 | ) 191 | expect_equal( 192 | H1.bridge.curr$logml, 193 | expected = exact_logmlH1, 194 | tolerance = 0.01 195 | ) 196 | expect_equal( 197 | BF01.curr$bf, 198 | expected = exact_BF01, 199 | tolerance = 0.01 200 | ) 201 | expect_equal( 202 | H0.bridge.curr$logml, 203 | expected = H0.bridge$logml, 204 | tolerance = 0.01 205 | ) 206 | expect_equal( 207 | H1.bridge.curr$logml, 208 | expected = H1.bridge$logml, 209 | tolerance = 0.01 210 | ) 211 | expect_equal( 212 | BF01.curr$bf, 213 | expected = BF01$bf, 214 | tolerance = 0.01 215 | ) 216 | expect_equal( 217 | post1.curr, 218 | expected = post1, 219 | tolerance = 0.01 220 | ) 221 | expect_equal( 222 | post2.curr, 223 | expected = post2, 224 | tolerance = 0.01 225 | ) 226 | 227 | } 228 | 229 | }) 230 | -------------------------------------------------------------------------------- /tests/testthat/test-vignette_stan_ttest.R: -------------------------------------------------------------------------------- 1 | 2 | context('test vignette bridgesampling_stan_ttest.Rmd') 3 | 4 | test_that("bridge sampler yields correct results", { 5 | 6 | testthat::skip_on_cran() 7 | testthat::skip_on_travis() 8 | 9 | # library(bridgesampling) 10 | if (require(rstan) && require(BayesFactor)) { 11 | 12 | set.seed(12345) 13 | 14 | # Sleep data from t.test example 15 | data(sleep) 16 | 17 | # compute difference scores 18 | y <- sleep$extra[sleep$group == 2] - sleep$extra[sleep$group == 1] 19 | n <- length(y) 20 | 21 | # models 22 | stancodeH0 <- ' 23 | data { 24 | int n; // number of observations 25 | vector[n] y; // observations 26 | } 27 | parameters { 28 | real sigma2; // variance parameter 29 | } 30 | model { 31 | target += log(1/sigma2); // Jeffreys prior on sigma2 32 | target += normal_lpdf(y | 0, sqrt(sigma2)); // likelihood 33 | } 34 | ' 35 | stancodeH1 <- ' 
36 | data { 37 | int n; // number of observations 38 | vector[n] y; // observations 39 | real r; // Cauchy prior scale 40 | } 41 | parameters { 42 | real delta; 43 | real sigma2;// variance parameter 44 | } 45 | model { 46 | target += cauchy_lpdf(delta | 0, r); // Cauchy prior on delta 47 | target += log(1/sigma2); // Jeffreys prior on sigma2 48 | target += normal_lpdf(y | delta*sqrt(sigma2), sqrt(sigma2)); // likelihood 49 | } 50 | ' 51 | # compile models 52 | stanmodelH0 <- stan_model(model_code = stancodeH0, model_name="stanmodel") 53 | stanmodelH1 <- stan_model(model_code = stancodeH1, model_name="stanmodel") 54 | 55 | # fit models 56 | stanfitH0 <- sampling(stanmodelH0, data = list(y = y, n = n), 57 | iter = 20000, warmup = 1000, chains = 4, cores = 1, 58 | control = list(adapt_delta = .99)) 59 | stanfitH1 <- sampling(stanmodelH1, data = list(y = y, n = n, r = 1/sqrt(2)), 60 | iter = 20000, warmup = 1000, chains = 4, cores = 1, 61 | control = list(adapt_delta = .99)) 62 | 63 | set.seed(12345) 64 | suppressWarnings(H0 <- bridge_sampler(stanfitH0, silent = TRUE)) 65 | H1 <- bridge_sampler(stanfitH1, silent = TRUE) 66 | 67 | # compute percentage errors 68 | H0.error <- error_measures(H0)$percentage 69 | H1.error <- error_measures(H1)$percentage 70 | 71 | # compute Bayes factor 72 | BF10 <- bf(H1, H0) 73 | 74 | # BayesFactor result 75 | BF10.BayesFactor <- extractBF(ttestBF(y), onlybf = TRUE, logbf = FALSE) 76 | 77 | # one-sided test 78 | stancodeHplus <- ' 79 | data { 80 | int n; // number of observations 81 | vector[n] y; // observations 82 | real r; // Cauchy prior scale 83 | } 84 | parameters { 85 | real delta; // constrained to be positive 86 | real sigma2;// variance parameter 87 | } 88 | model { 89 | target += cauchy_lpdf(delta | 0, r) - cauchy_lccdf(0 | 0, r); // Cauchy prior on delta 90 | target += log(1/sigma2); // Jeffreys prior on sigma2 91 | target += normal_lpdf(y | delta*sqrt(sigma2), sqrt(sigma2)); // likelihood 92 | } 93 | ' 94 | # compile and fit 
model 95 | stanmodelHplus <- stan_model(model_code = stancodeHplus, model_name="stanmodel") 96 | stanfitHplus <- sampling(stanmodelHplus, data = list(y = y, n = n, r = 1/sqrt(2)), 97 | iter = 30000, warmup = 1000, chains = 4, 98 | control = list(adapt_delta = .99)) 99 | 100 | Hplus <- bridge_sampler(stanfitHplus, silent = TRUE) 101 | Hplus.error <- error_measures(Hplus)$percentage 102 | 103 | # compute Bayes factor 104 | BFplus0 <- bf(Hplus, H0) 105 | 106 | BFplus0.BayesFactor <- extractBF(ttestBF(y, nullInterval = c(0, Inf)), 107 | onlybf = TRUE, logbf = FALSE)[1] 108 | 109 | H0.curr <- H0 110 | H1.curr <- H1 111 | Hplus.curr <- Hplus 112 | BF10.curr <- BF10 113 | BFplus0.curr <- BFplus0 114 | 115 | load(system.file("extdata/", "vignette_stan_ttest.RData", 116 | package = "bridgesampling")) 117 | 118 | expect_equal( 119 | H0.curr$logml, 120 | expected = H0$logml, 121 | tolerance = 0.01 122 | ) 123 | expect_equal( 124 | H1.curr$logml, 125 | expected = H1$logml, 126 | tolerance = 0.01 127 | ) 128 | expect_equal( 129 | BF10.curr$bf, 130 | expected = BF10$bf, 131 | tolerance = 0.01 132 | ) 133 | expect_equal( 134 | BF10.curr$bf, 135 | expected = BF10.BayesFactor, 136 | tolerance = 0.03 137 | ) 138 | expect_equal( 139 | BFplus0.curr$bf, 140 | expected = BFplus0$bf, 141 | tolerance = 0.01 142 | ) 143 | expect_equal( 144 | BFplus0.curr$bf, 145 | expected = BFplus0.BayesFactor, 146 | tolerance = 0.03 147 | ) 148 | } 149 | 150 | }) 151 | -------------------------------------------------------------------------------- /tests/testthat/unnormalized_normal_density.cpp: -------------------------------------------------------------------------------- 1 | // load Rcpp 2 | #include 3 | #include 4 | #include 5 | 6 | using namespace Rcpp; 7 | using Eigen::VectorXd; 8 | using Eigen::Map; 9 | 10 | //------------------------------------------------------------------------------ 11 | // unnormalized standard multivariate normal density function (log) 12 | 
//------------------------------------------------------------------------------ 13 | 14 | // [[Rcpp::depends(RcppEigen)]] 15 | // [[Rcpp::export]] 16 | double log_densityRcpp(NumericVector x, SEXP data) { 17 | 18 | VectorXd xe(as >(x)); 19 | return -0.5*xe.transpose()*xe; 20 | 21 | } 22 | -------------------------------------------------------------------------------- /tests/testthat/unnormalized_normal_density_mu.cpp: -------------------------------------------------------------------------------- 1 | // load Rcpp 2 | #include 3 | #include 4 | #include 5 | 6 | using namespace Rcpp; 7 | using Eigen::VectorXd; 8 | using Eigen::Map; 9 | 10 | //------------------------------------------------------------------------------ 11 | // unnormalized standard multivariate normal density function (log) 12 | //------------------------------------------------------------------------------ 13 | 14 | // [[Rcpp::depends(RcppEigen)]] 15 | // [[Rcpp::export]] 16 | double log_densityRcpp_mu(NumericVector x, SEXP data, NumericVector mu) { 17 | 18 | VectorXd xe(as >(x)); 19 | VectorXd mue(as >(mu)); 20 | return -0.5*(xe - mue).transpose()*(xe - mue); 21 | 22 | } 23 | -------------------------------------------------------------------------------- /vignettes/bridgesampling_example_nimble.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Hierarchical Normal Example (nimble)" 3 | author: "Quentin F. Gronau, Henrik Singmann & Perry de Valpine" 4 | date: "`r Sys.Date()`" 5 | show_toc: true 6 | output: 7 | knitr:::html_vignette: 8 | toc: yes 9 | vignette: > 10 | %\VignetteIndexEntry{Hierarchical Normal Example Nimble} 11 | %\VignetteEngine{knitr::rmarkdown} 12 | %\VignetteEncoding{UTF-8} 13 | --- 14 | 15 | In this vignette, we explain how one can compute marginal likelihoods, Bayes factors, and posterior model probabilities using a simple hierarchical normal model implemented in `nimble`. 
The [`nimble` documentation](https://r-nimble.org/html_manual/cha-welcome-nimble.html) provides a comprehensive overview. This vignette uses the same models and data as the [`Stan` vignette](bridgesampling_example_stan.html) and [`Jags` vignette](bridgesampling_example_jags.html). 16 | 17 | ## Model and Data 18 | The model that we will use assumes that each of the $n$ observations $y_i$ (where $i$ indexes the observation, $i = 1,2,...,n$) is normally distributed with corresponding mean $\theta_i$ and a common known variance $\sigma^2$: $y_i \sim \mathcal{N}(\theta_i, \sigma^2)$. Each $\theta_i$ is drawn from a normal group-level distribution with mean $\mu$ and variance $\tau^2$: $\theta_i \sim \mathcal{N}(\mu, \tau^2)$. For the group-level mean $\mu$, we use a normal prior distribution of the form $\mathcal{N}(\mu_0, \tau^2_0)$. For the group-level variance $\tau^2$, we use an inverse-gamma prior of the form $\text{Inv-Gamma}(\alpha, \beta)$. 19 | 20 | In this example, we are interested in comparing the null model $\mathcal{H}_0$, which posits that the group-level mean $\mu = 0$, to the alternative model $\mathcal{H}_1$, which allows $\mu$ to be different from zero. First, we generate some data from the null model: 21 | 22 | ```{r} 23 | library(bridgesampling) 24 | 25 | ### generate data ### 26 | set.seed(12345) 27 | 28 | mu <- 0 29 | tau2 <- 0.5 30 | sigma2 <- 1 31 | 32 | n <- 20 33 | theta <- rnorm(n, mu, sqrt(tau2)) 34 | y <- rnorm(n, theta, sqrt(sigma2)) 35 | 36 | ``` 37 | 38 | Next, we specify the prior parameters $\mu_0$, $\tau^2_0$, $\alpha$, and $\beta$: 39 | 40 | ```{r,eval=FALSE} 41 | ### set prior parameters ### 42 | mu0 <- 0 43 | tau20 <- 1 44 | alpha <- 1 45 | beta <- 1 46 | ``` 47 | 48 | ## Specifying the Models 49 | Next, we implement the models in `nimble`. This requires to first transform the code into a `nimbleModel`, then we need to set the data, and then we can compile the model. 
Given that `nimble` is built on BUGS, the similarity between the `nimble` code and the [`Jags` code](bridgesampling_example_jags.html) is not too surprising. 50 | 51 | ```{r, eval=FALSE} 52 | library("nimble") 53 | 54 | # models 55 | codeH0 <- nimbleCode({ 56 | invTau2 ~ dgamma(1, 1) 57 | tau2 <- 1/invTau2 58 | for (i in 1:20) { 59 | theta[i] ~ dnorm(0, sd = sqrt(tau2)) 60 | y[i] ~ dnorm(theta[i], sd = 1) 61 | } 62 | }) 63 | codeH1 <- nimbleCode({ 64 | mu ~ dnorm(0, sd = 1) 65 | invTau2 ~ dgamma(1, 1) 66 | tau2 <- 1/invTau2 67 | for (i in 1:20) { 68 | theta[i] ~ dnorm(mu, sd = sqrt(tau2)) 69 | y[i] ~ dnorm(theta[i], sd = 1) 70 | } 71 | }) 72 | 73 | ## steps for H0: 74 | modelH0 <- nimbleModel(codeH0) 75 | modelH0$setData(y = y) # set data 76 | cmodelH0 <- compileNimble(modelH0) # make compiled version from generated C++ 77 | 78 | ## steps for H1: 79 | modelH1 <- nimbleModel(codeH1) 80 | modelH1$setData(y = y) # set data 81 | cmodelH1 <- compileNimble(modelH1) # make compiled version from generated C++ 82 | 83 | ``` 84 | ## Fitting the Models 85 | Fitting a model with `nimble` requires one to first create an MCMC function from the (compiled or uncompiled) model. This function then needs to be compiled again. With this object we can then create the samples. Note that nimble uses a reference object semantic so we do not actually need the samples object, as the samples will be saved in the MCMC function objects. But as `runMCMC` returns them anyway, we nevertheless save them. 86 | 87 | One usually requires a larger number of posterior samples for estimating the marginal likelihood than for simply estimating the model parameters. This is the reason for using a comparatively large number of samples for these simple models. 88 | 89 | ```{r, eval=FALSE} 90 | 91 | # build MCMC functions, skipping customization of the configuration.
92 | mcmcH0 <- buildMCMC(modelH0, 93 | monitors = modelH0$getNodeNames(stochOnly = TRUE, 94 | includeData = FALSE)) 95 | mcmcH1 <- buildMCMC(modelH1, 96 | monitors = modelH1$getNodeNames(stochOnly = TRUE, 97 | includeData = FALSE)) 98 | # compile the MCMC function via generated C++ 99 | cmcmcH0 <- compileNimble(mcmcH0, project = modelH0) 100 | cmcmcH1 <- compileNimble(mcmcH1, project = modelH1) 101 | 102 | # run the MCMC. This is a wrapper for cmcmc$run() and extraction of samples. 103 | # the object samplesH1 is actually not needed as the samples are also in cmcmcH1 104 | samplesH0 <- runMCMC(cmcmcH0, niter = 1e5, nburnin = 1000, nchains = 2, 105 | progressBar = FALSE) 106 | samplesH1 <- runMCMC(cmcmcH1, niter = 1e5, nburnin = 1000, nchains = 2, 107 | progressBar = FALSE) 108 | ``` 109 | 110 | ## Computing the (Log) Marginal Likelihoods 111 | Computing the (log) marginal likelihoods via the `bridge_sampler` function is now easy: we only need to pass the compiled MCMC function objects (of class `"MCMC_refClass"`) which contain all information necessary. 
We use `silent = TRUE` to suppress printing the number of iterations to the console: 112 | ```{r, echo=FALSE} 113 | load(system.file("extdata/", "vignette_example_nimble.RData", 114 | package = "bridgesampling")) 115 | ``` 116 | 117 | ```{r,eval=FALSE} 118 | # compute log marginal likelihood via bridge sampling for H0 119 | H0.bridge <- bridge_sampler(cmcmcH0, silent = TRUE) 120 | 121 | # compute log marginal likelihood via bridge sampling for H1 122 | H1.bridge <- bridge_sampler(cmcmcH1, silent = TRUE) 123 | ``` 124 | We obtain: 125 | ```{r} 126 | print(H0.bridge) 127 | print(H1.bridge) 128 | ``` 129 | We can use the `error_measures` function to compute an approximate percentage error of the estimates: 130 | ```{r,eval=FALSE} 131 | # compute percentage errors 132 | H0.error <- error_measures(H0.bridge)$percentage 133 | H1.error <- error_measures(H1.bridge)$percentage 134 | ``` 135 | 136 | We obtain: 137 | ```{r} 138 | print(H0.error) 139 | print(H1.error) 140 | ``` 141 | 142 | ## Bayesian Model Comparison 143 | To compare the null model and the alternative model, we can compute the Bayes factor by using the `bf` function. 144 | In our case, we compute $\text{BF}_{01}$, that is, the Bayes factor which quantifies how much more likely the data are under the null versus the alternative model: 145 | ```{r} 146 | # compute Bayes factor 147 | BF01 <- bf(H0.bridge, H1.bridge) 148 | print(BF01) 149 | ``` 150 | In this case, the Bayes factor is close to one, indicating that there is not much evidence for either model. We can also compute posterior model probabilities by using the `post_prob` function: 151 | ```{r} 152 | # compute posterior model probabilities (assuming equal prior model probabilities) 153 | post1 <- post_prob(H0.bridge, H1.bridge) 154 | print(post1) 155 | ``` 156 | When the argument `prior_prob` is not specified, as is the case here, the prior model probabilities of all models under consideration are set equal (i.e., in this case with two models to 0.5). 
However, if we had prior knowledge about how likely both models are, we could use the `prior_prob` argument to specify different prior model probabilities: 157 | ```{r} 158 | # compute posterior model probabilities (using user-specified prior model probabilities) 159 | post2 <- post_prob(H0.bridge, H1.bridge, prior_prob = c(.6, .4)) 160 | print(post2) 161 | ``` 162 | -------------------------------------------------------------------------------- /vignettes/bridgesampling_example_simplex_or_circular_parameter_spaces.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Bridge sampling using simplex parameters (ie. mixture weights) or circular parameters" 3 | author: "Kees Mulder" 4 | date: "`r Sys.Date()`" 5 | show_toc: true 6 | output: 7 | knitr:::html_vignette: 8 | toc: yes 9 | vignette: > 10 | %\VignetteIndexEntry{Simplex and Circular Parameters} 11 | %\VignetteEncoding{UTF-8} 12 | %\VignetteEngine{knitr::rmarkdown} 13 | editor_options: 14 | chunk_output_type: console 15 | --- 16 | 17 | In this vignette, we explain how bridge sampling can be performed when we are faced with parameter spaces that are in some way non-standard. We will look at simplex parameters and circular parameters. 18 | 19 | Simplex parameters are encountered often, in particular in mixture models or when modeling compositional data, where a set of parameters $\theta_1, \dots, \theta_k$ is used that are constrained by $0 \leq \theta \leq 1$ and $\sum_{j=1}^k \theta_j = 1$. This happens often when we use relative weights of several components, or when we model proportions or probabilities. 20 | 21 | Circular parameters are angles that lie on the circle, that is, the parameters are given in degrees ($0^\circ - 360^\circ$) or radians ($0 - 2\pi$).
The core property of this type of parameter space is that it is periodic, that is, for example $\theta = 0^\circ = 360^\circ.$ Another way to think of such parameters is as two-dimensional unit vectors, $\boldsymbol{x} = \{x_1, x_2\}$, which are constrained by $\sqrt{x_1^2 + x_2^2} = 1$. 22 | 23 | This vignette will only focus on computing the marginal likelihood for such parameters. For information on further computations, see the other vignettes. 24 | 25 | ## Posterior sample 26 | 27 | We will assume a posterior sample was obtained through some method, and we will show how to compute the marginal likelihood from this sample. The example posterior sample will simply be generated randomly, just for illustration. 28 | 29 | The model under consideration will be a circular mixture model, where for simplicity we only look at its mixture weights and circular means. 30 | 31 | ```{r} 32 | library(bridgesampling) 33 | 34 | # Posterior sample size Q, number of components nc, and data sample size n. 35 | Q <- 50 36 | nc <- 3 37 | n <- 100 38 | 39 | # Sample simplex parameters. 40 | ru <- replicate(nc, runif(Q)) 41 | simplex_param <- ru / rowSums(ru) 42 | colnames(simplex_param) <- paste0("sim", 1:nc) 43 | 44 | # Sample circular parameters. 45 | th <- replicate(nc, atan2(rnorm(Q, 2), rnorm(Q, 1))) 46 | colnames(th) <- paste0("circ", 1:nc) 47 | 48 | 49 | # Example circular mixture data. 50 | data <- c(atan2(rnorm(n, 2), rnorm(n, 1)), 51 | atan2(rnorm(n, 6), rnorm(n, 3)), 52 | atan2(rnorm(n, 2), rnorm(n, -1))) 53 | 54 | # Posterior sample. 55 | post_sample <- cbind(simplex_param, circ = th) 56 | 57 | # Dummy log-posterior. 58 | log_posterior = function(s, data) -.5*t(s) %*% s 59 | 60 | ``` 61 | 62 | Note that the posterior of the specific model you are using should be used. Here, a simple dummy posterior is used that does not depend on the data, but it is just by means of example.
63 | 64 | ## Bridge sampling 65 | 66 | For this posterior sample, we can provide the variable types to the bridge sampling functions. In order to use simplex and circular parameters, we must use `bridge_sampler.matrix`, the bridgesampling method for matrices of posterior samples. 67 | 68 | Using this method, we must pass the type of the parameters under consideration. Here, we pass respectively `"simplex"` and `"circular"` to the `param_types` argument of `bridge_sampler`. We can do this as follows: 69 | 70 | ```{r} 71 | # Give the type of parameter. 72 | parameter_types <- c(rep("simplex", nc), 73 | rep("circular", nc)) 74 | lb <- c(rep(0, 3), rep(0, 3)) 75 | ub <- c(rep(1, 3), rep(2*pi, 3)) 76 | 77 | bs_obj <- bridge_sampler(post_sample, 78 | data = data, 79 | param_types = parameter_types, 80 | log_posterior = log_posterior, 81 | lb = lb, 82 | ub = ub) 83 | bs_obj 84 | ``` 85 | 86 | Because we have told the `bridge_sampler` function to treat these variables as simplex or circular variables, it can select the correct transformations behind the scenes to ensure that the bridge sampling can proceed as normal. Note that after this procedure, all the available methods for bridge sampling objects can be used. 87 | 88 | A few notes on the use of these variables: 89 | 90 | - Simplex parameters that are provided must always be between zero and one, and sum to one. 91 | 92 | - Circular variables must always be provided in radians. 93 | 94 | - Although they must be provided to the function, the lower and upper bounds, `lb` and `ub`, are ignored for simplex and circular variables. Of course, the `lb` and `ub` vectors should still contain their usual information for the parameters that are not simplex or circular. 95 | 96 | - Only one set of simplex parameters can be provided at a time. That is, all simplex parameters are assumed to be part of the same set. 
97 | -------------------------------------------------------------------------------- /vignettes/bridgesampling_example_stan.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Hierarchical Normal Example (Stan)" 3 | author: "Quentin F. Gronau" 4 | date: "`r Sys.Date()`" 5 | show_toc: true 6 | output: 7 | knitr:::html_vignette: 8 | toc: yes 9 | vignette: > 10 | %\VignetteIndexEntry{Hierarchical Normal Example Stan} 11 | %\VignetteEngine{knitr::rmarkdown} 12 | %\VignetteEncoding{UTF-8} 13 | --- 14 | 15 | In this vignette, we explain how one can compute marginal likelihoods, Bayes factors, and posterior model probabilities using a simple hierarchical normal model implemented in `Stan`. This vignette uses the same models and data as the [`Jags` vignette](bridgesampling_example_jags.html). 16 | 17 | ## Model and Data 18 | The model that we will use assumes that each of the $n$ observations $y_i$ (where $i$ indexes the observation, $i = 1,2,...,n$) is normally distributed with corresponding mean $\theta_i$ and a common known variance $\sigma^2$: $y_i \sim \mathcal{N}(\theta_i, \sigma^2)$. Each $\theta_i$ is drawn from a normal group-level distribution with mean $\mu$ and variance $\tau^2$: $\theta_i \sim \mathcal{N}(\mu, \tau^2)$. For the group-level mean $\mu$, we use a normal prior distribution of the form $\mathcal{N}(\mu_0, \tau^2_0)$. For the group-level variance $\tau^2$, we use an inverse-gamma prior of the form $\text{Inv-Gamma}(\alpha, \beta)$. 19 | 20 | In this example, we are interested in comparing the null model $\mathcal{H}_0$, which posits that the group-level mean $\mu = 0$, to the alternative model $\mathcal{H}_1$, which allows $\mu$ to be different from zero. 
First, we generate some data from the null model: 21 | 22 | ```{r} 23 | library(bridgesampling) 24 | 25 | ### generate data ### 26 | set.seed(12345) 27 | 28 | mu <- 0 29 | tau2 <- 0.5 30 | sigma2 <- 1 31 | 32 | n <- 20 33 | theta <- rnorm(n, mu, sqrt(tau2)) 34 | y <- rnorm(n, theta, sqrt(sigma2)) 35 | 36 | ``` 37 | 38 | Next, we specify the prior parameters $\mu_0$, $\tau^2_0$, $\alpha$, and $\beta$: 39 | 40 | ```{r,eval=FALSE} 41 | ### set prior parameters ### 42 | mu0 <- 0 43 | tau20 <- 1 44 | alpha <- 1 45 | beta <- 1 46 | ``` 47 | 48 | ## Specifying the Models 49 | Next, we implement the models in `Stan`. Note that to compute the (log) marginal likelihood for a `Stan` model, we need to specify the model in a certain way. Instead of using `"~"` signs for specifying distributions, we need to directly use the (log) density functions. The reason for this is that when using the `"~"` sign, constant terms are dropped which are not needed for sampling from the posterior. However, for computing the marginal likelihood, these constants need to be retained. For instance, instead of writing `y ~ normal(mu, sigma)` we would need to write `target += normal_lpdf(y | mu, sigma)`.
The models can then be specified and compiled as follows (note that it is necessary to install `rstan` for this): 50 | ```{r, eval=FALSE} 51 | library(rstan) 52 | 53 | # models 54 | stancodeH0 <- 'data { 55 | int n; // number of observations 56 | vector[n] y; // observations 57 | real alpha; 58 | real beta; 59 | real sigma2; 60 | } 61 | parameters { 62 | real tau2; // group-level variance 63 | vector[n] theta; // participant effects 64 | } 65 | model { 66 | target += inv_gamma_lpdf(tau2 | alpha, beta); 67 | target += normal_lpdf(theta | 0, sqrt(tau2)); 68 | target += normal_lpdf(y | theta, sqrt(sigma2)); 69 | } 70 | ' 71 | stancodeH1 <- 'data { 72 | int n; // number of observations 73 | vector[n] y; // observations 74 | real mu0; 75 | real tau20; 76 | real alpha; 77 | real beta; 78 | real sigma2; 79 | } 80 | parameters { 81 | real mu; 82 | real tau2; // group-level variance 83 | vector[n] theta; // participant effects 84 | } 85 | model { 86 | target += normal_lpdf(mu | mu0, sqrt(tau20)); 87 | target += inv_gamma_lpdf(tau2 | alpha, beta); 88 | target += normal_lpdf(theta | mu, sqrt(tau2)); 89 | target += normal_lpdf(y | theta, sqrt(sigma2)); 90 | } 91 | ' 92 | # compile models 93 | stanmodelH0 <- stan_model(model_code = stancodeH0, model_name="stanmodel") 94 | stanmodelH1 <- stan_model(model_code = stancodeH1, model_name="stanmodel") 95 | ``` 96 | ## Fitting the Models 97 | Now we can fit the null and the alternative model in `Stan`. One usually requires a larger number of posterior samples for estimating the marginal likelihood than for simply estimating the model parameters. This is the reason for using a comparatively large number of samples for these simple models. 
98 | ```{r, eval=FALSE} 99 | # fit models 100 | stanfitH0 <- sampling(stanmodelH0, data = list(y = y, n = n, 101 | alpha = alpha, 102 | beta = beta, 103 | sigma2 = sigma2), 104 | iter = 50000, warmup = 1000, chains = 3, cores = 1) 105 | stanfitH1 <- sampling(stanmodelH1, data = list(y = y, n = n, 106 | mu0 = mu0, 107 | tau20 = tau20, 108 | alpha = alpha, 109 | beta = beta, 110 | sigma2 = sigma2), 111 | iter = 50000, warmup = 1000, chains = 3, cores = 1) 112 | ``` 113 | 114 | ## Computing the (Log) Marginal Likelihoods 115 | Computing the (log) marginal likelihoods via the `bridge_sampler` function is now easy: we only need to pass the `stanfit` objects which contain all information necessary. We use `silent = TRUE` to suppress printing the number of iterations to the console: 116 | ```{r, echo=FALSE} 117 | load(system.file("extdata/", "vignette_example_stan.RData", 118 | package = "bridgesampling")) 119 | ``` 120 | 121 | ```{r,eval=FALSE} 122 | # compute log marginal likelihood via bridge sampling for H0 123 | H0.bridge <- bridge_sampler(stanfitH0, silent = TRUE) 124 | 125 | # compute log marginal likelihood via bridge sampling for H1 126 | H1.bridge <- bridge_sampler(stanfitH1, silent = TRUE) 127 | ``` 128 | We obtain: 129 | ```{r} 130 | print(H0.bridge) 131 | print(H1.bridge) 132 | ``` 133 | We can use the `error_measures` function to compute an approximate percentage error of the estimates: 134 | ```{r,eval=FALSE} 135 | # compute percentage errors 136 | H0.error <- error_measures(H0.bridge)$percentage 137 | H1.error <- error_measures(H1.bridge)$percentage 138 | ``` 139 | We obtain: 140 | ```{r} 141 | print(H0.error) 142 | print(H1.error) 143 | ``` 144 | 145 | ## Bayesian Model Comparison 146 | To compare the null model and the alternative model, we can compute the Bayes factor by using the `bf` function. 
147 | In our case, we compute $\text{BF}_{01}$, that is, the Bayes factor which quantifies how much more likely the data are under the null versus the alternative model: 148 | ```{r} 149 | # compute Bayes factor 150 | BF01 <- bf(H0.bridge, H1.bridge) 151 | print(BF01) 152 | ``` 153 | In this case, the Bayes factor is close to one, indicating that there is not much evidence for either model. We can also compute posterior model probabilities by using the `post_prob` function: 154 | ```{r} 155 | # compute posterior model probabilities (assuming equal prior model probabilities) 156 | post1 <- post_prob(H0.bridge, H1.bridge) 157 | print(post1) 158 | ``` 159 | When the argument `prior_prob` is not specified, as is the case here, the prior model probabilities of all models under consideration are set equal (i.e., in this case with two models to 0.5). However, if we had prior knowledge about how likely both models are, we could use the `prior_prob` argument to specify different prior model probabilities: 160 | ```{r} 161 | # compute posterior model probabilities (using user-specified prior model probabilities) 162 | post2 <- post_prob(H0.bridge, H1.bridge, prior_prob = c(.6, .4)) 163 | print(post2) 164 | ``` 165 | -------------------------------------------------------------------------------- /vignettes/bridgesampling_paper.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quentingronau/bridgesampling/6793386ffe30205d649d2fe5ce9597c0bff83142/vignettes/bridgesampling_paper.pdf -------------------------------------------------------------------------------- /vignettes/bridgesampling_paper.pdf.asis: -------------------------------------------------------------------------------- 1 | %\VignetteIndexEntry{bridgesampling: An R Package for Estimating Normalizing Constants (JSS version)} 2 | %\VignetteEngine{R.rsp::asis} 3 | -------------------------------------------------------------------------------- 
/vignettes/bridgesampling_paper_extended.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quentingronau/bridgesampling/6793386ffe30205d649d2fe5ce9597c0bff83142/vignettes/bridgesampling_paper_extended.pdf -------------------------------------------------------------------------------- /vignettes/bridgesampling_paper_extended.pdf.asis: -------------------------------------------------------------------------------- 1 | %\VignetteIndexEntry{bridgesampling: An R Package for Estimating Normalizing Constants (Extended)} 2 | %\VignetteEngine{R.rsp::asis} 3 | -------------------------------------------------------------------------------- /vignettes/bridgesampling_tutorial.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quentingronau/bridgesampling/6793386ffe30205d649d2fe5ce9597c0bff83142/vignettes/bridgesampling_tutorial.pdf -------------------------------------------------------------------------------- /vignettes/bridgesampling_tutorial.pdf.asis: -------------------------------------------------------------------------------- 1 | %\VignetteIndexEntry{A Tutorial on Bridge Sampling} 2 | %\VignetteEngine{R.rsp::asis} 3 | --------------------------------------------------------------------------------