├── .Rbuildignore ├── .gitignore ├── .travis.yml ├── CHANGELOG ├── DESCRIPTION ├── LICENSE ├── LaplacesDemon.Rproj ├── NAMESPACE ├── R ├── ABB.R ├── AcceptanceRate.R ├── BMK.Diagnostic.R ├── BayesFactor.R ├── BayesTheorem.R ├── BayesianBootstrap.R ├── BigData.R ├── Blocks.R ├── CSF.R ├── CenterScale.R ├── Combine.R ├── Consort.R ├── ESS.R ├── Elicitation.R ├── GIV.R ├── Gelfand.Diagnostic.R ├── Gelman.Diagnostic.R ├── Geweke.Diagnostic.R ├── Hangartner.Diagnostic.R ├── Heidelberger.Diagnostic.R ├── IAT.R ├── Importance.R ├── IterativeQuadrature.R ├── Juxtapose.R ├── KLD.R ├── KS.Diagnostic.R ├── LML.R ├── LPL.interval.R ├── LaplaceApproximation.R ├── LaplacesDemon.R ├── LaplacesDemon.RAM.R ├── LaplacesDemon.hpc.R ├── Levene.Test.R ├── LossMatrix.R ├── MCSE.R ├── MISS.R ├── Math.R ├── Matrices.R ├── MinnesotaPrior.R ├── Mode.R ├── Model.Spec.Time.R ├── PMC.R ├── PMC.RAM.R ├── PosteriorChecks.R ├── Precision.R ├── Raftery.Diagnostic.R ├── RejectionSampling.R ├── SIR.R ├── SensitivityAnalysis.R ├── Stick.R ├── Thin.R ├── Validate.R ├── VariationalBayes.R ├── WAIC.R ├── as.covar.R ├── as.initial.values.R ├── as.parm.names.R ├── as.ppc.R ├── burnin.R ├── caterpillar.plot.R ├── cond.plot.R ├── de.Finetti.Game.R ├── deburn.R ├── distributions.R ├── hpc_server.R ├── interval.R ├── is.appeased.R ├── is.bayesian.R ├── is.class.R ├── is.constant.R ├── is.constrained.R ├── is.data.R ├── is.model.R ├── is.proper.R ├── is.stationary.R ├── joint.density.plot.R ├── joint.pr.plot.R ├── log-log.R ├── logit.R ├── p.interval.R ├── plot.bmk.R ├── plot.demonoid.R ├── plot.demonoid.hpc.R ├── plot.demonoid.ppc.R ├── plot.importance.R ├── plot.iterquad.R ├── plot.iterquad.ppc.R ├── plot.juxtapose.R ├── plot.laplace.R ├── plot.laplace.ppc.R ├── plot.miss.R ├── plot.pmc.R ├── plot.pmc.ppc.R ├── plot.vb.R ├── plot.vb.ppc.R ├── plotMatrix.R ├── plotSamples.R ├── predict.demonoid.R ├── predict.iterquad.R ├── predict.laplace.R ├── predict.pmc.R ├── predict.vb.R ├── print.demonoid.R ├── print.heidelberger.R ├── print.iterquad.R ├── print.laplace.R ├── print.miss.R ├── print.pmc.R ├── print.raftery.R ├── print.vb.R ├── summary.demonoid.ppc.R ├── summary.iterquad.ppc.R ├── summary.laplace.ppc.R ├── summary.miss.R ├── summary.pmc.ppc.R └── summary.vb.ppc.R ├── README ├── README.md ├── data ├── demonchoice.txt ├── demonfx.txt ├── demonsessions.txt ├── demonsnacks.txt └── demontexas.txt ├── development.R ├── inst └── CITATION ├── man ├── ABB.Rd ├── AcceptanceRate.Rd ├── BMK.Diagnostic.Rd ├── BayesFactor.Rd ├── BayesTheorem.Rd ├── BayesianBootstrap.Rd ├── BigData.Rd ├── Blocks.Rd ├── CSF.Rd ├── CenterScale.Rd ├── Combine.Rd ├── Consort.Rd ├── ESS.Rd ├── Elicitation.Rd ├── GIV.Rd ├── Gelfand.Diagnostic.Rd ├── Gelman.Diagnostic.Rd ├── Geweke.Diagnostic.Rd ├── Hangartner.Diagnostic.Rd ├── Heidelberger.Diagnostic.Rd ├── IAT.Rd ├── Importance.Rd ├── IterativeQuadrature.Rd ├── Juxtapose.Rd ├── KLD.Rd ├── KS.Diagnostic.Rd ├── LML.Rd ├── LPL.interval.Rd ├── LaplaceApproximation.Rd ├── LaplacesDemon-package.Rd ├── LaplacesDemon.RAM.Rd ├── LaplacesDemon.Rd ├── Levene.Test.Rd ├── LossMatrix.Rd ├── MCSE.Rd ├── MISS.Rd ├── Math.Rd ├── Matrices.Rd ├── MinnesotaPrior.Rd ├── Mode.Rd ├── Model.Spec.Time.Rd ├── PMC.RAM.Rd ├── PMC.Rd ├── PosteriorChecks.Rd ├── Precision.Rd ├── Raftery.Diagnostic.Rd ├── RejectionSampling.Rd ├── SIR.Rd ├── SensitivityAnalysis.Rd ├── Stick.Rd ├── Thin.Rd ├── Validate.Rd ├── VariationalBayes.Rd ├── WAIC.Rd ├── as.covar.Rd ├── as.initial.values.Rd ├── as.parm.names.Rd ├── as.ppc.Rd ├── burnin.Rd ├── 
caterpillar.plot.Rd ├── cond.plot.Rd ├── data.demonchoice.Rd ├── data.demonfx.Rd ├── data.demonsessions.Rd ├── data.demonsnacks.Rd ├── data.demontexas.Rd ├── de.Finetti.Game.Rd ├── deburn.Rd ├── dist.Asymmetric.Laplace.Rd ├── dist.Asymmetric.Log.Laplace.Rd ├── dist.Asymmetric.Multivariate.Laplace.Rd ├── dist.Bernoulli.Rd ├── dist.Categorical.Rd ├── dist.ContinuousRelaxation.Rd ├── dist.Dirichlet.Rd ├── dist.Generalized.Pareto.Rd ├── dist.Generalized.Poisson.Rd ├── dist.HalfCauchy.Rd ├── dist.HalfNorm.Rd ├── dist.Halft.Rd ├── dist.Horseshoe.Rd ├── dist.HuangWand.Rd ├── dist.Inverse.Beta.Rd ├── dist.Inverse.ChiSquare.Rd ├── dist.Inverse.Gamma.Rd ├── dist.Inverse.Gaussian.Rd ├── dist.Inverse.Matrix.Gamma.Rd ├── dist.Inverse.Wishart.Cholesky.Rd ├── dist.Inverse.Wishart.Rd ├── dist.LASSO.Rd ├── dist.Laplace.Mixture.Rd ├── dist.Laplace.Precision.Rd ├── dist.Laplace.Rd ├── dist.Log.Laplace.Rd ├── dist.Log.Normal.Precision.Rd ├── dist.Matrix.Gamma.Rd ├── dist.Matrix.Normal.Rd ├── dist.Multivariate.Cauchy.Cholesky.Rd ├── dist.Multivariate.Cauchy.Precision.Cholesky.Rd ├── dist.Multivariate.Cauchy.Precision.Rd ├── dist.Multivariate.Cauchy.Rd ├── dist.Multivariate.Laplace.Cholesky.Rd ├── dist.Multivariate.Laplace.Rd ├── dist.Multivariate.Normal.Cholesky.Rd ├── dist.Multivariate.Normal.Precision.Cholesky.Rd ├── dist.Multivariate.Normal.Precision.Rd ├── dist.Multivariate.Normal.Rd ├── dist.Multivariate.Polya.Rd ├── dist.Multivariate.Power.Exponential.Cholesky.Rd ├── dist.Multivariate.Power.Exponential.Rd ├── dist.Multivariate.t.Cholesky.Rd ├── dist.Multivariate.t.Precision.Cholesky.Rd ├── dist.Multivariate.t.Precision.Rd ├── dist.Multivariate.t.Rd ├── dist.Normal.Inverse.Wishart.Rd ├── dist.Normal.Laplace.Rd ├── dist.Normal.Mixture.Rd ├── dist.Normal.Precision.Rd ├── dist.Normal.Variance.Rd ├── dist.Normal.Wishart.Rd ├── dist.Pareto.Rd ├── dist.Power.Exponential.Rd ├── dist.Scaled.Inverse.Wishart.Rd ├── dist.Skew.Discrete.Laplace.Rd ├── dist.Skew.Laplace.Rd ├── dist.Stick.Rd ├── dist.Student.t.Precision.Rd ├── dist.Student.t.Rd ├── dist.Truncated.Rd ├── dist.Wishart.Cholesky.Rd ├── dist.Wishart.Rd ├── dist.YangBerger.Rd ├── dist.Zellner.Rd ├── hpc_server.Rd ├── interval.Rd ├── is.appeased.Rd ├── is.bayesian.Rd ├── is.class.Rd ├── is.constant.Rd ├── is.constrained.Rd ├── is.data.Rd ├── is.model.Rd ├── is.proper.Rd ├── is.stationary.Rd ├── joint.density.plot.Rd ├── joint.pr.plot.Rd ├── log-log.Rd ├── logit.Rd ├── p.interval.Rd ├── plot.bmk.Rd ├── plot.demonoid.Rd ├── plot.demonoid.ppc.Rd ├── plot.importance.Rd ├── plot.iterquad.Rd ├── plot.iterquad.ppc.Rd ├── plot.juxtapose.Rd ├── plot.laplace.Rd ├── plot.laplace.ppc.Rd ├── plot.miss.Rd ├── plot.pmc.Rd ├── plot.pmc.ppc.Rd ├── plot.vb.Rd ├── plot.vb.ppc.Rd ├── plotMatrix.Rd ├── plotSamples.Rd ├── predict.demonoid.Rd ├── predict.iterquad.Rd ├── predict.laplace.Rd ├── predict.pmc.Rd ├── predict.vb.Rd ├── print.demonoid.Rd ├── print.heidelberger.Rd ├── print.iterquad.Rd ├── print.laplace.Rd ├── print.miss.Rd ├── print.pmc.Rd ├── print.raftery.Rd ├── print.vb.Rd ├── summary.demonoid.ppc.Rd ├── summary.iterquad.ppc.Rd ├── summary.laplace.ppc.Rd ├── summary.miss.Rd ├── summary.pmc.ppc.Rd └── summary.vb.ppc.Rd └── vignettes ├── BayesianInference.Stex ├── Examples.Stex ├── LDlogo.png ├── LaplacesDemonTutorial.Stex └── References.bib /.Rbuildignore: -------------------------------------------------------------------------------- 1 | \.gitignore 2 | .*~ 3 | \#.*\#. 
4 | \.RData 5 | \.Rhistory 6 | ^.*\.Rproj$ 7 | ^\.Rproj\.user$ 8 | development.R 9 | README.md 10 | .travis.yml 11 | 12 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | #.*# 3 | .RData 4 | .Rhistory 5 | .Rproj.user 6 | /inst/doc/BayesianInference.R 7 | /inst/doc/BayesianInference.Stex 8 | /inst/doc/Examples.R 9 | /inst/doc/Examples.Stex 10 | /inst/doc/LaplacesDemonTutorial.R 11 | /inst/doc/LaplacesDemonTutorial.Stex 12 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | # R for travis: see documentation at https://docs.travis-ci.com/user/languages/r 2 | 3 | language: R 4 | r: 5 | - oldrel 6 | - release 7 | - devel 8 | 9 | -------------------------------------------------------------------------------- /DESCRIPTION: -------------------------------------------------------------------------------- 1 | Package: LaplacesDemon 2 | Version: 16.1.6 3 | Title: Complete Environment for Bayesian Inference 4 | Authors@R: c(person("Byron", "Hall", role = "aut"), 5 | person("Martina", "Hall", role = "aut"), 6 | person(family="Statisticat, LLC", role = "aut"), 7 | person(given="Eric", family="Brown", role = "ctb"), 8 | person(given="Richard", family="Hermanson", role = "ctb"), 9 | person(given="Emmanuel", family="Charpentier", role = "ctb"), 10 | person(given="Daniel", family="Heck", role = "ctb"), 11 | person(given="Stephane", family="Laurent", role = "ctb"), 12 | person(given="Quentin F.", family="Gronau", role = "ctb"), 13 | person(given="Henrik", family="Singmann", 14 | email="singmann+LaplacesDemon@gmail.com", role="cre")) 15 | Depends: R (>= 3.0.0) 16 | Imports: parallel, grDevices, graphics, stats, utils 17 | Suggests: KernSmooth 18 | ByteCompile: TRUE 19 | Description: Provides a complete environment for Bayesian inference using a variety of different samplers (see ?LaplacesDemon for an overview). 20 | License: MIT + file LICENSE 21 | URL: https://github.com/LaplacesDemonR/LaplacesDemon 22 | BugReports: https://github.com/LaplacesDemonR/LaplacesDemon/issues 23 | 24 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | LaplacesDemon Package: 2 | YEAR: 2010-2015 3 | COPYRIGHT HOLDER: Statisticat, LLC 4 | 5 | TR method in LaplaceApproximation function is derived from trust::trust: 6 | YEAR: 2005 7 | COPYRIGHT HOLDER: Charles J. 
Geyer 8 | 9 | -------------------------------------------------------------------------------- /LaplacesDemon.Rproj: -------------------------------------------------------------------------------- 1 | Version: 1.0 2 | 3 | RestoreWorkspace: Default 4 | SaveWorkspace: Default 5 | AlwaysSaveHistory: Default 6 | 7 | EnableCodeIndexing: Yes 8 | UseSpacesForTab: Yes 9 | NumSpacesForTab: 2 10 | Encoding: UTF-8 11 | 12 | RnwWeave: Sweave 13 | LaTeX: pdfLaTeX 14 | 15 | BuildType: Package 16 | PackageUseDevtools: Yes 17 | PackageInstallArgs: --no-multiarch --with-keep.source 18 | PackageBuildArgs: --compact-vignettes=both -------------------------------------------------------------------------------- /R/ABB.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # Approximate Bayesian Bootstrap (ABB) # 3 | # # 4 | # The purpose of the ABB function is to perform Multiple Imputation (MI) # 5 | # with the Approximate Bayesian Bootstrap (ABB). # 6 | ########################################################################### 7 | 8 | ABB <- function(X, K=1) 9 | { 10 | ### Initial Checks 11 | if(missing(X)) stop("X is a required argument.") 12 | if(!is.matrix(X)) X <- as.matrix(X) 13 | J <- ncol(X) 14 | N <- nrow(X) 15 | ### Missingness Indicator 16 | M <- X*0 17 | M[which(is.na(X))] <- 1 18 | if(sum(M) == 0) stop("There are no missing values to impute.") 19 | M.sums <- colSums(M) 20 | ### Approximate Bayesian Bootstrap 21 | MI <- list() 22 | for (k in 1:K) { 23 | imp <- NULL 24 | for (j in 1:J) { 25 | if(M.sums[j] > 0) { 26 | ### Sample X.star.obs | X.obs 27 | X.obs <- X[which(M[,j] == 0),j] 28 | X.star.obs <- sample(X.obs, length(X.obs), 29 | replace=TRUE) 30 | ### Sample X.star.mis | X.star.obs 31 | X.star.mis <- sample(X.star.obs, M.sums[j], 32 | replace=TRUE) 33 | if(length(imp) > 0) imp <- c(imp, X.star.mis) 34 | else imp <- X.star.mis} 35 | } 36 | MI[[k]] <- imp 37 | } 38 | return(MI) 39 | } 40 | 41 | #End 42 | -------------------------------------------------------------------------------- /R/AcceptanceRate.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # AcceptanceRate # 3 | # # 4 | # The purpose of the AcceptanceRate function is to calculate the # 5 | # acceptance rate of each chain from its samples. # 6 | ########################################################################### 7 | 8 | AcceptanceRate <- function(x) 9 | { 10 | if(missing(x)) stop("x is a required argument.") 11 | if(!is.matrix(x)) x <- as.matrix(x) 12 | out <- colMeans(x[-nrow(x),] != x[-1,]) 13 | names(out) <- colnames(x) 14 | return(out) 15 | } 16 | 17 | ## Example (commented out so nothing executes at package load): 18 | ## x <- matrix(rnorm(10*10),10,10); colnames(x) <- paste("V", 1:10, sep="") 19 | ## x[2,] <- x[1,] 20 | ## AcceptanceRate(x) 21 | 22 | #End 23 | --------------------------------------------------------------------------------
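# A minimal usage sketch for ABB() and AcceptanceRate() above; the data
# are illustrative, not from the package.
X <- matrix(rnorm(20), 10, 2)
X[c(3, 14)] <- NA                   # introduce missing values to impute
MI <- ABB(X, K=2)                   # K sets of imputed values
chain <- matrix(rnorm(100), 100, 1)
chain[2:5,] <- chain[1,]            # repeated rows mimic rejected proposals
AcceptanceRate(chain)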
/R/BayesFactor.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # BayesFactor # 3 | # # 4 | # The purpose of the BayesFactor function is to estimate Bayes factors # 5 | # from two or more objects of class demonoid, laplace, pmc, or vb. # 6 | ########################################################################### 7 | 8 | BayesFactor <- function(x) 9 | { 10 | ### Initial Checks 11 | if(missing(x)) stop("x is required.") 12 | Model.num <- length(x) 13 | for (i in 1:Model.num) { 14 | if(!identical(class(x[[i]]), "demonoid") & 15 | !identical(class(x[[i]]), "laplace") & 16 | !identical(class(x[[i]]), "pmc") & 17 | !identical(class(x[[i]]), "vb")) 18 | stop("x is not of class demonoid, laplace, pmc, or vb.") 19 | if(identical(class(x[[i]]), "laplace") & 20 | identical(x[[i]]$Converged, FALSE)) { 21 | stop("LaplaceApproximation() did not converge in ", 22 | "M[",i,"].\n")} 23 | if(identical(class(x[[i]]), "vb") & 24 | identical(x[[i]]$Converged, FALSE)) { 25 | stop("VariationalBayes() did not converge in ", 26 | "M[",i,"].\n")} 27 | if(is.na(x[[i]]$LML)) 28 | stop("LML is missing in M[", i, "].") 29 | } 30 | ### Bayes factor 31 | B <- matrix(NA, Model.num, Model.num) 32 | for (i in 1:Model.num) {for (j in 1:Model.num) { 33 | B[i,j] <- exp(x[[i]]$LML - x[[j]]$LML)}} 34 | strength <- rep(NA,6) 35 | strength[1] <- "-Inf < B <= 0.1 Strong against" 36 | strength[2] <- "0.1 < B <= (1/3) Substantial against" 37 | strength[3] <- "(1/3) < B < 1 Barely worth mentioning against" 38 | strength[4] <- "1 <= B < 3 Barely worth mentioning for" 39 | strength[5] <- "3 <= B < 10 Substantial for" 40 | strength[6] <- "10 <= B < Inf Strong for" 41 | ### Posterior Probability 42 | ML <- rep(NA, Model.num) 43 | for (i in 1:Model.num) {ML[i] <- exp(x[[i]]$LML)} 44 | Posterior.Probability <- ML / sum(ML) 45 | ### Output 46 | BF.out <- list(B=B, Hypothesis="row > column", 47 | Strength.of.Evidence=strength, 48 | Posterior.Probability=Posterior.Probability) 49 | class(BF.out) <- "bayesfactor" 50 | return(BF.out) 51 | } 52 | 53 | #End 54 | -------------------------------------------------------------------------------- /R/BayesTheorem.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # BayesTheorem # 3 | ########################################################################### 4 | 5 | BayesTheorem <- function(PrA, PrBA) 6 | { 7 | if(missing(PrA)) stop("The PrA argument is required.") 8 | if(missing(PrBA)) stop("The PrBA argument is required.") 9 | if(any(PrA < 0) | any(PrA > 1)) 10 | stop("PrA is not in the interval [0,1].") 11 | if(any(PrBA < 0) | any(PrBA > 1)) 12 | stop("PrBA is not in the interval [0,1].") 13 | PrAB <- (PrBA * PrA) / sum(PrBA * PrA) 14 | class(PrAB) <- "bayestheorem" 15 | return(PrAB) 16 | } 17 | 18 | #End 19 | -------------------------------------------------------------------------------- /R/BayesianBootstrap.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # BayesianBootstrap # 3 | # # 4 | # The purpose of the BayesianBootstrap function is to allow the user to # 5 | # produce either bootstrapped weights or statistics.
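# A quick BayesTheorem() sketch; the prior and conditional probabilities
# are illustrative.
PrA <- c(0.75, 0.25)        # prior: Pr(A)
PrBA <- c(6/9, 5/7)         # conditional: Pr(B|A)
BayesTheorem(PrA, PrBA)     # posterior: Pr(A|B)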
# 6 | ########################################################################### 7 | 8 | BayesianBootstrap <- function(X, n=1000, Method="weights", Status=NULL) 9 | { 10 | ### Initial Checks 11 | if(missing(X)) stop("X is a required argument.") 12 | if(!is.matrix(X)) X <- as.matrix(X) 13 | if(any(!is.finite(X))) stop("Non-finite values found in X.") 14 | S <- round(abs(n)) 15 | if(S < 1) S <- 1 16 | if(!(is.numeric(Status) & (length(Status) == 1))) Status <- S + 1 17 | else { 18 | Status <- round(abs(Status)) 19 | if(Status < 1 | Status > S) Status <- S + 1} 20 | N <- nrow(X) 21 | J <- ncol(X) 22 | if(identical(Method, "weights")) { 23 | BB <- replicate(S, diff(c(0, sort(runif(N-1)), 1))) 24 | return(BB)} 25 | ### Bayesian Bootstrap: Statistics 26 | BB <- vector("list", S) 27 | for (s in 1:S) { 28 | if(s %% Status == 0) cat("\nBootstrapped Samples:", s) 29 | u <- c(0, sort(runif(N - 1)), 1) 30 | g <- diff(u) 31 | BB[[s]] <- Method(X, g)} 32 | if(Status < S) cat("\n\nThe Bayesian Bootstrap has finished.\n\n") 33 | ### Output 34 | BB <- lapply(BB, identity) 35 | if(is.vector(BB[[1]])) 36 | if(length(BB[[1]]) == 1) BB <- as.matrix(BB) 37 | else { 38 | B <- matrix(unlist(BB), S, length(BB[[1]]), byrow=TRUE) 39 | colnames(B) <- names(BB[[1]]) 40 | BB <- B 41 | } 42 | else { 43 | if(is.null(dim(BB[[1]]))) 44 | stop("Method must return a vector, matrix or array") 45 | B <- array(NA, dim=c(S, dim(BB[[1]]))) 46 | for (s in 1:S) {B[s,,] <- BB[[s]]} 47 | BB <- B 48 | } 49 | return(BB) 50 | } 51 | 52 | #End 53 | -------------------------------------------------------------------------------- /R/BigData.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # BigData # 3 | # # 4 | # The purpose of the BigData function is to enable the use of a data set # 5 | # that is larger than the computer memory (RAM). # 6 | ########################################################################### 7 | 8 | BigData <- function(file, nrow, ncol, size=1, Method="add", CPUs=1, 9 | Type="PSOCK", FUN, ...) 10 | { 11 | FUN <- match.fun(FUN) 12 | N <- trunc(nrow / size) 13 | ### Non-Parallel Processing 14 | if(CPUs == 1) { 15 | con <- file(file, open="r") 16 | on.exit(close(con)) 17 | for (i in 1:N) { 18 | ### Read in a Batch 19 | X <- matrix(scan(file=con, sep=",", #skip=skip.rows[i], 20 | nlines=size, quiet=TRUE), size, ncol, byrow=TRUE) 21 | ### Perform Function 22 | if(Method == "rbind") { 23 | if(i == 1) out <- FUN(X, ...) 24 | else out <- rbind(out, FUN(X, ...))} 25 | else if(Method == "add") { 26 | if(i == 1) out <- FUN(X, ...) 27 | else out <- out + FUN(X, ...)}} 28 | } 29 | else { ### Parallel Processing 30 | skip.rows <- c(0, size * 1:(N-1)) 31 | batch <- function(x) { 32 | #seek(con, 0) 33 | con <- file(file, open="r") 34 | on.exit(close(con)) 35 | X <- matrix(scan(file=con, sep=",", skip=skip.rows[x], 36 | nlines=size, quiet=TRUE), size, ncol, byrow=TRUE) 37 | if(Method == "add") out <- sum(FUN(X, ...)) 38 | else out <- FUN(X, ...) 
39 | return(out) 40 | } 41 | #library(parallel, quietly=TRUE) 42 | detectedCores <- max(detectCores(), 43 | as.integer(Sys.getenv("NSLOTS")), na.rm=TRUE) 44 | if(CPUs > detectedCores) CPUs <- detectedCores 45 | cl <- makeCluster(CPUs, Type) 46 | clusterSetRNGStream(cl) 47 | out <- parLapply(cl, 1:N, function(x) batch(x)) 48 | stopCluster(cl) 49 | if(Method == "rbind") { 50 | out <- unlist(out) 51 | out <- matrix(out, length(out), 1) 52 | } 53 | else if(Method == "add") { 54 | out <- sum(unlist(out))} 55 | } 56 | return(out) 57 | } 58 | 59 | #End 60 | -------------------------------------------------------------------------------- /R/CenterScale.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # CenterScale # 3 | # # 4 | # The purpose of the CenterScale function is to center and scale a # 5 | # continuous variable. Options are also provided for binary variables. # 6 | # This function is very similar to Gelman's rescale function in his arm # 7 | # package. # 8 | ########################################################################### 9 | 10 | CenterScale <- function(x, Binary="none", Inverse=FALSE, mu, sigma, Range, 11 | Min) 12 | { 13 | if(identical(Inverse, FALSE)) { 14 | ### Initial Checks 15 | if(!is.numeric(x)){ 16 | x <- as.numeric(factor(x)) 17 | x.obs <- x[is.finite(x)]} 18 | x.obs <- x[is.finite(x)] 19 | ### Binary Variables 20 | if(identical(length(unique(x.obs)), 2)){ 21 | if(identical(Binary, "none")){ 22 | return((x-min(x.obs)) / (max(x.obs)-min(x.obs)))} 23 | else if(identical(Binary, "center")) { 24 | return(x-mean(x.obs))} 25 | else if(identical(Binary, "center0")) { 26 | x <- (x-min(x.obs)) / (max(x.obs)-min(x.obs)) 27 | return(x-0.5)} 28 | else if(identical(Binary, "centerscale")) { 29 | return({x-mean(x.obs)} / {2*sd(x.obs)})} 30 | } 31 | ### Continuous Variables 32 | else {return({x-mean(x.obs)} / {2*sd(x.obs)})}} 33 | else { 34 | ### Initial Checks 35 | if(!is.numeric(x)){ 36 | x <- as.numeric(factor(x)) 37 | x.obs <- x[is.finite(x)]} 38 | x.obs <- x[is.finite(x)] 39 | ### Binary Variables 40 | if(identical(length(unique(x.obs)), 2)){ 41 | if(identical(Binary, "none")) { 42 | return(x * Range + Min)} 43 | else if(identical(Binary, "center")) { 44 | return(x + mu)} 45 | else if(identical(Binary, "center0")) { 46 | return(x * Range + Min)} 47 | else if(identical(Binary, "centerscale")) { 48 | return(x * (2*sigma) + mu)} 49 | } 50 | ### Continuous Variables 51 | else {return(x * (2*sigma) + mu)} 52 | } 53 | } 54 | 55 | #End 56 | -------------------------------------------------------------------------------- /R/ESS.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # ESS # 3 | # # 4 | # The purpose of the ESS function is to estimate the effective sample # 5 | # size (ESS) of a target distribution after taking autocorrelation into # 6 | # account. Although the code is slightly different, it is essentially the # 7 | # same as the effectiveSize function in the coda package. 
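# Sketches for BayesianBootstrap() and CenterScale() above; the values
# are illustrative.
x <- matrix(1:10, 10, 1)
BB <- BayesianBootstrap(X=x, n=100)    # 10 x 100 matrix of Dirichlet weights
z <- CenterScale(rnorm(25, 50, 10))    # centered and scaled by 2 sd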
# 8 | ########################################################################### 9 | 10 | ESS <- function(x) 11 | { 12 | x <- as.matrix(x) 13 | v0 <- order <- rep(0, ncol(x)) 14 | names(v0) <- names(order) <- colnames(x) 15 | N <- nrow(x) 16 | z <- 1:N 17 | for (i in 1:ncol(x)) { 18 | lm.out <- lm(x[, i] ~ z) 19 | if(!identical(all.equal(sd(residuals(lm.out)), 0), TRUE)) { 20 | ar.out <- try(ar(x[,i], aic=TRUE), silent=TRUE) 21 | if(!inherits(ar.out, "try-error")) { 22 | v0[i] <- ar.out$var.pred / {1 - sum(ar.out$ar)}^2 23 | order[i] <- ar.out$order}}} 24 | spec <- list(spec=v0, order=order) 25 | spec <- spec$spec 26 | temp <- N * .colVars(x) / spec 27 | out <- spec 28 | out[which(spec != 0)] <- temp[which(spec != 0)] 29 | out[which(out < .Machine$double.eps)] <- .Machine$double.eps 30 | out[which(out > N)] <- N 31 | return(out) 32 | } 33 | 34 | #End 35 | -------------------------------------------------------------------------------- /R/Gelfand.Diagnostic.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # Gelfand.Diagnostic # 3 | # # 4 | # The Gelfand.Diagnostic function is an interpretation of Gelfand's # 5 | # ``thick felt-tip pen'' MCMC convergence diagnostic (Gelfand et al., # 6 | # 1990). # 7 | ########################################################################### 8 | 9 | Gelfand.Diagnostic <- function(x, k=3, pen=FALSE) 10 | { 11 | ### Initial Checks 12 | if(missing(x)) stop("The x argument is required.") 13 | if(!is.vector(x)) x <- as.vector(x) 14 | if(k < 2) k <- 2 15 | if(k > length(x)/2) k <- round(length(x)/2) 16 | if({length(x)/k} < 2) stop("k is too large relative to length(x).") 17 | ### KDE 18 | quantiles <- seq(from=0, to=1, by=1/k) 19 | breaks <- round(as.vector(quantiles)*length(x)) 20 | breaks <- breaks[-1] 21 | d.temp <- density(x) 22 | d <- array(c(d.temp$x, d.temp$y), dim=c(length(d.temp$x), 2, 23 | length(breaks))) 24 | d.temp <- density(x[1:breaks[1]]) 25 | d[,,1] <- c(d.temp$x, d.temp$y) 26 | for (i in 2:length(breaks)) { 27 | d.temp <- density(x[1:breaks[i]]) 28 | d[,,i] <- c(d.temp$x, d.temp$y)} 29 | ### Plots 30 | ymax <- max(d[,2,]) 31 | col.list <- c("red", "green", "blue", "yellow", "purple", "orange", 32 | "brown", "gray", "burlywood", "aquamarine") 33 | col.list <- rep(col.list, len=length(breaks)) 34 | rgb.temp <- as.vector(col2rgb(col.list[1])) 35 | mycol <- rgb(red=rgb.temp[1], green=rgb.temp[2], blue=rgb.temp[3], 36 | alpha=50, maxColorValue=255) 37 | plot(d[,1,1], d[,2,1], type="l", col=mycol, xlim=c(range(d[,1,])), 38 | ylim=c(0,ymax), main="Gelfand Diagnostic", 39 | xlab=deparse(substitute(x)), ylab="Density") 40 | polygon(x=d[,1,1], y=d[,2,1], col=mycol, border=NULL) 41 | for (i in 2:length(breaks)) { 42 | rgb.temp <- as.vector(col2rgb(col.list[i])) 43 | mycol <- rgb(red=rgb.temp[1], green=rgb.temp[2], 44 | blue=rgb.temp[3], alpha=50, maxColorValue=255) 45 | lines(d[,1,i], d[,2,i], col=mycol) 46 | polygon(x=d[,1,i], y=d[,2,i], col=mycol, border=mycol) 47 | lines(d[,1,i], d[,2,i], lty=i)} 48 | if(pen == TRUE) abline(v=mean(range(d[,1,])), col="black", lwd=10) 49 | legend(quantile(d[,1,], probs=0.025), round(ymax*0.9,2), 50 | legend=paste("1:",breaks,sep=""), lty=1:k, title="Samples") 51 | return(invisible(x)) 52 | } 53 | 54 | #End 55 | -------------------------------------------------------------------------------- /R/Geweke.Diagnostic.R: -------------------------------------------------------------------------------- 1 | 
########################################################################### 2 | # Geweke.Diagnostic # 3 | # # 4 | # The purpose of the Geweke.Diagnostic function is to estimate # 5 | # stationarity in samples according to Geweke's diagnostic. Although the # 6 | # code is slightly different, it is essentially the same as the # 7 | # geweke.diag function in the coda package. # 8 | ########################################################################### 9 | 10 | Geweke.Diagnostic <- function(x) 11 | { 12 | x <- as.matrix(x) 13 | if(nrow(x) < 100) return(rep(NA, ncol(x))) 14 | frac1 <- 0.1; frac2 <- 0.5 15 | startx <- 1; endx <- nrow(x) 16 | xstart <- c(startx, endx - frac2 * {endx - startx}) 17 | xend <- c(startx + frac1 * {endx - startx}, endx) 18 | y.variance <- y.mean <- vector("list", 2) 19 | for (i in 1:2) { 20 | y <- x[xstart[i]:xend[i],] 21 | y.mean[[i]] <- colMeans(as.matrix(y)) 22 | yy <- as.matrix(y) 23 | y <- as.matrix(y) 24 | max.freq <- 0.5; order <- 1; max.length <- 200 25 | if(nrow(yy) > max.length) { 26 | batch.size <- ceiling(nrow(yy) / max.length) 27 | yy <- aggregate(ts(yy, frequency=batch.size), nfreq=1, 28 | FUN=mean)} 29 | else {batch.size <- 1} 30 | yy <- as.matrix(yy) 31 | fmla <- switch(order + 1, 32 | spec ~ one, 33 | spec ~ f1, 34 | spec ~ f1 + f2) 35 | if(is.null(fmla)) stop("Invalid order.") 36 | N <- nrow(yy) 37 | Nfreq <- floor(N/2) 38 | freq <- seq(from=1/N, by=1/N, length=Nfreq) 39 | f1 <- sqrt(3) * {4 * freq - 1} 40 | f2 <- sqrt(5) * {24 * freq * freq - 12 * freq + 1} 41 | v0 <- numeric(ncol(yy)) 42 | for (j in 1:ncol(yy)) { 43 | zz <- yy[,j] 44 | if(var(zz) == 0) v0[j] <- 0 45 | else { 46 | yfft <- fft(zz) 47 | spec <- Re(yfft * Conj(yfft)) / N 48 | spec.data <- data.frame(one=rep(1, Nfreq), f1=f1, 49 | f2=f2, spec=spec[1 + {1:Nfreq}], 50 | inset=I(freq <= max.freq)) 51 | glm.out <- try(glm(fmla, family=Gamma(link="log"), 52 | data=spec.data), silent=TRUE) 53 | if(!inherits(glm.out, "try-error")) 54 | v0[j] <- predict(glm.out, type="response", 55 | newdata=data.frame(spec=0, one=1, 56 | f1=-sqrt(3), f2=sqrt(5))) 57 | } 58 | } 59 | spec <- list(spec=v0) 60 | spec$spec <- spec$spec * batch.size 61 | y.variance[[i]] <- spec$spec / nrow(y) 62 | } 63 | z <- {y.mean[[1]] - y.mean[[2]]} / 64 | sqrt(y.variance[[1]] + y.variance[[2]]) 65 | return(z) 66 | } 67 | 68 | #End 69 | -------------------------------------------------------------------------------- /R/Hangartner.Diagnostic.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # Hangartner.Diagnostic # 3 | ########################################################################### 4 | 5 | Hangartner.Diagnostic <- function(x, J=2) { 6 | x <- as.vector(x) 7 | if(!all(x == round(x))) stop("x is not discrete.") 8 | N <- length(x) 9 | j <- rep(1:J, each=N/J) 10 | if(N %% J != 0) stop("N must be divisible by J.") 11 | tab <- table(x, j) 12 | out <- chisq.test(x, j) 13 | class(out) <- "hangartner" 14 | return(out) 15 | } 16 | 17 | #End 18 | -------------------------------------------------------------------------------- /R/IAT.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # IAT # 3 | # # 4 | # The purpose of the IAT function is to estimate the integrated # 5 | # autocorrelation time of a chain, given its samples. 
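# A stationarity sketch for Geweke.Diagnostic() and ESS() above, using
# simulated, approximately independent chains.
x <- matrix(rnorm(2000), 1000, 2)
Geweke.Diagnostic(x)   # z-scores near 0 suggest stationary means
ESS(x)                 # near 1000 when autocorrelation is negligible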
Although the code # 6 | # is slightly different, it is essentially the same as the IAT function # 7 | # in the Rtwalk package, which is currently unavailable on CRAN. # 8 | ########################################################################### 9 | 10 | IAT <- function(x) 11 | { 12 | if(missing(x)) stop("The x argument is required.") 13 | if(!is.vector(x)) x <- as.vector(x) 14 | dt <- x 15 | n <- length(x) 16 | mu <- mean(dt) 17 | s2 <- var(dt) 18 | ### The maximum lag is half the sample size 19 | maxlag <- max(3, floor(n/2)) 20 | #### The gammas are sums of two consecutive autocovariances 21 | Ga <- rep(0,2) 22 | Ga[1] <- s2 23 | lg <- 1 24 | Ga[1] <- Ga[1] + sum((dt[1:(n-lg)]-mu)*(dt[(lg+1):n]-mu)) / n 25 | m <- 1 26 | lg <- 2*m 27 | Ga[2] <- sum((dt[1:(n-lg)]-mu)*(dt[(lg+1):n]-mu)) / n 28 | lg <- 2*m + 1 29 | Ga[2] <- Ga[2] + sum((dt[1:(n-lg)]-mu)*(dt[(lg+1):n]-mu)) / n 30 | IAT <- Ga[1]/s2 # Add the autocorrelations 31 | ### RULE: while Gamma stays positive and decreasing 32 | while ((Ga[2] > 0.0) & (Ga[2] < Ga[1])) { 33 | m <- m + 1 34 | if(2*m + 1 > maxlag) { 35 | cat("Not enough data, maxlag=", maxlag, "\n") 36 | break} 37 | Ga[1] <- Ga[2] 38 | lg <- 2*m 39 | Ga[2] <- sum((dt[1:(n-lg)]-mu)*(dt[(lg+1):n]-mu)) / n 40 | lg <- 2*m + 1 41 | Ga[2] <- Ga[2] + sum((dt[1:(n-lg)]-mu)*(dt[(lg+1):n]-mu)) / n 42 | IAT <- IAT + Ga[1] / s2 43 | } 44 | IAT <- -1 + 2*IAT #Calculates the IAT from the gammas 45 | return(IAT) 46 | } 47 | 48 | #End 49 | -------------------------------------------------------------------------------- /R/Importance.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # Importance # 3 | # # 4 | # The purpose of the Importance function is to compare the impact of # 5 | # design matrix X on replicates when each column vector (predictor) is # 6 | # sequentially removed. 
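# An IAT() sketch on an autocorrelated series (illustrative AR(1) draws).
x <- as.vector(arima.sim(list(ar=0.8), 1000))
IAT(x)   # values well above 1 indicate strong autocorrelation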
# 7 | ########################################################################### 8 | 9 | Importance <- function(object, Model, Data, Categorical=FALSE, Discrep, 10 | d=0, CPUs=1, Type="PSOCK") 11 | { 12 | if(missing(object)) stop("The object argument is required.") 13 | if(missing(Model)) stop("The Model argument is required.") 14 | if(missing(Data)) stop("The Data argument is required.") 15 | if(is.null(Data[["X"]])) stop("Data must have X.") 16 | if(missing(Discrep)) Discrep <- NULL 17 | X.orig <- Data[["X"]] 18 | cat("\nX has", ncol(X.orig), "variables") 19 | cat("\nEstimating the full model...") 20 | Pred <- predict(object, Model, Data) 21 | Summ <- summary(Pred, Categorical=Categorical, Discrep=Discrep, d=d, 22 | Quiet=TRUE) 23 | out <- matrix(0, ncol(X.orig) + 1, 4) 24 | out[1,1] <- Summ$BPIC[1,3] 25 | if(Categorical == FALSE) out[1,2] <- round(Summ$Concordance, 3) 26 | else out[1,2] <- round(Summ$Mean.Lift, 3) 27 | out[1,3] <- Summ$Discrepancy.Statistic 28 | if(Categorical == FALSE) { 29 | out[1,4] <- Summ$L.criterion 30 | S.L <- Summ$S.L} 31 | else S.L <- NA 32 | for (i in 1:ncol(X.orig)) { 33 | cat("\nEstimating without X[,", i, "]...", sep="") 34 | X.temp <- X.orig 35 | X.temp[,i] <- 0 36 | Data[["X"]] <- X.temp 37 | Pred <- predict(object, Model, Data, CPUs, Type) 38 | Summ <- summary(Pred, Categorical=Categorical, 39 | Discrep=Discrep, d=d, Quiet=TRUE) 40 | out[i+1,1] <- Summ$BPIC[1,3] 41 | if(Categorical == FALSE) out[i+1,2] <- round(Summ$Concordance, 3) 42 | else out[i+1,2] <- round(Summ$Mean.Lift, 3) 43 | out[i+1,3] <- Summ$Discrepancy.Statistic 44 | if(Categorical == FALSE) { 45 | out[i+1,4] <- Summ$L.criterion 46 | S.L <- c(S.L, Summ$S.L)}} 47 | if(Categorical == FALSE) cat("\n\nS.L:", S.L) 48 | colnames(out) <- c("BPIC","Concordance", "Discrep", "L-criterion") 49 | rownames(out) <- c("Full", paste("X[,-", 1:ncol(X.orig), "]", sep="")) 50 | attr(out, "S.L") <- S.L 51 | class(out) <- "importance" 52 | cat("\n\n") 53 | return(out) 54 | } 55 | 56 | #End 57 | -------------------------------------------------------------------------------- /R/KLD.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # Kullback-Leibler Divergence (KLD) # 3 | # # 4 | # The purpose of the KLD function is to calculate the Kullback-Leibler # 5 | # divergences between two probability distributions, p(x) and p(y).
# 6 | ########################################################################### 7 | 8 | KLD <- function(px, py, base=exp(1)) 9 | { 10 | ### Initial Checks 11 | if(!is.vector(px)) px <- as.vector(px) 12 | if(!is.vector(py)) py <- as.vector(py) 13 | n1 <- length(px) 14 | n2 <- length(py) 15 | if(!identical(n1, n2)) stop("px and py must have the same length.") 16 | if(any(!is.finite(px)) || any(!is.finite(py))) 17 | stop("px and py must have finite values.") 18 | if(any(px <= 0)) px <- exp(px) 19 | if(any(py <= 0)) py <- exp(py) 20 | px[which(px < .Machine$double.xmin)] <- .Machine$double.xmin 21 | py[which(py < .Machine$double.xmin)] <- .Machine$double.xmin 22 | ### Normalize 23 | px <- px / sum(px) 24 | py <- py / sum(py) 25 | ### Kullback-Leibler Calculations 26 | KLD.px.py <- px * (log(px, base=base)-log(py, base=base)) 27 | KLD.py.px <- py * (log(py, base=base)-log(px, base=base)) 28 | sum.KLD.px.py <- sum(KLD.px.py) 29 | sum.KLD.py.px <- sum(KLD.py.px) 30 | mean.KLD <- (KLD.px.py + KLD.py.px) / 2 31 | mean.sum.KLD <- (sum.KLD.px.py + sum.KLD.py.px) / 2 32 | ### Output 33 | out <- list(KLD.px.py=KLD.px.py, #KLD[i](p(x[i]) || p(y[i])) 34 | KLD.py.px=KLD.py.px, #KLD[i](p(y[i]) || p(x[i])) 35 | mean.KLD=mean.KLD, 36 | sum.KLD.px.py=sum.KLD.px.py, #KLD(p(x) || p(y)) 37 | sum.KLD.py.px=sum.KLD.py.px, #KLD(p(y) || p(x)) 38 | mean.sum.KLD=mean.sum.KLD, 39 | intrinsic.discrepancy=min(sum.KLD.px.py, sum.KLD.py.px)) 40 | return(out) 41 | } 42 | 43 | #End 44 | -------------------------------------------------------------------------------- /R/KS.Diagnostic.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # KS.Diagnostic # 3 | # # 4 | # The purpose of the KS.Diagnostic is to assess the stationarity of a # 5 | # MCMC chain, given its posterior samples. # 6 | ########################################################################### 7 | 8 | KS.Diagnostic <- function(x) 9 | { 10 | if(missing(x)) stop("The x argument is required.") 11 | if(!is.vector(x)) x <- as.vector(x) 12 | n <- length(x) 13 | half <- round(n/2) 14 | out <- ks.test(x[1:half], x[-c(1:half)])$p.value 15 | return(out) 16 | } 17 | 18 | #End 19 | 20 | 21 | -------------------------------------------------------------------------------- /R/LaplacesDemon.RAM.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # LaplacesDemon.RAM # 3 | # # 4 | # The purpose of the LaplacesDemon.RAM function is to estimate the RAM # 5 | # required to update a given model and data in LaplacesDemon. 
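# Sketches for KLD() and KS.Diagnostic() above; densities and chains are
# illustrative.
grid <- seq(-4, 4, length.out=200)
px <- dnorm(grid, 0, 1); py <- dnorm(grid, 0.5, 1.2)
KLD(px, py)$intrinsic.discrepancy
KS.Diagnostic(rnorm(1000))   # a large p-value suggests stationarity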
# 6 | ########################################################################### 7 | 8 | LaplacesDemon.RAM <- function(Model, Data, Iterations, Thinning, 9 | Algorithm="RWM") 10 | { 11 | if(missing(Model)) 12 | stop("The Model argument is required.") 13 | if(missing(Data)) 14 | stop("The Data argument is required.") 15 | Const <- 1048600 16 | LIV <- length(Data[["parm.names"]]) 17 | LM <- length(Data[["mon.names"]]) 18 | Covar <- 0 19 | if(Algorithm %in% c("ADMG","AFSS","AM","AMM","DRAM","DRM","ESS","IM", 20 | "INCA","MALA","OHSS","RWM","RAM","UESS")) { 21 | ### Covariance is required 22 | Covar <- Covar + as.vector(object.size(matrix(runif(LIV*LIV), 23 | LIV, LIV))) / Const 24 | } 25 | else if(Algorithm %in% c("AGG","AM","AMM","AMWG","DRAM","DRM","INCA", 26 | "MWG","RWM","SAMWG","SMWG","USAMWG","USMWG")) { 27 | ### Variance is required 28 | Covar <- Covar + as.vector(object.size(runif(LIV))) / Const} 29 | Data <- as.vector(object.size(Data)) / Const 30 | Deviance <- as.vector(object.size(runif(round(Iterations / 31 | Thinning)))) / Const 32 | Initial.Values <- as.vector(object.size(runif(LIV))) / Const 33 | Model <- as.vector(object.size(Model)) / Const 34 | Monitor <- as.vector(object.size(matrix(runif(Iterations*LM), 35 | round(Iterations / Thinning), LM))) / Const 36 | post <- 0 37 | if(Algorithm %in% c("AHMC","AM","DRAM","INCA","NUTS","OHSS")) 38 | post <- as.vector(object.size(matrix(runif(Iterations*LIV), 39 | Iterations, LIV))) / Const 40 | Posterior1 <- as.vector(object.size(matrix(runif(round(Iterations / 41 | Thinning)), round(Iterations / Thinning), LIV))) / Const 42 | Posterior2 <- as.vector(object.size(matrix(runif(round(Iterations / 43 | Thinning)), round(Iterations / Thinning), LIV))) / Const 44 | Summary1 <- as.vector(object.size(matrix(runif((LIV+1+LM)*7), 45 | LIV+1+LM, 7))) / Const 46 | Summary2 <- as.vector(object.size(matrix(runif((LIV+1+LM)*7), 47 | LIV+1+LM, 7))) / Const 48 | mem.list <- list(Covar=Covar, 49 | Data=Data, 50 | Deviance=Deviance, 51 | Initial.Values=Initial.Values, 52 | Model=Model, 53 | Monitor=Monitor, 54 | post=post, 55 | Posterior1=Posterior1, 56 | Posterior2=Posterior2, 57 | Summary1=Summary1, 58 | Summary2=Summary2, 59 | Total=sum(Covar,Data,Deviance,Initial.Values,Model,Monitor, 60 | post,Posterior1,Posterior2,Summary1,Summary2)) 61 | return(mem.list) 62 | } 63 | 64 | #End 65 | -------------------------------------------------------------------------------- /R/LossMatrix.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # LossMatrix # 3 | # # 4 | # The purpose of the LossMatrix function is facilitate Bayesian decision # 5 | # theory for discrete actions among discrete states. 
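# A RAM-estimation sketch for LaplacesDemon.RAM() above; the Model
# function and Data list are placeholders (only their sizes are used).
Data <- list(parm.names=c("beta[1]","beta[2]"), mon.names="LP")
Model <- function(parm, Data) NULL
LaplacesDemon.RAM(Model, Data, Iterations=10000, Thinning=10, Algorithm="RWM")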
# 6 | ########################################################################### 7 | 8 | LossMatrix <- function(L, p.theta) 9 | { 10 | ### Initial Checks 11 | if(missing(L)) stop("L is a required argument.") 12 | if(missing(p.theta)) stop("p.theta is a required argument.") 13 | if(!is.matrix(L) & !is.array(L)) stop("L must be a matrix or array.") 14 | if(!is.array(p.theta)) 15 | stop("p.theta must be a vector or matrix.") 16 | d.L <- dim(L) 17 | d.p.theta <- dim(p.theta) 18 | if(any(d.L[1:2] != d.p.theta[1:2])) 19 | stop("The rows or columns of L and p.theta differ.") 20 | if(length(d.L) == 3 & length(d.p.theta) == 3) 21 | if(d.L[3] != d.p.theta[3]) 22 | stop("The number of samples in L and p.theta differ.") 23 | if(length(d.L) > 3 | length(d.p.theta) > 3) 24 | stop("L and p.theta may have no more than 3 dimensions.") 25 | if(length(d.p.theta) == 2) { 26 | if(any(colSums(p.theta) != 1)) 27 | stop("Each column in p.theta must sum to one.") 28 | } 29 | else { 30 | for (i in 1:d.p.theta[3]) { 31 | if(any(colSums(p.theta[,,i]) != 1)) 32 | stop("Each column in p.theta must sum to one.")}} 33 | ### Expected Loss 34 | if(length(d.L) == 2 & length(d.p.theta) == 2) { 35 | E.Loss <- colSums(L * p.theta) 36 | } 37 | else if(length(d.L) == 3 & length(d.p.theta) == 2) { 38 | E.Loss <- rep(0, d.L[3]) 39 | for (i in 1:d.L[3]) { 40 | E.Loss <- E.Loss + colSums(L[,,i] * p.theta)} 41 | E.Loss <- E.Loss / d.L[3] 42 | } 43 | else if(length(d.L) == 2 & length(d.p.theta) == 3) { 44 | E.Loss <- rep(0, d.p.theta[3]) 45 | for (i in 1:d.p.theta[3]) { 46 | E.Loss <- E.Loss + colSums(L * p.theta[,,i])} 47 | E.Loss <- E.Loss / d.p.theta[3] 48 | } 49 | else { 50 | E.Loss <- rep(0, d.L[3]) 51 | for (i in 1:d.L[3]) { 52 | E.Loss <- E.Loss + colSums(L[,,i] * p.theta[,,i])}} 53 | action <- which.min(E.Loss) 54 | cat("\nAction", action, "minimizes expected loss.\n") 55 | out <- list(BayesAction=action, E.Loss=E.Loss) 56 | return(out) 57 | } 58 | 59 | #End 60 | -------------------------------------------------------------------------------- /R/MinnesotaPrior.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # MinnesotaPrior # 3 | # # 4 | # The purpose of the MinnesotaPrior function is to return prior # 5 | # covariance matrices for autoregressive parameters in vector # 6 | # autoregression (VAR) models. 
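# A decision-theory sketch for LossMatrix() above; losses and state
# probabilities are illustrative, and each column of p.theta sums to one.
L <- matrix(c(10, 0, 0, 20), 2, 2)               # states x actions
p.theta <- matrix(c(0.4, 0.6, 0.4, 0.6), 2, 2)
LossMatrix(L, p.theta)   # expected losses 4 and 12, so action 1 is chosen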
# 7 | ########################################################################### 8 | 9 | MinnesotaPrior <- function(J, lags=c(1,2), lambda=1, theta=0.5, sigma) 10 | { 11 | theta <- max(min(theta, 1), 0) 12 | Iden <- diag(J) 13 | L <- length(lags) 14 | V <- array(0, dim=c(J,J,length(lags))) 15 | for (l in 1:L) { 16 | ### Diagonal elements 17 | V[,,l] <- V[,,l] + Iden * (lambda/lags[l])^2 18 | ### Off-diagonal elements 19 | V[,,l] <- V[,,l] + (1 - Iden) * 20 | ((lambda*theta*matrix(sigma, J, J, byrow=TRUE)) / 21 | (lags[l]*matrix(sigma, J, J)))^2} 22 | return(V) 23 | } 24 | 25 | #End 26 | -------------------------------------------------------------------------------- /R/Model.Spec.Time.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # Model.Spec.Time # 3 | # # 4 | # The purpose of the Model.Spec.Time function is to return three things: # 5 | # the amount of time in minutes that it took to evaluate a model # 6 | # specification a number of times, the evaluations per minute, and the # 7 | # componentwise iterations per minute. # 8 | ########################################################################### 9 | 10 | Model.Spec.Time <- function(Model, Initial.Values, Data, n=1000) 11 | { 12 | if(missing(Model)) stop("The Model argument is required.") 13 | if(missing(Initial.Values)) 14 | stop("The Initial.Values argument is required.") 15 | if(missing(Data)) stop("The Data argument is required.") 16 | t <- as.vector(system.time(for (i in 1:n) {Model(Initial.Values, Data)})[3]) 17 | out <- list(Time=round(t/60,3), 18 | Evals.per.Minute=round(n/(t/60),3), 19 | Componentwise.Iters.per.Minute=round(n/(t/60)/length(Initial.Values),3)) 20 | return(out) 21 | } 22 | 23 | #End 24 | -------------------------------------------------------------------------------- /R/PMC.RAM.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # PMC.RAM # 3 | # # 4 | # The purpose of the PMC.RAM function is to estimate the RAM required to # 5 | # update a given model and data in PMC. 
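# Sketches for MinnesotaPrior() and Model.Spec.Time() above; the toy
# Model function is a placeholder.
V <- MinnesotaPrior(J=2, lags=c(1,2), lambda=1, theta=0.5, sigma=c(1,1))
Model <- function(parm, Data) sum(dnorm(parm, log=TRUE))
Model.Spec.Time(Model, Initial.Values=c(0,0), Data=list(), n=100)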
# 6 | ########################################################################### 7 | 8 | PMC.RAM <- function(Model, Data, Iterations, Thinning, M, N) 9 | { 10 | if(missing(Model)) 11 | stop("The Model argument is required.") 12 | if(missing(Data)) 13 | stop("The Data argument is required.") 14 | Const <- 1048600 15 | LIV <- length(Data[["parm.names"]]) 16 | LM <- length(Data[["mon.names"]]) 17 | alpha <- as.vector(object.size(matrix(rep(1/M, M), M, Iterations))) / 18 | Const 19 | Covar <- as.vector(object.size(array(0, 20 | dim=c(LIV,LIV,Iterations,M)))) / Const 21 | Data <- as.vector(object.size(Data)) / Const 22 | Deviance <- as.vector(object.size(rep(0,N))) / Const 23 | Initial.Values <- as.vector(object.size(matrix(0, M, 24 | LIV))) / Const 25 | LH <- as.vector(object.size(array(0, dim=c(N, Iterations, M)))) / 26 | Const 27 | LP <- as.vector(object.size(array(0, dim=c(N, Iterations, M)))) / 28 | Const 29 | Model <- as.vector(object.size(Model)) / Const 30 | Monitor <- as.vector(object.size(matrix(runif(N*LM), N, LM))) / Const 31 | Mu <- as.vector(object.size(array(0, dim=c(Iterations, LIV, M)))) / 32 | Const 33 | Posterior1 <- as.vector(object.size(array(0, 34 | dim=c(N, LIV, Iterations, M)))) / Const 35 | Posterior2 <- as.vector(object.size(matrix(0, N, LIV))) / Const 36 | #Note: Posterior2 gets thinned, but at one point it is an N x LIV 37 | #slice of Posterior1, which is the size estimated here. 38 | Summary <- as.vector(object.size(matrix(0, LIV+1+LM, 7))) / Const 39 | W <- as.vector(object.size(matrix(0, N, Iterations))) / Const 40 | mem.list <- list(alpha=alpha, 41 | Covar=Covar, 42 | Data=Data, 43 | Deviance=Deviance, 44 | Initial.Values=Initial.Values, 45 | LH=LH, 46 | LP=LP, 47 | Model=Model, 48 | Monitor=Monitor, 49 | Mu=Mu, 50 | Posterior1=Posterior1, 51 | Posterior2=Posterior2, 52 | Summary=Summary, 53 | W=W, 54 | Total=sum(alpha,Covar,Data,Deviance,Initial.Values,LH,LP, 55 | Model,Monitor,Mu,Posterior1,Posterior2,Summary,W)) 56 | return(mem.list) 57 | } 58 | 59 | #End 60 | --------------------------------------------------------------------------------
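# A PMC.RAM() sketch, mirroring the LaplacesDemon.RAM() usage above;
# Model and Data are placeholders (only their sizes are used).
Data <- list(parm.names=c("beta[1]","beta[2]"), mon.names="LP")
PMC.RAM(Model=function(parm, Data) NULL, Data=Data, Iterations=10, Thinning=1, M=2, N=100)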
/R/Precision.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # Precision # 3 | # # 4 | # The purpose of these functions is to facilitate conversions between the # 5 | # precision, standard deviation, and variance of scalars, vectors, and # 6 | # matrices. # 7 | ########################################################################### 8 | 9 | Cov2Prec <- function(Cov) 10 | { 11 | if(any(!is.finite(Cov))) stop("Cov must be finite.") 12 | if(is.matrix(Cov)) { 13 | if(!is.positive.definite(Cov)) 14 | stop("Cov is not positive-definite.") 15 | Prec <- as.inverse(Cov)} 16 | else if(is.vector(Cov)) { 17 | k <- as.integer(sqrt(length(Cov))) 18 | Cov <- matrix(Cov, k, k) 19 | if(!is.positive.definite(Cov)) 20 | stop("Cov is not positive-definite.") 21 | Prec <- as.inverse(Cov)} 22 | return(Prec) 23 | } 24 | Prec2Cov <- function(Prec) 25 | { 26 | if(any(!is.finite(Prec))) stop("Prec must be finite.") 27 | if(is.matrix(Prec)) { 28 | if(!is.positive.definite(Prec)) 29 | stop("Prec is not positive-definite.") 30 | Cov <- as.inverse(Prec)} 31 | else if(is.vector(Prec)) { 32 | k <- as.integer(sqrt(length(Prec))) 33 | Prec <- matrix(Prec, k, k) 34 | if(!is.positive.definite(Prec)) 35 | stop("Prec is not positive-definite.") 36 | Cov <- as.inverse(Prec)} 37 | return(Cov) 38 | } 39 | prec2sd <- function(prec=1) 40 | { 41 | prec <- as.vector(prec) 42 | if(any(prec <= 0)) stop("prec must be positive.") 43 | return(sqrt(1/prec)) 44 | } 45 | prec2var <- function(prec=1) 46 | { 47 | prec <- as.vector(prec) 48 | if(any(prec <= 0)) stop("prec must be positive.") 49 | return(1/prec) 50 | } 51 | sd2prec <- function(sd=1) 52 | { 53 | sd <- as.vector(sd) 54 | if(any(sd <= 0)) stop("sd must be positive.") 55 | return(1/sd^2) 56 | } 57 | sd2var <- function(sd=1) 58 | { 59 | sd <- as.vector(sd) 60 | if(any(sd <= 0)) stop("sd must be positive.") 61 | return(sd^2) 62 | } 63 | var2prec <- function(var=1) 64 | { 65 | var <- as.vector(var) 66 | if(any(var <= 0)) stop("var must be positive.") 67 | return(1/var) 68 | } 69 | var2sd <- function(var=1) 70 | { 71 | var <- as.vector(var) 72 | if(any(var <= 0)) stop("var must be positive.") 73 | return(sqrt(var)) 74 | } 75 | 76 | #End 77 | --------------------------------------------------------------------------------
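# Round-trip sketches for the precision converters above; Cov2Prec()
# relies on the package's as.inverse() and is.positive.definite().
prec2sd(prec=0.5)             # sqrt(1/0.5) = 1.414214
sd2prec(sd=2)                 # 1/2^2 = 0.25
Prec2Cov(Cov2Prec(diag(2)))   # recovers diag(2)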
/R/RejectionSampling.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # RejectionSampling # 3 | # # 4 | # The purpose of the RejectionSampling function is to perform rejection # 5 | # sampling. # 6 | ########################################################################### 7 | 8 | RejectionSampling <- function(Model, Data, mu, S, df=Inf, logc, n=1000, 9 | CPUs=1, Type="PSOCK") 10 | { 11 | ### Initial Checks 12 | if(missing(Model)) stop("The Model argument is required.") 13 | if(missing(Data)) stop("The Data argument is required.") 14 | if(missing(mu)) stop("The mu argument is required.") 15 | if(missing(S)) stop("The S argument is required.") 16 | if(missing(df)) stop("The df argument is required.") 17 | if(missing(logc)) stop("The logc argument is required.") 18 | df <- abs(df) 19 | ### Rejection Sampling 20 | k <- length(mu) 21 | if(df == Inf) theta <- rmvn(n, mu, S) 22 | else theta <- rmvt(n, mu, S, df) 23 | lf <- rep(0, nrow(theta)) 24 | ### Non-Parallel Processing 25 | if(CPUs == 1) { 26 | for (i in 1:nrow(theta)) { 27 | mod <- Model(theta[i,], Data) 28 | lf[i] <- mod[["LP"]] 29 | theta[i,] <- mod[["parm"]]} 30 | } 31 | else { ### Parallel Processing 32 | detectedCores <- max(detectCores(), 33 | as.integer(Sys.getenv("NSLOTS")), na.rm=TRUE) 34 | cat("\n\nCPUs Detected:", detectedCores, "\n") 35 | if(CPUs > detectedCores) { 36 | cat("\nOnly", detectedCores, "will be used.\n") 37 | CPUs <- detectedCores} 38 | cl <- makeCluster(CPUs, Type) 39 | varlist <- unique(c(ls(), ls(envir=.GlobalEnv), 40 | ls(envir=parent.env(environment())))) 41 | clusterExport(cl, varlist=varlist, envir=environment()) 42 | clusterSetRNGStream(cl) 43 | mod <- parLapply(cl, 1:nrow(theta), 44 | function(x) Model(theta[x,], Data)) 45 | stopCluster(cl) 46 | lf <- unlist(lapply(mod, 47 | function(x) x[["LP"]]))[1:nrow(theta)] 48 | theta <- matrix(unlist(lapply(mod, 49 | function(x) x[["parm"]])), nrow(theta), ncol(theta)) 50 | rm(mod)} 51 | if(df == Inf) lg <- dmvn(theta, mu, S, log=TRUE) 52 | else lg <- dmvt(theta, mu, S, df, log=TRUE) 53 | prob <- exp(lf - lg - logc) 54 | if(k == 1) theta <- theta[runif(n) < prob] 55 | else theta <- theta[runif(n) < prob,] 56 | class(theta) <- "rejection" 57 | return(theta) 58 | } 59 | 60 | #End 61 | -------------------------------------------------------------------------------- /R/Stick.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # Stick # 3 | # # 4 | # The purpose of the Stick function is to provide the utility of # 5 | # truncated stick-breaking regarding the vector theta. # 6 | ########################################################################### 7 | 8 | Stick <- function(theta) 9 | { 10 | M <- length(theta) + 1 11 | theta <- c(theta, 1) 12 | p <- rep(theta[1], length(theta)) 13 | for (m in 1:(M-1)) { 14 | p[m+1] <- theta[m+1] * (1-theta[m]) * p[m] / theta[m]} 15 | return(p) 16 | } 17 | 18 | #End 19 | --------------------------------------------------------------------------------
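# A Stick() sketch: two breaking proportions imply three probabilities
# that sum to one.
Stick(c(0.3, 0.5))   # 0.30 0.35 0.35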
/R/Thin.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # Thin # 3 | # # 4 | # The purpose of the Thin function is to facilitate the thinning of a # 5 | # matrix of posterior samples. # 6 | ########################################################################### 7 | 8 | Thin <- function(x, By=1) 9 | { 10 | ### Initial Checks 11 | if(!is.matrix(x)) x <- as.matrix(x) 12 | rownum <- nrow(x) 13 | By <- max(1, abs(round(By))) 14 | if(By > rownum) stop("By exceeds number of rows in x.") 15 | ### Thin 16 | keeprows <- which(rep(1:By, len=rownum) == By) 17 | z <- x[keeprows, , drop=FALSE] 18 | return(z) 19 | } 20 | 21 | #End 22 | -------------------------------------------------------------------------------- /R/WAIC.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # WAIC # 3 | # # 4 | # The purpose of the WAIC function is to calculate the Widely Applicable # 5 | # Information Criterion. # 6 | ########################################################################### 7 | 8 | WAIC <- function(x) 9 | { 10 | lppd <- sum(log(rowMeans(exp(x)))) 11 | pWAIC1 <- 2*sum(log(rowMeans(exp(x))) - rowMeans(x)) 12 | pWAIC2 <- sum(.rowVars(x)) 13 | WAIC <- -2*lppd + 2*pWAIC2 14 | return(list(WAIC=WAIC, lppd=lppd, pWAIC=pWAIC2, pWAIC1=pWAIC1)) 15 | } 16 | 17 | #End 18 | -------------------------------------------------------------------------------- /R/as.covar.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # as.covar # 3 | # # 4 | # The purpose of the as.covar function is to retrieve the covariance # 5 | # matrix from an object of class demonoid, demonoid.hpc, iterquad, # 6 | # laplace, pmc, or vb, or in the case of an object of class pmc with # 7 | # mixture components, to retrieve multiple covariance matrices. # 8 | ########################################################################### 9 | 10 | as.covar <- function(x) 11 | { 12 | if(!identical(class(x), "demonoid") & 13 | !identical(class(x), "demonoid.hpc") & 14 | !identical(class(x), "iterquad") & 15 | !identical(class(x), "laplace") & 16 | !identical(class(x), "pmc") & 17 | !identical(class(x), "vb")) 18 | stop("The class of x is unknown.") 19 | if(identical(class(x), "demonoid")) { 20 | if(is.matrix(x$Covar)) { 21 | covar <- x$Covar 22 | } 23 | else if(is.vector(x$Covar)) { 24 | covar <- diag(length(x$Covar)) 25 | diag(covar) <- x$Covar 26 | } 27 | else { 28 | covar <- x$Covar 29 | if(is.list(x$Covar)) 30 | cat("\nThe covariance matrix is blocked.\n") 31 | } 32 | } 33 | else if(identical(class(x), "demonoid.hpc")) { 34 | Chains <- length(x) 35 | Deviance <- list() 36 | for (i in 1:Chains) {Deviance[[i]] <- x[[i]][["Deviance"]]} 37 | j <- which.min(sapply(Deviance, function(x) 38 | {min(x[length(x)])})) 39 | cat("\nChain",j,"has the lowest deviance.\n") 40 | if(is.matrix(x[[j]]$Covar)) { 41 | covar <- x[[j]]$Covar 42 | } 43 | else if(is.vector(x[[j]]$Covar)) { 44 | covar <- diag(length(x[[j]]$Covar)) 45 | diag(covar) <- x[[j]]$Covar 46 | } 47 | else { 48 | covar <- x[[j]]$Covar 49 | if(is.list(x[[j]]$Covar)) 50 | cat("\nThe covariance matrix is blocked.\n") 51 | } 52 | } 53 | else if(identical(class(x), "iterquad")) covar <- x$Covar 54 | else if(identical(class(x), "laplace")) covar <- x$Covar 55 | else if(identical(class(x), "vb")) covar <- x$Covar 56 | else covar <- x$Covar[,,x$Iterations,] 57 | return(covar) 58 | } 59 | 60 | #End 61 | -------------------------------------------------------------------------------- /R/as.initial.values.R: -------------------------------------------------------------------------------- 1 |
########################################################################### 2 | # as.initial.values # 3 | # # 4 | # The purpose of the as.initial.values function is to retrieve the last # 5 | # posterior samples from an object of class demonoid, demonoid.hpc, # 6 | # iterquad, laplace, or pmc to serve as initial values for future # 7 | # updating. # 8 | ########################################################################### 9 | 10 | as.initial.values <- function(x) 11 | { 12 | if(!identical(class(x), "demonoid") & 13 | !identical(class(x), "demonoid.hpc") & 14 | !identical(class(x), "iterquad") & 15 | !identical(class(x), "laplace") & 16 | !identical(class(x), "pmc") & 17 | !identical(class(x), "vb")) 18 | stop("The class of x is unknown.") 19 | if(identical(class(x), "demonoid")) { 20 | initial.values <- as.vector(x$Posterior1[x$Thinned.Samples,]) 21 | } 22 | else if(identical(class(x), "demonoid.hpc")) { 23 | Chains <- length(x) 24 | LIV <- x[[1]][["Parameters"]] 25 | initial.values <- matrix(0, Chains, LIV) 26 | for (i in 1:Chains) { 27 | initial.values[i,] <- as.vector(x[[i]][["Posterior1"]][x[[i]][["Thinned.Samples"]],])}} 28 | else if(identical(class(x), "iterquad")) 29 | initial.values <- as.vector(x$Summary1[,"Mean"]) 30 | else if(identical(class(x), "laplace")) 31 | initial.values <- as.vector(x$Summary1[,"Mode"]) 32 | else if(identical(class(x), "vb")) 33 | initial.values <- as.vector(x$Summary1[,"Mean"]) 34 | else if(x$M == 1) 35 | initial.values <- colMeans(x$Posterior2) 36 | else if(x$M > 1) 37 | initial.values <- t(x$Mu[dim(x$Mu)[1],,]) 38 | return(initial.values) 39 | } 40 | 41 | #End 42 | -------------------------------------------------------------------------------- /R/as.ppc.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # as.ppc # 3 | # # 4 | # The purpose of the as.ppc function is to convert an object of class # 5 | # demonoid.val to an object of class demonoid.ppc, after which the object # 6 | # is ready for posterior predictive checks. # 7 | ########################################################################### 8 | 9 | as.ppc <- function(x, set=3) 10 | { 11 | ### Initial Checks 12 | if(missing(x)) stop("x is required.") 13 | if(!identical(class(x), "demonoid.val")) 14 | stop("x is not of class demonoid.val.") 15 | set <- round(abs(set)) 16 | if(set < 1) set <- 1 17 | else if(set > 3) set <- 3 18 | ### ppc 19 | if(set == 1) ppc <- list(y=x[[1]]$y, yhat=x[[1]]$yhat) 20 | else if(set == 2) ppc <- list(y=x[[2]]$y, yhat=x[[2]]$yhat) 21 | else ppc <- list(y=c(x[[1]]$y, x[[2]]$y), yhat=rbind(x[[1]]$yhat, 22 | x[[2]]$yhat)) 23 | class(ppc) <- "demonoid.ppc" 24 | return(ppc) 25 | } 26 | 27 | #End 28 | -------------------------------------------------------------------------------- /R/burnin.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # burnin # 3 | # # 4 | # The purpose of the burnin function is to estimate the duration of # 5 | # burn-in in iterations for one or more MCMC chains. 
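# Sketches for Thin() and WAIC() above; LL is an illustrative matrix of
# log-likelihoods (rows are observations, columns are posterior samples).
x <- matrix(rnorm(300), 100, 3)
Thin(x, By=10)                          # keeps rows 10, 20, ..., 100
LL <- matrix(rnorm(10*500, -1, 0.1), 10, 500)
WAIC(LL)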
# 6 | ########################################################################### 7 | 8 | burnin <- function(x, method="BMK") 9 | { 10 | if(missing(x)) stop("The x argument is required.") 11 | if(is.vector(x)) x <- matrix(x, length(x), 1) 12 | n <- nrow(x) 13 | burn <- rep(0,ncol(x)) 14 | if(method == "BMK") { 15 | if(n %% 10 == 0) x2 <- x 16 | if(n %% 10 != 0) x2 <- x[1:(10*trunc(n/10)),] 17 | HD <- BMK.Diagnostic(x2, 10) 18 | Ind <- 1 * (HD > 0.5) 19 | burn <- n 20 | batch.list <- seq(from=1, to=nrow(x2), by=floor(nrow(x2)/10)) 21 | for (i in 1:9) { 22 | if(sum(Ind[,i:9]) == 0) { 23 | burn <- batch.list[i] - 1 24 | break 25 | } 26 | } 27 | } 28 | else { 29 | for (i in 1:ncol(x)) { 30 | iter <- 1 31 | stationary <- 0 32 | jump <- round(n/10) 33 | while(stationary == 0) { 34 | if(method == "KS") { 35 | p <- KS.Diagnostic(x[iter:n, i]) 36 | if(p > 0.05) stationary <- 1} 37 | else { #method == Geweke 38 | z <- try(Geweke.Diagnostic(x[iter:n, i]), 39 | silent=TRUE) 40 | if(inherits(z, "try-error")) z <- 3 41 | if(abs(z) < 2) stationary <- 1} 42 | if(stationary == 0) iter <- iter + jump 43 | if(iter >= n) stationary <- 1} 44 | if(iter > 1) iter <- iter - 1 45 | if(iter > n) iter <- n 46 | burn[i] <- iter 47 | } 48 | } 49 | return(burn) 50 | } 51 | 52 | #End 53 | 54 | 55 | -------------------------------------------------------------------------------- /R/de.Finetti.Game.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # de.Finetti.Game # 3 | # # 4 | # The purpose of the de.Finetti.Game function is to elicit the interval # 5 | # of a subjective probability about a possible event in the near future. # 6 | ########################################################################### 7 | 8 | de.Finetti.Game <- function(width) 9 | { 10 | if(missing(width)) stop("The width argument is required.") 11 | if((width <= 0) | (width > 1)) 12 | stop("The width argument is incoherent.") 13 | ques <- paste("\nDescribe a possible event in the near", 14 | "future (such as ``rain tomorrow''): ") 15 | event <- readline(ques) 16 | region <- c(0,1) 17 | while((region[2] - region[1]) > width) { 18 | x <- round(mean(region) * 100) 19 | y <- 100 - x 20 | cat("\nYou have two options:") 21 | cat("\n\n1. Wait and receive $1 if the event happens.") 22 | cat("\n2. Draw a marble from an urn with", x, 23 | "black marbles and", y, "white marbles. Drawing a black", 24 | "marble results in receiving $1.") 25 | ans <- readline("\n\nChoose 1 or 2: ") 26 | if(ans == 1) region[1] <- x / 100 27 | else if(ans == 2) region[2] <- x / 100 28 | else region <- c(0,0) 29 | } 30 | if(sum(region) == 0) cat("\nTry again. Valid answers are 1 or 2.") 31 | else {cat("\n\nYour subjective probability is in the interval [", 32 | region[1], ",", region[2], "] regarding ", event, ".\n\n", 33 | sep="")} 34 | return(region) 35 | } 36 | 37 | #End 38 |
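### A hedged sketch, not run at package load: the first simulated chain
### drifts for its first 100 iterations before settling, so burnin()
### should report a burn-in point at or after that drift.
if(FALSE) {
     library(LaplacesDemon)
     x <- cbind(c(seq(10, 0, len=100), rnorm(900)), rnorm(1000))
     burnin(x, method="BMK")
}
-------------------------------------------------------------------------------- /R/hpc_server.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # server_Listening # 3 | # # 4 | # The server_Listening function is not intended to be called directly by # 5 | # the user. It is an internal-only function that is intended to prevent # 6 | # cluster problems while using the INCA algorithm through the # 7 | # LaplacesDemon.hpc function.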
# 8 | ########################################################################### 9 | 10 | server_Listening <- function(n=2, port=19009) 11 | { 12 | slist <- vector('list', n) 13 | for (i in 1:n) { 14 | slist[[i]] <- socketConnection("localhost", port, server=TRUE, 15 | open="r+") 16 | cat("\nClient", i, "Connected")} 17 | tmp <- NULL 18 | trow <- 0 19 | stop_server <- FALSE 20 | cat("\nStart listening...") 21 | repeat 22 | { 23 | ready <- which(socketSelect(slist, TRUE)) 24 | for (i in ready) { 25 | #print(paste("Socket", i, "ready to write")) 26 | con <- slist[[i]] 27 | #print("Write message...") 28 | if(is.null(tmp)) serialize(tmp, con) 29 | else serialize(tmp[-(((i-1)*trow+1):(i*trow))], con) 30 | #print("Read message...") 31 | buf <- try(unserialize(con), silent=TRUE) 32 | if(is.matrix(buf)) { 33 | if(is.null(tmp)) { 34 | tmp <- matrix(0, nrow=n*nrow(buf), ncol=ncol(buf)) 35 | trow <- nrow(buf) 36 | } 37 | tmp[((i-1)*trow+1):(i*trow),] <- buf 38 | } 39 | else { 40 | stop_server <- TRUE 41 | break 42 | } 43 | } 44 | if(stop_server == TRUE) break 45 | } 46 | for (i in 1:n) { 47 | close(slist[[i]]) 48 | cat("\nClose connection", i) 49 | } 50 | cat("\n") 51 | } 52 | 53 | #End 54 | -------------------------------------------------------------------------------- /R/interval.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # interval # 3 | # # 4 | # The purpose of the interval function is to constrain the element(s) of # 5 | # a scalar, vector, matrix, or array to the interval [a,b]. # 6 | ########################################################################### 7 | 8 | interval <- function(x, a=-Inf, b=Inf, reflect=TRUE) 9 | { 10 | ### Initial Checks 11 | if(missing(x)) stop("The x argument is required.") 12 | if(a > b) stop("a > b.") 13 | if(reflect & is.finite(a) & is.finite(b) & any(!is.finite(x))) { 14 | if(is.array(x)) d <- dim(x) 15 | else d <- NULL 16 | x <- as.vector(x) 17 | x.inf.pos <- !is.finite(x) 18 | x[x.inf.pos] <- interval(x[x.inf.pos], a, b, reflect=FALSE) 19 | if(!is.null(d)) x <- array(x, dim=d) 20 | } 21 | ### Scalar 22 | if(is.vector(x) & {length(x) == 1}) { 23 | if(reflect == FALSE) x <- max(a, min(b, x)) 24 | else if(x < a | x > b) { 25 | out <- TRUE 26 | while(out) { 27 | if(x < a) x <- a + a - x 28 | if(x > b) x <- b + b - x 29 | if(x >= a & x <= b) out <- FALSE 30 | }}} 31 | ### Vector 32 | else if(is.vector(x) & {length(x) > 1}) { 33 | if(reflect == FALSE) { 34 | x.num <- which(x < a) 35 | x[x.num] <- a 36 | x.num <- which(x > b) 37 | x[x.num] <- b} 38 | else if(any(x < a) | any(x > b)) { 39 | out <- TRUE 40 | while(out) { 41 | x.num <- which(x < a) 42 | x[x.num] <- a + a - x[x.num] 43 | x.num <- which(x > b) 44 | x[x.num] <- b + b - x[x.num] 45 | if(all(x >= a) & all(x <= b)) out <- FALSE 46 | }}} 47 | ### Matrix or Array 48 | else if(is.array(x)) { 49 | d <- dim(x) 50 | x <- as.vector(x) 51 | if(reflect == FALSE) { 52 | x.num <- which(x < a) 53 | x[x.num] <- a 54 | x.num <- which(x > b) 55 | x[x.num] <- b} 56 | else if(any(x < a) | any(x > b)) { 57 | out <- TRUE 58 | while(out) { 59 | x.num <- which(x < a) 60 | x[x.num] <- a + a - x[x.num] 61 | x.num <- which(x > b) 62 | x[x.num] <- b + b - x[x.num] 63 | if(all(x >= a) & all(x <= b)) out <- FALSE 64 | }} 65 | x <- array(x, dim=d)} 66 | return(x) 67 | } 68 | 69 | #End 70 |
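### A hedged sketch, not run at package load: reflection folds values
### back into [a,b], whereas reflect=FALSE truncates at the bounds.
if(FALSE) {
     library(LaplacesDemon)
     interval(1.3, a=0, b=1) #reflects 1.3 to 0.7
     interval(1.3, a=0, b=1, reflect=FALSE) #truncates 1.3 to 1
}
-------------------------------------------------------------------------------- /R/is.appeased.R: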
-------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # is.appeased # 3 | # # 4 | # The purpose of the is.appeased function is to perform a logical test of # 5 | # whether or not Laplace's Demon is appeased with an object of class # 6 | # demonoid. # 7 | ########################################################################### 8 | 9 | is.appeased <- function(x) 10 | { 11 | appeased <- FALSE 12 | if(!identical(class(x), "demonoid")) 13 | stop("x must be of class demonoid.") 14 | captive <- capture.output(Consort(x)) 15 | z <- grep("has been appeased", captive) 16 | if(length(z) > 0) appeased <- TRUE 17 | return(appeased) 18 | } 19 | 20 | #End 21 | -------------------------------------------------------------------------------- /R/is.bayesian.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # is.bayesian # 3 | # # 4 | # The purpose of the is.bayesian function is to determine whether or not # 5 | # a model is Bayesian by comparing the log-posterior (LP) and the LL. # 6 | ########################################################################### 7 | 8 | is.bayesian <- function(Model, Initial.Values, Data) 9 | { 10 | if(missing(Model)) stop("The Model argument is required.") 11 | if(missing(Initial.Values)) 12 | stop("The Initial.Values argument is required.") 13 | if(missing(Data)) stop("The Data argument is required.") 14 | bayesian <- FALSE 15 | Mo <- Model(Initial.Values, Data) 16 | LL <- Mo[["Dev"]] / -2 17 | if(Mo[["LP"]] != LL) bayesian <- TRUE 18 | return(bayesian) 19 | } 20 | 21 | #End 22 | -------------------------------------------------------------------------------- /R/is.constant.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # is.constant # 3 | # # 4 | # The purpose of the is.constant function is to provide a logical test of # 5 | # whether or not a vector is a constant. # 6 | ########################################################################### 7 | 8 | is.constant <- function(x) 9 | { 10 | if(missing(x)) stop("The x argument is required.") 11 | if(!is.vector(x)) x <- as.vector(x) 12 | uni <- length(unique(x)) 13 | return(uni <= 1) 14 | } 15 | 16 | #End 17 | -------------------------------------------------------------------------------- /R/is.constrained.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # is.constrained # 3 | # # 4 | # The purpose of the is.constrained function is to provide a logical test # 5 | # of whether or not initial values change as they are passed through the # 6 | # Model specification function, given data. 
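### A hedged illustration: when a Model function transforms its parm
### vector, such as with interval() above, is.constrained() returns a
### logical vector with TRUE for each element the Model altered, e.g.
### constr <- is.constrained(Model, Initial.Values=GIV(Model, MyData),
### Data=MyData), where Model and MyData are a user's own objects.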
# 7 | ########################################################################### 8 | 9 | is.constrained <- function(Model, Initial.Values, Data) 10 | { 11 | if(missing(Model)) 12 | stop("The Model argument is required.") 13 | if(missing(Initial.Values)) 14 | stop("The Initial.Values argument is required.") 15 | if(missing(Data)) 16 | stop("The Data argument is required.") 17 | Mo <- Model(Initial.Values, Data) 18 | constr <- Initial.Values != Mo[["parm"]] 19 | return(constr) 20 | } 21 | 22 | #End 23 | -------------------------------------------------------------------------------- /R/is.data.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # is.data # 3 | # # 4 | # The purpose of the is.data function is to estimate if a list of data is # 5 | # data as far as IterativeQuadrature, LaplaceApproximation, # 6 | # LaplacesDemon, PMC, and VariationalBayes are concerned. # 7 | ########################################################################### 8 | 9 | is.data <- function(Data) 10 | { 11 | if(missing(Data)) 12 | stop("The Data argument is required.") 13 | isdata <- TRUE 14 | if(!is.list(Data)) { 15 | cat("\nData must be a list.\n") 16 | isdata <- FALSE} 17 | if(is.null(Data[["mon.names"]])) { 18 | cat("\nmon.names is NULL.\n") 19 | isdata <- FALSE} 20 | if(is.null(Data[["parm.names"]])) { 21 | cat("\nparm.names is NULL.\n") 22 | isdata <- FALSE} 23 | return(isdata) 24 | } 25 | 26 | #End 27 | -------------------------------------------------------------------------------- /R/is.model.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # is.model # 3 | # # 4 | # The purpose of the is.model function is to estimate if a model # 5 | # specification function meets some minimum criteria. 
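### A hedged illustration of is.data() above: a minimal valid Data list
### needs only the data plus mon.names and parm.names, e.g.
### MyData <- list(y=rnorm(10), mon.names="LP", parm.names="mu"),
### for which is.data(MyData) returns TRUE.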
# 6 | ########################################################################### 7 | 8 | is.model <- function(Model, Initial.Values, Data) 9 | { 10 | if(missing(Model)) stop("The Model argument is required.") 11 | ismodel <- TRUE 12 | if(!is.function(Model)) { 13 | cat("\nModel must be a function.\n") 14 | ismodel <- FALSE} 15 | if(missing(Initial.Values)) 16 | stop("Initial.Values argument is required.") 17 | if(!is.vector(Initial.Values)) 18 | stop("Initial.Values must be a vector.") 19 | if(missing(Data)) stop("The Data argument is required.") 20 | if(!is.data(Data)) stop("The Data argument is not Data.") 21 | if(!identical(length(Initial.Values), length(Data[["parm.names"]]))) 22 | stop("Lengths of Initial.Values and parm.names differ.") 23 | Mo <- try(Model(Initial.Values, Data), silent=TRUE) 24 | if(inherits(Mo, "try-error")) stop("Error in executing the Model.") 25 | if(!is.list(Mo)) { 26 | cat("\nModel must return a list.\n") 27 | ismodel <- FALSE} 28 | else if(length(Mo) != 5) { 29 | cat("\nModel must return 5 list components.\n") 30 | ismodel <- FALSE} 31 | else if(!identical(Mo[[1]], Mo[["LP"]])) { 32 | cat("\nThe first output component must be named LP.\n") 33 | ismodel <- FALSE} 34 | else if(length(Mo[["LP"]]) != 1) { 35 | cat("\nThe length of LP must be 1.\n") 36 | ismodel <- FALSE} 37 | else if(!identical(Mo[[2]], Mo[["Dev"]])) { 38 | cat("\nThe second output component must be named Dev.\n") 39 | ismodel <- FALSE} 40 | else if(length(Mo[["Dev"]]) != 1) { 41 | cat("\nThe length of Dev must be 1.\n") 42 | ismodel <- FALSE} 43 | else if(!identical(Mo[[3]], Mo[["Monitor"]])) { 44 | cat("\nThe third output component must be named Monitor.\n") 45 | ismodel <- FALSE} 46 | else if(!identical(length(Mo[["Monitor"]]), 47 | length(Data[["mon.names"]]))) { 48 | cat("\nThe lengths of Monitor values and mon.names differ.\n") 49 | ismodel <- FALSE} 50 | else if(!identical(Mo[[4]], Mo[["yhat"]])) { 51 | cat("\nThe fourth output component must be named yhat.\n") 52 | ismodel <- FALSE} 53 | else if(!identical(Mo[[5]], Mo[["parm"]])) { 54 | cat("\nThe fifth output component must be named parm.\n") 55 | ismodel <- FALSE} 56 | else if(!identical(length(Mo[["parm"]]), 57 | length(Data[["parm.names"]]))) { 58 | cat("\nThe lengths of parm and parm.names differ.\n") 59 | ismodel <- FALSE} 60 | return(ismodel) 61 | } 62 | 63 | #End 64 | -------------------------------------------------------------------------------- /R/is.proper.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # is.proper # 3 | # # 4 | # The purpose of the is.proper function is to provide a logical check of # 5 | # whether or not a probability distribution is proper, meaning whether or # 6 | # not it integrates to one. 
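### A hedged sketch of a Model function satisfying the five-component
### contract checked by is.model() above; the normal likelihood and
### flat prior are illustrative only:
### Model <- function(parm, Data) {
###      LL <- sum(dnorm(Data$y, parm[1], 1, log=TRUE))
###      list(LP=LL, Dev=-2*LL, Monitor=LL,
###           yhat=rep(parm[1], length(Data$y)), parm=parm)}
### MyData <- list(y=rnorm(10), mon.names="LP", parm.names="mu")
### is.model(Model, Initial.Values=0, Data=MyData) #TRUE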
# 7 | ########################################################################### 8 | 9 | is.proper <- function(f, a, b, tol=1e-5) 10 | { 11 | ### Initial Checks 12 | if(!is.function(f) & (class(f) != "demonoid") & 13 | (class(f) != "iterquad") & (class(f) != "laplace") & 14 | (class(f) != "pmc")) 15 | stop("f is not a function or object of class demonoid, iterquad, laplace, or pmc.") 16 | ### Propriety 17 | propriety <- FALSE 18 | if(is.function(f)) { 19 | if(a >= b) stop("a >= b.") 20 | area <- integrate(f,a,b)$value 21 | if((area >= (1-tol)) & (area <= (1+tol))) propriety <- TRUE 22 | } 23 | else if(is.finite(f$LML)) propriety <- TRUE 24 | return(propriety) 25 | } 26 | 27 | #End 28 | -------------------------------------------------------------------------------- /R/is.stationary.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # is.stationary # 3 | # # 4 | # The purpose of the is.stationary function is to provide a logical test # 5 | # regarding whether or not a vector, matrix, or demonoid object is # 6 | # stationary. The Geweke.Diagnostic function is used. # 7 | ########################################################################### 8 | 9 | is.stationary <- function(x) 10 | { 11 | if(missing(x)) stop("The x argument is required.") 12 | stationary <- FALSE 13 | if(is.vector(x)) { 14 | if(is.constant(x)) return(TRUE) 15 | options(warn=-1) 16 | test <- try(as.vector(Geweke.Diagnostic(x)), silent=TRUE) 17 | options(warn=0) 18 | if(!inherits(test, "try-error") & is.finite(test)) 19 | if((test > -2) & (test < 2)) stationary <- TRUE 20 | } 21 | else if(is.matrix(x)) { 22 | options(warn=-1) 23 | test <- try(as.vector(Geweke.Diagnostic(x)), silent=TRUE) 24 | options(warn=0) 25 | if(!inherits(test, "try-error") & all(is.finite(test))) 26 | if(all(test > -2) & all(test < 2)) stationary <- TRUE 27 | } 28 | else if(identical(class(x), "demonoid")) { 29 | if(x$Rec.BurnIn.Thinned < nrow(x$Posterior1)) stationary <- TRUE} 30 | else if(identical(class(x), "laplace")) { 31 | warning("x is an object of class laplace.") 32 | stationary <- TRUE} 33 | else warning("x is an unrecognized object.") 34 | return(stationary) 35 | } 36 | 37 | #End 38 | -------------------------------------------------------------------------------- /R/log-log.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # Log-Log # 3 | # # 4 | # The logit and probit links are symmetric, because the probabilities # 5 | # approach zero or one at the same rate. The log-log and complementary # 6 | # log-log links are asymmetric. Complementary log-log links approach zero # 7 | # slowly and one quickly. Log-log links approach zero quickly and one # 8 | # slowly. Either the log-log or complementary log-log link may fit # 9 | # better than the logit or probit link, and these links are often used # 10 | # when the probability of an event is small or large. A weighted mixture # 11 | # of the log-log and complementary log-log links is also sometimes used. # 12 | # The reason that logit is so prevalent is that logistic # 13 | # parameters can be interpreted as odds ratios.
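### Hedged examples for is.proper() above: is.proper(dunif, a=0, b=1)
### returns TRUE, while the unnormalized half-height density
### f <- function(x) 0.5*dunif(x) gives is.proper(f, a=0, b=1) == FALSE.
### For the link functions below: cloglog(0.5) is about -0.3665,
### invcloglog(cloglog(0.5)) recovers 0.5, and loglog(p) is identical
### to cloglog(1 - p) for p in [0,1].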
# 14 | ########################################################################### 15 | 16 | loglog <- function(p) 17 | { 18 | if({any(p < 0)} || {any(p > 1)}) stop("p must be in [0,1].") 19 | x <- log(-log(p)) 20 | return(x) 21 | } 22 | invloglog <- function(x) 23 | { 24 | p <- exp(-exp(x)) 25 | return(p) 26 | } 27 | cloglog <- function(p) 28 | { 29 | if({any(p < 0)} || {any(p > 1)}) stop("p must be in [0,1].") 30 | x <- log(-log(1 - p)) 31 | return(x) 32 | } 33 | invcloglog <- function(x) 34 | { 35 | p <- 1 - exp(-exp(x)) 36 | return(p) 37 | } 38 | 39 | #End 40 | -------------------------------------------------------------------------------- /R/logit.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # logit # 3 | # # 4 | # The logit function is the inverse of the sigmoid or logistic function, # 5 | # and transforms a continuous value (usually probability p) in the # 6 | # interval [0,1] to the real line (where it usually is the logarithm of # 7 | # the odds). The invlogit function (called either the inverse logit or # 8 | # the logistic function) transforms a real number (usually the logarithm # 9 | # of the odds) to a value (usually probability p) in the interval [0,1]. # 10 | # If p is a probability, then p/(1-p) is the corresponding odds, while # 11 | # logit of p is the logarithm of the odds. The difference between the # 12 | # logits of two probabilities is the logarithm of the odds ratio. The # 13 | # derivative of probability p in a logistic function is: # 14 | # (d / dx) = p * (1 - p). # 15 | ########################################################################### 16 | 17 | invlogit <- function(x) 18 | { 19 | InvLogit <- 1 / {1 + exp(-x)} 20 | return(InvLogit) 21 | } 22 | logit <- function(p) 23 | { 24 | if({any(p < 0)} || {any(p > 1)}) stop("p must be in [0,1].") 25 | Logit <- log(p / {1 - p}) 26 | return(Logit) 27 | } 28 | 29 | #End 30 | -------------------------------------------------------------------------------- /R/plot.bmk.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # plot.bmk # 3 | # # 4 | # The purpose of the plot.bmk function is to plot an object of class bmk. # 5 | ########################################################################### 6 | 7 | plot.bmk <- function(x, col=colorRampPalette(c("black","red"))(100), 8 | title="", PDF=FALSE, Parms=NULL, ...) 
9 | { 10 | ### Initial Checks 11 | if(missing(x)) stop("x is a required argument.") 12 | if(!identical(class(x), "bmk")) 13 | stop("x must be an object of class bmk.") 14 | ### Selecting Parms 15 | if(!is.null(Parms)) { 16 | Parms <- sub("\\[","\\\\[",Parms) 17 | Parms <- sub("\\]","\\\\]",Parms) 18 | Parms <- sub("\\.","\\\\.",Parms) 19 | if(length(grep(Parms[1], rownames(x))) == 0) 20 | stop("Parameter in Parms does not exist.") 21 | keeprows <- grep(Parms[1], rownames(x)) 22 | if(length(Parms) > 1) { 23 | for (i in 2:length(Parms)) { 24 | if(length(grep(Parms[i], rownames(x))) == 0) 25 | stop("Parameter in Parms does not exist.") 26 | keeprows <- c(keeprows, 27 | grep(Parms[i], rownames(x)))}} 28 | x.temp <- as.matrix(x[keeprows,]) 29 | rownames(x.temp) <- rownames(x)[keeprows] 30 | x <- x.temp 31 | rm(x.temp)} 32 | ### Initial Settings 33 | min <- 0 34 | max <- 1 35 | if(is.null(colnames(x))) xLabels <- 1:ncol(x) 36 | else xLabels <- colnames(x) 37 | if(is.null(rownames(x))) yLabels <- 1:nrow(x) 38 | else yLabels <- rownames(x) 39 | ### plot.bmk 40 | if(PDF == TRUE) pdf("plot.bmk.pdf") 41 | ### Layout and Colors 42 | layout(matrix(data=c(1,2), nrow=1, ncol=2), widths=c(4,1), 43 | heights=c(1,1)) 44 | ColorRamp <- col 45 | ColorLevels <- seq(min, max, length=length(ColorRamp)) 46 | ### Reverse y-axis 47 | reverse <- nrow(x):1 48 | yLabels <- yLabels[reverse] 49 | x <- x[reverse,] 50 | ### Data Map 51 | par(mar = c(3,5,2.5,2)) 52 | image(1:length(xLabels), 1:length(yLabels), t(x), col=ColorRamp, 53 | xlab="", ylab="", axes=FALSE, zlim=c(min,max)) 54 | if(!is.null(title)) title(main=title) 55 | axis(BELOW<-1, at=1:length(xLabels), labels=xLabels, cex.axis=0.7) 56 | axis(LEFT <-2, at=1:length(yLabels), labels=yLabels, 57 | las=HORIZONTAL<-1, cex.axis=0.7) 58 | par(mar=c(3,2.5,2.5,2)) 59 | image(1, ColorLevels, 60 | matrix(data=ColorLevels, ncol=length(ColorLevels), nrow=1), 61 | col=ColorRamp, xlab="", ylab="", xaxt="n") 62 | layout(1) 63 | if(PDF == TRUE) dev.off() 64 | } 65 | 66 | #End 67 |
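### A hedged sketch, not run at package load: BMK.Diagnostic() returns
### a matrix of Hellinger distances between consecutive batches of each
### chain (values above 0.5 suggest non-stationarity), which the plot
### method above renders as a black-to-red heat map.
if(FALSE) {
     library(LaplacesDemon)
     x <- matrix(rnorm(10000), 1000, 10)
     colnames(x) <- paste("beta[", 1:10, "]", sep="")
     plot(BMK.Diagnostic(x, 10))
}
-------------------------------------------------------------------------------- /R/plot.importance.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # plot.importance # 3 | # # 4 | # The purpose of the plot.importance function is to plot variable # 5 | # importance according either to BPIC or the L-criterion in an object of # 6 | # class importance. # 7 | ########################################################################### 8 | 9 | plot.importance <- function(x, Style="BPIC", ...)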
10 | { 11 | ### Initial Checks 12 | if(missing(x)) stop("The x argument is required.") 13 | if(!identical(Style, "BPIC") & !identical(Style, "Concordance") & 14 | !identical(Style, "Discrep") & !identical(Style, "L-criterion")) 15 | stop("Style is unrecognized.") 16 | if(!identical(class(x), "importance")) 17 | stop("x must be of class importance.") 18 | if(identical(Style, "BPIC")) 19 | dotchart(x[,1], main="Variable Importance", xlab="BPIC", pch=20) 20 | else if(identical(Style, "Concordance")) 21 | dotchart(x[,2], main="Variable Importance", xlab="Concordance", 22 | pch=20) 23 | else if(identical(Style, "Discrep")) 24 | dotchart(x[,3], main="Variable Importance", 25 | xlab="Discrepancy Statistic", pch=20) 26 | else dotchart(x[,4], main="Variable Importance", xlab="L-criterion", 27 | pch=20) 28 | return(invisible()) 29 | } 30 | 31 | #End 32 | -------------------------------------------------------------------------------- /R/plot.juxtapose.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # plot.juxtapose # 3 | # # 4 | # The purpose of the plot.juxtapose function is to plot a comparison of # 5 | # MCMC algorithms according either to IAT or ISM. # 6 | ########################################################################### 7 | 8 | plot.juxtapose <- function(x, Style="ISM", ...) 9 | { 10 | ### Initial Checks 11 | if(missing(x)) stop("The x argument is required.") 12 | if(!identical(class(x), "juxtapose")) 13 | stop("The x argument is not of class juxtapose.") 14 | if((Style != "IAT") & (Style != "ISM")) 15 | stop("Style must be IAT or ISM") 16 | Title <- "MCMC Juxtaposition" 17 | if(identical(Style, "IAT")) { 18 | ### Basic Plot 19 | plot(0, 0, ylim=c(0, ncol(x) + 1), xlim=c(1, max(x[6,])), 20 | main=Title, sub="", xlab="Integrated Autocorrelation Time", 21 | ylab="", type="n", ann=TRUE, yaxt="n") 22 | abline(v=1, col="gray") 23 | ### Add Medians 24 | points(x[5,], ncol(x):1, pch=20) 25 | ### Add Horizontal Lines for 2.5%-97.5% Quantiles 26 | for (i in 1:ncol(x)) { 27 | lines(x[c(4,6),i], c(ncol(x)-i+1, ncol(x)-i+1))} 28 | ### Add y-axis labels 29 | yy <- ncol(x):1 30 | cex.labels <- 1 / {log(ncol(x))/5 + 1} 31 | axis(2, labels=colnames(x), tick=FALSE, las=1, at=yy, 32 | cex.axis=cex.labels)} 33 | else { 34 | ### Basic Plot 35 | plot(0, 0, ylim=c(0, ncol(x) + 1), xlim=c(0, max(x[9,])), 36 | main=Title, sub="", xlab="Independent Samples per Minute", 37 | ylab="", type="n", ann=TRUE, yaxt="n") 38 | abline(v=0, col="gray") 39 | ### Add Medians 40 | points(x[8,], ncol(x):1, pch=20) 41 | ### Add Horizontal Lines for 2.5%-97.5% Quantiles 42 | for (i in 1:ncol(x)) { 43 | lines(x[c(7,9),i], c(ncol(x)-i+1, ncol(x)-i+1))} 44 | ### Add y-axis labels 45 | yy <- ncol(x):1 46 | cex.labels <- 1 / {log(ncol(x))/5 + 1} 47 | axis(2, labels=colnames(x), tick=FALSE, las=1, at=yy, 48 | cex.axis=cex.labels)} 49 | } 50 | 51 | #End 52 | -------------------------------------------------------------------------------- /R/plot.miss.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # plot.miss # 3 | # # 4 | # The purpose of the plot.miss function is to plot an object of class # 5 | # miss. # 6 | ########################################################################### 7 | 8 | plot.miss <- function(x, PDF=FALSE, ...) 
9 | { 10 | ### Initial Checks 11 | if(missing(x)) stop("The x argument is required.") 12 | if(PDF == TRUE) { 13 | pdf("MISS.Plots.pdf") 14 | par(mfrow=c(3,3)) 15 | } 16 | else par(mfrow=c(3,3), ask=TRUE) 17 | ### Plot Imputations 18 | for (i in 1:nrow(x$Imp)) { 19 | plot(1:ncol(x$Imp), x$Imp[i,], type="l", xlab="Iterations", 20 | ylab="Value", main=paste("Imp[", i, ",]", sep="")) 21 | panel.smooth(1:ncol(x$Imp), x$Imp[i,], pch="") 22 | plot(density(x$Imp[i,]), xlab="Value", 23 | main=paste("Imp[", i, ",]")) 24 | polygon(density(x$Imp[i,]), col="black", border="black") 25 | ### Only plot an ACF if there is > 1 unique value 26 | if(!is.constant(x$Imp[i,])) { 27 | z <- acf(x$Imp[i,], plot=FALSE) 28 | se <- 1/sqrt(length(x$Imp[i,])) 29 | plot(z$lag, z$acf, ylim=c(min(z$acf,-2*se),1), type="h", 30 | main=paste("Imp[", i, ",]"), xlab="Lag", 31 | ylab="Correlation") 32 | abline(h=(2*se), col="red", lty=2) 33 | abline(h=(-2*se), col="red", lty=2) 34 | } 35 | else plot(0, 0, main=paste("Imp[", i, ",]", 36 | "is a constant."))} 37 | if(PDF == TRUE) dev.off() 38 | } 39 | 40 | #End 41 | -------------------------------------------------------------------------------- /R/print.demonoid.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # print.demonoid # 3 | # # 4 | # The purpose of the print.demonoid function is to print the contents of # 5 | # an object of class demonoid to the screen. # 6 | ########################################################################### 7 | 8 | print.demonoid <- function(x, ...) 9 | { 10 | if(missing(x)) stop("The x argument is required.") 11 | cat("Call:\n") 12 | print(x$Call) 13 | cat("\nAcceptance Rate: ", round(x$Acceptance.Rate,5), 14 | "\n", sep="") 15 | cat("Algorithm: ", x$Algorithm, "\n", sep="") 16 | cat("Covariance Matrix: (NOT SHOWN HERE; diagonal shown instead)\n") 17 | if(is.matrix(x$Covar)) { 18 | print(diag(x$Covar)) 19 | } 20 | else if(!is.list(x$Covar) & is.vector(x$Covar)) { 21 | print(x$Covar) 22 | } 23 | else for (i in 1:length(x$Covar)) { 24 | cat("Block:", i, "\n") 25 | print(diag(x$Covar[[i]])) 26 | cat("\n")} 27 | cat("\nCovariance (Diagonal) History: (NOT SHOWN HERE)\n") 28 | cat("Deviance Information Criterion (DIC):\n") 29 | DIC <- matrix(c(round(x$DIC1[1],3), round(x$DIC1[2],3), 30 | round(x$DIC1[3],3), round(x$DIC2[1],3), round(x$DIC2[2],3), 31 | round(x$DIC2[3],3)), 3, 2, 32 | dimnames=list(c("Dbar","pD","DIC"),c("All","Stationary"))) 33 | print(DIC) 34 | cat("Initial Values:\n") 35 | print(x$Initial.Values) 36 | cat("\nIterations: ", x$Iterations, "\n", sep="") 37 | cat("Log(Marginal Likelihood): ", x$LML, "\n", sep="") 38 | cat("Minutes of run-time: ", round(x$Minutes,2), "\n", 39 | sep="") 40 | cat("Model: (NOT SHOWN HERE)\n") 41 | cat("Monitor: (NOT SHOWN HERE)\n") 42 | cat("Parameters (Number of): ", x$Parameters, "\n", 43 | sep="") 44 | cat("Posterior1: (NOT SHOWN HERE)\n") 45 | cat("Posterior2: (NOT SHOWN HERE)\n") 46 | cat("Recommended Burn-In of Thinned Samples: ", 47 | x$Rec.BurnIn.Thinned, "\n", sep="") 48 | cat("Recommended Burn-In of Un-thinned Samples: ", 49 | x$Rec.BurnIn.UnThinned, "\n", sep="") 50 | cat("Recommended Thinning: ", x$Rec.Thinning, "\n", sep="") 51 | cat("Specs: (NOT SHOWN HERE)\n") 52 | cat("Status is displayed every ", x$Status, 53 | " iterations\n", sep="") 54 | cat("Summary1: (SHOWN BELOW)\n") 55 | cat("Summary2: (SHOWN BELOW)\n") 56 | cat("Thinned Samples: ", x$Thinned.Samples, "\n", 57 |
sep="") 58 | cat("Thinning: ", x$Thinning, "\n", sep="") 59 | cat("\n\nSummary of All Samples\n") 60 | print(x$Summary1) 61 | cat("\n\nSummary of Stationary Samples\n") 62 | print(x$Summary2) 63 | invisible(x) 64 | } 65 | 66 | #End 67 | -------------------------------------------------------------------------------- /R/print.heidelberger.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # print.heidelberger # 3 | # # 4 | # The purpose of the print.heidelberger function is to print the contents # 5 | # of an object of class heidelberger to the screen. # 6 | ########################################################################### 7 | 8 | print.heidelberger <- function(x, digits=3, ...) 9 | { 10 | HW.title <- matrix(c("Stationarity", "test", "start", "iteration", 11 | "p-value", "", "Halfwidth", "test", "Mean", "", "Halfwidth", ""), 12 | nrow=2) 13 | y <- matrix("", nrow=nrow(x), ncol=6) 14 | for (j in 1:ncol(y)) y[,j] <- format(x[,j], digits=digits) 15 | y[,c(1,4)] <- ifelse(x[,c(1,4)], "passed", "failed") 16 | y <- rbind(HW.title, y) 17 | vnames <- if(is.null(rownames(x))) paste("[,", 1:nrow(x), "]", sep="") 18 | else rownames(x) 19 | dimnames(y) <- list(c("", "", vnames), rep("", 6)) 20 | print.default(y[, 1:3], quote=FALSE, ...) 21 | print.default(y[, 4:6], quote=FALSE, ...) 22 | invisible(x) 23 | } 24 | 25 | #End 26 | -------------------------------------------------------------------------------- /R/print.iterquad.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # print.iterquad # 3 | # # 4 | # The purpose of the print.iterquad function is to print the contents of # 5 | # an object of class iterquad to the screen. # 6 | ########################################################################### 7 | 8 | print.iterquad <- function(x, ...)
9 | { 10 | if(missing(x)) stop("The x argument is required.") 11 | cat("\nAlgorithm: ", x$Algorithm, sep="") 12 | cat("\nCall:\n") 13 | print(x$Call) 14 | cat("\nConverged: ", x$Converged, "\n", sep="") 15 | cat("Covariance Matrix: (NOT SHOWN HERE; diagonal shown instead)\n") 16 | print(diag(x$Covar)) 17 | cat("\nDeviance (Final): ", x$Deviance[length(x$Deviance)], "\n") 18 | cat("History: (NOT SHOWN HERE)\n") 19 | cat("Initial Values:\n") 20 | print(x$Initial.Values) 21 | cat("\nIterations: ", x$Iterations, "\n", sep="") 22 | cat("Log(Marginal Likelihood): ", x$LML, "\n", sep="") 23 | cat("Log-Posterior (Final): ", x$LP.Final, "\n", sep="") 24 | cat("Log-Posterior (Initial): ", x$LP.Initial, "\n", sep="") 25 | cat("Log-Posterior (Weights): (NOT SHOWN HERE)\n") 26 | cat("M: (NOT SHOWN HERE)\n") 27 | cat("Minutes of run-time: ", x$Minutes, "\n", sep="") 28 | cat("Monitor: (NOT SHOWN HERE)\n") 29 | cat("Nodes: ", x$N, "\n", sep="") 30 | cat("Posterior: (NOT SHOWN HERE)\n") 31 | cat("Summary1: (SHOWN BELOW)\n") 32 | cat("Summary2: (SHOWN BELOW)\n") 33 | cat("Tolerance (Final):\n") 34 | print(x$Tolerance.Final) 35 | cat("Tolerance (Stop):\n") 36 | print(x$Tolerance.Stop) 37 | cat("Z: (NOT SHOWN HERE)\n") 38 | 39 | cat("\nSummary1:\n") 40 | print(x$Summary1) 41 | if({x$Converged == TRUE} && !any(is.na(x$Posterior))) { 42 | cat("\nSummary2:\n") 43 | print(x$Summary2)} 44 | 45 | invisible(x) 46 | } 47 | 48 | #End 49 | -------------------------------------------------------------------------------- /R/print.laplace.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # print.laplace # 3 | # # 4 | # The purpose of the print.laplace function is to print the contents of # 5 | # an object of class laplace to the screen. # 6 | ########################################################################### 7 | 8 | print.laplace <- function(x, ...) 
9 | { 10 | if(missing(x)) stop("The x argument is required.") 11 | cat("\nCall:\n") 12 | print(x$Call) 13 | cat("\nConverged: ", x$Converged, "\n", sep="") 14 | cat("Covariance Matrix: (NOT SHOWN HERE; diagonal shown instead)\n") 15 | print(diag(x$Covar)) 16 | cat("\nDeviance (Final): ", x$Deviance[length(x$Deviance)], "\n") 17 | cat("History: (NOT SHOWN HERE)\n") 18 | cat("Initial Values:\n") 19 | print(x$Initial.Values) 20 | cat("\nIterations: ", x$Iterations, "\n", sep="") 21 | cat("Log(Marginal Likelihood): ", x$LML, "\n", sep="") 22 | cat("Log-Posterior (Final): ", x$LP.Final, "\n", sep="") 23 | cat("Log-Posterior (Initial): ", x$LP.Initial, "\n", sep="") 24 | cat("Minutes of run-time: ", x$Minutes, "\n", sep="") 25 | cat("Monitor: (NOT SHOWN HERE)\n") 26 | cat("Posterior: (NOT SHOWN HERE)\n") 27 | cat("Step Size (Final): ") 28 | print(x$Step.Size.Final) 29 | cat("Step Size (Initial): ", x$Step.Size.Initial, "\n", sep="") 30 | cat("Summary1: (SHOWN BELOW)\n") 31 | cat("Summary2: (SHOWN BELOW)\n") 32 | cat("Tolerance (Final): ", x$Tolerance.Final, "\n", sep="") 33 | cat("Tolerance (Stop): ", x$Tolerance.Stop, "\n", sep="") 34 | 35 | cat("\nSummary1:\n") 36 | print(x$Summary1) 37 | if({x$Converged == TRUE} && !any(is.na(x$Posterior))) { 38 | cat("\nSummary2:\n") 39 | print(x$Summary2)} 40 | 41 | invisible(x) 42 | } 43 | 44 | #End 45 | -------------------------------------------------------------------------------- /R/print.miss.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # print.miss # 3 | # # 4 | # The purpose of the print.miss function is to print the contents of an # 5 | # object of class miss to the screen. # 6 | ########################################################################### 7 | 8 | print.miss <- function(x, ...) 9 | { 10 | if(missing(x)) stop("The x argument is required.") 11 | cat("\nAlgorithm:", x$Algorithm) 12 | cat("\nImp:") 13 | cat("\n Missing Values:", nrow(x$Imp)) 14 | cat("\n Iterations:", ncol(x$Imp)) 15 | cat("\nparm: (NOT SHOWN HERE)") 16 | cat("\nPostMode: (NOT SHOWN HERE)") 17 | cat("\nType: (NOT SHOWN HERE)\n") 18 | invisible(x) 19 | } 20 | 21 | #End 22 | -------------------------------------------------------------------------------- /R/print.pmc.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # print.pmc # 3 | # # 4 | # The purpose of the print.pmc function is to print the contents of an # 5 | # object of class pmc to the screen. # 6 | ########################################################################### 7 | 8 | print.pmc <- function(x, ...) 
9 | { 10 | if(missing(x)) stop("The x argument is required.") 11 | cat("Call:\n") 12 | print(x$Call) 13 | cat("\nalpha:\n", sep="") 14 | print(x$alpha) 15 | cat("Covariance Matrix: (NOT SHOWN HERE)\n") 16 | cat("Deviance: (NOT SHOWN HERE)\n") 17 | cat("Deviance Information Criterion (DIC):\n") 18 | DIC <- matrix(c(round(x$DIC[1],3), round(x$DIC[2],3), 19 | round(x$DIC[3],3)), 3, 1, 20 | dimnames=list(c("Dbar","pD","DIC"),c("All"))) 21 | print(DIC) 22 | cat("ESSN:\n") 23 | print(x$ESSN) 24 | cat("Initial Values:\n") 25 | print(x$Initial.Values) 26 | cat("\nIterations: ", x$Iterations, "\n", sep="") 27 | cat("Log(Marginal Likelihood): ", x$LML, "\n", sep="") 28 | cat("M (Mixture Components): ", x$M, "\n", sep="") 29 | cat("Minutes of run-time: ", round(x$Minutes,2), "\n", 30 | sep="") 31 | cat("Model: (NOT SHOWN HERE)\n") 32 | cat("Monitor: (NOT SHOWN HERE)\n") 33 | cat("Mu: (NOT SHOWN HERE)\n") 34 | cat("Number of Samples: ", x$N, "\n", sep="") 35 | cat("nu: ", x$nu, "\n", sep="") 36 | cat("Parameters (Number of): ", x$Parameters, "\n", 37 | sep="") 38 | cat("Perplexity, Normalized:\n") 39 | print(x$Perplexity) 40 | cat("Posterior1: (NOT SHOWN HERE)\n") 41 | cat("Posterior2: (NOT SHOWN HERE)\n") 42 | cat("Summary: (SHOWN BELOW)\n") 43 | cat("Thinned Samples: ", x$Thinned.Samples, "\n", 44 | sep="") 45 | cat("Thinning: ", x$Thinning, "\n", sep="") 46 | cat("Weights: (NOT SHOWN HERE)\n") 47 | cat("\n\nSummary:\n") 48 | print(x$Summary) 49 | invisible(x) 50 | } 51 | 52 | #End 53 | -------------------------------------------------------------------------------- /R/print.raftery.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # print.raftery # 3 | # # 4 | # The purpose of the print.raftery function is to print the contents of # 5 | # an object of class raftery to the screen. # 6 | ########################################################################### 7 | 8 | print.raftery <- function(x, digits=3, ...) 9 | { 10 | cat("\nQuantile (q) =", x$params["q"]) 11 | cat("\nAccuracy (r) = +/-", x$params["r"]) 12 | cat("\nProbability (s) =", x$params["s"], "\n") 13 | if(x$resmatrix[1] == "Error") 14 | cat("\nYou need a sample size of at least", x$resmatrix[2], 15 | "with these values of q, r and s.\n") 16 | else { 17 | out <- x$resmatrix 18 | for (i in 1:ncol(out)) out[, i] <- format(out[, i], digits=digits) 19 | out <- rbind(matrix(c("Burn-in ", "Total", "Lower bound ", 20 | "Dependence", "(M)", "(N)", "(Nmin)", "factor (I)"), 21 | byrow=TRUE, nrow=2), out) 22 | if(!is.null(rownames(x$resmatrix))) 23 | out <- cbind(c("", "", rownames(x$resmatrix)), out) 24 | dimnames(out) <- list(rep("", nrow(out)), rep("", ncol(out))) 25 | print.default(out, quote=FALSE, ...) 26 | cat("\n")} 27 | invisible(x) 28 | } 29 | 30 | #End 31 | -------------------------------------------------------------------------------- /R/print.vb.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # print.vb # 3 | # # 4 | # The purpose of the print.vb function is to print the contents of an # 5 | # object of class vb to the screen. # 6 | ########################################################################### 7 | 8 | print.vb <- function(x, ...)
9 | { 10 | if(missing(x)) stop("The x argument is required.") 11 | cat("\nCall:\n") 12 | print(x$Call) 13 | cat("\nConverged: ", x$Converged, "\n", sep="") 14 | cat("Covariance Matrix: (NOT SHOWN HERE; diagonal shown instead)\n") 15 | print(diag(x$Covar)) 16 | cat("\nDeviance (Final): ", x$Deviance[length(x$Deviance)], "\n") 17 | cat("History: (NOT SHOWN HERE)\n") 18 | cat("Initial Values:\n") 19 | print(x$Initial.Values) 20 | cat("\nIterations: ", x$Iterations, "\n", sep="") 21 | cat("Log(Marginal Likelihood): ", x$LML, "\n", sep="") 22 | cat("Log-Posterior (Final): ", x$LP.Final, "\n", sep="") 23 | cat("Log-Posterior (Initial): ", x$LP.Initial, "\n", sep="") 24 | cat("Minutes of run-time: ", x$Minutes, "\n", sep="") 25 | cat("Monitor: (NOT SHOWN HERE)\n") 26 | cat("Posterior: (NOT SHOWN HERE)\n") 27 | cat("Step Size (Final): ") 28 | print(x$Step.Size.Final) 29 | cat("Step Size (Initial): ", x$Step.Size.Initial, "\n", sep="") 30 | cat("Summary1: (SHOWN BELOW)\n") 31 | cat("Summary2: (SHOWN BELOW)\n") 32 | cat("Tolerance (Final): ", x$Tolerance.Final, "\n", sep="") 33 | cat("Tolerance (Stop): ", x$Tolerance.Stop, "\n", sep="") 34 | 35 | cat("\nSummary1:\n") 36 | print(x$Summary1) 37 | if({x$Converged == TRUE} && !any(is.na(x$Posterior))) { 38 | cat("\nSummary2:\n") 39 | print(x$Summary2)} 40 | 41 | invisible(x) 42 | } 43 | 44 | #End 45 | -------------------------------------------------------------------------------- /R/summary.miss.R: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # summary.miss # 3 | # # 4 | # The purpose of the summary.miss function is to summarize an object of # 5 | # class miss. # 6 | ########################################################################### 7 | 8 | summary.miss <- function(object=NULL, ...) 9 | { 10 | if(is.null(object)) stop("The object argument is NULL.") 11 | x <- object$Imp 12 | Summ <- matrix(NA, nrow(x), 7, dimnames=list(1:nrow(x), 13 | c("Mean","SD","MCSE","ESS","LB","Median","UB"))) 14 | Summ[,1] <- rowMeans(x) 15 | Summ[,2] <- sqrt(.rowVars(x)) 16 | Summ[,3] <- 0 17 | Summ[,4] <- 0 18 | Summ[,5] <- apply(x, 1, quantile, c(0.025), na.rm=TRUE) 19 | Summ[,6] <- apply(x, 1, quantile, c(0.500), na.rm=TRUE) 20 | Summ[,7] <- apply(x, 1, quantile, c(0.975), na.rm=TRUE) 21 | acf.temp <- matrix(1, trunc(10*log10(ncol(x))), nrow(x)) 22 | for (i in 1:nrow(x)) { 23 | ### MCSE 24 | temp <- try(MCSE(x[i,]), silent=TRUE) 25 | if(!inherits(temp, "try-error")) Summ[i,3] <- temp 26 | else Summ[i,3] <- MCSE(x[i,], method="sample.variance") 27 | ### ESS 28 | Summ[i,4] <- ESS(x[i,])} 29 | print(Summ) 30 | return(invisible(Summ)) 31 | } 32 | 33 | #End 34 | -------------------------------------------------------------------------------- /README: -------------------------------------------------------------------------------- 1 | LaplacesDemon was initially developed and uploaded to CRAN by Byron Hall, the owner of Statisticat, LLC. Later, the maintainer of the package changed to Martina Hall. 2 | 3 | The last version available on CRAN from the original authors and maintainers was version 13.03.04, which was removed from CRAN on 2013-07-16 at the request of the maintainer. 4 | 5 | After removal from CRAN, the development of LaplacesDemon continued for some time on GitHub under the name of Statisticat LLC (presumably still run by Byron Hall). The last commit by Statisticat for LaplacesDemon on GitHub was performed on 25 March 2015.
After that Statisticat deleted their account on GitHub and ceased further development of the package. 6 | 7 | As Statisticat could not be reached, neither by e-mail nor by snail-mail (the latter was attempted by Rasmus Bååth), Henrik Singmann took over as maintainer of LaplacesDemon in July 2016 with the goal to resubmit the package to CRAN (as version 16.0.x). Henrik Singmann does not actively continue the development of LaplacesDemon but only retains it on CRAN in its current state. 8 | 9 | Note that in order to resubmit the package to CRAN all links to the now defunct website of Statisticat (formerly: http://www.bayesian-inference.com) were replaced with links to versions of this website on the web archive (https://web.archive.org/web/20141224051720/http://www.bayesian-inference.com/index). 10 | 11 | To contribute to the development of LaplacesDemon or discuss the development please visit its new repository: https://github.com/LaplacesDemonR/LaplacesDemon 12 | 13 | -------------------------------------------------------------------------------- /data/demonsessions.txt: -------------------------------------------------------------------------------- 1 | Africa Americas Asia Europe Oceania Not.Set 2 | 201301 5 250 80 200 16 12 3 | 201302 22 455 118 290 34 18 4 | 201303 27 1143 287 826 59 49 5 | 201304 98 1332 402 976 136 24 6 | 201305 62 1171 398 1051 124 19 7 | 201306 78 1201 427 972 150 32 8 | 201307 51 1322 588 1133 146 24 9 | 201308 67 1319 531 1078 190 21 10 | 201309 75 1860 695 1293 239 26 11 | 201310 70 2475 657 1757 250 19 12 | 201311 103 3058 893 2184 249 43 13 | 201312 112 2697 956 1902 165 55 14 | 201401 158 2947 1114 2730 207 24 15 | 201402 133 3683 1084 2815 282 12 16 | 201403 185 4732 1337 3306 400 18 17 | 201404 201 4956 1523 3320 357 28 18 | 201405 172 4321 1610 3728 486 26 19 | 201406 124 3578 1467 3221 465 21 20 | 201407 189 3313 1158 2655 328 23 21 | 201408 164 3552 1194 2496 547 16 22 | 201409 148 4674 1486 3100 474 19 23 | 201410 147 5817 1697 3951 516 26 24 | 201411 215 6649 2039 4639 542 46 25 | 201412 156 5619 1987 3828 303 37 26 | 201501 206 5364 1910 4561 344 26 27 | 201502 224 7069 1823 4607 442 15 28 | -------------------------------------------------------------------------------- /data/demonsnacks.txt: -------------------------------------------------------------------------------- 1 | Serving.Size Calories Total.Fat Saturated.Fat Cholesterol Sodium Total.Carbohydrate Dietary.Fiber Sugars Protein 2 | Apple 125 65 0 0 0 1 17 3 13 0 3 | Apricots.Dried 250 213 0 0 0 10 55 7 49 3 4 | Banana.Chips 85 441 29 25 0 5 50 7 30 2 5 | Banana 225 200 1 0 0 2 51 6 28 2 6 | Beef.Jerky 20 82 5 2 10 443 2 0 2 7 7 | Beer.Light 29 9 0 0 0 1 0 0 0 0 8 | Bit.O.Honey 40 150 3 2 0 118 32 0 19 1 9 | Carrots.Baby 15 5 0 0 0 12 1 0 1 0 10 | Cherries 155 77 0 0 0 5 19 2 13 2 11 | Cranberries.Dried 40 123 1 0 0 1 33 2 26 0 12 | Dates 24 66 0 0 0 0 18 2 16 0 13 | Fritolay 28 137 6 1 0 92 19 2 2 2 14 | Granola.Bar 24 116 6 1 0 68 15 1 0 2 15 | Grapes 92 62 0 0 0 2 16 1 15 1 16 | Jellybeans 11 41 0 0 0 5 10 0 8 0 17 | Mangos 165 107 0 0 0 3 28 3 24 1 18 | M&Ms 208 1023 44 27 29 127 148 6 132 9 19 | Marshmallows 50 159 0 0 0 40 41 0 29 1 20 | Nuts.Mixed 142 876 80 12 0 595 30 13 6 24 21 | Nuts.Pistachio 123 702 57 7 0 12 34 13 10 26 22 | Oatmeal.Quaker 40 133 1 0 0 1 29 5 0 5 23 | Oranges 185 85 0 0 0 0 21 4 17 1 24 | Pickles.Dill 143 17 0 0 0 1251 4 2 2 1 25 | Popcorn.Microwave 28 120 3 0 0 137 21 4 0 4 26 | Potato.Chips 198 1026 56 7 0 1220 125 10 2 12 27 | Pretzels 143 483 4 1 4 2008 99 
2 0 12 28 | Pumpkin.Seeds.Salted 64 285 12 2 0 368 34 0 0 12 29 | Rolo 48 228 10 7 6 90 33 0 31 2 30 | Salmon.Atlantic.Wild 154 280 13 2 109 86 0 0 0 39 31 | Snickers 15 71 4 1 2 37 9 0 8 1 32 | Strawberries 152 49 0 0 0 2 12 3 7 1 33 | Sunflower.Seeds.Salted 128 745 64 7 0 525 31 12 3 25 34 | Tortilla.Chips 28 137 7 1 0 118 18 1 0 2 35 | Trail.Mix.Regular 150 693 44 8 0 343 67 0 0 21 36 | Trail.Mix.Tropical 140 570 24 12 0 14 92 0 0 9 37 | Turkey.Breast 45 42 0 0 19 540 1 0 0 9 38 | Scooby.Snacks 312 1566 78 59 22 618 203 3 151 15 39 | Twix 58 311 19 9 3 131 31 2 21 5 40 | Twizzlers 71 249 2 0 0 204 57 0 28 2 41 | -------------------------------------------------------------------------------- /development.R: -------------------------------------------------------------------------------- 1 | require(devtools) 2 | load_all() 3 | check(document = FALSE) 4 | 5 | build_vignettes() 6 | 7 | # build with: 8 | # bash R-devel CMD build LaplacesDemon --compact-vignettes="gs+qpdf" 9 | devtools::build(args = '--compact-vignettes=gs+qpdf') 10 | 11 | # 12 | devtools::revdep_maintainers() 13 | -------------------------------------------------------------------------------- /inst/CITATION: -------------------------------------------------------------------------------- 1 | if(!exists('meta') || is.null(meta)) meta <- packageDescription("LaplacesDemon") 2 | 3 | year <- sub(".*(2[[:digit:]]{3})-.*", "\\1", meta$Date) 4 | vers <- paste("R package version", meta$Version) 5 | author <- "Statisticat, LLC." #as.personList(meta$Author) 6 | url <- "https://web.archive.org/web/20150206004624/http://www.bayesian-inference.com/software" 7 | 8 | citHeader(sprintf("To cite package '%s' in publications use:", meta$Package)) 9 | 10 | citEntry(entry="Manual", 11 | author = author, 12 | year = year, 13 | title =sprintf("%s: %s", meta$Package, meta$Title), 14 | note = vers, 15 | url = url, 16 | publisher = "Bayesian-Inference.com", 17 | textVersion = sprintf("%s (%s). %s: %s. Bayesian-Inference.com. %s. [%s]", author, year, meta$Package, meta$Title, vers, url), 18 | header = "Technical documentation:" 19 | ) 20 | 21 | citEntry(entry="Manual", 22 | author = author, 23 | year = year, 24 | title = vign <- "Bayesian Inference", 25 | note = vers, 26 | url = url, 27 | publisher = "Bayesian-Inference.com", 28 | textVersion = sprintf("%s (%s). %s. Bayesian-Inference.com. %s. [%s]", author, year, vign, vers, url), 29 | header = "Vignette(s):" 30 | ) 31 | 32 | citEntry(entry="Manual", 33 | author = author, 34 | year = year, 35 | title = vign <- "LaplacesDemon Examples", 36 | note = vers, 37 | url = url, 38 | publisher = "Bayesian-Inference.com", 39 | textVersion = sprintf("%s (%s). %s. Bayesian-Inference.com. %s. [%s]", author, year, vign, vers, url), 40 | header = "Vignette(s):" 41 | ) 42 | 43 | citEntry(entry="Manual", 44 | author = author, 45 | year = year, 46 | title = vign <- "LaplacesDemon Tutorial", 47 | note = vers, 48 | url = url, 49 | publisher = "Bayesian-Inference.com", 50 | textVersion = sprintf("%s (%s). %s. Bayesian-Inference.com. %s. [%s]", author, year, vign, vers, url), 51 | header = "Vignette(s):" 52 | ) 53 | -------------------------------------------------------------------------------- /man/Gelfand.Diagnostic.Rd: -------------------------------------------------------------------------------- 1 | \name{Gelfand.Diagnostic} 2 | \alias{Gelfand.Diagnostic} 3 | \title{Gelfand's Convergence Diagnostic} 4 | \description{ 5 | Gelfand et al. (1990) proposed a convergence diagnostic for Markov 6 | chains. 
The \code{Gelfand.Diagnostic} function is an interpretation of 7 | Gelfand's ``thick felt-tip pen'' MCMC convergence diagnostic. This 8 | diagnostic plots a series of kernel density plots at \eqn{k} 9 | intervals of cumulative samples. Given a vector of \eqn{S} samples 10 | from a marginal posterior distribution, \eqn{\theta}{theta}, multiple 11 | kernel density lines are plotted together, where each includes samples 12 | from a different interval. It is assumed that \code{\link{burnin}} 13 | iterations have been discarded. 14 | 15 | Gelfand et al. (1990) assert that convergence is violated when the 16 | plotted lines are farther apart than the width of a thick, felt-tip 17 | pen. This depends on the size of the plot, and, of course, the 18 | pen. The estimated width of a ``thick felt-tip pen'' is included as a 19 | black, vertical line. The pen in \code{Gelfand.Diagnostic} is included 20 | for historical reasons. This diagnostic requires numerous samples. 21 | } 22 | \usage{ 23 | Gelfand.Diagnostic(x, k=3, pen=FALSE) 24 | } 25 | \arguments{ 26 | \item{x}{This required argument is a vector of marginal posterior 27 | samples, such as selected from the output of 28 | \code{\link{LaplacesDemon}}.} 29 | \item{k}{This argument specifies the number \eqn{k} of kernel 30 | density plots given cumulative intervals of samples. This argument 31 | defaults to \eqn{k=3}{k=3}.} 32 | \item{pen}{Logical. This argument defaults to \code{pen=FALSE}. When 33 | \code{pen=TRUE}, the thick felt-tip pen is included as a black, 34 | vertical line.} 35 | } 36 | \value{ 37 | The \code{Gelfand.Diagnostic} returns a plot. 38 | } 39 | \references{ 40 | Gelfand, A.E., Hills, S., Racine-Poon, A., and Smith, 41 | A.F.M. (1990). "Illustration of Bayesian Inference in Normal Data 42 | Models Using Gibbs Sampling". \emph{Journal of the American 43 | Statistical Association}, 85, p. 972--985. 44 | } 45 | \author{Statisticat, LLC. \email{software@bayesian-inference.com}} 46 | \seealso{ 47 | \code{\link{burnin}} and 48 | \code{\link{LaplacesDemon}}. 49 | } 50 | \examples{ 51 | library(LaplacesDemon) 52 | x <- rnorm(1000) 53 | Gelfand.Diagnostic(x) 54 | } 55 | \keyword{Diagnostic} 56 | \keyword{MCMC} 57 | -------------------------------------------------------------------------------- /man/Geweke.Diagnostic.Rd: -------------------------------------------------------------------------------- 1 | \name{Geweke.Diagnostic} 2 | \alias{Geweke.Diagnostic} 3 | \title{Geweke's Convergence Diagnostic} 4 | \description{ 5 | Geweke (1992) proposed a convergence diagnostic for Markov chains. 6 | This diagnostic is based on a test for equality of the means of the 7 | first and last part of a Markov chain (by default the first 10\% and 8 | the last 50\%). If the samples are drawn from a stationary 9 | distribution of the chain, then the two means are equal and Geweke's 10 | statistic has an asymptotically standard normal distribution. 11 | 12 | The test statistic is a standard Z-score: the difference between the 13 | two sample means divided by its estimated standard error. The 14 | standard error is estimated from the spectral density at zero, and so 15 | takes into account any autocorrelation. 16 | 17 | The Z-score is calculated under the assumption that the two parts of 18 | the chain are asymptotically independent. 19 | 20 | The \code{Geweke.Diagnostic} is a univariate diagnostic that is 21 | usually applied to each marginal posterior distribution. A 22 | multivariate form is not included. 
By chance alone due to multiple 23 | independent tests, 5\% of the marginal posterior distributions should 24 | appear non-stationary when stationarity exists. Assessing multivariate 25 | convergence is difficult. 26 | } 27 | \usage{Geweke.Diagnostic(x)} 28 | \arguments{ 29 | \item{x}{This required argument is a vector or matrix of posterior 30 | samples, such as from the output of the \code{\link{LaplacesDemon}} 31 | function. Each column vector in a matrix is a chain to be assessed. 32 | A minimum of 100 samples are required.} 33 | } 34 | \details{ 35 | The \code{Geweke.Diagnostic} is essentially the same as the 36 | \code{geweke.diag} function in the \code{coda} package, but 37 | programmed to accept a simple vector or matrix, so it does not require 38 | an \code{mcmc} object. 39 | } 40 | \value{ 41 | A vector is returned, in which each element is a Z-score for a test of 42 | equality that compares the means of the first and last parts of each 43 | chain supplied as \code{x} to \code{Geweke.Diagnostic}. 44 | } 45 | \references{ 46 | Geweke, J. (1992). "Evaluating the Accuracy of Sampling-Based 47 | Approaches to Calculating Posterior Moments". In 48 | \emph{Bayesian Statistics 4} (ed JM Bernardo, JO Berger, AP Dawid, 49 | and AFM Smith). Clarendon Press, Oxford, UK. 50 | } 51 | \seealso{ 52 | \code{\link{burnin}}, 53 | \code{\link{is.stationary}}, and 54 | \code{\link{LaplacesDemon}} 55 | } 56 | \examples{ 57 | library(LaplacesDemon) 58 | Geweke.Diagnostic(rnorm(100)) 59 | Geweke.Diagnostic(matrix(rnorm(100),10,10)) 60 | } 61 | \keyword{Diagnostic} 62 | \keyword{MCMC} 63 | \keyword{Stationarity} 64 | \keyword{Utility} -------------------------------------------------------------------------------- /man/Hangartner.Diagnostic.Rd: -------------------------------------------------------------------------------- 1 | \name{Hangartner.Diagnostic} 2 | \alias{Hangartner.Diagnostic} 3 | \title{Hangartner's Convergence Diagnostic} 4 | \description{ 5 | Hangartner et al. (2011) proposed a convergence diagnostic for 6 | discrete Markov chains. A simple Pearson's Chi-squared test for 7 | two or more non-overlapping periods of a discrete Markov chain 8 | is a reliable diagnostic of convergence. It does not rely upon the 9 | estimation of spectral density, on suspect normality assumptions, or 10 | determining overdispersion within a small number of outcomes, all of 11 | which can be problematic with discrete measures. A discrete Markov 12 | chain is split into two or more non-overlapping windows. Two windows 13 | are recommended, and results may be sensitive to the number of 14 | selected windows, as well as sample size. As such, a user may try 15 | several window configurations before concluding there is no evidence 16 | of non-convergence. 17 | 18 | As the number of discrete events in the sample space increases, this 19 | diagnostic becomes less appropriate and standard diagnostics become 20 | more appropriate. 21 | } 22 | \usage{ 23 | Hangartner.Diagnostic(x, J=2) 24 | } 25 | \arguments{ 26 | \item{x}{This required argument is a vector of marginal posterior 27 | samples of a discrete Markov chain, such as selected from the output 28 | of \code{\link{LaplacesDemon}}.} 29 | \item{J}{This argument specifies the number \eqn{J} of windows to be 30 | used, and defaults to \eqn{J=2}.} 31 | } 32 | \value{ 33 | The \code{Hangartner.Diagnostic} returns an object of class 34 | \code{hangartner}, including the output from a Pearson's Chi-squared 35 | test. 
A frequentist p-value less than or equal to 0.05 is usually 36 | considered to be indicative of non-convergence. 37 | } 38 | \references{ 39 | Hangartner, D., Gill, J., and Cranmer, S. (2011). "An MCMC Diagnostic 40 | for Purely Discrete Parameters". Paper presented at the annual meeting 41 | of the Southern Political Science Association, Hotel InterContinental, 42 | New Orleans, Louisiana. 43 | } 44 | \author{Statisticat, LLC. \email{software@bayesian-inference.com}} 45 | \seealso{ 46 | \code{\link{LaplacesDemon}} and 47 | \code{\link{TransitionMatrix}}. 48 | } 49 | \examples{ 50 | library(LaplacesDemon) 51 | N <- 1000 52 | K <- 3 53 | x <- rcat(N, rep(1/K,K)) 54 | hd <- Hangartner.Diagnostic(x, J=2) 55 | hd 56 | } 57 | \keyword{Diagnostic} 58 | \keyword{MCMC} 59 | -------------------------------------------------------------------------------- /man/KS.Diagnostic.Rd: -------------------------------------------------------------------------------- 1 | \name{KS.Diagnostic} 2 | \alias{KS.Diagnostic} 3 | \title{Kolmogorov-Smirnov Convergence Diagnostic} 4 | \description{ 5 | The Kolmogorov-Smirnov test is a nonparametric test of stationarity 6 | that has been applied as an MCMC diagnostic (Brooks et al., 2003), such 7 | as to the posterior samples from the \code{\link{LaplacesDemon}} 8 | function. The first and last halves of the chain are compared. This 9 | test assumes IID samples, an assumption that is violated in the presence of 10 | autocorrelation. 11 | 12 | The \code{KS.Diagnostic} is a univariate diagnostic that is usually 13 | applied to each marginal posterior distribution. A multivariate form 14 | is not included. By chance alone due to multiple independent tests, 15 | 5\% of the marginal posterior distributions should appear 16 | non-stationary when stationarity exists. Assessing multivariate 17 | convergence is difficult. 18 | } 19 | \usage{ 20 | KS.Diagnostic(x) 21 | } 22 | \arguments{ 23 | \item{x}{This is a vector of posterior samples for which a 24 | Kolmogorov-Smirnov test will be applied that compares the first and 25 | last halves for stationarity.} 26 | } 27 | \details{ 28 | There are two main approaches to using the Kolmogorov-Smirnov test as 29 | an MCMC diagnostic. There is a version of the test that has 30 | been adapted to account for autocorrelation (and is not included 31 | here). Otherwise, the chain is thinned enough that autocorrelation is 32 | not present or is minimized, in which case the two-sample 33 | Kolmogorov-Smirnov test is applied. The CDFs of both samples are 34 | compared. The \code{ks.test} function in base R is used. 35 | 36 | The advantage of the Kolmogorov-Smirnov test is that it is easier and 37 | faster to calculate than autocorrelation-adjusted alternatives. The disadvantages are that autocorrelation biases 38 | results, and the test is generally biased on the conservative side 39 | (indicating stationarity when it should not). 40 | } 41 | \value{ 42 | The \code{KS.Diagnostic} function returns a frequentist p-value, and 43 | stationarity is indicated when p > 0.05. 44 | } 45 | \references{ 46 | Brooks, S.P., Giudici, P., and Philippe, A. (2003). "Nonparametric 47 | Convergence Assessment for MCMC Model Selection". \emph{Journal of 48 | Computational and Graphical Statistics}, 12(1), p. 1--22. 49 | } 50 | \author{Statisticat, LLC. \email{software@bayesian-inference.com}} 51 | \seealso{ 52 | \code{\link{is.stationary}}, 53 | \code{\link{ks.test}}, and 54 | \code{\link{LaplacesDemon}}.
55 | } 56 | \examples{ 57 | library(LaplacesDemon) 58 | x <- rnorm(1000) 59 | KS.Diagnostic(x) 60 | } 61 | \keyword{Diagnostic} 62 | \keyword{MCMC} 63 | -------------------------------------------------------------------------------- /man/Thin.Rd: -------------------------------------------------------------------------------- 1 | \name{Thin} 2 | \alias{Thin} 3 | \title{Thin} 4 | \description{ 5 | This function reduces the number of posterior samples by retaining 6 | every \eqn{k}th sample. 7 | } 8 | \usage{ 9 | Thin(x, By=1) 10 | } 11 | \arguments{ 12 | \item{x}{This is a vector or matrix of posterior samples to be 13 | thinned.} 14 | \item{By}{This argument specifies that every \eqn{k}th posterior 15 | sample will be retained, and \code{By} defaults to 1, meaning that 16 | thinning will not occur, because every sample will be retained.} 17 | } 18 | \details{ 19 | A thinned matrix of posterior samples is a matrix in which only every 20 | \eqn{k}th posterior sample (or row) in the original matrix is 21 | retained. The act of thinning posterior samples has been criticized as 22 | throwing away information, which is correct. However, it is common 23 | practice to thin posterior samples, usually associated with MCMC such 24 | as \code{\link{LaplacesDemon}}, for two reasons. First, each chain 25 | (column vector) in a matrix of posterior samples probably has higher 26 | autocorrelation than desired, which reduces the effective sample size 27 | (see \code{\link{ESS}} for more information). Therefore, a thinned 28 | matrix usually contains posterior samples that are closer to 29 | independent than an un-thinned matrix. The other reason for the 30 | popularity of thinning is that a user may not have the 31 | random-access memory (RAM) to store large, un-thinned matrices of 32 | posterior samples. 33 | 34 | \code{\link{LaplacesDemon}} and \code{\link{PMC}} automatically thin 35 | posterior samples, deviance samples, and samples of monitored 36 | variables, according to their user-specified thinning arguments. The 37 | \code{Thin} function is made available here, should it be necessary to 38 | thin posterior samples outside of objects of class \code{demonoid} or 39 | \code{pmc}. 40 | } 41 | \value{ 42 | The \code{Thin} function returns a thinned matrix. When \code{x} is a 43 | vector, the returned object is a matrix with 1 column. 44 | } 45 | \author{Statisticat, LLC. \email{software@bayesian-inference.com}} 46 | \seealso{ 47 | \code{\link{ESS}}, 48 | \code{\link{LaplacesDemon}}, and 49 | \code{\link{PMC}}. 50 | } 51 | \examples{ 52 | library(LaplacesDemon) 53 | x <- matrix(runif(100), 10, 10) 54 | Thin(x, By=2) 55 | } 56 | \keyword{Diagnostic} 57 | \keyword{MCMC} 58 | \keyword{Monte Carlo} 59 | \keyword{Utility} 60 | -------------------------------------------------------------------------------- /man/as.covar.Rd: -------------------------------------------------------------------------------- 1 | \name{as.covar} 2 | \alias{as.covar} 3 | \title{Proposal Covariance} 4 | \description{ 5 | This function returns the most recent covariance matrix or a list of 6 | blocking covariance matrices from an object of class \code{demonoid}, 7 | the most recent covariance matrix from \code{iterquad}, 8 | \code{laplace}, or \code{vb}, the most recent covariance matrix from 9 | the chain with the lowest deviance in an object of class 10 | \code{demonoid.hpc}, and a number of covariance matrices of an object 11 | of class \code{pmc} equal to the number of mixture components.
The 12 | returned covariance matrix or matrices are intended to be the initial 13 | proposal covariance matrix or matrices for future updates. A variance 14 | vector from an object of class \code{demonoid} or \code{demonoid.hpc} 15 | is converted to a covariance matrix. 16 | } 17 | \usage{ 18 | as.covar(x) 19 | } 20 | \arguments{ 21 | \item{x}{This is an object of class \code{demonoid}, 22 | \code{demonoid.hpc}, \code{iterquad}, \code{laplace}, \code{pmc}, or 23 | \code{vb}.} 24 | } 25 | \details{ 26 | Unless it is known beforehand how many iterations are required for 27 | iterative quadrature, Laplace Approximation, or Variational Bayes to 28 | converge, MCMC to appear converged, or the normalized perplexity to 29 | stabilize in PMC, multiple updates are necessary. An additional 30 | update, however, should not begin with the same proposal covariance 31 | matrix or matrices as the original update, because it will have to 32 | repeat the work already accomplished. For this reason, the 33 | \code{as.covar} function may be used at the end of an update to change 34 | the previous proposal covariance matrix or matrices to the latest values. 35 | 36 | The \code{as.covar} function is most helpful with objects of class 37 | \code{pmc} that have multiple mixture components. For more 38 | information, see \code{\link{PMC}}. 39 | } 40 | \value{ 41 | The returned value is a matrix (or array in the case of PMC with 42 | multiple mixture components) of the latest observed or proposal 43 | covariance, which may now be used as an initial proposal covariance 44 | matrix or matrices for a future update. 45 | } 46 | \author{Statisticat, LLC \email{software@bayesian-inference.com}} 47 | \seealso{ 48 | \code{\link{IterativeQuadrature}}, 49 | \code{\link{LaplaceApproximation}}, 50 | \code{\link{LaplacesDemon}}, 51 | \code{\link{LaplacesDemon.hpc}}, 52 | \code{\link{PMC}}, and 53 | \code{\link{VariationalBayes}}. 54 | } 55 | \keyword{Utility} -------------------------------------------------------------------------------- /man/as.initial.values.Rd: -------------------------------------------------------------------------------- 1 | \name{as.initial.values} 2 | \alias{as.initial.values} 3 | \title{Initial Values} 4 | \description{ 5 | This function returns the most recent posterior samples from an object 6 | of class \code{demonoid} or \code{demonoid.hpc}, the posterior means 7 | of an object of class \code{iterquad}, the posterior modes of an 8 | object of class \code{laplace} or \code{vb}, the posterior means of an 9 | object of class \code{pmc} with one mixture component, or the latest 10 | means of the importance sampling distribution of an object of class 11 | \code{pmc} with multiple mixture components. The returned values are 12 | intended to be the initial values for future updates. 13 | } 14 | \usage{ 15 | as.initial.values(x) 16 | } 17 | \arguments{ 18 | \item{x}{This is an object of class \code{demonoid}, 19 | \code{demonoid.hpc}, \code{iterquad}, \code{laplace}, \code{pmc}, or 20 | \code{vb}.} 21 | } 22 | \details{ 23 | Unless it is known beforehand how many iterations are required for 24 | \code{\link{IterativeQuadrature}}, \code{\link{LaplaceApproximation}}, 25 | or \code{\link{VariationalBayes}} to converge, MCMC in 26 | \code{\link{LaplacesDemon}} to appear converged, or the normalized 27 | perplexity to stabilize in \code{\link{PMC}}, multiple updates are 28 | necessary.
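For example, two updates may be chained together as in the following hypothetical sketch, in which \code{Model}, \code{MyData}, and all settings are placeholders rather than recommendations:
\preformatted{
# First update from naive initial values, then a second update that
# resumes from the latest values and proposal covariance of the first.
Fit1 <- LaplacesDemon(Model, Data=MyData, Initial.Values=rep(0,5),
     Iterations=20000, Status=1000, Thinning=10)
Fit2 <- LaplacesDemon(Model, Data=MyData,
     Initial.Values=as.initial.values(Fit1), Covar=as.covar(Fit1),
     Iterations=20000, Status=1000, Thinning=10)
}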
An additional update, however, should not begin with the 29 | same initial values as the original update, because it will have to 30 | repeat the work already accomplished. For this reason, the 31 | \code{as.initial.values} function may be used at the end of an update 32 | to change the previous initial values to the latest values. 33 | 34 | When using \code{\link{LaplacesDemon.hpc}}, \code{as.initial.values} 35 | should be used when the output is of class \code{demonoid.hpc}, before 36 | the \code{\link{Combine}} function is used to combine the multiple 37 | chains for use with \code{\link{Consort}} and other functions, because 38 | the \code{\link{Combine}} function returns an object of class 39 | \code{demonoid}, and the number of chains will become unknown. The 40 | \code{\link{Consort}} function may suggest using 41 | \code{as.initial.values}, but when applied to an object of class 42 | \code{demonoid}, it will return the latest values as if there were 43 | only one chain. 44 | } 45 | \value{ 46 | The returned value is a vector (or matrix in the case of an object of 47 | class \code{demonoid.hpc}, or \code{pmc} with multiple mixture 48 | components) of the latest values, which may now be used as initial 49 | values for a future update. 50 | } 51 | \author{Statisticat, LLC. \email{software@bayesian-inference.com}} 52 | \seealso{ 53 | \code{\link{Combine}}, 54 | \code{\link{IterativeQuadrature}}, 55 | \code{\link{LaplaceApproximation}}, 56 | \code{\link{LaplacesDemon}}, 57 | \code{\link{LaplacesDemon.hpc}}, 58 | \code{\link{PMC}}, and 59 | \code{\link{VariationalBayes}}. 60 | } 61 | \keyword{Initial Values} -------------------------------------------------------------------------------- /man/as.ppc.Rd: -------------------------------------------------------------------------------- 1 | \name{as.ppc} 2 | \alias{as.ppc} 3 | \title{As Posterior Predictive Check} 4 | \description{ 5 | This function converts an object of class \code{demonoid.val} to an 6 | object of class \code{demonoid.ppc}. 7 | } 8 | \usage{ 9 | as.ppc(x, set=3) 10 | } 11 | \arguments{ 12 | \item{x}{This is an object of class \code{demonoid.val}.} 13 | \item{set}{This is an integer that indicates which list component is 14 | to be used. When \code{set=1}, the modeled data set is used. When 15 | \code{set=2}, the validation data set is used. When \code{set=3}, 16 | both data sets are used.} 17 | } 18 | \details{ 19 | After using the \code{\link{Validate}} function for holdout 20 | validation, it is often suggested to perform posterior predictive 21 | checks. The \code{as.ppc} function converts the output object of 22 | \code{\link{Validate}}, which is an object of class 23 | \code{demonoid.val}, to an object of class \code{demonoid.ppc}. The 24 | returned object is the same as if it were created with the 25 | \code{\link{predict.demonoid}} function, rather than the 26 | \code{\link{Validate}} function. 27 | 28 | After this conversion, the user may use posterior predictive checks, 29 | as usual, with the \code{\link{summary.demonoid.ppc}} function. 30 | } 31 | \value{ 32 | The returned object is an object of class \code{demonoid.ppc}. 33 | } 34 | \author{Statisticat, LLC. \email{software@bayesian-inference.com}} 35 | \seealso{ 36 | \code{\link{predict.demonoid}}, 37 | \code{\link{summary.demonoid.ppc}}, and 38 | \code{\link{Validate}}. 
39 | } 40 | \keyword{Initial Values} -------------------------------------------------------------------------------- /man/burnin.Rd: -------------------------------------------------------------------------------- 1 | \name{burnin} 2 | \alias{burnin} 3 | \title{Burn-in} 4 | \description{ 5 | The \code{burnin} function estimates the duration of burn-in in 6 | iterations for one or more Markov chains. ``Burn-in'' refers to the 7 | initial portion of a Markov chain that is not stationary and is still 8 | affected by its initial value. 9 | } 10 | \usage{ 11 | burnin(x, method="BMK") 12 | } 13 | \arguments{ 14 | \item{x}{This is a vector or matrix of posterior samples for which 15 | the number of burn-in iterations will be estimated.} 16 | \item{method}{This argument defaults to \code{"BMK"}, in which case 17 | stationarity is estimated with the \code{\link{BMK.Diagnostic}} 18 | function. Alternatively, the \code{\link{Geweke.Diagnostic}} 19 | function may be used when \code{method="Geweke"} or the 20 | \code{\link{KS.Diagnostic}} function may be used when 21 | \code{method="KS"}.} 22 | } 23 | \details{ 24 | Burn-in is a colloquial term for the initial iterations in a Markov 25 | chain prior to its convergence to the target distribution. During 26 | burn-in, the chain is not considered to have ``forgotten'' its initial 27 | value. 28 | 29 | Burn-in is not a theoretical part of MCMC, but its use is the norm 30 | because of the need to limit the number of posterior samples due to 31 | computer memory. If burn-in were retained rather than discarded, then 32 | more posterior samples would have to be retained. If a Markov chain 33 | starts anywhere close to the center of its target distribution, then 34 | burn-in iterations do not need to be discarded. 35 | 36 | In the \code{\link{LaplacesDemon}} function, stationarity is estimated 37 | with the \code{\link{BMK.Diagnostic}} function on all thinned 38 | posterior samples of each chain, beginning at cumulative 10\% intervals 39 | relative to the total number of samples, and the lowest number at 40 | which all chains are stationary is considered the burn-in. 41 | 42 | The term, ``burn-in'', originated in electronics regarding the initial 43 | testing of component failure at the factory to eliminate initial 44 | failures (Geyer, 2011). Although ``burn-in'' has been the standard term 45 | for decades, some are referring to these as ``warm-up'' iterations. 46 | } 47 | \value{ 48 | The \code{burnin} function returns a vector equal in length to the 49 | number of MCMC chains in \code{x}, and each element indicates the 50 | maximum iteration in burn-in. 51 | } 52 | \references{ 53 | Geyer, C.J. (2011). "Introduction to Markov Chain Monte Carlo". In 54 | S Brooks, A Gelman, G Jones, and XL Meng (eds.), "Handbook of 55 | Markov Chain Monte Carlo", p. 3--48. Chapman and Hall, Boca Raton, FL. 56 | } 57 | \author{Statisticat, LLC. \email{software@bayesian-inference.com}} 58 | \seealso{ 59 | \code{\link{BMK.Diagnostic}}, 60 | \code{\link{deburn}}, 61 | \code{\link{Geweke.Diagnostic}}, 62 | \code{\link{KS.Diagnostic}}, and 63 | \code{\link{LaplacesDemon}}.
64 | } 65 | \examples{ 66 | library(LaplacesDemon) 67 | x <- rnorm(1000) 68 | burnin(x) 69 | } 70 | \keyword{Diagnostic} 71 | \keyword{Stationarity} 72 | \keyword{Utility} 73 | -------------------------------------------------------------------------------- /man/cond.plot.Rd: -------------------------------------------------------------------------------- 1 | \name{cond.plot} 2 | \alias{cond.plot} 3 | \title{Conditional Plots} 4 | \description{ 5 | This function provides several styles of conditional plots with base 6 | graphics. 7 | } 8 | \usage{ 9 | cond.plot(x, y, z, Style="smoothscatter") 10 | } 11 | \arguments{ 12 | \item{x}{This required argument accepts a numeric vector.} 13 | \item{y}{This argument accepts a numeric vector, and is only used with 14 | some styles.} 15 | \item{z}{This required argument accepts a discrete vector.} 16 | \item{Style}{This argument specifies the style of plot, and accepts 17 | "boxplot", "densover" (density overlay), "hist", "scatter", or 18 | "smoothscatter".} 19 | } 20 | \details{ 21 | The \code{cond.plot} function provides simple conditional plots with 22 | base graphics. All plot styles are conditional upon \code{z}. Up to 23 | nine conditional plots are produced in a panel. 24 | 25 | Plots include: 26 | 27 | boxplot: y ~ x | z 28 | densover: f(x | z) 29 | hist: x | z 30 | scatter: x, y | z 31 | smoothscatter: x, y | z 32 | 33 | The \code{cond.plot} function is not intended to try to compete with 34 | some of the better graphics packages, but merely to provide simple 35 | functionality. 36 | } 37 | \value{ 38 | Conditional plots are returned. 39 | } 40 | \author{Statisticat, LLC. \email{software@bayesian-inference.com}} 41 | \seealso{ 42 | \code{\link{joint.density.plot}} and 43 | \code{\link{joint.pr.plot}}. 44 | } 45 | \examples{ 46 | library(LaplacesDemon) 47 | x <- rnorm(1000) 48 | y <- runif(1000) 49 | z <- rcat(1000, rep(1/4,4)) 50 | cond.plot(x, y, z, Style="smoothscatter") 51 | } 52 | \keyword{Plot} -------------------------------------------------------------------------------- /man/data.demonchoice.Rd: -------------------------------------------------------------------------------- 1 | \name{data.demonchoice} 2 | \alias{demonchoice} 3 | \title{Demon Choice Data Set} 4 | \usage{data(demonchoice)} 5 | \description{ 6 | This data set is for discrete choice models and consists of the choice 7 | of commuting route to school: arterial, two-lane, or freeway. There 8 | were 151 Pennsylvania commuters who started from a residential complex 9 | in State College, PA, and commuted to downtown State College. 10 | } 11 | \format{ 12 | This data frame contains 151 rows of individual choices and 9 13 | columns. The following data dictionary describes each variable or 14 | column. 15 | \describe{ 16 | \item{\code{Choice}}{This is the route choice: four-lane arterial 17 | (35 MPH speed limit), two-lane highway (35 MPH speed limit, with 18 | one lane in each direction), or a limited-access four-lane freeway 19 | (55 MPH speed limit).} 20 | \item{\code{HH.Income}}{This is an ordinal variable of annual 21 | household income of the commuter in USD.
There are four 22 | categories: 1 is less than 20,000 USD, 2 is 20,000-29,999 USD, 3 23 | is 30,000-39,999 USD, and 4 is 40,000 USD or greater.} 24 | \item{\code{Vehicle.Age}}{This is the age in years of the vehicle of 25 | the commuter.} 26 | \item{\code{Stop.Signs.Arterial}}{This is the number of stop signs 27 | along the arterial route.} 28 | \item{\code{Stop.Signs.Two.Lane}}{This is the number of stop signs 29 | along the two-lane route.} 30 | \item{\code{Stop.Signs.Freeway}}{This is the number of stop signs 31 | along the freeway route.} 32 | \item{\code{Distance.Arterial}}{This is the distance in miles of the 33 | arterial route.} 34 | \item{\code{Distance.Two.Lane}}{This is the distance in miles of 35 | the two-lane route.} 36 | \item{\code{Distance.Freeway}}{This is the distance in miles of 37 | the freeway route.} 38 | } 39 | } 40 | \source{Washington, S., Congdon, P., Karlaftis, M., and Mannering, 41 | F. (2009). "Bayesian Multinomial Logit: Theory and Route Choice 42 | Example". \emph{Transportation Research Record}, 2136, p. 28--36.} 43 | \keyword{datasets} 44 | -------------------------------------------------------------------------------- /man/data.demonsessions.Rd: -------------------------------------------------------------------------------- 1 | \name{data.demonsessions} 2 | \alias{demonsessions} 3 | \title{Demon Sessions Data Set} 4 | \usage{data(demonsessions)} 5 | \description{ 6 | These are the monthly numbers of user sessions at 7 | \url{https://web.archive.org/web/20141224051720/http://www.bayesian-inference.com/index} by continent. Additional data 8 | may be added in the future. 9 | } 10 | \format{ 11 | This data frame contains 26 rows (with row names) and 6 columns. The 12 | following data dictionary describes each variable or column. 13 | \describe{ 14 | \item{\code{Africa}}{This is the African continent.} 15 | \item{\code{Americas}}{This is North and South America.} 16 | \item{\code{Asia}}{This is the Asian continent.} 17 | \item{\code{Europe}}{This is Europe as a continent.} 18 | \item{\code{Oceania}}{This is Oceania, such as Australia.} 19 | \item{\code{Not.Set}}{This includes sessions in which the continent 20 | was not set, or is unknown.} 21 | } 22 | } 23 | \source{\url{https://web.archive.org/web/20141224051720/http://www.bayesian-inference.com/index}} 24 | \keyword{datasets} 25 | -------------------------------------------------------------------------------- /man/data.demonsnacks.Rd: -------------------------------------------------------------------------------- 1 | \name{data.demonsnacks} 2 | \alias{demonsnacks} 3 | \title{Demon Snacks Data Set} 4 | \usage{data(demonsnacks)} 5 | \description{ 6 | Late one night, after witnessing Laplace's Demon in action, I followed 7 | him back to what seemed to be his lair. Minutes later, he left again. 8 | I snuck inside and saw something labeled 'Demon Snacks'. Hurriedly, I 9 | recorded the 39 items, each with a name and 10 nutritional attributes. 10 | } 11 | \format{ 12 | This data frame contains 39 rows (with row names) and 10 columns. The 13 | following data dictionary describes each variable or column.
14 | \describe{ 15 | \item{\code{Serving.Size}}{This is serving size in grams.} 16 | \item{\code{Calories}}{This is the number of calories.} 17 | \item{\code{Total.Fat}}{This is total fat in grams.} 18 | \item{\code{Saturated.Fat}}{This is saturated fat in grams.} 19 | \item{\code{Cholesterol}}{This is cholesterol in milligrams.} 20 | \item{\code{Sodium}}{This is sodium in milligrams.} 21 | \item{\code{Total.Carbohydrate}}{This is the total carbohydrates in grams.} 22 | \item{\code{Dietary.Fiber}}{This is dietary fiber in grams.} 23 | \item{\code{Sugars}}{This is sugar in grams.} 24 | \item{\code{Protein}}{This is protein in grams.} 25 | } 26 | } 27 | \source{This data was obtained from the lair of Laplace's Demon!} 28 | \keyword{datasets} 29 | -------------------------------------------------------------------------------- /man/de.Finetti.Game.Rd: -------------------------------------------------------------------------------- 1 | \name{de.Finetti.Game} 2 | \alias{de.Finetti.Game} 3 | \title{de Finetti's Game} 4 | \description{ 5 | The \code{de.Finetti.Game} function estimates the interval of a 6 | subjective probability regarding a possible event in the near future. 7 | } 8 | \usage{ 9 | de.Finetti.Game(width) 10 | } 11 | \arguments{ 12 | \item{width}{This is the maximum acceptable width of the interval for 13 | the returned subjective probability. The user must specify a width 14 | between 0 and 1.} 15 | } 16 | \details{ 17 | This function is a variation on the game introduced by de Finetti, 18 | who was one of the main developers of subjective probability, 19 | along with Ramsey and Savage. In the original context, de Finetti 20 | proposed a gamble regarding whether there was life on Mars one billion years ago. 21 | 22 | The frequentist interpretation of probability defines the probability 23 | of an event as the limit of its relative frequency in a large number 24 | of trials. Frequentist inference is undefined, for example, when there 25 | are no trials from which to calculate a probability. By defining 26 | probability relative to frequencies of physical events, frequentists 27 | attempt to objectify probability. However, de Finetti asserts that the 28 | frequentist (or objective) interpretation always reduces to a subjective 29 | interpretation of probability, because probability is a human 30 | construct and does not exist independently of humans in 31 | nature. Therefore, probability is a degree of belief, and is called 32 | subjective or personal probability. 33 | } 34 | \value{ 35 | The \code{de.Finetti.Game} function returns a vector of length 36 | two. The respective elements are the lower and upper bounds of the 37 | subjective probability of the participant regarding the possible event 38 | in the near future. 39 | } 40 | \author{Statisticat, LLC. \email{software@bayesian-inference.com}} 41 | \seealso{ 42 | \code{\link{elicit}} 43 | } 44 | \keyword{Personal Probability} 45 | \keyword{Subjective Probability} 46 | \keyword{Utility} 47 | -------------------------------------------------------------------------------- /man/deburn.Rd: -------------------------------------------------------------------------------- 1 | \name{deburn} 2 | \alias{deburn} 3 | \title{De-Burn} 4 | \description{ 5 | The \code{deburn} function discards a user-specified number 6 | of burn-in iterations from an object of class \code{demonoid}.
7 | } 8 | \usage{ 9 | deburn(x, BurnIn=0) 10 | } 11 | \arguments{ 12 | \item{x}{This is an object of class \code{demonoid}.} 13 | \item{BurnIn}{This argument defaults to \code{BurnIn=0}, and accepts 14 | an integer that indicates the number of iterations to discard as 15 | burn-in.} 16 | } 17 | \details{ 18 | Documentation for the \code{\link{burnin}} function provides an 19 | introduction to the concept of burn-in as it relates to Markov chains. 20 | 21 | The \code{deburn} function discards a number of the first posterior 22 | samples, as specified by the \code{BurnIn} argument. Stationarity is 23 | not checked, because it is assumed the user has a reason for using the 24 | \code{deburn} function, rather than using the results from the object 25 | of class \code{demonoid}. Therefore, the posterior samples in 26 | \code{Posterior1} and \code{Posterior2} are identical, as are 27 | \code{Summary1} and \code{Summary2}. 28 | } 29 | \value{ 30 | The \code{deburn} function returns an object of class \code{demonoid}. 31 | } 32 | \author{Statisticat, LLC. \email{software@bayesian-inference.com}} 33 | \seealso{ 34 | \code{\link{burnin}} and 35 | \code{\link{LaplacesDemon}}. 36 | } 37 | \examples{ 38 | ### Assuming the user has Fit which is an object of class demonoid: 39 | #library(LaplacesDemon) 40 | #Fit2 <- deburn(Fit, BurnIn=100) 41 | } 42 | \keyword{Stationarity} 43 | \keyword{Utility} 44 | -------------------------------------------------------------------------------- /man/dist.Bernoulli.Rd: -------------------------------------------------------------------------------- 1 | \name{dist.Bernoulli} 2 | \alias{dbern} 3 | \alias{pbern} 4 | \alias{qbern} 5 | \alias{rbern} 6 | \title{Bernoulli Distribution} 7 | \description{ 8 | These functions provide the density, distribution function, quantile 9 | function, and random generation for the Bernoulli distribution. 10 | } 11 | \usage{ 12 | dbern(x, prob, log=FALSE) 13 | pbern(q, prob, lower.tail=TRUE, log.p=FALSE) 14 | qbern(p, prob, lower.tail=TRUE, log.p=FALSE) 15 | rbern(n, prob) 16 | } 17 | \arguments{ 18 | \item{x, q}{These are each a vector of quantiles.} 19 | \item{p}{This is a vector of probabilities.} 20 | \item{n}{This is the number of observations. If \code{length(n) > 1}, 21 | then the length is taken to be the number required.} 22 | \item{prob}{This is the probability of success on each trial.} 23 | \item{log, log.p}{Logical. If \code{TRUE}, probabilities \eqn{p} are 24 | given as \eqn{\log(p)}{log(p)}.} 25 | \item{lower.tail}{Logical. If \code{TRUE} (default), probabilities 26 | are \eqn{Pr[X \le x]}{Pr[X <= x]}, otherwise, 27 | \eqn{Pr[X > x]}{Pr[X > x]}.} 28 | } 29 | \details{ 30 | \itemize{ 31 | \item Application: Discrete Univariate 32 | \item Density: \eqn{p(\theta) = {p}^{\theta} 33 | {(1-p)}^{1-\theta}}{p(theta) = p^theta (1-p)^(1-theta)}, \eqn{\theta = 0,1}{theta = 0,1} 34 | \item Inventor: Jacob Bernoulli 35 | \item Notation 1: \eqn{\theta \sim \mathcal{BERN}(p)}{theta ~ Bern(p)} 36 | \item Notation 2: \eqn{p(\theta) = \mathcal{BERN}(\theta | p)}{p(theta) = Bern(theta | p)} 37 | \item Parameter 1: probability parameter \eqn{0 \le p \le 1}{0 <= p <= 38 | 1} 39 | \item Mean: \eqn{E(\theta) = p}{E(theta) = p} 40 | \item Variance: \eqn{var(\theta) = p (1-p)}{var(theta) = p(1-p)} 41 | \item Mode: \eqn{mode(\theta) = 0}{mode(theta) = 0} if \eqn{p < 0.5}{p < 0.5}, \eqn{mode(\theta) = 1}{mode(theta) = 1} if \eqn{p > 0.5}{p > 0.5}, and both 0 and 1 if \eqn{p = 0.5}{p = 0.5} 42 | } 43 | 44 | The Bernoulli distribution is a binomial distribution with 45 | \eqn{n=1}{n=1}, and one instance of a Bernoulli distribution is called a 46 | Bernoulli trial.
One coin flip is a Bernoulli trial, for example. The 47 | categorical distribution is the generalization of the Bernoulli 48 | distribution for variables with more than two discrete values. The 49 | beta distribution is the conjugate prior distribution of the Bernoulli 50 | distribution. The geometric distribution is the distribution of the number of Bernoulli 51 | trials needed to get one success. 52 | } 53 | \value{ 54 | \code{dbern} gives the density, 55 | \code{pbern} gives the distribution function, 56 | \code{qbern} gives the quantile function, and 57 | \code{rbern} generates random deviates. 58 | } 59 | \seealso{ 60 | \code{\link{dbinom}} 61 | } 62 | \examples{ 63 | library(LaplacesDemon) 64 | dbern(1, 0.7) 65 | rbern(10, 0.5) 66 | } 67 | \keyword{Distribution} 68 | -------------------------------------------------------------------------------- /man/dist.Categorical.Rd: -------------------------------------------------------------------------------- 1 | \name{dist.Categorical} 2 | \alias{dcat} 3 | \alias{qcat} 4 | \alias{rcat} 5 | \title{Categorical Distribution} 6 | \description{ 7 | These functions provide the density, quantile function, and random 8 | generation for the categorical distribution with probabilities parameter \eqn{p}. 9 | } 10 | \usage{ 11 | dcat(x, p, log=FALSE) 12 | qcat(pr, p, lower.tail=TRUE, log.pr=FALSE) 13 | rcat(n, p) 14 | } 15 | \arguments{ 16 | \item{x}{This is a vector of discrete data with \eqn{k} discrete 17 | categories, and is of length \eqn{n}. This function also accepts 18 | \eqn{x} after it has been converted to an \eqn{n \times k}{n x k} 19 | indicator matrix, such as with the \code{as.indicator.matrix} function.} 20 | \item{n}{This is the number of observations, which must be a positive 21 | integer that has length 1. When \code{p} is supplied to \code{rcat} 22 | as a matrix, \code{n} must equal the number of rows in \code{p}.} 23 | \item{p}{This is a vector of length \eqn{k} or \eqn{n \times k}{n x k} 24 | matrix of probabilities. The \code{qcat} function requires a 25 | vector.} 26 | \item{pr}{This is a vector of probabilities, or log-probabilities.} 27 | \item{log}{Logical. If \code{log=TRUE}, then the logarithm of the 28 | density is returned.} 29 | \item{log.pr}{Logical. If \code{TRUE}, probabilities \eqn{pr} are 30 | given as \eqn{\log(pr)}{log(pr)}.} 31 | \item{lower.tail}{Logical. If \code{TRUE} (default), probabilities 32 | are \eqn{Pr[X \le x]}{Pr[X <= x]}, otherwise, 33 | \eqn{Pr[X > x]}{Pr[X > x]}.} 34 | } 35 | \details{ 36 | \itemize{ 37 | \item Application: Discrete Univariate 38 | \item Density: \eqn{p(\theta) = \sum \theta p}{p(theta) = Sum (theta * p)} 39 | \item Inventor: Unknown (to me, anyway) 40 | \item Notation 1: \eqn{\theta \sim \mathcal{CAT}(p)}{theta ~ CAT(p)} 41 | \item Notation 2: \eqn{p(\theta) = \mathcal{CAT}(\theta | p)}{p(theta) = CAT(theta | p)} 42 | \item Parameter 1: probabilities \eqn{p} 43 | \item Mean: \eqn{E(\theta)}{E(theta)} = Unknown 44 | \item Variance: \eqn{var(\theta)}{var(theta)} = Unknown 45 | \item Mode: \eqn{mode(\theta)}{mode(theta)} = Unknown 46 | } 47 | Also called the discrete distribution, the categorical distribution 48 | describes the result of a random event that can take on one of \eqn{k} 49 | possible outcomes, with the probability \eqn{p} of each outcome 50 | separately specified. The vector \eqn{p} of probabilities for each 51 | event must sum to 1. The categorical distribution is often used, for 52 | example, in the multinomial logit model. The conjugate prior is the 53 | Dirichlet distribution.
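As a quick numerical illustration, the empirical frequencies of draws from \code{rcat} should approximate the probability vector \eqn{p} (the values below are arbitrary):
\preformatted{
library(LaplacesDemon)
p <- c(0.1, 0.3, 0.6)
x <- rcat(10000, p)
table(x) / length(x)   # should be close to p
}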
54 | } 55 | \value{ 56 | \code{dcat} gives the density, \code{qcat} gives the quantile function, and 57 | \code{rcat} generates random deviates. 58 | } 59 | \author{Statisticat, LLC. \email{software@bayesian-inference.com}} 60 | \seealso{ 61 | \code{\link{as.indicator.matrix}}, 62 | \code{\link{ddirichlet}}, and 63 | \code{\link{dmultinom}}. 64 | } 65 | \examples{ 66 | library(LaplacesDemon) 67 | dcat(x=1, p=c(0.3,0.3,0.4)) 68 | rcat(n=10, p=c(0.1,0.3,0.6)) 69 | } 70 | \keyword{Distribution} 71 | -------------------------------------------------------------------------------- /man/dist.HalfCauchy.Rd: -------------------------------------------------------------------------------- 1 | \name{dist.HalfCauchy} 2 | \alias{dhalfcauchy} 3 | \alias{phalfcauchy} 4 | \alias{qhalfcauchy} 5 | \alias{rhalfcauchy} 6 | \title{Half-Cauchy Distribution} 7 | \description{ 8 | These functions provide the density, distribution function, quantile 9 | function, and random generation for the half-Cauchy distribution. 10 | } 11 | \usage{ 12 | dhalfcauchy(x, scale=25, log=FALSE) 13 | phalfcauchy(q, scale=25) 14 | qhalfcauchy(p, scale=25) 15 | rhalfcauchy(n, scale=25) 16 | } 17 | \arguments{ 18 | \item{x,q}{These are each a vector of quantiles.} 19 | \item{p}{This is a vector of probabilities.} 20 | \item{n}{This is the number of observations, which must be a positive 21 | integer that has length 1.} 22 | \item{scale}{This is the scale parameter \eqn{\alpha}{alpha}, which 23 | must be positive.} 24 | \item{log}{Logical. If \code{log=TRUE}, then the logarithm of the 25 | density is returned.} 26 | } 27 | \details{ 28 | \itemize{ 29 | \item Application: Continuous Univariate 30 | \item Density: \eqn{p(\theta) = \frac{2 \alpha}{\pi(\theta^2 + 31 | \alpha^2)}, \quad \theta \ge 0}{p(theta) = 2alpha / pi(theta^2 + alpha^2), theta >= 0} 32 | \item Inventor: Derived from Cauchy 33 | \item Notation 1: \eqn{\theta \sim \mathcal{HC}(\alpha)}{theta ~ HC(alpha)} 34 | \item Notation 2: \eqn{p(\theta) = \mathcal{HC}(\theta | \alpha)}{p(theta) = HC(theta | alpha)} 35 | \item Parameter 1: scale parameter \eqn{\alpha > 0}{alpha > 0} 36 | \item Mean: \eqn{E(\theta)}{E(theta)} = does not exist 37 | \item Variance: \eqn{var(\theta)}{var(theta)} = does not exist 38 | \item Mode: \eqn{mode(\theta) = 0}{mode(theta) = 0} 39 | } 40 | 41 | The half-Cauchy distribution with scale \eqn{\alpha=25}{alpha=25} is a 42 | recommended, default, weakly informative prior distribution for a scale 43 | parameter. Otherwise, the scale, \eqn{\alpha}{alpha}, is recommended to 44 | be set to be just a little larger than the expected standard deviation, 45 | as a weakly informative prior distribution on a standard deviation 46 | parameter. 47 | 48 | The Cauchy distribution is known as a pathological distribution because 49 | its mean and variance are undefined, and it does not satisfy the central 50 | limit theorem. 51 | } 52 | \value{ 53 | \code{dhalfcauchy} gives the density, 54 | \code{phalfcauchy} gives the distribution function, 55 | \code{qhalfcauchy} gives the quantile function, and 56 | \code{rhalfcauchy} generates random deviates.
57 | } 58 | \seealso{ 59 | \code{\link{dcauchy}} 60 | } 61 | \examples{ 62 | library(LaplacesDemon) 63 | x <- dhalfcauchy(1,25) 64 | x <- phalfcauchy(1,25) 65 | x <- qhalfcauchy(0.5,25) 66 | x <- rhalfcauchy(1,25) 67 | 68 | #Plot Probability Functions 69 | x <- seq(from=0, to=20, by=0.1) 70 | plot(x, dhalfcauchy(x,1), ylim=c(0,1), type="l", main="Probability Function", 71 | ylab="density", col="red") 72 | lines(x, dhalfcauchy(x,5), type="l", col="green") 73 | lines(x, dhalfcauchy(x,10), type="l", col="blue") 74 | legend(2, 0.9, expression(alpha==1, alpha==5, alpha==10), 75 | lty=c(1,1,1), col=c("red","green","blue")) 76 | } 77 | \keyword{Distribution} -------------------------------------------------------------------------------- /man/dist.HalfNorm.Rd: -------------------------------------------------------------------------------- 1 | \name{dist.HalfNormal} 2 | \alias{dhalfnorm} 3 | \alias{phalfnorm} 4 | \alias{qhalfnorm} 5 | \alias{rhalfnorm} 6 | \title{Half-Normal Distribution} 7 | \description{ 8 | These functions provide the density, distribution function, quantile 9 | function, and random generation for the half-normal distribution. 10 | } 11 | \usage{ 12 | dhalfnorm(x, scale=sqrt(pi/2), log=FALSE) 13 | phalfnorm(q, scale=sqrt(pi/2), lower.tail=TRUE, log.p=FALSE) 14 | qhalfnorm(p, scale=sqrt(pi/2), lower.tail=TRUE, log.p=FALSE) 15 | rhalfnorm(n, scale=sqrt(pi/2)) 16 | } 17 | \arguments{ 18 | \item{x,q}{These are each a vector of quantiles.} 19 | \item{p}{This is a vector of probabilities.} 20 | \item{n}{This is the number of observations, which must be a positive 21 | integer that has length 1.} 22 | \item{scale}{This is the scale parameter \eqn{\sigma}{sigma}, which 23 | must be positive.} 24 | \item{log,log.p}{Logical. If \code{log=TRUE}, then the logarithm of the 25 | density or result is returned.} 26 | \item{lower.tail}{Logical. If \code{lower.tail=TRUE} (default), 27 | probabilities are \eqn{Pr[X \le x]}{Pr[X <= x]}, otherwise, 28 | \eqn{Pr[X > x]}{Pr[X > x]}.} 29 | } 30 | \details{ 31 | \itemize{ 32 | \item Application: Continuous Univariate 33 | \item Density: \eqn{p(\theta) = \frac{2 \sigma}{\pi} 34 | \exp(-\frac{\theta^2 \sigma^2}{\pi}), \quad \theta \ge 0}{p(theta) = 2*sigma/pi e^-(theta^2*sigma^2/pi), theta >= 0} 35 | \item Inventor: Derived from the normal or Gaussian 36 | \item Notation 1: \eqn{\theta \sim \mathcal{HN}(\sigma)}{theta ~ HALF-N(sigma)} 37 | \item Notation 2: \eqn{p(\theta) = \mathcal{HN}(\theta | \sigma)}{p(theta) = HN(theta | sigma)} 38 | \item Parameter 1: scale parameter \eqn{\sigma > 0}{sigma > 0} 39 | \item Mean: \eqn{E(\theta) = \frac{1}{\sigma}}{E(theta) = 1 / sigma} 40 | \item Variance: \eqn{var(\theta) = \frac{\pi-2}{2 \sigma^2}}{var(theta) = (pi-2)/(2*sigma^2)} 41 | \item Mode: \eqn{mode(\theta) = 0}{mode(theta) = 0} 42 | } 43 | 44 | The half-normal distribution is recommended as a weakly informative prior 45 | distribution for a scale parameter, and may be useful as an alternative 46 | to the half-Cauchy, half-t, or vague gamma distributions. 47 | } 48 | \value{ 49 | \code{dhalfnorm} gives the density, 50 | \code{phalfnorm} gives the distribution function, 51 | \code{qhalfnorm} gives the quantile function, and 52 | \code{rhalfnorm} generates random deviates. 53 | } 54 | \seealso{ 55 | \code{\link{dnorm}}, 56 | \code{\link{dnormp}}, and 57 | \code{\link{dnormv}}.
58 | } 59 | \examples{ 60 | library(LaplacesDemon) 61 | x <- dhalfnorm(1) 62 | x <- phalfnorm(1) 63 | x <- qhalfnorm(0.5) 64 | x <- rhalfnorm(10) 65 | 66 | #Plot Probability Functions 67 | x <- seq(from=0.1, to=20, by=0.1) 68 | plot(x, dhalfnorm(x,0.1), ylim=c(0,1), type="l", main="Probability Function", 69 | ylab="density", col="red") 70 | lines(x, dhalfnorm(x,0.5), type="l", col="green") 71 | lines(x, dhalfnorm(x,1), type="l", col="blue") 72 | legend(2, 0.9, expression(sigma==0.1, sigma==0.5, sigma==1), 73 | lty=c(1,1,1), col=c("red","green","blue")) 74 | } 75 | \keyword{Distribution} -------------------------------------------------------------------------------- /man/dist.Inverse.Beta.Rd: -------------------------------------------------------------------------------- 1 | \name{dist.Inverse.Beta} 2 | \alias{dinvbeta} 3 | \alias{rinvbeta} 4 | \title{Inverse Beta Distribution} 5 | \description{ 6 | This is the density function and random generation from the inverse 7 | beta distribution. 8 | } 9 | \usage{ 10 | dinvbeta(x, a, b, log=FALSE) 11 | rinvbeta(n, a, b) 12 | } 13 | \arguments{ 14 | \item{n}{This is the number of draws from the distribution.} 15 | \item{x}{This is a location vector at which to evaluate density.} 16 | \item{a}{This is the scalar shape parameter \eqn{\alpha}{alpha}.} 17 | \item{b}{This is the scalar shape parameter \eqn{\beta}{beta}.} 18 | \item{log}{Logical. If \code{log=TRUE}, then the logarithm of the 19 | density is returned.} 20 | } 21 | \details{ 22 | \itemize{ 23 | \item Application: Continuous Univariate 24 | \item Density: \eqn{p(\theta) = \frac{\theta^{\alpha - 1} (1 + 25 | \theta)^{-\alpha - \beta}}{\beta(\alpha, \beta)}}{(theta^(alpha - 26 | 1) * (1 + theta)^(-alpha - beta)) / beta(alpha, beta)} 27 | \item Inventor: Dubey (1970) 28 | \item Notation 1: \eqn{\theta \sim \mathcal{B}^{-1}(\alpha, \beta)}{theta ~ B^-1(alpha, beta)} 29 | \item Notation 2: \eqn{p(\theta) = \mathcal{B}^{-1}(\theta | \alpha, 30 | \beta)}{p(theta) = B^-1(theta | alpha, beta)} 31 | \item Parameter 1: shape \eqn{\alpha > 0}{alpha > 0} 32 | \item Parameter 2: shape \eqn{\beta > 0}{beta > 0} 33 | \item Mean: \eqn{E(\theta) = \frac{\alpha}{\beta - 1}}{E(theta) = 34 | alpha / (beta - 1)}, for \eqn{\beta > 1}{beta > 1} 35 | \item Variance: \eqn{var(\theta) = \frac{\alpha(\alpha + \beta - 36 | 1)}{(\beta - 1)^2 (\beta - 2)}}{var(theta) = (alpha * (alpha + beta 37 | - 1)) / ((beta - 1)^2 * (beta - 2))}, for \eqn{\beta > 2}{beta > 2} 38 | \item Mode: \eqn{mode(\theta) = \frac{\alpha - 1}{\beta + 39 | 1}}{mode(theta) = (alpha - 1) / (beta + 1)} 40 | } 41 | 42 | The inverse-beta, also called the beta prime distribution, applies to 43 | variables that are continuous and positive. The inverse beta is the 44 | conjugate prior distribution of a parameter of a Bernoulli distribution 45 | expressed in odds. 46 | 47 | The inverse-beta distribution has also been extended to the generalized 48 | beta prime distribution, though it is not (yet) included here. 49 | 50 | } 51 | \value{ 52 | \code{dinvbeta} gives the density and 53 | \code{rinvbeta} generates random deviates. 54 | } 55 | \references{ 56 | Dubey, S.D. (1970). "Compound Gamma, Beta and F Distributions". 57 | \emph{Metrika}, 16, p. 27--31.
58 | } 59 | \seealso{ 60 | \code{\link{dbeta}} 61 | } 62 | \examples{ 63 | library(LaplacesDemon) 64 | x <- dinvbeta(5:10, 2, 3) 65 | x <- rinvbeta(10, 2, 3) 66 | 67 | #Plot Probability Functions 68 | x <- seq(from=0.1, to=20, by=0.1) 69 | plot(x, dinvbeta(x,2,2), ylim=c(0,1), type="l", main="Probability Function", 70 | ylab="density", col="red") 71 | lines(x, dinvbeta(x,2,3), type="l", col="green") 72 | lines(x, dinvbeta(x,3,2), type="l", col="blue") 73 | legend(2, 0.9, expression(paste(alpha==2, ", ", beta==2), 74 | paste(alpha==2, ", ", beta==3), paste(alpha==3, ", ", beta==2)), 75 | lty=c(1,1,1), col=c("red","green","blue")) 76 | } 77 | \keyword{Distribution} 78 | 79 | -------------------------------------------------------------------------------- /man/dist.Inverse.ChiSquare.Rd: -------------------------------------------------------------------------------- 1 | \name{dist.Inverse.ChiSquare} 2 | \alias{dinvchisq} 3 | \alias{rinvchisq} 4 | \title{(Scaled) Inverse Chi-Squared Distribution} 5 | \description{ 6 | This is the density function and random generation for the (scaled) 7 | inverse chi-squared distribution. 8 | } 9 | \usage{ 10 | dinvchisq(x, df, scale, log=FALSE) 11 | rinvchisq(n, df, scale=1/df) 12 | } 13 | \arguments{ 14 | \item{x}{This is a vector of quantiles.} 15 | \item{n}{This is the number of observations. If \code{length(n) > 1}, 16 | then the length is taken to be the number required.} 17 | \item{df}{This is the degrees of freedom parameter, usually 18 | represented as \eqn{\nu}{nu}.} 19 | \item{scale}{This is the scale parameter, usually represented as 20 | \eqn{\lambda}{lambda}.} 21 | \item{log}{Logical. If \code{log=TRUE}, then the logarithm of the 22 | density is returned.} 23 | } 24 | \details{ 25 | \itemize{ 26 | \item Application: Continuous Univariate 27 | \item Density: \deqn{p(\theta) = \frac{{\nu/2}^{\nu/2}}{\Gamma(\nu/2)} 28 | \lambda^\nu \frac{1}{\theta}^{\nu/2+1} \exp(-\frac{\nu 29 | \lambda^2}{2\theta}), \theta \ge 0}{p(theta) = 30 | ((nu/2)^(nu/2))/(\Gamma(nu/2)) lambda^nu (1/theta)^((nu/2)+1) 31 | exp(-(nu lambda^2)/(2*theta)), theta >= 0} 32 | \item Inventor: Derived from the chi-squared distribution 33 | \item Notation 1: \eqn{\theta \sim \chi^{-2}(\nu, \lambda)}{theta ~ 34 | chi^(-2)(nu, lambda)} 35 | \item Notation 2: \eqn{p(\theta) = \chi^{-2}(\theta | \nu, 36 | \lambda)}{p(theta) = chi^(-2)(theta | nu, lambda)} 37 | \item Parameter 1: degrees of freedom parameter \eqn{\nu > 0}{nu > 0} 38 | \item Parameter 2: scale parameter \eqn{\lambda}{lambda} 39 | \item Mean: \eqn{E(\theta)}{E(theta)} = unknown 40 | \item Variance: \eqn{var(\theta)}{var(theta)} = unknown 41 | \item Mode: \eqn{mode(\theta) = }{mode(theta) = } 42 | } 43 | 44 | The inverse chi-squared distribution, also called the 45 | inverted chi-square distribution, is the multiplicative inverse of the 46 | chi-squared distribution. If \eqn{x} has the chi-squared distribution 47 | with \eqn{\nu}{nu} degrees of freedom, then \eqn{1 / x} has the 48 | inverse chi-squared distribution with \eqn{\nu}{nu} degrees of freedom, 49 | and \eqn{\nu / x}{nu / x} has the scaled inverse chi-squared distribution with 50 | \eqn{\nu}{nu} degrees of freedom and unit scale. 51 | 52 | These functions are similar to those in the \pkg{geoR} package. 53 | } 54 | \value{ 55 | \code{dinvchisq} gives the density and 56 | \code{rinvchisq} generates random deviates.
57 | } 58 | \seealso{ 59 | \code{\link{dchisq}} 60 | } 61 | \examples{ 62 | library(LaplacesDemon) 63 | x <- dinvchisq(1,1,1) 64 | x <- rinvchisq(10,1) 65 | 66 | #Plot Probability Functions 67 | x <- seq(from=0.1, to=5, by=0.01) 68 | plot(x, dinvchisq(x,0.5,1), ylim=c(0,1), type="l", main="Probability Function", 69 | ylab="density", col="red") 70 | lines(x, dinvchisq(x,1,1), type="l", col="green") 71 | lines(x, dinvchisq(x,5,1), type="l", col="blue") 72 | legend(3, 0.9, expression(paste(nu==0.5, ", ", lambda==1), 73 | paste(nu==1, ", ", lambda==1), paste(nu==5, ", ", lambda==1)), 74 | lty=c(1,1,1), col=c("red","green","blue")) 75 | } 76 | \keyword{Distribution} 77 | -------------------------------------------------------------------------------- /man/dist.Inverse.Matrix.Gamma.Rd: -------------------------------------------------------------------------------- 1 | \name{dist.Inverse.Matrix.Gamma} 2 | \alias{dinvmatrixgamma} 3 | \title{Inverse Matrix Gamma Distribution} 4 | \description{ 5 | This function provides the density for the inverse matrix gamma 6 | distribution. 7 | } 8 | \usage{ 9 | dinvmatrixgamma(X, alpha, beta, Psi, log=FALSE) 10 | } 11 | \arguments{ 12 | \item{X}{This is a \eqn{k \times k}{k x k} positive-definite 13 | covariance matrix.} 14 | \item{alpha}{This is a scalar shape parameter (the degrees of freedom), 15 | \eqn{\alpha}{alpha}.} 16 | \item{beta}{This is a scalar, positive-only scale parameter, 17 | \eqn{\beta}{beta}.} 18 | \item{Psi}{This is a \eqn{k \times k}{k x k} positive-definite scale 19 | matrix.} 20 | \item{log}{Logical. If \code{log=TRUE}, then the logarithm of the 21 | density is returned.} 22 | } 23 | \details{ 24 | \itemize{ 25 | \item Application: Continuous Multivariate Matrix 26 | \item Density: \eqn{p(\theta) = \frac{|\Psi|^\alpha}{\beta^{k 27 | \alpha} \Gamma_k(\alpha)} 28 | |\theta|^{-\alpha-(k+1)/2}\exp(tr(-\frac{1}{\beta}\Psi\theta^{-1}))}{p(theta) = {|Psi|^alpha / [beta^(k alpha) Gamma[k](alpha)]} |theta|^[-alpha-(k+1)/2] exp(tr(-(1/beta)Psi theta^(-1)))} 29 | \item Inventors: Unknown 30 | \item Notation 1: \eqn{\theta \sim \mathcal{IMG}_k(\alpha, \beta, 31 | \Psi)}{theta ~ IMG[k](alpha, beta, Psi)} 32 | \item Notation 2: \eqn{p(\theta) = \mathcal{IMG}_k(\theta | \alpha, 33 | \beta, \Psi)}{p(theta) = IMG[k](theta | alpha, beta, Psi)} 34 | \item Parameter 1: shape \eqn{\alpha > 2}{alpha > 2} 35 | \item Parameter 2: scale \eqn{\beta > 0}{beta > 0} 36 | \item Parameter 3: positive-definite \eqn{k \times k}{k x k} scale 37 | matrix \eqn{\Psi}{Psi} 38 | \item Mean: 39 | \item Variance: 40 | \item Mode: 41 | } 42 | 43 | The inverse matrix gamma (IMG), also called the inverse matrix-variate 44 | gamma, distribution is a generalization of the inverse gamma 45 | distribution to positive-definite matrices. It is a more general and 46 | flexible version of the inverse Wishart distribution 47 | (\code{\link{dinvwishart}}), and is a conjugate prior of the covariance 48 | matrix of a multivariate normal distribution (\code{\link{dmvn}}) and 49 | matrix normal distribution (\code{\link{dmatrixnorm}}). 50 | 51 | The compound distribution resulting from compounding a matrix normal 52 | with an inverse matrix gamma prior over the covariance matrix is a 53 | generalized matrix t-distribution. 54 | 55 | The inverse matrix gamma distribution is identical to the inverse 56 | Wishart distribution when \eqn{\alpha = \nu / 2}{alpha = nu / 2} and 57 | \eqn{\beta = 2}{beta = 2}. 58 | } 59 | \value{ 60 | \code{dinvmatrixgamma} gives the density. 
61 | } 62 | \author{Statisticat, LLC. \email{software@bayesian-inference.com}} 63 | \seealso{ 64 | \code{\link{dinvgamma}}, 65 | \code{\link{dmatrixnorm}}, 66 | \code{\link{dmvn}}, and 67 | \code{\link{dinvwishart}} 68 | } 69 | \examples{ 70 | library(LaplacesDemon) 71 | k <- 10 72 | dinvmatrixgamma(X=diag(k), alpha=(k+1)/2, beta=2, Psi=diag(k), log=TRUE) 73 | dinvwishart(Sigma=diag(k), nu=k+1, S=diag(k), log=TRUE) 74 | } 75 | \keyword{Distribution} -------------------------------------------------------------------------------- /man/dist.Matrix.Gamma.Rd: -------------------------------------------------------------------------------- 1 | \name{dist.Matrix.Gamma} 2 | \alias{dmatrixgamma} 3 | \title{Matrix Gamma Distribution} 4 | \description{ 5 | This function provides the density for the matrix gamma distribution. 6 | } 7 | \usage{ 8 | dmatrixgamma(X, alpha, beta, Sigma, log=FALSE) 9 | } 10 | \arguments{ 11 | \item{X}{This is a \eqn{k \times k}{k x k} positive-definite precision 12 | matrix.} 13 | \item{alpha}{This is a scalar shape parameter (the degrees of freedom), 14 | \eqn{\alpha}{alpha}.} 15 | \item{beta}{This is a scalar, positive-only scale parameter, 16 | \eqn{\beta}{beta}.} 17 | \item{Sigma}{This is a \eqn{k \times k}{k x k} positive-definite scale 18 | matrix.} 19 | \item{log}{Logical. If \code{log=TRUE}, then the logarithm of the 20 | density is returned.} 21 | } 22 | \details{ 23 | \itemize{ 24 | \item Application: Continuous Multivariate Matrix 25 | \item Density: \eqn{p(\theta) = \frac{|\Sigma|^{-\alpha}}{\beta^{k 26 | \alpha} \Gamma_k(\alpha)} 27 | |\theta|^{\alpha-(k+1)/2}\exp(tr(-\frac{1}{\beta}\Sigma^{-1}\theta))}{p(theta) = {|Sigma|^(-alpha) / [beta^(k alpha) Gamma[k](alpha)]} |theta|^[alpha-(k+1)/2] exp(tr(-(1/beta)Sigma^(-1)theta))} 28 | \item Inventors: Unknown 29 | \item Notation 1: \eqn{\theta \sim \mathcal{MG}_k(\alpha, \beta, 30 | \Sigma)}{theta ~ MG[k](alpha, beta, Sigma)} 31 | \item Notation 2: \eqn{p(\theta) = \mathcal{MG}_k(\theta | \alpha, 32 | \beta, \Sigma)}{p(theta) = MG[k](theta | alpha, beta, Sigma)} 33 | \item Parameter 1: shape \eqn{\alpha > 2}{alpha > 2} 34 | \item Parameter 2: scale \eqn{\beta > 0}{beta > 0} 35 | \item Parameter 3: positive-definite \eqn{k \times k}{k x k} scale matrix \eqn{\Sigma}{Sigma} 36 | \item Mean: 37 | \item Variance: 38 | \item Mode: 39 | } 40 | 41 | The matrix gamma (MG), also called the matrix-variate gamma, 42 | distribution is a generalization of the gamma distribution to 43 | positive-definite matrices. It is a more general and flexible version of 44 | the Wishart distribution (\code{\link{dwishart}}), and is a conjugate 45 | prior of the precision matrix of a multivariate normal distribution 46 | (\code{\link{dmvnp}}) and matrix normal distribution 47 | (\code{\link{dmatrixnorm}}). 48 | 49 | The compound distribution resulting from compounding a matrix normal 50 | with a matrix gamma prior over the precision matrix is a generalized 51 | matrix t-distribution. 52 | 53 | The matrix gamma distribution is identical to the Wishart distribution 54 | when \eqn{\alpha = \nu / 2}{alpha = nu / 2} and 55 | \eqn{\beta = 2}{beta = 2}. 56 | } 57 | \value{ 58 | \code{dmatrixgamma} gives the density. 59 | } 60 | \author{Statisticat, LLC.
\email{software@bayesian-inference.com}} 61 | \seealso{ 62 | \code{\link{dgamma}}, 63 | \code{\link{dmatrixnorm}}, 64 | \code{\link{dmvnp}}, and 65 | \code{\link{dwishart}} 66 | } 67 | \examples{ 68 | library(LaplacesDemon) 69 | k <- 10 70 | dmatrixgamma(X=diag(k), alpha=(k+1)/2, beta=2, Sigma=diag(k), log=TRUE) 71 | dwishart(Omega=diag(k), nu=k+1, S=diag(k), log=TRUE) 72 | } 73 | \keyword{Distribution} -------------------------------------------------------------------------------- /man/dist.Multivariate.Polya.Rd: -------------------------------------------------------------------------------- 1 | \name{dist.Multivariate.Polya} 2 | \alias{dmvpolya} 3 | \alias{rmvpolya} 4 | \title{Multivariate Polya Distribution} 5 | \description{ 6 | These functions provide the density and random number generation for 7 | the multivariate Polya distribution. 8 | } 9 | \usage{ 10 | dmvpolya(x, alpha, log=FALSE) 11 | rmvpolya(n, alpha) 12 | } 13 | \arguments{ 14 | \item{x}{This is data or parameters in the form of a vector of length 15 | \eqn{k}.} 16 | \item{n}{This is the number of random draws to take from the 17 | distribution.} 18 | \item{alpha}{This is shape vector \eqn{\alpha}{alpha} with length 19 | \eqn{k}.} 20 | \item{log}{Logical. If \code{log=TRUE}, then the logarithm of the 21 | density is returned.} 22 | } 23 | \details{ 24 | \itemize{ 25 | \item Application: Discrete Multivariate 26 | \item Density: \deqn{p(\theta) = \frac{N!}{\prod_k N_k!} \frac{(\sum_k 27 | \alpha_k - 1)!}{(\sum_k \theta_k + \sum_k \alpha_k - 1)!} 28 | \frac{\prod (\theta + \alpha - 1)!}{(\alpha - 1)!}}{p(theta) = (N! / 29 | prod(N[k]!)) * ((sum alpha[k] - 1)! / (sum theta[k] + sum alpha[k] - 30 | 1)!) * prod((theta + alpha - 1)! / (alpha - 1)!)} 31 | \item Inventor: George Polya (1887-1985) 32 | \item Notation 1: \eqn{\theta \sim \mathcal{MPO}(\alpha)}{theta ~ MPO(alpha)} 33 | \item Notation 2: \eqn{p(\theta) = \mathcal{MPO}(\theta | 34 | \alpha)}{p(theta) = MPO(theta | alpha)} 35 | \item Parameter 1: shape parameter vector \eqn{\alpha}{alpha} 36 | \item Mean: \eqn{E(\theta) = }{E(theta) = } 37 | \item Variance: \eqn{var(\theta) =}{var(theta) = } 38 | \item Mode: \eqn{mode(\theta) = }{mode(theta) = } 39 | } 40 | 41 | The multivariate Polya distribution is named after George Polya 42 | (1887-1985). It is also called the Dirichlet compound multinomial 43 | distribution or the Dirichlet-multinomial distribution. The multivariate 44 | Polya distribution is a compound probability distribution, where a 45 | probability vector \eqn{p} is drawn from a Dirichlet distribution with 46 | parameter vector \eqn{\alpha}{alpha}, and a set of \eqn{N} discrete 47 | samples is drawn from the categorical distribution with probability 48 | vector \eqn{p} and having \eqn{K} discrete categories. The compounding 49 | corresponds to a Polya urn scheme. In document classification, for 50 | example, the distribution is used to represent probabilities over word 51 | counts for different document types. The multivariate Polya distribution 52 | is a multivariate extension of the univariate Beta-binomial distribution. 53 | } 54 | \value{ 55 | \code{dmvpolya} gives the density and \code{rmvpolya} generates random 56 | deviates. 57 | } 58 | \author{Statisticat, LLC \email{software@bayesian-inference.com}} 59 | \seealso{ 60 | \code{\link{dcat}}, 61 | \code{\link{ddirichlet}}, and 62 | \code{\link{dmultinom}}.
63 | } 64 | \examples{ 65 | library(LaplacesDemon) 66 | dmvpolya(x=1:3, alpha=1:3, log=TRUE) 67 | x <- rmvpolya(1000, c(0.1,0.3,0.6)) 68 | } 69 | \keyword{Distribution} -------------------------------------------------------------------------------- /man/dist.YangBerger.Rd: -------------------------------------------------------------------------------- 1 | \name{dist.YangBerger} 2 | \alias{dyangberger} 3 | \alias{dyangbergerc} 4 | \title{Yang-Berger Distribution} 5 | \description{ 6 | This is the density function for the Yang-Berger prior distribution 7 | for a covariance matrix or precision matrix. 8 | } 9 | \usage{ 10 | dyangberger(x, log=FALSE) 11 | dyangbergerc(x, log=FALSE) 12 | } 13 | \arguments{ 14 | \item{x}{This is the \eqn{k \times k}{k x k} positive-definite 15 | covariance matrix or precision matrix for \code{dyangberger} or the 16 | Cholesky factor \eqn{\textbf{U}}{U} of the covariance matrix or 17 | precision matrix for \code{dyangbergerc}.} 18 | \item{log}{Logical. If \code{log=TRUE}, then the logarithm of the 19 | density is returned.} 20 | } 21 | \details{ 22 | \itemize{ 23 | \item Application: Continuous Multivariate 24 | \item Density: \eqn{p(\theta) = \frac{1}{|\theta|^{\prod (d_j - 25 | d_{j-1})}}}{p(theta) = 1 / |theta|^(prod (d[j] - d[j-1]))}, 26 | where \eqn{d} are increasing eigenvalues. See equation 13 in 27 | Yang and Berger (1994). 28 | \item Inventor: Yang and Berger (1994) 29 | \item Notation 1: \eqn{\theta \sim \mathcal{YB}}{theta ~ YB} 30 | \item Mean: 31 | \item Variance: 32 | \item Mode: 33 | } 34 | 35 | Yang and Berger (1994) derived a least informative prior (LIP) for a 36 | covariance matrix or precision matrix. The Yang-Berger (YB) distribution 37 | does not have any parameters. It is a reference prior for objective 38 | Bayesian inference. The Cholesky parameterization is also provided here. 39 | 40 | The YB prior distribution results in a proper posterior. It involves an 41 | eigendecomposition of the covariance matrix or precision matrix. It is 42 | difficult to interpret a model that uses the YB prior, due to a lack of 43 | intuition regarding the relationship between eigenvalues and 44 | correlations. 45 | 46 | Compared to Jeffreys prior for a covariance matrix, this reference prior 47 | encourages equal eigenvalues, and therefore results in a covariance 48 | matrix or precision matrix with a better shrinkage of its 49 | eigenstructure. 50 | } 51 | \value{ 52 | \code{dyangberger} and \code{dyangbergerc} give the density. 53 | } 54 | \references{ 55 | Yang, R. and Berger, J.O. (1994). "Estimation of a Covariance Matrix 56 | using the Reference Prior". \emph{Annals of Statistics}, 22, 57 | p. 1195--1211. 58 | } 59 | \seealso{ 60 | \code{\link{dinvwishart}} and 61 | \code{\link{dwishart}} 62 | } 63 | \examples{ 64 | library(LaplacesDemon) 65 | X <- matrix(c(1,0.8,0.8,1), 2, 2) 66 | dyangberger(X, log=TRUE) 67 | } 68 | \keyword{Distribution} 69 | -------------------------------------------------------------------------------- /man/hpc_server.Rd: -------------------------------------------------------------------------------- 1 | \name{hpc_server} 2 | \alias{server_Listening} 3 | \title{Server Listening} 4 | \description{ 5 | This function is not intended to be called directly by the user. It is 6 | an internal-only function to prevent cluster problems while using the 7 | \code{INCA} algorithm in the \code{LaplacesDemon.hpc} function.
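When \code{INCA} is selected, a call might look like the following hypothetical sketch, in which \code{Model}, \code{MyData}, and \code{Initial.Values} are placeholders and any algorithm-specific \code{Specs} are omitted; the internal server, and with it \code{server_Listening}, is started automatically:
\preformatted{
# Hypothetical sketch of parallel chains with interchain adaptation.
Fit <- LaplacesDemon.hpc(Model, Data=MyData,
     Initial.Values=Initial.Values, Iterations=10000,
     Algorithm="INCA", Chains=2, CPUs=2, Type="PSOCK")
}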
70 | } 71 | \keyword{Distribution} 72 | -------------------------------------------------------------------------------- /man/hpc_server.Rd: -------------------------------------------------------------------------------- 1 | \name{hpc_server} 2 | \alias{server_Listening} 3 | \title{Server Listening} 4 | \description{ 5 | This function is not intended to be called directly by the user. It is 6 | an internal-only function to prevent cluster problems while using the 7 | \code{INCA} algorithm in the \code{LaplacesDemon.hpc} function. 8 | } 9 | \usage{ 10 | server_Listening(n=2, port=19009) 11 | } 12 | \arguments{ 13 | \item{n}{This is the number of CPUs. For more information, see 14 | \code{\link{LaplacesDemon.hpc}}.} 15 | \item{port}{This is a port for server listening, and defaults to 16 | port \code{19009}.} 17 | } 18 | \details{ 19 | For the \code{INCA} algorithm, a server has been built into the 20 | \code{LaplacesDemon.hpc} function. The server exchanges information 21 | between processes, and has been designed to be portable. The 22 | \code{server_Listening} function is run as a separate process via the 23 | \code{system} function, when \code{INCA} is selected in 24 | \code{LaplacesDemon.hpc}. 25 | 26 | Socket connections and the \code{serialize} function are used as per 27 | the \pkg{snow} package to update a single proposal covariance matrix 28 | given all parallel chains. The sockets are opened/closed in each 29 | process with a small random sleep time to avoid collisions during 30 | connections to the internal server of 31 | \code{LaplacesDemon.hpc}. Blocking sockets are used to synchronize 32 | processes. 33 | } 34 | \author{Silvere Vialet-Chabrand \email{silvere@vialet-chabrand.com}} 35 | \seealso{ 36 | \code{\link{LaplacesDemon}} and 37 | \code{\link{LaplacesDemon.hpc}}. 38 | } 39 | \keyword{High Performance Computing} 40 | \keyword{Parallel Chains} -------------------------------------------------------------------------------- /man/is.appeased.Rd: -------------------------------------------------------------------------------- 1 | \name{is.appeased} 2 | \alias{is.appeased} 3 | \title{Appeased} 4 | \description{ 5 | This function returns \code{TRUE} if Laplace's Demon is appeased by the 6 | object of class \code{demonoid}, and \code{FALSE} otherwise. If 7 | appeased, then the object passes several tests that indicate potential 8 | convergence of the Markov chains. 9 | } 10 | \usage{ 11 | is.appeased(x) 12 | } 13 | \arguments{ 14 | \item{x}{This is an object of class \code{demonoid}.} 15 | } 16 | \details{ 17 | After updating a model with the \code{\link{LaplacesDemon}} function, 18 | an output object is created. The output object is of class 19 | \code{demonoid}. The object may be passed to the \code{\link{Consort}} 20 | function, which will apply several criteria regarding the potential 21 | convergence of its Markov chains. If all criteria are met, then 22 | Laplace's Demon is appeased. Otherwise, Laplace's Demon suggests R 23 | code to be copy/pasted and executed. The \code{\link{Consort}} 24 | function prints a large amount of information to the screen. The 25 | \code{is.appeased} function may be applied as an alternative, though 26 | it only informs the user as to whether or not Laplace's Demon was 27 | appeased, as \code{TRUE} or \code{FALSE}. 28 | } 29 | \value{ 30 | The \code{is.appeased} function returns a logical value indicating 31 | whether or not the supplied object passes several potential Markov 32 | chain convergence criteria. If the object passes all criteria, then 33 | Laplace's Demon is appeased, and the logical value returned is 34 | \code{TRUE}. 35 | } 36 | \author{Statisticat, LLC. \email{software@bayesian-inference.com}} 37 | \seealso{ 38 | \code{\link{Consort}} and 39 | \code{\link{LaplacesDemon}}.
40 | } 41 | \keyword{Diagnostic} 42 | \keyword{Utility} -------------------------------------------------------------------------------- /man/is.bayesian.Rd: -------------------------------------------------------------------------------- 1 | \name{is.bayesian} 2 | \alias{is.bayesian} 3 | \title{Logical Check of a Bayesian Model} 4 | \description{ 5 | This function provides a logical test of whether or not a \code{Model} 6 | specification function is Bayesian. 7 | } 8 | \usage{ 9 | is.bayesian(Model, Initial.Values, Data) 10 | } 11 | \arguments{ 12 | \item{Model}{This is a model specification function. For more 13 | information, see the \code{\link{LaplacesDemon}} function.} 14 | \item{Initial.Values}{This is a vector of initial values, or current 15 | parameter values. For more information, see the 16 | \code{\link{LaplacesDemon}} function.} 17 | \item{Data}{This is a list of data. For more information, see the 18 | \code{\link{LaplacesDemon}} function.} 19 | } 20 | \details{ 21 | This function tests whether or not a model is Bayesian by comparing 22 | the first two returned arguments: the logarithm of the unnormalized 23 | joint posterior density (\code{LP}) and deviance (\code{Dev}). The 24 | deviance (D) is 25 | 26 | \deqn{\mathrm{D} = -2 \mathrm{LL}}{D = -2 LL}, 27 | 28 | where LL is the log-likelihood. Consequently, 29 | 30 | \deqn{\mathrm{LL} = \mathrm{D} / -2}{LL = D / -2}, 31 | 32 | and LP is the sum of LL and the log of the prior densities. If LP = 33 | LL, then the model is not Bayesian, because prior densities are absent. 34 | } 35 | \value{ 36 | The \code{is.bayesian} function returns a logical value of \code{TRUE} 37 | when the model is Bayesian, and \code{FALSE} otherwise. 38 | } 39 | \author{Statisticat, LLC. \email{software@bayesian-inference.com}} 40 | \seealso{ 41 | \code{\link{LaplacesDemon}}. 42 | } 43 | \keyword{Utility} -------------------------------------------------------------------------------- /man/is.constant.Rd: -------------------------------------------------------------------------------- 1 | \name{is.constant} 2 | \alias{is.constant} 3 | \title{Logical Check of a Constant} 4 | \description{ 5 | This function provides a logical test of whether or not a vector is a 6 | constant. 7 | } 8 | \usage{ 9 | is.constant(x) 10 | } 11 | \arguments{ 12 | \item{x}{This is a vector.} 13 | } 14 | \details{ 15 | As opposed to a variable, a constant is a vector whose elements 16 | contain at most one unique value. 17 | } 18 | \value{ 19 | The \code{is.constant} function returns a logical result, reporting 20 | \code{TRUE} when a vector is a constant, or \code{FALSE} otherwise. 21 | } 22 | \author{Statisticat, LLC. \email{software@bayesian-inference.com}} 23 | \seealso{ 24 | \code{\link{unique}} 25 | } 26 | \examples{ 27 | library(LaplacesDemon) 28 | is.constant(rep(1,10)) #TRUE 29 | is.constant(1:10) #FALSE
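 30 | #Editorial addition: by the definition above, a zero-length vector 31 | #should also be a constant, since it contains no unique values. 32 | is.constant(numeric(0)) #TRUE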
33 | } 34 | \keyword{Utility} -------------------------------------------------------------------------------- /man/is.constrained.Rd: -------------------------------------------------------------------------------- 1 | \name{is.constrained} 2 | \alias{is.constrained} 3 | \title{Logical Check of Constraints} 4 | \description{ 5 | This function provides a logical test of constraints for each initial 6 | value or parameter for a model specification, given data. 7 | } 8 | \usage{ 9 | is.constrained(Model, Initial.Values, Data) 10 | } 11 | \arguments{ 12 | \item{Model}{This is a model specification function. For more 13 | information, see the \code{\link{LaplacesDemon}} function.} 14 | \item{Initial.Values}{This is a vector of initial values, or current 15 | parameter values. For more information, see the 16 | \code{\link{LaplacesDemon}} function.} 17 | \item{Data}{This is a list of data. For more information, see the 18 | \code{\link{LaplacesDemon}} function.} 19 | } 20 | \details{ 21 | This function is useful for testing whether or not initial values 22 | changed due to constraints when being passed through a \code{Model} 23 | specification function. If any initial value changes, then the 24 | constrained values that are output in the fifth component of the 25 | \code{Model} specification are suitable as initial values, not the 26 | tested initial values. 27 | 28 | A parameter may be constrained and this function may not discover the 29 | constraint, since the discovery depends on the initial values and 30 | whether or not they change as they are passed through the model. 31 | } 32 | \value{ 33 | The \code{is.constrained} function returns a logical vector, equal in 34 | length to the number of initial values. Each element receives 35 | \code{TRUE} if the corresponding initial value changed due to a 36 | constraint, or \code{FALSE} if it did not. 37 | } 38 | \author{Statisticat, LLC. \email{software@bayesian-inference.com}} 39 | \seealso{ 40 | \code{\link{LaplacesDemon}}. 41 | } 42 | \keyword{Utility} -------------------------------------------------------------------------------- /man/is.data.Rd: -------------------------------------------------------------------------------- 1 | \name{is.data} 2 | \alias{is.data} 3 | \title{Logical Check of Data} 4 | \description{ 5 | This function provides a logical test of whether or not a given list 6 | of data meets minimum criteria to be considered data for 7 | \code{\link{IterativeQuadrature}}, \code{\link{LaplaceApproximation}}, 8 | \code{\link{LaplacesDemon}}, \code{\link{PMC}}, or 9 | \code{\link{VariationalBayes}}. 10 | } 11 | \usage{ 12 | is.data(Data) 13 | } 14 | \arguments{ 15 | \item{Data}{This is a list of data. For more information, see the 16 | \code{\link{LaplacesDemon}} function.} 17 | } 18 | \details{ 19 | This function is useful for testing whether or not a list of data 20 | meets minimum criteria to be considered data in this package. The 21 | minimum requirements are that \code{Data} is a list, and it contains 22 | \code{mon.names} and \code{parm.names}. 23 | 24 | This function is not extensive. For example, it does not match the 25 | length of \code{parm.names} with the length of \code{Initial.Values}, 26 | or compare the length of \code{mon.names} to the number of monitored 27 | variables output from the \code{Model} specification 28 | function. Additional checks are conducted in 29 | \code{\link{IterativeQuadrature}}, \code{\link{LaplaceApproximation}}, 30 | \code{\link{LaplacesDemon}}, \code{\link{PMC}}, and 31 | \code{\link{VariationalBayes}}. 32 | } 33 | \value{ 34 | The \code{is.data} function returns a logical value. It returns 35 | \code{TRUE} if \code{Data} meets minimum requirements to be considered 36 | data in this package, and \code{FALSE} otherwise. 37 | } 38 | \author{Statisticat, LLC. \email{software@bayesian-inference.com}} 39 | \seealso{ 40 | \code{\link{IterativeQuadrature}}, 41 | \code{\link{LaplaceApproximation}}, 42 | \code{\link{LaplacesDemon}}, 43 | \code{\link{PMC}}, and 44 | \code{\link{VariationalBayes}}. 45 | }
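 46 | \examples{ 47 | library(LaplacesDemon) 48 | #A minimal (editorial) sketch of the stated criteria: a list that 49 | #contains both mon.names and parm.names should pass the check. 50 | Data <- list(mon.names="LP", parm.names=c("beta[1]","beta[2]")) 51 | is.data(Data) #TRUE 52 | }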
53 | \keyword{Utility} -------------------------------------------------------------------------------- /man/is.model.Rd: -------------------------------------------------------------------------------- 1 | \name{is.model} 2 | \alias{is.model} 3 | \title{Logical Check of a Model} 4 | \description{ 5 | This function provides a logical test of whether or not a \code{Model} 6 | specification function meets minimum requirements to be considered as 7 | such. 8 | } 9 | \usage{ 10 | is.model(Model, Initial.Values, Data) 11 | } 12 | \arguments{ 13 | \item{Model}{This is a model specification function. For more 14 | information, see the \code{\link{LaplacesDemon}} function.} 15 | \item{Initial.Values}{This is a vector of initial values, or current 16 | parameter values. For more information, see the 17 | \code{\link{LaplacesDemon}} function.} 18 | \item{Data}{This is a list of data. For more information, see the 19 | \code{\link{LaplacesDemon}} function.} 20 | } 21 | \details{ 22 | This function tests for minimum criteria for \code{Model} to be 23 | considered a model specification function. Specifically, it tests: 24 | 25 | \itemize{ 26 | \item \code{Model} must be a function 27 | \item \code{Model} must execute without errors 28 | \item \code{Model} must return a list 29 | \item \code{Model} must have five components in the list 30 | \item The first component must be named LP and have length 1 31 | \item The second component must be named Dev and have length 1 32 | \item The third component must be named Monitor 33 | \item The lengths of Monitor and mon.names must be equal 34 | \item The fourth component must be named yhat 35 | \item The fifth component must be named parm 36 | \item The lengths of parm and parm.names must be equal 37 | } 38 | 39 | This function is not extensive, and checks only for these minimum 40 | criteria. Additional checks are conducted in 41 | \code{\link{IterativeQuadrature}}, \code{\link{LaplaceApproximation}}, 42 | \code{\link{LaplacesDemon}}, \code{\link{PMC}}, and 43 | \code{\link{VariationalBayes}}. 44 | } 45 | \value{ 46 | The \code{is.model} function returns a logical value of \code{TRUE} 47 | when \code{Model} meets minimum criteria of a model specification 48 | function, and \code{FALSE} otherwise. 49 | } 50 | \author{Statisticat, LLC. \email{software@bayesian-inference.com}} 51 | \seealso{ 52 | \code{\link{IterativeQuadrature}}, 53 | \code{\link{LaplaceApproximation}}, 54 | \code{\link{LaplacesDemon}}, 55 | \code{\link{PMC}}, and 56 | \code{\link{VariationalBayes}}. 57 | } 58 | \keyword{Utility} -------------------------------------------------------------------------------- /man/is.stationary.Rd: -------------------------------------------------------------------------------- 1 | \name{is.stationary} 2 | \alias{is.stationary} 3 | \title{Logical Check of Stationarity} 4 | \description{ 5 | This function returns \code{TRUE} if the object is stationary 6 | according to the \code{\link{Geweke.Diagnostic}} function, and 7 | \code{FALSE} otherwise. 8 | } 9 | \usage{ 10 | is.stationary(x) 11 | } 12 | \arguments{ 13 | \item{x}{This is a vector, matrix, or object of class 14 | \code{demonoid}.} 15 | } 16 | \details{ 17 | Stationarity, here, refers to the limiting distribution in a Markov 18 | chain. A series of samples from a Markov chain, in which each sample 19 | is the result of an iteration of a Markov chain Monte Carlo (MCMC) 20 | algorithm, is analyzed for stationarity, meaning whether or not the 21 | samples trend or their moments change across iterations.
A stationary 22 | posterior distribution is an equilibrium distribution, and assessing 23 | stationarity is an important diagnostic toward inferring Markov chain 24 | convergence. 25 | 26 | In the cases of a matrix or an object of class \code{demonoid}, all 27 | Markov chains (as column vectors) must be stationary for 28 | \code{is.stationary} to return \code{TRUE}. 29 | 30 | Alternative ways to assess stationarity of chains are to use the 31 | \code{\link{BMK.Diagnostic}} or \code{\link{Heidelberger.Diagnostic}} 32 | functions. 33 | } 34 | \value{ 35 | \code{is.stationary} returns a logical value indicating whether or not 36 | the supplied object is stationary according to the 37 | \code{\link{Geweke.Diagnostic}} function. 38 | } 39 | \author{Statisticat, LLC. \email{software@bayesian-inference.com}} 40 | \seealso{ 41 | \code{\link{BMK.Diagnostic}}, 42 | \code{\link{Geweke.Diagnostic}}, 43 | \code{\link{Heidelberger.Diagnostic}}, and 44 | \code{\link{LaplacesDemon}}. 45 | } 46 | \examples{ 47 | library(LaplacesDemon) 48 | is.stationary(rnorm(100)) 49 | is.stationary(matrix(rnorm(100),10,10)) 50 | } 51 | \keyword{Diagnostic} 52 | \keyword{Stationarity} 53 | \keyword{Utility} -------------------------------------------------------------------------------- /man/joint.pr.plot.Rd: -------------------------------------------------------------------------------- 1 | \name{joint.pr.plot} 2 | \alias{joint.pr.plot} 3 | \title{Joint Probability Region Plot} 4 | \description{ 5 | Given two vectors, the \code{joint.pr.plot} function creates a 6 | scatterplot with ellipses of probability regions. 7 | } 8 | \usage{ 9 | joint.pr.plot(x, y, quantiles=c(0.25,0.50,0.75,0.95)) 10 | } 11 | \arguments{ 12 | \item{x}{This required argument is a vector.} 13 | \item{y}{This required argument is a vector.} 14 | \item{quantiles}{These are the quantiles for which probability regions 15 | are estimated with ellipses. The center of the ellipse is plotted by 16 | default. The 0.95 quantile creates a probability region that 17 | contains approximately 95\% of the data or samples of \code{x} and 18 | \code{y}. By default, four quantiles are included.} 19 | } 20 | \details{ 21 | A probability region is also commonly called a credible region. For 22 | more information on probability regions, see \code{\link{p.interval}}. 23 | 24 | Joint probability regions are plotted only for two variables, and the 25 | regions are estimated with functions modified from the \code{car} 26 | package. The internal ellipse functions assume bivariate normality. 27 | 28 | This function is often used to plot posterior distributions of 29 | samples, such as from the \code{\link{LaplacesDemon}} function. 30 | } 31 | \author{Statisticat, LLC. \email{software@bayesian-inference.com}} 32 | \seealso{ 33 | \code{\link{LaplacesDemon}} and 34 | \code{\link{p.interval}} 35 | } 36 | \examples{ 37 | library(LaplacesDemon) 38 | x <- rnorm(100) 39 | y <- rnorm(100) 40 | joint.pr.plot(x, y)
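 41 | #Editorial addition: the quantiles argument controls which probability 42 | #regions are drawn, e.g., only the 50\% and 95\% regions. 43 | joint.pr.plot(x, y, quantiles=c(0.50,0.95))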
44 | } 45 | \keyword{Plot} 46 | -------------------------------------------------------------------------------- /man/log-log.Rd: -------------------------------------------------------------------------------- 1 | \name{log-log} 2 | \alias{cloglog} 3 | \alias{invcloglog} 4 | \alias{invloglog} 5 | \alias{loglog} 6 | \title{The log-log and complementary log-log functions} 7 | \description{ 8 | The log-log and complementary log-log functions, as well as the 9 | inverse functions, are provided. 10 | } 11 | \usage{ 12 | cloglog(p) 13 | invcloglog(x) 14 | invloglog(x) 15 | loglog(p) 16 | } 17 | \arguments{ 18 | \item{x}{This is a vector of real values that will be transformed to 19 | the interval [0,1].} 20 | \item{p}{This is a vector of probabilities p in the interval [0,1] 21 | that will be transformed to the real line.} 22 | } 23 | \details{ 24 | The logit and probit links are symmetric, because the probabilities 25 | approach zero or one at the same rate. The log-log and complementary 26 | log-log links are asymmetric. Complementary log-log links approach 27 | zero slowly and one quickly. Log-log links approach zero quickly and 28 | one slowly. Either the log-log or the complementary log-log link may 29 | fit better than the logit or probit links, and they are frequently 30 | used when the probability of an event is small or large. A weighted 31 | mixture of the two links is also sometimes used. The logit link 32 | remains the most prevalent because logistic parameters can be 33 | interpreted as odds ratios. 34 | } 35 | \value{ 36 | \code{cloglog} returns \code{x}, 37 | \code{invcloglog} and \code{invloglog} return probability \code{p}, 38 | and \code{loglog} returns \code{x}. 39 | } 40 | \author{Statisticat, LLC. \email{software@bayesian-inference.com}} 41 | \seealso{\code{\link{LaplacesDemon}}} 42 | \examples{ 43 | library(LaplacesDemon) 44 | x <- -5:5 45 | p <- invloglog(x) 46 | x <- loglog(p)
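 47 | #Editorial addition: the complementary log-log pair behaves the same 48 | #way over a moderate range (extreme values saturate in floating point). 49 | p <- invcloglog(-2:2) 50 | cloglog(p) #recovers -2:2, up to numerical error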
51 | } 52 | \keyword{Complementary log-log} 53 | \keyword{Link Function} 54 | \keyword{log-log} 55 | \keyword{Transformation} 56 | -------------------------------------------------------------------------------- /man/plot.bmk.Rd: -------------------------------------------------------------------------------- 1 | \name{plot.bmk} 2 | \alias{plot.bmk} 3 | \title{Plot Hellinger Distances} 4 | \description{ 5 | This function plots Hellinger distances in an object of class \code{bmk}. 6 | } 7 | \usage{ 8 | \method{plot}{bmk}(x, col=colorRampPalette(c("black","red"))(100), 9 | title="", PDF=FALSE, Parms=NULL, \dots) 10 | } 11 | \arguments{ 12 | \item{x}{This required argument is an object of class \code{bmk}. See 13 | the \code{\link{BMK.Diagnostic}} function for more information.} 14 | \item{col}{This argument specifies the colors of the cells. By 15 | default, the \code{colorRampPalette} function colors large Hellinger 16 | distances as \code{red}, small as \code{black}, and provides 100 17 | color gradations.} 18 | \item{title}{This argument specifies the title of the plot, and the 19 | default does not include a title.} 20 | \item{PDF}{Logical. When \code{TRUE}, the plot is saved as a .pdf 21 | file.} 22 | \item{Parms}{ 23 | This argument accepts a vector of quoted strings to be matched for 24 | selecting parameters for plotting. This argument defaults to 25 | \code{NULL} and selects every parameter for plotting. Each quoted 26 | string is matched to one or more parameter names with the 27 | \code{grep} function. For example, if the user specifies 28 | \code{Parms=c("eta", "tau")}, and if the parameter names 29 | are beta[1], beta[2], eta[1], eta[2], and tau, then all parameters 30 | will be selected, because the string \code{eta} is within 31 | \code{beta}. Since \code{grep} is used, string matching uses 32 | regular expressions, so beware of meta-characters, though these are 33 | acceptable: ".", "[", and "]".} 34 | \item{\dots}{Additional arguments are unused.} 35 | } 36 | \details{ 37 | The \code{plot.bmk} function plots the Hellinger distances in an 38 | object of class \code{bmk}. This is useful for quickly finding 39 | portions of chains with large Hellinger distances, which indicate 40 | non-stationarity and non-convergence. 41 | } 42 | \seealso{\code{\link{BMK.Diagnostic}}} 43 | \examples{ 44 | library(LaplacesDemon) 45 | N <- 1000 #Number of posterior samples 46 | J <- 10 #Number of parameters 47 | Theta <- matrix(runif(N*J),N,J) 48 | colnames(Theta) <- paste("beta[", 1:J, "]", sep="") 49 | for (i in 2:N) {Theta[i,1] <- Theta[i-1,1] + rnorm(1)} 50 | HD <- BMK.Diagnostic(Theta, batches=10) 51 | plot(HD, title="Hellinger distance between batches") 52 | } 53 | \keyword{Plot} -------------------------------------------------------------------------------- /man/plot.importance.Rd: -------------------------------------------------------------------------------- 1 | \name{plot.importance} 2 | \alias{plot.importance} 3 | \title{Plot Variable Importance} 4 | \description{ 5 | This may be used to plot variable importance with BPIC, predictive 6 | concordance, a discrepancy statistic, or the L-criterion regarding an 7 | object of class \code{importance}. 8 | } 9 | \usage{\method{plot}{importance}(x, Style="BPIC", \dots)} 10 | \arguments{ 11 | \item{x}{This required argument is an object of class 12 | \code{importance}.} 13 | \item{Style}{When \code{Style="BPIC"}, BPIC is shown, and \code{BPIC} 14 | is the default. Otherwise, predictive concordance is plotted when 15 | \code{Style="Concordance"}, a discrepancy statistic is plotted when 16 | \code{Style="Discrep"}, or the L-criterion is plotted when 17 | \code{Style="L-criterion"}.} 18 | \item{\dots}{Additional arguments are unused.} 19 | } 20 | \details{ 21 | The x-axis is either BPIC (Ando, 2007), predictive concordance 22 | (Gelfand, 1996), a discrepancy statistic (Gelman et al., 1996), or the 23 | L-criterion (Laud and Ibrahim, 1995) of the \code{\link{Importance}} 24 | function (depending on the \code{Style} argument), and variables are 25 | on the y-axis. A more important variable is associated with a dot that 26 | is plotted farther to the right. For more information on variable 27 | importance, see the \code{\link{Importance}} function. 28 | } 29 | \references{ 30 | Ando, T. (2007). "Bayesian Predictive Information Criterion for 31 | the Evaluation of Hierarchical Bayesian and Empirical Bayes Models". 32 | \emph{Biometrika}, 94(2), p. 443--458. 33 | 34 | Gelfand, A. (1996). "Model Determination Using Sampling Based 35 | Methods". Chapter 9 in Gilks, W., Richardson, S., and Spiegelhalter, 36 | D., eds., \emph{Markov Chain Monte Carlo in Practice}. Chapman and Hall: Boca Raton, FL. 37 | 38 | Gelman, A., Meng, X.L., and Stern H. (1996). "Posterior Predictive 39 | Assessment of Model Fitness via Realized Discrepancies". 40 | \emph{Statistica Sinica}, 6, p. 733--807. 41 | 42 | Laud, P.W. and Ibrahim, J.G. (1995). "Predictive Model 43 | Selection". \emph{Journal of the Royal Statistical Society}, B 57, 44 | p. 247--262.
45 | } 46 | \author{Statisticat, LLC \email{software@bayesian-inference.com}} 47 | \seealso{ 48 | \code{\link{Importance}}} 49 | \keyword{Plot} -------------------------------------------------------------------------------- /man/plot.juxtapose.Rd: -------------------------------------------------------------------------------- 1 | \name{plot.juxtapose} 2 | \alias{plot.juxtapose} 3 | \title{Plot MCMC Juxtaposition} 4 | \description{ 5 | This may be used to plot a juxtaposition of MCMC algorithms according 6 | either to \code{\link{IAT}} or ISM (Independent Samples per Minute). 7 | } 8 | \usage{\method{plot}{juxtapose}(x, Style="ISM", \dots)} 9 | \arguments{ 10 | \item{x}{This required argument is an object of class 11 | \code{juxtapose}.} 12 | \item{Style}{This argument accepts either \code{IAT} or \code{ISM}, 13 | and defaults to \code{ISM}.} 14 | \item{\dots}{Additional arguments are unused.} 15 | } 16 | \details{ 17 | When \code{Style="IAT"}, the medians and 95\% probability intervals of 18 | the integrated autocorrelation times (IATs) of MCMC algorithms are 19 | displayed in a caterpillar plot. The best, or least inefficient, MCMC 20 | algorithm is the algorithm with the lowest IAT. 21 | 22 | When \code{Style="ISM"}, the medians and 95\% probability intervals of 23 | the numbers of independent samples per minute (ISM) of MCMC algorithms 24 | are displayed in a caterpillar plot. The best, or least inefficient, 25 | MCMC algorithm is the algorithm with the highest ISM. 26 | 27 | For more information, see the \code{\link{Juxtapose}} function. 28 | } 29 | \author{Statisticat, LLC. \email{software@bayesian-inference.com}} 30 | \seealso{ 31 | \code{\link{Juxtapose}}} 32 | \keyword{Plot} -------------------------------------------------------------------------------- /man/plot.laplace.Rd: -------------------------------------------------------------------------------- 1 | \name{plot.laplace} 2 | \alias{plot.laplace} 3 | \title{Plot the output of \code{\link{LaplaceApproximation}}} 4 | \description{ 5 | This may be used to plot, or save plots of, the iterated history of 6 | the parameters and, if posterior samples were taken, density plots of 7 | parameters and monitors in an object of class \code{laplace}. 8 | } 9 | \usage{\method{plot}{laplace}(x, Data, PDF=FALSE, Parms, \dots)} 10 | \arguments{ 11 | \item{x}{ 12 | This required argument is an object of class \code{laplace}.} 13 | \item{Data}{ 14 | This required argument must receive the list of data that was 15 | supplied to \code{\link{LaplaceApproximation}} to create the object 16 | of class \code{laplace}.} 17 | \item{PDF}{ 18 | This logical argument indicates whether or not the user wants 19 | Laplace's Demon to save the plots as a .pdf file.} 20 | \item{Parms}{ 21 | This argument accepts a vector of quoted strings to be matched for 22 | selecting parameters for plotting. This argument defaults to 23 | \code{NULL} and selects every parameter for plotting. Each quoted 24 | string is matched to one or more parameter names with the 25 | \code{grep} function. For example, if the user specifies 26 | \code{Parms=c("eta", "tau")}, and if the parameter names 27 | are beta[1], beta[2], eta[1], eta[2], and tau, then all parameters 28 | will be selected, because the string \code{eta} is within 29 | \code{beta}. 
Since \code{grep} is used, string matching uses 30 | regular expressions, so beware of meta-characters, though these are 31 | acceptable: ".", "[", and "]".} 32 | \item{\dots}{Additional arguments are unused.} 33 | } 34 | \details{ 35 | The plots are arranged in a \eqn{2 \times 2}{2 x 2} matrix. The 36 | purpose of the iterated history plots is to show how the value of each 37 | parameter and the deviance changed by iteration as the 38 | \code{\link{LaplaceApproximation}} attempted to maximize the logarithm 39 | of the unnormalized joint posterior density. If the algorithm 40 | converged, and if \code{sir=TRUE} in 41 | \code{\link{LaplaceApproximation}}, then plots are produced of 42 | selected parameters and all monitored variables. 43 | } 44 | \author{Statisticat, LLC. \email{software@bayesian-inference.com}} 45 | \seealso{\code{\link{LaplaceApproximation}}} 46 | \examples{### See the LaplaceApproximation function for an example.} 47 | \keyword{Plot} -------------------------------------------------------------------------------- /man/plot.miss.Rd: -------------------------------------------------------------------------------- 1 | \name{plot.miss} 2 | \alias{plot.miss} 3 | \title{Plot samples from the output of MISS} 4 | \description{ 5 | This may be used to plot, or save plots of, samples in an object of 6 | class \code{miss}. Plots include a trace plot, density plot, and 7 | autocorrelation or ACF plot. 8 | } 9 | \usage{ 10 | \method{plot}{miss}(x, PDF=FALSE, \dots) 11 | } 12 | \arguments{ 13 | \item{x}{ 14 | This required argument is an object of class \code{miss}.} 15 | \item{PDF}{ 16 | This logical argument indicates whether or not the user wants 17 | Laplace's Demon to save the plots as a .pdf file.} 18 | \item{\dots}{Additional arguments are unused.} 19 | } 20 | \details{ 21 | The plots are arranged in a \eqn{3 \times 3}{3 x 3} matrix. Each row 22 | represents the predictive distribution of a missing value. The 23 | left column displays trace plots, the middle column displays kernel 24 | density plots, and the right column displays autocorrelation (ACF) 25 | plots. 26 | 27 | Trace plots show the thinned history of the predictive distribution, 28 | with its value in the y-axis moving by iteration across the x-axis. 29 | Simulations of a predictive distribution with good properties do not 30 | suggest a trend upward or downward as it progresses across the x-axis 31 | (it should appear stationary), and it should mix well, meaning it 32 | should appear as though random samples are being taken each time from 33 | the same target distribution. Visual inspection of a trace plot cannot 34 | verify convergence, but apparent non-stationarity or poor mixing can 35 | certainly suggest non-convergence. A red, smoothed line also appears 36 | to aid visual inspection. 37 | 38 | Kernel density plots depict the marginal posterior distribution. 39 | There is no distributional assumption about this density. 40 | 41 | Autocorrelation plots show the autocorrelation or serial correlation 42 | between sampled values at nearby iterations. Samples with 43 | autocorrelation do not violate any assumption, but are inefficient 44 | because they reduce the effective sample size (\code{\link{ESS}}), and 45 | indicate that the chain is not mixing well, since each value is 46 | influenced by values that are previous and nearby. The x-axis 47 | indicates lags with respect to samples by iteration, and the y-axis 48 | represents autocorrelation. 
The ideal autocorrelation plot shows 49 | perfect correlation at zero lag, and quickly falls to zero 50 | autocorrelation for all other lags. 51 | } 52 | \author{Statisticat, LLC \email{software@bayesian-inference.com}} 53 | \seealso{ 54 | \code{\link{MISS}}.} 55 | \examples{### See the MISS function for an example.} 56 | \keyword{Plot} -------------------------------------------------------------------------------- /man/plot.vb.Rd: -------------------------------------------------------------------------------- 1 | \name{plot.vb} 2 | \alias{plot.vb} 3 | \title{Plot the output of \code{\link{VariationalBayes}}} 4 | \description{ 5 | This may be used to plot, or save plots of, the iterated history of 6 | the parameters and variances, and if posterior samples were taken, 7 | density plots of parameters and monitors in an object of class 8 | \code{vb}. 9 | } 10 | \usage{\method{plot}{vb}(x, Data, PDF=FALSE, Parms, \dots)} 11 | \arguments{ 12 | \item{x}{ 13 | This required argument is an object of class \code{vb}.} 14 | \item{Data}{ 15 | This required argument must receive the list of data that was 16 | supplied to \code{\link{VariationalBayes}} to create the object 17 | of class \code{vb}.} 18 | \item{PDF}{ 19 | This logical argument indicates whether or not the user wants 20 | Laplace's Demon to save the plots as a .pdf file.} 21 | \item{Parms}{ 22 | This argument accepts a vector of quoted strings to be matched for 23 | selecting parameters for plotting. This argument defaults to 24 | \code{NULL} and selects every parameter for plotting. Each quoted 25 | string is matched to one or more parameter names with the 26 | \code{grep} function. For example, if the user specifies 27 | \code{Parms=c("eta", "tau")}, and if the parameter names 28 | are beta[1], beta[2], eta[1], eta[2], and tau, then all parameters 29 | will be selected, because the string \code{eta} is within 30 | \code{beta}. Since \code{grep} is used, string matching uses 31 | regular expressions, so beware of meta-characters, though these are 32 | acceptable: ".", "[", and "]".} 33 | \item{\dots}{Additional arguments are unused.} 34 | } 35 | \details{ 36 | The plots are arranged in a \eqn{3 \times 3}{3 x 3} matrix. The 37 | purpose of the iterated history plots is to show how the value of each 38 | parameter, variance, and the deviance changed by iteration as the 39 | \code{\link{VariationalBayes}} attempted to maximize the logarithm 40 | of the unnormalized joint posterior density. If the algorithm 41 | converged, and if \code{sir=TRUE} in 42 | \code{\link{VariationalBayes}}, then plots are produced of 43 | selected parameters and all monitored variables. 44 | } 45 | \author{Statisticat, LLC. \email{software@bayesian-inference.com}} 46 | \seealso{\code{\link{VariationalBayes}}} 47 | \examples{### See the VariationalBayes function for an example.} 48 | \keyword{Plot} -------------------------------------------------------------------------------- /man/plotSamples.Rd: -------------------------------------------------------------------------------- 1 | \name{plotSamples} 2 | \alias{plotSamples} 3 | \title{Plot Samples} 4 | \description{ 5 | This function provides basic plots that are extended to include 6 | samples. 
7 | } 8 | \usage{ 9 | plotSamples(X, Style="KDE", LB=0.025, UB=0.975, Title=NULL) 10 | } 11 | \arguments{ 12 | \item{X}{This required argument is a \eqn{N \times S}{N x S} numerical 13 | matrix of \eqn{N} records and \eqn{S} samples.} 14 | \item{Style}{This argument accepts the following quoted strings: 15 | "barplot", "dotchart", "hist", "KDE", or "Time-Series". It defaults 16 | to \code{Style="KDE"}.} 17 | \item{LB}{This argument accepts the lower bound of a probability 18 | interval, which must be in the interval [0,0.5).} 19 | \item{UB}{This argument accepts the upper bound of a probability 20 | interval, which must be in the interval (0.5,1].} 21 | \item{Title}{This argument defaults to \code{NULL}, and otherwise 22 | accepts a quoted string that will be the title of the plot.} 23 | } 24 | \details{ 25 | The \code{plotSamples} function extends several basic plots from 26 | points to samples. For example, it is common to use the \code{hist} 27 | function to plot a histogram from a column vector. However, the user 28 | may desire to plot a histogram of a column vector that was sampled 29 | numerous times, rather than a simple column vector, in which a 30 | (usually 95\%) probability interval is also plotted to show the 31 | uncertainty around the sampled median of each bin in the histogram. 32 | 33 | The \code{plotSamples} function extends the \code{barplot}, 34 | \code{dotchart}, and \code{hist} functions to include uncertainty due 35 | to samples. The \code{KDE} style of plot is added so that a 36 | probability interval is shown around a sampled kernel density estimate 37 | of a distribution, and the \code{Time-Series} style of plot is added 38 | so that a probability interval is shown around a sampled univariate 39 | time-series. 40 | 41 | For each style of plot, three quantiles are plotted: the lower bound 42 | (LB), median, and upper bound (UB). 43 | 44 | One of many potential Bayesian applications is to examine the 45 | uncertainty in a predictive distribution. 46 | } 47 | \author{Statisticat, LLC. \email{software@bayesian-inference.com}} 48 | \examples{ 49 | #library(LaplacesDemon) 50 | #N <- 100 51 | #S <- 100 52 | #X <- matrix(rnorm(N*S),N,S) 53 | #rownames(X) <- 1:100 54 | #plotSamples(X, Style="barplot", LB=0.025, UB=0.975) 55 | #plotSamples(X[1:10,], Style="dotchart", LB=0.025, UB=0.975) 56 | #plotSamples(X, Style="hist", LB=0.025, UB=0.975) 57 | #plotSamples(X, Style="KDE", LB=0.025, UB=0.975) 58 | #plotSamples(X, Style="Time-Series", LB=0.025, UB=0.975) 59 | } 60 | \keyword{Plot} -------------------------------------------------------------------------------- /man/print.demonoid.Rd: -------------------------------------------------------------------------------- 1 | \name{print.demonoid} 2 | \alias{print.demonoid} 3 | \title{Print an object of class \code{demonoid} to the screen.} 4 | \description{ 5 | This may be used to print the contents of an object of class 6 | \code{demonoid} to the screen. 7 | } 8 | \usage{\method{print}{demonoid}(x, \dots)} 9 | \arguments{ 10 | \item{x}{An object of class \code{demonoid} is required.} 11 | \item{\dots}{Additional arguments are unused.} 12 | } 13 | \details{ 14 | If the user has an object of class \code{demonoid.hpc}, then the 15 | \code{print} function may still be used by specifying the chain as a 16 | component in a list, such as printing the second chain with 17 | \code{print(Fit[[2]])} when the \code{demonoid.hpc} object is named 18 | \code{Fit}, for example. 19 | } 20 | \author{Statisticat, LLC. 
\email{software@bayesian-inference.com}} 21 | \seealso{ 22 | \code{\link{Consort}}, 23 | \code{\link{LaplacesDemon}}, and 24 | \code{\link{LaplacesDemon.hpc}}. 25 | } 26 | \examples{### See the LaplacesDemon function for an example.} 27 | \keyword{print} 28 | -------------------------------------------------------------------------------- /man/print.heidelberger.Rd: -------------------------------------------------------------------------------- 1 | \name{print.heidelberger} 2 | \alias{print.heidelberger} 3 | \title{Print an object of class \code{heidelberger} to the screen.} 4 | \description{ 5 | This may be used to print the contents of an object of class 6 | \code{heidelberger} to the screen. 7 | } 8 | \usage{\method{print}{heidelberger}(x, digits=3, \dots)} 9 | \arguments{ 10 | \item{x}{An object of class \code{heidelberger} is required.} 11 | \item{digits}{This is the number of digits to print.} 12 | \item{\dots}{Additional arguments are unused.} 13 | } 14 | \author{Statisticat, LLC. \email{software@bayesian-inference.com}} 15 | \seealso{ 16 | \code{\link{Heidelberger.Diagnostic}}. 17 | } 18 | \examples{### See the Heidelberger.Diagnostic function for an example.} 19 | \keyword{print} 20 | -------------------------------------------------------------------------------- /man/print.iterquad.Rd: -------------------------------------------------------------------------------- 1 | \name{print.iterquad} 2 | \alias{print.iterquad} 3 | \title{Print an object of class \code{iterquad} to the screen.} 4 | \description{ 5 | This may be used to print the contents of an object of class 6 | \code{iterquad} to the screen. 7 | } 8 | \usage{\method{print}{iterquad}(x, \dots)} 9 | \arguments{ 10 | \item{x}{An object of class \code{iterquad} is required.} 11 | \item{\dots}{Additional arguments are unused.} 12 | } 13 | \author{Statisticat, LLC. \email{software@bayesian-inference.com}} 14 | \seealso{\code{\link{IterativeQuadrature}}} 15 | \examples{### See the IterativeQuadrature function for an example.} 16 | \keyword{print} -------------------------------------------------------------------------------- /man/print.laplace.Rd: -------------------------------------------------------------------------------- 1 | \name{print.laplace} 2 | \alias{print.laplace} 3 | \title{Print an object of class \code{laplace} to the screen.} 4 | \description{ 5 | This may be used to print the contents of an object of class 6 | \code{laplace} to the screen. 7 | } 8 | \usage{\method{print}{laplace}(x, \dots)} 9 | \arguments{ 10 | \item{x}{An object of class \code{laplace} is required.} 11 | \item{\dots}{Additional arguments are unused.} 12 | } 13 | \author{Statisticat, LLC. \email{software@bayesian-inference.com}} 14 | \seealso{\code{\link{LaplaceApproximation}}} 15 | \examples{### See the LaplaceApproximation function for an example.} 16 | \keyword{print} -------------------------------------------------------------------------------- /man/print.miss.Rd: -------------------------------------------------------------------------------- 1 | \name{print.miss} 2 | \alias{print.miss} 3 | \title{Print an object of class \code{miss} to the screen.} 4 | \description{ 5 | This may be used to print the contents of an object of class 6 | \code{miss} to the screen. 7 | } 8 | \usage{\method{print}{miss}(x, \dots)} 9 | \arguments{ 10 | \item{x}{An object of class \code{miss} is required.} 11 | \item{\dots}{Additional arguments are unused.} 12 | } 13 | \author{Statisticat, LLC. 
\email{software@bayesian-inference.com}} 14 | \seealso{ 15 | \code{\link{MISS}}. 16 | } 17 | \examples{### See the MISS function for an example.} 18 | \keyword{print} 19 | -------------------------------------------------------------------------------- /man/print.pmc.Rd: -------------------------------------------------------------------------------- 1 | \name{print.pmc} 2 | \alias{print.pmc} 3 | \title{Print an object of class \code{pmc} to the screen.} 4 | \description{ 5 | This may be used to print the contents of an object of class 6 | \code{pmc} to the screen. 7 | } 8 | \usage{\method{print}{pmc}(x, \dots)} 9 | \arguments{ 10 | \item{x}{An object of class \code{pmc} is required.} 11 | \item{\dots}{Additional arguments are unused.} 12 | } 13 | \author{Statisticat, LLC. \email{software@bayesian-inference.com}} 14 | \seealso{ 15 | \code{\link{PMC}}. 16 | } 17 | \examples{### See the PMC function for an example.} 18 | \keyword{print} 19 | -------------------------------------------------------------------------------- /man/print.raftery.Rd: -------------------------------------------------------------------------------- 1 | \name{print.raftery} 2 | \alias{print.raftery} 3 | \title{Print an object of class \code{raftery} to the screen.} 4 | \description{ 5 | This may be used to print the contents of an object of class 6 | \code{raftery} to the screen. 7 | } 8 | \usage{\method{print}{raftery}(x, digits=3, \dots)} 9 | \arguments{ 10 | \item{x}{An object of class \code{raftery} is required.} 11 | \item{digits}{This is the number of digits to print.} 12 | \item{\dots}{Additional arguments are unused.} 13 | } 14 | \author{Statisticat, LLC. \email{software@bayesian-inference.com}} 15 | \seealso{ 16 | \code{\link{Raftery.Diagnostic}}. 17 | } 18 | \examples{### See the Raftery.Diagnostic function for an example.} 19 | \keyword{print} 20 | -------------------------------------------------------------------------------- /man/print.vb.Rd: -------------------------------------------------------------------------------- 1 | \name{print.vb} 2 | \alias{print.vb} 3 | \title{Print an object of class \code{vb} to the screen.} 4 | \description{ 5 | This may be used to print the contents of an object of class 6 | \code{vb} to the screen. 7 | } 8 | \usage{\method{print}{vb}(x, \dots)} 9 | \arguments{ 10 | \item{x}{An object of class \code{vb} is required.} 11 | \item{\dots}{Additional arguments are unused.} 12 | } 13 | \author{Statisticat, LLC. \email{software@bayesian-inference.com}} 14 | \seealso{\code{\link{VariationalBayes}}} 15 | \examples{### See the VariationalBayes function for an example.} 16 | \keyword{print} -------------------------------------------------------------------------------- /man/summary.miss.Rd: -------------------------------------------------------------------------------- 1 | \name{summary.miss} 2 | \alias{summary.miss} 3 | \title{MISS Summary} 4 | \description{ 5 | This function summarizes posterior predictive distributions from 6 | an object of class \code{miss}. 7 | } 8 | \usage{\method{summary}{miss}(object, \dots)} 9 | \arguments{ 10 | \item{object}{An object of class \code{miss} is required.} 11 | \item{\dots}{Additional arguments are unused.} 12 | } 13 | \details{ 14 | This function summarizes the posterior predictive distributions from 15 | an object of class \code{miss}. 16 | } 17 | \value{ 18 | This function returns a \eqn{M \times 7}{M x 7} matrix, in which each 19 | row is the posterior predictive distribution of one of \eqn{M} missing 20 | values. 
Columns are Mean, SD, MCSE, ESS, LB, Median, and UB. 21 | } 22 | \author{Statisticat, LLC. \email{software@bayesian-inference.com}} 23 | \seealso{ 24 | \code{\link{MISS}}. 25 | } 26 | \examples{### See the MISS function for an example.} 27 | \keyword{Imputation} 28 | \keyword{summary} 29 | -------------------------------------------------------------------------------- /vignettes/LDlogo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LaplacesDemonR/LaplacesDemon/de9107d46c215a9db57ad6e9c95a9ebcaf75ef25/vignettes/LDlogo.png --------------------------------------------------------------------------------