├── R
│   ├── plot.rjags.R
│   ├── print.jzs_med.R
│   ├── plot.CI.R
│   ├── plot.JZSMed.R
│   ├── jzs_cor.R
│   ├── jzs_partcor.R
│   ├── jzs_corSD.R
│   ├── jzs_partcorSD.R
│   ├── jzs_med.R
│   └── jzs_medSD.R
├── DESCRIPTION
├── NAMESPACE
├── man
│   ├── plot.JZSMed.Rd
│   ├── plot.CI.Rd
│   ├── plot.rjags.Rd
│   ├── Firefighters.Rd
│   ├── print.jzs_med.Rd
│   ├── jzs_cor.Rd
│   ├── jzs_partcor.Rd
│   ├── jzs_corSD.Rd
│   ├── jzs_partcorSD.Rd
│   ├── BayesMed-package.Rd
│   ├── jzs_med.Rd
│   └── jzs_medSD.Rd
└── data
    └── Firefighters.txt

/R/plot.rjags.R:
--------------------------------------------------------------------------------
 1 | plot.rjags <-
 2 | function(x,...){
 3 |   traceplot(x)
 4 | }
--------------------------------------------------------------------------------
/R/print.jzs_med.R:
--------------------------------------------------------------------------------
 1 | print.jzs_med <-
 2 | function(x,...){
 3 |   print(x[!(names(x)%in%c("cor_coef_samples","alpha_samples","beta_samples","tau_prime_samples","rho",
 4 |                           "prob_alpha","prob_beta","prob_tau_prime_samples",
 5 |                           "ab_samples","jagssamples","jagssamplesA","jagssamplesTB"))])
 6 | }
 7 | 
--------------------------------------------------------------------------------
/DESCRIPTION:
--------------------------------------------------------------------------------
 1 | Package: BayesMed
 2 | Type: Package
 3 | Title: Default Bayesian Hypothesis Tests for Correlation, Partial Correlation, and Mediation
 4 | Version: 1.2.3
 5 | Date: 2020-01-29
 6 | Author: Michele B. Nuijten, Ruud Wetzels, Dora Matzke, Conor V. Dolan, and Eric-Jan Wagenmakers
 7 | Maintainer: Michele B. Nuijten
 8 | Depends: R (>= 3.6)
 9 | Imports: R2jags, polspline
10 | Suggests: MCMCpack, QRM
11 | Description: Default Bayesian hypothesis tests for correlation, partial correlation, and mediation.
12 | License: GPL-2
--------------------------------------------------------------------------------
/NAMESPACE:
--------------------------------------------------------------------------------
 1 | export(jzs_med)
 2 | export(jzs_medSD)
 3 | export(jzs_cor)
 4 | export(jzs_partcor)
 5 | export(jzs_corSD)
 6 | export(jzs_partcorSD)
 7 | S3method(print,jzs_med)
 8 | S3method(plot,JZSMed)
 9 | S3method(plot,rjags)
10 | S3method(plot,CI)
11 | import(R2jags)
12 | import(polspline)
13 | importFrom("graphics", "abline", "arrows", "hist", "legend", "par",
14 |            "plot", "points", "text")
15 | importFrom("stats", "cor", "dcauchy", "density", "dnorm", "dt",
16 |            "integrate", "lm", "median", "optim", "pt", "quantile",
17 |            "runif", "sd", "splinefun")
18 | 
--------------------------------------------------------------------------------
/R/plot.CI.R:
--------------------------------------------------------------------------------
 1 | plot.CI <- function(x,...){
 2 | 
 3 |   # histogram of posterior samples
 4 |   hist(x,100,freq=FALSE,
 5 |        main=paste("posterior distribution for ",deparse(substitute(x)),sep=" "),
 6 |        xlab=deparse(substitute(x)),...)
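  # (x is a vector of posterior samples; the jzs_* functions assign such vectors the
  #  class "CI" -- e.g. res$ab_samples or res$alpha_samples -- so that plot() dispatches
  #  to this method)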
7 | 8 | # indicate 95% credible interval, mean, and median 9 | abline(v=quantile(x,c(.025,.975)),lwd=2,col="red") 10 | abline(v=mean(x),lwd=2,col="green") 11 | abline(v=median(x),lwd=2,col="blue") 12 | 13 | # legend 14 | legend("topright", 15 | legend=c(paste(c("lower bound 95% CI","upper bound 95% CI"), 16 | round(quantile(x,c(.025,.975)),3),sep=": "), 17 | paste("mean",round(mean(x),3),sep=": "), 18 | paste("median",round(median(x),3),sep=": ")), 19 | col=c("red","red","green","blue"),lty=1,lwd=2) 20 | } -------------------------------------------------------------------------------- /man/plot.JZSMed.Rd: -------------------------------------------------------------------------------- 1 | \name{plot.JZSMed} 2 | \alias{plot.JZSMed} 3 | 4 | \title{ 5 | Display the results of \code{\link{jzs_med}} in a figure. 6 | } 7 | \description{ 8 | This function displays the estimates and posterior probabilities of path alpha, beta, and tau' in a mediation schema and thus renders a clear view of the structure in the data. 9 | } 10 | \usage{ 11 | \method{plot}{JZSMed}(x,\dots) 12 | } 13 | 14 | \arguments{ 15 | \item{x}{ 16 | the output of the jzs_med function. 17 | } 18 | \item{\dots}{ 19 | additional arguments to be passed on to the plot method, such as graphical parameters (see \code{par}). 20 | } 21 | } 22 | 23 | \author{ 24 | Michele B. Nuijten , Ruud Wetzels, Dora Matzke, Conor V. Dolan, and Eric-Jan Wagenmakers. 25 | } 26 | 27 | \seealso{ 28 | \code{\link{jzs_med}} 29 | } 30 | \examples{ 31 | \dontrun{ 32 | # simulate mediational data 33 | a <- .5 34 | b <- .6 35 | t_prime <- .3 36 | 37 | X <- rnorm(50,0,1) 38 | M <- a*X + rnorm(50,0,1) 39 | Y <- t_prime*X + b*M + rnorm(50,0,1) 40 | 41 | # save jzs_med output 42 | res <- jzs_med(independent=X,dependent=Y,mediator=M) 43 | 44 | # plot results 45 | plot(res$main_result) 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /man/plot.CI.Rd: -------------------------------------------------------------------------------- 1 | \name{plot.CI} 2 | \alias{plot.CI} 3 | \title{ 4 | Plot the posterior distribution of the indirect effect alpha*beta 5 | } 6 | \description{ 7 | Plot the posterior distribution of the indirect effect alpha*beta including a 95\% credible interval around the mean of the posterior (see Nuijten et al. (2014); Yuan & MacKinnon, 2009). 8 | } 9 | \usage{ 10 | \method{plot}{CI}(x,\dots) 11 | } 12 | \arguments{ 13 | \item{x}{ 14 | the posterior samples of alpha*beta as obtained from the output of \code{\link{jzs_medSD}}. This is an object of class \code{CI}. 15 | } 16 | \item{\dots}{ 17 | additional arguments to be passed on to the plot method, such as graphical parameters (see \code{par}). 18 | } 19 | } 20 | \references{ 21 | Nuijten, M. B., Wetzels, R., Matzke, D., Dolan, C. V., & Wagenmakers, E.-J. (2014). A default Bayesian hypothesis test for mediation. Behavior Research Methods. doi: 10.3758/s13428-014-0470-2 22 | 23 | Yuan, Y., & MacKinnon, D. (2009). Bayesian mediation analysis. Psychological Methods, 14, 301-322. 24 | } 25 | \author{ 26 | Michele B. Nuijten , Ruud Wetzels, Dora Matzke, Conor V. Dolan, and Eric-Jan Wagenmakers. 
27 | } 28 | \seealso{ 29 | \code{\link{jzs_medSD}} 30 | } 31 | \examples{ 32 | \dontrun{ 33 | # simulate mediational data 34 | a <- .5 35 | b <- .6 36 | t_prime <- .3 37 | 38 | X <- rnorm(50,0,1) 39 | M <- a*X + rnorm(50,0,1) 40 | Y <- t_prime*X + b*M + rnorm(50,0,1) 41 | 42 | # run jzs_med 43 | res <- jzs_med(independent=X,dependent=Y,mediator=M) 44 | 45 | # plot posterior distribution of a*b 46 | plot(res$ab_samples) 47 | 48 | # print the exact lower and upper boundary of the interval 49 | res$CI_ab 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /man/plot.rjags.Rd: -------------------------------------------------------------------------------- 1 | \name{plot.rjags} 2 | \alias{plot.rjags} 3 | 4 | \title{ 5 | Plot the chains of the JAGS samples 6 | } 7 | \description{ 8 | Displays a plot of iterations vs. sampled values for each variable in the chain, with a separate plot per variable (see \code{traceplot} of the package \code{R2jags}; Su & Yajima, 2012). 9 | } 10 | \usage{ 11 | \method{plot}{rjags}(x,\dots) 12 | } 13 | \arguments{ 14 | \item{x}{ 15 | an \code{rjags} object. 16 | } 17 | \item{\dots}{ 18 | additional arguments to be passed on to the traceplot method, such as graphical parameters (see \code{traceplot}). 19 | } 20 | } 21 | \references{ 22 | Nuijten, M. B., Wetzels, R., Matzke, D., Dolan, C. V., & Wagenmakers, E.-J. (2014). A default Bayesian hypothesis test for mediation. Behavior Research Methods. doi: 10.3758/s13428-014-0470-2 23 | 24 | Su, Y.-S., & Yajima, M. (2012). R2jags: A package for running jags from Rb[Computer software manual]. Available from http://CRAN.R-project.org/package=R2jags (R package version 0.03-08) 25 | } 26 | \author{ 27 | Michele B. Nuijten , Ruud Wetzels, Dora Matzke, Conor V. Dolan, and Eric-Jan Wagenmakers. 28 | } 29 | 30 | \examples{ 31 | \dontrun{ 32 | # simulate correlational data 33 | X <- rnorm(50,0,1) 34 | Y <- .3*X + rnorm(50,0,1) 35 | 36 | # save jzs_cor output 37 | res <- jzs_cor(X,Y) 38 | 39 | # plot results 40 | plot(res$jagssamples) 41 | 42 | ############ 43 | 44 | # simulate mediational data 45 | a <- .5 46 | b <- .6 47 | t_prime <- .3 48 | 49 | X <- rnorm(50,0,1) 50 | M <- a*X + rnorm(50,0,1) 51 | Y <- t_prime*X + b*M + rnorm(50,0,1) 52 | 53 | # run jzs_med 54 | res2 <- jzs_med(independent=X,dependent=Y,mediator=M) 55 | 56 | # plot resulting chains for alpha, beta, and tau_prime 57 | plot(res2$jagssamplesA) 58 | plot(res2$jagssamplesTB) 59 | } 60 | } -------------------------------------------------------------------------------- /R/plot.JZSMed.R: -------------------------------------------------------------------------------- 1 | plot.JZSMed <- 2 | function(x,...){ 3 | par(mai=c(0,0,0.4,0)) 4 | 5 | # plot mediation figure 6 | plot(1:3,bty="n",type="n",axes=F,xlab="",ylab="",main="",...) 
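  # the empty plot above only opens an unannotated canvas; the mediation diagram is
  # drawn by hand below: points() places the X, M, and Y labels, arrows() draws the
  # three paths, and text() adds the posterior probability (and, if available, the
  # estimate) for each path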
7 | 8 | points(2,2.8,pch="M",cex=2) 9 | points(1.5,1.5,pch="X",cex=2) 10 | points(2.5,1.5,pch="Y",cex=2) 11 | 12 | arrows(1.55,1.6,1.95,2.65,lwd=2,length=.15) 13 | arrows(2.05,2.65,2.45,1.6,lwd=2,length=.15) 14 | arrows(1.55,1.5,2.45,1.5,lwd=2,length=.15) 15 | 16 | # add values for paths 17 | 18 | text(1.5,2.2,substitute("p("*{alpha != 0}*"|D) = "*p_a,list(p_a=round(x[rownames(x)%in%"alpha",colnames(x)%in%"PostProb"],2)))) 19 | 20 | if(any(colnames(x)%in%"Estimate")){ 21 | text(1.5,2.1,substitute({hat(alpha)==a},list(a=round(x[rownames(x)%in%"alpha",colnames(x)%in%"Estimate"],2)))) 22 | } 23 | 24 | text(2.5,2.2,substitute("p("*beta != 0*"|D) = "*p_b,list(p_b=round(x[rownames(x)%in%"beta",colnames(x)%in%"PostProb"],2)))) 25 | 26 | if(any(colnames(x)%in%"Estimate")){ 27 | text(2.5,2.1,substitute({hat(beta)==b},list(b=round(x[rownames(x)%in%"beta",colnames(x)%in%"Estimate"],2)))) 28 | } 29 | 30 | text(2,1.3,substitute("p("*tau*{symbol("\242")} != 0*"|D) = "*p_t,list(p_t=round(x[rownames(x)%in%"tau_prime",colnames(x)%in%"PostProb"],2)))) 31 | 32 | if(any(colnames(x)%in%"Estimate")){ 33 | text(2,1.2,substitute(hat(tau*{symbol("\242")}) == t,list(t=round(x[rownames(x)%in%"tau_prime",colnames(x)%in%"Estimate"],2)))) 34 | } 35 | 36 | # add Bayes factors for mediation and full mediation 37 | legend("topleft",legend=paste("BF_Mediation",round(x[rownames(x)%in%"Mediation (alpha*beta)",colnames(x)%in%"BF"],2),sep=": ")) 38 | 39 | par(mai=c(1.360000, 1.093333, 1.093333, 0.560000)) # restore default settings 40 | 41 | } -------------------------------------------------------------------------------- /man/Firefighters.Rd: -------------------------------------------------------------------------------- 1 | \name{Firefighters} 2 | \alias{Firefighters} 3 | \docType{data} 4 | \title{ 5 | Data from a study of health promotion of firefighters 6 | } 7 | \description{ 8 | Data from a study of health promotion of firefighters (Elliot et al., 2007). These data are also used as an example in the Bayesian mediation paper of Yuan and MacKinnon (2009) and Nuijten et al. (2014). 9 | } 10 | \usage{data(Firefighters)} 11 | \format{ 12 | A data frame with 354 observations on the following 3 variables. 13 | \describe{ 14 | \item{\code{y}}{Dependent variable: reported eating of fruits and vegetables} 15 | \item{\code{m}}{Mediating variable: change from baseline to followup in knowledge of the benefits of eating fruits and vegetables} 16 | \item{\code{x}}{Independent variable: randomized exposure to an intervention} 17 | } 18 | } 19 | \details{ 20 | The data are centered. 21 | } 22 | \source{ 23 | Elliot, D. L., Goldberg, L., Kuehl, K. S., Moe, E. L., Breger, R. K. R., Pickering, M. A. (2007). The PHLAME (Promoting Healthy Lifestyles: Alternative Models' Effects) Firefighter Study: Outcomes of Two Models of Behavior Change. JOEM, 49, 204-213. 24 | } 25 | \references{ 26 | Elliot, D. L., Goldberg, L., Kuehl, K. S., Moe, E. L., Breger, R. K. R., Pickering, M. A. (2007). The PHLAME (Promoting Healthy Lifestyles: Alternative Models' Effects) Firefighter Study: Outcomes of Two Models of Behavior Change. JOEM, 49, 204-213. 27 | 28 | Nuijten, M. B., Wetzels, R., Matzke, D., Dolan, C. V., & Wagenmakers, E.-J. (2014). A default Bayesian hypothesis test for mediation. Behavior Research Methods. doi: 10.3758/s13428-014-0470-2 29 | 30 | Yuan, Y., & MacKinnon, D. (2009). Bayesian mediation analysis. Psychological Methods, 14 , 301-322. 
31 | 32 | } 33 | \examples{ 34 | \dontrun{ 35 | data(Firefighters) 36 | 37 | # mediation analysis 38 | result <- jzs_med(Firefighters$x,Firefighters$y,Firefighters$m) 39 | result 40 | 41 | ### Note that the Bayes factor is 1.96, instead of 1.94 as reported 42 | ### in the paper (Nuijten et al., 2014). 43 | ### This is caused by rounding differences; the posterior probabilities are equal. 44 | 45 | # underlying mediational model with path weights and posterior probabilities 46 | plot(result$main_result) 47 | 48 | # posterior distribution of indirect effect "ab" incl 95\% credible interval 49 | plot(result$ab_samples) 50 | } 51 | } -------------------------------------------------------------------------------- /man/print.jzs_med.Rd: -------------------------------------------------------------------------------- 1 | \name{print.jzs_med} 2 | \alias{print.jzs_med} 3 | \title{ 4 | Print jzs_med output. 5 | } 6 | \description{ 7 | Print the output of a jzs_med object. 8 | } 9 | 10 | \usage{ 11 | \method{print}{jzs_med}(x,\dots) 12 | } 13 | \arguments{ 14 | \item{x}{ 15 | a jzs_med object. 16 | } 17 | \item{\dots}{ 18 | further arguments passed to or from other methods. 19 | } 20 | } 21 | 22 | \value{ 23 | The function returns a list with the following items: 24 | 25 | \item{EvidenceMediation}{The posterior probability that the relation between the independent and the dependent variable is mediated by the specified mediator. 26 | } 27 | \item{EvidenceFullMediation}{The posterior probability that the relation between the independent and the dependent variable is fully mediated by the specified mediator and the direct effect of the independent variable on the dependent variable disappears after introducing the mediator. 28 | } 29 | \item{BF_Mediation}{The Bayes factor for mediation compared to no mediation. A value greater than one indicates evidence in favor of mediation, a value smaller than one indicates evidence against mediation. 30 | } 31 | \item{BF_FullMediation}{The Bayes factor for full mediation compared to no mediation. A value greater than one indicates evidence in favor of full mediation, a value smaller than one indicates evidence against full mediation. 32 | } 33 | \item{BF_alpha}{The Bayes factor for the existence of path alpha. A value greater than one indicates evidence that alpha exists, a value smaller than one indicates evidence that alpha does not exist. 34 | } 35 | \item{BF_beta}{The Bayes factor for the existence of path beta. A value greater than one indicates evidence that beta exists, a value smaller than one indicates evidence that beta does not exist. 36 | } 37 | \item{BF_tau_accent}{The Bayes factor for the existence of path tau_accent. A value greater than one indicates evidence that tau_accent exists, a value smaller than one indicates evidence that tau_accent does not exist. 38 | } 39 | } 40 | 41 | \author{ 42 | Michele B. Nuijten , Ruud Wetzels, Dora Matzke, Conor V. Dolan, and Eric-Jan Wagenmakers. 
43 | } 44 | 45 | 46 | \examples{ 47 | \dontrun{ 48 | # simulate mediational data 49 | a <- .5 50 | b <- .6 51 | t_prime <- .3 52 | 53 | X <- rnorm(50,0,1) 54 | M <- a*X + rnorm(50,0,1) 55 | Y <- t_prime*X + b*M + rnorm(50,0,1) 56 | 57 | # run jzs_med 58 | result <- jzs_med(independent=X,dependent=Y,mediator=M,SDmethod='dnorm') 59 | 60 | # print result 61 | result 62 | } 63 | } -------------------------------------------------------------------------------- /man/jzs_cor.Rd: -------------------------------------------------------------------------------- 1 | \name{jzs_cor} 2 | \alias{jzs_cor} 3 | 4 | \title{ 5 | A default Bayesian hypothesis test for correlation (Wetzels, R., & Wagenmakers). 6 | } 7 | \description{ 8 | This function can be used to perform a default Bayesian hypothesis test for correlation, using a Jeffreys-Zellner-Siow prior set-up (Liang et al., 2008). 9 | } 10 | \usage{ 11 | jzs_cor(V1, V2, 12 | alternative = c("two.sided", "less", "greater"), 13 | n.iter=10000,n.burnin=500,standardize=TRUE, subdivisions = 200) 14 | } 15 | \arguments{ 16 | \item{V1}{ 17 | a numeric vector. 18 | } 19 | \item{V2}{ 20 | a numeric vector of the same length as V1. 21 | } 22 | \item{alternative}{ 23 | specify the alternative hypothesis for the correlation coefficient: \code{two.sided}, \code{greater} than zero, or \code{less} than zero. 24 | } 25 | \item{n.iter}{ 26 | number of total iterations per chain (see the package \code{R2jags}). Defaults to 10000. 27 | } 28 | \item{n.burnin}{ 29 | length of burn in, i.e. number of iterations to discard at the beginning(see the package \code{R2jags}). Defaults to 500. 30 | } 31 | \item{standardize}{ 32 | logical. Should the variables be standardized? Defaults to TRUE. 33 | } 34 | \item{subdivisions}{ 35 | the maximum number of subdivisions. Defaults to 200. 36 | } 37 | } 38 | \details{ 39 | See Wetzels & Wagenmakers (2012). 40 | } 41 | \value{ 42 | The function returns a list with the following items: 43 | \item{Correlation}{ 44 | The correlation coefficient for the relation between V1 and V2. The correlation coefficient is calculated by standardizing the mean of the posterior samples: mean(samples)*(sd(V1)/sd(V2)). 45 | } 46 | \item{BayesFactor}{ 47 | The Bayes factor for the correlation coefficient. A value greater than one indicates evidence in favor of correlation, a value smaller than one indicates evidence against correlation. 48 | } 49 | \item{PosteriorProbability}{ 50 | The posterior probability for the existence of a correlation between V1 and V2. 51 | } 52 | \item{alpha}{ 53 | The posterior samples for the correlation coefficient alpha. 54 | } 55 | \item{jagssamples}{The JAGS output for the MCMC estimation of the path. This object can be used to construct a traceplot. 56 | } 57 | } 58 | \references{ 59 | Liang, F., Paulo, R., Molina, G., Clyde, M. A., & Berger, J. O. (2008). Mixtures of g priors for Bayesian variable selection. Journal of the American Statistical Association, 103(481), 410-423. 60 | 61 | Nuijten, M. B., Wetzels, R., Matzke, D., Dolan, C. V., & Wagenmakers, E.-J. (2014). A default Bayesian hypothesis test for mediation. Behavior Research Methods. doi: 10.3758/s13428-014-0470-2 62 | 63 | Wetzels, R., & Wagenmakers, E.-J. (2012). A Default Bayesian Hypothesis Test for Correlations and Partial Correlations. Psychonomic Bulletin & Review, 19, 1057-1064. 64 | } 65 | \author{ 66 | Michele B. Nuijten , Ruud Wetzels, Dora Matzke, Conor V. Dolan, and Eric-Jan Wagenmakers. 
67 | } 68 | \seealso{ 69 | \code{\link{jzs_partcor}}, \code{\link{jzs_med}} 70 | } 71 | \examples{ 72 | \dontrun{ 73 | # generate correlational data 74 | X <- rnorm(100) 75 | Y <- .4*X + rnorm(100,0,1) 76 | 77 | # run jzs_cor 78 | result <- jzs_cor(X,Y) 79 | 80 | # inspect posterior distribution 81 | plot(result$alpha_samples) 82 | 83 | # print a traceplot of the chains 84 | plot(result$jagssamples) 85 | 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /man/jzs_partcor.Rd: -------------------------------------------------------------------------------- 1 | \name{jzs_partcor} 2 | \alias{jzs_partcor} 3 | \title{ 4 | A default Bayesian hypothesis test for partial correlation (Wetzels, R., & Wagenmakers). 5 | } 6 | \description{ 7 | This function can be used to perform a default Bayesian hypothesis test for partial correlation, using a Jeffreys-Zellner-Siow prior set-up (Liang et al., 2008). 8 | } 9 | \usage{ 10 | jzs_partcor(V1, V2, control, alternative = c("two.sided", "less", "greater"), 11 | n.iter=10000,n.burnin=500,standardize=TRUE) 12 | } 13 | \arguments{ 14 | \item{V1}{ 15 | a numeric vector. 16 | } 17 | \item{V2}{ 18 | a numeric vector of the same length as V1. 19 | } 20 | \item{control}{ 21 | a numeric vector of the same length as V1 and V2. This variable is partialled out of the correlation between V1 and V2. 22 | } 23 | \item{alternative}{ 24 | specify the alternative hypothesis for the correlation coefficient: \code{two.sided}, \code{greater} than zero, or \code{less} than zero. 25 | } 26 | \item{n.iter}{ 27 | number of total iterations per chain (see the package \code{R2jags}). Defaults to 10000. 28 | } 29 | \item{n.burnin}{ 30 | length of burn in, i.e. number of iterations to discard at the beginning(see the package \code{R2jags}). Defaults to 500. 31 | } 32 | 33 | \item{standardize}{ 34 | logical. Should the variables be standardized? Defaults to TRUE. 35 | } 36 | } 37 | \details{ 38 | See Wetzels & Wagenmakers, 2012. 39 | } 40 | \value{ 41 | The function returns a list with the following items: 42 | \item{PartCoef}{ 43 | Mean of the posterior samples of the unstandardized partial correlation (the regression coefficient beta in the equation V2 = intercept + alpha*control + beta*V1). 44 | } 45 | \item{BayesFactor}{ 46 | The Bayes factor for the existence of a partial correlation between V1 and V2, controlled for the control variable. A value greater than one indicates evidence in favor of partial correlation, a value smaller than one indicates evidence against partial correlation. 47 | } 48 | \item{PosteriorProbability}{ 49 | The posterior probability for the existence of a partial correlation between V1 and V2, controlled for the control variable. 50 | } 51 | \item{beta}{ 52 | The posterior samples for the regression coefficient beta. This is the unstandardized partial correlation. 53 | } 54 | \item{jagssamples}{The JAGS output for the MCMC estimation of the path. This object can be used to construct a traceplot. 55 | } 56 | } 57 | \references{ 58 | Liang, F., Paulo, R., Molina, G., Clyde, M. A., & Berger, J. O. (2008). Mixtures of g priors for Bayesian variable selection. Journal of the American Statistical Association, 103(481), 410-423. 59 | 60 | Nuijten, M. B., Wetzels, R., Matzke, D., Dolan, C. V., & Wagenmakers, E.-J. (2014). A default Bayesian hypothesis test for mediation. Behavior Research Methods. doi: 10.3758/s13428-014-0470-2 61 | 62 | Wetzels, R., & Wagenmakers, E.-J. (2012). 
A Default Bayesian Hypothesis Test for Correlations and Partial Correlations. Psychonomic Bulletin & Review, 19, 1057-1064. 63 | } 64 | \author{ 65 | Michele B. Nuijten , Ruud Wetzels, Dora Matzke, Conor V. Dolan, and Eric-Jan Wagenmakers. 66 | } 67 | \examples{ 68 | \dontrun{ 69 | # simulate partially correlated data 70 | X <- rnorm(50,0,1) 71 | C <- .5*X + rnorm(50,0,1) 72 | Y <- .3*X + .6*C + rnorm(50,0,1) 73 | 74 | # run jzs_partcor 75 | res <- jzs_partcor(X,Y,C) 76 | 77 | # plot posterior samples 78 | plot(res$beta_samples) 79 | 80 | # plot traceplot 81 | plot(res$jagssamples) 82 | # where the first chain (theta[1]) is for tau' and the second chain (theta[2]) for beta 83 | 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /man/jzs_corSD.Rd: -------------------------------------------------------------------------------- 1 | \name{jzs_corSD} 2 | \alias{jzs_corSD} 3 | 4 | \title{ 5 | A default Bayesian hypothesis test for correlation using the Savage-Dickey method. 6 | } 7 | \description{ 8 | This function can be used to perform a default Bayesian hypothesis test for correlation, using the Savage-Dickey method (Dickey & Lientz, 1970). The test uses a Jeffreys-Zellner-Siow prior set-up (Liang et al., 2008). 9 | } 10 | \usage{ 11 | jzs_corSD(V1, V2, 12 | SDmethod = c("dnorm", "splinefun", "logspline", "fit.st"), 13 | alternative = c("two.sided", "less", "greater"), 14 | n.iter=10000,n.burnin=500, standardize=TRUE) 15 | } 16 | \arguments{ 17 | \item{V1}{ 18 | a numeric vector. 19 | } 20 | \item{V2}{ 21 | a numeric vector of the same length as V1. 22 | } 23 | \item{SDmethod}{ 24 | specify the precise method with which the density of the posterior distribution will be estimated in order to compute the Savage-Dickey ratio. 25 | } 26 | \item{alternative}{ 27 | specify the alternative hypothesis for the correlation coefficient: \code{two.sided}, \code{greater} than zero, or \code{less} than zero. 28 | } 29 | \item{n.iter}{ 30 | number of total iterations per chain (see the package \code{R2jags}). Defaults to 10000. 31 | } 32 | \item{n.burnin}{ 33 | length of burn in, i.e. number of iterations to discard at the beginning(see the package \code{R2jags}). Defaults to 500. 34 | } 35 | \item{standardize}{ 36 | logical. Should the variables be standardized? Defaults to TRUE. 37 | } 38 | } 39 | \value{ 40 | A list containing the following components: 41 | \item{Correlation}{ 42 | The correlation coefficient for the relation between V1 and V2. The correlation coefficient is calculated by standardizing the mean of the posterior samples: mean(samples)*(sd(V1)/sd(V2)). 43 | } 44 | \item{BayesFactor}{ 45 | The Bayes factor for the correlation coefficient. A value greater than one indicates evidence in favor of correlation, a value smaller than one indicates evidence against correlation. 46 | } 47 | \item{PosteriorProbability}{ 48 | The posterior probability for the existence of a correlation between V1 and V2. 49 | } 50 | \item{alpha}{ 51 | The posterior samples for the correlation coefficient alpha. 52 | } 53 | \item{jagssamples}{The JAGS output for the MCMC estimation of the path. This object can be used to construct a traceplot.} 54 | } 55 | \references{ 56 | Dickey, J. M., & Lientz, B. P. (1970). The weighted likelihood ratio, sharp hypotheses about chances, the order of a Markov chain. The Annals of Mathematical Statistics, 214-226. 57 | 58 | Liang, F., Paulo, R., Molina, G., Clyde, M. A., & Berger, J. O. (2008). 
Mixtures of g priors for Bayesian variable selection. Journal of the American Statistical Association, 103(481), 410-423. 59 | 60 | Nuijten, M. B., Wetzels, R., Matzke, D., Dolan, C. V., & Wagenmakers, E.-J. (2014). A default Bayesian hypothesis test for mediation. Behavior Research Methods. doi: 10.3758/s13428-014-0470-2 61 | 62 | Wetzels, R., & Wagenmakers, E.-J. (2012). A Default Bayesian Hypothesis Test for Correlations and Partial Correlations. Psychonomic Bulletin & Review, 19, 1057-1064. 63 | } 64 | \author{ 65 | Michele B. Nuijten , Ruud Wetzels, Dora Matzke, Conor V. Dolan, and Eric-Jan Wagenmakers. 66 | } 67 | \section{Warning}{ 68 | In some cases the SDmethod \code{fit.st} will fail to converge. If so, another optimization method is used, using different starting values. If the other optimization method does not converge either or gives you a negative Bayes factor (which is meaningless), you could try one of the other SDmethod options or see \code{\link{jzs_cor}}. 69 | } 70 | \seealso{ 71 | \code{\link{jzs_cor}}, \code{\link{jzs_partcorSD}} 72 | } 73 | \examples{ 74 | \dontrun{ 75 | # generate correlational data 76 | X <- rnorm(100) 77 | Y <- .4*X + rnorm(100,0,1) 78 | 79 | # run jzs_cor 80 | result <- jzs_corSD(X,Y) 81 | 82 | # inspect posterior distribution 83 | plot(result$alpha_samples) 84 | 85 | # print a traceplot of the chains 86 | plot(result$jagssamples) 87 | 88 | } 89 | } -------------------------------------------------------------------------------- /man/jzs_partcorSD.Rd: -------------------------------------------------------------------------------- 1 | \name{jzs_partcorSD} 2 | \alias{jzs_partcorSD} 3 | \title{ 4 | A default Bayesian hypothesis test for partial correlation using the Savage-Dickey method. 5 | } 6 | \description{ 7 | This function can be used to perform a default Bayesian hypothesis test for partial correlation, using the Savage-Dickey method (Dickey & Lientz, 1970). The test uses a Jeffreys-Zellner-Siow prior set-up (Liang et al., 2008). 8 | } 9 | \usage{ 10 | jzs_partcorSD(V1, V2, control, 11 | SDmethod = c("dnorm", "splinefun", "logspline", "fit.st"), 12 | alternative = c("two.sided", "less", "greater"), 13 | n.iter=10000,n.burnin=500,standardize=TRUE) 14 | } 15 | \arguments{ 16 | \item{V1}{ 17 | a numeric vector. 18 | } 19 | \item{V2}{ 20 | a numeric vector of the same length as V1. 21 | } 22 | \item{control}{ 23 | a numeric vector of the same length as V1 and V2. This variable is partialled out of the correlation between V1 and V2. 24 | } 25 | \item{SDmethod}{ 26 | specify the precise method with which the density of the posterior distribution will be estimated in order to compute the Savage-Dickey ratio. 27 | } 28 | \item{alternative}{ 29 | specify the alternative hypothesis for the correlation coefficient: \code{two.sided}, \code{greater} than zero, or \code{less} than zero. 30 | } 31 | \item{n.iter}{ 32 | number of total iterations per chain (see the package \code{R2jags}). Defaults to 10000. 33 | } 34 | \item{n.burnin}{ 35 | length of burn in, i.e. number of iterations to discard at the beginning(see the package \code{R2jags}). Defaults to 500. 36 | } 37 | \item{standardize}{ 38 | logical. Should the variables be standardized? Defaults to TRUE. 39 | } 40 | } 41 | 42 | \value{ 43 | \item{PartCoef}{ 44 | Mean of the posterior samples of the unstandardized partial correlation (the regression coefficient beta in the equation V2 = intercept + alpha*control + beta*V1). 
45 | } 46 | \item{BayesFactor}{ 47 | The Bayes factor for the correlation coefficient. A value greater than one indicates evidence in favor of correlation, a value smaller than one indicates evidence against correlation. 48 | } 49 | \item{PosteriorProbability}{ 50 | The posterior probability for the existence of a correlation between V1 and V2. 51 | } 52 | \item{beta}{ 53 | The posterior samples for the regression coefficient beta. This is the unstandardized partial correlation. 54 | } 55 | \item{jagssamples}{The JAGS output for the MCMC estimation of the path. This object can be used to construct a traceplot.} 56 | } 57 | \references{ 58 | Dickey, J. M., & Lientz, B. P. (1970). The weighted likelihood ratio, sharp hypotheses about chances, the order of a Markov chain. The Annals of Mathematical Statistics, 214-226. 59 | 60 | Liang, F., Paulo, R., Molina, G., Clyde, M. A., & Berger, J. O. (2008). Mixtures of g priors for Bayesian variable selection. Journal of the American Statistical Association, 103(481), 410-423. 61 | 62 | Nuijten, M. B., Wetzels, R., Matzke, D., Dolan, C. V., & Wagenmakers, E.-J. (2014). A default Bayesian hypothesis test for mediation. Behavior Research Methods. doi: 10.3758/s13428-014-0470-2 63 | 64 | Wetzels, R., & Wagenmakers, E.-J. (2012). A Default Bayesian Hypothesis Test for Correlations and Partial Correlations. Psychonomic Bulletin & Review, 19, 1057-1064. 65 | } 66 | \author{ 67 | Michele B. Nuijten , Ruud Wetzels, Dora Matzke, Conor V. Dolan, and Eric-Jan Wagenmakers. 68 | } 69 | \section{Warning}{ 70 | In some cases the SDmethod \code{fit.st} will fail to converge. If so, another optimization method is used, using different starting values. If the other optimization method does not converge either or gives you a negative Bayes factor (which is meaningless), you could try one of the other SDmethod options or see \code{\link{jzs_partcor}}. 71 | } 72 | \seealso{ 73 | \code{\link{jzs_partcor}}, \code{\link{jzs_corSD}} 74 | } 75 | \examples{ 76 | # simulate partially correlated data 77 | X <- rnorm(50,0,1) 78 | C <- .5*X + rnorm(50,0,1) 79 | Y <- .3*X + .6*C + rnorm(50,0,1) 80 | 81 | # run jzs_partcor 82 | (res <- jzs_partcorSD(X,Y,C)) 83 | 84 | # plot posterior samples 85 | plot(res$beta_samples) 86 | 87 | # plot traceplot 88 | plot(res$jagssamples) 89 | # where the first chain (theta[1]) is for tau' and the second chain (theta[2]) for beta 90 | } 91 | -------------------------------------------------------------------------------- /man/BayesMed-package.Rd: -------------------------------------------------------------------------------- 1 | \name{BayesMed} 2 | \alias{BayesMed} 3 | \docType{package} 4 | \title{ 5 | A default Bayesian hypothesis test for mediation, correlation, and partial correlation. 6 | } 7 | \description{ 8 | This package can be used to perform a default Bayesian hypothesis test for mediation, correlation, and partial correlation, either analytically or through the Savage-Dickey method (Dickey & Lientz, 1970). All tests make use of a Jeffreys-Zellner-Siow prior set-up (Liang et al., 2008). This package is based on the paper by Nuijten, Wetzels, Matzke, Dolan, and Wagenmakers (under review). 9 | } 10 | \details{ 11 | \tabular{ll}{ 12 | Package: \tab BayesMed\cr 13 | Type: \tab Package\cr 14 | Version: \tab 1.2.2\cr 15 | Date: \tab 2018-05-28\cr 16 | License: \tab GPL-2\cr 17 | } 18 | The main functions \code{jzs_med} and \code{jzs_medSD} can be used to establish and test mediation in a data set. 
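A minimal call, sketched here for numeric vectors \code{X} (independent), \code{Y} (dependent), and \code{M} (mediator) of equal length, looks like:
\preformatted{
res <- jzs_med(independent = X, dependent = Y, mediator = M)
res$main_result   # estimates, Bayes factors, and posterior probabilities per path
res$CI_ab         # 95\% credible interval for the indirect effect alpha*beta
}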
With \code{jzs_cor} and \code{jzs_corSD} you can establish and test correlation, and with \code{jzs_partcor} and \code{jzs_partcorSD} partial correlation.
 19 | }
 20 | \author{
 21 | Michele B. Nuijten, Ruud Wetzels, Dora Matzke, Conor V. Dolan, and Eric-Jan Wagenmakers. Many thanks to Sacha Epskamp.
 22 | }
 23 | \references{
 24 | Dickey, J. M., & Lientz, B. P. (1970). The weighted likelihood ratio, sharp hypotheses about chances, the order of a Markov chain. The Annals of Mathematical Statistics, 214-226.
 25 | 
 26 | Liang, F., Paulo, R., Molina, G., Clyde, M. A., & Berger, J. O. (2008). Mixtures of g priors for Bayesian variable selection. Journal of the American Statistical Association, 103(481), 410-423.
 27 | 
 28 | Nuijten, M. B., Wetzels, R., Matzke, D., Dolan, C. V., & Wagenmakers, E.-J. (2014). A default Bayesian hypothesis test for mediation. Behavior Research Methods. doi: 10.3758/s13428-014-0470-2
 29 | 
 30 | Wetzels, R., & Wagenmakers, E.-J. (2012). A default Bayesian hypothesis test for correlations and partial correlations. Psychonomic Bulletin & Review, 19, 1057-1064.
 31 | 
 32 | }
 33 | \note{
 34 | This package requires the program "JAGS" (Just Another Gibbs Sampler) to be in the PATH variable. This program can be obtained from http://mcmc-jags.sourceforge.net.
 35 | }
 36 | \examples{
 37 | \dontrun{
 38 | # simulate mediational data
 39 | X <- rnorm(50,0,1)
 40 | M <- .5*X + rnorm(50,0,1)
 41 | Y <- .3*X + .6*M + rnorm(50,0,1)
 42 | 
 43 | ###########
 44 | 
 45 | # run jzs_med to perform the Bayesian hypothesis test for mediation
 46 | 
 47 | result <- jzs_med(independent=X,dependent=Y,mediator=M)
 48 | result
 49 | 
 50 | ### NOTE ###
 51 | #Sometimes this error will pop up:
 52 | #
 53 | #Error in solve.default(nItheta) :
 54 | #  system is computationally singular: reciprocal condition number = *some small number*
 55 | #Error in mydt2(0, mT, sT, dfT) : unused arguments (mT, sT, dfT)
 56 | #In addition: Warning message:
 57 | #In jzs_medSD(X, Y, M) :
 58 | #  fit.st did not converge. Alternative optimization method was used.
 59 | #
 60 | #If this happens, just run jzs_medSD() again.
 61 | #This usually solves the convergence problem. If it does not,
 62 | #try a different SD method. For instance: jzs_medSD(X,Y,M,SDmethod="dnorm").
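#(With SDmethod="dnorm" the Savage-Dickey density ratio is computed from a normal
# approximation to the posterior instead of a fitted t distribution, so the fit.st
# convergence problem above does not arise.)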
63 | # 64 | ############# 65 | 66 | # plot results 67 | plot(result$main_result) 68 | 69 | # plot posterior samples including credible interval, mean, and median 70 | # of the indirect effect alpha*beta 71 | plot(result$ab_samples) 72 | 73 | # inspect separate posterior distributions of alpha, beta, and tau_prime 74 | plot(result$alpha_samples) 75 | plot(result$beta_samples) 76 | plot(result$tau_prime_samples) 77 | 78 | # print a traceplot of the chains 79 | # where the first chain (theta[1]) is for tau' and the second chain (theta[2]) for beta 80 | plot(result$jagssamplesA) 81 | plot(result$jagssamplesTB) 82 | 83 | ########### 84 | 85 | # run jzs_medSD to perform the Savage-Dickey (SD) Bayesian hypothesis test for mediation 86 | 87 | result_SD <- jzs_medSD(independent=X,dependent=Y,mediator=M) 88 | result_SD 89 | 90 | # plot(results) 91 | plot(result_SD$main_result) 92 | 93 | # plot posterior samples 94 | # including credible interval, mean, and median of the indirect effect alpha*beta 95 | plot(result_SD$ab_samples) 96 | 97 | # inspect separate posterior distributions of alpha, beta, and tau_prime 98 | plot(result_SD$alpha_samples) 99 | plot(result_SD$beta_samples) 100 | plot(result_SD$tau_prime_samples) 101 | 102 | # print a traceplot of the chains 103 | # where the first chain (theta[1]) is for tau' and the second chain (theta[2]) for beta 104 | plot(result_SD$jagssamplesA) 105 | plot(result_SD$jagssamplesTB) 106 | } 107 | } 108 | -------------------------------------------------------------------------------- /man/jzs_med.Rd: -------------------------------------------------------------------------------- 1 | \name{jzs_med} 2 | \alias{jzs_med} 3 | 4 | \title{ 5 | Perform a default Bayesian hypothesis test for mediation. 6 | } 7 | \description{ 8 | This function can be used to perform a default Bayesian hypothesis test for mediation, using a Jeffreys-Zellner-Siow prior set-up (Liang et al., 2008). The test is based on the default Bayesian hypothesis tests for correlation and partial correlation (Wetzels & Wagenmakers, 2012). 9 | } 10 | \usage{ 11 | jzs_med(independent, dependent, mediator, 12 | alternativeA=c("two.sided","less","greater"), 13 | alternativeB=c("two.sided","less","greater"), 14 | alternativeT=c("two.sided","less","greater"), 15 | n.iter=10000,n.burnin=500,standardize=TRUE) 16 | } 17 | \arguments{ 18 | \item{independent}{ 19 | a vector containing values for the independent variable. 20 | } 21 | \item{dependent}{ 22 | a vector containing values for the dependent variable. 23 | } 24 | \item{mediator}{ 25 | a vector containing values for the mediating variable. 26 | } 27 | \item{alternativeA}{ 28 | specify the alternative hypothesis for path alpha: \code{two.sided}, \code{greater} than zero, or \code{less} than zero. 29 | } 30 | \item{alternativeB}{ 31 | specify the alternative hypothesis for path beta: \code{two.sided}, \code{greater} than zero, or \code{less} than zero. 32 | } 33 | \item{alternativeT}{ 34 | specify the alternative hypothesis for path tau_accent: \code{two.sided}, \code{greater} than zero, or \code{less} than zero. 35 | } 36 | \item{n.iter}{ 37 | number of total iterations per chain (see the package \code{R2jags}). Defaults to 10000. 38 | } 39 | \item{n.burnin}{ 40 | length of burn in, i.e. number of iterations to discard at the beginning(see the package \code{R2jags}). Defaults to 500. 41 | } 42 | \item{standardize}{ 43 | logical. Should the variables be standardized? Defaults to TRUE. 44 | } 45 | } 46 | \details{ 47 | The test consists of four steps. 
Firstly, it computes the posterior probability for the existence of the path between the independent and the mediating variable (path alpha) by means of a default Bayesian hypothesis test for correlation (Wetzels & Wagenmakers, 2012).
 48 | 
 49 | Secondly, it computes the posterior probability for the existence of the path between the mediating and the dependent variable, controlled for the influence of the independent variable (path beta), by means of a default Bayesian hypothesis test for partial correlation (Wetzels & Wagenmakers, 2012).
 50 | 
 51 | Thirdly, the evidence for mediation is computed by multiplying the posterior probabilities for the paths alpha and beta.
 52 | 
 53 | Fourthly, the evidence for full mediation is computed by multiplying the evidence for mediation by one minus the posterior probability for the existence of path tau', the path between the independent and dependent variable, controlled for the mediator.
 54 | }
 55 | \value{
 56 | jzs_med returns a list containing visible (printed) and invisible components. The visible components are a data frame with the main results and the 95\% credible interval of the mediated effect (see the next section). The invisible components contain additional information on the parameters and can be used for the plot functions, etc.
 57 | }
 58 | 
 59 | \section{Visible Output}{
 60 | \describe{
 61 | 
 62 | The visible output, the output that is printed to the screen, is a list containing a data frame and a credible interval.
 63 | 
 64 | \item{Estimate_alpha}{The mean of the posterior samples of alpha.
 65 | }
 66 | \item{Estimate_beta}{The mean of the posterior samples of beta.
 67 | }
 68 | \item{Estimate_tau_prime}{The mean of the posterior samples of tau_prime.
 69 | }
 70 | \item{Estimate_Mediation (alpha*beta)}{The mean of the posterior samples of the indirect effect alpha*beta.}
 71 | 
 72 | \item{BF_alpha}{The Bayes factor for the existence of path alpha. A value greater than one indicates evidence that alpha exists, a value smaller than one indicates evidence that alpha does not exist.
 73 | }
 74 | \item{BF_beta}{The Bayes factor for the existence of path beta. A value greater than one indicates evidence that beta exists, a value smaller than one indicates evidence that beta does not exist.
 75 | }
 76 | \item{BF_tau_prime}{The Bayes factor for the existence of path tau_prime. A value greater than one indicates evidence that tau_prime exists, a value smaller than one indicates evidence that tau_prime does not exist.
 77 | }
 78 | \item{BF_Mediation (alpha*beta)}{The Bayes factor for mediation compared to no mediation. A value greater than one indicates evidence in favor of mediation, a value smaller than one indicates evidence against mediation.
79 | } 80 | 81 | \item{PostProb_alpha}{The posterior probability that the path alpha (the relation between the independent and the mediating variable) is not zero.} 82 | \item{PostProb_beta}{The posterior probability that the path beta (the relation between the mediating and the dependent variable after controlling for the independent variable) is not zero.} 83 | \item{PostProb_tau_prime}{The posterior probability that the path tau_prime (the relation between the independent and the dependent variable after controlling for the mediator) is not zero.} 84 | \item{PostProb_Mediation (alpha*beta)}{The posterior probability that the relation between the independent and the dependent variable is mediated by the specified mediator.} 85 | 86 | \item{CI_ab}{The 95\% credible interval of the indirect effect "ab".} 87 | } 88 | } 89 | 90 | \references{ 91 | Liang, F., Paulo, R., Molina, G., Clyde, M. A., & Berger, J. O. (2008). Mixtures of g priors for Bayesian variable selection. Journal of the American Statistical Association, 103(481), 410-423. 92 | 93 | Nuijten, M. B., Wetzels, R., Matzke, D., Dolan, C. V., & Wagenmakers, E.-J. (2014). A default Bayesian hypothesis test for mediation. Behavior Research Methods. doi: 10.3758/s13428-014-0470-2 94 | 95 | Wetzels, R., & Wagenmakers, E.-J. (2012). A Default Bayesian Hypothesis Test for Correlations and Partial Correlations. Psychonomic Bulletin & Review, 19, 1057-1064. 96 | } 97 | \author{ 98 | Michele B. Nuijten , Ruud Wetzels, Dora Matzke, Conor V. Dolan, and Eric-Jan Wagenmakers. 99 | } 100 | \seealso{ 101 | \code{\link{jzs_cor}}, \code{\link{jzs_partcor}}, \code{\link{jzs_medSD}} 102 | } 103 | \examples{ 104 | \dontrun{ 105 | # simulate mediational data 106 | a <- .5 107 | b <- .6 108 | t_prime <- .3 109 | 110 | X <- rnorm(50,0,1) 111 | M <- a*X + rnorm(50,0,1) 112 | Y <- t_prime*X + b*M + rnorm(50,0,1) 113 | 114 | # run jzs_med 115 | 116 | result <- jzs_med(independent=X,dependent=Y,mediator=M) 117 | result 118 | 119 | #------------------------------------- 120 | 121 | # load Firefighter data 122 | data(Firefighters) 123 | 124 | X <- Firefighters$x 125 | M <- Firefighters$m 126 | Y <- Firefighters$y 127 | 128 | # run jzs_med 129 | result <- jzs_med(independent=X,dependent=Y,mediator=M) 130 | 131 | # plot the result in a mediation diagram 132 | plot(result$main_result) 133 | 134 | # inspect posterior distribution of the coefficients 135 | plot(result$alpha_samples) 136 | plot(result$beta_samples) 137 | plot(result$tau_prime_samples) 138 | 139 | # print a traceplot of the chains 140 | plot(result$jagssamplesA) 141 | plot(result$jagssamplesTB) 142 | # where the first chain (theta[1]) is for tau' and the second chain (theta[2]) for beta 143 | 144 | # calculate and plot a 95\% credible interval for the 145 | # posterior mean of the indirect effect 146 | result$CI_ab 147 | plot(result$ab_samples) 148 | 149 | 150 | } 151 | } -------------------------------------------------------------------------------- /R/jzs_cor.R: -------------------------------------------------------------------------------- 1 | jzs_cor <- 2 | function(V1, V2, 3 | alternative = c("two.sided","less","greater"), 4 | n.iter = 10000, n.burnin = 500, standardize = TRUE, subdivisions = 200){ 5 | 6 | runif(1) # defines .Random.seed 7 | 8 | # standardize variables 9 | if(standardize==TRUE){ 10 | X <- (V1-mean(V1))/sd(V1) 11 | Y <- (V2-mean(V2))/sd(V2) 12 | }else { 13 | X <- V1 14 | Y <- V2 15 | } 16 | 17 | r <- cor(X,Y) 18 | n <- length(X) 19 | 20 | # main function to analytically 
calculate the BF for correlation 21 | # see Wetzels, R. & Wagenmakers, E.-J. (2012). A default Bayesian hypothesis test for correlations and partial correlations. Psychonomic Bulletin & Review, 19, 1057-1064 22 | # the jzs_corbf function is based on updated R code that can handle larger values of n 23 | 24 | jzs_corbf=function(r,n){ 25 | int=function(r,n,g){ 26 | exp( 27 | ((n-2)/2)*log(1+g)+ 28 | (-(n-1)/2)*log(1+(1-r^2)*g)+ 29 | (-3/2)*log(g)+ 30 | -n/(2*g)) 31 | } 32 | 33 | bf10 <- try(sqrt((n/2))/gamma(1/2)* 34 | integrate(int, lower = 0, upper = Inf, r = r, n = n, subdivisions = subdivisions)$value) 35 | 36 | # if the evidence is overwhelming, the BF will become infinite 37 | # to avoid this resulting in an error, give back the max possible number 38 | if(class(bf10)=="try-error" & grepl("non-finite function value",bf10[1])){ 39 | bf10 <- .Machine$double.xmax 40 | message("Note: the Error above was caused because the BF from the function jzs_corbf was approaching infinity. To avoid the function from crashing, the returned BF is the largest possible number in R.") 41 | } else { 42 | if(class(bf10)=="try-error" & !grepl("non-finite function value",bf10[1])){ 43 | bf10 <- NA 44 | message("An error occurred. The BF could not be calculated") 45 | } 46 | } 47 | return(bf10) 48 | } 49 | 50 | BF <- jzs_corbf(r,n) 51 | 52 | ### the next part is needed to impose an order restriction 53 | ### for the order restrictions we need to estimate the posterior samples 54 | 55 | #========================================================== 56 | # load JAGS models 57 | #========================================================== 58 | 59 | jagsmodelcorrelation <- 60 | 61 | "####### Cauchy-prior on single regression coefficient ####### 62 | model 63 | 64 | { 65 | 66 | for (i in 1:n) 67 | 68 | { 69 | mu[i] <- intercept + alpha*x[i] 70 | y[i] ~ dnorm(mu[i],phi) 71 | 72 | } 73 | 74 | # uninformative prior on the intercept intercept, 75 | # Jeffreys' prior on precision phi 76 | intercept ~ dnorm(0,.0001) 77 | phi ~ dgamma(.0001,.0001) 78 | #phi ~ dgamma(0.0000001,0.0000001) #JAGS accepts even this 79 | #phi ~ dgamma(0.01,0.01) #WinBUGS wants this 80 | 81 | # inverse-gamma prior on g: 82 | g <- 1/invg 83 | a.gamma <- 1/2 84 | b.gamma <- n/2 85 | invg ~ dgamma(a.gamma,b.gamma) 86 | 87 | 88 | # g-prior on beta: 89 | vari <- (g/phi) * invSigma 90 | prec <- 1/vari 91 | alpha ~ dnorm(0, prec) 92 | } 93 | 94 | # Explanation------------------------------------------------------------------ 95 | # Prior on g: 96 | # We know that g ~ inverse_gamma(1/2, n/2), with 1/2 the shape 97 | # parameter and n/2 the scale parameter. 98 | # It follows that 1/g ~ gamma(1/2, 2/n). 99 | # However, BUGS/JAGS uses the *rate parameterization* 1/theta instead of the 100 | # scale parametrization theta. 
Hence we obtain, in de BUGS/JAGS rate notation: 101 | # 1/g ~ dgamma(1/2, n/2) 102 | #------------------------------------------------------------------------------ 103 | " 104 | 105 | jags.model.file1 <- tempfile(fileext=".txt") 106 | write(jagsmodelcorrelation,jags.model.file1) 107 | 108 | #======================================================================== 109 | # Estimate Posterior Distribution for the Correlation Coefficient Alpha 110 | #======================================================================== 111 | 112 | x <- X 113 | y <- Y 114 | 115 | invSigma <- solve(t(x)%*%x) 116 | 117 | jags.data <- list("n", "x", "y", "invSigma") 118 | jags.params <- c("alpha", "g") 119 | jags.inits <- list( 120 | list(alpha = 0.0), #chain 1 starting value 121 | list(alpha = -0.3), #chain 2 starting value 122 | list(alpha = 0.3)) #chain 3 starting value 123 | 124 | jagssamples <- jags(data=jags.data, inits=jags.inits, jags.params, 125 | n.chains=3, n.iter=n.iter, DIC=T, 126 | n.burnin=n.burnin, n.thin=1, model.file=jags.model.file1) 127 | 128 | # estimate the posterior regression coefficient and scaling factor g 129 | alpha <- jagssamples$BUGSoutput$sims.list$alpha[,1] 130 | g <- jagssamples$BUGSoutput$sims.list$g 131 | 132 | #------------------------------------------------------- 133 | 134 | # one-sided test? 135 | 136 | # save BF for one-tailed test 137 | # BF21 = 2*{proportion posterior samples of alpha < 0} 138 | 139 | propposterior_less <- sum(alpha<0)/length(alpha) 140 | propposterior_greater <- sum(alpha>0)/length(alpha) 141 | 142 | # posterior proportion cannot be zero, because this renders a BF of zero 143 | # none of the samples of the parameter follow the restriction 144 | # ergo: the posterior proportion is smaller than 1/length(parameter) 145 | 146 | if(propposterior_less==0){ 147 | propposterior_less <- 1/length(alpha) 148 | } 149 | 150 | if(propposterior_greater==0){ 151 | propposterior_greater <- 1/length(alpha) 152 | } 153 | 154 | BF21_less <- 2*propposterior_less 155 | BF21_greater <- 2*propposterior_greater 156 | 157 | 158 | if(alternative[1]=="less"){ 159 | # BF10 = p(D|a~cauchy(0,1))/p(D|a=0) 160 | BF10 <- BF 161 | 162 | # BF21 = p(D|a~cauchy-(0,1))/p(D|a~cauchy(0,1)) 163 | # BF21 = 2*{proportion posterior samples of alpha < 0} 164 | BF21 <- BF21_less 165 | 166 | BF <- BF10*BF21 167 | 168 | } else if(alternative[1]=="greater"){ 169 | # BF10 = p(D|a~cauchy(0,1))/p(D|a=0) 170 | BF10 <- BF 171 | 172 | # BF21 = p(D|a~cauchy+(0,1))/p(D|a~cauchy(0,1)) 173 | # BF21 = 2*{proportion posterior samples of alpha > 0} 174 | BF21 <- BF21_greater 175 | 176 | BF <- BF10*BF21 177 | 178 | } 179 | 180 | #-------------------------------------------------------- 181 | 182 | # convert BFs to posterior probability 183 | # prob cannot be exactly 1 or 0 184 | prob_r <- BF/(BF+1) 185 | 186 | if(prob_r == 1){ 187 | prob_r <- prob_r - .Machine$double.eps 188 | } 189 | if(prob_r == 0){ 190 | prob_r <- prob_r + .Machine$double.eps 191 | } 192 | 193 | #================================================== 194 | 195 | # convert posterior samples for the regression coefficient x-y to correlation 196 | cor_coef <- alpha*(sd(x)/sd(y)) 197 | 198 | #=================================================== 199 | 200 | res <- list(Correlation=mean(cor_coef), 201 | BayesFactor=BF, 202 | PosteriorProbability=prob_r, 203 | cor_coef_samples=cor_coef, 204 | jagssamples=jagssamples) 205 | 206 | class(res) <- c("jzs_med","list") 207 | class(res$jagssamples) <- "rjags" 208 | class(res$cor_coef_samples) <- "CI" 209 | 210 | 
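    # the S3 classes assigned above determine how the result behaves:
    # - "jzs_med" makes print() dispatch to print.jzs_med(), which hides the raw
    #   posterior samples and the JAGS objects when the result is printed
    # - "rjags" on jagssamples makes plot() dispatch to plot.rjags(), i.e. traceplots
    # - "CI" on cor_coef_samples makes plot() dispatch to plot.CI(), a histogram with
    #   the 95% credible interval, mean, and median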
return(res) 211 | } 212 | -------------------------------------------------------------------------------- /R/jzs_partcor.R: -------------------------------------------------------------------------------- 1 | jzs_partcor <- 2 | function(V1,V2,control, 3 | alternative=c("two.sided","less","greater"), 4 | n.iter=10000,n.burnin=500,standardize=TRUE){ 5 | 6 | runif(1) # defines .Random.seed 7 | 8 | # standardize variables 9 | if(standardize==TRUE){ 10 | M <- (V1-mean(V1))/sd(V1) 11 | Y <- (V2-mean(V2))/sd(V2) 12 | X <- (control-mean(control))/sd(control) 13 | } else { 14 | M <- V1 15 | Y <- V2 16 | X <- control 17 | } 18 | 19 | r0 <- sqrt(summary(lm(M~X))$r.squared) 20 | r1 <- sqrt(summary(lm(M~X+Y))$r.squared) 21 | p0 <- 1 22 | p1 <- 2 23 | n <- length(X) 24 | 25 | # main function to analytically calculate the BF for partial correlation 26 | # see Wetzels, R. & Wagenmakers, E.-J. (2012). A default Bayesian hypothesis test for correlations and partial correlations. Psychonomic Bulletin & Review, 19, 1057-1064 27 | # the jzs_partcorbf function is based on updated R code that can handle larger values of n 28 | 29 | jzs_partcorbf=function(r0,r1,p0,p1,n){ 30 | int=function(r,n,p,g){ 31 | exp( 32 | ((n-1-p)/2)*log(1+g)+ 33 | (-(n-1)/2)*log(1+(1-r^2)*g)+ 34 | (-3/2)*log(g)+ 35 | -n/(2*g)) 36 | } 37 | 38 | bf10 <- try(integrate(int,lower=0,upper=Inf,r=r1,p=p1,n=n)$value/ 39 | integrate(int,lower=0,upper=Inf,r=r0,p=p0,n=n)$value) 40 | 41 | # if the evidence is overwhelming, the BF will become infinite 42 | # to avoid this resulting in an error, give back the max possible number 43 | if(class(bf10)=="try-error" & grepl("non-finite function value",bf10[1])){ 44 | bf10 <- .Machine$double.xmax 45 | message("Note: the Error above was caused because the BF from the function jzs_partcorbf was approaching infinity. To avoid the function from crashing, the returned BF is the largest possible number in R.") 46 | } else { 47 | if(class(bf10)=="try-error" & !grepl("non-finite function value",bf10[1])){ 48 | bf10 <- NA 49 | message("An error occurred. The BF could not be calculated") 50 | } 51 | } 52 | 53 | return(bf10) 54 | } 55 | 56 | BF <- jzs_partcorbf(r0,r1,p0,p1,n) 57 | 58 | ### the next part is needed to impose order restrictions 59 | ### for the order restrictions we need to estimate the posterior samples 60 | 61 | #========================================================== 62 | # load JAGS models 63 | #========================================================== 64 | 65 | 66 | jagsmodelpartialcorrelation <- 67 | 68 | "####### Cauchy-prior on beta and tau' ####### 69 | model 70 | 71 | { 72 | 73 | for (i in 1:n) 74 | 75 | { 76 | mu[i] <- intercept + theta[1]*x[i,1] + theta[2]*x[i,2] 77 | y[i] ~ dnorm(mu[i],phi) 78 | 79 | } 80 | 81 | # uninformative prior on intercept alpha, 82 | # Jeffreys' prior on precision phi 83 | intercept ~ dnorm(0,.0001) 84 | phi ~ dgamma(.0001,.0001) 85 | #phi ~ dgamma(0.0000001,0.0000001) #JAGS accepts even this 86 | #phi ~ dgamma(0.01,0.01) #WinBUGS wants this 87 | 88 | # inverse-gamma prior on g: 89 | g <- 1/invg 90 | a.gamma <- 1/2 91 | b.gamma <- n/2 92 | invg ~ dgamma(a.gamma,b.gamma) 93 | 94 | # Ntzoufras, I. (2009). Bayesian Modeling Using WinBUGS. 95 | # New Jersey: John Wiley & Sons, Inc. p. 
167 96 | # calculation of the inverse matrix of V 97 | inverse.V <- inverse(V) 98 | # calculation of the elements of prior precision matrix 99 | for(i in 1:2) 100 | { 101 | for (j in 1:2) 102 | { 103 | prior.T[i,j] <- inverse.V[i,j] * phi/g 104 | } 105 | } 106 | # multivariate prior for the beta vector 107 | theta[1:2] ~ dmnorm( mu.theta, prior.T ) 108 | for(i in 1:2) { mu.theta[i] <- 0 } 109 | 110 | } 111 | 112 | # Explanation----------------------------------------------------------------- 113 | # Prior on g: 114 | # We know that g ~ inverse_gamma(1/2, n/2), with 1/2 the shape parameter and 115 | # n/2 the scale parameter. 116 | # It follows that 1/g ~ gamma(1/2, 2/n). 117 | # However, BUGS/JAGS uses the *rate parameterization* 1/theta instead of the 118 | # scale parametrization theta. Hence we obtain, in de BUGS/JAGS rate notation: 119 | # 1/g ~ dgamma(1/2, n/2) 120 | # Also note: JAGS does not want [,] structure 121 | #----------------------------------------------------------------------------- 122 | " 123 | 124 | jags.model.file2 <- tempfile(fileext=".txt") 125 | write(jagsmodelpartialcorrelation,jags.model.file2) 126 | 127 | #========================================================== 128 | # BF FOR PARTIAL CORRELATION (MY|X) 129 | #========================================================== 130 | 131 | x <- cbind(X,M) 132 | y <- Y 133 | 134 | V <- solve(t(x)%*%x) #NB I switched to the notation from Ntzoufras, p. 167 135 | 136 | jags.data <- list("n", "x", "y", "V") 137 | jags.params <- c("theta") 138 | jags.inits <- list( 139 | list(theta = c(0.0,0.3)), #chain 1 starting value 140 | list(theta = c(0.3, 0.0)), #chain 2 starting value 141 | list(theta = c(-.15,.15))) #chain 3 starting value 142 | 143 | jagssamples <- jags(data=jags.data, inits=jags.inits, jags.params, 144 | n.chains=3, n.iter=n.iter, DIC=T, 145 | n.burnin=n.burnin, n.thin=1, model.file=jags.model.file2) 146 | 147 | beta <- jagssamples$BUGSoutput$sims.list$theta[,2] 148 | 149 | #------------------------------------------------------- 150 | 151 | # one-sided test? 
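    # the unrestricted Cauchy prior is symmetric around zero and thus places mass 1/2
    # on each side; the Bayes factor of an order-restricted alternative (beta < 0 or
    # beta > 0) against the unrestricted alternative is therefore the posterior mass
    # on that side divided by 1/2, i.e. 2 * the proportion of posterior samples
    # satisfying the restriction -- which is what BF21 below computes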
152 | 153 | # save BF for one-tailed test 154 | # BF21 = 2*{proportion posterior samples of beta < 0} 155 | propposterior_less <- sum(beta<0)/length(beta) 156 | propposterior_greater <- sum(beta>0)/length(beta) 157 | 158 | # posterior proportion cannot be zero, because this renders a BF of zero 159 | # none of the samples of the parameter follow the restriction 160 | # ergo: the posterior proportion is smaller than 1/length(parameter) 161 | 162 | if(propposterior_less==0){ 163 | propposterior_less <- 1/length(beta) 164 | } 165 | 166 | 167 | if(propposterior_greater==0){ 168 | propposterior_greater <- 1/length(beta) 169 | } 170 | 171 | BF21_less <- 2*propposterior_less 172 | BF21_greater <- 2*propposterior_greater 173 | 174 | if(alternative[1]=="less"){ 175 | # BF10 = p(D|b~cauchy(0,1))/p(D|b=0) 176 | BF10 <- BF 177 | 178 | # BF21 = p(D|b~cauchy-(0,1))/p(D|b~cauchy(0,1)) 179 | # BF21 = 2*{proportion posterior samples of beta < 0} 180 | BF21 <- BF21_less 181 | 182 | BF <- BF10*BF21 183 | 184 | } else if(alternative[1]=="greater"){ 185 | # BF10 = p(D|b~cauchy(0,1))/p(D|b=0) 186 | BF10 <- BF 187 | 188 | # BF21 = p(D|b~cauchy+(0,1))/p(D|b~cauchy(0,1)) 189 | # BF21 = 2*{proportion posterior samples of beta > 0} 190 | BF21 <- BF21_greater 191 | 192 | BF <- BF10*BF21 193 | 194 | } 195 | 196 | #--------------------------------------------------- 197 | 198 | # convert BFs to posterior probability 199 | # prob cannot be exactly 1 or 0 200 | prob_b <- BF/(BF+1) 201 | 202 | if(prob_b == 1){ 203 | prob_b <- prob_b - .Machine$double.eps 204 | } 205 | if(prob_b == 0){ 206 | prob_b <- prob_b + .Machine$double.eps 207 | } 208 | 209 | 210 | #==================================================== 211 | 212 | res <- list(PartCoef=mean(beta), 213 | BayesFactor=BF, 214 | PosteriorProbability=prob_b, 215 | beta_samples=beta, 216 | jagssamples=jagssamples) 217 | 218 | class(res) <- c("jzs_med","list") 219 | class(res$jagssamples) <- "rjags" 220 | class(res$beta_samples) <- "CI" 221 | 222 | return(res) 223 | 224 | } 225 | -------------------------------------------------------------------------------- /R/jzs_corSD.R: -------------------------------------------------------------------------------- 1 | jzs_corSD <- 2 | function(V1,V2, 3 | SDmethod=c("dnorm","splinefun","logspline","fit.st"), 4 | alternative=c("two.sided","less","greater"), 5 | n.iter=10000,n.burnin=500,standardize=TRUE){ 6 | 7 | runif(1) # defines .Random.seed 8 | 9 | # standardize variables 10 | if(standardize==TRUE){ 11 | X <- (V1-mean(V1))/sd(V1) 12 | Y <- (V2-mean(V2))/sd(V2) 13 | }else { 14 | X <- V1 15 | Y <- V2 16 | } 17 | 18 | n <- length(X) 19 | r <- cor(X,Y) 20 | 21 | #========================================================== 22 | # load JAGS models 23 | #========================================================== 24 | 25 | jagsmodelcorrelation <- 26 | 27 | "####### Cauchy-prior on single beta ####### 28 | model 29 | 30 | { 31 | 32 | for (i in 1:n) 33 | 34 | { 35 | mu[i] <- intercept + alpha*x[i] 36 | y[i] ~ dnorm(mu[i],phi) 37 | 38 | } 39 | 40 | # uninformative prior on the intercept intercept, 41 | # Jeffreys' prior on precision phi 42 | intercept ~ dnorm(0,.0001) 43 | phi ~ dgamma(.0001,.0001) 44 | #phi ~ dgamma(0.0000001,0.0000001) #JAGS accepts even this 45 | #phi ~ dgamma(0.01,0.01) #WinBUGS wants this 46 | 47 | # inverse-gamma prior on g: 48 | g <- 1/invg 49 | a.gamma <- 1/2 50 | b.gamma <- n/2 51 | invg ~ dgamma(a.gamma,b.gamma) 52 | 53 | 54 | # g-prior on beta: 55 | vari <- (g/phi) * invSigma 56 | prec <- 1/vari 57 | alpha ~ dnorm(0, 
prec) 58 | } 59 | 60 | # Explanation------------------------------------------------------------------ 61 | # Prior on g: 62 | # We know that g ~ inverse_gamma(1/2, n/2), with 1/2 the shape 63 | # parameter and n/2 the scale parameter. 64 | # It follows that 1/g ~ gamma(1/2, 2/n). 65 | # However, BUGS/JAGS uses the *rate parameterization* 1/theta instead of the 66 | # scale parametrization theta. Hence we obtain, in de BUGS/JAGS rate notation: 67 | # 1/g ~ dgamma(1/2, n/2) 68 | #------------------------------------------------------------------------------ 69 | " 70 | jags.model.file1 <- tempfile(fileext=".txt") 71 | write(jagsmodelcorrelation,jags.model.file1) 72 | 73 | #========================================================== 74 | # BF FOR CORRELATION 75 | #========================================================== 76 | 77 | x <- X 78 | y <- Y 79 | 80 | invSigma <- solve(t(x)%*%x) 81 | 82 | jags.data <- list("n", "x", "y", "invSigma") 83 | jags.params <- c("alpha", "g") 84 | jags.inits <- list( 85 | list(alpha = 0.0), #chain 1 starting value 86 | list(alpha = -0.3), #chain 2 starting value 87 | list(alpha = 0.3)) #chain 3 starting value 88 | 89 | jagssamples <- jags(data=jags.data, inits=jags.inits, jags.params, 90 | n.chains=3, n.iter=n.iter, DIC=T, 91 | n.burnin=n.burnin, n.thin=1, model.file=jags.model.file1) 92 | 93 | # estimate the posterior regression coefficient and scaling factor g 94 | alpha <- jagssamples$BUGSoutput$sims.list$alpha[,1] 95 | g <- jagssamples$BUGSoutput$sims.list$g 96 | 97 | 98 | #------------------------------------------------------------------ 99 | 100 | if(SDmethod[1]=="fit.st"){ 101 | 102 | mydt <- function(x, m, s, df) dt((x-m)/s, df)/s 103 | 104 | foo <- try({ 105 | fit.t <- QRM::fit.st(alpha) 106 | nu <- as.numeric(fit.t$par.ests[1]) #degrees of freedom 107 | mu <- as.numeric(fit.t$par.ests[2]) 108 | sigma <- abs(as.numeric(fit.t$par.ests[3])) # This is a hack -- with high n occasionally 109 | # sigma switches sign. 110 | }) 111 | 112 | if(!("try-error"%in%class(foo))){ 113 | 114 | # BAYES FACTOR ALPHA 115 | BF <- 1/(mydt(0,mu,sigma,nu)/dcauchy(0)) 116 | 117 | } else { 118 | 119 | warning("fit.st did not converge, alternative optimization method was used.","\n") 120 | 121 | mydt2 <- function(pars){ 122 | 123 | m <- pars[1] 124 | s <- abs(pars[2]) # no negative standard deviation 125 | df <- abs(pars[3]) # no negative degrees of freedom 126 | 127 | -2*sum(dt((alpha-m)/s, df,log=TRUE)-log(s)) 128 | } 129 | 130 | res <- optim(c(mean(alpha),sd(alpha),20),mydt2)$par 131 | 132 | m <- res[1] 133 | s <- res[2] 134 | df <- res[3] 135 | 136 | 137 | # ALTERNATIVE BAYES FACTOR ALPHA 138 | BF <- 1/(mydt2(0,m,s,df)/dcauchy(0)) 139 | 140 | } 141 | 142 | #------------------------- 143 | 144 | } else if(SDmethod[1]=="dnorm"){ 145 | BF <- 1/(dnorm(0,mean(alpha),sd(alpha))/dcauchy(0)) 146 | 147 | #------------------------- 148 | 149 | } else if(SDmethod[1]=="splinefun"){ 150 | f <- splinefun(density(alpha)) 151 | BF <- 1/(f(0)/dcauchy(0)) 152 | 153 | #------------------------- 154 | 155 | } else if (SDmethod[1]=="logspline"){ 156 | fit.posterior <- polspline::logspline(alpha) 157 | posterior.pp <- polspline::dlogspline(0, fit.posterior) # this gives the pdf at point b2 = 0 158 | prior.pp <- dcauchy(0) # height of prior at b2 = 0 159 | BF <- prior.pp/posterior.pp 160 | 161 | } 162 | 163 | #-------------------------------------------------------- 164 | 165 | # one-sided test? 
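  # Worked illustration (hypothetical numbers, not computed by the package):
  # if the two-sided Savage-Dickey Bayes factor BF10 is 6 and 90% of the
  # posterior samples of alpha are negative, then BF21 = 2 * .90 = 1.8 and the
  # one-sided Bayes factor for alternative = "less" is BF10 * BF21 = 10.8.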
166 | 167 | # save BF for one-tailed test 168 | # BF21 = 2*{proportion posterior samples of alpha < 0} 169 | 170 | propposterior_less <- sum(alpha<0)/length(alpha) 171 | propposterior_greater <- sum(alpha>0)/length(alpha) 172 | 173 | # posterior proportion cannot be zero, because this renders a BF of zero 174 | # none of the samples of the parameter follow the restriction 175 | # ergo: the posterior proportion is smaller than 1/length(parameter) 176 | 177 | if(propposterior_less==0){ 178 | propposterior_less <- 1/length(alpha) 179 | } 180 | 181 | if(propposterior_greater==0){ 182 | propposterior_greater <- 1/length(alpha) 183 | } 184 | 185 | BF21_less <- 2*propposterior_less 186 | BF21_greater <- 2*propposterior_greater 187 | 188 | if(alternative[1]=="less"){ 189 | # BF10 = p(D|a~cauchy(0,1))/p(D|a=0) 190 | BF10 <- BF 191 | 192 | # BF21 = p(D|a~cauchy-(0,1))/p(D|a~cauchy(0,1)) 193 | # BF21 = 2*{proportion posterior samples of alpha < 0} 194 | BF21 <- BF21_less 195 | 196 | BF <- BF10*BF21 197 | 198 | } else if(alternative[1]=="greater"){ 199 | # BF10 = p(D|a~cauchy(0,1))/p(D|a=0) 200 | BF10 <- BF 201 | 202 | # BF21 = p(D|a~cauchy+(0,1))/p(D|a~cauchy(0,1)) 203 | # BF21 = 2*{proportion posterior samples of alpha > 0} 204 | BF21 <- BF21_greater 205 | 206 | BF <- BF10*BF21 207 | 208 | } 209 | 210 | #-------------------------------------------------------- 211 | 212 | # convert BFs to posterior probability 213 | # prob cannot be exactly 1 or 0 214 | prob_r <- BF/(BF+1) 215 | 216 | if(prob_r == 1){ 217 | prob_r <- prob_r - .Machine$double.eps 218 | } 219 | if(prob_r == 0){ 220 | prob_r <- prob_r + .Machine$double.eps 221 | } 222 | 223 | 224 | #================================================== 225 | 226 | # convert posterior samples for the regression coefficient x-y to correlation 227 | cor_coef <- alpha*(sd(x)/sd(y)) 228 | 229 | #=================================================== 230 | 231 | res <- list(Correlation=mean(cor_coef), 232 | BayesFactor=BF, 233 | PosteriorProbability=prob_r, 234 | alpha_samples=cor_coef, 235 | jagssamples=jagssamples) 236 | 237 | class(res) <- c("jzs_med","list") 238 | class(res$alpha_samples) <- "CI" 239 | class(res$jagssamples) <- "rjags" 240 | 241 | return(res) 242 | 243 | } 244 | -------------------------------------------------------------------------------- /R/jzs_partcorSD.R: -------------------------------------------------------------------------------- 1 | jzs_partcorSD <- 2 | function(V1,V2,control, 3 | SDmethod=c("dnorm","splinefun","logspline","fit.st"), 4 | alternative=c("two.sided","less","greater"), 5 | n.iter=10000,n.burnin=500, 6 | standardize=TRUE){ 7 | 8 | runif(1) # defines .Random.seed 9 | 10 | if(standardize==TRUE){ 11 | M <- (V1-mean(V1))/sd(V1) 12 | Y <- (V2-mean(V2))/sd(V2) 13 | X <- (control-mean(control))/sd(control) 14 | } else { 15 | M <- V1 16 | Y <- V2 17 | X <- control 18 | } 19 | 20 | n <- length(V1) 21 | 22 | #========================================================== 23 | # load JAGS models 24 | #========================================================== 25 | 26 | 27 | jagsmodelpartialcorrelation <- 28 | 29 | "####### Cauchy-prior on beta and tau' ####### 30 | model 31 | 32 | { 33 | 34 | for (i in 1:n) 35 | 36 | { 37 | mu[i] <- intercept + theta[1]*x[i,1] + theta[2]*x[i,2] 38 | y[i] ~ dnorm(mu[i],phi) 39 | 40 | } 41 | 42 | # uninformative prior on intercept alpha, 43 | # Jeffreys' prior on precision phi 44 | intercept ~ dnorm(0,.0001) 45 | phi ~ dgamma(.0001,.0001) 46 | #phi ~ dgamma(0.0000001,0.0000001) #JAGS accepts even this 
47 | #phi ~ dgamma(0.01,0.01) #WinBUGS wants this 48 | 49 | # inverse-gamma prior on g: 50 | g <- 1/invg 51 | a.gamma <- 1/2 52 | b.gamma <- n/2 53 | invg ~ dgamma(a.gamma,b.gamma) 54 | 55 | # Ntzoufras, I. (2009). Bayesian Modeling Using WinBUGS. 56 | # New Jersey: John Wiley & Sons, Inc. p. 167 57 | # calculation of the inverse matrix of V 58 | inverse.V <- inverse(V) 59 | # calculation of the elements of prior precision matrix 60 | for(i in 1:2) 61 | { 62 | for (j in 1:2) 63 | { 64 | prior.T[i,j] <- inverse.V[i,j] * phi/g 65 | } 66 | } 67 | # multivariate prior for the beta vector 68 | theta[1:2] ~ dmnorm( mu.theta, prior.T ) 69 | for(i in 1:2) { mu.theta[i] <- 0 } 70 | 71 | } 72 | 73 | # Explanation----------------------------------------------------------------- 74 | # Prior on g: 75 | # We know that g ~ inverse_gamma(1/2, n/2), with 1/2 the shape parameter and 76 | # n/2 the scale parameter. 77 | # It follows that 1/g ~ gamma(1/2, 2/n). 78 | # However, BUGS/JAGS uses the *rate parameterization* 1/theta instead of the 79 | # scale parametrization theta. Hence we obtain, in de BUGS/JAGS rate notation: 80 | # 1/g ~ dgamma(1/2, n/2) 81 | # Also note: JAGS does not want [,] structure 82 | #----------------------------------------------------------------------------- 83 | " 84 | 85 | jags.model.file2 <- tempfile(fileext=".txt") 86 | write(jagsmodelpartialcorrelation,jags.model.file2) 87 | 88 | #========================================================== 89 | # BF FOR PARTIAL CORRELATION (MY|X) 90 | #========================================================== 91 | 92 | x <- cbind(X,M) 93 | y <- Y 94 | 95 | V <- solve(t(x)%*%x) #NB I switched to the notation from Ntzoufras, p. 167 96 | 97 | jags.data <- list("n", "x", "y", "V") 98 | jags.params <- c("theta") 99 | jags.inits <- list( 100 | list(theta = c(0.0,0.3)), #chain 1 starting value 101 | list(theta = c(0.3, 0.0)), #chain 2 starting value 102 | list(theta = c(-.15,.15))) #chain 3 starting value 103 | 104 | jagssamples <- jags(data=jags.data, inits=jags.inits, jags.params, 105 | n.chains=3, n.iter=n.iter, DIC=T, 106 | n.burnin=n.burnin, n.thin=1, model.file=jags.model.file2) 107 | 108 | beta <- jagssamples$BUGSoutput$sims.list$theta[,2] 109 | 110 | #------------------------------------------------------------------ 111 | 112 | if(SDmethod[1]=="fit.st"){ 113 | 114 | mydt <- function(x, m, s, df) dt((x-m)/s, df)/s 115 | 116 | bar <- try({ 117 | fit.t <- QRM::fit.st(beta) 118 | nu <- as.numeric(fit.t$par.ests[1]) #degrees of freedom 119 | mu <- as.numeric(fit.t$par.ests[2]) 120 | sigma <- abs(as.numeric(fit.t$par.ests[3])) # This is a hack -- with high n occasionally 121 | # sigma switches sign. 
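      # (The location-scale Student-t fitted here approximates the posterior of
      # beta; its density at zero estimates the posterior height at the test
      # value, which is compared below with the height of the Cauchy(0,1) prior
      # to form the Savage-Dickey Bayes factor.)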
122 | }) 123 | 124 | if(!("try-error"%in%class(bar))){ 125 | 126 | # BAYES FACTOR BETA 127 | BF <- 1/(mydt(0,mu,sigma,nu)/dcauchy(0)) 128 | 129 | } else { 130 | 131 | warning("fit.st did not converge, alternative optimization method was used.","\n") 132 | 133 | mydt2 <- function(pars){ 134 | 135 | m <- pars[1] 136 | s <- abs(pars[2]) # no negative standard deviation 137 | df <- abs(pars[3]) # no negative degrees of freedom 138 | 139 | -2*sum(dt((beta-m)/s, df,log=TRUE)-log(s)) 140 | } 141 | 142 | res <- optim(c(mean(beta),sd(beta),20),mydt2)$par 143 | 144 | m <- res[1] 145 | s <- res[2] 146 | df <- res[3] 147 | 148 | # ALTERNATIVE BAYES FACTOR PARTIAL CORRELATION 149 | BF <- 1/(mydt2(0,m,s,df)/dcauchy(0)) 150 | } 151 | 152 | #------------------------- 153 | 154 | } else if(SDmethod[1]=="dnorm"){ 155 | 156 | BF <- 1/(dnorm(0,mean(beta),sd(beta))/dcauchy(0)) 157 | 158 | #------------------------- 159 | 160 | } else if(SDmethod[1]=="splinefun"){ 161 | f <- splinefun(density(beta)) 162 | BF <- 1/(f(0)/dcauchy(0)) 163 | 164 | #------------------------- 165 | 166 | } else if (SDmethod[1]=="logspline"){ 167 | fit.posterior <- polspline::logspline(beta) 168 | posterior.pp <- polspline::dlogspline(0, fit.posterior) # this gives the pdf at point b2 = 0 169 | prior.pp <- dcauchy(0) # height of prior at b2 = 0 170 | BF <- prior.pp/posterior.pp 171 | 172 | } 173 | 174 | #------------------------------------------------------- 175 | 176 | # one-sided test? 177 | 178 | # save BF for one-tailed test 179 | # BF21 = 2*{proportion posterior samples of beta < 0} 180 | 181 | propposterior_less <- sum(beta<0)/length(beta) 182 | propposterior_greater <- sum(beta>0)/length(beta) 183 | 184 | # posterior proportion cannot be zero, because this renders a BF of zero 185 | # none of the samples of the parameter follow the restriction 186 | # ergo: the posterior proportion is smaller than 1/length(parameter) 187 | 188 | if(propposterior_less==0){ 189 | propposterior_less <- 1/length(beta) 190 | } 191 | 192 | if(propposterior_greater==0){ 193 | propposterior_greater <- 1/length(beta) 194 | } 195 | 196 | BF21_less <- 2*propposterior_less 197 | BF21_greater <- 2*propposterior_greater 198 | 199 | if(alternative[1]=="less"){ 200 | # BF10 = p(D|b~cauchy(0,1))/p(D|b=0) 201 | BF10 <- BF 202 | 203 | # BF21 = p(D|b~cauchy-(0,1))/p(D|b~cauchy(0,1)) 204 | # BF21 = 2*{proportion posterior samples of beta < 0} 205 | BF21 <- BF21_less 206 | 207 | BF <- BF10*BF21 208 | 209 | } else if(alternative[1]=="greater"){ 210 | # BF10 = p(D|b~cauchy(0,1))/p(D|b=0) 211 | BF10 <- BF 212 | 213 | # BF21 = p(D|b~cauchy+(0,1))/p(D|b~cauchy(0,1)) 214 | # BF21 = 2*{proportion posterior samples of beta > 0} 215 | BF21 <- BF21_greater 216 | 217 | BF <- BF10*BF21 218 | 219 | } 220 | 221 | #--------------------------------------------------- 222 | 223 | # convert BFs to posterior probability 224 | # prob cannot be exactly 1 or 0 225 | prob_b <- BF/(BF+1) 226 | 227 | if(prob_b == 1){ 228 | prob_b <- prob_b - .Machine$double.eps 229 | } 230 | if(prob_b == 0){ 231 | prob_b <- prob_b + .Machine$double.eps 232 | } 233 | 234 | #==================================================== 235 | 236 | res <- list(PartCoef=mean(beta), 237 | BayesFactor=BF, 238 | PosteriorProbability=prob_b, 239 | beta_samples=beta, 240 | jagssamples=jagssamples) 241 | 242 | class(res) <- c("jzs_med","list") 243 | class(res$jagssamples) <- "rjags" 244 | class(res$beta_samples) <- "CI" 245 | 246 | return(res) 247 | } 248 | 
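All SDmethod branches above estimate the same quantity: the height of the posterior distribution at zero, which the Savage-Dickey ratio compares with the height of the Cauchy(0,1) prior at zero. A minimal standalone sketch of this idea (illustrative only; not part of the package sources, and the simulated samples merely stand in for MCMC output):

set.seed(123)
posterior_samples <- rnorm(10000, mean = 0.4, sd = 0.1)  # stand-in for posterior samples of a coefficient

# normal approximation (as in SDmethod = "dnorm"): BF10 = prior height / posterior height at zero
BF10_dnorm <- dcauchy(0) / dnorm(0, mean(posterior_samples), sd(posterior_samples))

# nonparametric alternative (as in SDmethod = "logspline"), if polspline is installed:
# fit <- polspline::logspline(posterior_samples)
# BF10_logspline <- dcauchy(0) / polspline::dlogspline(0, fit)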
-------------------------------------------------------------------------------- /man/jzs_medSD.Rd: -------------------------------------------------------------------------------- 1 | \name{jzs_medSD} 2 | \alias{jzs_medSD} 3 | \title{ 4 | Perform a default Bayesian hypothesis test for mediation using the Savage-Dickey method. 5 | } 6 | \description{ 7 | This function can be used to perform a default Bayesian hypothesis test for mediation, using the Savage-Dickey method (Dickey & Lientz, 1970). The test uses a Jeffreys-Zellner-Siow prior set-up (Liang et al., 2008). 8 | } 9 | \usage{ 10 | jzs_medSD(independent, dependent, mediator, 11 | SDmethod=c("dnorm","splinefun","logspline", "fit.st"), 12 | alternativeA=c("two.sided","less","greater"), 13 | alternativeB=c("two.sided","less","greater"), 14 | alternativeT=c("two.sided","less","greater"), 15 | n.iter=10000,n.burnin=500, 16 | standardize=TRUE) 17 | } 18 | \arguments{ 19 | \item{independent}{ 20 | a vector containing values for the independent variable. 21 | } 22 | \item{dependent}{ 23 | a vector containing values for the dependent variable. 24 | } 25 | \item{mediator}{ 26 | a vector containing values for the mediating variable. 27 | } 28 | \item{SDmethod}{ 29 | specify the precise method with which the density of the posterior distribution will be estimated in order to compute the Savage-Dickey ratio. 30 | } 31 | \item{alternativeA}{ 32 | specify the alternative hypothesis for path alpha: \code{two.sided}, \code{greater} than zero, or \code{less} than zero. 33 | } 34 | \item{alternativeB}{ 35 | specify the alternative hypothesis for path beta: \code{two.sided}, \code{greater} than zero, or \code{less} than zero. 36 | } 37 | \item{alternativeT}{ 38 | specify the alternative hypothesis for path tau_prime: \code{two.sided}, \code{greater} than zero, or \code{less} than zero. 39 | } 40 | \item{n.iter}{ 41 | number of total iterations per chain (see the package \code{R2jags}). Defaults to 10000. 42 | } 43 | \item{n.burnin}{ 44 | length of the burn-in, i.e., the number of iterations to discard at the beginning (see the package \code{R2jags}). Defaults to 500. 45 | } 46 | \item{standardize}{ 47 | logical. Should the variables be standardized? Defaults to TRUE. 48 | } 49 | } 50 | \note{ 51 | This function requires the program "JAGS" (Just Another Gibbs Sampler), which can be obtained from http://mcmc-jags.sourceforge.net. 52 | } 53 | \details{ 54 | The test consists of four steps. Firstly, it computes the posterior probability for the existence of the path between the independent and the mediating variable (path alpha) through the Savage-Dickey density ratio method. 55 | 56 | Secondly, it computes the posterior probability for the existence of the path between the mediating and the dependent variable, controlled for the influence of the independent variable (path beta), again with the Savage-Dickey density ratio method. 57 | 58 | Thirdly, the evidence for mediation is computed by multiplying the posterior probabilities for the paths alpha and beta. 59 | 60 | Fourthly, the evidence for full mediation is computed by multiplying the evidence for mediation by one minus the posterior probability for the existence of path tau', the path between the independent and dependent variable, controlled for the mediator. 61 | } 62 | \value{ 63 | jzs_medSD returns a list containing visible (printed) and invisible components. The visible components are a data frame with the main results and the 95\% credible interval of the mediated effect (see next section).
The invisible components contain additional information on the parameters, and can be used for plot functions etc. 64 | } 65 | 66 | \section{Visible Output}{ 67 | \describe{ 68 | 69 | The visible output, the output that is printed to the screen, is a list containing a data frame and a credible interval. 70 | 71 | \item{Estimate_alpha}{The mean of the posterior samples of alpha. 72 | } 73 | \item{Estimate_beta}{The mean of the posterior samples of beta. 74 | } 75 | \item{Estimate_tau_prime}{The mean of the posterior samples of tau_prime. 76 | } 77 | \item{Estimate_Mediation (alpha*beta)}{The mean of the posterior samples of the indirect effect alpha*beta.} 78 | 79 | \item{BF_alpha}{The Bayes factor for the existence of path alpha. A value greater than one indicates evidence that alpha exists, a value smaller than one indicates evidence that alpha does not exist. 80 | } 81 | \item{BF_beta}{The Bayes factor for the existence of path beta. A value greater than one indicates evidence that beta exists, a value smaller than one indicates evidence that beta does not exist. 82 | } 83 | \item{BF_tau_prime}{The Bayes factor for the existence of path tau_prime. A value greater than one indicates evidence that tau_prime exists, a value smaller than one indicates evidence that tau_prime does not exist. 84 | } 85 | \item{BF_Mediation (alpha*beta)}{The Bayes factor for mediation compared to no mediation. A value greater than one indicates evidence in favor of mediation, a value smaller than one indicates evidence against mediation. 86 | } 87 | 88 | \item{PostProb_alpha}{The posterior probability that the path alpha (the relation between the independent and the mediating variable) is not zero.} 89 | \item{PostProb_beta}{The posterior probability that the path beta (the relation between the mediating and the dependent variable after controlling for the independent variable) is not zero.} 90 | \item{PostProb_tau_prime}{The posterior probability that the path tau_prime (the relation between the independent and the dependent variable after controlling for the mediator) is not zero.} 91 | \item{PostProb_Mediation (alpha*beta)}{The posterior probability that the relation between the independent and the dependent variable is mediated by the specified mediator.} 92 | 93 | \item{CI_ab}{The 95\% credible interval of the indirect effect "ab".} 94 | } 95 | } 96 | 97 | \section{Invisible Output}{ 98 | \describe{ 99 | 100 | jzs_medSD also returns output that is not printed to the screen. This invisible output contains the following elements: 101 | 102 | \item{alpha_samples}{The posterior samples of alpha. 103 | } 104 | \item{beta_samples}{The posterior samples of beta. 105 | } 106 | \item{tau_prime_samples}{The posterior samples of tau_accent. 107 | } 108 | \item{ab_samples}{The posterior distribution of alpha multiplied by the posterior distribution of beta. This is equivalent to the posterior distribution of the indirect effect "ab".} 109 | \item{jagssamplesA}{The JAGS output for the MCMC estimation of path alpha. This object can be used to construct a traceplot.} 110 | \item{jagssamplesTB}{The JAGS output for the MCMC estimation of path tau' and beta. This object can be used to construct a traceplot.} 111 | } 112 | } 113 | 114 | \references{ 115 | Dickey, J. M., & Lientz, B. P. (1970). The weighted likelihood ratio, sharp hypotheses about chances, the order of a Markov chain. The Annals of Mathematical Statistics, 214-226. 116 | 117 | Liang, F., Paulo, R., Molina, G., Clyde, M. A., & Berger, J. O. (2008). 
Mixtures of g priors for Bayesian variable selection. Journal of the American Statistical Association, 103(481), 410-423. 118 | 119 | Nuijten, M. B., Wetzels, R., Matzke, D., Dolan, C. V., & Wagenmakers, E.-J. (2014). A default Bayesian hypothesis test for mediation. Behavior Research Methods. doi: 10.3758/s13428-014-0470-2 120 | 121 | Wetzels, R., & Wagenmakers, E.-J. (2012). A Default Bayesian Hypothesis Test for Correlations and Partial Correlations. Psychonomic Bulletin & Review, 19, 1057-1064. 122 | } 123 | \author{ 124 | Michele B. Nuijten , Ruud Wetzels, Dora Matzke, Conor V. Dolan, and Eric-Jan Wagenmakers. 125 | } 126 | 127 | 128 | \section{Warning}{ 129 | In some cases the SDmethod \code{fit.st} will fail to converge. If so, jzs_medSD automatically switches to another optimization method (optim {stats}) with different starting values (mean and sd of the posterior sample). If the other optimization method does not converge either, you could either try to run the same code again (sometimes it will converge a second time), or you could try one of the other SDmethod options or try the analytical mediation test instead (see \code{\link{jzs_med}}). 130 | } 131 | 132 | \seealso{ 133 | \code{\link{jzs_med}} 134 | } 135 | 136 | \examples{ 137 | \dontrun{ 138 | 139 | # simulated mediational data 140 | a <- .5 141 | b <- .6 142 | t_prime <- .3 143 | 144 | X <- rnorm(50,0,1) 145 | M <- a*X + rnorm(50,0,1) 146 | Y <- t_prime*X + b*M + rnorm(50,0,1) 147 | 148 | # run jzs_medSD 149 | result <- jzs_medSD(independent=X,dependent=Y,mediator=M) 150 | 151 | # print result 152 | result 153 | 154 | ### NOTE ### 155 | #Sometimes this error will pop up: 156 | # 157 | #Error in solve.default(nItheta) : 158 | # system is computationally singular: reciprocal condition number = *some small number* 159 | #Error in mydt2(0, mT, sT, dfT) : unused arguments (mT, sT, dfT) 160 | #In addition: Warning message: 161 | #In jzs_medSD(X, Y, M) : 162 | # fit.st did not converge. Alternative optimization method was used. 163 | # 164 | #If this happens, just run jzs_medSD() again. 165 | #This usually solves the convergence problem. If it does not, 166 | #try a different SD method. For instance: jzs_medSD(X,Y,M,SDmethod="dnorm"). 
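#(All SDmethod options estimate the same quantity -- the height of the
#posterior distribution at zero for the Savage-Dickey ratio -- and differ
#only in the density estimator that is used.)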
167 | # 168 | ############# 169 | 170 | #------------------------------------------ 171 | 172 | # load Firefighter data 173 | data(Firefighters) 174 | 175 | X <- Firefighters$x 176 | M <- Firefighters$m 177 | Y <- Firefighters$y 178 | 179 | # run jzs_medSD 180 | result <- jzs_medSD(independent=X,dependent=Y,mediator=M) 181 | 182 | # plot the result in a mediation diagram 183 | plot(result$main_result) 184 | 185 | # inspect posterior distribution of the coefficients 186 | plot(result$alpha_samples) 187 | plot(result$beta_samples) 188 | plot(result$tau_prime_samples) 189 | 190 | # print a traceplot of the chains 191 | plot(result$jagssamplesA) 192 | plot(result$jagssamplesTB) 193 | # where the first chain (theta[1]) is for tau' and the second chain (theta[2]) for beta 194 | 195 | # calculate and plot a 95\% credible interval for the 196 | # posterior mean of the indirect effect 197 | result$CI_ab 198 | plot(result$ab_samples) 199 | 200 | 201 | } 202 | } -------------------------------------------------------------------------------- /data/Firefighters.txt: -------------------------------------------------------------------------------- 1 | y m x 2 | 1.666666 0.834275 -0.4096045 3 | 4 -0.1657251 -0.4096045 4 | 0 -0.1657251 -0.4096045 5 | -0.666667 -0.665725 -0.4096045 6 | 1.333333 0.3342749 -0.4096045 7 | 2.333334 0.834275 -0.4096045 8 | 3.333333 1.000942 -0.4096045 9 | 0.333333 0.667608 -0.4096045 10 | 1.666667 1.500942 -0.4096045 11 | 1.666666 -0.4990581 -0.4096045 12 | 0 -1.832392 -0.4096045 13 | -2.666666 -1.665725 -0.4096045 14 | 0.666666 -0.3323921 -0.4096045 15 | 3 0.834275 -0.4096045 16 | 0 -1.832392 -0.4096045 17 | -1 -0.665725 -0.4096045 18 | -0.333333 0.0009419463 -0.4096045 19 | -0.333334 -1.499058 -0.4096045 20 | -0.333333 -0.1657251 -0.4096045 21 | 0 0.1676079 -0.4096045 22 | 0.333333 0.3342749 -0.4096045 23 | 0 -0.3323921 -0.4096045 24 | -0.666667 -0.4990581 -0.4096045 25 | 0 -0.832392 -0.4096045 26 | 1 -1.165725 -0.4096045 27 | 0 0.1676089 -0.4096045 28 | 0.333334 -1.165725 -0.4096045 29 | 0.333334 -0.665725 -0.4096045 30 | 0.666667 -0.1657251 -0.4096045 31 | -0.333333 1.000942 -0.4096045 32 | 0.666667 -0.832392 -0.4096045 33 | -0.333333 0.5009419 -0.4096045 34 | 0.333333 0.834275 -0.4096045 35 | 0.333333 -1.165725 -0.4096045 36 | 1.333333 -0.665725 -0.4096045 37 | 0 0.3342749 -0.4096045 38 | 0.333334 1.834275 -0.4096045 39 | -0.666666 -0.1657251 -0.4096045 40 | 0 -0.1657251 -0.4096045 41 | 0 -3.165725 -0.4096045 42 | 1 0.1676079 -0.4096045 43 | -0.666667 0.0009419463 -0.4096045 44 | 0 -0.1657251 -0.4096045 45 | 1 -0.665725 -0.4096045 46 | 0.666666 0.5009419 -0.4096045 47 | 0 -0.1657251 -0.4096045 48 | 0 0.667608 -0.4096045 49 | 0.666667 -2.332392 -0.4096045 50 | -0.333334 0.834275 -0.4096045 51 | 1.333333 -0.665725 -0.4096045 52 | 1 -0.832392 -0.4096045 53 | 1 0.1676079 -0.4096045 54 | -0.333333 -1.332392 -0.4096045 55 | 0.333334 -1.165725 -0.4096045 56 | 0 0.3342749 -0.4096045 57 | 1.333333 -1.665725 -0.4096045 58 | 0.333334 0.0009419463 -0.4096045 59 | 2.333333 0.0009419463 -0.4096045 60 | 1 0.0009419463 -0.4096045 61 | -0.333333 0.834275 -0.4096045 62 | 1.333333 0.667609 -0.4096045 63 | -0.333333 -1.165725 -0.4096045 64 | 0 0.834275 -0.4096045 65 | 0.666666 0.1676089 -0.4096045 66 | 0.666666 -1.165725 -0.4096045 67 | -0.333333 -0.999058 -0.4096045 68 | 0 -0.4990581 -0.4096045 69 | -1.333334 -0.3323921 -0.4096045 70 | -0.666667 1.000942 -0.4096045 71 | 0.666667 0.1676079 -0.4096045 72 | 0 -0.832392 -0.4096045 73 | 0.333333 0.5009419 -0.4096045 74 | -0.666667 -0.4990581 
-0.4096045 75 | 0.333334 -0.832392 -0.4096045 76 | 0 0.5009419 -0.4096045 77 | 0.666667 -0.1657251 -0.4096045 78 | 0 -0.3323921 -0.4096045 79 | -0.666667 1.667608 -0.4096045 80 | -1.666667 0.834275 -0.4096045 81 | 0.333333 -0.832391 -0.4096045 82 | 0.333333 0.667609 -0.4096045 83 | -3 -1.832392 -0.4096045 84 | 0.666667 -1.165725 -0.4096045 85 | -0.666666 -0.1657251 -0.4096045 86 | 3 3.167608 -0.4096045 87 | 1 -0.1657251 -0.4096045 88 | -0.333333 -0.3323921 -0.4096045 89 | -1.333334 -0.3323911 -0.4096045 90 | 1 -0.1657251 -0.4096045 91 | 1 0.3342749 -0.4096045 92 | 0.333333 -0.832392 -0.4096045 93 | 0.333333 1.000942 -0.4096045 94 | 1 -0.999058 -0.4096045 95 | 0 0.667608 -0.4096045 96 | 0.333334 -0.1657251 -0.4096045 97 | 0.666667 -0.1657251 -0.4096045 98 | -0.333333 0.5009419 -0.4096045 99 | -1.333334 0.834275 -0.4096045 100 | 0 -0.3323921 -0.4096045 101 | -0.666666 1.000942 -0.4096045 102 | 0.333334 0.834275 -0.4096045 103 | -0.333333 0.5009419 -0.4096045 104 | -1 -1.332392 -0.4096045 105 | 1.666667 -0.1657251 -0.4096045 106 | -1 -0.4990581 -0.4096045 107 | 0 -0.4990581 -0.4096045 108 | 2.333334 0.1676079 -0.4096045 109 | 0.333333 -0.665725 -0.4096045 110 | 1.333333 -1.165725 -0.4096045 111 | 0 1.167608 -0.4096045 112 | -2 -3.165725 -0.4096045 113 | 0 -2.832392 -0.4096045 114 | 2 -0.1657251 -0.4096045 115 | 0 -3.832391 -0.4096045 116 | 0.333333 -0.3323911 -0.4096045 117 | 0 0.834275 -0.4096045 118 | 1.666667 -0.3323921 -0.4096045 119 | 0 -0.4990591 -0.4096045 120 | -0.666667 -0.3323921 -0.4096045 121 | 0.333333 0.0009419463 -0.4096045 122 | 2 -0.3323911 -0.4096045 123 | 0.333333 0.0009419463 -0.4096045 124 | -0.333333 1.334275 -0.4096045 125 | 0 -0.999058 -0.4096045 126 | 1.666667 1.000942 -0.4096045 127 | -1 -0.1657251 -0.4096045 128 | -0.333333 0.1676079 -0.4096045 129 | -0.333333 -0.1657251 -0.4096045 130 | 0 -0.1657251 -0.4096045 131 | 1 -1.499058 -0.4096045 132 | -1 1.500942 -0.4096045 133 | 1.333334 0.0009419463 -0.4096045 134 | -1.333334 -0.1657251 -0.4096045 135 | 1.333333 -0.1657251 -0.4096045 136 | 1.333334 0.3342749 -0.4096045 137 | 0.333333 -1.165725 -0.4096045 138 | 2 0.1676079 -0.4096045 139 | 0.333333 -0.1657251 -0.4096045 140 | 0.333334 1.000941 -0.4096045 141 | 1.666667 -0.1657251 -0.4096045 142 | 1 -0.832392 -0.4096045 143 | 1.666666 1.000942 -0.4096045 144 | 1 -1.165725 -0.4096045 145 | 0.333333 -0.1657251 -0.4096045 146 | 0 -0.665725 -0.4096045 147 | -0.333333 -0.832391 -0.4096045 148 | 1.666667 -2.499058 -0.4096045 149 | 1 -0.1657251 -0.4096045 150 | 0 -0.832392 -0.4096045 151 | 0.333333 -1.332392 -0.4096045 152 | 0 0.667608 -0.4096045 153 | 1.333333 2.000942 -0.4096045 154 | -0.333334 -0.1657251 -0.4096045 155 | 0 0.667608 -0.4096045 156 | 0.333333 -0.4990581 -0.4096045 157 | 0.666667 0.5009419 -0.4096045 158 | 1.666666 0.667608 -0.4096045 159 | 1 0.1676079 -0.4096045 160 | -1 -0.4990591 -0.4096045 161 | 1.333333 -0.3323921 -0.4096045 162 | -0.333334 0.1676089 -0.4096045 163 | 3 0.5009419 -0.4096045 164 | -2.333333 -0.1657251 -0.4096045 165 | -0.333333 -0.1657251 -0.4096045 166 | -0.333333 0.0009419463 -0.4096045 167 | -0.333333 -0.832391 -0.4096045 168 | -0.333333 -2.165725 -0.4096045 169 | 0.666666 -0.665725 -0.4096045 170 | -0.333333 0.5009419 -0.4096045 171 | 1 -2.665725 -0.4096045 172 | 0.666667 -1.165725 -0.4096045 173 | 2 -0.832391 -0.4096045 174 | 0.666666 -0.1657251 -0.4096045 175 | -1 1.167608 -0.4096045 176 | 4 0.0009409463 -0.4096045 177 | 2 -2.332392 -0.4096045 178 | 1 2.167608 -0.4096045 179 | 0.666667 0.834275 -0.4096045 180 | 0 -1.665725 -0.4096045 
181 | 1 0.3342749 -0.4096045 182 | 0.666667 0.1676079 -0.4096045 183 | 1.333333 -0.1657251 -0.4096045 184 | 0.333334 -0.3323921 -0.4096045 185 | -1.666667 0.1676079 -0.4096045 186 | 2.333334 0.834275 -0.4096045 187 | 2.333333 0.1676079 -0.4096045 188 | 0 -0.999058 -0.4096045 189 | -0.666667 -0.3323921 -0.4096045 190 | 0 0.1676079 -0.4096045 191 | -1 0.0009419463 -0.4096045 192 | 0 -0.4990581 -0.4096045 193 | 1.666667 1.834275 -0.4096045 194 | 1.666667 0.667608 -0.4096045 195 | 0.666667 -0.1657251 -0.4096045 196 | -0.333333 -0.1657251 -0.4096045 197 | -1.333333 0.3342749 -0.4096045 198 | 0 -1.665725 -0.4096045 199 | 0 0.834275 -0.4096045 200 | 1 0.667608 -0.4096045 201 | -0.666667 0.0009419463 -0.4096045 202 | -0.666667 0.3342749 -0.4096045 203 | 1.666667 0.5009419 -0.4096045 204 | 0.666667 0.0009409463 -0.4096045 205 | -1.333333 -0.665725 -0.4096045 206 | 0.333333 2.667608 -0.4096045 207 | 0.333333 -1.665725 -0.4096045 208 | 0 0.0009419463 -0.4096045 209 | -1 0.667608 -0.4096045 210 | -0.666667 0.667608 -0.4096045 211 | -0.333333 0.0009419463 0.5903955 212 | 0.666667 -0.1657251 0.5903955 213 | -1.333333 -0.832392 0.5903955 214 | -1 0.834275 0.5903955 215 | 0 -0.3323921 0.5903955 216 | -0.333333 -1.999058 0.5903955 217 | 1.333333 0.834275 0.5903955 218 | -0.666667 2.167608 0.5903955 219 | 1.666667 -0.4990581 0.5903955 220 | 1 0.834275 0.5903955 221 | 0.666666 0.667608 0.5903955 222 | -0.333334 0.3342749 0.5903955 223 | 1.666667 -0.832392 0.5903955 224 | -0.666667 -1.165725 0.5903955 225 | 0.333334 -0.4990581 0.5903955 226 | 0.666667 2.334275 0.5903955 227 | 0.666667 -0.832391 0.5903955 228 | 2 0.667608 0.5903955 229 | -0.666667 0.834275 0.5903955 230 | 3 -0.665725 0.5903955 231 | 0 -0.832391 0.5903955 232 | 2 1.000942 0.5903955 233 | 0.666666 0.0009419463 0.5903955 234 | 1.666666 0.834275 0.5903955 235 | -0.333334 0.3342749 0.5903955 236 | -2 0.834275 0.5903955 237 | -1 0.834275 0.5903955 238 | -0.666667 -0.1657251 0.5903955 239 | 0.666667 0.3342749 0.5903955 240 | 0 -0.665725 0.5903955 241 | -0.333333 -0.1657251 0.5903955 242 | 1.666667 0.3342749 0.5903955 243 | -0.666666 0.834275 0.5903955 244 | -0.666667 -0.1657251 0.5903955 245 | 1 -1.165725 0.5903955 246 | 1.666667 -0.4990581 0.5903955 247 | -0.333333 0.5009419 0.5903955 248 | -1 -1.165725 0.5903955 249 | 0.333334 -0.1657251 0.5903955 250 | 1.333333 0.834275 0.5903955 251 | -0.333333 1.667608 0.5903955 252 | 0 0.5009409 0.5903955 253 | 1 2.834275 0.5903955 254 | -0.333334 -0.3323911 0.5903955 255 | 1 2.334275 0.5903955 256 | 1.333334 1.000942 0.5903955 257 | 1 0.1676079 0.5903955 258 | 2 -0.3323921 0.5903955 259 | -0.333334 0.3342749 0.5903955 260 | 2 -0.1657251 0.5903955 261 | 1.333334 0.1676079 0.5903955 262 | -0.333334 -0.999059 0.5903955 263 | 1.333334 0.834275 0.5903955 264 | 0.333333 -0.832391 0.5903955 265 | 2.333333 2.167608 0.5903955 266 | -1.333333 -0.1657251 0.5903955 267 | -0.333334 0.0009419463 0.5903955 268 | 1 0.5009409 0.5903955 269 | 0.333333 0.834275 0.5903955 270 | 1 -3.165725 0.5903955 271 | -2.666666 -0.832392 0.5903955 272 | 0 -0.4990581 0.5903955 273 | 1.333333 -0.3323921 0.5903955 274 | 1 -0.1657251 0.5903955 275 | -0.333333 -0.1657251 0.5903955 276 | -0.333333 0.0009419463 0.5903955 277 | -0.666667 -0.832392 0.5903955 278 | 2 1.834275 0.5903955 279 | 2 1.834275 0.5903955 280 | 0.333334 0.1676079 0.5903955 281 | 2.666667 1.000942 0.5903955 282 | 1.333333 1.667608 0.5903955 283 | 2 -0.665725 0.5903955 284 | 1 0.667608 0.5903955 285 | 0.666667 2.500942 0.5903955 286 | 0 1.167608 0.5903955 287 | -1.666667 
0.0009419463 0.5903955 288 | 1 0.5009419 0.5903955 289 | 1.333334 0.3342749 0.5903955 290 | -0.333334 -0.1657251 0.5903955 291 | -0.333333 1.000942 0.5903955 292 | -0.333333 1.667608 0.5903955 293 | 1.666667 1.334275 0.5903955 294 | 3.666666 0.3342749 0.5903955 295 | 1 -0.4990581 0.5903955 296 | 0.333333 -1.165725 0.5903955 297 | -0.333333 1.667608 0.5903955 298 | -0.666667 -0.832392 0.5903955 299 | 0 -0.3323921 0.5903955 300 | -0.333334 1.834275 0.5903955 301 | -0.666666 -1.165725 0.5903955 302 | -0.666666 -0.1657251 0.5903955 303 | 0.666667 0.1676079 0.5903955 304 | 1 1.500942 0.5903955 305 | 0 -1.165725 0.5903955 306 | -1 -1.332391 0.5903955 307 | -0.333334 1.167609 0.5903955 308 | 0.333333 -0.1657251 0.5903955 309 | -1 1.334275 0.5903955 310 | 2 0.667608 0.5903955 311 | 2.333334 -0.4990581 0.5903955 312 | 1 1.167608 0.5903955 313 | 1.333334 -3.665725 0.5903955 314 | 0.333333 0.834275 0.5903955 315 | 0 -5.165725 0.5903955 316 | 0.333334 -0.832392 0.5903955 317 | 2 -0.665725 0.5903955 318 | -2 0.3342749 0.5903955 319 | 0.666666 2.834275 0.5903955 320 | 0.333333 -1.165725 0.5903955 321 | 2.666667 1.334275 0.5903955 322 | 1.666666 -0.999058 0.5903955 323 | 0.666666 0.667609 0.5903955 324 | 1 2.834275 0.5903955 325 | 0.666666 2.834275 0.5903955 326 | 1 -0.4990581 0.5903955 327 | 1 2.834275 0.5903955 328 | -0.666667 0.667608 0.5903955 329 | 0.666667 -0.4990581 0.5903955 330 | 2.333333 -0.1657251 0.5903955 331 | -0.666667 -0.1657251 0.5903955 332 | 1 -0.1657251 0.5903955 333 | 1.333333 -1.165725 0.5903955 334 | 2.333334 0.834275 0.5903955 335 | -0.333333 -0.832392 0.5903955 336 | 1.333333 -0.1657251 0.5903955 337 | 1 0.3342749 0.5903955 338 | 0 0.5009419 0.5903955 339 | 0.666667 -0.1657251 0.5903955 340 | 1 2.167608 0.5903955 341 | 0 0.834275 0.5903955 342 | -0.333333 0.667609 0.5903955 343 | 0 0.834275 0.5903955 344 | 0 0.667608 0.5903955 345 | -0.333333 -0.1657251 0.5903955 346 | 1 -1.832392 0.5903955 347 | 1.666666 1.167608 0.5903955 348 | 0.666667 -0.665725 0.5903955 349 | 1.666667 3.834275 0.5903955 350 | 1.333334 0.0009419463 0.5903955 351 | 0.666667 0.0009409463 0.5903955 352 | 0 0.1676079 0.5903955 353 | 1.333333 0.1676089 0.5903955 354 | 0 0.667608 0.5903955 355 | 1 -0.3323921 0.5903955 -------------------------------------------------------------------------------- /R/jzs_med.R: -------------------------------------------------------------------------------- 1 | jzs_med <- 2 | function(independent,dependent,mediator, 3 | alternativeA=c("two.sided","less","greater"), 4 | alternativeB=c("two.sided","less","greater"), 5 | alternativeT=c("two.sided","less","greater"), 6 | n.iter=10000,n.burnin=500, 7 | standardize=TRUE){ 8 | 9 | runif(1) # defines .Random.seed 10 | 11 | # independent = vector with values for independent variable 12 | # dependent = vector with values for dependent variable 13 | # mediator = vector with values for mediating variable 14 | 15 | # sample size 16 | n <- length(independent) 17 | 18 | X <- independent 19 | Y <- dependent 20 | M <- mediator 21 | 22 | if(standardize==TRUE){ 23 | X <- (X-mean(X))/sd(X) 24 | Y <- (Y-mean(Y))/sd(Y) 25 | M <- (M-mean(M))/sd(M) 26 | } 27 | 28 | #========================================================== 29 | # RESULTS FOR PATH ALPHA 30 | #========================================================== 31 | 32 | # alternativeA <- alternativeA 33 | # n.iter <- n.iter 34 | # n.burnin <- n.burnin 35 | 36 | res_alpha <- jzs_cor(X,M,alternative=alternativeA,n.iter=n.iter, 37 | n.burnin=n.burnin,standardize=standardize) 38 | 39 | BFa <- 
res_alpha$BayesFactor 40 | prob_a <- res_alpha$PosteriorProbability 41 | alpha <- res_alpha$cor_coef_samples 42 | jagssamplesA <- res_alpha$jagssamples 43 | 44 | #========================================================== 45 | 46 | # we chose not to use jzs_partcor() for the results of path beta and tau 47 | # because this would mean we'd have to fit the JAGS model three times in total 48 | # now we can get the information for both beta and tau out of one and the same model 49 | 50 | # function to analytically calculate the BF for partial correlation 51 | # see Wetzels, R. & Wagenmakers, E.-J. (2012). A default Bayesian hypothesis test for correlations and partial correlations. Psychonomic Bulletin & Review, 19, 1057-1064 52 | # the jzs_partcorbf function is based on updated R code that can handle larger values of n 53 | 54 | jzs_partcor_basic <- function(V1,V2,control,standardize=TRUE){ 55 | 56 | # standardize variables 57 | if(standardize==TRUE){ 58 | V1 <- (V1-mean(V1))/sd(V1) 59 | V2 <- (V2-mean(V2))/sd(V2) 60 | control <- (control-mean(control))/sd(control) 61 | } 62 | 63 | r0 <- sqrt(summary(lm(V1~control))$r.squared) 64 | r1 <- sqrt(summary(lm(V1~control+V2))$r.squared) 65 | p0 <- 1 66 | p1 <- 2 67 | n <- length(V1) 68 | 69 | jzs_partcorbf=function(r0,r1,p0,p1,n){ 70 | int=function(r,n,p,g){ 71 | exp( 72 | ((n-1-p)/2)*log(1+g)+ 73 | (-(n-1)/2)*log(1+(1-r^2)*g)+ 74 | (-3/2)*log(g)+ 75 | -n/(2*g)) 76 | } 77 | bf10 <- try(integrate(int,lower=0,upper=Inf,r=r1,p=p1,n=n)$value/ 78 | integrate(int,lower=0,upper=Inf,r=r0,p=p0,n=n)$value) 79 | 80 | # if the evidence is overwhelming, the BF will become infinite 81 | # to avoid this resulting in an error, give back the max possible number 82 | if(class(bf10)=="try-error" & grepl("non-finite function value",bf10[1])){ 83 | bf10 <- .Machine$double.xmax 84 | message("Note: the Error above was caused because the BF from the function jzs_partcorbf was approaching infinity. To avoid the function from crashing, the returned BF is the largest possible number in R.") 85 | } else { 86 | if(class(bf10)=="try-error" & !grepl("non-finite function value",bf10[1])){ 87 | bf10 <- NA 88 | message("An error occurred. The BF could not be calculated") 89 | } 90 | } 91 | 92 | return(bf10) 93 | } 94 | 95 | BF <- jzs_partcorbf(r0,r1,p0,p1,n) 96 | return(BF) 97 | } 98 | 99 | #========================================================== 100 | # JAGS MODEL FOR PARTIAL CORRELATION 101 | #========================================================== 102 | 103 | jagsmodelpartialcorrelation <- 104 | 105 | "####### Cauchy-prior on beta and tau' ####### 106 | model 107 | 108 | { 109 | 110 | for (i in 1:n) 111 | 112 | { 113 | mu[i] <- intercept + theta[1]*x[i,1] + theta[2]*x[i,2] 114 | y[i] ~ dnorm(mu[i],phi) 115 | 116 | } 117 | 118 | # uninformative prior on intercept alpha, 119 | # Jeffreys' prior on precision phi 120 | intercept ~ dnorm(0,.0001) 121 | phi ~ dgamma(.0001,.0001) 122 | #phi ~ dgamma(0.0000001,0.0000001) #JAGS accepts even this 123 | #phi ~ dgamma(0.01,0.01) #WinBUGS wants this 124 | 125 | # inverse-gamma prior on g: 126 | g <- 1/invg 127 | a.gamma <- 1/2 128 | b.gamma <- n/2 129 | invg ~ dgamma(a.gamma,b.gamma) 130 | 131 | # Ntzoufras, I. (2009). Bayesian Modeling Using WinBUGS. 132 | # New Jersey: John Wiley & Sons, Inc. p. 
167 133 | # calculation of the inverse matrix of V 134 | inverse.V <- inverse(V) 135 | # calculation of the elements of prior precision matrix 136 | for(i in 1:2) 137 | { 138 | for (j in 1:2) 139 | { 140 | prior.T[i,j] <- inverse.V[i,j] * phi/g 141 | } 142 | } 143 | # multivariate prior for the beta vector 144 | theta[1:2] ~ dmnorm( mu.theta, prior.T ) 145 | for(i in 1:2) { mu.theta[i] <- 0 } 146 | 147 | } 148 | 149 | # Explanation----------------------------------------------------------------- 150 | # Prior on g: 151 | # We know that g ~ inverse_gamma(1/2, n/2), with 1/2 the shape parameter and 152 | # n/2 the scale parameter. 153 | # It follows that 1/g ~ gamma(1/2, 2/n). 154 | # However, BUGS/JAGS uses the *rate parameterization* 1/theta instead of the 155 | # scale parametrization theta. Hence we obtain, in de BUGS/JAGS rate notation: 156 | # 1/g ~ dgamma(1/2, n/2) 157 | # Also note: JAGS does not want [,] structure 158 | #----------------------------------------------------------------------------- 159 | " 160 | 161 | jags.model.file2 <- tempfile(fileext=".txt") 162 | write(jagsmodelpartialcorrelation,jags.model.file2) 163 | 164 | #========================================================== 165 | # SAVE SAMPLES FOR PATH BETA AND TAU_ACCENT 166 | #========================================================== 167 | 168 | x <- cbind(X,M) 169 | y <- Y 170 | 171 | V <- solve(t(x)%*%x) #NB I switched to the notation from Ntzoufras, p. 167 172 | 173 | jags.data <- list("n", "x", "y", "V") 174 | jags.params <- c("theta") 175 | jags.inits <- list( 176 | list(theta = c(0.0,0.3)), #chain 1 starting value 177 | list(theta = c(0.3, 0.0)), #chain 2 starting value 178 | list(theta = c(-.15,.15))) #chain 3 starting value 179 | 180 | jagssamplesTB <- jags(data=jags.data, inits=jags.inits, jags.params, 181 | n.chains=3, n.iter=n.iter, DIC=T, 182 | n.burnin=n.burnin, n.thin=1, model.file=jags.model.file2) 183 | 184 | tau_accent <- jagssamplesTB$BUGSoutput$sims.list$theta[,1] 185 | beta <- jagssamplesTB$BUGSoutput$sims.list$theta[,2] 186 | 187 | #========================================================== 188 | # RESULTS FOR PATH BETA 189 | #========================================================== 190 | 191 | BFb <- jzs_partcor_basic(M,Y,control=X,standardize=standardize) 192 | 193 | # one-sided test beta? 
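  # (BFb above is the two-sided Bayes factor from the closed-form integral in
  #  jzs_partcor_basic; the order-restriction adjustment below turns it into a
  #  one-sided Bayes factor using the JAGS posterior samples of beta.)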
194 | 195 | # save BF for one-tailed test 196 | # BF21 = 2*{proportion posterior samples of beta < 0} 197 | propposterior_less <- sum(beta<0)/length(beta) 198 | propposterior_greater <- sum(beta>0)/length(beta) 199 | 200 | # posterior proportion cannot be zero, because this renders a BF of zero 201 | # none of the samples of the parameter follow the restriction 202 | # ergo: the posterior proportion is smaller than 1/length(parameter) 203 | 204 | if(propposterior_less==0){ 205 | propposterior_less <- 1/length(beta) 206 | } 207 | 208 | 209 | if(propposterior_greater==0){ 210 | propposterior_greater <- 1/length(beta) 211 | } 212 | 213 | BF21_less <- 2*propposterior_less 214 | BF21_greater <- 2*propposterior_greater 215 | 216 | if(alternativeB[1]=="less"){ 217 | # BF10 = p(D|b~cauchy(0,1))/p(D|b=0) 218 | BF10 <- BFb 219 | 220 | # BF21 = p(D|b~cauchy-(0,1))/p(D|b~cauchy(0,1)) 221 | # BF21 = 2*{proportion posterior samples of beta < 0} 222 | BF21 <- BF21_less 223 | 224 | BFb <- BF10*BF21 225 | 226 | } else if(alternativeB[1]=="greater"){ 227 | # BF10 = p(D|b~cauchy(0,1))/p(D|b=0) 228 | BF10 <- BFb 229 | 230 | # BF21 = p(D|b~cauchy+(0,1))/p(D|b~cauchy(0,1)) 231 | # BF21 = 2*{proportion posterior samples of beta > 0} 232 | BF21 <- BF21_greater 233 | 234 | BFb <- BF10*BF21 235 | 236 | } 237 | 238 | #--------------------------------------------------- 239 | 240 | # convert BFb to posterior probability 241 | # prob cannot be exactly 1 or 0 242 | prob_b <- BFb/(BFb+1) 243 | 244 | if(prob_b == 1){ 245 | prob_b <- prob_b - .Machine$double.eps 246 | } 247 | if(prob_b == 0){ 248 | prob_b <- prob_b + .Machine$double.eps 249 | } 250 | 251 | #========================================= 252 | # calculate evidence for mediation (EM) 253 | #========================================= 254 | 255 | EM <- prob_a*prob_b 256 | BF.EM <- EM/(1-EM) 257 | 258 | #========================================================== 259 | # RESULTS FOR PATH TAU_ACCENT 260 | #========================================================== 261 | 262 | BFt_accent <- jzs_partcor_basic(X,Y,control=M,standardize=standardize) 263 | 264 | # one-sided test tau_accent? 
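  # (Hypothetical illustration of the mediation evidence computed above:
  #  prob_a = .95 and prob_b = .90 give EM = .855 and BF.EM = .855/.145,
  #  about 5.9; the posterior probability for tau' obtained below then
  #  distinguishes full from partial mediation.)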
265 | 266 | # save BF for one-tailed test 267 | # BF21 = 2*{proportion posterior samples of tau_accent < 0} 268 | propposterior_less <- sum(tau_accent<0)/length(tau_accent) 269 | propposterior_greater <- sum(tau_accent>0)/length(tau_accent) 270 | 271 | # posterior proportion cannot be zero, because this renders a BF of zero 272 | # none of the samples of the parameter follow the restriction 273 | # ergo: the posterior proportion is smaller than 1/length(parameter) 274 | 275 | if(propposterior_less==0){ 276 | propposterior_less <- 1/length(tau_accent) 277 | } 278 | 279 | 280 | if(propposterior_greater==0){ 281 | propposterior_greater <- 1/length(tau_accent) 282 | } 283 | 284 | BF21_less <- 2*propposterior_less 285 | BF21_greater <- 2*propposterior_greater 286 | 287 | if(alternativeT[1]=="less"){ 288 | # BF10 = p(D|t'~cauchy(0,1))/p(D|t'=0) 289 | BF10 <- BFt_accent 290 | 291 | # BF21 = p(D|t'~cauchy-(0,1))/p(D|t'~cauchy(0,1)) 292 | # BF21 = 2*{proportion posterior samples of tau_accent < 0} 293 | BF21 <- BF21_less 294 | 295 | BFt_accent <- BF10*BF21 296 | 297 | } else if(alternativeT[1]=="greater"){ 298 | # BF10 = p(D|t'~cauchy(0,1))/p(D|t'=0) 299 | BF10 <- BFt_accent 300 | 301 | # BF21 = p(D|t'~cauchy+(0,1))/p(D|t'~cauchy(0,1)) 302 | # BF21 = 2*{proportion posterior samples of tau_accent > 0} 303 | BF21 <- BF21_greater 304 | 305 | BFt_accent <- BF10*BF21 306 | 307 | } 308 | 309 | #-------------------------------------------------------- 310 | 311 | # convert BFs to posterior probability 312 | # prob cannot be exactly 1 or 0 313 | prob_t_accent <- BFt_accent/(BFt_accent+1) 314 | 315 | if(prob_t_accent == 1){ 316 | prob_t_accent <- prob_t_accent - .Machine$double.eps 317 | } 318 | if(prob_t_accent == 0){ 319 | prob_t_accent <- prob_t_accent + .Machine$double.eps 320 | } 321 | 322 | #=============================================================== 323 | 324 | # calculate 95% credible interval for ab 325 | ab <- alpha*beta 326 | CI <- quantile(ab,c(.025,.975)) 327 | 328 | #=============================================================== 329 | 330 | res <- data.frame(Estimate = c(mean(alpha),mean(beta),mean(tau_accent),mean(ab)), 331 | BF = c(BFa,BFb,BFt_accent,BF.EM), 332 | PostProb = c(prob_a,prob_b,prob_t_accent,EM)) 333 | 334 | rownames(res) <- c("alpha","beta","tau_prime","Mediation (alpha*beta)") 335 | 336 | result <- list(main_result=res, 337 | CI_ab=CI, 338 | alpha_samples=alpha, 339 | beta_samples=beta, 340 | tau_prime_samples=tau_accent, 341 | ab_samples=ab, 342 | jagssamplesA=jagssamplesA, 343 | jagssamplesTB=jagssamplesTB) 344 | 345 | 346 | class(result) <- c("jzs_med","list") 347 | class(result$main_result) <- c("JZSMed","data.frame") 348 | class(result$jagssamplesA) <- "rjags" 349 | class(result$jagssamplesTB) <- "rjags" 350 | class(result$ab_samples) <- "CI" 351 | class(result$alpha_samples) <- "CI" 352 | class(result$beta_samples) <- "CI" 353 | class(result$tau_prime_samples) <- "CI" 354 | 355 | return(result) 356 | 357 | } 358 | -------------------------------------------------------------------------------- /R/jzs_medSD.R: -------------------------------------------------------------------------------- 1 | jzs_medSD <- 2 | function(independent,dependent,mediator, 3 | SDmethod=c("dnorm","splinefun","logspline", "fit.st"), 4 | alternativeA=c("two.sided","less","greater"), 5 | alternativeB=c("two.sided","less","greater"), 6 | alternativeT=c("two.sided","less","greater"), 7 | n.iter=10000,n.burnin=500, 8 | standardize=TRUE){ 9 | 10 | runif(1) # defines .Random.seed 11 | 12 | # 
independent = vector with values for independent variable 13 | # dependent = vector with values for dependent variable 14 | # mediator = vector with values for mediating variable 15 | 16 | X <- independent 17 | Y <- dependent 18 | M <- mediator 19 | 20 | if(standardize==TRUE){ 21 | X <- (X-mean(X))/sd(X) 22 | Y <- (Y-mean(Y))/sd(Y) 23 | M <- (M-mean(M))/sd(M) 24 | } 25 | 26 | # sample size 27 | n <- length(independent) 28 | 29 | #========================================================== 30 | # load JAGS models 31 | #========================================================== 32 | 33 | jagsmodelcorrelation <- 34 | 35 | "####### Cauchy-prior on single beta ####### 36 | model 37 | 38 | { 39 | 40 | for (i in 1:n) 41 | 42 | { 43 | mu[i] <- intercept + alpha*x[i] 44 | y[i] ~ dnorm(mu[i],phi) 45 | 46 | } 47 | 48 | # uninformative prior on the intercept intercept, 49 | # Jeffreys' prior on precision phi 50 | intercept ~ dnorm(0,.0001) 51 | phi ~ dgamma(.0001,.0001) 52 | #phi ~ dgamma(0.0000001,0.0000001) #JAGS accepts even this 53 | #phi ~ dgamma(0.01,0.01) #WinBUGS wants this 54 | 55 | # inverse-gamma prior on g: 56 | g <- 1/invg 57 | a.gamma <- 1/2 58 | b.gamma <- n/2 59 | invg ~ dgamma(a.gamma,b.gamma) 60 | 61 | 62 | # g-prior on beta: 63 | vari <- (g/phi) * invSigma 64 | prec <- 1/vari 65 | alpha ~ dnorm(0, prec) 66 | } 67 | 68 | # Explanation------------------------------------------------------------------ 69 | # Prior on g: 70 | # We know that g ~ inverse_gamma(1/2, n/2), with 1/2 the shape 71 | # parameter and n/2 the scale parameter. 72 | # It follows that 1/g ~ gamma(1/2, 2/n). 73 | # However, BUGS/JAGS uses the *rate parameterization* 1/theta instead of the 74 | # scale parametrization theta. Hence we obtain, in de BUGS/JAGS rate notation: 75 | # 1/g ~ dgamma(1/2, n/2) 76 | #------------------------------------------------------------------------------ 77 | " 78 | jags.model.file1 <- tempfile(fileext=".txt") 79 | write(jagsmodelcorrelation,jags.model.file1) 80 | 81 | #============================================================================ 82 | 83 | jagsmodelpartialcorrelation <- 84 | 85 | "####### Cauchy-prior on beta and tau' ####### 86 | model 87 | 88 | { 89 | 90 | for (i in 1:n) 91 | 92 | { 93 | mu[i] <- intercept + theta[1]*x[i,1] + theta[2]*x[i,2] 94 | y[i] ~ dnorm(mu[i],phi) 95 | 96 | } 97 | 98 | # uninformative prior on intercept alpha, 99 | # Jeffreys' prior on precision phi 100 | intercept ~ dnorm(0,.0001) 101 | phi ~ dgamma(.0001,.0001) 102 | #phi ~ dgamma(0.0000001,0.0000001) #JAGS accepts even this 103 | #phi ~ dgamma(0.01,0.01) #WinBUGS wants this 104 | 105 | # inverse-gamma prior on g: 106 | g <- 1/invg 107 | a.gamma <- 1/2 108 | b.gamma <- n/2 109 | invg ~ dgamma(a.gamma,b.gamma) 110 | 111 | # Ntzoufras, I. (2009). Bayesian Modeling Using WinBUGS. 112 | # New Jersey: John Wiley & Sons, Inc. p. 167 113 | # calculation of the inverse matrix of V 114 | inverse.V <- inverse(V) 115 | # calculation of the elements of prior precision matrix 116 | for(i in 1:2) 117 | { 118 | for (j in 1:2) 119 | { 120 | prior.T[i,j] <- inverse.V[i,j] * phi/g 121 | } 122 | } 123 | # multivariate prior for the beta vector 124 | theta[1:2] ~ dmnorm( mu.theta, prior.T ) 125 | for(i in 1:2) { mu.theta[i] <- 0 } 126 | 127 | } 128 | 129 | # Explanation----------------------------------------------------------------- 130 | # Prior on g: 131 | # We know that g ~ inverse_gamma(1/2, n/2), with 1/2 the shape parameter and 132 | # n/2 the scale parameter. 133 | # It follows that 1/g ~ gamma(1/2, 2/n). 
134 | # However, BUGS/JAGS uses the *rate parameterization* 1/theta instead of the 135 | # scale parametrization theta. Hence we obtain, in de BUGS/JAGS rate notation: 136 | # 1/g ~ dgamma(1/2, n/2) 137 | # Also note: JAGS does not want [,] structure 138 | #----------------------------------------------------------------------------- 139 | " 140 | 141 | jags.model.file2 <- tempfile(fileext=".txt") 142 | write(jagsmodelpartialcorrelation,jags.model.file2) 143 | 144 | 145 | #========================================================== 146 | # BF FOR PATH alpha: CORRELATION X-M 147 | #========================================================== 148 | 149 | x <- X 150 | y <- M 151 | 152 | invSigma <- solve(t(x)%*%x) 153 | 154 | jags.data <- list("n", "x", "y", "invSigma") 155 | jags.params <- c("alpha", "g") 156 | jags.inits <- list( 157 | list(alpha = 0.0), #chain 1 starting value 158 | list(alpha = -0.3), #chain 2 starting value 159 | list(alpha = 0.3)) #chain 3 starting value 160 | 161 | jagssamplesA <- jags(data=jags.data, inits=jags.inits, jags.params, 162 | n.chains=3, n.iter=n.iter, DIC=T, 163 | n.burnin=n.burnin, n.thin=1, model.file=jags.model.file1) 164 | 165 | # estimate the posterior regression coefficient and scaling factor g 166 | alpha <- jagssamplesA$BUGSoutput$sims.list$alpha[,1] 167 | g <- jagssamplesA$BUGSoutput$sims.list$g 168 | 169 | #------------------------------------------------------------------ 170 | 171 | if(SDmethod[1]=="fit.st"){ 172 | 173 | mydt <- function(x, m, s, df) dt((x-m)/s, df)/s 174 | 175 | foo <- try({ 176 | fit.t1 <- QRM::fit.st(alpha) 177 | nuA <- as.numeric(fit.t1$par.ests[1]) #degrees of freedom 178 | muA <- as.numeric(fit.t1$par.ests[2]) 179 | sigmaA <- abs(as.numeric(fit.t1$par.ests[3])) # This is a hack -- with high n occasionally 180 | # sigma switches sign. 
181 | }) 182 | 183 | if(!("try-error"%in%class(foo))){ 184 | 185 | # BAYES FACTOR ALPHA 186 | BFa <- 1/(mydt(0,muA,sigmaA,nuA)/dcauchy(0)) 187 | 188 | # save BF for one-tailed test 189 | # BF21 = 2*{proportion posterior samples of alpha < 0} 190 | BF21a_less <- 2*pt((0-muA)/sigmaA,nuA,lower.tail=TRUE)/sigmaA 191 | BF21a_greater <- 2*pt((0-muA)/sigmaA,nuA,lower.tail=FALSE)/sigmaA 192 | 193 | } else { 194 | 195 | warning("fit.st did not converge, alternative optimization method was used.","\n") 196 | 197 | mydt2 <- function(pars){ 198 | 199 | mA <- pars[1] 200 | sA <- abs(pars[2]) # no negative standard deviation 201 | dfA <- abs(pars[3]) # no negative degrees of freedom 202 | 203 | -2*sum(dt((alpha-mA)/sA, dfA,log=TRUE)-log(sA)) 204 | } 205 | 206 | res <- optim(c(mean(alpha),sd(alpha),20),mydt2)$par 207 | 208 | mA <- res[1] 209 | sA <- res[2] 210 | dfA <- res[3] 211 | 212 | 213 | # ALTERNATIVE BAYES FACTOR ALPHA 214 | BFa <- 1/(mydt2(0,mA,sA,dfA)/dcauchy(0)) 215 | 216 | } 217 | 218 | #------------------------- 219 | 220 | } else if(SDmethod[1]=="dnorm"){ 221 | BFa <- 1/(dnorm(0,mean(alpha),sd(alpha))/dcauchy(0)) 222 | 223 | #------------------------- 224 | 225 | } else if(SDmethod[1]=="splinefun"){ 226 | f <- splinefun(density(alpha)) 227 | BFa <- 1/(f(0)/dcauchy(0)) 228 | 229 | #------------------------- 230 | 231 | } else if (SDmethod[1]=="logspline"){ 232 | fit.posterior <- polspline::logspline(alpha) 233 | posterior.pp <- polspline::dlogspline(0, fit.posterior) # this gives the pdf at point b2 = 0 234 | prior.pp <- dcauchy(0) # height of prior at b2 = 0 235 | BFa <- prior.pp/posterior.pp 236 | 237 | } 238 | 239 | #-------------------------------------------------------- 240 | 241 | # one-sided test? 242 | 243 | # save BF for one-tailed test 244 | # BF21 = 2*{proportion posterior samples of alpha < 0} 245 | propposterior_less <- sum(alpha<0)/length(alpha) 246 | propposterior_greater <- sum(alpha>0)/length(alpha) 247 | 248 | # posterior proportion cannot be zero, because this renders a BF of zero 249 | # none of the samples of the parameter follow the restriction 250 | # ergo: the posterior proportion is smaller than 1/length(parameter) 251 | 252 | if(propposterior_less==0){ 253 | propposterior_less <- 1/length(alpha) 254 | } 255 | 256 | if(propposterior_greater==0){ 257 | propposterior_greater <- 1/length(alpha) 258 | } 259 | 260 | BF21a_less <- 2*propposterior_less 261 | BF21a_greater <- 2*propposterior_greater 262 | 263 | if(alternativeA[1]=="less"){ 264 | # BF10 = p(D|a~cauchy(0,1))/p(D|a=0) 265 | BF10 <- BFa 266 | 267 | # BF21 = p(D|a~cauchy-(0,1))/p(D|a~cauchy(0,1)) 268 | # BF21 = 2*{proportion posterior samples of alpha < 0} 269 | BF21 <- BF21a_less 270 | 271 | BFa <- BF10*BF21 272 | 273 | } else if(alternativeA[1]=="greater"){ 274 | # BF10 = p(D|a~cauchy(0,1))/p(D|a=0) 275 | BF10 <- BFa 276 | 277 | # BF21 = p(D|a~cauchy+(0,1))/p(D|a~cauchy(0,1)) 278 | # BF21 = 2*{proportion posterior samples of alpha > 0} 279 | BF21 <- BF21a_greater 280 | 281 | BFa <- BF10*BF21 282 | 283 | } 284 | 285 | #-------------------------------------------------------- 286 | 287 | # convert BFs to posterior probability 288 | # prob cannot be exactly 1 or 0 289 | prob_a <- BFa/(BFa+1) 290 | 291 | if(prob_a == 1){ 292 | prob_a <- prob_a - .Machine$double.eps 293 | } 294 | if(prob_a == 0){ 295 | prob_a <- prob_a + .Machine$double.eps 296 | } 297 | 298 | #========================================================== 299 | # BF FOR PATH beta: PARTIAL CORRELATION MY|X 300 | 
298 | #==========================================================
299 | # BF FOR PATH beta: PARTIAL CORRELATION MY|X
300 | #==========================================================
301 | 
302 | x <- cbind(X,M)
303 | y <- Y
304 | 
305 | V <- solve(t(x)%*%x)   # NB I switched to the notation from Ntzoufras, p. 167
306 | 
307 | jags.data <- list("n", "x", "y", "V")
308 | jags.params <- c("theta")
309 | jags.inits <- list(
310 |   list(theta = c(0.0,0.3)),   # chain 1 starting value
311 |   list(theta = c(0.3, 0.0)),  # chain 2 starting value
312 |   list(theta = c(-.15,.15)))  # chain 3 starting value
313 | 
314 | jagssamplesTB <- jags(data=jags.data, inits=jags.inits, jags.params,
315 |                       n.chains=3, n.iter=n.iter, DIC=T,
316 |                       n.burnin=n.burnin, n.thin=1, model.file=jags.model.file2)
317 | 
318 | beta <- jagssamplesTB$BUGSoutput$sims.list$theta[,2]
319 | 
320 | #------------------------------------------------------------------
321 | 
322 | if(SDmethod[1]=="fit.st"){
323 | 
324 |   bar <- try({
325 |     fit.t2 <- QRM::fit.st(beta)
326 |     nuB <- as.numeric(fit.t2$par.ests[1])          # degrees of freedom
327 |     muB <- as.numeric(fit.t2$par.ests[2])
328 |     sigmaB <- abs(as.numeric(fit.t2$par.ests[3]))  # This is a hack -- with high n occasionally
329 |                                                    # sigma switches sign.
330 |   })
331 | 
332 |   if(!("try-error"%in%class(bar))){
333 | 
334 |     # BAYES FACTOR BETA
335 |     BFb <- 1/(mydt(0,muB,sigmaB,nuB)/dcauchy(0))
336 | 
337 |   } else {
338 | 
339 |     warning("fit.st did not converge, alternative optimization method was used.","\n")
340 | 
341 |     mydt2 <- function(pars){
342 | 
343 |       mB <- pars[1]
344 |       sB <- abs(pars[2])   # no negative standard deviation
345 |       dfB <- abs(pars[3])  # no negative degrees of freedom
346 | 
347 |       -2*sum(dt((beta-mB)/sB, dfB,log=TRUE)-log(sB))
348 |     }
349 | 
350 |     res <- optim(c(mean(beta),sd(beta),20),mydt2)$par
351 | 
352 |     mB <- res[1]
353 |     sB <- res[2]
354 |     dfB <- res[3]
355 | 
356 |     # ALTERNATIVE BAYES FACTOR BETA: density of the fitted t at 0 vs. height of the prior at 0
357 |     BFb <- 1/(mydt(0,mB,sB,dfB)/dcauchy(0))
358 |   }
359 | 
360 |   #-------------------------
361 | 
362 | } else if(SDmethod[1]=="dnorm"){
363 | 
364 |   BFb <- 1/(dnorm(0,mean(beta),sd(beta))/dcauchy(0))
365 | 
366 |   #-------------------------
367 | 
368 | } else if(SDmethod[1]=="splinefun"){
369 |   f <- splinefun(density(beta))
370 |   BFb <- 1/(f(0)/dcauchy(0))
371 | 
372 |   #-------------------------
373 | 
374 | } else if (SDmethod[1]=="logspline"){
375 |   fit.posterior <- polspline::logspline(beta)
376 |   posterior.pp <- polspline::dlogspline(0, fit.posterior)  # posterior density at beta = 0
377 |   prior.pp <- dcauchy(0)                                    # height of the prior at beta = 0
378 |   BFb <- prior.pp/posterior.pp
379 | 
380 | }
381 | 
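# --- Illustrative aside (not part of the package source) ---------------------------
# When QRM::fit.st fails, the fallback above fits a shifted, scaled t distribution to
# the posterior samples by minimizing -2 * the log-likelihood with optim(), and the
# Bayes factor is then the Cauchy(0,1) prior height at 0 divided by the fitted
# posterior density at 0 (a Savage-Dickey density ratio). Stand-alone sketch
# (function names illustrative):
fit_scaled_t <- function(samples) {
  negloglik2 <- function(pars) {
    m <- pars[1]; s <- abs(pars[2]); df <- abs(pars[3])
    -2 * sum(dt((samples - m) / s, df, log = TRUE) - log(s))
  }
  est <- optim(c(mean(samples), sd(samples), 20), negloglik2)$par
  c(m = est[1], s = abs(est[2]), df = abs(est[3]))
}
bf10_from_t_fit <- function(samples) {
  p <- fit_scaled_t(samples)
  posterior_at_0 <- dt((0 - p["m"]) / p["s"], p["df"]) / p["s"]  # fitted t density at 0
  unname(dcauchy(0) / posterior_at_0)                            # prior height / posterior height
}
# ------------------------------------------------------------------------------------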
382 | #-------------------------------------------------------
383 | 
384 | # one-sided test?
385 | 
386 | # save BF for one-tailed test
387 | # BF21 = 2*{proportion posterior samples of beta < 0}
388 | propposterior_less <- sum(beta<0)/length(beta)
389 | propposterior_greater <- sum(beta>0)/length(beta)
390 | 
391 | # the posterior proportion cannot be zero, because that would render a BF of zero;
392 | # if none of the samples satisfy the restriction, all we know is that the true
393 | # proportion is smaller than 1/length(parameter), so that value is used instead
394 | 
395 | if(propposterior_less==0){
396 |   propposterior_less <- 1/length(beta)
397 | }
398 | 
399 | if(propposterior_greater==0){
400 |   propposterior_greater <- 1/length(beta)
401 | }
402 | 
403 | BF21b_less <- 2*propposterior_less
404 | BF21b_greater <- 2*propposterior_greater
405 | 
406 | if(alternativeB[1]=="less"){
407 |   # BF10 = p(D|b~cauchy(0,1))/p(D|b=0)
408 |   BF10 <- BFb
409 | 
410 |   # BF21 = p(D|b~cauchy-(0,1))/p(D|b~cauchy(0,1))
411 |   # BF21 = 2*{proportion posterior samples of beta < 0}
412 |   BF21 <- BF21b_less
413 | 
414 |   BFb <- BF10*BF21
415 | 
416 | } else if(alternativeB[1]=="greater"){
417 |   # BF10 = p(D|b~cauchy(0,1))/p(D|b=0)
418 |   BF10 <- BFb
419 | 
420 |   # BF21 = p(D|b~cauchy+(0,1))/p(D|b~cauchy(0,1))
421 |   # BF21 = 2*{proportion posterior samples of beta > 0}
422 |   BF21 <- BF21b_greater
423 | 
424 |   BFb <- BF10*BF21
425 | 
426 | }
427 | 
428 | #---------------------------------------------------
429 | 
430 | # convert BFs to posterior probability
431 | # prob cannot be exactly 1 or 0
432 | prob_b <- BFb/(BFb+1)
433 | 
434 | if(prob_b == 1){
435 |   prob_b <- prob_b - .Machine$double.eps
436 | }
437 | if(prob_b == 0){
438 |   prob_b <- prob_b + .Machine$double.eps
439 | }
440 | 
441 | #=========================================
442 | # calculate evidence for mediation (EM)
443 | #=========================================
444 | 
445 | EM <- prob_a*prob_b
446 | BF.EM <- EM/(1-EM)
447 | 
448 | 
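# --- Illustrative aside (not part of the package source) ---------------------------
# The evidence for mediation combines the two paths: each Bayes factor is converted to
# a posterior probability assuming equal prior odds, prob = BF/(BF+1), EM = prob_a*prob_b
# is the (approximate) posterior probability that alpha and beta are both nonzero, and
# BF.EM = EM/(1-EM) turns that back into a Bayes factor. A worked example with
# arbitrary values BF_alpha = 6 and BF_beta = 9:
#   p_a   <- 6/(6+1)       # 0.857
#   p_b   <- 9/(9+1)       # 0.900
#   EM    <- p_a * p_b     # 0.771: probability that both paths are present
#   BF.EM <- EM/(1-EM)     # about 3.4 in favour of mediation
# ------------------------------------------------------------------------------------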
449 | #=========================================
450 | # FULL OR PARTIAL MEDIATION
451 | #=========================================
452 | 
453 | tau_accent <- jagssamplesTB$BUGSoutput$sims.list$theta[,1]
454 | 
455 | #---------------------------------------------------
456 | 
457 | if(SDmethod[1]=="fit.st"){
458 | 
459 |   baz <- try({
460 |     fit.t3 <- QRM::fit.st(tau_accent)
461 |     nuT <- as.numeric(fit.t3$par.ests[1])          # degrees of freedom
462 |     muT <- as.numeric(fit.t3$par.ests[2])
463 |     sigmaT <- abs(as.numeric(fit.t3$par.ests[3]))  # This is a hack -- with high n occasionally
464 |                                                    # sigma switches sign.
465 |   })
466 | 
467 |   if(!("try-error"%in%class(baz))){
468 | 
469 |     # BAYES FACTOR TAU
470 |     BFt_accent <- 1/(mydt(0,muT,sigmaT,nuT)/dcauchy(0))
471 | 
472 |   } else {
473 | 
474 |     warning("fit.st did not converge, alternative optimization method was used.","\n")
475 | 
476 |     mydt2 <- function(pars){
477 | 
478 |       mT <- pars[1]
479 |       sT <- abs(pars[2])   # no negative standard deviation
480 |       dfT <- abs(pars[3])  # no negative degrees of freedom
481 | 
482 |       -2*sum(dt((tau_accent-mT)/sT, dfT,log=TRUE)-log(sT))
483 |     }
484 | 
485 |     res <- optim(c(mean(tau_accent),sd(tau_accent),20),mydt2)$par
486 | 
487 |     mT <- res[1]
488 |     sT <- res[2]
489 |     dfT <- res[3]
490 | 
491 |     # ALTERNATIVE BAYES FACTOR TAU: density of the fitted t at 0 vs. height of the prior at 0
492 |     BFt_accent <- 1/(mydt(0,mT,sT,dfT)/dcauchy(0))
493 | 
494 |   }
495 | 
496 |   #-------------------------
497 | 
498 | } else if(SDmethod[1]=="dnorm"){
499 |   BFt_accent <- 1/(dnorm(0,mean(tau_accent),sd(tau_accent))/dcauchy(0))
500 | 
501 |   #-------------------------
502 | 
503 | } else if(SDmethod[1]=="splinefun"){
504 |   f <- splinefun(density(tau_accent))
505 |   BFt_accent <- 1/(f(0)/dcauchy(0))
506 | 
507 |   #-------------------------
508 | 
509 | } else if (SDmethod[1]=="logspline"){
510 |   fit.posterior <- polspline::logspline(tau_accent)
511 |   posterior.pp <- polspline::dlogspline(0, fit.posterior)  # posterior density at tau_prime = 0
512 |   prior.pp <- dcauchy(0)                                    # height of the prior at tau_prime = 0
513 |   BFt_accent <- prior.pp/posterior.pp
514 | 
515 | }
516 | 
517 | #------------------------------------------------------------
518 | 
519 | # one-sided test?
520 | 
521 | # save BF for one-tailed test
522 | # BF21 = 2*{proportion posterior samples of tau < 0}
523 | propposterior_less <- sum(tau_accent<0)/length(tau_accent)
524 | propposterior_greater <- sum(tau_accent>0)/length(tau_accent)
525 | 
526 | 
527 | # the posterior proportion cannot be zero, because that would render a BF of zero;
528 | # if none of the samples satisfy the restriction, all we know is that the true
529 | # proportion is smaller than 1/length(parameter), so that value is used instead
530 | 
531 | if(propposterior_less==0){
532 |   propposterior_less <- 1/length(tau_accent)
533 | }
534 | 
535 | if(propposterior_greater==0){
536 |   propposterior_greater <- 1/length(tau_accent)
537 | }
538 | 
539 | BF21t_less <- 2*propposterior_less
540 | BF21t_greater <- 2*propposterior_greater
541 | 
542 | 
543 | if(alternativeT[1]=="less"){
544 |   # BF10 = p(D|t~cauchy(0,1))/p(D|t=0)
545 |   BF10 <- BFt_accent
546 | 
547 |   # BF21 = p(D|t~cauchy-(0,1))/p(D|t~cauchy(0,1))
548 |   # BF21 = 2*{proportion posterior samples of tau_accent < 0}
549 |   BF21 <- BF21t_less
550 | 
551 |   BFt_accent <- BF10*BF21
552 | 
553 | } else if(alternativeT[1]=="greater"){
554 |   # BF10 = p(D|t~cauchy(0,1))/p(D|t=0)
555 |   BF10 <- BFt_accent
556 | 
557 |   # BF21 = p(D|t~cauchy+(0,1))/p(D|t~cauchy(0,1))
558 |   # BF21 = 2*{proportion posterior samples of tau_accent > 0}
559 |   BF21 <- BF21t_greater
560 | 
561 |   BFt_accent <- BF10*BF21
562 | 
563 | }
564 | #--------------------------------------------------------
565 | 
566 | # convert BFs to posterior probability
567 | # prob cannot be exactly 1 or 0
568 | prob_t_accent <- BFt_accent/(BFt_accent+1)
569 | 
570 | if(prob_t_accent == 1){
571 |   prob_t_accent <- prob_t_accent - .Machine$double.eps
572 | }
573 | if(prob_t_accent == 0){
574 |   prob_t_accent <- prob_t_accent + .Machine$double.eps
575 | }
576 | 
577 | 
578 | #===============================================================
579 | 
580 | if(BFa<0){
581 |   BFa <- NA
582 |   warning("Negative Bayes factor: try other SDmethod","\n")
583 | }
584 | 
585 | if(BFb<0){
586 |   BFb <- NA
587 |   warning("Negative Bayes factor: try other SDmethod","\n")
588 | }
589 | 
590 | if(BFt_accent<0){
591 |   warning("Negative Bayes factor: try other SDmethod","\n")
592 | }
593 | 
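# --- Illustrative aside (not part of the package source) ---------------------------
# All three Bayes factors above (alpha, beta, tau') are Savage-Dickey density ratios:
# the height of the Cauchy(0,1) prior at 0 divided by the height of the posterior at 0,
# with the posterior height estimated from the MCMC samples (here shown with the
# "logspline" branch). A minimal stand-alone version (function name illustrative):
savage_dickey_bf10 <- function(samples) {
  fit.posterior <- polspline::logspline(samples)
  posterior.height <- polspline::dlogspline(0, fit.posterior)  # posterior density at 0
  dcauchy(0) / posterior.height                                # prior height / posterior height
}
# e.g. savage_dickey_bf10(rnorm(2e4, mean = 0.4, sd = 0.1)) gives a large BF10,
# while samples tightly centred at 0 give a BF10 below 1.
# ------------------------------------------------------------------------------------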
594 | #===============================================================
595 | 
596 | # calculate 95% credible interval for ab
597 | ab <- alpha*beta
598 | CI <- quantile(ab,c(.025,.975))
599 | 
600 | #===============================================================
601 | 
602 | res <- data.frame(Estimate = c(mean(alpha),mean(beta),mean(tau_accent),mean(ab)),
603 |                   BF = c(BFa,BFb,BFt_accent,BF.EM),
604 |                   PostProb = c(prob_a,prob_b,prob_t_accent,EM))
605 | 
606 | rownames(res) <- c("alpha","beta","tau_prime","Mediation (alpha*beta)")
607 | 
608 | result <- list(main_result=res,
609 |                CI_ab=CI,
610 |                alpha_samples=alpha,
611 |                beta_samples=beta,
612 |                tau_prime_samples=tau_accent,
613 |                ab_samples=ab,
614 |                jagssamplesA=jagssamplesA,
615 |                jagssamplesTB=jagssamplesTB)
616 | 
617 | 
618 | class(result) <- c("jzs_med","list")
619 | class(result$main_result) <- c("JZSMed","data.frame")
620 | class(result$jagssamplesA) <- "rjags"
621 | class(result$jagssamplesTB) <- "rjags"
622 | class(result$ab_samples) <- "CI"
623 | class(result$alpha_samples) <- "CI"
624 | class(result$beta_samples) <- "CI"
625 | class(result$tau_prime_samples) <- "CI"
626 | 
627 | return(result)
628 | }
629 | 
--------------------------------------------------------------------------------