├── .Rbuildignore ├── .github └── workflows │ └── R-CMD-check.yaml ├── .gitignore ├── .travis.yml ├── DESCRIPTION ├── NAMESPACE ├── NEWS ├── R ├── Cor2DataFrame.R ├── Diag.R ├── as.mxMatrix.R ├── asyCov.R ├── bdiagMat.R ├── bdiagRep.R ├── bootuniR.R ├── calEffSizes.R ├── checkRAM.R ├── create.Fmatrix.R ├── create.modMatrix.R ├── create.mxMatrix.R ├── hidden.R ├── homoStat.R ├── impliedR.R ├── indirectEffect.R ├── is.pd.R ├── lavaan2RAM.R ├── list2matrix.R ├── matrix2bdiag.R ├── meta.R ├── meta2semPlot.R ├── meta3L.R ├── meta3LFIML.R ├── metaFIML.R ├── osmasem.R ├── osmasem2.R ├── pattern.R ├── plot.meta.R ├── rCor.R ├── rCor3L.R ├── readDataSet.R ├── reml.R ├── reml3L.R ├── rerun.R ├── sem.R ├── smd.R ├── summary.R ├── tssem.R ├── uniR.R ├── vec2symMat.R └── zzz.R ├── README.md ├── data ├── Aloe14.R ├── BCG.R ├── Becker09.R ├── Becker83.R ├── Becker92.R ├── Becker94.R ├── Berkey98.R ├── Boer16.R ├── Bornmann07.R ├── Chan17.R ├── Cheung00.R ├── Cheung09.R ├── Cooke16.R ├── Cooper03.R ├── Digman97.R ├── Gleser94.R ├── Gnambs18.R ├── HedgesOlkin85.R ├── Hox02.R ├── Hunter83.R ├── Jansen19.R ├── Jaramillo05.R ├── Kalaian96.R ├── Mak09.R ├── Mathieu15.R ├── Nam03.R ├── Nohe15A1.R ├── Nohe15A2.R ├── Norton13.R ├── Roorda11.R ├── Scalco17.R ├── Sheeran20.R ├── Stadler15.R ├── Tenenbaum02.R ├── issp05.R ├── issp89.R ├── vanderPol17.R ├── wvs94a.R └── wvs94b.R ├── inst └── CITATION ├── man ├── Aloe14.Rd ├── BCG.Rd ├── Becker09.Rd ├── Becker83.Rd ├── Becker92.Rd ├── Becker94.Rd ├── Berkey98.Rd ├── Boer16.Rd ├── Bornmann07.Rd ├── Chan17.Rd ├── Cheung00.Rd ├── Cheung09.Rd ├── Cooke16.Rd ├── Cooper03.Rd ├── Cor2DataFrame.Rd ├── Diag.Rd ├── Digman97.Rd ├── Gleser94.Rd ├── Gnambs18.Rd ├── HedgesOlkin85.Rd ├── Hox02.Rd ├── Hunter83.Rd ├── Jansen19.Rd ├── Jaramillo05.Rd ├── Kalaian96.Rd ├── Mak09.Rd ├── Mathieu15.Rd ├── Nam03.Rd ├── Nohe15.Rd ├── Norton13.Rd ├── Roorda11.Rd ├── Scalco17.Rd ├── Sheeran20.Rd ├── Stadler15.Rd ├── Tenenbaum02.Rd ├── VarCorr.Rd ├── anova.Rd ├── as.mxAlgebra.Rd ├── as.mxMatrix.Rd ├── as.symMatrix.Rd ├── asyCov.Rd ├── bdiagMat.Rd ├── bdiagRep.Rd ├── bootuniR1.Rd ├── bootuniR2.Rd ├── calEffSizes.Rd ├── checkRAM.Rd ├── coef.Rd ├── create.Fmatrix.Rd ├── create.Tau2.Rd ├── create.V.Rd ├── create.modMatrix.Rd ├── create.mxMatrix.Rd ├── create.vechsR.Rd ├── homoStat.Rd ├── impliedR.Rd ├── indirectEffect.Rd ├── is.pd.Rd ├── issp05.Rd ├── issp89.Rd ├── lavaan2RAM.Rd ├── list2matrix.Rd ├── matrix2bdiag.Rd ├── meta.Rd ├── meta2semPlot.Rd ├── meta3L.Rd ├── metaSEM-package.Rd ├── osmasem.Rd ├── osmasemR2.Rd ├── osmasemSRMR.Rd ├── pattern.n.Rd ├── pattern.na.Rd ├── plot.Rd ├── print.Rd ├── rCor.Rd ├── readData.Rd ├── reml.Rd ├── reml3L.Rd ├── rerun.Rd ├── sem.Rd ├── smdMES.Rd ├── smdMTS.Rd ├── summary.Rd ├── tssem1.Rd ├── tssemParaVar.Rd ├── uniR1.Rd ├── uniR2.Rd ├── vanderPol17.Rd ├── vcov.Rd ├── vec2symMat.Rd ├── wls.Rd ├── wvs94a.Rd └── wvs94b.Rd ├── tests ├── testthat.R └── testthat │ └── test_utilities.R └── vignettes ├── Examples.html ├── Examples.html.asis ├── metaSEM.pdf └── metaSEM.pdf.asis /.Rbuildignore: -------------------------------------------------------------------------------- 1 | .travis.yml 2 | .gitignore 3 | .git 4 | -------------------------------------------------------------------------------- /.github/workflows/R-CMD-check.yaml: -------------------------------------------------------------------------------- 1 | # Workflow derived from https://github.com/r-lib/actions/tree/v2/examples 2 | # Need help debugging build failures? 
Start at https://github.com/r-lib/actions#where-to-find-help 3 | on: 4 | push: 5 | branches: [main, master] 6 | pull_request: 7 | branches: [main, master] 8 | 9 | name: R-CMD-check 10 | 11 | jobs: 12 | R-CMD-check: 13 | runs-on: ${{ matrix.config.os }} 14 | 15 | name: ${{ matrix.config.os }} (${{ matrix.config.r }}) 16 | 17 | strategy: 18 | fail-fast: false 19 | matrix: 20 | config: 21 | # - {os: macos-latest, r: 'release'} 22 | # - {os: windows-latest, r: 'release'} 23 | - {os: ubuntu-latest, r: 'devel', http-user-agent: 'release'} 24 | - {os: ubuntu-latest, r: 'release'} 25 | # - {os: ubuntu-latest, r: 'oldrel-1'} 26 | 27 | env: 28 | GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} 29 | R_KEEP_PKG_SOURCE: yes 30 | 31 | steps: 32 | - uses: actions/checkout@v3 33 | 34 | - uses: r-lib/actions/setup-pandoc@v2 35 | 36 | - uses: r-lib/actions/setup-r@v2 37 | with: 38 | r-version: ${{ matrix.config.r }} 39 | http-user-agent: ${{ matrix.config.http-user-agent }} 40 | use-public-rspm: true 41 | 42 | - uses: r-lib/actions/setup-r-dependencies@v2 43 | with: 44 | extra-packages: any::rcmdcheck, any::XML 45 | needs: check 46 | 47 | - uses: r-lib/actions/check-r-package@v2 48 | with: 49 | upload-snapshots: true 50 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | .Rhistory 3 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: r 2 | cache: packages 3 | sudo: false 4 | 5 | # r_packages: 6 | # - R.rsp 7 | # - sem 8 | # - qgraph 9 | # - semPlot 10 | 11 | os: 12 | - linux 13 | #addons: 14 | # apt: 15 | # sources: 16 | # - ubuntu-toolchain-r-test 17 | # packages: 18 | # - gcc-7 19 | # - g++-7 20 | # - gfortran-7 21 | #- osx 22 | 23 | r: 24 | # - oldrel 25 | - release 26 | - devel 27 | 28 | #install: 29 | # - sudo apt-get install -yq --allow-unauthenticated --no-install-suggests --no-install-recommends r-cran-openmx 30 | 31 | warnings_are_errors: true 32 | r_check_args: "--run-dontrun" 33 | 34 | #before_install: 35 | # Rscript -e 'install.packages(c("R.rsp", "qgraph", "semPlot"))' 36 | -------------------------------------------------------------------------------- /DESCRIPTION: -------------------------------------------------------------------------------- 1 | Package: metaSEM 2 | Type: Package 3 | Title: Meta-Analysis using Structural Equation Modeling 4 | Version: 1.5.2 5 | Date: 2025-05-08 6 | Depends: R (>= 4.0.0), OpenMx 7 | Imports: Matrix, MASS, ellipse, graphics, stats, utils, mvtnorm, numDeriv, lavaan 8 | Suggests: metafor, semPlot, R.rsp, testthat, matrixcalc 9 | VignetteBuilder: R.rsp 10 | Authors@R: person(given = "Mike", family = "Cheung", role = c("aut","cre"), email = "mikewlcheung@nus.edu.sg", comment = c(ORCID = "0000-0003-0113-0758")) 11 | Maintainer: Mike Cheung 12 | Description: A collection of functions for conducting meta-analysis using a 13 | structural equation modeling (SEM) approach via the 'OpenMx' and 14 | 'lavaan' packages. It also implements various procedures to 15 | perform meta-analytic structural equation modeling on the 16 | correlation and covariance matrices, see Cheung (2015) 17 | . 
18 | License: GPL (>=2) 19 | LazyLoad: yes 20 | LazyData: yes 21 | ByteCompile: yes 22 | URL: https://github.com/mikewlcheung/metasem 23 | BugReports: https://github.com/mikewlcheung/metasem/issues 24 | -------------------------------------------------------------------------------- /NAMESPACE: -------------------------------------------------------------------------------- 1 | exportPattern("^[^\\.]") 2 | import(OpenMx) 3 | 4 | # import(Matrix) 5 | importFrom(Matrix, nearPD) 6 | 7 | importFrom(ellipse, ellipse) 8 | importFrom(MASS, mvrnorm) 9 | importFrom(MASS, ginv) 10 | importFrom(mvtnorm, rmvnorm) 11 | # importFrom(matrixcalc, D.matrix) 12 | 13 | S3method(plot, meta) 14 | S3method(plot, character) 15 | S3method(plot, wls) 16 | S3method(plot, osmasem) 17 | S3method(plot, osmasem2) 18 | S3method(plot, mxsem) 19 | # S3method(plot, osmasem3L) 20 | 21 | S3method(summary, wls) 22 | S3method(summary, wls.cluster) 23 | S3method(summary, tssem1FEM) 24 | S3method(summary, tssem1FEM.cluster) 25 | S3method(summary, tssem1REM) 26 | S3method(summary, meta) 27 | S3method(summary, meta3LFIML) 28 | S3method(summary, reml) 29 | S3method(summary, CorPop) 30 | S3method(summary, Cor3L) 31 | S3method(summary, bootuniR2) 32 | S3method(summary, osmasem) 33 | S3method(summary, osmasem2) 34 | S3method(summary, mxsem) 35 | # S3method(summary, osmasem3L) 36 | # S3method(summary, tssemRobust1) 37 | 38 | S3method(anova, meta) 39 | S3method(anova, meta3LFIML) 40 | S3method(anova, wls) 41 | S3method(anova, reml) 42 | S3method(anova, osmasem) 43 | S3method(anova, osmasem2) 44 | S3method(anova, mxsem) 45 | # S3method(anova, osmasem3L) 46 | 47 | S3method(coef, tssem1FEM) 48 | S3method(coef, tssem1FEM.cluster) 49 | S3method(coef, wls) 50 | S3method(coef, wls.cluster) 51 | S3method(coef, tssem1REM) 52 | S3method(coef, meta) 53 | S3method(coef, meta3LFIML) 54 | S3method(coef, reml) 55 | S3method(coef, osmasem) 56 | S3method(coef, osmasem2) 57 | S3method(coef, mxsem) 58 | # S3method(coef, osmasem3L) 59 | # S3method(coef, tssemRobust1) 60 | 61 | S3method(vcov, wls) 62 | S3method(vcov, tssem1FEM) 63 | S3method(vcov, tssem1FEM.cluster) 64 | S3method(vcov, wls.cluster) 65 | S3method(vcov, tssem1REM) 66 | S3method(vcov, meta) 67 | S3method(vcov, meta3LFIML) 68 | S3method(vcov, reml) 69 | S3method(vcov, osmasem) 70 | S3method(vcov, osmasem2) 71 | S3method(vcov, mxsem) 72 | # S3method(vcov, osmasem3L) 73 | # S3method(vcov, tssemRobust1) 74 | 75 | S3method(print, wls) 76 | S3method(print, tssem1FEM) 77 | S3method(print, tssem1FEM.cluster) 78 | S3method(print, tssem1REM) 79 | S3method(print, meta) 80 | S3method(print, meta3LFIML) 81 | S3method(print, reml) 82 | S3method(print, summary.wls) 83 | S3method(print, summary.tssem1FEM) 84 | S3method(print, summary.meta) 85 | S3method(print, summary.meta3LFIML) 86 | S3method(print, summary.reml) 87 | S3method(print, impliedR) 88 | S3method(print, uniR1) 89 | S3method(print, summary.CorPop) 90 | S3method(print, summary.Cor3L) 91 | S3method(print, summary.bootuniR2) 92 | S3method(print, summary.mxsem) 93 | 94 | # required by R3.3 95 | importFrom("graphics", "abline", "arrows", "layout", "par", "plot", 96 | "points", "polygon") 97 | importFrom("stats", "as.formula", "coef", "cov2cor", "deriv", 98 | "na.omit", "pchisq", "pnorm", "printCoefmat", "qchisq", 99 | "qnorm", "reshape", "var", "vcov", "weighted.mean", "uniroot", 100 | "cov", "cor", "rWishart", "sd", "quantile") 101 | importFrom("utils", "read.table") 102 | 103 | importFrom("lavaan", "sem", "lavaanify", "inspect", "coef", "vcov") 104 | 
105 | -------------------------------------------------------------------------------- /R/Diag.R: -------------------------------------------------------------------------------- 1 | Diag <- function(x, ...) { 2 | #Diag <- function(x, nrow, ncol) { 3 | if (inherits(x, "character") & missing(...)) { 4 | p <- length(x) 5 | out <- matrix(0, nrow=p, ncol=p) 6 | diag(out) <- x 7 | } else { 8 | out <- diag(x, ...) 9 | #out <- diag(x, nrow=nrow, ncol=ncol) 10 | } 11 | out 12 | } 13 | 14 | `Diag<-` <- function(x, value) { 15 | diag(x) <- value 16 | x 17 | } 18 | -------------------------------------------------------------------------------- /R/bdiagMat.R: -------------------------------------------------------------------------------- 1 | bdiagMat <- function(x){ 2 | if(!is.list(x)) stop("\"x\" must be a list.") 3 | n <- length(x) 4 | if(n==0) return(NULL) 5 | x <- lapply(x, function(y) if(length(y)) as.matrix(y) else stop("Zero-length component in x")) 6 | d <- array(unlist(lapply(x, dim)), c(2, n)) 7 | rr <- d[1,] 8 | cc <- d[2,] 9 | rsum <- sum(rr) 10 | csum <- sum(cc) 11 | out <- array(0, c(rsum, csum)) 12 | ind <- array(0, c(4, n)) 13 | rcum <- cumsum(rr) 14 | ccum <- cumsum(cc) 15 | ind[1,-1] <- rcum[-n] 16 | ind[2,] <- rcum 17 | ind[3,-1] <- ccum[-n] 18 | ind[4,] <- ccum 19 | imat <- array(1:(rsum * csum), c(rsum, csum)) 20 | iuse <- apply(ind, 2, function(y, imat) imat[(y[1]+1):y[2], 21 | (y[3]+1):y[4]], imat=imat) 22 | iuse <- as.vector(unlist(iuse)) 23 | out[iuse] <- unlist(x) 24 | return(out) 25 | } 26 | -------------------------------------------------------------------------------- /R/bdiagRep.R: -------------------------------------------------------------------------------- 1 | bdiagRep <- function(x, times) { 2 | bdiagMat( replicate(times, x, simplify=FALSE) ) 3 | } 4 | -------------------------------------------------------------------------------- /R/calEffSizes.R: -------------------------------------------------------------------------------- 1 | calEffSizes <- function(model, data=NULL, n, Cov, Mean=NULL, group=NULL, 2 | lavaan.output=FALSE, warn=FALSE, ...) { 3 | 4 | ## When raw data are present 5 | if (!is.null(data)) { 6 | fit <- lavaan::sem(model, data=data, group=group, warn=warn, ...) 7 | } else { 8 | ## Summary statistics as inputs 9 | if (is.null (Mean)) { 10 | fit <- lavaan::sem(model, sample.cov=Cov, sample.nobs=n, group=group, 11 | sample.cov.rescale=FALSE, warn=warn, ...) 12 | } else { 13 | fit <- lavaan::sem(model, sample.cov=Cov, sample.mean=Mean, 14 | sample.nobs=n, group=group, 15 | sample.cov.rescale=FALSE, warn=warn, ...) 
16 | } 17 | } 18 | 19 | if (lavaan.output==FALSE) { 20 | 21 | ## Get the free parameters in the model 22 | x <- fit@Fit@x 23 | 24 | ## Get the sampling covariance matrix of the parameter estimates 25 | VCOV <- lavaan::vcov(fit) 26 | 27 | ## Compute the effect sizes 28 | ES <- fit@Model@def.function(.x.=x) 29 | 30 | ## Compute the jacobian for 'defined parameters' 31 | JAC <- .lavJacobianD(func=fit@Model@def.function, x=x) 32 | 33 | ## Compute the sampling covariance matrix using delta method 34 | ES.VCOV <- JAC %*% VCOV %*% t(JAC) 35 | 36 | ## Add the variable names for ease of reference 37 | dimnames(ES.VCOV) <- list(names(ES), names(ES)) 38 | 39 | fit <- list(ES=ES, VCOV=ES.VCOV) 40 | } 41 | fit 42 | } 43 | 44 | 45 | -------------------------------------------------------------------------------- /R/checkRAM.R: -------------------------------------------------------------------------------- 1 | checkRAM <- function(Amatrix, Smatrix, cor.analysis=TRUE) { 2 | if (missing(Amatrix)&missing(Smatrix)) { 3 | warning("Either 'Amatrix' or 'Smatrix' must be present.") 4 | } 5 | 6 | ## Check A 7 | if (!missing(Amatrix)) { 8 | ## Convert A into mxMatrix if they are not yet. 9 | if (is.matrix(Amatrix)) { 10 | Amatrix <- as.mxMatrix(Amatrix, name="A") 11 | } else { 12 | Amatrix@name <- "A" 13 | } 14 | 15 | ## A_name <- deparse(substitute(A)) 16 | 17 | ## Check diagonals: either free or non-zero values 18 | if ( any(Diag(Amatrix$free)) | any(Diag(Amatrix$values)!=0) ) { 19 | warning("Diagonals of the 'Amatrix' must be zeros.\n") 20 | } 21 | 22 | ## Check both lower tri & upper tri are TRUE 23 | if ( any(Amatrix$free & t(Amatrix$free)) ) { 24 | warning("Non-recursive models are not allowed in the 'Amatrix'.\n") 25 | } 26 | } 27 | 28 | ## Check S 29 | if (!missing(Smatrix)) { 30 | if (is.matrix(Smatrix)) { 31 | Smatrix <- as.mxMatrix(Smatrix, name="S") 32 | } else { 33 | Smatrix@name <- "S" 34 | } 35 | 36 | ## S_name <- deparse(substitute(S)) 37 | ## Cannot check 'free' for definition variables 38 | ## Only check labels!!! 39 | S_labels <- Smatrix$labels 40 | 41 | ## Check symmetric 42 | if (!isSymmetric(Smatrix$free)) { 43 | warning("The free parameters of the 'Smatrix' must be symmetric.\n") 44 | } 45 | 46 | if (!isSymmetric(Smatrix$labels)) { 47 | warning("The labels of 'Smatrix' must be symmetric.\n") 48 | } 49 | 50 | if (!isSymmetric(Smatrix$values)) { 51 | warning("The values of 'Smatrix' must be symmetric.\n") 52 | } 53 | 54 | ## ## Check digaonals 55 | ## if (SdiagZero==TRUE & any(!is.na(Diag(S_labels)))) { 56 | ## warning("Diagonal of 'S' must be 0.\n") 57 | ## } 58 | 59 | ## Check both A and S: Variances of IVs must be fixed at 1 and DVs must be free 60 | ## Limitation: it may still give warnings when there are DVs with fixed parameters in A 61 | if (cor.analysis==TRUE & !missing(Amatrix)) { 62 | ## Check A if it is a DV 63 | dv <- apply(Amatrix$free, 1, any) 64 | 65 | if ( any(diag(Smatrix$free)[!dv]) | !all(diag(Smatrix$values)[!dv]==1) ) { 66 | warning("The variances of the independent variables in 'Smatrix' must be fixed at 1.") 67 | } 68 | if (!all(diag(Smatrix$free)[dv])) { 69 | warning("The variances of the dependent variables in 'Smatrix' should be free.") 70 | } 71 | } 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /R/create.Fmatrix.R: -------------------------------------------------------------------------------- 1 | create.Fmatrix <- function(x, name, as.mxMatrix=TRUE, ...) 
{ 2 | x <- as.logical(x) 3 | Fmatrix <- Diag(as.numeric(x))[x, , drop=FALSE] 4 | if (as.mxMatrix) { 5 | if (missing(name)) as.mxMatrix(Fmatrix) else as.mxMatrix(Fmatrix, name=name) 6 | } else { 7 | Fmatrix 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /R/create.modMatrix.R: -------------------------------------------------------------------------------- 1 | create.modMatrix <- function(RAM, output=c("A", "S"), mod) { 2 | output <- match.arg(output) 3 | 4 | switch(output, 5 | A = { out <- RAM$A 6 | out[grep("\\*", out)] <- paste0("0*data.", mod)}, 7 | S = { out <- RAM$S 8 | out[grep("\\*", out)] <- paste0("0*data.", mod) 9 | Diag(out) <- "0"}) 10 | 11 | out 12 | } 13 | 14 | -------------------------------------------------------------------------------- /R/homoStat.R: -------------------------------------------------------------------------------- 1 | homoStat <- function(y, v) { 2 | if (is.vector(y)) no.y <- 1 else no.y <- ncol(y) 3 | if (is.vector(v)) no.v <- 1 else no.v <- ncol(v) 4 | if ( no.v != no.y*(no.y+1)/2 ) 5 | stop(paste("The expected no. of columns in v is ", no.y*(no.y+1)/2, 6 | " while the observed no. of columns in v is ", no.v, ".", sep="")) 7 | 8 | if (no.y==1) { 9 | miss.index <- is.na(y) 10 | y <- y[!miss.index] 11 | v <- v[!miss.index] 12 | w <- 1/v 13 | beta <- sum(y*w)/sum(w) 14 | Q <- sum( w*(y-beta)^2 ) 15 | Q.df <- length(y)-1 16 | pval <- 1-pchisq(Q, df=Q.df) 17 | } else { 18 | Y <- matrix( c(t(y)), ncol=1 ) 19 | miss.index <- is.na(Y) 20 | Y <- matrix( Y[!miss.index], ncol=1 ) 21 | X <- matrix( rep(Diag(no.y), nrow(y)), ncol=no.y, byrow=TRUE ) 22 | 23 | X <- X[!miss.index, , drop=FALSE] 24 | V <- matrix2bdiag(v) 25 | V <- V[!miss.index, !miss.index, drop=FALSE] 26 | 27 | V_inv <- chol2inv(chol(V)) 28 | Q <- t(Y) %*% ( V_inv - V_inv %*% X %*% solve(t(X) 29 | %*% V_inv %*% X) %*% t(X) %*% V_inv ) %*% Y 30 | Q.df <- nrow(X)-ncol(X) 31 | pval <- 1-pchisq(Q, df=Q.df) 32 | } 33 | list(Q=Q, Q.df=Q.df, pval=pval) 34 | } 35 | 36 | 37 | -------------------------------------------------------------------------------- /R/is.pd.R: -------------------------------------------------------------------------------- 1 | is.pd <- function(x, check.aCov=FALSE, cor.analysis=TRUE, tol=1e-06) { 2 | if (is.list(x)) { 3 | return(sapply(x, is.pd, check.aCov=check.aCov, cor.analysis=cor.analysis, tol=tol)) 4 | } 5 | else { 6 | ## Criteria based on asyCov() 7 | if (check.aCov) { 8 | if (cor.analysis) Diag(x)[is.na(Diag(x))] <- 1 else Diag(x)[is.na(Diag(x))] <- mean(Diag(x), na.rm=TRUE) 9 | x[is.na(x)] <- 0 10 | } else { 11 | ## Normal definition of pd 12 | miss.index <- is.na(Diag(x)) 13 | x <- x[!miss.index, !miss.index] 14 | } 15 | 16 | ## Catch the error when there are NA in the matrix 17 | lambda <- tryCatch(eigen(x, only.values = TRUE)$values, error=function(e) e) 18 | ## Return NA when there are NA in the matrix 19 | if (inherits(lambda, "error")) { 20 | out <- NA 21 | } else { 22 | # lambda_k/lambda_1 > tol 23 | ## if (lambda[length(lambda)]/lambda[1] > tol) { 24 | 25 | ## Use the definition in MASS::mvrnorm 26 | if (all(lambda >= -tol*abs(lambda[1L]))) { 27 | out <- TRUE 28 | } else { 29 | out <- FALSE 30 | } 31 | } 32 | } 33 | return(out) 34 | } 35 | -------------------------------------------------------------------------------- /R/list2matrix.R: -------------------------------------------------------------------------------- 1 | list2matrix <- function(x, diag=FALSE) { 2 | if (!is.list(x)) 3 | stop("\"x\" has to be a list.") 4 | 
if (!identical(0, var(sapply(x, function(x){dim(x)[[1]]})))) 5 | stop("Dimensions of matrices in \"x\" have to be the same in order to stack them together.") 6 | 7 | if (is.null(dimnames(x[[1]]))) { 8 | oldNames <- paste("x", 1:dim(x[[1]])[[1]], sep = "") 9 | } else { 10 | oldNames <- dimnames(x[[1]])[[1]] 11 | } 12 | 13 | if (diag) { 14 | psNames <- vech(outer(oldNames, oldNames, paste, sep = "_")) 15 | ## out <- t(sapply(x, function(x) {(vech(x))})) 16 | ## out is a list 17 | out <- lapply(x, vech) 18 | } else { 19 | psNames <- vechs(outer(oldNames, oldNames, paste, sep = "_")) 20 | ## Fix a bug found by Steffen Zitzmann when x is a 2x2 matrix with diag=FALSE 21 | ## It returns 1xn vector rather than nx1 because of sapply(). 22 | ## out <- t(sapply(x, function(x) {(vechs(x))})) 23 | out <- lapply(x, vechs) 24 | } 25 | 26 | ## convert the list into a matrix 27 | ## out <- matrix(unlist(out), nrow=length(out), byrow=TRUE) 28 | out <- do.call(rbind, out) 29 | 30 | dimnames(out) <- list(names(x), psNames) 31 | out 32 | } 33 | -------------------------------------------------------------------------------- /R/matrix2bdiag.R: -------------------------------------------------------------------------------- 1 | matrix2bdiag <- function(x, ...) { 2 | tmp <- split(as.matrix(x), row(x)) 3 | # Use bdiagMat() to handle string matrices 4 | out <- bdiagMat(lapply(tmp, vec2symMat, ...)) 5 | as.matrix(out) 6 | } 7 | -------------------------------------------------------------------------------- /R/pattern.R: -------------------------------------------------------------------------------- 1 | pattern.n <- function(x, n) { 2 | if (!is.list(x)) stop("\"x\" must be a list of matrices.\n") 3 | if (length(x)!=length(n)) stop("The lengths of \"x\" and \"n\" must be the same.\n") 4 | 5 | fun <- function(x1, n1) { 6 | ## x2: a copy of x1 7 | x2 <- x1 8 | ## replace NA with 0 9 | x2[is.na(x1)] <-0 10 | ## replace not NA with the sample size 11 | x2[!is.na(x1)] <- n1 12 | x2} 13 | my.df <- mapply(fun, x, n, SIMPLIFY=FALSE) 14 | Reduce('+', my.df) 15 | } 16 | 17 | pattern.na <- function(x, show.na=TRUE, 18 | type=c("tssem", "osmasem")) { 19 | 20 | type <- match.arg(type) 21 | 22 | if (type=="tssem") { 23 | out <- Reduce("+", lapply(x, is.na)) 24 | if (show.na==FALSE) { 25 | out <- length(x)-out 26 | } 27 | } else { 28 | out <- x$data[, x$ylabels] 29 | out <- split(out, seq_len(nrow(out))) 30 | out <- lapply(out, function(x) { x <- is.na(x) 31 | matrix(x, ncol=1, nrow=length(x)) %*% 32 | matrix(x, ncol=length(x), nrow=1) }) 33 | out <- Reduce("+", out) 34 | dimnames(out) <- list(x$ylabels, x$ylabels) 35 | if (show.na==FALSE) { 36 | out <- nrow(x$data)-out 37 | } 38 | } 39 | out 40 | } 41 | 42 | -------------------------------------------------------------------------------- /R/rCor3L.R: -------------------------------------------------------------------------------- 1 | rCor3L <- function(Sigma, V.B, V.W, n, cluster, corr=TRUE, raw.data=FALSE, 2 | nonPD.pop=c("replace", "nearPD", "accept"), 3 | nonPD.sam=c("stop", "nearPD")) { 4 | 5 | nonPD.pop <- match.arg(nonPD.pop) 6 | nonPD.sam <- match.arg(nonPD.sam) 7 | 8 | if (sum(cluster) != length(n)) { 9 | stop("The length of 'n' must equal the sum of 'cluster'.\n") 10 | } 11 | 12 | ## Generate between-level population matrices 13 | P.B <- rCorPop(Sigma=Sigma, V=V.B, k=length(cluster), corr=corr, nonPD.pop=nonPD.pop) 14 | 15 | ## Generate within-level population matrices 16 | P.W <- mapply(rCorPop, Sigma=P.B, k=cluster, 17 | MoreArgs=list(V=V.W, corr=corr, 
nonPD.pop=nonPD.pop), 18 | SIMPLIFY=FALSE) 19 | 20 | names(P.B) <- names(P.W) <- paste0("Cluster", seq_along(cluster)) 21 | 22 | ## Add "_" in the names, which make it easier to read later 23 | P.W.tmp <- P.W 24 | names(P.W.tmp) <- paste0("Cluster", seq_along(cluster), "_") 25 | 26 | ## Generate sample matrices 27 | R <- rCorSam(Sigma=unlist(P.W.tmp, recursive=FALSE), n=n, corr=corr, 28 | raw.data=raw.data, nonPD.sam=nonPD.sam) 29 | 30 | ## Labels for the clusters 31 | Cluster <- paste0("Cluster", rep(seq_along(cluster), times=cluster)) 32 | 33 | out <- list(P.B=P.B, P.W=P.W, R=R, cluster=Cluster, n=n) 34 | attr(out, "Sigma") <- Sigma 35 | attr(out, "V.B") <- V.B 36 | attr(out, "V.W") <- V.W 37 | attr(out, "cluster") <- cluster 38 | class(out) <- "Cor3L" 39 | out 40 | } 41 | 42 | summary.Cor3L <- function(object, ...) { 43 | if (!is.element("Cor3L", class(object))) 44 | stop("\"object\" must be an object of class \"Cor3L\".") 45 | 46 | cluster <- attr(object, "cluster") 47 | 48 | sum.b <- summary(object$P.B) 49 | sum.w <- lapply(object$P.W, summary) 50 | 51 | ## Numbers of within studies. Should be the same as cluster. 52 | k.w <- sapply(sum.w, function(x) x$k) 53 | 54 | ## Empirical V.W 55 | V.W.emp <- lapply(sum.w, function(x) x$V_Samp) 56 | V.W.emp <- Reduce("+", Map("*", k.w, V.W.emp))/sum(k.w) 57 | nonPD.pop.W <- sum.w[[1]]$nonPD.pop 58 | nonPD.count.W <- sum(sapply(sum.w, function(x) x$nonPD.count)) 59 | 60 | out <- list(Sigma=attr(object, "Sigma"), 61 | V.B = attr(object, "V.B"), 62 | V.W = attr(object, "V.W"), 63 | cluster = cluster, 64 | Sigma.emp = sum.b$R, 65 | V.B.emp = sum.b$V_Samp, 66 | nonPD.pop.B = sum.b$nonPD.pop, 67 | nonPD.count.B = sum.b$nonPD.count, 68 | V.W.emp = V.W.emp, 69 | nonPD.pop.W = nonPD.pop.W, 70 | nonPD.count.W = nonPD.count.W) 71 | class(out) <- "summary.Cor3L" 72 | out 73 | } 74 | 75 | print.summary.Cor3L <- function(x, ...) { 76 | if (!is.element("summary.Cor3L", class(x))) 77 | stop("\"x\" must be an object of class \"summary.Cor3L\".") 78 | 79 | cat("Population Sigma:\n") 80 | print(x$Sigma) 81 | cat("\nCluster sizes:\n") 82 | print(x$cluster) 83 | 84 | cat("\nPopulation V (between):\n") 85 | print(x$V.B) 86 | cat("\nEmpirical V (between):\n") 87 | print(x$V.B.emp) 88 | cat("\nMethod to handle non-positive definite matrices (between):", x$nonPD.pop.B) 89 | cat("\nNumber of samples (between):", length(x$cluster)) 90 | cat("\nCount of non-positive definite matrices (between):", x$nonPD.count.B, "\n") 91 | 92 | cat("\nPopulation V (within):\n") 93 | print(x$V.W) 94 | cat("\nEmpirical V (within):\n") 95 | print(x$V.W.emp) 96 | cat("\nMethod to handle non-positive definite matrices (within):", x$nonPD.pop.W) 97 | cat("\nNumber of samples (within):", sum(x$cluster)) 98 | cat("\nCount of non-positive definite matrices (within):", x$nonPD.count.W, "\n") 99 | } 100 | 101 | -------------------------------------------------------------------------------- /R/readDataSet.R: -------------------------------------------------------------------------------- 1 | readFullMat <- function(file, ...) { 2 | my.df <- read.table(file = file, ...) 3 | no.lines <- nrow(my.df) 4 | no.var <- ncol(my.df) 5 | if (no.lines%%no.var == 0) { 6 | no.groups <- no.lines/no.var 7 | } else { 8 | stop("No. of lines read is not divided by the no. 
of variables.") 9 | } 10 | var.names <- paste("x", 1:no.var, sep = "") 11 | my.list <- split(my.df, rep(1:no.groups, each = no.var)) 12 | # my.mat <- lapply(my.list, matrix, byrow = TRUE, ncol = no.var, nrow = no.var) 13 | # Add variable names into the matrices 14 | out <- lapply(my.list, function(x, v.names) { 15 | dimnames(x) <- list(v.names, v.names) 16 | x 17 | }, var.names) 18 | out 19 | lapply(out, function(x) {as.matrix(x)} ) 20 | } 21 | 22 | 23 | 24 | readLowTriMat <- function(file, no.var, ...) { 25 | if (missing(no.var)) 26 | stop("No. of variables was missing.") 27 | # problem: read by row major! 28 | my.scan <- scan(file = file, ...) 29 | ps <- no.var * (no.var + 1)/2 30 | no.groups <- length(my.scan)/ps 31 | if (length(my.scan)%%ps != 0) 32 | stop("No. of elements read != no.var*(no.var+1)*no.of.studies.") 33 | my.df <- matrix(my.scan, ncol = ps, nrow = no.groups, byrow = TRUE) 34 | my.list <- split(my.df, 1:no.groups) 35 | # mat 1: no.var by no.var of 0 36 | # mat 2: fill lower triangle of mat by my.list 37 | # mat 3: lower + upper 38 | my.mat <- lapply(my.list, function(x, no.var) { 39 | mat <- matrix(0, ncol = no.var, nrow = no.var) 40 | mat[upper.tri(mat, diag = TRUE)] <- x 41 | mat[lower.tri(mat)] <- t(mat)[lower.tri(mat)] 42 | mat 43 | }, no.var) 44 | # Add variable names into the matrices 45 | var.names <- paste("x", 1:no.var, sep = "") 46 | out <- lapply(my.mat, function(x, v.names) { 47 | dimnames(x) <- list(v.names, v.names) 48 | x 49 | }, var.names) 50 | out 51 | } 52 | 53 | 54 | #### Convert list of matrices into row vector of matrices 55 | #my.mat <- do.call(rbind, lapply(my.df, vech)) 56 | #write.table(my.mat, file='row.dat', row.names=FALSE, col.names=FALSE) 57 | # http://tolstoy.newcastle.edu.au/R/help/04/11/6694.html 58 | 59 | #### Read cov/cor elements by column major 60 | #### Sample data of 2 studies with 3 variables 61 | #### Study1: c11 c21 c31 c22 c32 c33 62 | #### Study2: c11 c21 c31 c22 c32 c33 63 | readStackVec <- function(file, ...) { 64 | my.df <- as.matrix(read.table(file = file, ...)) 65 | # Convert no. of covariance elements into no. of variables by p*=p(p+1)/2 66 | no.col <- ncol(my.df) 67 | no.var <- (sqrt(1 + 8 * no.col) - 1)/2 68 | if (abs(no.var - round(no.var)) > .Machine$double.eps^0.5) { 69 | stop("No. of columns does not match no. 
of variables: p(p+1)/2") 70 | } 71 | no.groups <- nrow(my.df) 72 | my.list <- split(my.df, 1:no.groups) 73 | 74 | # mat 1: no.var by no.var of 0 75 | # mat 2: fill lower triangle of mat by my.list 76 | # mat 3: lower + upper 77 | my.mat <- lapply(my.list, function(x, no.var) { 78 | mat <- matrix(0, ncol = no.var, nrow = no.var) 79 | mat[lower.tri(mat, diag = TRUE)] <- x 80 | mat[upper.tri(mat)] <- t(mat)[upper.tri(mat)] 81 | mat 82 | }, no.var) 83 | 84 | # Add variable names into the matrices 85 | var.names <- paste("x", 1:no.var, sep = "") 86 | out <- lapply(my.mat, function(x, v.names) { 87 | dimnames(x) <- list(v.names, v.names) 88 | x 89 | }, var.names) 90 | out 91 | } 92 | -------------------------------------------------------------------------------- /R/vec2symMat.R: -------------------------------------------------------------------------------- 1 | vec2symMat <- function (x, diag=TRUE, byrow=FALSE) { 2 | m <- length(x) 3 | d <- if (diag) 1 else -1 4 | n <- floor((sqrt(1 + 8*m) - d)/2) 5 | if (m != n*(n + d)/2) 6 | stop("Cannot make a square matrix as the length of \"x\" is incorrect.") 7 | mat <- Diag(n) 8 | 9 | ## Row major 10 | if (byrow) { 11 | mat[upper.tri(mat, diag=diag)] <- x 12 | index <- lower.tri(mat) 13 | mat[index] <- t(mat)[index] 14 | } else { 15 | ## Column major: default behavior 16 | mat[lower.tri(mat, diag=diag)] <- x 17 | # Just mirroring the matrix, exclude the diagonals 18 | ## mat[upper.tri(mat, diag=FALSE)] <- mat[lower.tri(mat, diag=FALSE)] 19 | ## Corrected a bug 20 | index <- upper.tri(mat) 21 | mat[index] <- t(mat)[index] 22 | } 23 | mat 24 | } 25 | -------------------------------------------------------------------------------- /R/zzz.R: -------------------------------------------------------------------------------- 1 | ## Removed to improve performance 2 | 3 | ## .onAttach <- function(lib, pkg){ 4 | ## libMatrix <- installed.packages() 5 | ## packageStartupMessage("Loaded OpenMx version ", libMatrix["OpenMx", "Version"], ".") 6 | ## packageStartupMessage("Loading metaSEM version ", libMatrix["metaSEM", "Version"], ".") 7 | ## packageStartupMessage("You may refer to the vignette for the examples.\n") 8 | ## } 9 | 10 | .onAttach <- function(lib, pkg){ 11 | ## SLSQP (not NPSOL) is used as the default optimizer. 12 | ## "central" (not "forward") is used as the default "Gradient algorithm". 
13 | 14 | mxOption(NULL, "Default optimizer", "SLSQP") 15 | mxOption(NULL, "Gradient algorithm", "central") 16 | mxOption(NULL, "Optimality tolerance", "6.3e-14") 17 | mxOption(NULL, "Gradient iterations", 2) 18 | 19 | packageStartupMessage('"SLSQP" is set as the default optimizer in OpenMx.') 20 | packageStartupMessage('mxOption(NULL, "Gradient algorithm") is set at "', mxOption(NULL, "Gradient algorithm"), '".') 21 | packageStartupMessage('mxOption(NULL, "Optimality tolerance") is set at "', mxOption(NULL, "Optimality tolerance"), '".') 22 | packageStartupMessage('mxOption(NULL, "Gradient iterations") is set at "', mxOption(NULL, "Gradient iterations"), '".') 23 | 24 | #packageStartupMessage('If "SLSQP" does not work well for you, e.g., there are many error codes,') 25 | #packageStartupMessage('you may install the "NPSOL" optimizer from the OpenMx website and use it by calling:') 26 | #packageStartupMessage('mxOption(NULL, "Default optimizer", "NPSOL")') 27 | } 28 | 29 | ## Added .onLoad See https://github.com/mikewlcheung/metasem/issues/3 and https://github.com/OpenMx/OpenMx/issues/98 30 | .onLoad <- function(lib, pkg){ 31 | mxOption(NULL, "Default optimizer", "SLSQP") 32 | mxOption(NULL, "Gradient algorithm", "central") 33 | mxOption(NULL, "Optimality tolerance", "6.3e-14") 34 | mxOption(NULL, "Gradient iterations", 2) 35 | 36 | ## packageStartupMessage('"SLSQP" is set as the default optimizer in OpenMx.') 37 | ## packageStartupMessage('mxOption(NULL, "Gradient algorithm") is set at "', mxOption(NULL, "Gradient algorithm"), '".') 38 | ## packageStartupMessage('mxOption(NULL, "Optimality tolerance") is set at "', mxOption(NULL, "Optimality tolerance"), '".') 39 | ## packageStartupMessage('mxOption(NULL, "Gradient iterations") is set at "', mxOption(NULL, "Gradient iterations"), '".') 40 | } 41 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![R build status](https://github.com/mikewlcheung/metasem/workflows/R-CMD-check/badge.svg)](https://github.com/mikewlcheung/metasem/actions) 2 | [![cran version](http://www.r-pkg.org/badges/version/metaSEM)](https://cran.r-project.org/package=metaSEM) 3 | [![Monthly Downloads](https://cranlogs.r-pkg.org/badges/metaSEM)](https://cranlogs.r-pkg.org/badges/metaSEM) 4 | [![Total Downloads](https://cranlogs.r-pkg.org/badges/grand-total/metaSEM)](https://cranlogs.r-pkg.org/badges/grand-total/metaSEM) 5 | [![Rdoc](https://www.rdocumentation.org/badges/version/metaSEM)](https://www.rdocumentation.org/packages/metaSEM) 6 | [![DOI](https://img.shields.io/badge/doi-10.3389/fpsyg.2014.01521-yellow.svg?style=flat)](https://doi.org/10.3389/fpsyg.2014.01521) 7 | 8 | The `metaSEM` package conducts univariate and multivariate meta-analyses using a structural equation modeling (SEM) approach via the `OpenMx` package. It also implements the two-stage SEM approach to conduct meta-analytic structural equation modeling on correlation/covariance matrices. 
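A minimal usage sketch (not part of the original README; added here for illustration) using the bundled `Hox02` and `Digman97` datasets and the package's documented `meta()` and `tssem1()` interfaces:

```
library(metaSEM)

## Univariate random-effects meta-analysis of the bundled Hox (2002) data
## (yi: effect size; vi: sampling variance)
summary(meta(y = yi, v = vi, data = Hox02))

## Mixed-effects model with "weeks" as a moderator
summary(meta(y = yi, v = vi, x = weeks, data = Hox02))

## Stage 1 of the two-stage SEM (TSSEM) approach:
## pool the correlation matrices in Digman97 under a random-effects model
stage1 <- tssem1(Cov = Digman97$data, n = Digman97$n, method = "REM")
summary(stage1)
```

The pooled correlation matrix from `tssem1()` can then be passed to `tssem2()`, together with model-implied `Amatrix`/`Smatrix` matrices (which can be generated from lavaan syntax via `lavaan2RAM()`), to fit the structural model in stage 2.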
9 | 10 | * [Examples](https://cran.r-project.org/package=metaSEM/vignettes/Examples.html) 11 | * [Vignettes](https://cran.r-project.org/package=metaSEM/vignettes/metaSEM.pdf) 12 | * [Reference manual](https://cran.r-project.org/package=metaSEM/metaSEM.pdf) 13 | 14 | The stable version can be installed from CRAN by: 15 | ``` 16 | install.packages("metaSEM") 17 | ``` 18 | 19 | The developmental version can be installed from GitHub by: 20 | ``` 21 | ## Install remotes package if it has not been installed yet 22 | # install.packages("remotes") 23 | 24 | remotes::install_github("mikewlcheung/metasem") 25 | ``` 26 | -------------------------------------------------------------------------------- /data/Aloe14.R: -------------------------------------------------------------------------------- 1 | Aloe14 <- 2 | structure(list(Study = structure(c(1L, 2L, 3L, 4L, 5L, 6L, 6L, 3 | 7L, 8L, 9L, 10L, 11L, 12L, 13L, 14L, 15L), .Label = c("Betoret", 4 | "Brouwers & Tomic", "Bumen", "Chang", "Durr", "Evers et al.", 5 | "Friedman", "Gold", "Huk", "Kress", "Kumarakulasingam", "Martin et al.", 6 | "Ozdemir", "Skaalvik and Skaalvik", "Williams"), class = "factor"), 7 | Year = c(2009L, 2000L, 2010L, 2009L, 2008L, 2002L, 2004L, 8 | 2003L, 1985L, 2011L, 2007L, 2002L, 2012L, 2007L, 2007L, 2012L 9 | ), EE = c(-0.38, -0.4, -0.31, -0.32, -0.47, -0.26, -0.3, 10 | -0.15, -0.08, -0.08, 0, -0.22, -0.24, -0.42, -0.35, -0.26 11 | ), DP = c(-0.32, -0.39, -0.34, -0.41, -0.54, -0.31, -0.33, 12 | -0.33, -0.1, -0.11, -0.2, -0.3, -0.4, -0.46, -0.34, -0.26 13 | ), PA = c(0.62, 0.56, 0.48, 0.41, 0.71, 0.39, 0.56, 0.23, 14 | 0.12, 0.06, 0.23, 0.56, 0.49, 0.62, 0.37, 0.4), V_EE = c(0.0016, 15 | 0.0013, 0.0014, 0.0021, 0.0061, 0.0093, 0.002, 0.0045, 0.0019, 16 | 0.0136, 0.0143, 0.0058, 0.0016, 0.002, 0.0042, 0.0048), V_DP = c(0.0018, 17 | 9e-04, 0.0014, 0.0019, 0.0063, 0.0067, 0.0013, 0.0045, 0.0015, 18 | 0.0118, 0.0102, 0.0066, 0.001, 0.002, 0.0049, 0.004), V_PA = c(0.0011, 19 | 8e-04, 0.0012, 0.0019, 0.0041, 0.0066, 0.001, 0.0048, 0.0016, 20 | 0.0107, 0.0087, 0.0049, 7e-04, 0.0015, 0.0047, 0.0033), C_EE_DP = c(5e-04, 21 | 6e-04, 7e-04, 9e-04, 0.0032, 0.0028, 7e-04, 0.0011, 0.001, 22 | 0.0062, 0.0081, 0.0029, 9e-04, 5e-04, 0.0014, 0.0021), C_EE_PA = c(-2e-04, 23 | -4e-04, -3e-04, -0.001, -0.001, -0.0015, -4e-04, -0.0018, 24 | -5e-04, -0.0041, -0.002, -7e-04, -3e-04, -2e-04, -0.0011, 25 | -7e-04), C_DP_PA = c(-3e-04, -4e-04, -4e-04, -0.0011, -0.0012, 26 | -0.0045, -6e-04, -0.0014, -7e-04, -0.0051, -0.0026, -0.001, 27 | -4e-04, -3e-04, -0.0013, -9e-04), Publication_type = structure(c(2L, 28 | 2L, 2L, 1L, 1L, 2L, 2L, 2L, 2L, 1L, 1L, 1L, 2L, 2L, 2L, 1L 29 | ), .Label = c("Dissertation", "Journal"), class = "factor"), 30 | Percentage_females = c(0.7, 0.26, 0.69, 0.79, 0.82, 0.23, 31 | 0.22, 0.86, 0.81, NA, 0.83, 1, 0.79, 0.66, 0.6, 0.68), Years_experience = c(10.08, 32 | 21.25, 18.14, 2.58, 2.71, 22.14, NA, 12.9, NA, 12.39, NA, 33 | 13.3, 13.3, 13.77, 14, NA)), .Names = c("Study", "Year", 34 | "EE", "DP", "PA", "V_EE", "V_DP", "V_PA", "C_EE_DP", "C_EE_PA", 35 | "C_DP_PA", "Publication_type", "Percentage_females", "Years_experience" 36 | ), class = "data.frame", row.names = c(NA, -16L)) 37 | -------------------------------------------------------------------------------- /data/BCG.R: -------------------------------------------------------------------------------- 1 | BCG <- 2 | structure(list(Trial = 1:13, Author = structure(c(1L, 5L, 8L, 3 | 7L, 6L, 9L, 11L, 10L, 2L, 8L, 3L, 4L, 3L), .Label = c("Aronson", 4 | "Coetzee & Berjak", "Comstock 
et al", "Comstock & Webster", "Ferguson & Simes", 5 | "Frimodt-Moller et al", "Hart & Sutherland", "Rosenthal et al", 6 | "Stein & Aronson", "TPT Madras", "Vandiviere et al"), class = "factor"), 7 | Year = c(1948L, 1949L, 1960L, 1977L, 1973L, 1953L, 1973L, 8 | 1980L, 1968L, 1961L, 1974L, 1969L, 1976L), VD = c(4L, 6L, 9 | 3L, 62L, 33L, 180L, 8L, 505L, 29L, 17L, 186L, 5L, 27L), VWD = c(119L, 10 | 300L, 228L, 13536L, 5036L, 1361L, 2537L, 87886L, 7470L, 1699L, 11 | 50448L, 2493L, 16886L), NVD = c(11L, 29L, 11L, 248L, 47L, 12 | 372L, 10L, 499L, 45L, 65L, 141L, 3L, 29L), NVWD = c(128L, 13 | 274L, 209L, 12619L, 5761L, 1079L, 619L, 87892L, 7232L, 1600L, 14 | 27197L, 2338L, 17825L), Latitude = c(44L, 55L, 42L, 52L, 15 | 13L, 44L, 19L, 13L, 27L, 42L, 18L, 33L, 33L), Allocation = structure(c(2L, 16 | 2L, 2L, 2L, 1L, 1L, 2L, 2L, 2L, 3L, 3L, 3L, 3L), .Label = c("alternate", 17 | "random", "systematic"), class = "factor"), ln_OR = c(-0.938694140870392, 18 | -1.66619072902655, -1.38629436111989, -1.45644354931298, 19 | -0.219141085675433, -0.958122040776268, -1.6337758382289, 20 | 0.0120206014527324, -0.471746035838696, -1.40121013928348, 21 | -0.340849646366211, 0.446634682274447, -0.0173418739290936 22 | ), v_ln_OR = c(0.357124952253629, 0.208132393657186, 0.43341307814992, 23 | 0.0203144129731974, 0.0519517773171203, 0.00990526551533824, 24 | 0.227009675223494, 0.00400696201214883, 0.0569771239877627, 25 | 0.0754217263149204, 0.0125251338202155, 0.534162172474717, 26 | 0.0716351172956587), ln_Odd_V = c(-3.39282913199164, -3.91202300542815, 27 | -4.33073334028633, -5.38597369680091, -5.02785983369776, 28 | -2.02301815176126, -5.75929601791945, -5.15923736981291, 29 | -5.55135444814039, -4.60458177761572, -5.60295170807164, 30 | -6.21180417108946, -6.43840328922512), ln_Odd_NV = c(-2.45413499112125, 31 | -2.2458322764016, -2.94443897916644, -3.92953014748793, -4.80871874802232, 32 | -1.06489611098499, -4.12552017969055, -5.17125797126564, 33 | -5.07960841230169, -3.20337163833224, -5.26210206170543, 34 | -6.6584388533639, -6.42106141529602), v_ln_Odd_V = c(0.258403361344538, 35 | 0.17, 0.337719298245614, 0.0162029093266224, 0.0305016005969143, 36 | 0.00629030941301331, 0.125394166338195, 0.00199157639633522, 37 | 0.0346166274292573, 0.059412110930305, 0.0053961664773948, 38 | 0.200401123144805, 0.0370962576932019), cov_V_NV = c(0, 0, 39 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), v_ln_Odd_NV = c(0.0987215909090909, 40 | 0.038132393657186, 0.0956937799043062, 0.00411150364657493, 41 | 0.0214501767202059, 0.00361495610232493, 0.101615508885299, 42 | 0.00201538561581361, 0.0223604965585054, 0.0160096153846154, 43 | 0.00712896734282072, 0.333761049329912, 0.0345388596024568 44 | )), .Names = c("Trial", "Author", "Year", "VD", "VWD", "NVD", 45 | "NVWD", "Latitude", "Allocation", "ln_OR", "v_ln_OR", "ln_Odd_V", 46 | "ln_Odd_NV", "v_ln_Odd_V", "cov_V_NV", "v_ln_Odd_NV"), row.names = c(NA, 47 | -13L), class = "data.frame") 48 | -------------------------------------------------------------------------------- /data/Becker09.R: -------------------------------------------------------------------------------- 1 | Becker09 <- 2 | structure(list(data = structure(list(`1` = structure(c(1, -0.55, 3 | -0.48, 0.66, -0.55, 1, 0.47, -0.38, -0.48, 0.47, 1, -0.46, 0.66, 4 | -0.38, -0.46, 1), .Dim = c(4L, 4L), .Dimnames = list(c("Performance", 5 | "Cognitive", "Somatic", "Self_confidence"), c("Performance", 6 | "Cognitive", "Somatic", "Self_confidence"))), `3` = structure(c(1, 7 | 0.53, -0.12, 0.03, 0.53, 1, 0.52, -0.48, -0.12, 0.52, 
1, -0.4, 8 | 0.03, -0.48, -0.4, 1), .Dim = c(4L, 4L), .Dimnames = list(c("Performance", 9 | "Cognitive", "Somatic", "Self_confidence"), c("Performance", 10 | "Cognitive", "Somatic", "Self_confidence"))), `6` = structure(c(1, 11 | 0.44, 0.46, NA, 0.44, 1, 0.67, NA, 0.46, 0.67, 1, NA, NA, NA, 12 | NA, NA), .Dim = c(4L, 4L), .Dimnames = list(c("Performance", 13 | "Cognitive", "Somatic", "Self_confidence"), c("Performance", 14 | "Cognitive", "Somatic", "Self_confidence"))), `10` = structure(c(1, 15 | -0.39, -0.17, 0.19, -0.39, 1, 0.21, -0.54, -0.17, 0.21, 1, -0.43, 16 | 0.19, -0.54, -0.43, 1), .Dim = c(4L, 4L), .Dimnames = list(c("Performance", 17 | "Cognitive", "Somatic", "Self_confidence"), c("Performance", 18 | "Cognitive", "Somatic", "Self_confidence"))), `17` = structure(c(1, 19 | 0.1, 0.31, -0.17, 0.1, 1, NA, NA, 0.31, NA, NA, NA, -0.17, NA, 20 | NA, NA), .Dim = c(4L, 4L), .Dimnames = list(c("Performance", 21 | "Cognitive", "Somatic", "Self_confidence"), c("Performance", 22 | "Cognitive", "Somatic", "Self_confidence"))), `22` = structure(c(1, 23 | 0.23, 0.08, 0.51, 0.23, 1, 0.45, -0.29, 0.08, 0.45, 1, -0.44, 24 | 0.51, -0.29, -0.44, 1), .Dim = c(4L, 4L), .Dimnames = list(c("Performance", 25 | "Cognitive", "Somatic", "Self_confidence"), c("Performance", 26 | "Cognitive", "Somatic", "Self_confidence"))), `26` = structure(c(1, 27 | -0.52, -0.43, 0.16, -0.52, 1, 0.57, -0.18, -0.43, 0.57, 1, -0.26, 28 | 0.16, -0.18, -0.26, 1), .Dim = c(4L, 4L), .Dimnames = list(c("Performance", 29 | "Cognitive", "Somatic", "Self_confidence"), c("Performance", 30 | "Cognitive", "Somatic", "Self_confidence"))), `28` = structure(c(1, 31 | 0.14, 0.02, 0.13, 0.14, 1, 0.56, -0.53, 0.02, 0.56, 1, -0.27, 32 | 0.13, -0.53, -0.27, 1), .Dim = c(4L, 4L), .Dimnames = list(c("Performance", 33 | "Cognitive", "Somatic", "Self_confidence"), c("Performance", 34 | "Cognitive", "Somatic", "Self_confidence"))), `36` = structure(c(1, 35 | -0.01, -0.16, 0.42, -0.01, 1, 0.62, -0.46, -0.16, 0.62, 1, -0.54, 36 | 0.42, -0.46, -0.54, 1), .Dim = c(4L, 4L), .Dimnames = list(c("Performance", 37 | "Cognitive", "Somatic", "Self_confidence"), c("Performance", 38 | "Cognitive", "Somatic", "Self_confidence"))), `38` = structure(c(1, 39 | -0.27, -0.13, 0.15, -0.27, 1, 0.63, -0.68, -0.13, 0.63, 1, -0.71, 40 | 0.15, -0.68, -0.71, 1), .Dim = c(4L, 4L), .Dimnames = list(c("Performance", 41 | "Cognitive", "Somatic", "Self_confidence"), c("Performance", 42 | "Cognitive", "Somatic", "Self_confidence")))), .Names = c("1", 43 | "3", "6", "10", "17", "22", "26", "28", "36", "38")), n = c(142, 44 | 37, 16, 14, 45, 100, 51, 128, 70, 30), Type_of_sport = c("Individual", 45 | "Individual", "Team", "Individual", "Individual", "Individual", 46 | "Team", "Team", "Team", "Individual")), .Names = c("data", "n", 47 | "Type_of_sport")) 48 | -------------------------------------------------------------------------------- /data/Becker83.R: -------------------------------------------------------------------------------- 1 | Becker83 <- matrix( 2 | c(1,-0.33,0.03,25,2, 3 | 2,0.07,0.03,25,2, 4 | 3,-0.3,0.02,50,2, 5 | 4,0.35,0.02,100,38, 6 | 5,0.69,0.07,100,30, 7 | 6,0.81,0.22,100,45, 8 | 7,0.4,0.05,100,45, 9 | 8,0.47,0.07,100,45, 10 | 9,0.37,0.05,100,5, 11 | 10,-0.06,0.03,100,5), 12 | ncol=5, byrow=TRUE) 13 | dimnames(Becker83) <- list(NULL, c("study","di","vi","percentage","items")) 14 | Becker83 <- data.frame(Becker83) 15 | -------------------------------------------------------------------------------- /data/Becker92.R: 
-------------------------------------------------------------------------------- 1 | r <- matrix(c(.46,.31,.19,.46,.55,.32,.397,.402,.183,.266,.567,.218,.28,.19,.18,.47,-.21,-.15), ncol=3, byrow=T) 2 | my.df <- lapply(split(r, 1:6), function(x) { out <- diag(rep(1,3)) 3 | out[lower.tri(out)] <- x 4 | out[upper.tri(out)] <- x 5 | dimnames(out) <- list(c("Math", "Spatial", "Verbal"), 6 | c("Math", "Spatial", "Verbal")) 7 | out}) 8 | names(my.df) <- c("Berry (1957)", "Rosenberg (1981)", "Weiner 1 (1984)", "Weiner 2 (1984)", "Becker 1 (1978)", "Becker 2 (1978)") 9 | Becker92 <- list(data=my.df, n=c(103,69,69,70,153,74)) 10 | rm(r, my.df) 11 | -------------------------------------------------------------------------------- /data/Becker94.R: -------------------------------------------------------------------------------- 1 | Becker94 <- 2 | structure(list(data = structure(list(`Becker (1978) Females` = structure(c(1, 3 | 0.47, -0.21, 0.47, 1, -0.15, -0.21, -0.15, 1), .Dim = c(3L, 3L 4 | ), .Dimnames = list(c("Math", "Spatial", "Verbal" 5 | ), c("Math", "Spatial", "Verbal"))), `Becker (1978) Males` = structure(c(1, 6 | 0.28, 0.19, 0.28, 1, 0.18, 0.19, 0.18, 1), .Dim = c(3L, 3L), .Dimnames = list( 7 | c("Math", "Spatial", "Verbal"), c("Math", 8 | "Spatial", "Verbal"))), `Berry (1957) Females` = structure(c(1, 9 | 0.48, 0.41, 0.48, 1, 0.26, 0.41, 0.26, 1), .Dim = c(3L, 3L), .Dimnames = list( 10 | c("Math", "Spatial", "Verbal"), c("Math", 11 | "Spatial", "Verbal"))), `Berry (1957) Males` = structure(c(1, 12 | 0.37, 0.4, 0.37, 1, 0.27, 0.4, 0.27, 1), .Dim = c(3L, 3L), .Dimnames = list( 13 | c("Math", "Spatial", "Verbal"), c("Math", 14 | "Spatial", "Verbal"))), `Rosenberg (1981) Females` = structure(c(1, 15 | 0.42, 0.48, 0.42, 1, 0.23, 0.48, 0.23, 1), .Dim = c(3L, 3L), .Dimnames = list( 16 | c("Math", "Spatial", "Verbal"), c("Math", 17 | "Spatial", "Verbal"))), `Rosenberg (1981) Males` = structure(c(1, 18 | 0.41, 0.74, 0.41, 1, 0.44, 0.74, 0.44, 1), .Dim = c(3L, 3L), .Dimnames = list( 19 | c("Math", "Spatial", "Verbal"), c("Math", 20 | "Spatial", "Verbal"))), `Weiner A (1984) Females` = structure(c(1, 21 | 0.26, 0.72, 0.26, 1, 0.36, 0.72, 0.36, 1), .Dim = c(3L, 3L), .Dimnames = list( 22 | c("Math", "Spatial", "Verbal"), c("Math", 23 | "Spatial", "Verbal"))), `Weiner A (1984) Males` = structure(c(1, 24 | 0.32, 0.52, 0.32, 1, 0.1, 0.52, 0.1, 1), .Dim = c(3L, 3L), .Dimnames = list( 25 | c("Math", "Spatial", "Verbal"), c("Math", 26 | "Spatial", "Verbal"))), `Weiner B (1984) Females` = structure(c(1, 27 | 0.58, 0.64, 0.58, 1, 0.4, 0.64, 0.4, 1), .Dim = c(3L, 3L), .Dimnames = list( 28 | c("Math", "Spatial", "Verbal"), c("Math", 29 | "Spatial", "Verbal"))), `Weiner B (1984) Males` = structure(c(1, 30 | 0.34, 0.28, 0.34, 1, -0.03, 0.28, -0.03, 1), .Dim = c(3L, 3L), .Dimnames = list( 31 | c("Math", "Spatial", "Verbal"), c("Math", 32 | "Spatial", "Verbal")))), .Names = c("Becker (1978) Females", 33 | "Becker (1978) Males", "Berry (1957) Females", "Berry (1957) Males", 34 | "Rosenberg (1981) Females", "Rosenberg (1981) Males", "Weiner A (1984) Females", 35 | "Weiner A (1984) Males", "Weiner B (1984) Females", "Weiner B (1984) Males" 36 | )), n = c(74, 153, 48, 55, 51, 18, 27, 43, 35, 34), gender = c("Females", 37 | "Males", "Females", "Males", "Females", "Males", "Females", "Males", 38 | "Females", "Males")), .Names = c("data", "n", "gender")) 39 | -------------------------------------------------------------------------------- /data/Berkey98.R: 
-------------------------------------------------------------------------------- 1 | Berkey98 <- matrix( 2 | c(1,1983,14,0.47,-0.32,0.0075,0.0030,0.0077, 3 | 2,1982,15,0.20,-0.60,0.0057,0.0009,0.0008, 4 | 3,1979,78,0.40,-0.12,0.0021,0.0007,0.0014, 5 | 4,1987,89,0.26,-0.31,0.0029,0.0009,0.0015, 6 | 5,1988,16,0.56,-0.39,0.0148,0.0072,0.0304), 7 | ncol=8, byrow=TRUE) 8 | dimnames(Berkey98) <- list(NULL, c("trial","pub_year","no_of_patients", 9 | "PD","AL","var_PD","cov_PD_AL","var_AL")) 10 | Berkey98 <- data.frame(Berkey98) 11 | -------------------------------------------------------------------------------- /data/Cheung09.R: -------------------------------------------------------------------------------- 1 | Cheung09 <- 2 | structure(list(data = structure(list(`1` = structure(c(0.77298, 3 | 0.26975, 0.24009, 0.23778, 0.20869, 0.22377, 0.18801, 0.07055, 4 | 0.10051, 0.26975, 0.91307, 0.44374, 0.26083, 0.28387, 0.2066, 5 | 0.12764, 0.22892, 0.0959, 0.24009, 0.44374, 1.11292, 0.2944, 6 | 0.26262, 0.2732, 0.18548, 0.20417, 0.18243, 0.23778, 0.26083, 7 | 0.2944, 0.80501, 0.47489, 0.45939, 0.40998, 0.09104, 0.10142, 8 | 0.20869, 0.28387, 0.26262, 0.47489, 0.89692, 0.41972, 0.31541, 9 | 0.33907, 0.06561, 0.22377, 0.2066, 0.2732, 0.45939, 0.41972, 10 | 1.36089, 0.74274, 0.18137, 0.12973, 0.18801, 0.12764, 0.18548, 11 | 0.40998, 0.31541, 0.74274, 1.01075, 0.13724, 0.12776, 0.07055, 12 | 0.22892, 0.20417, 0.09104, 0.33907, 0.18137, 0.13724, 1.81805, 13 | -0.0198, 0.10051, 0.0959, 0.18243, 0.10142, 0.06561, 0.12973, 14 | 0.12776, -0.0198, 0.91252), .Dim = c(9L, 9L), .Dimnames = list( 15 | c("x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9"), 16 | c("x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9"))), 17 | `2` = structure(c(NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 18 | NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1.06293, 0.27094, 19 | 0.20331, 0.16522, 0.11922, 0.25387, 0.06877, NA, NA, 0.27094, 20 | 0.73625, 0.27053, 0.33506, 0.33495, 0.16124, 0.00912, NA, 21 | NA, 0.20331, 0.27053, 0.71718, 0.19873, 0.14582, 0.21907, 22 | 0.04089, NA, NA, 0.16522, 0.33506, 0.19873, 0.92247, 0.55128, 23 | 0.17143, 0.0138, NA, NA, 0.11922, 0.33495, 0.14582, 0.55128, 24 | 1.00462, 0.16561, 0.04322, NA, NA, 0.25387, 0.16124, 0.21907, 25 | 0.17143, 0.16561, 1.49431, 0.29094, NA, NA, 0.06877, 0.00912, 26 | 0.04089, 0.0138, 0.04322, 0.29094, 1.0196), .Dim = c(9L, 27 | 9L), .Dimnames = list(c("x1", "x2", "x3", "x4", "x5", "x6", 28 | "x7", "x8", "x9"), c("x1", "x2", "x3", "x4", "x5", "x6", 29 | "x7", "x8", "x9"))), `3` = structure(c(0.95825, 0.32958, 30 | NA, NA, NA, 0.13948, 0.15463, 0.15248, 0.10405, 0.32958, 31 | 1.02277, NA, NA, NA, 0.073, 0.07002, 0.17056, 0.13502, NA, 32 | NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 33 | NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0.13948, 0.073, 34 | NA, NA, NA, 0.82987, 0.43769, 0.23195, 0.03856, 0.15463, 35 | 0.07002, NA, NA, NA, 0.43769, 0.83476, 0.19002, 0.03986, 36 | 0.15248, 0.17056, NA, NA, NA, 0.23195, 0.19002, 1.42583, 37 | 0.38343, 0.10405, 0.13502, NA, NA, NA, 0.03856, 0.03986, 38 | 0.38343, 1.03062), .Dim = c(9L, 9L), .Dimnames = list(c("x1", 39 | "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9"), c("x1", 40 | "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9"))), `4` = structure(c(0.83995, 41 | 0.21117, 0.14249, 0.13268, 0.17861, 0.22783, 0.18991, NA, 42 | NA, 0.21117, 0.9338, 0.34383, 0.1904, 0.15068, 0.12191, -0.04762, 43 | NA, NA, 0.14249, 0.34383, 1.33025, 0.31041, 0.10873, 0.19756, 44 | 0.12113, NA, NA, 0.13268, 0.1904, 0.31041, 0.77512, 0.36093, 45 | 
0.36519, 0.22716, NA, NA, 0.17861, 0.15068, 0.10873, 0.36093, 46 | 0.91598, 0.37035, 0.1955, NA, NA, 0.22783, 0.12191, 0.19756, 47 | 0.36519, 0.37035, 1.48445, 0.62637, NA, NA, 0.18991, -0.04762, 48 | 0.12113, 0.22716, 0.1955, 0.62637, 1.05049, NA, NA, NA, NA, 49 | NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 50 | NA), .Dim = c(9L, 9L), .Dimnames = list(c("x1", "x2", "x3", 51 | "x4", "x5", "x6", "x7", "x8", "x9"), c("x1", "x2", "x3", 52 | "x4", "x5", "x6", "x7", "x8", "x9")))), .Names = c("1", "2", 53 | "3", "4")), n = c(591, 656, 832, 823)), .Names = c("data", "n" 54 | )) 55 | -------------------------------------------------------------------------------- /data/Cooper03.R: -------------------------------------------------------------------------------- 1 | Cooper03 <- matrix( 2 | c(11,1,-0.18,0.118,1976, 3 | 11,2,-0.22,0.118,1976, 4 | 11,3,0.23,0.144,1976, 5 | 11,4,-0.30,0.144,1976, 6 | 12,5,0.13,0.014,1989, 7 | 12,6,-0.26,0.014,1989, 8 | 12,7,0.19,0.015,1989, 9 | 12,8,0.32,0.024,1989, 10 | 18,9,0.45,0.023,1994, 11 | 18,10,0.38,0.043,1994, 12 | 18,11,0.29,0.012,1994, 13 | 27,12,0.16,0.020,1976, 14 | 27,13,0.65,0.004,1976, 15 | 27,14,0.36,0.004,1976, 16 | 27,15,0.60,0.007,1976, 17 | 56,16,0.08,0.019,1997, 18 | 56,17,0.04,0.007,1997, 19 | 56,18,0.19,0.005,1997, 20 | 56,19,-0.06,0.004,1997, 21 | 58,20,-0.18,0.020,1976, 22 | 58,21,0.00,0.018,1976, 23 | 58,22,0.00,0.019,1976, 24 | 58,23,-0.28,0.022,1976, 25 | 58,24,-0.04,0.020,1976, 26 | 58,25,-0.30,0.021,1976, 27 | 58,26,0.07,0.006,1976, 28 | 58,27,0.00,0.007,1976, 29 | 58,28,0.05,0.007,1976, 30 | 58,29,-0.08,0.007,1976, 31 | 58,30,-0.09,0.007,1976, 32 | 71,31,0.30,0.015,1997, 33 | 71,32,0.98,0.011,1997, 34 | 71,33,1.19,0.010,1997, 35 | 86,34,-0.07,0.001,1997, 36 | 86,35,-0.05,0.001,1997, 37 | 86,36,-0.01,0.001,1997, 38 | 86,37,0.02,0.001,1997, 39 | 86,38,-0.03,0.001,1997, 40 | 86,39,0.00,0.001,1997, 41 | 86,40,0.01,0.001,1997, 42 | 86,41,-0.10,0.001,1997, 43 | 91,42,0.50,0.010,2000, 44 | 91,43,0.66,0.011,2000, 45 | 91,44,0.20,0.010,2000, 46 | 91,45,0.00,0.009,2000, 47 | 91,46,0.05,0.013,2000, 48 | 91,47,0.07,0.013,2000, 49 | 108,48,-0.52,0.031,2000, 50 | 108,49,0.70,0.031,2000, 51 | 108,50,-0.03,0.030,2000, 52 | 108,51,0.27,0.030,2000, 53 | 108,52,-0.34,0.030,2000, 54 | 644,53,0.12,0.087,1995, 55 | 644,54,0.61,0.082,1995, 56 | 644,55,0.04,0.067,1994, 57 | 644,56,-0.05,0.067,1994), 58 | ncol=5, byrow=TRUE, dimnames=list(NULL, c("District", "Study", "y", "v", "Year"))) 59 | Cooper03 <- data.frame(Cooper03) 60 | -------------------------------------------------------------------------------- /data/Digman97.R: -------------------------------------------------------------------------------- 1 | Digman97 <- matrix( 2 | c(-.48,-.10,.62,.27,.41,.59,.37,.00,.35,.41, 3 | -.30,.07,.39,.09,.53,.59,.45,-.05,.44,.22, 4 | .25,-.10,.65,.24,.35,.37,.41,.14,.33,.41, 5 | -.26,-.16,.65,.01,.70,.71,.66,-.03,.24,.11, 6 | .29,.16,.64,.32,.35,.27,.53,.22,.22,.36, 7 | .35,.20,.66,.49,.57,.45,.59,.38,.31,.31, 8 | .13,.43,.25,.37,.59,.28,.35,.15,.12,.10, 9 | .16,.26,.36,.36,.41,.26,.33,.19,.16,.07, 10 | .11,.19,.18,.22,.44,.42,.56,.24,.05,.12, 11 | .42,.25,.34,.26,.69,.43,.46,.44,.54,.42, 12 | .04,.27,.24,.21,.25,.53,.40,-.02,-.02,-.02, 13 | -.07,.22,.13,.21,.25,.49,.43,-.06,-.04,-.05, 14 | -.04,-.03,.25,-.03,.34,.41,.28,-.17,.08,.12, 15 | .06,.04,.13,.16,.23,.17,.24,-.09,-.03,-.01), 16 | ncol=10, byrow=TRUE) 17 | Digman97 <- lapply(split(Digman97, 1:14), 18 | function(x) {mat <- matrix(1, ncol=5, nrow=5); 19 | mat[upper.tri(mat, diag=FALSE)] <- x; 20 
| mat[lower.tri(mat)] <- t(mat)[lower.tri(mat)]; 21 | mat}) 22 | Digman97 <- lapply(Digman97, function(x, var.names) {dimnames(x) <- list(var.names, var.names); x}, 23 | var.names=c("E", "A", "C", "ES", "I")) 24 | ## Arrange the data into Alpha: A, C and ES; Beta: E and I 25 | Digman97 <- lapply( Digman97, function(x) x[c(2,3,4,1,5), c(2,3,4,1,5)] ) 26 | names(Digman97) <- c("Digman 1 (1994)", "Digman 2 (1994)", "Digman 3 (1963c)", "Digman & Takemoto-Chock (1981b)", 27 | "Graziano & Ward (1992)", "Yik & Bond (1993)", "John et al. 1 (1984)", "John et al. 2 (1984)", 28 | "Costa & McCrae 1 (1992c)", "Costa & McCrae 2 (1992b)", "Costa & McCrae 3 (1992b)", 29 | "Costa, McCrae, & Dye (1991)", "Barrick & Mount (1993)", "Goldberg (1992a)") 30 | 31 | Digman97.n <- c(102,149,334,162,91,656,70,70,277,227,1000,227,91,1040) 32 | 33 | Digman97.cluster <- c(rep("Children", 4), "Adolescents", rep("Young adults", 3), rep("Mature adults", 6)) 34 | Digman97 <- list(data=Digman97, n=Digman97.n, cluster=Digman97.cluster) 35 | rm(Digman97.n) 36 | rm(Digman97.cluster) 37 | -------------------------------------------------------------------------------- /data/Gleser94.R: -------------------------------------------------------------------------------- 1 | Gleser94 <- 2 | structure(list(MTS = structure(list(Study = c(1, 2, 3, 4, 5, 3 | 6), N.C = c(25, 40, 30, 50, 30, 100), N.E1 = c(22, NA, NA, NA, 4 | 30, 100), N.E2 = c(25, 38, 30, 50, 30, NA), N.E3 = c(23, 37, 5 | NA, NA, 28, NA), N.E4 = c(NA, 40, 28, NA, 26, NA), N.E5 = c(NA, 6 | NA, NA, 50, NA, NA), Mean.C = c(150.96, 149.94, 152.45, 149.49, 7 | 150.36, 150.19), Mean.E1 = c(144.14, NA, NA, NA, 144.55, 145.62 8 | ), Mean.E2 = c(139.92, 141.23, 140.8, 140.69, 140.32, NA), Mean.E3 = c(139.32, 9 | 137.36, NA, NA, 138.34, NA), Mean.E4 = c(NA, 136.44, 136.14, 10 | NA, 134.69, NA), Mean.E5 = c(NA, NA, NA, 135.39, NA, NA), SD.C = c(8.44, 11 | 6.88, 6.35, 6.92, 4.96, 6.71), SD.E1 = c(4.25, NA, NA, NA, 5.58, 12 | 5.06), SD.E2 = c(5.06, 5.11, 4.52, 5.33, 4.16, NA), SD.E3 = c(3.6, 13 | 5.29, NA, NA, 5.76, NA), SD.E4 = c(NA, 3.34, 3.35, NA, 4.05, 14 | NA), SD.E5 = c(NA, NA, NA, 3.35, NA, NA)), .Names = c("Study", 15 | "N.C", "N.E1", "N.E2", "N.E3", "N.E4", "N.E5", "Mean.C", "Mean.E1", 16 | "Mean.E2", "Mean.E3", "Mean.E4", "Mean.E5", "SD.C", "SD.E1", 17 | "SD.E2", "SD.E3", "SD.E4", "SD.E5"), row.names = c(NA, -6L), class = "data.frame"), 18 | MES = structure(list(Study = c(1, 2, 3, 4, 5, 6, 7), N.Uncoached = c(34, 19 | 17, 52, 14, 47, 45, 8), N.Coached = c(21, 16, 52, 13, 93, 20 | 45, 8), Mean.Uncoached.Math = c(510, 383, 475.32, 431.43, 21 | 512, 630.44, 342.5), Mean.Uncoached.Verbal = c(503, 385, 22 | 451.15, 393.57, 462, 597.47, 250), Mean.Coached.Math = c(620, 23 | 446, 469.32, 423.31, 540, 641.94, 290), Mean.Coached.Verbal = c(561, 24 | 375, 462.12, 436.15, 443, 607.22, 293.75), SD.Uncoached.Math = c(83.6, 25 | 82.9, 88.97, 83.47, 72, 70.02, 65.19), SD.Uncoached.Verbal = c(102.4, 26 | 73.3, 86.19, 102.48, 78, 72.86, 29.28), SD.Coached.Math = c(102.5, 27 | 104.7, 96.38, 115.48, 76, 56.94, 35.05), SD.Coached.Verbal = c(78.5, 28 | 56.7, 89.62, 100.04, 76, 72.1, 92.42), Cor.Math.Verbal = c(0.66, 29 | 0.66, 0.66, 0.66, 0.66, 0.66, 0.66)), .Names = c("Study", 30 | "N.Uncoached", "N.Coached", "Mean.Uncoached.Math", "Mean.Uncoached.Verbal", 31 | "Mean.Coached.Math", "Mean.Coached.Verbal", "SD.Uncoached.Math", 32 | "SD.Uncoached.Verbal", "SD.Coached.Math", "SD.Coached.Verbal", 33 | "Cor.Math.Verbal"), row.names = c(NA, -7L), class = "data.frame")), .Names = c("MTS", 34 | "MES")) 
35 | -------------------------------------------------------------------------------- /data/HedgesOlkin85.R: -------------------------------------------------------------------------------- 1 | HedgesOlkin85 <- matrix( 2 | c(1,0.458,0.1,0.0513,0.0319,0.0501, 3 | 2,0.363,0.241,0.0354,0.0222,0.0351, 4 | 3,0.162,-0.121,0.0546,0.0344,0.0545, 5 | 4,0.294,0.037,0.0286,0.0179,0.0286), 6 | ncol=6, byrow=TRUE) 7 | dimnames(HedgesOlkin85) <- list(NULL, c("study", "d_att","d_ach","var_att","cov_att_ach","var_ach")) 8 | HedgesOlkin85 <- data.frame(HedgesOlkin85) 9 | -------------------------------------------------------------------------------- /data/Hox02.R: -------------------------------------------------------------------------------- 1 | Hox02 <- matrix( 2 | c(-0.264, 0.086, 3, 3 | -0.230, 0.106, 1, 4 | 0.166, 0.055, 2, 5 | 0.173, 0.084, 4, 6 | 0.225, 0.071, 3, 7 | 0.291, 0.078, 6, 8 | 0.309, 0.051, 7, 9 | 0.435, 0.093, 9, 10 | 0.476, 0.149, 3, 11 | 0.617, 0.095, 6, 12 | 0.651, 0.110, 6, 13 | 0.718, 0.054, 7, 14 | 0.740, 0.081, 9, 15 | 0.745, 0.084, 5, 16 | 0.758, 0.087, 6, 17 | 0.922, 0.103, 5, 18 | 0.938, 0.113, 5, 19 | 0.962, 0.083, 7, 20 | 1.522, 0.100, 9, 21 | 1.844, 0.141, 9), 22 | ncol=3, byrow=TRUE) 23 | Hox02 <- cbind(1:nrow(Hox02), Hox02) 24 | dimnames(Hox02) <- list(NULL, c("study", "yi","vi","weeks")) 25 | Hox02 <- data.frame(Hox02) 26 | -------------------------------------------------------------------------------- /data/Mak09.R: -------------------------------------------------------------------------------- 1 | Mak09 <- matrix( 2 | c(94,3862,73,3852,0.255750409,0.024866941,73,3, 3 | 81,3236,71,3223,0.130817954,0.027064402,69.3,3.6, 4 | 189,10018,94,5048,0.013310364,0.0162329,73.5,NA, 5 | 29,1054,27,1057,0.076325154,0.073466279,74.5,5, 6 | 57,6830,18,1924,-0.115257805,0.073771719,66.9,2.5, 7 | 797,14302,1280,28731,0.235581932,0.00214643,74.3,10, 8 | 47,87,672,1598,0.481884042,0.048844605,72.7,3, 9 | 724,3862,12862,77643,0.150185582,0.001793075,76.1,6), 10 | ncol=8, byrow=TRUE, dimnames=list(NULL, c("AF.BP", "Tot.BP", "AF.non.BP", "Tot.non.BP", "yi", "vi", "age.mean", "study.duration"))) 11 | 12 | Mak09 <- data.frame(Study=c("Black (2007)", "Cummings (2007)", "Karam (2007)", "Lyles (2007)", "Papapoulous (2008)", "Abrahamsen (2009)", "Heckbert (2008)", "Sorensen (2008)"), type=rep(c("RCT", "Obs"), times=c(5,3)), Mak09, stringsAsFactors=FALSE) 13 | 14 | -------------------------------------------------------------------------------- /data/wvs94a.R: -------------------------------------------------------------------------------- 1 | country=c("France","Britain","W Germany","Italy","Netherlands","Denmark", 2 | "Belgium","Spain","Ireland","N Ireland","USA","Canada","Japan","Mexico", 3 | "S Africa","Hungary","Norway","Sweden","Iceland","Argentina","Finland", 4 | "S Korea","Poland","Switzerland","Brazil","Nigeria","Chile","Belarus", 5 | "India","Czech","E Germany","Slovenia","Bulgaria","Romania","China", 6 | "Portugal","Austria","Turkey","Lithuania","Latvia","Estonia","Russia") 7 | wvs94a <- matrix( 8 | c(-0.037603022,-0.030487011,0.004041305,0.001757647,0.004118119,19490, 9 | 0.02004794,0.044454991,0.00272378,0.001185755,0.002746361,16100, 10 | 0.008104576,0.077451409,0.001921298,0.001021114,0.001943832,22320, 11 | 0.134024757,0.223956797,0.002005091,0.000734201,0.002073886,16830, 12 | 0.018574375,0.191814127,0.004003854,0.000727449,0.004047432,17320, 13 | 0.097168113,0.215502404,0.003907078,0.001186997,0.003952144,22080, 14 | 
0.007754997,0.127995095,0.001456405,0.00040501,0.001513248,15540, 15 | 0.093727148,0.072931212,0.000975891,0.000446397,0.001000401,11020, 16 | -0.056021872,0.038152593,0.004007979,0.001692863,0.004007138,9550, 17 | 0.012872315,0.160712063,0.013494591,0.004562865,0.013635751,16100, 18 | 0.003096714,-0.006276187,0.002229668,0.000998096,0.002240914,21790, 19 | -0.056729222,-0.084332608,0.002315824,0.001168235,0.002329098,20470, 20 | -0.125584298,-0.029057269,0.004131789,0.001242711,0.004454493,25430, 21 | -0.139970224,-0.009794704,0.002671926,0.001231131,0.002688647,2490, 22 | -0.01898269,-0.049997243,0.001493511,0.00073191,0.001503184,2530, 23 | -0.055780996,-0.065376457,0.004044818,0.001315222,0.004138412,2780, 24 | -0.140202592,-0.094179942,0.003240855,0.001034359,0.003298565,23120, 25 | -0.070562207,-0.202495591,0.00390615,0.001538927,0.003944114,23660, 26 | -0.170044533,0.040004732,0.005736106,0.002124959,0.005732229,NA, 27 | -0.032092632,0.057607591,0.004043047,0.001407526,0.004158255,2370, 28 | -0.112675062,0.009716206,0.006952605,0.001750707,0.007001649,26040, 29 | 0.033236661,-0.099830841,0.003274385,0.000726745,0.003274997,NA, 30 | 0.056044601,0.045872369,0.004325889,0.001657374,0.004472464,1690, 31 | -0.03092815,0.049733723,0.002877927,0.001031684,0.002910119,32680, 32 | 0.148138118,0.182105718,0.002266132,0.000789084,0.002290195,2680, 33 | -0.24077135,0.031548524,0.004204115,0.00101748,0.004209257,290, 34 | 0.050749679,0.225677036,0.002681216,0.000976019,0.002710911,1940, 35 | 0.041978879,0.074086526,0.004009652,0.001335892,0.00401099,3110, 36 | -0.024721396,0.217445808,0.001634517,0.000597796,0.001693183,350, 37 | -0.033552273,0.107145395,0.004307437,0.001512237,0.004317324,3140, 38 | -0.001259268,0.021234505,0.003034794,0.001531155,0.003078277,NA, 39 | 0.092006355,0.146393493,0.003928158,0.001542552,0.004191389,NA, 40 | 0.026636679,0.16909562,0.003984432,0.001882169,0.004129105,NA, 41 | 0.105821795,0.313888995,0.003658596,0.001375677,0.003735797,2250, 42 | 0.077708317,0.109931369,0.004193453,0.001937979,0.004212022,1640, 43 | 0.172247538,0.140457455,0.003430778,0.001182798,0.003466994,370, 44 | 0.080096043,0.00889294,0.002887624,0.000933741,0.002893639,4900, 45 | -0.219883322,0.096255817,0.003918677,-9.75E-06,0.003903521,19060, 46 | 0.003338987,0.067571712,0.004054867,0.001010644,0.0041275,1630, 47 | 0.05378744,-0.020084012,0.004873309,0.001547068,0.005113448,3410, 48 | 0.101282579,0.182666714,0.004058874,0.001233074,0.004152001,3830, 49 | -0.006700282,0.193170718,0.002140248,0.000635116,0.002228264,3220), 50 | ncol=6, byrow=TRUE) 51 | dimnames(wvs94a) <- list(NULL, c("lifesat","lifecon","lifesat_var","inter_cov","lifecon_var","gnp")) 52 | wvs94a <- data.frame(country, wvs94a) 53 | wvs94a <- wvs94a[order(wvs94a$country), ] 54 | rownames(wvs94a) <- NULL 55 | rm(country) 56 | 57 | -------------------------------------------------------------------------------- /inst/CITATION: -------------------------------------------------------------------------------- 1 | citHeader("To cite the 'metaSEM' package in publications use:") 2 | 3 | bibentry(bibtype="Article", 4 | title = "{metaSEM}: An R Package for Meta-Analysis using Structural Equation Modeling", 5 | author = as.person("Mike W.-L. Cheung"), 6 | journal = "Frontiers in Psychology", 7 | year = "2015", 8 | volume = "5", 9 | number = "1521", 10 | url = "https://www.frontiersin.org/articles/10.3389/fpsyg.2014.01521/full", 11 | doi = "10.3389/fpsyg.2014.01521", 12 | 13 | textVersion = 14 | paste("Cheung, M.W.L. 
(2015).", 15 | "{metaSEM}: An R Package for Meta-Analysis using Structural Equation Modeling.", 16 | "Frontiers in Psychology 5, 1521.", 17 | "URL https://www.frontiersin.org/articles/10.3389/fpsyg.2014.01521/full", 18 | "DOI 10.3389/fpsyg.2014.01521") 19 | ) 20 | -------------------------------------------------------------------------------- /man/Aloe14.Rd: -------------------------------------------------------------------------------- 1 | \name{Aloe14} 2 | \alias{Aloe14} 3 | \docType{data} 4 | \title{Multivariate effect sizes between classroom management 5 | self-efficacy (CMSE) and other variables reported by Aloe et al. (2014) 6 | } 7 | \description{This study reports sixteen studies on the effect sizes 8 | (correlation coefficients) between CMSE and emotional exhaustion (EE), 9 | depersonalization (DP), and (lowered) personal accomplishment (PA) 10 | reported by Aloe et al. (2014). 11 | } 12 | \usage{data("Aloe14")} 13 | \format{ 14 | A data frame with 16 observations on the following 14 variables. 15 | \describe{ 16 | \item{\code{Study}}{a factor with levels \code{Betoret} \code{Brouwers & Tomic} \code{Bumen} \code{Chang} \code{Durr} \code{Evers et al.} \code{Friedman} \code{Gold} \code{Huk} \code{Kress} \code{Kumarakulasingam} \code{Martin et al.} \code{Ozdemir} \code{Skaalvik and Skaalvik} \code{Williams}} 17 | \item{\code{Year}}{Year of publication} 18 | \item{\code{EE}}{Emotional exhaustion} 19 | \item{\code{DP}}{Depersonalization} 20 | \item{\code{PA}}{(Lowered) personal accomplishment} 21 | \item{\code{V_EE}}{Sampling variance of emotional exhaustion} 22 | \item{\code{V_DP}}{Sampling variance of depersonalization} 23 | \item{\code{V_PA}}{Sampling variance of (lowered) personal accomplishment} 24 | \item{\code{C_EE_DP}}{Sampling covariance between EE and DP} 25 | \item{\code{C_EE_PA}}{Sampling covariance between EE and PA} 26 | \item{\code{C_DP_PA}}{Sampling covariance between DP and PA} 27 | \item{\code{Publication_type}}{Either \code{Dissertation} or \code{Journal}} 28 | \item{\code{Percentage_females}}{Percentage of females in the study} 29 | \item{\code{Years_experience}}{Average years of experience} 30 | } 31 | } 32 | \source{Aloe, A. M., Amo, L. C., & Shanahan, M. E. (2014). Classroom management self-efficacy and burnout: A multivariate meta-analysis. \emph{Educational Psychology Review}, \bold{26(1)}, 101-126. 
doi:10.1007/s10648-013-9244-0 33 | } 34 | \examples{ 35 | \donttest{ 36 | data(Aloe14) 37 | 38 | ## Random-effects meta-analysis 39 | meta1 <- meta(cbind(EE,DP,PA), 40 | cbind(V_EE, C_EE_DP, C_EE_PA, V_DP, C_DP_PA, V_PA), 41 | data=Aloe14) 42 | ## Remove error code 43 | meta1 <- rerun(meta1) 44 | 45 | summary(meta1) 46 | 47 | ## Extract the coefficients for the variance component of the random effects 48 | coef1 <- coef(meta1, select="random") 49 | 50 | ## Convert it into a symmetric matrix by row major 51 | my.cov <- vec2symMat(coef1, byrow=TRUE) 52 | 53 | ## Convert it into a correlation matrix 54 | cov2cor(my.cov) 55 | 56 | ## Plot the multivariate effect sizes 57 | plot(meta1) 58 | } 59 | } 60 | \keyword{datasets} 61 | -------------------------------------------------------------------------------- /man/BCG.Rd: -------------------------------------------------------------------------------- 1 | \name{BCG} 2 | \alias{BCG} 3 | \docType{data} 4 | \title{Dataset on the Effectiveness of the BCG Vaccine for Preventing Tuberculosis 5 | } 6 | \description{ 7 | This dataset includes 13 studies on the effectiveness of the 8 | Bacillus Calmette-Guerin (BCG) vaccine for preventing tuberculosis 9 | (see van Houwelingen, Arends, & Stijnen (2002) for details). 10 | } 11 | \usage{data(BCG)} 12 | 13 | \details{ 14 | A list of data with the following structure: 15 | \describe{ 16 | \item{Trial}{Number of the trials} 17 | \item{Author}{Authors of the original studies} 18 | \item{Year}{Year of publication} 19 | \item{VD}{Vaccinated group with disease} 20 | \item{VWD}{Vaccinated group without the disease} 21 | \item{NVD}{Not vaccinated group with disease} 22 | \item{NVWD}{Not vaccinated group without the disease} 23 | \item{Latitude}{Geographic latitude of the place where the study was done} 24 | \item{Allocation}{Method of treatment allocation} 25 | \item{ln_OR}{Natural logarithm of the odds ratio: log((VD/VWD)/(NVD/NVWD))} 26 | \item{v_ln_OR}{Sampling variance of ln_OR: 1/VD+1/VWD+1/NVD+1/NVWD} 27 | \item{ln_Odd_V}{Natural logarithm of the odds of the vaccinated group: 28 | log(VD/VWD)} 29 | \item{ln_Odd_NV}{Natural logarithm of the odds of the not vaccinated group: log(NVD/NVWD)} 30 | \item{v_ln_Odd_V}{Sampling variance of ln_Odd_V: 1/VD+1/VWD} 31 | \item{cov_V_NV}{Sampling covariance between ln_Odd_V and ln_Odd_NV: It 32 | is always 0} 33 | \item{v_ln_Odd_NV}{Sampling variance of ln_Odd_NV: 1/NVD+1/NVWD} 34 | } 35 | } 36 | 37 | \source{ 38 | Colditz, G. A., Brewer, T. F., Berkey, C. S., Wilson, M. E., Burdick, E., Fineberg, H. V., & Mosteller, F. (1994). Efficacy of BCG vaccine in the prevention of tuberculosis: Meta-analysis of the published literature. \emph{Journal of the American Medical Association}, \bold{271}, 698--702. 39 | } 40 | \references{ 41 | Berkey, C. S., Hoaglin, D. C., Mosteller, F., & Colditz, G. A. (1995). A random-effects regression model for meta-analysis. \emph{Statistics in Medicine}, \bold{14}, 395--411. 42 | 43 | van Houwelingen, H. C., Arends, L. R., & Stijnen, T. (2002). Advanced methods in meta-analysis: Multivariate approach and meta-regression. \emph{Statistics in Medicine}, \bold{21}, 589--624. 44 | 45 | Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. \emph{Journal of Statistical Software}, \bold{36}(3), 1--48. \url{https://www.jstatsoft.org/v36/i03/}. 
46 | } 47 | 48 | \examples{ 49 | data(BCG) 50 | 51 | ## Univariate meta-analysis on the log of the odds ratio 52 | summary( meta(y=ln_OR, v=v_ln_OR, data=BCG, 53 | x=cbind(scale(Latitude,scale=FALSE), 54 | scale(Year,scale=FALSE))) ) 55 | 56 | ## Multivariate meta-analysis on the log of the odds 57 | ## The conditional sampling covariance is 0 58 | bcg <- meta(y=cbind(ln_Odd_V, ln_Odd_NV), data=BCG, 59 | v=cbind(v_ln_Odd_V, cov_V_NV, v_ln_Odd_NV)) 60 | summary(bcg) 61 | 62 | plot(bcg) 63 | } 64 | \keyword{datasets} 65 | -------------------------------------------------------------------------------- /man/Becker83.Rd: -------------------------------------------------------------------------------- 1 | \name{Becker83} 2 | \alias{Becker83} 3 | \docType{data} 4 | \title{Studies on Sex Differences in Conformity Reported by Becker (1983) 5 | } 6 | \description{ 7 | The data set includes studies on sex differences in conformity using the fictitious norm group 8 | paradigm reported by Becker (1983). 9 | } 10 | \usage{data(Becker83)} 11 | 12 | \details{ 13 | The variables are: 14 | \describe{ 15 | \item{study}{study number} 16 | \item{di}{Standardized mean difference} 17 | \item{vi}{Sampling variance of the effect size} 18 | \item{percentage}{Percentage of male authors} 19 | \item{items}{Number of items} 20 | } 21 | } 22 | \source{ 23 | Becker, B. J. (1983, April). Influence again: A comparison of methods 24 | for meta-analysis. \emph{Paper presented at the annual meeting of the 25 | American Educational Research Association, Montreal.} 26 | 27 | Hedges, L. V., & Olkin, I. (1985). \emph{Statistical methods for meta-analysis.} Orlando, FL: Academic Press. 28 | } 29 | \references{ 30 | Cheung, M. W.-L. (2010). Fixed-effects meta-analyses as multiple-group structural equation models. \emph{Structural Equation Modeling}, \bold{17}, 481-509. 31 | } 32 | \examples{ 33 | data(Becker83) 34 | 35 | ## Random-effects meta-analysis 36 | summary( meta(y=di, v=vi, data=Becker83) ) 37 | 38 | ## Mixed-effects meta-analysis with log(items) as the predictor 39 | summary( meta(y=di, v=vi, x=log(items), data=Becker83) ) 40 | } 41 | \keyword{datasets} 42 | -------------------------------------------------------------------------------- /man/Becker94.Rd: -------------------------------------------------------------------------------- 1 | \name{Becker94} 2 | \alias{Becker94} 3 | \docType{data} 4 | \title{Five Studies of Ten Correlation Matrices reported by Becker and 5 | Schram (1994) 6 | } 7 | \description{ 8 | This data set includes five studies of ten correlation matrices 9 | reported by Becker and Schram (1994). 10 | } 11 | \usage{data(Becker94)} 12 | 13 | \details{ 14 | A list of data with the following structure: 15 | \describe{ 16 | \item{data}{A list of 10 correlation matrices. The 17 | variables are \emph{Math} (math aptitude), \emph{Spatial} (spatial 18 | ability), and \emph{Verbal} (verbal ability)} 19 | \item{n}{A vector of sample sizes} 20 | \item{gender}{\emph{Females} or \emph{Males} samples} 21 | } 22 | } 23 | 24 | \source{ 25 | Becker, B. J., & Schram, C. M. (1994). Examining explanatory models through research synthesis. In H. Cooper & L. V. Hedges (Eds.), \emph{The handbook of research synthesis} (pp. 357-381). New York: Russell Sage Foundation. 
26 | } 27 | 28 | \examples{ 29 | \donttest{ 30 | data(Becker94) 31 | 32 | #### Fixed-effects model 33 | ## First stage analysis 34 | fixed1 <- tssem1(Becker94$data, Becker94$n, method="FEM") 35 | summary(fixed1) 36 | 37 | ## Prepare a regression model using create.mxMatrix() 38 | ## A1 <- create.mxMatrix(c(0,0,0,"0.2*Spatial2Math", 39 | ##                         0,0,"0.2*Verbal2Math",0,0), type="Full", 40 | ##                         ncol=3, nrow=3, name="A1") 41 | ## S1 <- create.mxMatrix(c("0.2*ErrorVarMath",0,0,1, 42 | ##                         "0.2*CorBetweenSpatialVerbal",1), 43 | ##                         type="Symm", name="S1") 44 | 45 | ## An alternative method to create a regression model with the lavaan syntax 46 | model <- "## Regression model 47 |           Math ~ Spatial2Math*Spatial + Verbal2Math*Verbal 48 |           ## Error variance of Math 49 |           Math ~~ ErrorVarMath*Math 50 |           ## Variances of Spatial and Verbal fixed at 1.0 51 |           Spatial ~~ 1*Spatial 52 |           Verbal ~~ 1*Verbal 53 |           ## Correlation between Spatial and Verbal 54 |           Spatial ~~ CorBetweenSpatialVerbal*Verbal" 55 | 56 | ## Display the model 57 | plot(model) 58 | 59 | RAM <- lavaan2RAM(model, obs.variables=c("Math", "Spatial", "Verbal")) 60 | RAM 61 | 62 | ## Second stage analysis 63 | ## A1 <- RAM$A 64 | ## S1 <- RAM$S 65 | ## fixed2 <- tssem2(fixed1, Amatrix=A1, Smatrix=S1, intervals.type="LB") 66 | 67 | fixed2 <- tssem2(fixed1, RAM=RAM, intervals.type="LB") 68 | summary(fixed2) 69 | 70 | ## Display the model with the parameter estimates 71 | plot(fixed2) 72 | 73 | #### Fixed-effects model: with gender as cluster 74 | ## First stage analysis 75 | cluster1 <- tssem1(Becker94$data, Becker94$n, method="FEM", cluster=Becker94$gender) 76 | summary(cluster1) 77 | 78 | ## Second stage analysis 79 | cluster2 <- tssem2(cluster1, RAM=RAM, intervals.type="LB") 80 | summary(cluster2) 81 | 82 | #### Conventional fixed-effects GLS approach 83 | ## First stage analysis 84 | ## No random effects 85 | ## Replicate Becker's (1992) analysis using 4 studies only 86 | gls1 <- tssem1(Becker92$data[1:4], Becker92$n[1:4], method="REM", RE.type="Zero", 87 |                model.name="Fixed effects GLS Stage 1") 88 | summary(gls1) 89 | 90 | ## Fixed-effects GLS model: Second stage analysis 91 | gls2 <- tssem2(gls1, RAM=RAM, intervals.type="LB", 92 |                model.name="Fixed effects GLS Stage 2") 93 | summary(gls2) 94 | } 95 | } 96 | \keyword{datasets} 97 | -------------------------------------------------------------------------------- /man/Berkey98.Rd: -------------------------------------------------------------------------------- 1 | \name{Berkey98} 2 | \alias{Berkey98} 3 | \docType{data} 4 | \title{Five Published Trials from Berkey et al. (1998)} 5 | \description{ 6 | The data set includes five published trials, reported by Berkey et al. (1998), comparing surgical and non-surgical treatments 7 | for medium-severity periodontal disease, one year after treatment. 8 | } 9 | \usage{data(Berkey98)} 10 | 11 | \details{ 12 | The variables are: 13 | \describe{ 14 | \item{trial}{Trial number} 15 | \item{pub_year}{Publication year} 16 | \item{no_of_patients}{Number of patients} 17 | \item{PD}{Patient improvements (mm) in \emph{probing depth}} 18 | \item{AL}{Patient improvements (mm) in \emph{attachment level}} 19 | \item{var_PD}{Sampling variance of PD} 20 | \item{cov_PD_AL}{Sampling covariance between PD and AL} 21 | \item{var_AL}{Sampling variance of AL} 22 | } 23 | 24 | } 25 | \source{ 26 | Berkey, C. S., Hoaglin, D. C., Antczak-Bouckoms, A., Mosteller, F., & Colditz, G. A. (1998). Meta-analysis of multiple outcomes by regression with random effects. 
\emph{Statistics in Medicine}, \bold{17}, 2537-2550. 27 | } 28 | \examples{ 29 | \donttest{ 30 | data(Berkey98) 31 | 32 | #### ML estimation method 33 | ## Multivariate meta-analysis 34 | x <- meta(y=cbind(PD, AL), v=cbind(var_PD, cov_PD_AL, var_AL), data=Berkey98) 35 | x <- rerun(x) 36 | summary(x) 37 | plot(x) 38 | 39 | ## Plot individual studies proportional to the weights 40 | plot(x, study.weight.plot=TRUE) 41 | 42 | ## Include forest plot from the metafor package 43 | library(metafor) 44 | plot(x, diag.panel=TRUE, main="Multivariate meta-analysis", 45 | axis.label=c("PD", "AL")) 46 | forest( rma(yi=PD, vi=var_PD, data=Berkey98) ) 47 | title("Forest plot of PD") 48 | forest( rma(yi=AL, vi=var_AL, data=Berkey98) ) 49 | title("Forest plot of AL") 50 | 51 | ## Multivariate meta-analysis with "publication year-1979" as the predictor 52 | summary( meta(y=cbind(PD, AL), v=cbind(var_PD, cov_PD_AL, var_AL), 53 | x=scale(pub_year, center=1979), data=Berkey98, 54 | RE.lbound=NA) ) 55 | 56 | ## Multivariate meta-analysis with equality constraint on the regression coefficients 57 | summary( meta(y=cbind(PD, AL), v=cbind(var_PD, cov_PD_AL, var_AL), 58 | x=scale(pub_year, center=1979), data=Berkey98, 59 | coef.constraints=matrix(c("0.3*Eq_slope", "0.3*Eq_slope"), 60 | nrow=2)) ) 61 | 62 | #### REML estimation method 63 | ## Multivariate meta-analysis 64 | summary( reml(y=cbind(PD, AL), v=cbind(var_PD, cov_PD_AL, var_AL), 65 | data=Berkey98, 66 | model.name="Multivariate meta analysis with REML") ) 67 | 68 | ## Multivariate meta-analysis with "publication year-1979" as the predictor 69 | ## Diagonal structure for the variance component 70 | summary( reml(y=cbind(PD, AL), v=cbind(var_PD, cov_PD_AL, var_AL), 71 | RE.constraints=Diag(c("1e-5*Tau2_1_1", "1e-5*Tau2_2_2")), 72 | x=scale(pub_year, center=1979), data=Berkey98) ) 73 | } 74 | } 75 | \keyword{datasets} 76 | -------------------------------------------------------------------------------- /man/Boer16.Rd: -------------------------------------------------------------------------------- 1 | \name{Boer16} 2 | \alias{Boer16} 3 | \docType{data} 4 | \title{Correlation Matrices from Boer et al. (2016)} 5 | \description{ 6 | The data set includes correlation matrices of leader-member exchange in 7 | transformational leadership reported by Boer et al. (2016). 8 | } 9 | \usage{data(Boer16)} 10 | 11 | \details{ 12 | A list of data with the following structure: 13 | \describe{ 14 | \item{data}{A list of correlation matrices. The variables are 15 | \emph{LMX} (leader-member exchange), \emph{TFL} 16 | (transformational leadership), \emph{JS} (job satisfaction), 17 | \emph{OC} (organizational commitment), and \emph{LE} (leader 18 | effectiveness)} 19 | \item{n}{A vector of sample sizes} 20 | \item{RelLMX}{The reliability of \emph{LMX}} 21 | \item{RelTFL}{The reliability of \emph{TFL}} 22 | } 23 | } 24 | \source{ 25 | Boer, D., Deinert, A., Homan, A. C., & Voelpel, S. C. (2016). Revisiting the mediating role of leader-member exchange in transformational leadership: the differential impact model. \emph{European Journal of Work and Organizational Psychology}, \bold{25}(6), 883-899. 
26 | } 27 | \examples{ 28 | \donttest{ 29 | ## Stage 1 analysis 30 | rand1 <- tssem1(Boer16$data, Boer16$n, method="REM", RE.type="Diag", 31 | acov="weighted") 32 | summary(rand1) 33 | 34 | ## Stage 2 analysis 35 | model2a <- 'JS+OC+LE ~ LMX+TFL 36 | LMX ~ TFL 37 | ## Variance of TFL is fixed at 1 38 | TFL ~~ 1*TFL 39 | ## Correlated residuals 40 | JS ~~ OC 41 | JS ~~ LE 42 | OC ~~ LE' 43 | 44 | ## Display the model 45 | plot(model2a) 46 | 47 | RAM2a <- lavaan2RAM(model2a, obs.variables = c("LMX", "TFL", "JS", "OC", "LE"), 48 | A.notation="on", S.notation="with") 49 | 50 | rand2a <- tssem2(rand1, Amatrix=RAM2a$A, Smatrix=RAM2a$S) 51 | summary(rand2a) 52 | 53 | ## Display the model with the parameter estimates 54 | plot(rand2a, layout="spring") 55 | } 56 | } 57 | \keyword{datasets} 58 | -------------------------------------------------------------------------------- /man/Bornmann07.Rd: -------------------------------------------------------------------------------- 1 | \name{Bornmann07} 2 | \alias{Bornmann07} 3 | \docType{data} 4 | \title{A Dataset from Bornmann et al. (2007)} 5 | \description{ 6 | A dataset from Bornmann et al. (2007) for three-level meta-analysis. 7 | } 8 | \usage{data(Bornmann07)} 9 | 10 | \details{ 11 | The variables are: 12 | \describe{ 13 | \item{ID}{ID of the study} 14 | \item{Study}{Study name} 15 | \item{Cluster}{Cluster for effect sizes} 16 | \item{logOR}{Effect size: log odds ratio} 17 | \item{v}{Sampling variance of logOR} 18 | \item{Year}{Year of publication} 19 | \item{Type}{Type of proposal: either \bold{Grant} or \bold{Fellowship}} 20 | \item{Discipline}{Discipline of the proposal: either \bold{Physical sciences}, \bold{Life sciences/biology}, 21 | \bold{Social sciences/humanities} or \bold{Multidisciplinary})} 22 | \item{Country}{Country of the proposal: either the \bold{United States}, 23 | \bold{Canada}, \bold{Australia}, \bold{United Kingdom} or \bold{Europe}} 24 | } 25 | } 26 | \source{ 27 | Bornmann, L., Mutz, R., & Daniel, H.-D. (2007). Gender differences in grant peer review: A meta-analysis. \emph{Journal of Informetrics}, \bold{1(3)}, 226-238. doi:10.1016/j.joi.2007.03.001 28 | } 29 | \references{ 30 | Cheung, M. W.-L. (2014). Modeling dependent effect sizes with three-level meta-analyses: A structural equation modeling approach. \emph{Psychological Methods}, \bold{19}, 211-229. 31 | 32 | Marsh, H. W., Bornmann, L., Mutz, R., Daniel, H.-D., & O'Mara, A. (2009). Gender Effects in the Peer Reviews of Grant Proposals: A Comprehensive Meta-Analysis Comparing Traditional and Multilevel Approaches. \emph{Review of Educational Research}, \bold{79(3)}, 1290-1326. 
doi:10.3102/0034654309334143 33 | } 34 | \examples{ 35 | \donttest{ 36 | data(Bornmann07) 37 | 38 | #### ML estimation method 39 | ## No predictor 40 | summary( meta3L(y=logOR, v=v, cluster=Cluster, data=Bornmann07) ) 41 | 42 | ## Type as a predictor 43 | ## Grant: 0 44 | ## Fellowship: 1 45 | summary( meta3L(y=logOR, v=v, x=(as.numeric(Type)-1), 46 |                 cluster=Cluster, data=Bornmann07) ) 47 | 48 | ## Centered Year as a predictor 49 | summary( meta3L(y=logOR, v=v, x=scale(Year, scale=FALSE), 50 |                 cluster=Cluster, data=Bornmann07) ) 51 | 52 | #### REML estimation method 53 | ## No predictor 54 | summary( reml3L(y=logOR, v=v, cluster=Cluster, data=Bornmann07) ) 55 | 56 | ## Type as a predictor 57 | ## Grants: 0 58 | ## Fellowship: 1 59 | summary( reml3L(y=logOR, v=v, x=(as.numeric(Type)-1), 60 |                 cluster=Cluster, data=Bornmann07) ) 61 | 62 | ## Centered Year as a predictor 63 | summary( reml3L(y=logOR, v=v, x=scale(Year, scale=FALSE), 64 |                 cluster=Cluster, data=Bornmann07) ) 65 | 66 | ## Handling missing covariates with FIML 67 | ## MCAR 68 | ## Set seed for replication 69 | set.seed(1000000) 70 | 71 | ## Copy Bornmann07 to my.df 72 | my.df <- Bornmann07 73 | ## "Fellowship": 1; "Grant": 0 74 | my.df$Type_MCAR <- ifelse(Bornmann07$Type=="Fellowship", yes=1, no=0) 75 | 76 | ## Create 17 out of 66 missingness with MCAR 77 | my.df$Type_MCAR[sample(1:66, 17)] <- NA 78 | summary(meta3LFIML(y=logOR, v=v, cluster=Cluster, x2=Type_MCAR, data=my.df)) 79 | 80 | ## MAR 81 | Type_MAR <- ifelse(Bornmann07$Type=="Fellowship", yes=1, no=0) 82 | 83 | ## Create 27 out of 66 missingness with MAR for cases Year<1996 84 | index_MAR <- ifelse(Bornmann07$Year<1996, yes=TRUE, no=FALSE) 85 | Type_MAR[index_MAR] <- NA 86 | 87 | ## Include auxiliary variable 88 | summary(meta3LFIML(y=logOR, v=v, cluster=Cluster, x2=Type_MAR, av2=Year, data=my.df)) 89 | } 90 | } 91 | \keyword{datasets} 92 | -------------------------------------------------------------------------------- /man/Chan17.Rd: -------------------------------------------------------------------------------- 1 | \name{Chan17} 2 | \alias{Chan17} 3 | \docType{data} 4 | \title{Dataset from Chan, Jones, Jamieson, and Albarracin (2017)} 5 | 6 | \description{A dataset of multiple treatment effects of standardized mean differences on misinformation and debunking effects.} 7 | 8 | \usage{data(Chan17)} 9 | \format{ 10 | A data frame with 34 independent samples from 6 research reports. 11 | \describe{ 12 | \item{\code{Author}}{a character vector of study} 13 | \item{\code{g_misinfo}}{Hedges' g of misinformation comparing the misinformation experimental and control groups} 14 | \item{\code{g_debunk}}{Hedges' g of debunking comparing the debunking experimental and misinformation experimental groups} 15 | \item{\code{v_misinfo}}{sampling variance of g_misinfo} 16 | \item{\code{c_mis_deb}}{Sampling covariance between \code{g_misinfo} and \code{g_debunk} due to the overlap of the misinformation experimental group} 17 | \item{\code{v_debunk}}{sampling variance of g_debunk} 18 | \item{\code{PublicationYear}}{publication year} 19 | \item{\code{Published}}{published or unpublished} 20 | \item{\code{MeanAge}}{mean age of participants} 21 | \item{\code{PctFemale}}{percentage of female participants} 22 | } 23 | } 24 | 25 | \details{ 26 | The sampling variances and covariances are calculated using Gleser and Olkin's (2009) method for multiple treatment effects (Equations 3.3 and 3.4). 
Since the sample sizes of the misinformation, debunking, and control groups are not given, it is assumed they are equal.} 27 | 28 | \source{Chan, M. S., Jones, C. R., Hall Jamieson, K., & Albarracin, D. (2017). Debunking: A meta-analysis of the psychological efficacy of messages countering misinformation. \emph{Psychological Science}, \bold{28(11)}, 1531-1546. https://doi.org/10.1177/0956797617714579} 29 | 30 | \references{ 31 | Gleser, L. J., & Olkin, I. (2009). Stochastically dependent effect sizes. In H. Cooper, L. V. Hedges, & J. C. Valentine (Eds.), \emph{The handbook of research synthesis and meta-analysis.} (2nd ed., pp. 357-376). Russell Sage Foundation. 32 | } 33 | 34 | \keyword{datasets} 35 | -------------------------------------------------------------------------------- /man/Cheung00.Rd: -------------------------------------------------------------------------------- 1 | \name{Cheung00} 2 | \alias{Cheung00} 3 | \docType{data} 4 | \title{Fifty Studies of Correlation Matrices used in Cheung and Chan (2000)} 5 | \description{ 6 | This data set includes fifty studies of correlation matrices on the theory 7 | of planned behavior reported by Cheung and Chan (2000). 8 | } 9 | \usage{data(Cheung00)} 10 | 11 | \details{ 12 | A list of data with the following structure: 13 | \describe{ 14 | \item{data}{A list of 50 studies of correlation matrices. The 15 | variables are the attitude toward behavior \emph{att}, subjective norm \emph{sn}, 16 | behavioral intention \emph{bi}, and behavior \emph{beh}} 17 | \item{n}{A vector of sample sizes} 18 | } 19 | } 20 | \source{ 21 | Cheung, S.-F., & Chan, D. K.-S. (2000). The role of perceived behavioral control in predicting human behavior: A meta-analytic review of studies on the theory of planned behavior. \emph{Unpublished manuscript}, Chinese University of Hong Kong. 22 | } 23 | \references{ 24 | Cheung, M.W.-L., & Cheung, S.-F. (2016). Random-effects models for 25 | meta-analytic structural equation modeling: Review, issues, and 26 | illustrations. \emph{Research Synthesis Methods}, \bold{7}, 140-155. 27 | } 28 | \note{These studies were extracted from the original data set for 29 | illustration purposes. Some samples contained two or more correlation 30 | matrices, and only one of them was arbitrarily selected to avoid the 31 | problem of dependence. Moreover, studies with fewer than 3 correlation 32 | coefficients were also excluded. 
33 | } 34 | \examples{ 35 | \donttest{ 36 | data(Cheung00) 37 | 38 | ## Variable labels 39 | labels <- colnames(Cheung00$data[[1]]) 40 | 41 | ## Full mediation model 42 | S <- create.mxMatrix(c("1", 43 | ".2*cov_att_sn", "1", 44 | 0, 0, ".2*e_bi", 45 | 0, 0, 0, ".2*e_beh"), 46 | type="Symm", as.mxMatrix=FALSE, byrow=TRUE) 47 | dimnames(S) <- list(labels, labels) 48 | S 49 | 50 | A <- matrix(c("0","0","0","0", 51 | "0","0","0","0", 52 | ".2*att2bi", ".2*sn2bi", "0", "0", 53 | "0", "0", ".2*bi2beh", "0"), 54 | byrow=TRUE, 4, 4) 55 | dimnames(A) <- list(labels, labels) 56 | A 57 | 58 | #### Random-effects model 59 | 60 | ## Stage 1 analysis 61 | random_1 <- tssem1(Cheung00$data, Cheung00$n, method="REM", RE.type="Diag", 62 | acov="weighted") 63 | summary(random_1) 64 | 65 | ## Stage 2 analysis 66 | random_2 <- tssem2(random_1, Amatrix=A, Smatrix=S, intervals.type="LB", 67 | diag.constraints=TRUE) 68 | summary(random_2) 69 | 70 | ## Display the model 71 | plot(random_2, what="path") 72 | 73 | ## Display the model with the parameter estimates 74 | plot(random_2, color="yellow") 75 | 76 | ## Load the library 77 | library("semPlot") 78 | } 79 | } 80 | \keyword{datasets} 81 | -------------------------------------------------------------------------------- /man/Cheung09.Rd: -------------------------------------------------------------------------------- 1 | \name{Cheung09} 2 | \alias{Cheung09} 3 | \docType{data} 4 | \title{A Dataset from TSSEM User's Guide Version 1.11 by Cheung (2009) 5 | } 6 | \description{Four studies were selected from the data set used by Cheung 7 | and Chan (2005; 2009). Some variables were randomly deleted to 8 | illustrate the analysis with missing data. 9 | %% ~~ A concise (1-5 lines) description of the dataset. ~~ 10 | } 11 | \usage{data(Cheung09)} 12 | \details{ 13 | A list of data with the following structure: 14 | \describe{ 15 | \item{data}{A list of 4 studies of correlation matrices } 16 | \item{n}{A vector of sample sizes} 17 | } 18 | } 19 | \references{ 20 | Cheung, M. W.-L., & Chan, W. (2005). Meta-analytic structural equation modeling: A two-stage approach. \emph{Psychological Methods}, \bold{10}, 40-64. 21 | 22 | Cheung, M. W.-L., & Chan, W. (2009). A two-stage approach to synthesizing covariance matrices in meta-analytic structural equation modeling. \emph{Structural Equation Modeling}, \bold{16}, 28-53. 
23 | } 24 | \examples{ 25 | \donttest{ 26 | data(Cheung09) 27 | 28 | #### Fixed-effects model: Stage 1 analysis 29 | fixed1 <- tssem1(Cheung09$data, Cheung09$n, method="FEM") 30 | summary(fixed1) 31 | 32 | ## Prepare a model implied matrix 33 | ## Factor correlation matrix 34 | Phi <- create.mxMatrix( c("0.3*corf2f1","0.3*corf3f1","0.3*corf3f2"), 35 | type="Stand", as.mxMatrix=FALSE ) 36 | ## Error variances 37 | Psi <- create.mxMatrix( paste("0.2*e", 1:9, sep=""), type="Diag", 38 | as.mxMatrix=FALSE ) 39 | 40 | ## Create Smatrix 41 | S1 <- bdiagMat(list(Psi, Phi)) 42 | ## dimnames(S1)[[1]] <- dimnames(S1)[[2]] <- c(paste("x",1:9,sep=""), 43 | ## paste("f",1:3,sep="")) 44 | ## S1 45 | S1 <- as.mxMatrix(S1) 46 | 47 | ## Factor loadings 48 | Lambda <- create.mxMatrix( c(".3*f1x1",".3*f1x2",".3*f1x3",rep(0,9), 49 | ".3*f2x4",".3*f2x5",".3*f2x6",".3*f2x7", 50 | rep(0,9),".3*f3x8",".3*f3x9"), type="Full", 51 | ncol=3, nrow=9, as.mxMatrix=FALSE ) 52 | Zero1 <- matrix(0, nrow=9, ncol=9) 53 | Zero2 <- matrix(0, nrow=3, ncol=12) 54 | 55 | ## Create Amatrix 56 | A1 <- rbind( cbind(Zero1, Lambda), 57 | Zero2 ) 58 | ## dimnames(A1)[[1]] <- dimnames(A1)[[2]] <- c(paste("x",1:9,sep=""), 59 | ## paste("f",1:3,sep="")) 60 | ## A1 61 | A1 <- as.mxMatrix(A1) 62 | 63 | ## Create Fmatrix 64 | F1 <- create.Fmatrix(c(rep(1,9), rep(0,3))) 65 | 66 | #### Fixed-effects model: Stage 2 analysis 67 | fixed2 <- tssem2(fixed1, Amatrix=A1, Smatrix=S1, Fmatrix=F1, 68 | intervals.type="LB") 69 | summary(fixed2) 70 | 71 | ## Display the model 72 | plot(fixed2, what="path") 73 | 74 | ## Display the model with the parameter estimates 75 | plot(fixed2, latNames=c("f1", "f2", "f3"), edge.label.cex=0.8, 76 | color="yellow") 77 | } 78 | } 79 | \keyword{datasets} 80 | -------------------------------------------------------------------------------- /man/Cooke16.Rd: -------------------------------------------------------------------------------- 1 | \name{Cooke16} 2 | \alias{Cooke16} 3 | \docType{data} 4 | \title{Correlation Matrices from Cooke et al. (2016)} 5 | \description{ 6 | The data set includes correlation matrices on using the theory of planned 7 | behavior to predict alcohol consumption reported by Cooke et al. (2016). 8 | } 9 | \usage{data(Cooke16)} 10 | 11 | \details{ 12 | A list of data with the following structure: 13 | \describe{ 14 | \item{data}{A list of correlation matrices. The variables are 15 | \emph{SN} (subjective norm), \emph{ATT} (attitude), \emph{PBC} 16 | (perceived behavior control), \emph{BI} (behavioral intention), and 17 | \emph{BEH} (behavior).} 18 | \item{n}{A vector of sample sizes.} 19 | \item{MeanAge}{Mean age of the participants except for \code{Ajzen and 20 | Sheikh (2013)}, which is the median age, and \code{Glassman, et 21 | al. (2010a)} to \code{Glassman, et al. (2010d)}, which are based on 22 | the range of 18 to 24.} 23 | \item{Female}{Percentage of female participants.} 24 | } 25 | } 26 | \source{ 27 | Cooke, R., Dahdah, M., Norman, P., & French, D. P. (2016). How well does the theory of planned behaviour predict alcohol consumption? A systematic review and meta-analysis. \emph{Health Psychology Review}, \bold{10}(2), 148-167. 28 | } 29 | \references{ 30 | Cheung, M. W.-L., & Hong, R. Y. (2017). Applications of meta-analytic structural equation modeling in health psychology: Examples, issues, and recommendations. \emph{Health Psychology Review}, \bold{11}, 265-279. 
31 | } 32 | 33 | \examples{ 34 | \donttest{ 35 | ## Check whether the correlation matrices are valid (positive definite) 36 | Cooke16$data[is.pd(Cooke16$data)==FALSE] 37 | 38 | ## Since the correlation matrix in Study 3 is not positive definite, 39 | ## we exclude it in the following analyses 40 | my.data <- Cooke16$data[-3] 41 | my.n <- Cooke16$n[-3] 42 | 43 | ## Show the no. of studies per correlation 44 | pattern.na(my.data, show.na = FALSE) 45 | 46 | ## Show the total sample sizes per correlation 47 | pattern.n(my.data, my.n) 48 | 49 | ## Stage 1 analysis 50 | ## Random-effects model 51 | random1 <- tssem1(my.data, my.n, method="REM", RE.type="Diag", acov="weighted") 52 | summary(random1) 53 | 54 | A1 <- create.mxMatrix(c(0,0,0,0,0, 55 | 0,0,0,0,0, 56 | 0,0,0,0,0, 57 | "0.2*SN2BI","0.2*ATT2BI","0.2*PBC2BI",0,0, 58 | 0,0,"0.2*PBC2BEH","0.2*BI2BEH",0), 59 | type="Full", ncol=5, nrow=5, 60 | byrow=TRUE, as.mxMatrix=FALSE) 61 | 62 | ## This step is not necessary but it is useful for inspecting the model. 63 | dimnames(A1)[[1]] <- dimnames(A1)[[2]] <- colnames(Cooke16$data[[1]]) 64 | 65 | ## Display A1 66 | A1 67 | 68 | S1 <- create.mxMatrix(c(1, 69 | "0.1*ATT_SN", 1, 70 | "0.1*PBC_SN", "0.1*PBC_ATT", 1, 71 | 0, 0, 0, "0.5*VarBI", 72 | 0, 0, 0, 0, "0.5*VarBEH"), 73 | type = "Symm", ncol=5, nrow=5, 74 | byrow=TRUE, as.mxMatrix=FALSE) 75 | 76 | dimnames(S1)[[1]] <- dimnames(S1)[[2]] <- colnames(Cooke16$data[[1]]) 77 | S1 78 | 79 | ## Stage 2 analysis 80 | random2 <- tssem2(random1, Amatrix=A1, Smatrix=S1, diag.constraints=FALSE, 81 | intervals.type="LB") 82 | summary(random2) 83 | 84 | ## Display the model 85 | plot(random2, what="path") 86 | 87 | ## Display the model with the parameter estimates 88 | plot(random2, color="yellow") 89 | } 90 | } 91 | \keyword{datasets} 92 | -------------------------------------------------------------------------------- /man/Cooper03.Rd: -------------------------------------------------------------------------------- 1 | \name{Cooper03} 2 | \alias{Cooper03} 3 | \docType{data} 4 | \title{Selected effect sizes from Cooper et al. (2003)} 5 | \description{ 6 | Fifty-six effect sizes from 11 districts from Cooper et al. (2003) were reported by Konstantopoulos (2011). 7 | } 8 | \usage{data(Cooper03)} 9 | \details{ 10 | The variables are: 11 | \describe{ 12 | \item{District}{District ID} 13 | \item{Study}{Study ID} 14 | \item{y}{Effect size} 15 | \item{v}{Sampling variance} 16 | \item{Year}{Year of publication} 17 | } 18 | 19 | } 20 | \source{ 21 | Cooper, H., Valentine, J. C., Charlton, K., & Melson, A. (2003). The Effects of Modified School Calendars on Student Achievement and on School and Community Attitudes. \emph{Review of Educational Research}, \bold{73(1)}, 1-52. doi:10.3102/00346543073001001 22 | } 23 | \references{ 24 | Konstantopoulos, S. (2011). Fixed effects and variance components 25 | estimation in three-level meta-analysis. \emph{Research Synthesis 26 | Methods}, \bold{2}, 61-76. 
doi:10.1002/jrsm.35 27 | } 28 | \examples{ 29 | \donttest{ 30 | data(Cooper03) 31 | 32 | #### ML estimation method 33 | ## No predictor 34 | summary( model1 <- meta3L(y=y, v=v, cluster=District, data=Cooper03) ) 35 | 36 | ## Show all heterogeneity indices and their 95\% confidence intervals 37 | summary( meta3L(y=y, v=v, cluster=District, data=Cooper03, 38 | intervals.type="LB", I2=c("I2q", "I2hm", "I2am", "ICC")) ) 39 | 40 | ## Year as a predictor 41 | summary( meta3L(y=y, v=v, cluster=District, x=scale(Year, scale=FALSE), 42 | data=Cooper03, model.name="Year as a predictor") ) 43 | 44 | ## Equality of level-2 and level-3 heterogeneity 45 | summary( model2 <- meta3L(y=y, v=v, cluster=District, data=Cooper03, 46 | RE2.constraints="0.2*EqTau2", 47 | RE3.constraints="0.2*EqTau2", 48 | model.name="Equal Tau2") ) 49 | 50 | ## Compare model2 vs. model1 51 | anova(model1, model2) 52 | 53 | #### REML estimation method 54 | ## No predictor 55 | summary( reml3L(y=y, v=v, cluster=District, data=Cooper03) ) 56 | 57 | ## Level-2 and level-3 variances are constrained equally 58 | summary( reml3L(y=y, v=v, cluster=District, data=Cooper03, 59 | RE.equal=TRUE, model.name="Equal Tau2") ) 60 | 61 | ## Year as a predictor 62 | summary( reml3L(y=y, v=v, cluster=District, x=scale(Year, scale=FALSE), 63 | data=Cooper03, intervals.type="LB") ) 64 | 65 | ## Handling missing covariates with FIML 66 | ## Create 20/56 MCAR data in Year 67 | set.seed(10000) 68 | Year_MCAR <- Cooper03$Year 69 | Year_MCAR[sample(56, 20)] <- NA 70 | summary( meta3LFIML(y=y, v=v, cluster=District, x2=scale(Year_MCAR, scale=FALSE), 71 | data=Cooper03, model.name="NA in Year_MCAR") ) 72 | } 73 | } 74 | \keyword{datasets} 75 | -------------------------------------------------------------------------------- /man/Cor2DataFrame.Rd: -------------------------------------------------------------------------------- 1 | \name{Cor2DataFrame} 2 | \alias{Cor2DataFrame} 3 | \title{Convert correlation or covariance matrices into a dataframe of correlations or 4 | covariances with their sampling covariance matrices 5 | } 6 | \description{It converts the correlation or covariance matrices into a 7 | dataframe of correlations or covariances with their asymptotic 8 | sampling covariance matrices. It uses the \code{asyCov} at the backend. 9 | } 10 | \usage{ 11 | Cor2DataFrame(x, n, v.na.replace=TRUE, cor.analysis=TRUE, 12 | acov=c("weighted", "individual", "unweighted"), 13 | Means, row.names.unique=FALSE, append.vars=TRUE, 14 | asyCovOld=FALSE, ...) 15 | } 16 | %- maybe also 'usage' for other objects documented here. 17 | \arguments{ 18 | \item{x}{A list of data with correlation/covariance matrix in \code{x$data} and 19 | sample sizes \code{x$n}. Additional variables in \code{x} can be attached. 20 | } 21 | \item{n}{If \code{x} is a list of correlation matrices without 22 | \code{x$data} and \code{x$n}, a vector of sample sizes \code{n} must 23 | be provided.} 24 | \item{v.na.replace}{Logical. Missing value is not allowed in definition 25 | variables. If it is \code{TRUE} (the default), missing value is 26 | replaced by a large value (1e10). These values are not used in the analysis.} 27 | \item{cor.analysis}{Logical. The output is either a correlation or 28 | covariance matrix.} 29 | \item{acov}{If it is \code{weighted}, the average correlation/covariance 30 | matrix is calculated based on the weighted mean with the sample 31 | sizes. 
The average correlation/covariance matrix is used to calculate the sampling 32 | variance-covariance matrices.} 33 | \item{Means}{An optional matrix of means. The number of rows must be the same as the length of \code{n}. The sampling covariance matrices of the means are calculated by the covariance matrices divided by the sample sizes. Therefore, it is important to make sure that covariance matrices (not correlation matrices) are used in \code{x} when \code{Means} are included; otherwise, the calculated sampling covariance matrices of the means are incorrect.} 34 | \item{row.names.unique}{Logical. If it is \code{FALSE} (the default), unique 35 | row names are not created.} 36 | \item{append.vars}{Whether to append the additional variables to 37 | the output dataframe.} 38 | \item{asyCovOld}{Whether to use the old version of \code{asyCov}. See \code{\link[metaSEM]{asyCov}}.} 39 | \item{\dots}{Further arguments to be passed to \code{\link[metaSEM]{asyCov}}.} 40 | } 41 | 42 | \value{A list of components: (1) a data frame of correlations or covariances with their 43 | sampling covariance matrices; (2) a vector of sample 44 | sizes; (3) labels of the correlations; and (4) labels of their sampling covariance matrices. } 45 | 46 | \author{Mike W.-L. Cheung 47 | } 48 | \seealso{ \code{\link[metaSEM]{asyCov}}, \code{\link[metaSEM]{osmasem}}, \code{\link[metaSEM]{create.vechsR}}, 49 | \code{\link[metaSEM]{create.Tau2}}, \code{\link[metaSEM]{create.V}} 50 | } 51 | 52 | 53 | \examples{ 54 | \donttest{ 55 | ## Provide a list of correlation matrices and a vector of sample sizes as the inputs 56 | my.df1 <- Cor2DataFrame(Nohe15A1$data, Nohe15A1$n) 57 | 58 | ## Add Lag time as a variable 59 | my.df1$data <- data.frame(my.df1$data, Lag=Nohe15A1$Lag, check.names=FALSE) 60 | 61 | ## Data 62 | my.df1$data 63 | 64 | ## Sample sizes 65 | my.df1$n 66 | 67 | ## ylabels 68 | my.df1$ylabels 69 | 70 | ## vlabels 71 | my.df1$vlabels 72 | 73 | #### Simplified version to do it 74 | my.df2 <- Cor2DataFrame(Nohe15A1) 75 | } 76 | } 77 | % Add one or more standard keywords, see file 'KEYWORDS' in the 78 | % R documentation directory. 79 | \keyword{ osmasem } 80 | -------------------------------------------------------------------------------- /man/Diag.Rd: -------------------------------------------------------------------------------- 1 | \name{Diag} 2 | \alias{Diag} 3 | \alias{Diag<-} 4 | %- Also NEED an '\alias' for EACH other topic documented here. 5 | \title{Matrix Diagonals 6 | } 7 | \description{Extract or replace the diagonal of a matrix, or construct a diagonal matrix with the same behaviors as \code{diag} prior to R-3.0.0. 8 | } 9 | \usage{ 10 | Diag(x, ...) 11 | Diag(x) <- value 12 | } 13 | 14 | \arguments{ 15 | \item{x}{A matrix, vector or 1D array, or missing.} 16 | \item{...}{Optional dimensions (\code{nrow} and \code{ncol}) for the result when \code{x} is 17 | not a matrix.} 18 | \item{value}{Either a single value or a vector of length equal to that 19 | of the current diagonal. Should be of a mode which can be coerced 20 | to that of \code{x}.} 21 | } 22 | 23 | \details{Starting from R-3.0.0, \code{diag(x)} returns a numeric matrix with NA in the 24 | diagonals when x is a character vector. Although this follows what the manual says, this 25 | breaks the metaSEM package. \code{Diag} has the same functionality as 26 | \code{diag} except that \code{Diag(x)} works for a character vector of 27 | x by returning a square matrix of character "0" with \code{x} as the 28 | diagonals. 
29 | %% ~~ If necessary, more details than the description above ~~ 30 | } 31 | 32 | \author{Mike W.-L. Cheung 33 | } 34 | \note{See 35 | http://r.789695.n4.nabble.com/Behaviors-of-diag-with-character-vector-in-R-3-0-0-td4663735.html 36 | for the discussion. 37 | %% ~~further notes~~ 38 | } 39 | 40 | %% ~Make other sections like Warning with \section{Warning }{....} ~ 41 | 42 | \seealso{\code{\link{diag}} 43 | } 44 | \examples{ 45 | v <- c("a", "b") 46 | Diag(v) 47 | } 48 | % Add one or more standard keywords, see file 'KEYWORDS' in the 49 | % R documentation directory. 50 | \keyword{utilities} 51 | 52 | -------------------------------------------------------------------------------- /man/Gleser94.Rd: -------------------------------------------------------------------------------- 1 | \name{Gleser94} 2 | \alias{Gleser94} 3 | \docType{data} 4 | \title{Two Datasets from Gleser and Olkin (1994) 5 | } 6 | \description{It includes two datasets in multiple-treatment studies and 7 | multiple-endpoint studies reported by Gleser and Olkin (1994). 8 | } 9 | \usage{data("Gleser94")} 10 | \format{ 11 | A list of two data frames. 12 | \describe{ 13 | \item{\code{MTS}}{A data frame of multiple-treatment studies.} 14 | \item{\code{MES}}{A data frame of multiple-endpoint studies.} 15 | } 16 | } 17 | \source{Gleser, L. J., & Olkin, I. (1994). Stochastically dependent effect sizes. In H. Cooper & L. V. Hedges (Eds.), The handbook of research synthesis. (pp. 339-355). New York: Russell Sage Foundation. 18 | } 19 | 20 | \seealso{\code{\link[metaSEM]{smdMTS}}, \code{\link[metaSEM]{smdMES}} 21 | } 22 | 23 | \examples{ 24 | \donttest{ 25 | data(Gleser94) 26 | 27 | #### Multiple-treatment studies 28 | Gleser94$MTS 29 | 30 | ## Assuming homogeneity of variances 31 | my.MTS <- t(apply(Gleser94$MTS, MARGIN=1, 32 | function(x) 33 | smdMTS(m=x[c("Mean.C", "Mean.E1", "Mean.E2", "Mean.E3", "Mean.E4", "Mean.E5")], 34 | v=x[c("SD.C", "SD.E1", "SD.E2", "SD.E3", "SD.E4", "SD.E5")]^2, 35 | n=x[c("N.C", "N.E1", "N.E2", "N.E3", "N.E4", "N.E5")], 36 | homogeneity="variance", list.output=FALSE))) 37 | 38 | ## Fixed-effects multivariate meta-analysis 39 | fit.MTS <- meta(y=my.MTS[, 1:5], 40 | v=my.MTS[, 6:20], 41 | RE.constraints = diag(0, ncol=5, nrow=5), 42 | model.name="MTS") 43 | summary(fit.MTS) 44 | 45 | #### Multiple-endpoint studies 46 | Gleser94$MES 47 | 48 | ## Calculate the sampling variances and covariance and amend into the data set 49 | Gleser94$MES$Uncoached.V11 <- with(Gleser94$MES, SD.Uncoached.Math^2) 50 | Gleser94$MES$Uncoached.V21 <- with(Gleser94$MES, 51 | SD.Uncoached.Math*Cor.Math.Verbal*SD.Uncoached.Verbal) 52 | Gleser94$MES$Uncoached.V22 <- with(Gleser94$MES, SD.Uncoached.Verbal^2) 53 | 54 | Gleser94$MES$Coached.V11 <- with(Gleser94$MES, SD.Coached.Math^2) 55 | Gleser94$MES$Coached.V21 <- with(Gleser94$MES, 56 | SD.Coached.Math*Cor.Math.Verbal*SD.Coached.Verbal) 57 | Gleser94$MES$Coached.V22 <- with(Gleser94$MES, SD.Coached.Verbal^2) 58 | 59 | ## Assuming homogeneity of covariance matrices 60 | my.MES <- t(apply(Gleser94$MES, MARGIN=1, 61 | function(x) 62 | smdMES(m1=x[c("Mean.Uncoached.Math", "Mean.Uncoached.Verbal")], 63 | m2=x[c("Mean.Coached.Math", "Mean.Coached.Verbal")], 64 | V1=vec2symMat(x[c("Uncoached.V11", "Uncoached.V21", "Uncoached.V22")]), 65 | V2=vec2symMat(x[c("Coached.V11", "Coached.V21", "Coached.V22")]), 66 | n1=x["N.Uncoached"], 67 | n2=x["N.Coached"], 68 | homogeneity="covariance", list.output=FALSE))) 69 | 70 | ## Fixed-effects multivariate meta-analysis 71 | fit.MES <- 
meta(y=my.MES[, 1:2], 72 | v=my.MES[, 3:5], 73 | RE.constraints = diag(0, ncol=2, nrow=2), 74 | model.name="MES") 75 | summary(fit.MES) 76 | } 77 | } 78 | \keyword{datasets} 79 | -------------------------------------------------------------------------------- /man/Gnambs18.Rd: -------------------------------------------------------------------------------- 1 | \name{Gnambs18} 2 | \alias{Gnambs18} 3 | \docType{data} 4 | \title{Correlation Matrices from Gnambs, Scharl, and Schroeders (2018)} 5 | \description{ 6 | The data set includes 113 correlation matrices on the Rosenberg 7 | Self-Esteem Scale reported by Gnambs, Scharl, and Schroeders 8 | (2018). Thirty-six studies were based on the reported correlation 9 | matrices (\code{CorMat=1}) whereas the correlation matrices of the 10 | other 77 studies were calculated from the reported factor loadings. 11 | } 12 | \usage{data(Gnambs18) 13 | } 14 | \details{ 15 | A list of data with the following structure: 16 | \describe{ 17 | \item{data}{A list of 113 correlation matrices. The variable names are from \emph{I1} to \emph{I10}.} 18 | \item{n}{A vector of sample sizes.} 19 | \item{Year}{The year of publications.} 20 | \item{Country}{The country of studies conducted.} 21 | \item{Language}{The language used in the studies.} 22 | \item{Publication}{Whether the studies were published (1) or 23 | unpublished (0).} 24 | \item{MeanAge}{Mean age of the participants.} 25 | \item{FemaleProp}{Proportion of the female participants.} 26 | \item{Individualism}{Individualism score of the country.} 27 | \item{CorMat}{Whether the correlation matrices are obtained from the original 28 | studies (1) or reproduced from the factor loadings (0).} 29 | } 30 | } 31 | \source{ 32 | Gnambs, T., Scharl, A., & Schroeders, U. (2018). The structure of the Rosenberg Self-Esteem Scale. \emph{Zeitschrift Fur Psychologie}, \bold{226}(1), 14-29. https://doi.org/10.1027/2151-2604/a000317 33 | } 34 | \keyword{datasets} 35 | -------------------------------------------------------------------------------- /man/HedgesOlkin85.Rd: -------------------------------------------------------------------------------- 1 | \name{HedgesOlkin85} 2 | \alias{HedgesOlkin85} 3 | \docType{data} 4 | \title{Effects of Open Education Reported by Hedges and Olkin (1985) 5 | } 6 | \description{ 7 | Effects of open education on attitude toward school and on reading achievement reported by Hedges and Olkin (1985). 8 | } 9 | \usage{data(HedgesOlkin85)} 10 | 11 | \details{ 12 | The variables are: 13 | \describe{ 14 | \item{study}{Study number} 15 | \item{d_att}{Standardized mean difference on \emph{attitude}} 16 | \item{d_ach}{Standardized mean difference on \emph{achievement}} 17 | \item{var_att}{Sampling variance of the effect size of \emph{attitude}} 18 | \item{cov_att_ach}{Sampling covariance between the effect sizes} 19 | \item{var_ach}{Sampling variance of the effect size of \emph{achievement}} 20 | } 21 | } 22 | \source{ 23 | Hedges, L. V., & Olkin, I. (1985). \emph{Statistical methods for meta-analysis.} Orlando, FL: Academic Press. 24 | } 25 | \references{ 26 | Cheung, M. W.-L. (2010). Fixed-effects meta-analyses as multiple-group structural equation models. \emph{Structural Equation Modeling}, \bold{17}, 481-509. 
27 | } 28 | \examples{ 29 | data(HedgesOlkin85) 30 | 31 | ## Fixed-effects meta-analysis 32 | summary( meta(y=cbind(d_att, d_ach), 33 |               v=cbind(var_att, cov_att_ach, var_ach), 34 |               data=HedgesOlkin85, 35 |               RE.constraints=matrix(0, nrow=2, ncol=2)) ) 36 | } 37 | \keyword{datasets} 38 | -------------------------------------------------------------------------------- /man/Hox02.Rd: -------------------------------------------------------------------------------- 1 | \name{Hox02} 2 | \alias{Hox02} 3 | \docType{data} 4 | \title{Simulated Effect Sizes Reported by Hox (2002)} 5 | \description{ 6 | Twenty simulated studies on standardized mean differences and one 7 | continuous study characteristic reported by Hox (2002). 8 | } 9 | \usage{data(Hox02)} 10 | 11 | \details{ 12 | The variables are: 13 | \describe{ 14 | \item{study}{Study number} 15 | \item{yi}{Effect size (standardized mean difference)} 16 | \item{vi}{Sampling variance of the effect size} 17 | \item{weeks}{Duration of the experimental intervention in terms of weeks} 18 | } 19 | } 20 | \source{ 21 | Hox, J. J. (2002). \emph{Multilevel analysis: Techniques and applications.} Mahwah, N.J.: Lawrence Erlbaum Associates. 22 | } 23 | \references{ 24 | Cheung, M. W.-L. (2008). A model for integrating fixed-, random-, and mixed-effects meta-analyses into structural equation modeling. \emph{Psychological Methods}, \bold{13}, 182-202. 25 | } 26 | \examples{ 27 | \donttest{ 28 | data(Hox02) 29 | 30 | #### ML estimation method 31 | ## Random-effects meta-analysis 32 | summary( meta(y=yi, v=vi, data=Hox02, I2=c("I2q", "I2hm"), intervals.type="LB") ) 33 | 34 | ## Fixed-effects meta-analysis 35 | summary( meta(y=yi, v=vi, data=Hox02, RE.constraints=0, 36 |               model.name="Fixed effects model") ) 37 | 38 | ## Mixed-effects meta-analysis with "weeks" as a predictor 39 | ## Request likelihood-based CI 40 | summary( meta(y=yi, v=vi, x=weeks, data=Hox02, intervals.type="LB", 41 |               model.name="Mixed effects meta analysis with LB CI") ) 42 | 43 | #### REML estimation method 44 | ## Random-effects meta-analysis with REML 45 | summary( VarComp <- reml(y=yi, v=vi, data=Hox02) ) 46 | 47 | ## Extract the variance component 48 | VarComp_REML <- matrix( coef(VarComp), ncol=1, nrow=1 ) 49 | 50 | ## Meta-analysis by treating the variance component as fixed 51 | summary( meta(y=yi, v=vi, data=Hox02, RE.constraints=VarComp_REML) ) 52 | 53 | 54 | ## Mixed-effects meta-analysis with "weeks" as a predictor 55 | ## Request Wald CI 56 | summary( reml(y=yi, v=vi, x=weeks, intervals.type="z", 57 |               data=Hox02, model.name="REML with LB CI") ) 58 | } 59 | } 60 | \keyword{datasets} 61 | -------------------------------------------------------------------------------- /man/Jansen19.Rd: -------------------------------------------------------------------------------- 1 | \name{Jansen19} 2 | \alias{Jansen19} 3 | \docType{data} 4 | \title{Effect Sizes Reported by Jansen et al. (2019)} 5 | \description{ 6 | This dataset contains effect sizes derived from Jansen et al. (2019). It includes standardized mean differences (SMDs) regarding self-regulated learning (SRL) activities and achievements, comparing the SRL intervention group to the control group, and the correlations between SRL activities and achievement. There are two key differences compared to the analyses conducted by Jansen et al. (2019). First, while Jansen et al. (2019) used correlation matrices with a d-to-r conversion, this dataset employs both SMDs and correlation matrices. 
Second, this dataset averages the effect sizes within each study to prevent non-independence among the effect sizes. Consequently, the results from this dataset may not be directly comparable to those of Jansen et al. (2019). 7 | } 8 | \usage{data(Jansen19)} 9 | 10 | \details{ 11 | The variables are: 12 | \describe{ 13 | \item{ArticleID}{Article ID number} 14 | \item{SRL_act}{SMD of the SRL activity comparing the SRL group to the control group} 15 | \item{Achieve}{SMD of the achievement comparing the SRL group to the control group} 16 | \item{r_SRL_achieve}{Correlation between the SRL activity and the achievement} 17 | \item{Ntotal}{Total sample size of the control and intervention groups} 18 | \item{Online}{Whether the primary studies were conducted online or offline} 19 | } 20 | } 21 | \source{ 22 | Jansen, R. S., van Leeuwen, A., Janssen, J., Jak, S., & Kester, L. (2019). Self-regulated learning partially mediates the effect of self-regulated learning interventions on achievement in higher education: A meta-analysis. \emph{Educational Research Review}, \bold{28}, 100292. https://doi.org/10.1016/j.edurev.2019.100292 23 | } 24 | 25 | \examples{ 26 | \donttest{ 27 | data(Jansen19) 28 | } 29 | } 30 | \keyword{datasets} 31 | -------------------------------------------------------------------------------- /man/Jaramillo05.Rd: -------------------------------------------------------------------------------- 1 | \name{Jaramillo05} 2 | \alias{Jaramillo05} 3 | \docType{data} 4 | \title{Dataset from Jaramillo, Mulki and Marshall (2005) 5 | } 6 | \description{A dataset of the relationship between organizational 7 | commitment (OC) and salesperson job performance (JP) from Jaramillo, Mulki & Marshall (2005). 8 | } 9 | \usage{data(Jaramillo05)} 10 | \format{ 11 | A data frame with 61 observations on the following 11 variables. 12 | \describe{ 13 | \item{\code{Author}}{a character vector of study} 14 | \item{\code{Sample_size}}{sample size of the study} 15 | \item{\code{Sales}}{sample type; either "mixed", "nonsales" or "sales"} 16 | \item{\code{Country}}{a character vector of country of study} 17 | \item{\code{IDV}}{Hofstede's (1997) individualism index} 18 | \item{\code{OC_scale}}{scale of OC; either "Porter or Mowday", 19 | "Meyer" or "other"} 20 | \item{\code{OC_alpha}}{Coefficient alpha of organizational commitment} 21 | \item{\code{JP_alpha}}{Coefficient alpha of job performance} 22 | \item{\code{r}}{correlation between organizational commitment and 23 | job performance} 24 | \item{\code{r_v}}{sampling variance of r} 25 | \item{\code{Citations}}{Citations from Google Scholar as of 27 August 2024} 26 | } 27 | } 28 | 29 | \source{Jaramillo, F., Mulki, J. P., & Marshall, G. W. (2005). A meta-analysis of the relationship between organizational commitment and salesperson job performance: 25 years of research. \emph{Journal of Business Research}, \bold{58(6)}, 705-714.
doi:10.1016/j.jbusres.2003.10.004 30 | } 31 | 32 | \examples{ 33 | \donttest{ 34 | ## Research question 4.4.1 35 | summary(meta(r, r_v, data=Jaramillo05)) 36 | 37 | ## Research question 4.4.2 38 | ## Select cases with either "sales" or "nonsales" 39 | Sales.df <- subset(Jaramillo05, Sales \%in\% c("sales", "nonsales")) 40 | 41 | ## Create a predictor with 1 and 0 when they are "sales" or "nonsales", respectively 42 | predictor <- ifelse(Sales.df$Sales=="sales", yes=1, no=0) 43 | 44 | ## Mixed-effects meta-analysis on the selected cases 45 | summary( meta(y = r, v = r_v, x = predictor, data = Sales.df) ) 46 | 47 | ## Research question 4.4.3 48 | summary(meta(r, r_v, x=IDV, data=Jaramillo05)) 49 | } 50 | } 51 | \keyword{datasets} 52 | -------------------------------------------------------------------------------- /man/Kalaian96.Rd: -------------------------------------------------------------------------------- 1 | \name{Kalaian96} 2 | \alias{Kalaian96} 3 | \docType{data} 4 | \title{Multivariate effect sizes reported by Kalaian and Raudenbush (1996) 5 | } 6 | \description{ 7 | This data set includes 47 multivariate effect sizes reported by 8 | Kalaian and Raudenbush (1996, Table 1). 9 | } 10 | \usage{data(Kalaian96)} 11 | 12 | \details{ 13 | A list of data with the following structure: 14 | \describe{ 15 | \item{Study}{Study name} 16 | \item{Year}{Year of publication} 17 | \item{n_e}{Sample size of the experimental group} 18 | \item{n_c}{Sample size of the control group} 19 | \item{dSAT_V}{Standardized mean difference of the Scholastic Aptitude 20 | Test (SAT) on verbal} 21 | \item{dSAT_M}{Standardized mean difference of SAT on math} 22 | \item{var_V}{Sampling variance of \code{dSAT_V}} 23 | \item{cov_VM}{Sampling covariance of \code{dSAT_V} and \code{dSAT_M} 24 | with a common correlation of 0.66} 25 | \item{var_M}{Sampling variance of \code{dSAT_M}} 26 | \item{Hr}{Hours of training} 27 | \item{ETS}{Educational Testing Service} 28 | \item{Study_type}{Either \code{Randomized}, \code{Matched} or 29 | \code{Nonequivalent comparison}} 30 | \item{Home_work}{Home work} 31 | } 32 | } 33 | 34 | \source{ 35 | Kalaian, H. A., & Raudenbush, S. W. (1996). A multivariate mixed linear model for meta-analysis. \emph{Psychological Methods}, \emph{1}(3), 227-235. https://doi.org/10.1037/1082-989X.1.3.227 36 | } 37 | 38 | \examples{ 39 | \donttest{ 40 | data(Kalaian96) 41 | } 42 | } 43 | \keyword{datasets} 44 | -------------------------------------------------------------------------------- /man/Mak09.Rd: -------------------------------------------------------------------------------- 1 | \name{Mak09} 2 | \alias{Mak09} 3 | \docType{data} 4 | \title{Eight studies from Mak et al. (2009)} 5 | \description{ 6 | Eight studies from Mak et al. (2009) were reported by Cheung et al. (2012). 7 | } 8 | 9 | \usage{data(Mak09)} 10 | \format{ 11 | A data frame with 8 observations on the following 10 variables. 12 | \describe{ 13 | \item{\code{Study}}{a character vector of study} 14 | \item{\code{type}}{a character vector} 15 | \item{\code{AF.BP}}{a numeric vector} 16 | \item{\code{Tot.BP}}{a numeric vector} 17 | \item{\code{AF.non.BP}}{a numeric vector} 18 | \item{\code{Tot.non.BP}}{a numeric vector} 19 | \item{\code{yi}}{a numeric vector} 20 | \item{\code{vi}}{a numeric vector} 21 | \item{\code{age.mean}}{a numeric vector} 22 | \item{\code{study.duration}}{a numeric vector} 23 | } 24 | } 25 | 26 | \source{ 27 | Mak, A., Cheung, M. W.-L., Ho, R. C. M., Cheak, A. A. C., & Lau, 28 | C. S. (2009).
Bisphosphonate and atrial fibrillation: Bayesian 29 | meta-analyses of randomized controlled trials and observational 30 | studies. \emph{BMC Musculoskeletal Disorders}, 31 | \bold{10(113)}. doi:10.1186/1471-2474-10-113 Available at \url{https://bmcmusculoskeletdisord.biomedcentral.com/articles/10.1186/1471-2474-10-113}. 32 | } 33 | \references{ 34 | Cheung, M. W.-L., Ho, R. C. M., Lim, Y., & Mak, A. (2012). Conducting a 35 | meta-analysis: Basics and good practices. \emph{International Journal 36 | of Rheumatic Diseases}, \bold{15(2)}, 129-135. doi: 10.1111/j.1756-185X.2012.01712.x 37 | } 38 | \examples{ 39 | ## Random-effects meta-analysis 40 | ( meta1 <- summary(meta(y=yi, v=vi, data=Mak09, I2=c("I2q", "I2hm"))) ) 41 | 42 | ## Convert the estimates back into odds ratio 43 | OR <- with(coef(meta1), exp(c(Estimate[1], lbound[1], ubound[1]))) 44 | names(OR) <- c("Estimate in OR", "lbound in OR", "ubound in OR") 45 | OR 46 | 47 | ## Mixed-effects meta-analysis with mean age as a predictor 48 | summary( meta(y=yi, v=vi, x=age.mean, data=Mak09) ) 49 | } 50 | \keyword{datasets} 51 | -------------------------------------------------------------------------------- /man/Mathieu15.Rd: -------------------------------------------------------------------------------- 1 | \name{Mathieu15} 2 | \alias{Mathieu15} 3 | \docType{data} 4 | \title{Correlation Matrices from Mathieu et al. (2015)} 5 | \description{ 6 | The data set includes a list of correlation matrices of panel studies between 7 | cohesion (C) and performance (P) in Mathieu et al. (2015, Table 1). 8 | } 9 | \usage{data(Mathieu15)} 10 | 11 | \details{ 12 | A list of data with the following structure: 13 | \describe{ 14 | \item{data}{A list of studies of correlation matrices. The 15 | variables are \emph{C1}, \emph{P1}, \emph{C2}, and \emph{P2}.} 16 | \item{n}{A vector of sample sizes.} 17 | \item{Year}{Year of publication.} 18 | \item{Sample}{Sample characteristics.} 19 | \item{Student}{Whether the samples are student or non-student based on 20 | \code{Sample}.} 21 | } 22 | } 23 | \source{ 24 | Mathieu, J. E., Kukenberger, M. R., D'Innocenzo, L., & Reilly, G. (2015). Modeling reciprocal team cohesion-performance relationships, as impacted by shared leadership and members' competence. \emph{Journal of Applied Psychology}, \bold{100}(3), 713-734. 
https://doi.org/10.1037/a0038898 25 | } 26 | \examples{ 27 | \donttest{ 28 | # TSSEM 29 | ## Model 1: no constraint 30 | ## Stage 1 analysis 31 | tssem1.fit <- tssem1(Mathieu15$data, Mathieu15$n) 32 | summary(tssem1.fit) 33 | 34 | ## Proposed model in lavaan syntax 35 | model1 <- 'C2 ~ c2c*C1 + p2c*P1 36 | P2 ~ c2p*C1 + p2p*P1 37 | C1 ~~ c1withp1*P1 38 | C1 ~~ 1*C1 39 | P1 ~~ 1*P1 40 | C2 ~~ c2withp2*P2' 41 | 42 | ## Convert the lavaan model to RAM specification 43 | RAM1 <- lavaan2RAM(model1, obs.variables=c("C1", "P1", "C2", "P2")) 44 | RAM1 45 | 46 | ## Stage 2 analysis 47 | tssem1b.fit <- tssem2(tssem1.fit, RAM=RAM1) 48 | summary(tssem1b.fit) 49 | 50 | plot(tssem1b.fit, col="yellow", edge.label.position=0.58) 51 | 52 | ## Model 2: Equality constraints on the path coefficient 53 | ## Proposed model with equal effects time 1 to time 2 54 | model2 <- 'C2 ~ same*C1 + diff*P1 55 | P2 ~ diff*C1 + same*P1 56 | C1 ~~ c1withp1*P1 57 | C1 ~~ 1*C1 58 | P1 ~~ 1*P1 59 | C2 ~~ c2withp2*P2' 60 | 61 | ## Convert the lavaan model to RAM specification 62 | RAM2 <- lavaan2RAM(model2, obs.variables=c("C1", "P1", "C2", "P2")) 63 | RAM2 64 | 65 | ## Stage 2 analysis 66 | tssem2b.fit <- tssem2(tssem1.fit, RAM=RAM2) 67 | summary(tssem2b.fit) 68 | 69 | ## Compare the models with and without the constraints. 70 | anova(tssem1b.fit, tssem2b.fit) 71 | 72 | ## Plot the model 73 | plot(tssem2b.fit, col="yellow", edge.label.position=0.60) 74 | 75 | 76 | ## OSMASEM 77 | my.df <- Cor2DataFrame(Mathieu15) 78 | 79 | head(my.df$data) 80 | 81 | ## Model without any moderator 82 | osmasem.fit1 <- osmasem(model.name="No moderator", RAM=RAM1, data=my.df) 83 | summary(osmasem.fit1) 84 | 85 | ## Extract the heterogeneity variance-covariance matrix 86 | diag(VarCorr(osmasem.fit1)) 87 | 88 | plot(osmasem.fit1, col="yellow", edge.label.position=0.6) 89 | 90 | ## Model with student sample as a moderator on the regression coefficients 91 | A1 <- create.modMatrix(RAM1, output="A", "Student") 92 | A1 93 | 94 | ## Model with a moderator 95 | osmasem.fit2 <- osmasem(model.name="Student sample as a moderator", RAM=RAM1, 96 | Ax=A1, data=my.df) 97 | summary(osmasem.fit2) 98 | 99 | ## Compare the models with and without the moderator 100 | anova(osmasem.fit2, osmasem.fit1) 101 | 102 | ## Get the R2 of the moderator 103 | osmasemR2(osmasem.fit2, osmasem.fit1) 104 | } 105 | } 106 | \keyword{datasets} 107 | -------------------------------------------------------------------------------- /man/Nam03.Rd: -------------------------------------------------------------------------------- 1 | \name{Nam03} 2 | \alias{Nam03} 3 | \docType{data} 4 | \title{Dataset on the Environmental Tobacco Smoke (ETS) on children's health 5 | } 6 | \description{ 7 | This dataset includes 59 studies reported by Nam, Mengersen, and 8 | Garthwaite (2003) on the potential health effects among children exposed to 9 | environmental tobacco smoke (ETS), or passive smoking. The effect sizes 10 | are the log odds ratios of asthma and lower respiratory disease (LRD). 
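A bivariate random-effects meta-analysis is one natural way to pool the two outcomes jointly. The sketch below is not part of the original help page; it only illustrates how the variables listed under \details could be passed to \code{meta()}, with the sampling variances and the assumed covariance supplied in the usual variance-covariance-variance order (the object name \code{fit.Nam} and the model name are arbitrary).

## Illustrative sketch only (not from the original help page)
library(metaSEM)
data(Nam03)
fit.Nam <- meta(y=cbind(Asthma_logOR, LRD_logOR),
                v=cbind(Asthma_v, AsthmaLRD_cov_05, LRD_v),
                data=Nam03,
                model.name="Bivariate random effects model")
summary(fit.Nam)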
11 | } 12 | \usage{data(Nam03)} 13 | 14 | \details{ 15 | A list of data with the following structure: 16 | \describe{ 17 | \item{ID}{Study identification number.} 18 | \item{Size}{Total number of valid subjects in the study.} 19 | \item{Age}{Mean age of participants.} 20 | \item{Year}{Year of publication.} 21 | \item{Country}{Country code.} 22 | \item{Smoke}{Source of ETS.} 23 | \item{Adj}{Whether the reported odds ratio is adjusted for covariates.} 24 | \item{Asthma_logOR}{Log odds ratio of asthma.} 25 | \item{LRD_logOR}{Log odds ratio of lower respiratory disease.} 26 | \item{Asthma_v}{Sampling variance of Asthma_logOR.} 27 | \item{AsthmaLRD_cov_05}{Sampling covariance between Asthma_logOR and 28 | LRD_logOR by assuming a correlation of 0.5} 29 | \item{LRD_v}{Sampling variance of LRD_logOR.} 30 | } 31 | } 32 | 33 | \source{ 34 | Nam, I.-S., Mengersen, K., & Garthwaite, P. (2003). Multivariate meta-analysis. \emph{Statistics in Medicine}, \bold{22}(14), 2309-2333. https://doi.org/10.1002/sim.1410 35 | } 36 | 37 | \examples{ 38 | data(Nam03) 39 | } 40 | \keyword{datasets} 41 | -------------------------------------------------------------------------------- /man/Norton13.Rd: -------------------------------------------------------------------------------- 1 | \name{Norton13} 2 | \alias{Norton13} 3 | \docType{data} 4 | \title{Studies on the Hospital Anxiety and Depression Scale Reported by Norton et al. (2013) 5 | } 6 | \description{ 7 | The data set includes 28 studies on 14 items measuring the 8 | Hospital Anxiety and Depression Scale (HADS) Reported by Norton et 9 | al. (2013). 10 | } 11 | \usage{data(Norton13)} 12 | 13 | \details{ 14 | The variables are: 15 | \describe{ 16 | \item{data}{A list of 28 studies of correlation matrices. The 17 | variables are 14 items (x1 to x14) measuring HADS.} 18 | \item{n}{A vector of sample sizes} 19 | \item{population}{A vector of the population of the data} 20 | \item{group}{A vector of classification into \emph{patients} 21 | vs. \emph{non-patients} based on population} 22 | } 23 | } 24 | \source{ 25 | Norton, S., Cosco, T., Doyle, F., Done, J., & Sacker, A. (2013). The Hospital Anxiety and Depression Scale: A meta confirmatory factor analysis. \emph{Journal of Psychosomatic Research}, \emph{74}(1), 74-81. 26 | } 27 | \references{ 28 | Jak, S., & Cheung, M. W.-L. (2018). Addressing heterogeneity in meta-analytic structural equation modeling using subgroup analysis. \emph{Behavior Research 29 | Methods}, \bold{50}, 1359-1373. 30 | } 31 | \examples{ 32 | data(Norton13) 33 | } 34 | \keyword{datasets} 35 | -------------------------------------------------------------------------------- /man/Roorda11.Rd: -------------------------------------------------------------------------------- 1 | \name{Roorda11} 2 | \alias{Roorda11} 3 | \docType{data} 4 | \title{Studies on Students' School Engagement and Achievement Reported 5 | by Roorda et al. (2011) 6 | } 7 | \description{ 8 | The data set includes 45 studies on the influence of affective teacher-student relationships on students' school engagement and 9 | achievement reported by Roorda et al. (2011). 10 | } 11 | \usage{data(Roorda11)} 12 | 13 | \details{ 14 | The variables are: 15 | \describe{ 16 | \item{data}{A list of 45 studies of correlation matrices. 
The 17 | variables are \emph{pos} (positive teacher-student relations), 18 | \emph{neg} (negative teacher-student relations), \emph{enga} 19 | (student engagement), and \emph{achiev} (student achievement).} 20 | \item{n}{A vector of sample sizes} 21 | \item{SES}{A vector of average socio-economic status (SES) of the samples} 22 | } 23 | } 24 | \source{ 25 | Roorda, D. L., Koomen, H. M. Y., Spilt, J. L., & Oort, F. J. (2011). The influence of affective teacher-student relationships on students' school engagement and achievement a meta-analytic approach. \emph{Review of Educational Research}, \emph{81}(4), 493-529. 26 | } 27 | \references{ 28 | Jak, S., & Cheung, M. W.-L. (2018). Addressing heterogeneity in meta-analytic structural equation modeling using subgroup analysis. \emph{Behavior Research 29 | Methods}, \bold{50}, 1359-1373. 30 | } 31 | \examples{ 32 | \donttest{ 33 | 34 | ## Random-effects model: First stage analysis 35 | random1 <- tssem1(Cov = Roorda11$data, n = Roorda11$n, method = "REM", 36 | RE.type = "Diag") 37 | summary(random1) 38 | 39 | varnames <- c("pos", "neg", "enga", "achiev") 40 | 41 | ## Prepare a regression model using create.mxMatrix() 42 | A <- create.mxMatrix(c(0,0,0,0, 43 | 0,0,0,0, 44 | "0.1*b31","0.1*b32",0,0, 45 | 0,0,"0.1*b43",0), 46 | type = "Full", nrow = 4, ncol = 4, byrow = TRUE, 47 | name = "A", as.mxMatrix = FALSE) 48 | 49 | ## This step is not necessary but it is useful for inspecting the model. 50 | dimnames(A) <- list(varnames, varnames) 51 | A 52 | 53 | S <- create.mxMatrix(c(1, 54 | ".5*p21",1, 55 | 0,0,"0.6*p33", 56 | 0,0,0,"0.6*p44"), 57 | type="Symm", byrow = TRUE, 58 | name="S", as.mxMatrix = FALSE) 59 | 60 | ## This step is not necessary but it is useful for inspecting the model. 61 | dimnames(S) <- list(varnames, varnames) 62 | S 63 | 64 | ## Random-effects model: Second stage analysis 65 | random2 <- tssem2(random1, Amatrix=A, Smatrix=S, diag.constraints=TRUE, 66 | intervals="LB") 67 | summary(random2) 68 | 69 | ## Display the model with the parameter estimates 70 | plot(random2) 71 | } 72 | } 73 | \keyword{datasets} 74 | -------------------------------------------------------------------------------- /man/Scalco17.Rd: -------------------------------------------------------------------------------- 1 | \name{Scalco17} 2 | \alias{Scalco17} 3 | \docType{data} 4 | \title{Correlation Matrices from Scalco et al. (2017)} 5 | \description{ 6 | The data set includes correlation matrices using the theory of planned 7 | behavior to predict organic food consumption reported by Scalco17 et al. (2017). 8 | } 9 | \usage{data(Scalco17)} 10 | 11 | \details{ 12 | A list of data with the following structure: 13 | \describe{ 14 | \item{data}{A list of correlation matrices. The variables are 15 | \emph{ATT} (attitude), \emph{SN} (subjective norm), \emph{PBC} 16 | (perceived behavior control), \emph{BI} (behavioral intention), and \emph{BEH} (behavior)} 17 | \item{n}{A vector of sample sizes} 18 | \item{Age}{A vector of the mean age of the samples} 19 | \item{Female}{A vector of the percentage of the female samples} 20 | } 21 | } 22 | \source{ 23 | Scalco, A., Noventa, S., Sartori, R., & Ceschi, A. (2017). Predicting organic food consumption: A meta-analytic structural equation model based on the theory of planned behavior. \emph{Appetite}, \bold{112}, 235-248. 
24 | } 25 | 26 | \examples{ 27 | data(Scalco17) 28 | } 29 | \keyword{datasets} 30 | -------------------------------------------------------------------------------- /man/Sheeran20.Rd: -------------------------------------------------------------------------------- 1 | \name{Sheeran20} 2 | \alias{Sheeran20} 3 | \docType{data} 4 | \title{Effect Sizes Reported by Sheeran et al. (2020)} 5 | \description{ 6 | This dataset contains effect sizes derived from Sheeran et al. (2020). It includes standardized mean differences (SMDs) regarding health behaviors, autonomous motivation, and perceived competence, comparing the self-determination theory (SDT) group and control group, and the correlations among the variables. One key difference from the analyses conducted by Sheeran et al. (2020) is that while they used correlation matrices with a d-to-r conversion, this dataset utilizes both SMDs and correlation matrices. 7 | } 8 | \usage{data(Sheeran20)} 9 | 10 | \details{ 11 | The variables are: 12 | \describe{ 13 | \item{Study}{Study ID number} 14 | \item{n_c}{Cluster-adjusted N in the control group} 15 | \item{n_t}{Cluster-adjusted N in the treatment group} 16 | \item{Heal_beh}{SMD of the health behaviors between the treatment and control groups} 17 | \item{Auto_mot}{SMD of the autonomous motivation between the treatment and control groups} 18 | \item{Per_com}{SMD of the perceived competence between the treatment and control groups} 19 | \item{r_Auto_mot_Heal_beh}{correlation between the autonomous motivation and the health behaviors} 20 | \item{r_Per_com_Heal_beh}{correlation between the perceived competence and the health behaviors} 21 | \item{r_Per_com_Auto_mot}{correlation between the perceived competence and the autonomous motivation} 22 | \item{Beh_timing}{Time between intervention starting and measurement of behavior in weeks} 23 | \item{SDT_timing}{Time between intervention starting and post-intervention measurement of SDT constructs in weeks} 24 | \item{Overweight}{Overweight/obese participants composed entire sample} 25 | \item{College}{University/college students composed entire sample} 26 | \item{Adolescents}{Adolescents composed entire sample} 27 | \item{Clinical_sample}{Older adults composed entire sample} 28 | } 29 | } 30 | \source{ 31 | Sheeran, P., Wright, C. E., Avishai, A., Villegas, M. E., Lindemans, J. W., Klein, W. M. P., Rothman, A. J., Miles, E., & Ntoumanis, N. (2020). Self-determination theory interventions for health behavior change: Meta-analysis and meta-analytic structural equation modeling of randomized controlled trials. \emph{Journal of Consulting and Clinical Psychology}, \bold{88}(8), 726-737. https://doi.org/10.1037/ccp0000501 32 | } 33 | 34 | \examples{ 35 | \donttest{ 36 | data(Sheeran20) 37 | } 38 | } 39 | \keyword{datasets} 40 | -------------------------------------------------------------------------------- /man/Stadler15.Rd: -------------------------------------------------------------------------------- 1 | \name{Stadler15} 2 | \alias{Stadler15} 3 | \docType{data} 4 | \title{Correlations from Stadler et al. (2015)} 5 | \description{ 6 | The data set includes correlations between complex problem solving and 7 | intelligence reported by Stadler et al. (2015).
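The sketch below is not part of the original help page; it only shows how the correlations and their sampling variances listed under \details could be pooled with a univariate random-effects meta-analysis.

## Illustrative sketch only
library(metaSEM)
data(Stadler15)
summary(meta(y=r, v=v, data=Stadler15))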
8 | } 9 | \usage{data(Stadler15)} 10 | \details{ 11 | A list of data with the following structure: 12 | \describe{ 13 | \item{ID}{ID of the effect sizes} 14 | \item{Authors}{Authors of the studies} 15 | \item{Year}{Year of the studies} 16 | \item{N}{Sample size} 17 | \item{CPSMeasure}{Complex problem solving (CPS) measure} 18 | \item{IntelligenceMeasure}{Intelligence measure} 19 | \item{r}{Correlation between CPS and intelligence} 20 | \item{v}{Sampling variance of r} 21 | } 22 | } 23 | \source{ 24 | Stadler, M., Becker, N., Godker, M., Leutner, D., & Greiff, S. (2015). Complex problem solving and intelligence: A meta-analysis. \emph{Intelligence}, \bold{53}, 92-101. 25 | } 26 | \keyword{datasets} 27 | -------------------------------------------------------------------------------- /man/Tenenbaum02.Rd: -------------------------------------------------------------------------------- 1 | \name{Tenenbaum02} 2 | \alias{Tenenbaum02} 3 | \docType{data} 4 | \title{Correlation coefficients reported by Tenenbaum and Leaper (2002)} 5 | \description{ 6 | Forty-eight studies reported by Tenenbaum and Leaper (2002, Table 1). 7 | } 8 | \usage{data(Tenenbaum02)} 9 | \details{ 10 | The variables are: 11 | \describe{ 12 | \item{Authors}{Authors of the study} 13 | \item{Year}{Year of publication} 14 | \item{N}{Sample size} 15 | \item{r}{Correlation between parents' gender schemas and their 16 | offspring's gender-related cognitions.} 17 | \item{v}{Sampling variance of r} 18 | \item{Publication_source}{Publication source: 1="top-tier journal", 19 | 2="second-tier journal or book chapter", 3="dissertation", 4="other unpublished study"} 20 | \item{Author_gender}{Gender of the first author: "W"="woman", "M"="man"} 21 | \item{Parent_type}{Parent type: "M"="mother", "F"="father", "MF"="mother and father"} 22 | \item{Parent_predictor}{Parent predictor: "S"="self gender schema", "A"="gender attitudes about others"} 23 | \item{Offspring_age}{Offspring age (months)} 24 | \item{Offspring_type}{Offspring type: "D"="daughter", 25 | "S"="son", "DS"="daughter and son"} 26 | \item{Offspring_outcome}{Offspring outcome: 27 | "S"="gender schema for self", "A"="gender attitudes toward others", 28 | "I"="gender-related interests and preferences", "W"="work-related attitudes"} 29 | } 30 | 31 | } 32 | \source{ 33 | Tenenbaum, H. R., & Leaper, C. (2002). Are parents' gender schemas related to their children's gender-related cognitions? A meta-analysis. \emph{Developmental Psychology}, \emph{38}(4), 615-630. https://doi.org/10.1037/0012-1649.38.4.615 34 | } 35 | 36 | \examples{ 37 | \donttest{ 38 | data(Tenenbaum02) 39 | } 40 | } 41 | \keyword{datasets} 42 | -------------------------------------------------------------------------------- /man/VarCorr.Rd: -------------------------------------------------------------------------------- 1 | \name{VarCorr} 2 | \alias{VarCorr} 3 | \title{Extract Variance-Covariance Matrix of the Random Effects 4 | } 5 | \description{It extracts the variance-covariance matrix of the 6 | random effects (variance component) from either the \code{meta} or 7 | \code{osmasem} objects. 8 | } 9 | \usage{ 10 | VarCorr(x, \dots) 11 | } 12 | %- maybe also 'usage' for other objects documented here. 13 | \arguments{ 14 | \item{x}{An object returned from either class 15 | \code{meta} or \code{osmasem} 16 | } 17 | \item{\dots}{Further arguments; currently none is used} 18 | } 19 | 20 | \value{A variance-covariance matrix of the random effects. 21 | } 22 | 23 | \author{Mike W.-L. 
Cheung 24 | } 25 | \note{It is similar to \code{coef(object, select="random")} in tssem. The main 26 | difference is that \code{coef()} returns a vector while 27 | \code{VarCorr()} returns its correspondent matrix. 28 | } 29 | \seealso{ \code{\link[metaSEM]{coef}}, \code{\link[metaSEM]{vcov}} } 30 | \examples{ 31 | ## Multivariate meta-analysis on the log of the odds 32 | ## The conditional sampling covariance is 0 33 | bcg <- meta(y=cbind(ln_Odd_V, ln_Odd_NV), data=BCG, 34 | v=cbind(v_ln_Odd_V, cov_V_NV, v_ln_Odd_NV)) 35 | VarCorr(bcg) 36 | } 37 | 38 | %% ~Make other sections like Warning with \section{Warning }{....} ~ 39 | % Add one or more standard keywords, see file 'KEYWORDS' in the 40 | % R documentation directory. 41 | \keyword{ methods } 42 | 43 | -------------------------------------------------------------------------------- /man/anova.Rd: -------------------------------------------------------------------------------- 1 | \name{anova} 2 | \alias{anova.wls} 3 | \alias{anova.meta} 4 | \alias{anova.meta3LFIML} 5 | \alias{anova.reml} 6 | \alias{anova.osmasem} 7 | \alias{anova.osmasem2} 8 | \alias{anova.mxsem} 9 | \title{Compare Nested Models with Likelihood Ratio Statistic 10 | } 11 | \description{It compares nested models with the likelihood ratio 12 | statistic from various objects. It is a wrapper of \code{\link[OpenMx]{mxCompare}}. 13 | } 14 | \usage{ 15 | \method{anova}{wls}(object, \dots, all=FALSE) 16 | \method{anova}{meta}(object, \dots, all=FALSE) 17 | \method{anova}{meta3LFIML}(object, \dots, all=FALSE) 18 | \method{anova}{reml}(object, \dots, all=FALSE) 19 | \method{anova}{osmasem}(object, \dots, all=FALSE) 20 | \method{anova}{osmasem2}(object, \dots, all=FALSE) 21 | \method{anova}{mxsem}(object, \dots, all=FALSE) 22 | } 23 | %- maybe also 'usage' for other objects documented here. 24 | \arguments{ 25 | \item{object}{An object or a list of objects of various classes. It will be passed to the 26 | \code{base} argument in \code{\link[OpenMx]{mxCompare}}. 27 | } 28 | \item{\dots}{An object or a list of objects of various classes. It will be passed to the 29 | \code{comparison} argument in \code{\link[OpenMx]{mxCompare}}. 30 | } 31 | \item{all}{A Boolean value on whether to compare all bases with all 32 | comparisons. It will be passed to the \code{all} argument in 33 | \code{\link[OpenMx]{mxCompare}}.} 34 | } 35 | % \details{Special care has to be taken to make sure that the models being 36 | % compared in \code{base} and \code{comparison} are nested. One common mistake is 37 | % comparing a model without predictor and a model with predictors in \code{\link[metaSEM]{meta}}. Since 38 | % the parameters in the predictors, e.g., means and variances, are also estimated in \code{\link[metaSEM]{meta}}, 39 | % these two models are not nested. The correct way to compare them is to 40 | % fix the regression coefficients of one model at zero while the 41 | % coefficients in the other model are free (see the example). If only one parameter is tested, an alternative (and easier) approach 42 | % is to request the likelihood-based CI directly with the 43 | % \code{intervals.type="LB"} argument in \code{\link[metaSEM]{meta}}. 44 | % 45 | % } 46 | \value{A table of comparisons between the models in base and comparison. 47 | } 48 | 49 | \author{Mike W.-L. 
Cheung 50 | } 51 | 52 | %% ~Make other sections like Warning with \section{Warning }{....} ~ 53 | 54 | \note{When the objects are class \code{\link[metaSEM]{wls}}, the degrees 55 | of freedom in the base and comparison models are incorrect, while the degrees of 56 | freedom of the difference between them is correct. If users want to 57 | obtain the correct degrees of freedom in the base and comparison 58 | models, they may individually apply the \code{\link[metaSEM]{summary}} function on 59 | the base and comparison models. 60 | } 61 | 62 | \examples{ 63 | ## Test the significance of a predictor with likelihood ratio test 64 | ## Model0: No predictor 65 | model0 <- meta(y=yi, v=vi, data=Hox02, model.name="No predictor") 66 | 67 | ## Model1: With a predictor 68 | model1 <- meta(y=yi, v=vi, x=weeks, data=Hox02, model.name="One predictor") 69 | 70 | ## Compare these two models 71 | anova(model1, model0) 72 | } 73 | % Add one or more standard keywords, see file 'KEYWORDS' in the 74 | % R documentation directory. 75 | \keyword{ methods } 76 | 77 | -------------------------------------------------------------------------------- /man/as.mxAlgebra.Rd: -------------------------------------------------------------------------------- 1 | \name{as.mxAlgebra} 2 | \alias{as.mxAlgebra} 3 | \title{Convert a Character Matrix into MxAlgebra-class 4 | } 5 | \description{It converts a character matrix into \code{MxAlgebra} object. 6 | } 7 | \usage{ 8 | as.mxAlgebra(x, startvalues=NULL, lbound=NULL, ubound=NULL, name="X") 9 | } 10 | %- maybe also 'usage' for other objects documented here. 11 | \arguments{ 12 | \item{x}{A character or numeric matrix, which consists of valid 13 | operators in \code{mxAlgebra}. 14 | } 15 | \item{startvalues}{A list of starting values of the free parameters. If it is 16 | not provided, all free parameters are assumed 0.} 17 | \item{lbound}{A list of lower bound of the free parameters. If it is 18 | not provided, all free parameters are assumed \code{NA}.} 19 | \item{ubound}{A list of upper bound of the free parameters. If it is 20 | not provided, all free parameters are assumed \code{NA}.} 21 | \item{name}{A character string of the names of the objects based 22 | on.} 23 | } 24 | \details{Suppose the name argument is "X", the output is a list of the 25 | following elements. 26 | } 27 | \value{ 28 | \item{mxalgebra}{An \code{mxAlgebra} object.} 29 | \item{parameters}{A column vector \code{mxMatrix} of the free parameters.} 30 | \item{list}{A list of mxMatrix to form the \code{mxAlgebra} object.} 31 | } 32 | 33 | \author{Mike W.-L. 
Cheung 34 | } 35 | %% ~Make other sections like Warning with \section{Warning }{....} ~ 36 | 37 | \seealso{\code{\link[metaSEM]{as.mxMatrix}}, \code{\link[OpenMx]{mxAlgebra}} 38 | } 39 | \examples{ 40 | ## a, b, and c are free parameters 41 | (A1 <- matrix(c(1, "a*b", "a^b", "exp(c)"), ncol=2, nrow=2)) 42 | ## [,1] [,2] 43 | ## [1,] "1" "a^b" 44 | ## [2,] "a*b" "exp(c)" 45 | 46 | A <- as.mxAlgebra(A1, startvalues=list(a=1, b=2), 47 | lbound=list(a=0), ubound=list(b=1, c=2), 48 | name="A") 49 | 50 | ## An object of mxAlgebra 51 | A$mxalgebra 52 | ## mxAlgebra 'A' 53 | ## $formula: rbind(cbind(A1_1, A1_2), cbind(A2_1, A2_2)) 54 | ## $result: (not yet computed) <0 x 0 matrix> 55 | ## dimnames: NULL 56 | 57 | ## A matrix of parameters 58 | A$parameters 59 | ## FullMatrix 'Avars' 60 | 61 | ## $labels 62 | ## [,1] 63 | ## [1,] "a" 64 | ## [2,] "b" 65 | ## [3,] "c" 66 | 67 | ## $values 68 | ## [,1] 69 | ## [1,] 1 70 | ## [2,] 2 71 | ## [3,] 0 72 | 73 | ## $free 74 | ## [,1] 75 | ## [1,] TRUE 76 | ## [2,] TRUE 77 | ## [3,] TRUE 78 | 79 | ## $lbound 80 | ## [,1] 81 | ## [1,] 0 82 | ## [2,] NA 83 | ## [3,] NA 84 | 85 | ## $ubound 86 | ## [,1] 87 | ## [1,] NA 88 | ## [2,] 1 89 | ## [3,] 2 90 | 91 | ## A list of matrices of elements for the mxAlgebra 92 | A$list 93 | ## $A1_1 94 | ## mxAlgebra 'A1_1' 95 | ## $formula: 1 96 | ## $result: (not yet computed) <0 x 0 matrix> 97 | ## dimnames: NULL 98 | 99 | ## $A2_1 100 | ## mxAlgebra 'A2_1' 101 | ## $formula: a * b 102 | ## $result: (not yet computed) <0 x 0 matrix> 103 | ## dimnames: NULL 104 | 105 | ## $A1_2 106 | ## mxAlgebra 'A1_2' 107 | ## $formula: a^b 108 | ## $result: (not yet computed) <0 x 0 matrix> 109 | ## dimnames: NULL 110 | 111 | ## $A2_2 112 | ## mxAlgebra 'A2_2' 113 | ## $formula: exp(c) 114 | ## $result: (not yet computed) <0 x 0 matrix> 115 | ## dimnames: NULL 116 | } 117 | \keyword{utilities} 118 | -------------------------------------------------------------------------------- /man/as.mxMatrix.Rd: -------------------------------------------------------------------------------- 1 | \name{as.mxMatrix} 2 | \alias{as.mxMatrix} 3 | \title{Convert a Matrix into MxMatrix-class 4 | } 5 | \description{It converts a matrix into \code{MxMatrix-class} via \code{mxMatrix}. 6 | } 7 | \usage{ 8 | as.mxMatrix(x, name, ...) 9 | } 10 | %- maybe also 'usage' for other objects documented here. 11 | \arguments{ 12 | \item{x}{A character or numeric matrix. If \code{x} is not a matrix, 13 | \code{as.matrix(x)} is applied first. 14 | } 15 | \item{name}{An optional character string as the name of the 16 | MxMatrix object created by mxModel function. If the \code{name} is missing, 17 | the name of \code{x} will be used.} 18 | \item{\dots}{Further arguments to be passed to 19 | \code{\link[OpenMx]{mxMatrix}}. It should be noted that \code{type}, 20 | \code{nrow}, \code{ncol}, \code{values}, \code{free}, \code{name} 21 | and \code{labels} will be created automatically. Thus, these 22 | arguments except labels should be avoided in \dots 23 | 24 | } 25 | } 26 | \details{If there are non-numeric values in \code{x}, they are treated 27 | as the labels of the parameters. If a "*" is 28 | present, the numeric value on the left-hand side will be treated as 29 | the starting value for a free parameter. If an "@" is present, the numeric value on the left-hand 30 | side will be considered as the value for a fixed parameter. If it is a matrix of numeric values, there are 31 | no free parameters in the output matrix. 
32 | } 33 | \value{A \code{\link[OpenMx]{MxMatrix-class}} object with the same 34 | dimensions as \code{x} 35 | } 36 | 37 | \author{Mike W.-L. Cheung 38 | } 39 | %% ~Make other sections like Warning with \section{Warning }{....} ~ 40 | 41 | \seealso{ \code{\link[OpenMx]{mxMatrix}}, 42 | \code{\link[metaSEM]{create.mxMatrix}}, 43 | \code{\link[metaSEM]{create.Fmatrix}}, 44 | \code{\link[metaSEM]{checkRAM}}, 45 | \code{\link[metaSEM]{lavaan2RAM}}, 46 | \code{\link[metaSEM]{as.symMatrix}} 47 | } 48 | \examples{ 49 | ## a and b are free parameters with starting values and labels 50 | (a1 <- matrix(c(1:4, "5*a", 6, "7*b", 8, 9), ncol=3, nrow=3)) 51 | # [,1] [,2] [,3] 52 | # [1,] "1" "4" "7*b" 53 | # [2,] "2" "5*a" "8" 54 | # [3,] "3" "6" "9" 55 | 56 | a1 <- as.mxMatrix(a1) 57 | 58 | ## a and b are fixed parameters without any labels, name="new2" 59 | (a2 <- matrix(1:9, ncol=3, nrow=3)) 60 | # [,1] [,2] [,3] 61 | # [1,] 1 4 7 62 | # [2,] 2 5 8 63 | # [3,] 3 6 9 64 | 65 | new2 <- as.mxMatrix(a2, name="new2") 66 | 67 | ## Free parameters without starting values 68 | (a3 <- matrix(c(1:4, "*a", 6, "*b", 8, 9), ncol=3, nrow=3)) 69 | # [,1] [,2] [,3] 70 | # [1,] "1" "4" "*b" 71 | # [2,] "2" "*a" "8" 72 | # [3,] "3" "6" "9" 73 | 74 | a3 <- as.mxMatrix(a3, lbound=0) 75 | 76 | ## A free parameter without label 77 | (a4 <- matrix(c(1:4, "5*", 6, "7*b", 8, 9), ncol=3, nrow=3)) 78 | # [,1] [,2] [,3] 79 | # [1,] "1" "4" "7*b" 80 | # [2,] "2" "5*" "8" 81 | # [3,] "3" "6" "9" 82 | 83 | a4 <- as.mxMatrix(a4) 84 | 85 | ## Convert a scalar into mxMatrix object 86 | ## "name" is required as "3*a" is not a valid name. 87 | (a5 <- as.mxMatrix("3*a", name="a5")) 88 | 89 | ## Free and fixed parameters 90 | (a6 <- matrix(c(1, "2*a", "3@b", 4), ncol=2, nrow=2)) 91 | 92 | as.mxMatrix(a6) 93 | } 94 | \keyword{utilities} 95 | -------------------------------------------------------------------------------- /man/as.symMatrix.Rd: -------------------------------------------------------------------------------- 1 | \name{as.symMatrix} 2 | \alias{as.symMatrix} 3 | \title{Convert a Character Matrix with Starting Values to a Character Matrix 4 | without Starting Values 5 | } 6 | \description{It converts a character matrix with starting values to a 7 | character matrix without the starting values. 8 | } 9 | \usage{ 10 | as.symMatrix(x) 11 | } 12 | %- maybe also 'usage' for other objects documented here. 13 | \arguments{ 14 | \item{x}{A character or numeric matrix or a list of character or 15 | numeric matrices.} 16 | } 17 | \details{If there are non-numeric values in \code{x}, they are treated 18 | as the labels of the free parameters. If a "*" is 19 | present, the numeric value on the left-hand side will be treated as 20 | the starting value for a free parameter or a fixed value for a fixed parameter. If it is a matrix of numeric values, there are 21 | no free parameters in the output matrix. This function removes the 22 | starting values and "*" in the matrices. 23 | } 24 | \value{A character matrix. 25 | } 26 | 27 | \author{Mike W.-L. 
Cheung 28 | } 29 | %% ~Make other sections like Warning with \section{Warning }{....} ~ 30 | 31 | \seealso{ \code{\link[metaSEM]{as.mxMatrix}} 32 | } 33 | \examples{ 34 | ## a and b are free parameters with starting values and labels 35 | (a1 <- matrix(c(1:4, "5*a", 6, "7*b", 8, 9), ncol=3, nrow=3)) 36 | # [,1] [,2] [,3] 37 | # [1,] "1" "4" "7*b" 38 | # [2,] "2" "5*a" "8" 39 | # [3,] "3" "6" "9" 40 | 41 | (as.symMatrix(a1)) 42 | # [,1] [,2] [,3] 43 | # [1,] "1" "4" "b" 44 | # [2,] "2" "a" "8" 45 | # [3,] "3" "6" "9" 46 | } 47 | \keyword{utilities} 48 | -------------------------------------------------------------------------------- /man/bdiagMat.Rd: -------------------------------------------------------------------------------- 1 | \name{bdiagMat} 2 | \alias{bdiagMat} 3 | \title{Create a Block Diagonal Matrix 4 | }\description{It creates a block diagonal matrix from a list of numeric 5 | or character matrices. 6 | } 7 | \usage{ 8 | bdiagMat(x) 9 | } 10 | %- maybe also 'usage' for other objects documented here. 11 | \arguments{ 12 | \item{x}{A list of numeric or character matrices (or values) 13 | } 14 | } 15 | 16 | \value{A numeric or character block diagonal matrix 17 | %% ~Describe the value returned 18 | %% If it is a LIST, use 19 | %% \item{comp1 }{Description of 'comp1'} 20 | %% \item{comp2 }{Description of 'comp2'} 21 | %% ... 22 | } 23 | \references{It was based on a function posted by Scott Chasalow at http://www.math.yorku.ca/Who/Faculty/Monette/pub/stmp/0827.html. 24 | %% ~put references to the literature/web site here ~ 25 | } 26 | \author{Mike W.-L. Cheung 27 | } 28 | 29 | %% ~Make other sections like Warning with \section{Warning }{....} ~ 30 | 31 | \seealso{\code{\link[metaSEM]{bdiagRep}}, \code{\link[metaSEM]{matrix2bdiag}} 32 | %% ~~objects to See Also as \code{\link{help}}, ~~~ 33 | } 34 | \examples{ 35 | ## Block diagonal matrix of numbers 36 | bdiagMat( list(matrix(1:4,nrow=2,ncol=2), 37 | matrix(5:6,nrow=1,ncol=2)) ) 38 | # [,1] [,2] [,3] [,4] 39 | # [1,] 1 3 0 0 40 | # [2,] 2 4 0 0 41 | # [3,] 0 0 5 6 42 | 43 | ## Block diagonal matrix of characters 44 | bdiagMat( list(matrix(letters[1:4],nrow=2,ncol=2), 45 | matrix(letters[5:6],nrow=1,ncol=2)) ) 46 | # [,1] [,2] [,3] [,4] 47 | # [1,] "a" "c" "0" "0" 48 | # [2,] "b" "d" "0" "0" 49 | # [3,] "0" "0" "e" "f" 50 | } 51 | % Add one or more standard keywords, see file 'KEYWORDS' in the 52 | % R documentation directory. 53 | \keyword{ utilities } 54 | -------------------------------------------------------------------------------- /man/bdiagRep.Rd: -------------------------------------------------------------------------------- 1 | \name{bdiagRep} 2 | \alias{bdiagRep} 3 | \title{Create a Block Diagonal Matrix by Repeating the Input 4 | } 5 | \description{It creates a block diagonal matrix by repeating the input 6 | matrix several times. 7 | } 8 | \usage{ 9 | bdiagRep(x, times) 10 | } 11 | %- maybe also 'usage' for other objects documented here. 12 | \arguments{ 13 | \item{x}{A numeric or character matrix (or values) 14 | } 15 | \item{times}{Number of times of \code{x} to be repeated 16 | %% ~~Describe \code{times} here~~ 17 | } 18 | } 19 | \value{A numeric or character block diagonal matrix 20 | %% ~Describe the value returned 21 | %% If it is a LIST, use 22 | %% \item{comp1 }{Description of 'comp1'} 23 | %% \item{comp2 }{Description of 'comp2'} 24 | %% ... 25 | } 26 | \author{Mike W.-L. 
Cheung 27 | %% ~~who you are~~ 28 | } 29 | %% ~Make other sections like Warning with \section{Warning }{....} ~ 30 | 31 | \seealso{ \code{\link[metaSEM]{bdiagMat}}, \code{\link[metaSEM]{matrix2bdiag}} 32 | %% ~~objects to See Also as \code{\link{help}}, ~~~ 33 | } 34 | \examples{ 35 | ## Block diagonal matrix of numerics 36 | bdiagRep( matrix(1:4,nrow=2,ncol=2), 2 ) 37 | # [,1] [,2] [,3] [,4] 38 | # [1,] 1 3 0 0 39 | # [2,] 2 4 0 0 40 | # [3,] 0 0 1 3 41 | # [4,] 0 0 2 4 42 | 43 | ## Block diagonal matrix of characters 44 | bdiagRep( matrix(letters[1:4],nrow=2,ncol=2), 2 ) 45 | # [,1] [,2] [,3] [,4] 46 | # [1,] "a" "c" "0" "0" 47 | # [2,] "b" "d" "0" "0" 48 | # [3,] "0" "0" "a" "c" 49 | # [4,] "0" "0" "b" "d" 50 | } 51 | % Add one or more standard keywords, see file 'KEYWORDS' in the 52 | % R documentation directory. 53 | \keyword{ utilities } 54 | -------------------------------------------------------------------------------- /man/bootuniR1.Rd: -------------------------------------------------------------------------------- 1 | \name{bootuniR1} 2 | \alias{bootuniR1} 3 | \title{Parametric bootstrap on the univariate R (uniR) object 4 | } 5 | \description{It generates correlation matrices with the parametric bootstrap on the univariate R 6 | (uniR) object. 7 | } 8 | \usage{ 9 | bootuniR1(x, Rep, nonPD.pop=c("replace", "nearPD", "accept")) 10 | } 11 | \arguments{ 12 | \item{x}{An object of class 'uniR1'} 13 | \item{Rep}{Number of replications of the parametric bootstrap} 14 | \item{nonPD.pop}{If it is \code{replace}, generated non-positive 15 | definite matrices are replaced by generated new ones which are 16 | positive definite. If it is \code{nearPD}, they are replaced by 17 | nearly positive definite matrices by calling 18 | \code{Matrix::nearPD()}. If it is \code{accept}, they are accepted.} 19 | } 20 | \value{An object of the generated correlation matrices. 21 | } 22 | \details{ 23 | This function implements the parametric bootstrap approach suggested 24 | by Yu et al. (2016). It is included in this package for research 25 | interests. Please refer to Cheung (2018) for the issues associated 26 | with this parametric bootstrap approach. 27 | } 28 | \references{ 29 | Cheung, M. W.-L. (2018). Issues in solving the problem of effect size 30 | heterogeneity in meta-analytic structural equation modeling: A 31 | commentary and simulation study on Yu, Downes, Carter, and O'Boyle 32 | (2016). \emph{Journal of Applied Psychology}, \bold{103}, 787-803. 33 | 34 | Yu, J. (Joya), Downes, P. E., Carter, K. M., & O'Boyle, 35 | E. H. (2016). The problem of effect size heterogeneity 36 | in meta-analytic structural equation modeling. 37 | \emph{Journal of Applied Psychology}, \bold{101}, 1457-1473. 38 | } 39 | \author{Mike W.-L. Cheung 40 | } 41 | \seealso{ \code{\link[metaSEM]{rCor}}, \code{\link[metaSEM]{bootuniR2}}, 42 | \code{\link[metaSEM]{Nohe15}} 43 | } 44 | \keyword{bootuniR} 45 | -------------------------------------------------------------------------------- /man/bootuniR2.Rd: -------------------------------------------------------------------------------- 1 | \name{bootuniR2} 2 | \alias{bootuniR2} 3 | \title{Fit Models on the bootstrapped correlation matrices 4 | } 5 | \description{It fits structural equation models on the bootstrapped 6 | correlation matrices. 7 | } 8 | \usage{ 9 | bootuniR2(model, data, n, ...) 10 | } 11 | %- maybe also 'usage' for other objects documented here. 
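Since neither this page nor \code{bootuniR1} ships an example, here is a rough end-to-end sketch (not part of the original documentation) of the bootstrap workflow: pool the correlation matrices with \code{uniR1()}, generate bootstrap matrices with \code{bootuniR1()}, and fit a model on each of them with \code{bootuniR2()}. The \code{W1}, \code{S1}, \code{W2}, and \code{S2} variable names and the cross-lagged model are assumptions borrowed from the \code{Nohe15} help page, and using the median sample size as \code{n} is only one possible choice.

## Illustrative sketch only; variable names and model are assumed
uni1 <- uniR1(Nohe15A1$data, Nohe15A1$n)

## Generate bootstrap correlation matrices from the pooled correlation matrix
boot.cor <- bootuniR1(uni1, Rep=50)

## Fit an assumed cross-lagged panel model on each bootstrapped matrix
model <- 'W2 ~ W1 + S1
          S2 ~ W1 + S1
          W1 ~~ S1
          W2 ~~ S2'
boot.fit <- bootuniR2(model=model, data=boot.cor, n=median(Nohe15A1$n))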
12 | \arguments{ 13 | \item{model}{A model in \code{\link[lavaan]{sem}} syntax.} 14 | \item{data}{A list of correlation matrices.} 15 | \item{n}{Sample size in fitting the structural equation models} 16 | \item{\dots}{Further arguments to be passed to \code{\link[lavaan]{sem}}. 17 | } 18 | } 19 | 20 | \value{A list of the fitted object from \code{\link[lavaan]{sem}}. 21 | } 22 | \details{This function fits the lavaan model with the bootstrapped correlation 23 | matrices. It implements the parametric bootstrap approach suggested 24 | by Yu et al. (2016). It is included in this package for research 25 | interests. Please refer to Cheung (2018) for the issues associated 26 | with this parametric bootstrap approach. 27 | } 28 | \references{ 29 | Cheung, M. W.-L. (2018). Issues in solving the problem of effect size 30 | heterogeneity in meta-analytic structural equation modeling: A 31 | commentary and simulation study on Yu, Downes, Carter, and O'Boyle 32 | (2016). \emph{Journal of Applied Psychology}, \bold{103}, 787-803. 33 | 34 | Yu, J. (Joya), Downes, P. E., Carter, K. M., & O'Boyle, 35 | E. H. (2016). The problem of effect size heterogeneity 36 | in meta-analytic structural equation modeling. 37 | \emph{Journal of Applied Psychology}, \bold{101}, 1457-1473. 38 | } 39 | \author{Mike W.-L. Cheung 40 | } 41 | \seealso{ \code{\link[metaSEM]{bootuniR2}}, 42 | \code{\link[metaSEM]{tssemParaVar}}, \code{\link[metaSEM]{Nohe15}} 43 | } 44 | \keyword{bootuniR} 45 | -------------------------------------------------------------------------------- /man/calEffSizes.Rd: -------------------------------------------------------------------------------- 1 | \name{calEffSizes} 2 | \alias{calEffSizes} 3 | \title{Calculate Effect Sizes using lavaan Models 4 | }\description{It calculates effect sizes with Delta Method by formulating the effect sizes 5 | as functions of SEM in lavaan. 6 | } 7 | \usage{ 8 | calEffSizes(model, data=NULL, n, Cov, Mean=NULL, group=NULL, lavaan.output=FALSE, 9 | warn=FALSE, ...) 10 | } 11 | \arguments{ 12 | \item{model}{A lavaan model. Effect sizes are defined as functions of 13 | SEM parameters with \code{:=}.} 14 | \item{data}{A data frame of the observed variables. If it is 15 | \code{NULL}, summary statistics are required.} 16 | \item{n}{Sample sizes} 17 | \item{Cov}{A covariance matrix or a list of covariance matrices.} 18 | \item{Mean}{Optional sample means.} 19 | \item{group}{A character of the variable name in the data frame defining the groups in a multiple group analysis.} 20 | \item{lavaan.output}{If \code{TRUE}, it returns the fitted object 21 | instead of the effect sizes and their sampling covariance matrix.} 22 | \item{warn}{If \code{FALSE}, it suppresses lavaan related warnings.} 23 | \item{\dots}{Further arguments passed to \code{\link[lavaan]{sem}}.} 24 | } 25 | 26 | \value{Effect sizes and their sampling covariance matrix or a lavaan 27 | fitted object. 28 | } 29 | \note{The input matrices are treated as covariance matrices unless there 30 | are explicit constraints in the model. 31 | } 32 | \references{ 33 | Cheung, M. W.-L. (2015). \emph{Meta-analysis: A structural equation 34 | modeling approach}. Chichester, West Sussex: John Wiley & Sons, Inc. 35 | 36 | Cheung, M. W.-L. (2018). Computing multivariate effect sizes and their sampling covariance matrices with structural equation modeling: Theory, examples, and computer simulations. \emph{Frontiers in Psychology}, \bold{9}(1387). https://doi.org/10.3389/fpsyg.2018.01387 37 | } 38 | \author{Mike W.-L. 
Cheung 39 | } 40 | 41 | %% ~Make other sections like Warning with \section{Warning }{....} ~ 42 | 43 | \seealso{\code{\link[metaSEM]{smdMES}}, \code{\link[metaSEM]{smdMTS}} 44 | %% ~~objects to See Also as \code{\link{help}}, ~~~ 45 | } 46 | \examples{ 47 | \donttest{ 48 | ## Select ATT, Bi, and BEH 49 | obs.vars <- c("BEH", "BI", "ATT") 50 | 51 | ## Select one study from Cooke16 for illustration 52 | my.cor <- Cooke16$data[[4]][obs.vars, obs.vars] 53 | my.n <- Cooke16$n[4] 54 | 55 | ## Effect sizes: indirect effect and direct effect 56 | model <- "BEH ~ c*ATT + b*BI 57 | BI ~ a*ATT 58 | ## Indirect effect 59 | Ind := a*b 60 | Dir := c" 61 | 62 | calEffSizes(model=model, n=my.n, Cov=my.cor, lavaan.output=FALSE) 63 | 64 | ## Return the lavaan fitted model 65 | fit <- calEffSizes(model=model, n=my.n, Cov=my.cor, lavaan.output=TRUE) 66 | lavaan::summary(fit) 67 | 68 | lavaan::parameterestimates(fit) 69 | } 70 | } 71 | \keyword{meta-analysis } 72 | -------------------------------------------------------------------------------- /man/checkRAM.Rd: -------------------------------------------------------------------------------- 1 | \name{checkRAM} 2 | \alias{checkRAM} 3 | \title{Check the correctness of the RAM formulation 4 | } 5 | \description{It provides simple checks on the correctness of the RAM formulation. 6 | } 7 | \usage{ 8 | checkRAM(Amatrix, Smatrix, cor.analysis=TRUE) 9 | } 10 | \arguments{ 11 | \item{Amatrix}{An asymmetric matrix in the RAM specification with 12 | \code{\link[OpenMx]{MxMatrix-class}}. If it is a matrix, it will be converted into \code{\link[OpenMx]{MxMatrix-class}} by the \code{as.mxMatrix} function. 13 | } 14 | \item{Smatrix}{A symmetric matrix in the RAM specification with 15 | \code{\link[OpenMx]{MxMatrix-class}}. If it is a matrix, it will be converted into \code{\link[OpenMx]{MxMatrix-class}} by the \code{as.mxMatrix} function. 16 | } 17 | \item{cor.analysis}{Logical. Analysis of correlation or covariance 18 | structure. There are additional checks for cor.analysis=\code{TRUE}.} 19 | } 20 | \value{It returns silently if no error has been detected; otherwise, it 21 | returns a warning message. 22 | } 23 | \author{Mike W.-L. Cheung 24 | } 25 | 26 | \seealso{ \code{\link[metaSEM]{as.mxMatrix}}, 27 | \code{\link[metaSEM]{lavaan2RAM}} 28 | } 29 | \examples{ 30 | \donttest{ 31 | ## Digman97 example 32 | model1 <- "## Factor loadings 33 | Alpha=~A+C+ES 34 | Beta=~E+I 35 | ## Factor correlation 36 | Alpha~~Beta" 37 | 38 | RAM1 <- lavaan2RAM(model1, obs.variables=c("A","C","ES","E","I"), 39 | A.notation="on", S.notation="with") 40 | RAM1 41 | 42 | ## The model is okay. 43 | checkRAM(Amatrix=RAM1$A, Smatrix=RAM1$S) 44 | 45 | ## Hunter83 example 46 | model2 <- "## Regression paths 47 | Job_knowledge ~ A2J*Ability 48 | Work_sample ~ A2W*Ability + J2W*Job_knowledge 49 | Supervisor ~ J2S*Job_knowledge + W2S*Work_sample 50 | 51 | ## Fix the variance of Ability at 1 52 | Ability ~~ 1*Ability 53 | 54 | ## Label the error variances of the dependent variables 55 | Job_knowledge ~~ VarE_J*Job_knowledge 56 | Work_sample ~~ VarE_W*Work_sample 57 | Supervisor ~~ VarE_S*Supervisor" 58 | 59 | RAM2 <- lavaan2RAM(model2, obs.variables=c("Ability","Job_knowledge", 60 | "Work_sample","Supervisor")) 61 | 62 | ## The model is okay. 
63 | checkRAM(Amatrix=RAM2$A, Smatrix=RAM2$S) 64 | } 65 | } 66 | \keyword{utilities} 67 | 68 | -------------------------------------------------------------------------------- /man/coef.Rd: -------------------------------------------------------------------------------- 1 | \name{coef} 2 | \alias{coef.tssem1FEM} 3 | \alias{coef.tssem1FEM.cluster} 4 | \alias{coef.tssem1REM} 5 | \alias{coef.wls} 6 | \alias{coef.wls.cluster} 7 | \alias{coef.meta} 8 | \alias{coef.meta3LFIML} 9 | \alias{coef.reml} 10 | \alias{coef.osmasem} 11 | \alias{coef.osmasem2} 12 | \alias{coef.mxsem} 13 | 14 | \title{Extract Parameter Estimates from various classes. 15 | } 16 | \description{It extracts the parameter estimates from objects of various 17 | classes. 18 | } 19 | \usage{ 20 | \method{coef}{tssem1FEM}(object, \dots) 21 | \method{coef}{tssem1FEM.cluster}(object, \dots) 22 | \method{coef}{tssem1REM}(object, select = c("all", "fixed", "random"), \dots) 23 | \method{coef}{wls}(object, \dots) 24 | \method{coef}{wls.cluster}(object, \dots) 25 | \method{coef}{meta}(object, select = c("all", "fixed", "random"), \dots) 26 | \method{coef}{meta3LFIML}(object, select = c("all", "fixed", "random", "allX"), \dots) 27 | \method{coef}{reml}(object, \dots) 28 | \method{coef}{osmasem}(object, select=c("fixed", "all", "random"), \dots) 29 | \method{coef}{osmasem2}(object, select=c("fixed", "all", "random"), \dots) 30 | \method{coef}{mxsem}(object, \dots) 31 | } 32 | %- maybe also 'usage' for other objects documented here. 33 | \arguments{ 34 | \item{object}{An object returned from either class 35 | \code{tssem1FEM}, class \code{tssem1FEM.cluster}, class \code{tssem1REM}, 36 | class \code{wls}, class \code{wls.cluster}, class \code{meta}, class 37 | \code{reml}, class \code{osmasem}, class \code{osmasem2}, or class \code{sem} 38 | } 39 | \item{select}{Select \code{all} for both fixed- and random-effects parameters, \code{fixed} for the 40 | fixed-effects parameters or \code{random} for the random-effects 41 | parameters. For \code{meta3LFIML} objects, \code{allX} is used to extract 42 | all parameters including the predictors and auxiliary variables. 43 | } 44 | \item{\dots}{Further arguments; currently none is used} 45 | } 46 | \note{\code{coef.sem} is simply a wrapper of 47 | \code{omxGetParameters}. Extra arguments will be passed to it.} 48 | 49 | \value{Parameter estimates for both fixed-effects (if any) and random-effects (if any) 50 | } 51 | 52 | \author{Mike W.-L. Cheung 53 | } 54 | 55 | \seealso{ \code{\link[metaSEM]{tssem1}}, \code{\link[metaSEM]{wls}}, 56 | \code{\link[metaSEM]{meta}}, \code{\link[metaSEM]{reml}}, 57 | \code{\link[OpenMx]{omxGetParameters}}, \code{\link[metaSEM]{osmasem}} 58 | } 59 | \examples{ 60 | ## Random-effects meta-analysis 61 | model1 <- meta(y=yi, v=vi, data=Hox02) 62 | coef(model1) 63 | 64 | ## Fixed-effects only 65 | coef(model1, select="fixed") 66 | } 67 | % Add one or more standard keywords, see file 'KEYWORDS' in the 68 | % R documentation directory. 69 | \keyword{ methods } 70 | 71 | -------------------------------------------------------------------------------- /man/create.Fmatrix.Rd: -------------------------------------------------------------------------------- 1 | \name{create.Fmatrix} 2 | \alias{create.Fmatrix} 3 | \title{Create an F matrix to select observed variables 4 | } 5 | \description{It creates an F matrix to select observed variables for the \code{wls} 6 | function. 7 | } 8 | \usage{ 9 | create.Fmatrix(x, name, as.mxMatrix=TRUE, ...)
10 | } 11 | %- maybe also 'usage' for other objects documented here. 12 | \arguments{ 13 | \item{x}{A vector of logical type 14 | } 15 | \item{name}{Name of the matrix. If it is missing, "Fmatrix" will be used. 16 | } 17 | \item{as.mxMatrix}{Logical. If it is \code{TRUE}, the output is a matrix of 18 | \code{MxMatrix-class}. If it is \code{FALSE}, it is a numeric matrix.} 19 | \item{\dots}{Not used.} 20 | } 21 | 22 | \author{Mike W.-L. Cheung 23 | } 24 | 25 | \seealso{ \code{\link[metaSEM]{as.mxMatrix}}, \code{\link[metaSEM]{create.mxMatrix}}, \code{\link[metaSEM]{wls}} 26 | } 27 | 28 | \examples{ 29 | ## Select the first 3 variables while the other 2 variables are latent. 30 | create.Fmatrix(c(1,1,1,0,0)) 31 | # FullMatrix 'Fmatrix' 32 | # 33 | # @labels: No labels assigned. 34 | # 35 | # @values 36 | # [,1] [,2] [,3] [,4] [,5] 37 | # [1,] 1 0 0 0 0 38 | # [2,] 0 1 0 0 0 39 | # [3,] 0 0 1 0 0 40 | # 41 | # @free: No free parameters. 42 | # 43 | # @lbound: No lower bounds assigned. 44 | # 45 | # @ubound: No upper bounds assigned. 46 | 47 | create.Fmatrix(c(1,1,1,0,0), as.mxMatrix=FALSE) 48 | # [,1] [,2] [,3] [,4] [,5] 49 | # [1,] 1 0 0 0 0 50 | # [2,] 0 1 0 0 0 51 | # [3,] 0 0 1 0 0 52 | } 53 | \keyword{utilities} 54 | -------------------------------------------------------------------------------- /man/create.Tau2.Rd: -------------------------------------------------------------------------------- 1 | \name{create.Tau2} 2 | \alias{create.Tau2} 3 | \title{Create a variance component of the heterogeneity of the random effects 4 | } 5 | \description{It creates the variance component of the heterogeneity of the 6 | random effects by decomposing the variance component into matrices of 7 | correlation and standard deviations. 8 | } 9 | \usage{ 10 | create.Tau2(RAM, no.var, Tau1.labels=seq(no.var), 11 | RE.type = c("Diag", "Symm", "Zero", "User"), 12 | level=c("single", "between", "within"), 13 | RE.User=NULL, Transform = c("expLog", "sqSD"), 14 | RE.startvalues=0.05) 15 | } 16 | %- maybe also 'usage' for other objects documented here. 17 | \arguments{ 18 | \item{RAM}{The RAM model for testing. \code{no.var} is calculated from it.} 19 | \item{no.var}{If \code{RAM} is missing, the user has to specify 20 | the \code{no.var} argument. It represents the number of random effects, so that the variance component is a \code{no.var} by \code{no.var} matrix.} 21 | \item{Tau1.labels}{Parameter labels in \code{Tau1}. The default is 22 | \code{Tau1_1}, \code{Tau1_2}, etc. 23 | } 24 | \item{RE.type}{Either \code{"Diag"}, \code{"Symm"}, \code{"Zero"} or \code{"User"}. If 25 | it is \code{"Diag"} (the default if missing), a diagonal matrix is used 26 | for the random effects meaning that the random effects are 27 | independent. If it is \code{"Symm"}, a symmetric matrix is used for the random effects on the covariances 28 | among the correlation (or covariance) vectors. If it is 29 | \code{"Zero"}, a zero matrix is assumed on the variance component of 30 | the random effects. If it is \code{"User"}, users have to specify the 31 | \code{RE.User} argument.} 32 | \item{level}{Whether it is for single-level, between-, or within-level 33 | analyses. The only difference is the names of the matrices.} 34 | \item{RE.User}{It represents the \code{no.var} by \code{no.var} symmetric matrix 35 | of \code{TRUE} or \code{FALSE} for the variance component. If the 36 | elements are \code{FALSE}, they are fixed at 0.} 37 | \item{Transform}{Either \code{"expLog"} or \code{"sqSD"}.
If it is 38 | \code{"expLog"}, the variances are estimated by applying a log and exp 39 | transformation. If it is \code{"sqSD"}, the variances are estimated by 40 | applying a square on the SD. The transformation may improve the 41 | estimation when the heterogeneity is small or close to zero.} 42 | \item{RE.startvalues}{Starting values for the variances.} 43 | } 44 | 45 | \value{A list of \code{MxMatrix-class}. The variance component is 46 | computed in \code{Tau2}.} 47 | 48 | \author{Mike W.-L. Cheung 49 | } 50 | \seealso{ \code{\link[metaSEM]{osmasem}}, 51 | \code{\link[metaSEM]{create.V}}, \code{\link[metaSEM]{create.vechsR}} 52 | } 53 | 54 | \examples{ 55 | \donttest{ 56 | T0 <- create.Tau2(no.var=4, RE.type="Diag", Transform="expLog", RE.startvalues=0.05) 57 | T0 58 | 59 | T1 <- create.Tau2(no.var=4, Tau1.labels=c("a", "b", "c", "d")) 60 | T1 61 | } 62 | } 63 | % Add one or more standard keywords, see file 'KEYWORDS' in the 64 | % R documentation directory. 65 | \keyword{osmasem} 66 | \keyword{osmasem3L} 67 | -------------------------------------------------------------------------------- /man/create.V.Rd: -------------------------------------------------------------------------------- 1 | \name{create.V} 2 | \alias{create.V} 3 | \title{Create a V-known matrix} 4 | \description{It creates a V-known matrix of the sampling covariance 5 | matrix using definition variables. 6 | } 7 | \usage{ 8 | create.V(x, type = c("Symm", "Diag", "Full"), as.mxMatrix = TRUE) 9 | } 10 | \arguments{ 11 | \item{x}{A character vector of variable names of the sampling covariance matrix.} 12 | \item{type}{Either \code{"Symm"}, \code{"Diag"} or 13 | \code{"Full"}. Suppose the number of variables is \eqn{p}, the 14 | numbers of variable names for \code{"Symm"}, \code{"Diag"}, and 15 | \code{"Full"} are \eqn{p(p-1)/2 }{p(p-1)/2}, \eqn{p}{p}, and 16 | \eqn{p*p}{p*p}, respectively. The elements are arranged in a column major.} 17 | \item{as.mxMatrix}{Logical. Whether to convert the output into \code{MxMatrix-class}.} 18 | } 19 | 20 | \value{A list of \code{MxMatrix-class}. The V-known sampling covariance 21 | matrix is computed in \code{V}.} 22 | 23 | \author{Mike W.-L. Cheung 24 | } 25 | \seealso{ \code{\link[metaSEM]{osmasem}}, 26 | \code{\link[metaSEM]{create.Tau2}}, \code{\link[metaSEM]{create.vechsR}} 27 | } 28 | 29 | \examples{ 30 | \donttest{ 31 | my.df <- Cor2DataFrame(Nohe15A1) 32 | 33 | ## Create known sampling variance covariance matrix 34 | V0 <- create.V(my.df$vlabels) 35 | V0 36 | } 37 | } 38 | % Add one or more standard keywords, see file 'KEYWORDS' in the 39 | % R documentation directory. 40 | \keyword{ osmasem } 41 | -------------------------------------------------------------------------------- /man/create.modMatrix.Rd: -------------------------------------------------------------------------------- 1 | \name{create.modMatrix} 2 | \alias{create.modMatrix} 3 | \title{Create a moderator matrix used in OSMASEM 4 | } 5 | \description{It creates a moderator matrix used in OSMASEM. 6 | } 7 | \usage{ 8 | create.modMatrix(RAM, output=c("A", "S"), mod) 9 | } 10 | \arguments{ 11 | \item{RAM}{A RAM object including a list of matrices of the model 12 | returned from \code{\link[metaSEM]{lavaan2RAM}}.} 13 | \item{output}{Whether the output is an "A" or "S" matrix.} 14 | \item{mod}{A string of moderator in the dataset.} 15 | } 16 | 17 | \value{A character matrix. 18 | } 19 | \author{Mike W.-L. 
Cheung 20 | } 21 | 22 | \examples{ 23 | ## A multiple regression model 24 | model <- "y ~ x1 + x2 25 | x1 ~~ 1*x1 26 | x2 ~~ 1*x2 27 | x1 ~~ x2" 28 | 29 | ## RAM specification 30 | RAM <- lavaan2RAM(model, obs.variables=c("y", "x1", "x2")) 31 | 32 | ## Create a moderator matrix on A with "meanAge" as the moderator. 33 | A1 <- create.modMatrix(RAM=RAM, output="A", mod="meanAge") 34 | A1 35 | 36 | ## Create a moderator matrix on S with "meanAge" as the moderator. 37 | S1 <- create.modMatrix(RAM=RAM, output="S", mod="meanAge") 38 | S1 39 | } 40 | 41 | % Add one or more standard keywords, see file 'KEYWORDS' in the 42 | % R documentation directory. 43 | \keyword{utilities} 44 | -------------------------------------------------------------------------------- /man/create.mxMatrix.Rd: -------------------------------------------------------------------------------- 1 | \name{create.mxMatrix} 2 | \alias{create.mxMatrix} 3 | \title{Convert a Vector into MxMatrix-class 4 | } 5 | \description{It converts a vector into \code{MxMatrix-class} via \code{mxMatrix}. 6 | } 7 | \usage{ 8 | create.mxMatrix(x, type=c("Full","Symm","Diag","Stand"), ncol=NA, 9 | nrow=NA, as.mxMatrix=TRUE, byrow=FALSE, ...) 10 | } 11 | %- maybe also 'usage' for other objects documented here. 12 | \arguments{ 13 | \item{x}{A character or numeric vector 14 | } 15 | \item{type}{Matrix type similar to those listed in 16 | \code{\link[OpenMx]{mxMatrix}} 17 | } 18 | \item{ncol}{The number of columns. It is necessary when 19 | \code{type="Full"}. It is ignored and determined by the length of 20 | \code{x} for the other types of matrices. 21 | } 22 | \item{nrow}{The number of rows. It is necessary when 23 | \code{type="Full"}. It is ignored and determined by the length of 24 | \code{x} for the other types of matrices. 25 | } 26 | \item{as.mxMatrix}{Logical. If it is \code{TRUE}, the output is a matrix of 27 | \code{MxMatrix-class}. If it is \code{FALSE}, it is a numeric matrix.} 28 | \item{byrow}{Logical. If \code{FALSE} (the default), the matrix is filled by columns; otherwise, the matrix is filled by rows. 29 | } 30 | \item{\dots}{Further arguments to be passed to 31 | \code{\link[OpenMx]{mxMatrix}}. Please note that \code{type}, 32 | \code{nrow}, \code{ncol}, \code{values}, \code{free} and \code{labels} will be created automatically. Thus, these arguments, 33 | except \code{labels}, should be avoided in \dots 34 | } 35 | } 36 | \details{If there are non-numeric values in \code{x}, they are treated 37 | as the labels of the free parameters. If an "*" is 38 | present, the numeric value on the left-hand side will be treated as 39 | the starting value for a free parameter or a fixed value for a fixed parameter. If \code{x} contains only numeric values, there are 40 | no free parameters in the output matrix. \code{nrow} and \code{ncol} 41 | will be calculated from the length of \code{x} unless 42 | \code{type="Full"} is specified. 43 | } 44 | \value{A \code{\link[OpenMx]{MxMatrix-class}} object with the same 45 | dimensions as \code{x} 46 | } 47 | 48 | \author{Mike W.-L.
Cheung 49 | } 50 | 51 | %% ~Make other sections like Warning with \section{Warning }{....} ~ 52 | 53 | \seealso{ \code{\link[OpenMx]{mxMatrix}}, 54 | \code{\link[metaSEM]{as.mxMatrix}}, \code{\link[metaSEM]{create.Fmatrix}} 55 | %% ~~objects to See Also as \code{\link{help}}, ~~~ 56 | } 57 | \examples{ 58 | ## a and b are free parameters with starting values and labels 59 | (a1 <- c(1:4, "5*a", 6, "7*b", 8, 9)) 60 | 61 | (mat1 <- create.mxMatrix(a1, ncol=3, nrow=3, name="mat1")) 62 | 63 | ## Arrange the elements by row 64 | (mat2 <- create.mxMatrix(a1, ncol=3, nrow=3, as.mxMatrix=FALSE, byrow=TRUE)) 65 | 66 | (a3 <- c(1:3, "4*f4", "5*f5", "6*f6")) 67 | 68 | (mat3 <- create.mxMatrix(a3, type="Symm", name="mat3")) 69 | 70 | ## Create a character matrix 71 | (mat4 <- create.mxMatrix(a3, type="Symm", as.mxMatrix=FALSE)) 72 | 73 | ## Arrange the elements by row 74 | (mat5 <- create.mxMatrix(a3, type="Symm", as.mxMatrix=FALSE, byrow=TRUE)) 75 | 76 | (mat6 <- create.mxMatrix(a3, type="Diag", lbound=6:1, name="mat6")) 77 | } 78 | 79 | 80 | 81 | \keyword{utilities} 82 | -------------------------------------------------------------------------------- /man/create.vechsR.Rd: -------------------------------------------------------------------------------- 1 | \name{create.vechsR} 2 | \alias{create.vechsR} 3 | \title{Create a model implied correlation matrix with implicit diagonal constraints 4 | } 5 | \description{It creates implicit diagonal constraints on the model 6 | implied correlation matrix by treating the error variances as 7 | functions of other parameters. 8 | } 9 | \usage{ 10 | create.vechsR(A0, S0, F0 = NULL, Ax = NULL, Sx = NULL, A.lbound=NULL, A.ubound=NULL) 11 | } 12 | %- maybe also 'usage' for other objects documented here. 13 | \arguments{ 14 | \item{A0}{An Amatrix, which will be converted into \code{MxMatrix-class} via \code{as.mxMatrix}.} 15 | \item{S0}{An Smatrix, which will be converted into \code{MxMatrix-class} via \code{as.mxMatrix}.} 16 | \item{F0}{An Fmatrix, which will be converted into \code{MxMatrix-class} via \code{as.mxMatrix}.} 17 | \item{Ax}{An Amatrix or a list of Amatrices with definition variables as the moderators of the Amatrix.} 18 | \item{Sx}{An Smatrix or a list of Smatrices with definition variables as the moderators of the Smatrix.} 19 | \item{A.lbound}{A matrix of lower bounds of the Amatrix. If a scalar is 20 | given, the lbound matrix will be filled with this scalar.} 21 | \item{A.ubound}{A matrix of upper bounds of the Amatrix. If a scalar is 22 | given, the ubound matrix will be filled with this scalar.} 23 | } 24 | 25 | \value{A list of \code{MxMatrix-class}. The model implied correlation 26 | matrix is computed in \code{impliedR} and \code{vechsR}.} 27 | \note{Since \code{A0} are the intercepts and \code{Ax} are the 28 | regression coefficients, the parameters in \code{Ax} must be a subset of those in 29 | \code{A0}.} 30 | \author{Mike W.-L.
Cheung 31 | } 32 | \seealso{ \code{\link[metaSEM]{osmasem}}, 33 | \code{\link[metaSEM]{create.Tau2}}, \code{\link[metaSEM]{create.V}} 34 | } 35 | 36 | \examples{ 37 | \donttest{ 38 | ## Proposed model 39 | model1 <- 'W2 ~ w2w*W1 + s2w*S1 40 | S2 ~ w2s*W1 + s2s*S1 41 | W1 ~~ w1WITHs1*S1 42 | W2 ~~ w2WITHs2*S2 43 | W1 ~~ 1*W1 44 | S1 ~~ 1*S1 45 | W2 ~~ Errw2*W2 46 | S2 ~~ Errs2*S2' 47 | 48 | ## Convert into RAM 49 | RAM1 <- lavaan2RAM(model1, obs.variables=c("W1", "S1", "W2", "S2")) 50 | 51 | ## No moderator 52 | M0 <- create.vechsR(A0=RAM1$A, S0=RAM1$S, F0=NULL, Ax=NULL, Sx=NULL) 53 | 54 | ## Lag (definition variable) as a moderator on the paths in the Amatrix 55 | Ax <- matrix(c(0,0,0,0, 56 | 0,0,0,0, 57 | "0*data.Lag","0*data.Lag",0,0, 58 | "0*data.Lag","0*data.Lag",0,0), 59 | nrow=4, ncol=4, byrow=TRUE) 60 | 61 | M1 <- create.vechsR(A0=RAM1$A, S0=RAM1$S, F0=NULL, Ax=Ax, Sx=NULL) 62 | 63 | ## Lag (definition variable) as a moderator on the correlation in the Smatrix 64 | Sx <- matrix(c(0,"0*data.Lag",0,0, 65 | "0*data.Lag",0,0,0, 66 | 0,0,0,"0*data.Lag", 67 | 0,0,"0*data.Lag",0), 68 | nrow=4, ncol=4, byrow=TRUE) 69 | 70 | M2 <- create.vechsR(A0=RAM1$A, S0=RAM1$S, F0=NULL, Ax=NULL, Sx=Sx) 71 | } 72 | } 73 | % Add one or more standard keywords, see file 'KEYWORDS' in the 74 | % R documentation directory. 75 | \keyword{ osmasem } 76 | -------------------------------------------------------------------------------- /man/homoStat.Rd: -------------------------------------------------------------------------------- 1 | \name{homoStat} 2 | \alias{homoStat} 3 | \title{Test the Homogeneity of Effect Sizes 4 | } 5 | \description{It tests the homogeneity of univariate and multivariate effect sizes. 6 | } 7 | \usage{ 8 | homoStat(y, v) 9 | } 10 | %- maybe also 'usage' for other objects documented here. 11 | \arguments{ 12 | \item{y}{A vector of effect size for univariate meta-analysis or a \eqn{k}{k} x 13 | \eqn{p}{p} matrix of effect sizes for multivariate meta-analysis 14 | where \eqn{k}{k} is the number of studies and \eqn{p}{p} is the 15 | number of effect sizes. 16 | } 17 | \item{v}{A vector of the sampling variance of the effect size for univariate 18 | meta-analysis or a \eqn{k}{k} x \eqn{p*}{p*} matrix of the sampling 19 | covariance matrix of the effect sizes for multivariate meta-analysis 20 | where \eqn{p* = p(p+1)/2 }{p* = p(p+1)/2}. It is arranged by column 21 | major as used by \code{\link[OpenMx]{vech}}. It is assumed that 22 | there is no missing value in \code{v} if \code{y} is complete. If there are missing values in \code{v} 23 | due to the missingness on \code{y}, the missing values in 24 | \code{v} will be removed automatically. 25 | } 26 | } 27 | 28 | \value{A list of 29 | \item{Q}{Q statistic on the null hypothesis of homogeneity of effect 30 | sizes. It has an approximate chi-square distribution under the null 31 | hypothesis.} 32 | \item{Q.df}{Degrees of freedom of the Q statistic} 33 | \item{pval}{p-value on the test of homogeneity of effect sizes} 34 | } 35 | \references{ 36 | Becker, B. J. (1992). Using results from replicated studies to 37 | estimate linear models. \emph{Journal of Educational Statistics}, 38 | \bold{17}, 341-362. 39 | 40 | Cheung, M. W.-L. (2010). Fixed-effects meta-analyses as multiple-group 41 | structural equation models. \emph{Structural Equation Modeling}, 42 | \bold{17}, 481-509. 43 | 44 | Cochran, W. G. (1954). The combination of estimates from different experiments. \emph{Biometrics}, \bold{10}, 101-129. 45 | } 46 | \author{Mike W.-L. 
Cheung 47 | } 48 | 49 | \seealso{ \code{\link[metaSEM]{meta}} 50 | } 51 | \examples{ 52 | with( Hox02, homoStat(yi, vi) ) 53 | 54 | with( HedgesOlkin85, homoStat(y=cbind(d_att, d_ach), 55 | v=cbind(var_att, cov_att_ach, var_ach)) ) 56 | } 57 | % Add one or more standard keywords, see file 'KEYWORDS' in the 58 | % R documentation directory. 59 | \keyword{ meta-analysis } 60 | -------------------------------------------------------------------------------- /man/indirectEffect.Rd: -------------------------------------------------------------------------------- 1 | \name{indirectEffect} 2 | \alias{indirectEffect} 3 | \title{Estimate the asymptotic covariance matrix of standardized or unstandardized indirect and direct effects 4 | } 5 | \description{It estimates the standardized or unstandardized indirect and direct effects 6 | and their asymptotic sampling covariance matrix. 7 | } 8 | \usage{ 9 | indirectEffect(x, n, standardized = TRUE, direct.effect = TRUE, run = TRUE) 10 | } 11 | %- maybe also 'usage' for other objects documented here. 12 | \arguments{ 13 | \item{x}{A 3x3 correlation/covariance matrix or a list of 14 | correlation/covariance matrices. Variables are 15 | arranged as the dependent variable (y), mediator (m) and independent 16 | variable (x) 17 | } 18 | \item{n}{Sample size or a vector of sample sizes 19 | } 20 | \item{standardized}{Logical. Whether the indirect effect is 21 | standardized. 22 | } 23 | \item{direct.effect}{Logical. Whether the direct effect is 24 | estimated. If it is \code{FALSE}, the direct effect is fixed at zero. 25 | } 26 | \item{run}{Logical. If \code{FALSE}, only return the mx model without running the analysis.} 27 | } 28 | \details{Cheung (2009) estimated the standardized indirect effect and 29 | its standard error with non-linear constraints. Since \code{OpenMx} does not generate standard errors when there 30 | are non-linear constraints, Kwan and Chan's (2011) approach is used in 31 | this function. Delta method is used to calculate the asymptotic covariance matrix. 32 | } 33 | \value{A vector (or a matrix if the input is a list of matrices) of 34 | (standardized) indirect effect, standardized direct effect, and their 35 | asymptotic sampling covariance matrices 36 | } 37 | \references{ 38 | Cheung, M. W.-L. (2009). Comparison of methods for constructing confidence intervals of standardized indirect effects. \emph{Behavior 39 | Research Methods}, \emph{41}, 425-438. 40 | 41 | Kwan, J., & Chan, W. (2011). Comparing standardized coefficients in 42 | structural equation modeling: a model reparameterization 43 | approach. \emph{Behavior Research Methods}, \emph{43}, 730-745. 44 | } 45 | \author{Mike W.-L. Cheung 46 | } 47 | 48 | \examples{ 49 | ## A correlation matrix as input 50 | x <- matrix(c(1, 0.4, 0.2, 0.4, 1, 0.3, 0.2, 0.3, 1), ncol=3) 51 | dimnames(x) <- list( c("y", "m", "x"), c("y", "m", "x") ) 52 | indirectEffect(x, n=300) 53 | 54 | ## A list of correlation matrices 55 | indirectEffect( list(x, x), n=c(300,500), standardized=FALSE ) 56 | } 57 | % Add one or more standard keywords, see file 'KEYWORDS' in the 58 | % R documentation directory. 
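% A rough hand check of the first example above (an illustrative sketch only, not output copied from indirectEffect()): with standardized variables, the model-implied paths are a = r(m,x) = .30, b = (r(y,m) - r(y,x)*r(m,x)) / (1 - r(m,x)^2) = (.40 - .06)/.91 = .37, and c' = (r(y,x) - r(y,m)*r(m,x)) / (1 - r(m,x)^2) = (.20 - .12)/.91 = .09 (both rounded), so the standardized indirect effect should be close to a*b = .11.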
59 | \keyword{ compute effect sizes } 60 | 61 | -------------------------------------------------------------------------------- /man/is.pd.Rd: -------------------------------------------------------------------------------- 1 | \name{is.pd} 2 | \alias{is.pd} 3 | \title{Test Positive Definiteness of a List of Square Matrices 4 | } 5 | \description{It tests the positive definiteness of a square matrix or a 6 | list of square matrices. It returns \code{TRUE} if the matrix is 7 | positive definite. It returns \code{FALSE} if the matrix is either 8 | non-positive definite or not symmetric. Variables with \code{NA} in the diagonals will be removed 9 | before testing. It returns \code{NA} when there are missing correlations even after deleting 10 | the missing variables. 11 | } 12 | \usage{ 13 | is.pd(x, check.aCov=FALSE, cor.analysis=TRUE, tol=1e-06) 14 | } 15 | \arguments{ 16 | \item{x}{A square matrix or a list of square matrices} 17 | \item{check.aCov}{If it is \code{TRUE}, it mirrors the checking in 18 | \code{\link[metaSEM]{asyCov}}.} 19 | \item{cor.analysis}{Whether the input matrix is a correlation or a 20 | covariance matrix. It is ignored when \code{check.aCov=FALSE}.} 21 | \item{tol}{Tolerance (relative to largest variance) for numerical lack 22 | of positive-definiteness in \code{x}. It is adopted from \code{\link[MASS]{mvrnorm}}. 23 | } 24 | } 25 | \value{If the input is a matrix, it returns \code{TRUE}, \code{FALSE} 26 | or \code{NA}. If the input is a list of matrices, it returns 27 | a list of \code{TRUE}, \code{FALSE} or \code{NA}. 28 | } 29 | 30 | \author{Mike W.-L. Cheung 31 | } 32 | 33 | \examples{ 34 | A <- diag(1,3) 35 | is.pd(A) 36 | # TRUE 37 | 38 | B <- matrix(c(1,2,2,1), ncol=2) 39 | is.pd(B) 40 | # FALSE 41 | 42 | is.pd(list(A, B)) 43 | # TRUE FALSE 44 | 45 | C <- A 46 | C[2,1] <- C[1,2] <- NA 47 | is.pd(C) 48 | # NA 49 | } 50 | \keyword{utilities} 51 | 52 | -------------------------------------------------------------------------------- /man/list2matrix.Rd: -------------------------------------------------------------------------------- 1 | \name{list2matrix} 2 | \alias{list2matrix} 3 | \title{Convert a List of Symmetric Matrices into a Stacked Matrix 4 | } 5 | \description{It converts a list of symmetric matrices into 6 | a stacked matrix. Dimensions of the symmetric matrices have to be the 7 | same. It tries to preserve the dimension names if possible. Dimension names will 8 | be created if there are no dimension names in the first symmetric matrix. 9 | } 10 | \usage{ 11 | list2matrix(x, diag = FALSE) 12 | } 13 | \arguments{ 14 | \item{x}{A list of \eqn{k}{k} \eqn{p}{p} x \eqn{p}{p} symmetric matrices. 15 | } 16 | \item{diag}{Logical. If it is \code{TRUE}, \code{\link[OpenMx]{vech}} 17 | is used to vectorize the (covariance) matrices. If it is \code{FALSE}, \code{\link[OpenMx]{vechs}} 18 | is used to vectorize the (correlation) matrices. 19 | } 20 | } 21 | \value{A \eqn{k}{k} x \eqn{p*}{p*} stacked matrix where \eqn{p* = p(p-1)/2 }{p* 22 | = p(p-1)/2} for \code{diag}=\code{FALSE} or \eqn{p* = p(p+1)/2 23 | }{p* = p(p+1)/2} for \code{diag}=\code{TRUE}. 24 | 25 | } 26 | \author{Mike W.-L. 
Cheung 27 | } 28 | 29 | %% ~Make other sections like Warning with \section{Warning }{....} ~ 30 | 31 | \examples{ 32 | C1 <- matrix(c(1,0.5,0.4,0.5,1,0.2,0.4,0.2,1), ncol=3) 33 | C2 <- matrix(c(1,0.4,NA,0.4,1,NA,NA,NA,NA), ncol=3) 34 | 35 | ## A list without dimension names 36 | list2matrix(list(C1, C2)) 37 | # x2_x1 x3_x1 x3_x2 38 | # [1,] 0.5 0.4 0.2 39 | # [2,] 0.4 NA NA 40 | 41 | dimnames(C1) <- list( c("x","y","z"), c("x","y","z") ) 42 | dimnames(C2) <- list( c("x","y","z"), c("x","y","z") ) 43 | 44 | ## A list with dimension names 45 | list2matrix(list(C1, C2)) 46 | # y_x z_x z_y 47 | # [1,] 0.5 0.4 0.2 48 | # [2,] 0.4 NA NA 49 | } 50 | % Add one or more standard keywords, see file 'KEYWORDS' in the 51 | % R documentation directory. 52 | \keyword{utilities} 53 | -------------------------------------------------------------------------------- /man/matrix2bdiag.Rd: -------------------------------------------------------------------------------- 1 | \name{matrix2bdiag} 2 | \alias{matrix2bdiag} 3 | \title{Convert a Matrix into a Block Diagonal Matrix 4 | } 5 | \description{It converts a matrix into a block diagonal matrix. 6 | } 7 | \usage{ 8 | matrix2bdiag(x, ...) 9 | } 10 | %- maybe also 'usage' for other objects documented here. 11 | \arguments{ 12 | \item{x}{A \eqn{k}{k} x \eqn{p}{p} matrix of numerics or characters. 13 | } 14 | \item{\dots}{Further arguments to be passed to \code{\link[metaSEM]{vec2symMat}} 15 | } 16 | } 17 | \details{Each row of \code{x} is converted into a symmetric matrix via 18 | \code{\link[metaSEM]{vec2symMat}}. Then the list of the symmetric matrices is 19 | converted into a block diagonal matrix via a function written by Scott 20 | Chasalow posted at http://www.math.yorku.ca/Who/Faculty/Monette/pub/stmp/0827.html. 21 | } 22 | 23 | \author{Mike W.-L. Cheung 24 | } 25 | 26 | \seealso{ \code{\link[metaSEM]{vec2symMat}} 27 | } 28 | \examples{ 29 | (m1 <- matrix(1:12, ncol=6, byrow=TRUE)) 30 | # [,1] [,2] [,3] [,4] [,5] [,6] 31 | # [1,] 1 2 3 4 5 6 32 | # [2,] 7 8 9 10 11 12 33 | 34 | matrix2bdiag(m1) 35 | # [,1] [,2] [,3] [,4] [,5] [,6] 36 | # [1,] 1 2 3 0 0 0 37 | # [2,] 2 4 5 0 0 0 38 | # [3,] 3 5 6 0 0 0 39 | # [4,] 0 0 0 7 8 9 40 | # [5,] 0 0 0 8 10 11 41 | # [6,] 0 0 0 9 11 12 42 | } 43 | % Add one or more standard keywords, see file 'KEYWORDS' in the 44 | % R documentation directory. 45 | \keyword{utilities} 46 | -------------------------------------------------------------------------------- /man/meta2semPlot.Rd: -------------------------------------------------------------------------------- 1 | \name{meta2semPlot} 2 | \alias{meta2semPlot} 3 | \title{ 4 | Convert \code{metaSEM} objects into \code{semPlotModel} objects for plotting 5 | } 6 | \description{ 7 | It converts objects in class \code{wls} into objects of class \code{semPlotModel}. 8 | } 9 | \usage{ 10 | meta2semPlot(object, manNames = NULL, latNames = NULL, labels = c("labels", "RAM"), ...) 11 | } 12 | %- maybe also 'usage' for other objects documented here. 13 | \arguments{ 14 | \item{object}{An object of class \code{wls} returned from \code{wls()} 15 | or \code{tssem2()}. 16 | } 17 | \item{manNames}{A character vector of the manifest names. The program 18 | will try to get it from the \code{object} if it is not given. 19 | } 20 | \item{latNames}{A character vector of the latent names. The program 21 | will create it by using "L1", "L2", etc if it is not given. 22 | } 23 | \item{labels}{Either \code{labels} (default if missing) or 24 | \code{RAM}. 
If \code{labels}, the labels of the parameters are used 25 | in plotting. If \code{RAM}, the RAM notations are used in plotting. 26 | } 27 | \item{\dots}{Further arguments to be passed to \code{\link[semPlot]{ramModel}} 28 | } 29 | } 30 | \details{ 31 | It uses the \code{ramModel()} to do the conversion. 32 | } 33 | \value{ 34 | A "semPlotModel" object. 35 | } 36 | \author{Mike W.-L. Cheung 37 | } 38 | 39 | \seealso{\code{\link[semPlot]{ramModel}}, 40 | \code{\link[metaSEM]{Becker92}}, \code{\link[metaSEM]{Becker09}}, 41 | \code{\link[metaSEM]{Digman97}}, \code{\link[metaSEM]{Hunter83}} 42 | } 43 | 44 | \keyword{ methods } 45 | \keyword{ tssem }% __ONLY ONE__ keyword per line 46 | -------------------------------------------------------------------------------- /man/osmasemR2.Rd: -------------------------------------------------------------------------------- 1 | \name{osmasemR2} 2 | \alias{osmasemR2} 3 | \title{Calculate the R2 in OSMASEM and OSMASEM3L} 4 | \description{It calculates the R2 of the moderators in explaining the 5 | variances in the heterogeneity variances. 6 | } 7 | \usage{ 8 | osmasemR2(model1, model0, R2.truncate=TRUE) 9 | } 10 | \arguments{ 11 | \item{model1}{An object in class \code{osmasem}.} 12 | \item{model0}{An object in class \code{osmasem}.} 13 | \item{R2.truncate}{Whether to truncate the negative R2 to zero.} 14 | } 15 | 16 | \value{\code{model1} and \code{model0} are the models with and without 17 | the moderators, respectively. The function does not check whether the 18 | models are nested. It is the users' responsibility to make sure that 19 | the models with and without the moderators are nested. It returns a 20 | list of the diagonals of the heterogeneity variances of the 21 | models without and with the moderators, and the R2.} 22 | 23 | \author{Mike W.-L. Cheung 24 | } 25 | \seealso{ \code{\link[metaSEM]{osmasem}} 26 | } 27 | 28 | \keyword{osmasem} 29 | \keyword{osmasem3L} 30 | -------------------------------------------------------------------------------- /man/osmasemSRMR.Rd: -------------------------------------------------------------------------------- 1 | \name{osmasemSRMR} 2 | \alias{osmasemSRMR} 3 | \title{Calculate the SRMR in OSMASEM and OSMASEM3L} 4 | \description{It calculates the standardized root mean squared residuals 5 | (SRMR) in OSMASEM and OSMASEM3L. 6 | } 7 | \usage{ 8 | osmasemSRMR(x) 9 | } 10 | \arguments{ 11 | \item{x}{An OSMASEM object without any moderators.} 12 | } 13 | 14 | \value{It calculates the model implied correlation matrix and its 15 | saturated counterpart to calculate the SRMR. It should be noted that 16 | the heterogeneity variances are ignored in the calculations.} 17 | 18 | \author{Mike W.-L. Cheung 19 | } 20 | \seealso{ \code{\link[metaSEM]{osmasem}}, 21 | \code{\link[metaSEM]{Nohe15}} 22 | } 23 | 24 | \keyword{osmasem} 25 | \keyword{osmasem3L} 26 | -------------------------------------------------------------------------------- /man/pattern.n.Rd: -------------------------------------------------------------------------------- 1 | \name{pattern.n} 2 | \alias{pattern.n} 3 | \title{Display the Accumulative Sample Sizes for the Covariance Matrix 4 | } 5 | \description{It displays the accumulative sample sizes for the 6 | covariance matrix. 7 | } 8 | \usage{ 9 | pattern.n(x, n) 10 | } 11 | \arguments{ 12 | \item{x}{A list of square matrices} 13 | \item{n}{A vector of sample sizes. 14 | } 15 | } 16 | \value{A square matrix of the accumulative sample sizes of the 17 | input matrices. 18 | } 19 | \author{Mike W.-L. 
Cheung 20 | } 21 | 22 | %% ~Make other sections like Warning with \section{Warning }{....} ~ 23 | 24 | \examples{ 25 | ## Show the pattern of missing data 26 | pattern.n(Hunter83$data, Hunter83$n) 27 | 28 | # Ability Knowledge Work sample Supervisor 29 | # Ability 3815 3372 3281 3605 30 | # Knowledge 3372 3532 2998 3322 31 | # Work sample 3281 2998 3441 3231 32 | # Supervisor 3605 3322 3231 3765 33 | } 34 | \keyword{utilities} 35 | 36 | -------------------------------------------------------------------------------- /man/pattern.na.Rd: -------------------------------------------------------------------------------- 1 | \name{pattern.na} 2 | \alias{pattern.na} 3 | \title{Display the Pattern of Missing Data of a List of Square Matrices 4 | } 5 | \description{It displays the pattern of missing data (or pattern of data 6 | that are present) of a list of square matrices with the same dimensions. 7 | } 8 | \usage{ 9 | pattern.na(x, show.na = TRUE, type=c("tssem", "osmasem")) 10 | } 11 | \arguments{ 12 | \item{x}{A list of square matrices} 13 | \item{show.na}{If it is \code{TRUE}, it shows the pattern of missing 14 | data. If it is \code{FALSE}, it shows the pattern of data that are 15 | present.} 16 | \item{type}{If it is \code{tssem}, it reports the pattern of missing 17 | correlations for the tssem approach. If it is \code{osmasem}, it 18 | reports the pattern of missing correlations for the data created by \code{\link[metaSEM]{Cor2DataFrame}}. 19 | } 20 | } 21 | \value{A square matrix of numerical values with the same dimensions of the 22 | input matrices. 23 | } 24 | \author{Mike W.-L. Cheung 25 | } 26 | 27 | %% ~Make other sections like Warning with \section{Warning }{....} ~ 28 | 29 | \examples{ 30 | ## Show the pattern of missing data 31 | pattern.na(Hunter83$data, show.na=TRUE) 32 | 33 | # Ability Knowledge Work sample Supervisor 34 | # Ability 1 3 3 2 35 | # Knowledge 3 2 4 3 36 | # Work sample 3 4 2 3 37 | # Supervisor 2 3 3 1 38 | 39 | ## Show the pattern of data that are present 40 | pattern.na(Hunter83$data, show.na=FALSE) 41 | 42 | # Ability Knowledge Work sample Supervisor 43 | # Ability 13 11 11 12 44 | # Knowledge 11 12 10 11 45 | # Work sample 11 10 12 11 46 | # Supervisor 12 11 11 13 47 | } 48 | \keyword{utilities} 49 | 50 | -------------------------------------------------------------------------------- /man/print.Rd: -------------------------------------------------------------------------------- 1 | \name{print} 2 | \alias{print.tssem1FEM} 3 | \alias{print.tssem1FEM.cluster} 4 | \alias{print.tssem1REM} 5 | \alias{print.wls} 6 | \alias{print.meta} 7 | \alias{print.meta3LFIML} 8 | \alias{print.reml} 9 | \alias{print.uniR1} 10 | \alias{print.impliedR} 11 | \title{Print Methods for various Objects 12 | } 13 | \description{Print methods for the \code{tssem1FEM}, 14 | \code{tssem1FEM.cluster}, \code{tssem1REM}, \code{wls}, 15 | \code{meta}, \code{meta3LFIML}, \code{reml}, \code{uniR1} and 16 | \code{impliedR} objects. 17 | } 18 | \usage{ 19 | \method{print}{tssem1FEM}(x, \dots) 20 | \method{print}{tssem1FEM.cluster}(x, \dots) 21 | \method{print}{tssem1REM}(x, \dots) 22 | \method{print}{wls}(x, \dots) 23 | \method{print}{meta}(x, \dots) 24 | \method{print}{meta3LFIML}(x, \dots) 25 | \method{print}{reml}(x, \dots) 26 | \method{print}{uniR1}(x, \dots) 27 | \method{print}{impliedR}(x, \dots) 28 | } 29 | %- maybe also 'usage' for other objects documented here. 
30 | \arguments{ 31 | \item{x}{An object returned from either class 32 | \code{tssem1FEM}, class \code{tssem1FEM.cluster}, class \code{tssem1REM}, 33 | class \code{wls}, class \code{meta}, class \code{meta3LFIML}, 34 | class \code{reml}, class \code{uniR1} or class \code{impliedR} 35 | } 36 | \item{\dots}{Further arguments to be passed to \code{summary.default} 37 | or unused. 38 | } 39 | } 40 | 41 | \author{Mike W.-L. Cheung 42 | } 43 | 44 | %% ~Make other sections like Warning with \section{Warning }{....} ~ 45 | 46 | \seealso{ \code{\link[metaSEM]{tssem1}}, \code{\link[metaSEM]{wls}}, 47 | \code{\link[metaSEM]{meta}}, \code{\link[metaSEM]{reml}} 48 | %% ~~objects to See Also as \code{\link{help}}, ~~~ 49 | } 50 | 51 | \keyword{ methods } 52 | -------------------------------------------------------------------------------- /man/rCor.Rd: -------------------------------------------------------------------------------- 1 | \name{rCor} 2 | \alias{rCor} 3 | \alias{rCorPop} 4 | \alias{rCorSam} 5 | \alias{rCor3L} 6 | \title{Generate (Nested) Sample/Population Correlation/Covariance Matrices 7 | } 8 | \description{It generates (nested) random sample or population correlation or 9 | covariance matrices. \code{rCor()} is a wrapper to call 10 | \code{rCorPop()} and then \code{rCorSam()}. 11 | } 12 | \usage{ 13 | rCor(Sigma, V, n, corr=TRUE, raw.data=FALSE, 14 | nonPD.pop=c("replace", "nearPD", "accept"), 15 | nonPD.sam=c("stop", "nearPD")) 16 | rCorPop(Sigma, V, k, corr=TRUE, 17 | nonPD.pop=c("replace", "nearPD", "accept")) 18 | rCorSam(Sigma, n, corr=TRUE, raw.data=FALSE, 19 | nonPD.sam=c("stop", "nearPD")) 20 | rCor3L(Sigma, V.B, V.W, n, cluster, corr=TRUE, raw.data=FALSE, 21 | nonPD.pop=c("replace", "nearPD", "accept"), 22 | nonPD.sam=c("stop", "nearPD")) 23 | } 24 | %- maybe also 'usage' for other objects documented here. 25 | \arguments{ 26 | \item{Sigma}{A list of population correlation/covariance matrices or a 27 | single matrix} 28 | \item{V}{A variance-covariance matrix of Sigma.} 29 | \item{V.B}{A variance-covariance matrix of between-study Sigma.} 30 | \item{V.W}{A variance-covariance matrix of within-study Sigma} 31 | \item{n}{A vector or a single sample sizes.} 32 | \item{cluster}{A vector of number of studies in clusters.} 33 | \item{corr}{Logical. Whether to generate correlation or covariance matrices.} 34 | \item{raw.data}{Logical. Whether correlation/covariance matrices are 35 | generated via raw.data or directly from a Wishart distribution.} 36 | \item{nonPD.pop}{If it is \code{replace}, generated non-positive 37 | definite matrices are replaced by generated new ones which are 38 | positive definite. If it is \code{nearPD}, they are replaced by 39 | nearly positive definite matrices by calling 40 | \code{Matrix::nearPD()}. If it is \code{accept}, they are accepted.} 41 | \item{nonPD.sam}{If it is \code{stop}, the program stops when the 42 | inputs in the \code{rCorSam} are non-positive definite. If it is 43 | \code{nearPD}, they are replaced by nearly positive definite matrices by calling 44 | \code{Matrix::nearPD()}.} 45 | \item{k}{A vector or a single number of studies.} 46 | } 47 | 48 | \value{An object of the generated population/sample 49 | correlation/covariance matrices. 50 | } 51 | \author{Mike W.-L. 
Cheung 52 | } 53 | \examples{ 54 | Sigma <- matrix(c(1, .2, .3, 55 | .2, 1, .4, 56 | .3, .4, 1), ncol=3, nrow=3) 57 | V <- diag(c(.1, .1, .1)) 58 | 59 | ## Generate two population correlation matrices 60 | Pop.corr <- rCorPop(Sigma, V, k=2) 61 | Pop.corr 62 | 63 | summary(Pop.corr) 64 | 65 | ## Generate two sample correlation matrices 66 | rCorSam(Sigma=Pop.corr, n=c(10, 10)) 67 | 68 | ## The above code is the same as the following one 69 | rCor(Sigma, V, n=c(10, 10)) 70 | } 71 | \keyword{ utilities } 72 | -------------------------------------------------------------------------------- /man/readData.Rd: -------------------------------------------------------------------------------- 1 | \name{readData} 2 | \alias{readFullMat} 3 | \alias{readStackVec} 4 | \alias{readLowTriMat} 5 | \title{Read External Correlation/Covariance Matrices 6 | } 7 | \description{It reads full/lower triangle/stacked vectors of correlation/covariance data into a list of correlation/covariance matrices. 8 | } 9 | \usage{ 10 | readFullMat(file, ...) 11 | readStackVec(file, ...) 12 | readLowTriMat(file, no.var, ...) 13 | } 14 | %- maybe also 'usage' for other objects documented here. 15 | \arguments{ 16 | \item{file}{File name of the data. 17 | } 18 | \item{no.var}{The number of variables in the data. 19 | } 20 | \item{\dots}{Further arguments to be passed to \code{\link[base]{scan}} for \code{readLowTriMat} and to \code{\link[utils]{read.table}} for \code{readFullMat} and \code{readStackVec}. 21 | } 22 | } 23 | 24 | \value{A list of correlation/covariance matrices. 25 | } 26 | 27 | \author{Mike W.-L. Cheung 28 | } 29 | 30 | \examples{ 31 | \donttest{ 32 | ## Write two full correlation matrices into a file named "fullmat.dat". 33 | ## x2 is missing in the second matrix. 34 | ## The content of "fullmat.dat" is 35 | # 1.0 0.3 0.4 36 | # 0.3 1.0 0.5 37 | # 0.4 0.5 1.0 38 | # 1.0 NA 0.4 39 | # NA NA NA 40 | # 0.4 NA 1.0 41 | 42 | ## cat("1.0 0.3 0.4\n0.3 1.0 0.5\n0.4 0.5 1.0 43 | ## 1.0 NA 0.4\nNA NA NA\n0.4 NA 1.0", 44 | ## file="fullmat.dat", sep="") 45 | 46 | ## Read the correlation matrices from a file 47 | ## my.full <- readFullMat("fullmat.dat") 48 | 49 | ## Read the correlation matrices from a string 50 | x <- 51 | "1.0 0.3 0.4 52 | 0.3 1.0 0.5 53 | 0.4 0.5 1.0 54 | 1.0 NA 0.4 55 | NA NA NA 56 | 0.4 NA 1.0" 57 | 58 | my.full <- readFullMat(textConnection(x)) 59 | 60 | ## my.full 61 | # $`1` 62 | # x1 x2 x3 63 | # x1 1.0 0.3 0.4 64 | # x2 0.3 1.0 0.5 65 | # x3 0.4 0.5 1.0 66 | # 67 | # $`2` 68 | # x1 x2 x3 69 | # x1 1.0 NA 0.4 70 | # x2 NA NA NA 71 | # x3 0.4 NA 1.0 72 | 73 | ## Write two lower triangle correlation matrices into a file named "lowertriangle.dat". 74 | ## x2 is missing in the second matrix. 
75 | ## The content of "lowertriangle.dat" is 76 | # 1.0 77 | # 0.3 1.0 78 | # 0.4 0.5 1.0 79 | # 1.0 80 | # NA NA 81 | # 0.4 NA 1.0 82 | ## cat("1.0\n0.3 1.0\n0.4 0.5 1.0\n1.0\nNA NA\n0.4 NA 1.0", 83 | ## file="lowertriangle.dat", sep="") 84 | 85 | ## Read the lower triangle correlation matrices from a file 86 | ## my.lowertri <- readLowTriMat(file = "lowertriangle.dat", no.var = 3) 87 | 88 | ## Read the correlation matrices from a string 89 | x <- 90 | "1.0 91 | 0.3 1.0 92 | 0.4 0.5 1.0 93 | 1.0 94 | NA NA 95 | 0.4 NA 1.0" 96 | 97 | my.lowertri <- readLowTriMat(textConnection(x), no.var = 3) 98 | 99 | ## my.lowertri 100 | # $`1` 101 | # x1 x2 x3 102 | # x1 1.0 0.3 0.4 103 | # x2 0.3 1.0 0.5 104 | # x3 0.4 0.5 1.0 105 | # 106 | # $`2` 107 | # x1 x2 x3 108 | # x1 1.0 NA 0.4 109 | # x2 NA NA NA 110 | # x3 0.4 NA 1.0 111 | 112 | ## Write two vectors of correlation coefficients based on 113 | ## column major into a file named "stackvec.dat". 114 | ## x2 is missing in the second matrix. 115 | ## The content of "stackvec.dat" is 116 | # 1.0 0.3 0.4 1.0 0.5 1.0 117 | # 1.0 NA 0.4 NA NA 1.0 118 | ## cat("1.0 0.3 0.4 1.0 0.5 1.0\n1.0 NA 0.4 NA NA 1.0\n", 119 | ## file="stackvec.dat", sep="") 120 | 121 | ## Read the stack vectors from a file 122 | ## my.vec <- readStackVec("stackvec.dat") 123 | 124 | ## Read the stack vectors from a string 125 | x <- " 126 | 1.0 0.3 0.4 1.0 0.5 1.0 127 | 1.0 NA 0.4 NA NA 1.0" 128 | 129 | my.vec <- readStackVec(textConnection(x)) 130 | 131 | ## my.vec 132 | # $`1` 133 | # x1 x2 x3 134 | # x1 1.0 0.3 0.4 135 | # x2 0.3 1.0 0.5 136 | # x3 0.4 0.5 1.0 137 | # 138 | # $`2` 139 | # x1 x2 x3 140 | # x1 1.0 NA 0.4 141 | # x2 NA NA NA 142 | # x3 0.4 NA 1.0 143 | } 144 | } 145 | \keyword{ utilities } 146 | -------------------------------------------------------------------------------- /man/rerun.Rd: -------------------------------------------------------------------------------- 1 | \name{rerun} 2 | \alias{rerun} 3 | \title{Rerun models via mxTryHard() 4 | } 5 | \description{It reruns models via mxTryHard(). 6 | } 7 | \usage{ 8 | rerun(object, autofixtau2=FALSE, extraTries=10, ...) 9 | } 10 | %- maybe also 'usage' for other objects documented here. 11 | \arguments{ 12 | \item{object}{An object of either class \code{tssem1FEM}, 13 | class \code{tssem1REM}, class \code{wls}, class \code{meta}, class 14 | \code{reml}, class \code{osmasem}, class \code{osmasem3L}, and class \code{MxModel}. 15 | } 16 | \item{autofixtau2}{Logical. Whether automatically fixes elements of tau2 17 | with NA of standard errors. It only works for objects of 18 | class \code{tssem1REM}, class \code{meta}, and class \code{osmasem}.} 19 | \item{extraTries}{The number of attempts to run the model in addition to the first.} 20 | \item{\dots}{{Further arguments to be passed to \code{\link[OpenMx]{mxTryHard}}}} 21 | } 22 | 23 | \author{Mike W.-L. 
Cheung 24 | } 25 | 26 | \examples{ 27 | \donttest{ 28 | random1 <- tssem1(Digman97$data, Digman97$n, method="REM", RE.type="Diag") 29 | random1_rerun <- rerun(random1) 30 | summary(random1_rerun) 31 | } 32 | } 33 | \keyword{tssem} 34 | \keyword{meta} 35 | \keyword{osmasem} 36 | \keyword{osmasem3L} 37 | \keyword{wls} 38 | -------------------------------------------------------------------------------- /man/tssemParaVar.Rd: -------------------------------------------------------------------------------- 1 | \name{tssemParaVar} 2 | \alias{tssemParaVar} 3 | \title{Estimate the heterogeneity (SD) of the parameter estimates of the 4 | TSSEM object 5 | } 6 | \description{It estimates the heterogeneity of the parameter estimates 7 | of the TSSEM objects using either the bootstrap or the delta methods. 8 | } 9 | \usage{ 10 | tssemParaVar(tssem1.obj, tssem2.obj, method=c("bootstrap", "delta"), 11 | interval=0.8, Rep=50, output=c("data.frame", "matrices"), 12 | nonPD.pop=c("replace", "nearPD", "accept")) 13 | } 14 | %- maybe also 'usage' for other objects documented here. 15 | \arguments{ 16 | \item{tssem1.obj}{An object of class \code{tssem1REM} returned from \code{tssem1()} 17 | } 18 | \item{tssem2.obj}{An object of class \code{wls} returned from 19 | \code{tssem2()} or \code{wls()} 20 | } 21 | \item{method}{If it is \code{bootstrap}, random correlation matrices 22 | are sampled from the \code{tssem1.obj} by the parametric bootstrap. If 23 | it is \code{delta}, the delta method is used to estimate the 24 | heterogeneity of the parameter estimates. 25 | } 26 | \item{interval}{The desired interval, e.g., .8 or .95. 27 | } 28 | \item{Rep}{The number of parametric bootstrap. It is ignored when the 29 | method is \code{delta}. 30 | } 31 | \item{output}{Either a \code{data.frame} or \code{matrices} of the output. 32 | } 33 | \item{nonPD.pop}{If it is \code{replace}, generated non-positive 34 | definite matrices are replaced by generated new ones which are 35 | positive definite. If it is \code{nearPD}, they are replaced by 36 | nearly positive definite matrices by calling 37 | \code{Matrix::nearPD()}. If it is \code{accept}, they are accepted.} 38 | } 39 | 40 | \value{Either a \code{data.frame} or \code{matrices} of the output. 41 | } 42 | \details{ 43 | The bootstrap method is based on the discussion in Cheung (2018) 44 | and Yu et al. (2016). The delta method is an alternative method to 45 | obtain the heterogeneity. 46 | } 47 | \references{ 48 | Cheung, M. W.-L. (2018). Issues in solving the problem of effect size 49 | heterogeneity in meta-analytic structural equation modeling: A 50 | commentary and simulation study on Yu, Downes, Carter, and O'Boyle 51 | (2016). \emph{Journal of Applied Psychology}, \bold{103}, 787-803. 52 | 53 | Yu, J. (Joya), Downes, P. E., Carter, K. M., & O'Boyle, 54 | E. H. (2016). The problem of effect size heterogeneity 55 | in meta-analytic structural equation modeling. 56 | \emph{Journal of Applied Psychology}, \emph{101}, 1457-1473. 57 | } 58 | \author{Mike W.-L. 
Cheung 59 | } 60 | \seealso{ \code{\link[metaSEM]{bootuniR1}}, \code{\link[metaSEM]{bootuniR2}}, \code{\link[metaSEM]{Nohe15}} 61 | } 62 | \keyword{tssem} 63 | -------------------------------------------------------------------------------- /man/uniR1.Rd: -------------------------------------------------------------------------------- 1 | \name{uniR1} 2 | \alias{uniR1} 3 | \title{First Stage analysis of the univariate R (uniR) approach 4 | } 5 | \description{It conducts the first stage analysis of the uniR analysis 6 | by pooling elements of the correlation coefficients individually. 7 | } 8 | \usage{ 9 | uniR1(Cor, n, ...) 10 | } 11 | \arguments{ 12 | \item{Cor}{A list of correlation matrices 13 | } 14 | \item{n}{A vector of sample sizes 15 | } 16 | \item{\dots}{Further arguments which are currently ignored} 17 | } 18 | \value{An object of class \code{uniR1} of the original data, the sample 19 | sizes, the harmonic mean of sample sizes, the average correlation 20 | matrix, the standard errors of the correlation matrix, and the 21 | standard deviations (heterogeneity) of the correlation matrix. 22 | } 23 | \details{ 24 | This function implements the univariate r approach proposed by Viswesvaran 25 | and Ones (1995) to conduct meta-analytic structural equation modeling 26 | (MASEM). It uses Schmidt and Hunter's approach to combine correlation 27 | coefficients. It is included in this package for research interests. The two-stage 28 | structural equation modeling (TSSEM) approach is preferred (e.g., 29 | Cheung, 2015; Cheung & Chan, 2005). 30 | } 31 | \references{ 32 | Cheung, M. W.-L. (2015). \emph{Meta-analysis: A structural equation 33 | modeling approach}. Chichester, West Sussex: John Wiley & Sons, Inc. 34 | 35 | Cheung, M. W.-L., & Chan, W. (2005). Meta-analytic structural equation 36 | modeling: A two-stage approach. \emph{Psychological Methods}, 37 | \bold{10}, 40-64. 38 | 39 | Schmidt, F. L., & Hunter, J. E. (2015). \emph{Methods of 40 | meta-analysis: Correcting error and bias in research findings (3rd 41 | ed.)}. Thousand Oaks, CA: Sage. 42 | 43 | Viswesvaran, C., & Ones, D. S. (1995). Theory testing: Combining 44 | psychometric meta-analysis and structural equations modeling. 45 | \emph{Personnel Psychology}, \bold{48}, 865-885. 46 | } 47 | \author{Mike W.-L. Cheung 48 | } 49 | \seealso{ \code{\link[metaSEM]{uniR2}}, \code{\link[metaSEM]{Becker09}} 50 | } 51 | \keyword{uniR} 52 | -------------------------------------------------------------------------------- /man/vanderPol17.Rd: -------------------------------------------------------------------------------- 1 | \name{vanderPol17} 2 | \alias{vanderPol17} 3 | \docType{data} 4 | \title{Dataset on the effectiveness of multidimensional family therapy in treating 5 | adolescents with multiple behavior problems 6 | } 7 | \description{ 8 | This dataset includes 61 effect sizes from 19 manuscripts nested from 9 | 8 studies reported by van der Pol et al. (2017). It studies the 10 | effectiveness of multidimensional family therapy in treating 11 | adolescents with multiple behavior problems. 
12 | } 13 | \usage{data(vanderPol17)} 14 | 15 | \details{ 16 | A list of data with the following structure: 17 | \describe{ 18 | \item{Number}{Number of the effect size.} 19 | \item{Study}{Authors of the studies.} 20 | \item{N}{Total sample size.} 21 | \item{N_target}{Sample size in the target group.} 22 | \item{N_control}{Sample size in the control group.} 23 | \item{Comparison_condition}{Either cognitive behavioral therapy 24 | (\code{CBT}), combined treatment (\code{CT}) or group therapy (\code{Group}).} 25 | \item{Study_ID}{Level-3 cluster.} 26 | \item{Age_mean}{Mean age of the participants.} 27 | \item{Fllow_up}{Follow-up duration (in months).} 28 | \item{Per_Males}{Percentage of males.} 29 | \item{Per_Minorities}{Percentage of minorities.} 30 | \item{Per_Conduct_disorder}{Percentage of participants with conduct disorder} 31 | \item{Per_Severe_cannabis_users}{Percentage of participants of 32 | severe cannabis use.} 33 | \item{Outcome_measure}{Either substance abuse, delinquency, externalizing and internalizing psychopathology, and 34 | family functioning} 35 | \item{d}{Effect size in Cohen's d.} 36 | \item{v}{Sampling variance of d} 37 | } 38 | } 39 | 40 | \source{ 41 | van der Pol, T. M., Hoeve, M., Noom, M. J., Stams, G. J. J. M., Doreleijers, T. A. H., van Domburgh, L., & Vermeiren, R. R. J. M. (2017). Research Review: The effectiveness of multidimensional family therapy in treating adolescents with multiple behavior problems - a meta-analysis. \emph{Journal of Child Psychology and Psychiatry}, \bold{58}(5), 532-545. https://doi.org/10.1111/jcpp.12685 42 | } 43 | 44 | \examples{ 45 | data(vanderPol17) 46 | } 47 | \keyword{datasets} 48 | -------------------------------------------------------------------------------- /man/vcov.Rd: -------------------------------------------------------------------------------- 1 | \name{vcov} 2 | \alias{vcov.tssem1FEM} 3 | \alias{vcov.tssem1FEM.cluster} 4 | \alias{vcov.tssem1REM} 5 | \alias{vcov.wls} 6 | \alias{vcov.wls.cluster} 7 | \alias{vcov.meta} 8 | \alias{vcov.meta3LFIML} 9 | \alias{vcov.reml} 10 | \alias{vcov.osmasem} 11 | \alias{vcov.osmasem2} 12 | \alias{vcov.mxsem} 13 | \title{Extract Covariance Matrix Parameter Estimates from Objects of 14 | Various Classes 15 | } 16 | \description{It extracts the variance-covariance matrix of the 17 | parameter estimates from objects of various classes. 18 | } 19 | \usage{ 20 | \method{vcov}{tssem1FEM}(object, \dots) 21 | \method{vcov}{tssem1FEM.cluster}(object, \dots) 22 | \method{vcov}{tssem1REM}(object, select = c("all", "fixed", "random"), robust=FALSE, \dots) 23 | \method{vcov}{wls}(object, \dots) 24 | \method{vcov}{wls.cluster}(object, \dots) 25 | \method{vcov}{meta}(object, select = c("all", "fixed", "random"), robust=FALSE, \dots) 26 | \method{vcov}{meta3LFIML}(object, select = c("all", "fixed", "random","allX"), robust=FALSE, \dots) 27 | \method{vcov}{reml}(object, \dots) 28 | \method{vcov}{osmasem}(object, select=c("fixed", "all", "random"), robust=FALSE, \dots) 29 | \method{vcov}{osmasem2}(object, select=c("fixed", "all", "random"), robust=FALSE, \dots) 30 | \method{vcov}{mxsem}(object, robust=FALSE, \dots) 31 | } 32 | %- maybe also 'usage' for other objects documented here. 33 | \arguments{ 34 | \item{object}{An object returned from objects of various classes 35 | } 36 | \item{select}{Select \code{all} for both fixed- and random-effects parameters, \code{fixed} for the 37 | fixed-effects parameters or \code{random} for the random-effects 38 | parameters. 
For \code{meta3LFIML} objects, \code{allX} is used to extract 39 | all parameters, including the predictors and auxiliary variables. 40 | } 41 | \item{robust}{Logical. Whether to use robust standard errors from \code{\link[OpenMx]{imxRobustSE}}. 42 | } 43 | \item{\dots}{Further arguments; currently not in use except for 44 | \code{tssemRobust1}, where they are passed to \code{\link[metafor]{robust}}.} 45 | } 46 | 47 | \value{A variance-covariance matrix of the parameter estimates. 48 | } 49 | 50 | \author{Mike W.-L. Cheung 51 | } 52 | \note{\code{vcov} returns \code{NA} when the \code{diag.constraints=TRUE} 53 | argument is used in \code{wls} objects. 54 | } 55 | 56 | \seealso{ \code{\link[metaSEM]{tssem1}}, \code{\link[metaSEM]{wls}}, 57 | \code{\link[metaSEM]{meta}}, \code{\link[metaSEM]{reml}} 58 | } 59 | \examples{ 60 | ## Random-effects meta-analysis 61 | model1 <- meta(y=yi, v=vi, data=Hox02) 62 | vcov(model1) 63 | 64 | ## Fixed-effects only 65 | vcov(model1, select="fixed") 66 | 67 | ## Random-effects only 68 | vcov(model1, select="random") 69 | } 70 | % Add one or more standard keywords, see file 'KEYWORDS' in the 71 | % R documentation directory. 72 | \keyword{ methods } 73 | 74 | -------------------------------------------------------------------------------- /man/vec2symMat.Rd: -------------------------------------------------------------------------------- 1 | \name{vec2symMat} 2 | \alias{vec2symMat} 3 | \title{Convert a Vector into a Symmetric Matrix 4 | } 5 | \description{It converts a vector into a symmetric matrix by filling 6 | the elements into the lower triangle of the matrix. 7 | } 8 | \usage{ 9 | vec2symMat(x, diag = TRUE, byrow = FALSE) 10 | } 11 | %- maybe also 'usage' for other objects documented here. 12 | \arguments{ 13 | \item{x}{A vector of numerics or characters 14 | } 15 | \item{diag}{Logical. If it is \code{TRUE} (the default), the diagonals 16 | of the created matrix are replaced by elements of \code{x}; 17 | otherwise, the diagonals of the created matrix are replaced by "1".} 18 | \item{byrow}{Logical. If it is \code{FALSE} (the default), the created matrix is filled by columns; otherwise, the matrix is filled by rows. 19 | } 20 | } 21 | 22 | \value{A symmetric square matrix based on column-major order 23 | %% ~Describe the value returned 24 | %% If it is a LIST, use 25 | %% \item{comp1 }{Description of 'comp1'} 26 | %% \item{comp2 }{Description of 'comp2'} 27 | %% ... 28 | } 29 | 30 | \author{Mike W.-L. Cheung 31 | } 32 | 33 | %% ~Make other sections like Warning with \section{Warning }{....} ~ 34 | 35 | \seealso{ \code{\link[metaSEM]{matrix2bdiag}} 36 | %% ~~objects to See Also as \code{\link{help}}, ~~~ 37 | } 38 | \examples{ 39 | vec2symMat(1:6) 40 | # [,1] [,2] [,3] 41 | # [1,] 1 2 3 42 | # [2,] 2 4 5 43 | # [3,] 3 5 6 44 | 45 | vec2symMat(1:6, diag=FALSE) 46 | # [,1] [,2] [,3] [,4] 47 | # [1,] 1 1 2 3 48 | # [2,] 1 1 4 5 49 | # [3,] 2 4 1 6 50 | # [4,] 3 5 6 1 51 | 52 | vec2symMat(letters[1:6]) 53 | # [,1] [,2] [,3] 54 | # [1,] "a" "b" "c" 55 | # [2,] "b" "d" "e" 56 | # [3,] "c" "e" "f" 57 | } 58 | % Add one or more standard keywords, see file 'KEYWORDS' in the 59 | % R documentation directory.
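% An additional note (an illustrative remark, not part of the original examples): with diag=FALSE, vec2symMat() reverses \code{\link[OpenMx]{vechs}}; for instance, m <- vec2symMat(c(0.3, 0.4, 0.5), diag=FALSE) gives a 3 x 3 matrix with a unit diagonal, and OpenMx::vechs(m) returns c(0.3, 0.4, 0.5) again.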
60 | \keyword{utilities} 61 | -------------------------------------------------------------------------------- /man/wvs94a.Rd: -------------------------------------------------------------------------------- 1 | \name{wvs94a} 2 | \alias{wvs94a} 3 | \docType{data} 4 | \title{Forty-four Studies from Cheung (2013)} 5 | \description{ 6 | Between 1990 and 1993, 57,561 adults aged 18 and above from 42 nations 7 | were interviewed by local academic institutes in Eastern European 8 | nations and by professional survey organizations in other nations. 9 | The standardized mean difference (SMD) between males and females on life 10 | satisfaction and life control in each country were calculated as the effect sizes. Positive values indicate that males have higher scores than females do. 11 | } 12 | \usage{data(wvs94a)} 13 | 14 | \details{ 15 | The variables are: 16 | \describe{ 17 | \item{country}{Country} 18 | \item{lifesat}{SMD on life satisfaction} 19 | \item{lifecon}{SMD on life control} 20 | \item{lifesat_var}{Sampling variance of lifesat} 21 | \item{inter_cov}{Sampling covariance between lifesat and lifecon} 22 | \item{lifecon_var}{Sampling variance of lifecon} 23 | \item{gnp}{Gross National Product} 24 | } 25 | 26 | } 27 | \source{ 28 | World Values Study Group. (1994). World Values Survey, 1981-1984 and 1990-1993 [Computer file]. \emph{Ann Arbor, MI: Inter-university Consortium for Political and Social Research.} 29 | } 30 | \references{ 31 | Au, K., & Cheung, M. W.-L. (2004). Intra-cultural variation and job autonomy in 42 32 | countries. \emph{Organization Studies}, \bold{25}, 1339-1362. 33 | 34 | Cheung, M. W.-L. (2013). Multivariate meta-analysis as structural equation models. \emph{Structural Equation Modeling}, \bold{20}, 429-454. 35 | } 36 | \examples{ 37 | \donttest{ 38 | data(wvs94a) 39 | 40 | ## Random-effects model 41 | random.ma1 <- meta(y=cbind(lifesat, lifecon), 42 | v=cbind(lifesat_var, inter_cov, lifecon_var), data=wvs94a, 43 | model.name="Random effects model") 44 | summary(random.ma1) 45 | 46 | ## Random-effects model with both population effect sizes fixed at 0 47 | random.ma2 <- meta(y=cbind(lifesat, lifecon), 48 | v=cbind(lifesat_var, inter_cov, lifecon_var), data=wvs94a, 49 | intercept.constraints=matrix(0, nrow=1, ncol=2), 50 | model.name="Effect sizes are fixed at 0") 51 | summary(random.ma2) 52 | 53 | ## Compare the nested models 54 | anova(random.ma1, random.ma2) 55 | 56 | ## Fixed-effects model by fixing the variance component at 0 57 | fixed.ma <- meta(y=cbind(lifesat, lifecon), 58 | v=cbind(lifesat_var, inter_cov, lifecon_var), data=wvs94a, 59 | RE.constraints=matrix(0, ncol=2, nrow=2), 60 | model.name="Fixed effects model") 61 | summary(fixed.ma) 62 | 63 | ## Mixed-effects model 64 | ## gnp is divided by 10000 and centered by using 65 | ## scale(gnp/10000, scale=FALSE) 66 | mixed.ma1 <- meta(y=cbind(lifesat, lifecon), 67 | v=cbind(lifesat_var, inter_cov, lifecon_var), 68 | x=scale(gnp/10000, scale=FALSE), data=wvs94a, 69 | model.name="GNP as a predictor") 70 | summary(mixed.ma1) 71 | 72 | ## Mixed-effects model with equal regression coefficients 73 | mixed.ma2 <- meta(y=cbind(lifesat, lifecon), 74 | v=cbind(lifesat_var, inter_cov, lifecon_var), 75 | x=scale(gnp/10000, scale=FALSE), data=wvs94a, 76 | coef.constraints=matrix(c("0.0*Eq_slope", 77 | "0.0*Eq_slope"), nrow=2), 78 | model.name="GNP as a predictor with equal slope") 79 | summary(mixed.ma2) 80 | 81 | ## Compare the nested models 82 | anova(mixed.ma1, mixed.ma2) 83 | 84 | ## Plot the multivariate effect 
sizes 85 | plot(random.ma1, main="Estimated effect sizes and their 95\% confidence ellipses", 86 | axis.label=c("Gender difference on life satisfaction", 87 | "Gender difference on life control")) 88 | } 89 | } 90 | \keyword{datasets} 91 | -------------------------------------------------------------------------------- /man/wvs94b.Rd: -------------------------------------------------------------------------------- 1 | \name{wvs94b} 2 | \alias{wvs94b} 3 | \docType{data} 4 | \title{Forty-four Covariance Matrices on Life Satisfaction, Job Satisfaction, and Job Autonomy} 5 | \description{ 6 | Between 1990 and 1993, 57,561 adults aged 18 and above from 42 nations 7 | were interviewed by local academic institutes in Eastern European 8 | nations and by professional survey organizations in other nations. The 9 | covariance matrices among Life Satisfaction, Job Satisfaction, and Job Autonomy were calculated. 10 | } 11 | \usage{data(wvs94b)} 12 | 13 | \details{ 14 | The variables are: 15 | \describe{ 16 | \item{data}{Covariance matrix among Life Satisfaction (LS), Job 17 | Satisfaction (JS), and Job Autonomy (JA)} 18 | \item{n}{Sample size in the country} 19 | } 20 | 21 | } 22 | \source{ 23 | World Values Study Group. (1994). World Values Survey, 1981-1984 and 1990-1993 [Computer file]. \emph{Ann Arbor, MI: Inter-university Consortium for Political and Social Research.} 24 | } 25 | \references{ 26 | Au, K., & Cheung, M. W.-L. (2004). Intra-cultural variation and job autonomy in 42 27 | countries. \emph{Organization Studies}, \bold{25}, 1339-1362. 28 | 29 | Cheung, M.W.-L., & Cheung, S.-F. (2016). Random-effects models for 30 | meta-analytic structural equation modeling: Review, issues, and 31 | illustrations. \emph{Research Synthesis Methods}, \bold{7}, 140-155. 
32 | } 33 | \examples{ 34 | \donttest{ 35 | data(wvs94b) 36 | 37 | ## Get the indirect and the direct effects and 38 | ## their sampling covariance matrices for each study 39 | indirect1 <- indirectEffect(wvs94b$data, wvs94b$n) 40 | indirect1 41 | 42 | ## Multivariate meta-analysis on the indirect and direct effects 43 | indirect2 <- meta(indirect1[, c("ind_eff", "dir_eff")], 44 | indirect1[, c("ind_var", "ind_dir_cov", "dir_var")]) 45 | 46 | summary(indirect2) 47 | } 48 | } 49 | \keyword{datasets} 50 | -------------------------------------------------------------------------------- /tests/testthat.R: -------------------------------------------------------------------------------- 1 | library(testthat) 2 | library(metaSEM) 3 | 4 | test_check("metaSEM") 5 | -------------------------------------------------------------------------------- /vignettes/Examples.html.asis: -------------------------------------------------------------------------------- 1 | %\VignetteIndexEntry{metaSEM: Examples} 2 | %\VignetteEngine{R.rsp::asis} 3 | %\VignetteKeyword{html} 4 | %\VignetteKeyword{examples} 5 | %\VignetteKeyword{package} 6 | -------------------------------------------------------------------------------- /vignettes/metaSEM.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mikewlcheung/metasem/b2a501edea833749f83230836d50f05cd2ce0dc0/vignettes/metaSEM.pdf -------------------------------------------------------------------------------- /vignettes/metaSEM.pdf.asis: -------------------------------------------------------------------------------- 1 | %\VignetteIndexEntry{metaSEM: An R Package for Meta-Analysis using Structural Equation Modeling} 2 | %\VignetteEngine{R.rsp::asis} 3 | %\VignetteKeyword{PDF} 4 | %\VignetteKeyword{vignette} 5 | %\VignetteKeyword{package} 6 | --------------------------------------------------------------------------------