├── .Rbuildignore ├── .github ├── .gitignore └── workflows │ ├── R-CMD-check.yaml │ ├── rhub.yaml │ └── test-coverage.yaml ├── .gitignore ├── .travis.yml ├── BFpack.Rproj ├── CRAN-SUBMISSION ├── DESCRIPTION ├── NAMESPACE ├── NEWS.md ├── R ├── BF.coeftest.R ├── BF.cortest.R ├── BF.coxph.R ├── BF.ergm.R ├── BF.gaussian.R ├── BF.glm.R ├── BF.hetcor.R ├── BF.icc.R ├── BF.lm.R ├── BF.meta.R ├── BF.methods.R ├── BF.mvt_test.R ├── BF.polr.R ├── BF.print.R ├── BF.summary.R ├── BF.survreg.R ├── BF.ttest.R ├── BF.var.R ├── BF.zeroinfl.R ├── BFpack-package.R ├── Fcor.R ├── actors.R ├── attention.R ├── cor_test.print.R ├── fmri.R ├── get_estimates_unique_to_BFpack.R ├── helper_functions.R ├── import_parser.R ├── memory.R ├── relevents.R ├── same_culture.R ├── same_location.R ├── sivan.R ├── therapeutic.R ├── timssICC.r ├── tvprices.R ├── wilson.r └── zzz.R ├── README.Rmd ├── README.md ├── cran-comments.md ├── data ├── Fcor.rda ├── actors.rda ├── attention.rda ├── fmri.rda ├── memory.rda ├── relevents.rda ├── same_culture.rda ├── same_location.rda ├── sivan.rda ├── therapeutic.rda ├── timssICC.rda ├── tvprices.rda └── wilson.rda ├── inst └── CITATION ├── man ├── BF.Rd ├── BFpack-package.Rd ├── Fcor.Rd ├── actors.Rd ├── attention.Rd ├── bartlett_test.Rd ├── cor_test.Rd ├── figures │ ├── logo_BFpack.png │ └── logo_BFpack_small.png ├── fmri.Rd ├── memory.Rd ├── mvt_test.Rd ├── relevents.Rd ├── same_culture.Rd ├── same_location.Rd ├── sivan.Rd ├── therapeutic.Rd ├── timssICC.Rd ├── tvprices.Rd └── wilson.Rd ├── src ├── BFpack_init.c ├── Makevars ├── bct_mixedordinal.f90 ├── bct_prior.f90 ├── rkinds0.mod ├── rkinds1.mod ├── rngfuncs.mod └── rngfuncs1.mod ├── tests ├── testthat.R └── testthat │ ├── test_BFcoeftest.R │ ├── test_BFcortest.R │ ├── test_BFcoxph.R │ ├── test_BFcoxph2.R │ ├── test_BFergm.R │ ├── test_BFglm.R │ ├── test_BFmeta.R │ ├── test_BFmlm.R │ ├── test_BFpolr.R │ ├── test_BFregression.R │ ├── test_BFsurvreg.R │ ├── test_BFzeroinfl.R │ ├── test_anova_manova.R │ ├── 
test_application1_BF.bain_ttest.R │ ├── test_application2_BF.lm.R │ ├── test_application3_BF.BF_bartlett.R │ ├── test_application4_BF.mlm_regression.R │ ├── test_application5_BF.glm_BF_Gaussian.R │ ├── test_application6_BF.mlm_correlations.R │ ├── test_application7_BF.lmerMod.R │ ├── test_bartlett.R │ ├── test_get_estimates_matrix.R │ ├── test_hetcor.R │ ├── test_metafor.R │ ├── test_paper_fmri.R │ ├── test_ttest.R │ └── test_variances.R └── vignettes ├── rsconnect └── documents │ └── vignette_BFpack.Rmd │ └── rpubs.com │ └── rpubs │ └── Document.dcf ├── vignette_BFpack.R ├── vignette_BFpack.Rmd └── vignette_BFpack.html /.Rbuildignore: -------------------------------------------------------------------------------- 1 | ^CRAN-RELEASE$ 2 | ^.*\.Rproj$ 3 | ^\.Rproj\.user$ 4 | dev/ 5 | ^\.travis\.yml$ 6 | ^.*.RData$ 7 | ^README\.Rmd$ 8 | cran-comments.md 9 | ^\.github$ 10 | src/*.o 11 | src/*.so 12 | src/*.dll 13 | bct_mixedordinal_final_full.f90 14 | ^CRAN-SUBMISSION$ 15 | ^data-raw$ 16 | -------------------------------------------------------------------------------- /.github/.gitignore: -------------------------------------------------------------------------------- 1 | *.html 2 | -------------------------------------------------------------------------------- /.github/workflows/R-CMD-check.yaml: -------------------------------------------------------------------------------- 1 | # Workflow derived from https://github.com/r-lib/actions/tree/v2/examples 2 | # Need help debugging build failures? 
Start at https://github.com/r-lib/actions#where-to-find-help 3 | on: 4 | push: 5 | branches: [main, master] 6 | pull_request: 7 | branches: [main, master] 8 | 9 | name: R-CMD-check 10 | 11 | jobs: 12 | R-CMD-check: 13 | runs-on: ${{ matrix.config.os }} 14 | 15 | name: ${{ matrix.config.os }} (${{ matrix.config.r }}) 16 | 17 | strategy: 18 | fail-fast: false 19 | matrix: 20 | config: 21 | - {os: macos-latest, r: 'release'} 22 | - {os: windows-latest, r: 'release'} 23 | - {os: ubuntu-latest, r: 'devel', http-user-agent: 'release'} 24 | - {os: ubuntu-latest, r: 'release'} 25 | - {os: ubuntu-latest, r: 'oldrel-1'} 26 | 27 | env: 28 | GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} 29 | R_KEEP_PKG_SOURCE: yes 30 | 31 | steps: 32 | - uses: actions/checkout@v3 33 | 34 | - uses: r-lib/actions/setup-pandoc@v2 35 | 36 | - uses: r-lib/actions/setup-r@v2 37 | with: 38 | r-version: ${{ matrix.config.r }} 39 | http-user-agent: ${{ matrix.config.http-user-agent }} 40 | use-public-rspm: true 41 | 42 | - uses: r-lib/actions/setup-r-dependencies@v2 43 | with: 44 | extra-packages: any::rcmdcheck 45 | needs: check 46 | 47 | - uses: r-lib/actions/check-r-package@v2 48 | with: 49 | upload-snapshots: true 50 | -------------------------------------------------------------------------------- /.github/workflows/rhub.yaml: -------------------------------------------------------------------------------- 1 | # R-hub's generic GitHub Actions workflow file. It's canonical location is at 2 | # https://github.com/r-hub/actions/blob/v1/workflows/rhub.yaml 3 | # You can update this file to a newer version using the rhub2 package: 4 | # 5 | # rhub::rhub_setup() 6 | # 7 | # It is unlikely that you need to modify this file manually. 
8 | 9 | name: R-hub 10 | run-name: "${{ github.event.inputs.id }}: ${{ github.event.inputs.name || format('Manually run by {0}', github.triggering_actor) }}" 11 | 12 | on: 13 | workflow_dispatch: 14 | inputs: 15 | config: 16 | description: 'A comma separated list of R-hub platforms to use.' 17 | type: string 18 | default: 'linux,windows,macos' 19 | name: 20 | description: 'Run name. You can leave this empty now.' 21 | type: string 22 | id: 23 | description: 'Unique ID. You can leave this empty now.' 24 | type: string 25 | 26 | jobs: 27 | 28 | setup: 29 | runs-on: ubuntu-latest 30 | outputs: 31 | containers: ${{ steps.rhub-setup.outputs.containers }} 32 | platforms: ${{ steps.rhub-setup.outputs.platforms }} 33 | 34 | steps: 35 | # NO NEED TO CHECKOUT HERE 36 | - uses: r-hub/actions/setup@v1 37 | with: 38 | config: ${{ github.event.inputs.config }} 39 | id: rhub-setup 40 | 41 | linux-containers: 42 | needs: setup 43 | if: ${{ needs.setup.outputs.containers != '[]' }} 44 | runs-on: ubuntu-latest 45 | name: ${{ matrix.config.label }} 46 | strategy: 47 | fail-fast: false 48 | matrix: 49 | config: ${{ fromJson(needs.setup.outputs.containers) }} 50 | container: 51 | image: ${{ matrix.config.container }} 52 | 53 | steps: 54 | - uses: r-hub/actions/checkout@v1 55 | - uses: r-hub/actions/platform-info@v1 56 | with: 57 | token: ${{ secrets.RHUB_TOKEN }} 58 | job-config: ${{ matrix.config.job-config }} 59 | - uses: r-hub/actions/setup-deps@v1 60 | with: 61 | token: ${{ secrets.RHUB_TOKEN }} 62 | job-config: ${{ matrix.config.job-config }} 63 | - uses: r-hub/actions/run-check@v1 64 | with: 65 | token: ${{ secrets.RHUB_TOKEN }} 66 | job-config: ${{ matrix.config.job-config }} 67 | 68 | other-platforms: 69 | needs: setup 70 | if: ${{ needs.setup.outputs.platforms != '[]' }} 71 | runs-on: ${{ matrix.config.os }} 72 | name: ${{ matrix.config.label }} 73 | strategy: 74 | fail-fast: false 75 | matrix: 76 | config: ${{ fromJson(needs.setup.outputs.platforms) }} 77 | 78 | steps: 79 | - 
uses: r-hub/actions/checkout@v1 80 | - uses: r-hub/actions/setup-r@v1 81 | with: 82 | job-config: ${{ matrix.config.job-config }} 83 | token: ${{ secrets.RHUB_TOKEN }} 84 | - uses: r-hub/actions/platform-info@v1 85 | with: 86 | token: ${{ secrets.RHUB_TOKEN }} 87 | job-config: ${{ matrix.config.job-config }} 88 | - uses: r-hub/actions/setup-deps@v1 89 | with: 90 | job-config: ${{ matrix.config.job-config }} 91 | token: ${{ secrets.RHUB_TOKEN }} 92 | - uses: r-hub/actions/run-check@v1 93 | with: 94 | job-config: ${{ matrix.config.job-config }} 95 | token: ${{ secrets.RHUB_TOKEN }} 96 | -------------------------------------------------------------------------------- /.github/workflows/test-coverage.yaml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | branches: 4 | - main 5 | - master 6 | pull_request: 7 | branches: 8 | - main 9 | - master 10 | 11 | name: test-coverage 12 | 13 | jobs: 14 | test-coverage: 15 | runs-on: macOS-latest 16 | env: 17 | GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} 18 | steps: 19 | - uses: actions/checkout@v2 20 | 21 | - uses: r-lib/actions/setup-r@v1 22 | 23 | - uses: r-lib/actions/setup-pandoc@v1 24 | 25 | - name: Query dependencies 26 | run: | 27 | install.packages('remotes') 28 | saveRDS(remotes::dev_package_deps(dependencies = TRUE), ".github/depends.Rds", version = 2) 29 | writeLines(sprintf("R-%i.%i", getRversion()$major, getRversion()$minor), ".github/R-version") 30 | shell: Rscript {0} 31 | 32 | - name: Cache R packages 33 | uses: actions/cache@v2 34 | with: 35 | path: ${{ env.R_LIBS_USER }} 36 | key: ${{ runner.os }}-${{ hashFiles('.github/R-version') }}-1-${{ hashFiles('.github/depends.Rds') }} 37 | restore-keys: ${{ runner.os }}-${{ hashFiles('.github/R-version') }}-1- 38 | 39 | - name: Install dependencies 40 | run: | 41 | install.packages(c("remotes")) 42 | remotes::install_deps(dependencies = TRUE) 43 | remotes::install_cran("covr") 44 | shell: Rscript {0} 45 | 46 | - name: Test 
coverage 47 | run: covr::codecov() 48 | shell: Rscript {0} 49 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .Rproj.user 2 | .Rhistory 3 | *.RData 4 | .Ruserdata 5 | dev/ 6 | *.so 7 | *.o 8 | *.dll 9 | src-i386/ 10 | src-x64/ 11 | .Rapp.history 12 | .DS_Store 13 | errordump.RData 14 | R/Savage_Dickey_correction.R -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | # R for travis: see documentation at https://docs.travis-ci.com/user/languages/r 2 | 3 | language: R 4 | sudo: false 5 | cache: packages 6 | 7 | matrix: 8 | include: 9 | - os: linux 10 | r: oldrel 11 | - os: linux 12 | r: release 13 | - os: linux 14 | r: devel 15 | - os: osx 16 | r: release 17 | - osx_image: xcode8 18 | r: devel 19 | -------------------------------------------------------------------------------- /BFpack.Rproj: -------------------------------------------------------------------------------- 1 | Version: 1.0 2 | 3 | RestoreWorkspace: Default 4 | SaveWorkspace: Default 5 | AlwaysSaveHistory: Default 6 | 7 | EnableCodeIndexing: Yes 8 | UseSpacesForTab: Yes 9 | NumSpacesForTab: 2 10 | Encoding: UTF-8 11 | 12 | RnwWeave: Sweave 13 | LaTeX: pdfLaTeX 14 | 15 | AutoAppendNewline: Yes 16 | StripTrailingWhitespace: Yes 17 | 18 | BuildType: Package 19 | PackageUseDevtools: Yes 20 | PackageInstallArgs: --no-multiarch --with-keep.source 21 | PackageRoxygenize: rd,collate,namespace,vignette 22 | -------------------------------------------------------------------------------- /CRAN-SUBMISSION: -------------------------------------------------------------------------------- 1 | Version: 1.4.2 2 | Date: 2025-02-09 20:23:54 UTC 3 | SHA: baa5b9009d4d08b479c062e4798d7cd37fd27562 4 | 
-------------------------------------------------------------------------------- /DESCRIPTION: -------------------------------------------------------------------------------- 1 | Package: BFpack 2 | Type: Package 3 | Title: Flexible Bayes Factor Testing of Scientific Expectations 4 | Version: 1.4.2 5 | Authors@R: 6 | c(person(given = c("Joris"), 7 | family = "Mulder", 8 | role = c("aut", "cre"), 9 | email = "j.mulder3@tilburguniversity.edu"), 10 | person(given = "Caspar", 11 | family = "van Lissa", 12 | role = c("aut", "ctb"), 13 | email = "c.j.vanlissa@uu.nl"), 14 | person(given = "Donald R.", 15 | family = "Williams", 16 | role = c("aut", "ctb"), 17 | email = "drwwilliams@ucdavis.edu"), 18 | person(given = "Xin", 19 | family = "Gu", 20 | role = c("aut", "ctb"), 21 | email = "guxin57@hotmail.com"), 22 | person(given = "Anton", 23 | family = "Olsson-Collentine", 24 | role = c("aut", "ctb"), 25 | email = "J.A.E.OlssonCollentine@tilburguniversity.edu"), 26 | person(given = "Florian", 27 | family = "Boeing-Messing", 28 | role = c("aut", "ctb"), 29 | email = "F.Boeing-Messing@uvt.nl"), 30 | person(given = "Jean-Paul", 31 | family = "Fox", 32 | role = c("aut", "ctb"), 33 | email = "g.j.a.fox@utwente.nl"), 34 | person(given = "Janosch", 35 | family = "Menke", 36 | role = c("ctb"), 37 | email = "janosch.menke@uni-muenster.de"), 38 | person(given = "Robbie", 39 | family = "van Aert", 40 | role = c("ctb"), 41 | email = "R.C.M.vanAert@tilburguniversity.edu"), 42 | person(given = "Barry", 43 | family = "Brown", 44 | role = "ctb"), 45 | person(given = "James", 46 | family = "Lovato", 47 | role = "ctb"), 48 | person(given = "Kathy", 49 | family = "Russell", 50 | role = "ctb"), 51 | person("Lapack 3.8", 52 | role = "ctb"), 53 | person(given = "Jack", 54 | family = "Dongarra", 55 | role = "ctb"), 56 | person(given = "Jim", 57 | family = "Bunch", 58 | role = "ctb"), 59 | person(given = "Cleve", 60 | family = "Moler", 61 | role = "ctb"), 62 | person(given = "Gilbert", 63 | family 
= "Stewart", 64 | role = "ctb"), 65 | person(given = "John", 66 | family = "Burkandt", 67 | role = "ctb"), 68 | person(given = "Ashwith", 69 | family = "Rego", 70 | role = "ctb"), 71 | person(given = "Alexander", 72 | family = "Godunov", 73 | role = "ctb"), 74 | person(given = "Alan", 75 | family = "Miller", 76 | role = "ctb"), 77 | person(given = "Jean-Pierre", 78 | family = "Moreau", 79 | role = "ctb"), 80 | person("The R Core Team", role = "cph") 81 | ) 82 | Maintainer: Joris Mulder 83 | Description: Implementation of default Bayes factors 84 | for testing statistical hypotheses under various statistical models. The package is 85 | intended for applied quantitative researchers in the 86 | social and behavioral sciences, medical research, 87 | and related fields. The Bayes factor tests can be 88 | executed for statistical models such as 89 | univariate and multivariate normal linear models, 90 | correlation analysis, generalized linear models, special cases of 91 | linear mixed models, survival models, relational 92 | event models. Parameters that can be tested are 93 | location parameters (e.g., group means, regression coefficients), 94 | variances (e.g., group variances), and measures of 95 | association (e.g,. polychoric/polyserial/biserial/tetrachoric/product 96 | moments correlations), among others. 97 | The statistical underpinnings are 98 | described in 99 | O'Hagan (1995) , 100 | De Santis and Spezzaferri (2001) , 101 | Mulder and Xin (2022) , 102 | Mulder and Gelissen (2019) , 103 | Mulder (2016) , 104 | Mulder and Fox (2019) , 105 | Mulder and Fox (2013) , 106 | Boeing-Messing, van Assen, Hofman, Hoijtink, and Mulder (2017) , 107 | Hoijtink, Mulder, van Lissa, and Gu (2018) , 108 | Gu, Mulder, and Hoijtink (2018) , 109 | Hoijtink, Gu, and Mulder (2018) , and 110 | Hoijtink, Gu, Mulder, and Rosseel (2018) . When using the 111 | packages, please refer to the package Mulder et al. (2021) 112 | and the relevant methodological papers. 
113 | License: GPL (>= 3) 114 | URL: https://github.com/jomulder/BFpack 115 | BugReports: https://github.com/jomulder/BFpack/issues 116 | Encoding: UTF-8 117 | LazyData: true 118 | NeedsCompilation: yes 119 | RoxygenNote: 7.3.2 120 | Depends: 121 | R (>= 3.0.0), 122 | bain 123 | Imports: 124 | stats, 125 | utils, 126 | MASS, 127 | mvtnorm, 128 | pracma, 129 | lme4, 130 | extraDistr, 131 | ergm, 132 | Bergm, 133 | sandwich, 134 | QRM, 135 | coda, 136 | metaBMA, 137 | methods 138 | Suggests: 139 | testthat, 140 | polycor, 141 | survival, 142 | pscl, 143 | metafor, 144 | knitr, 145 | rmarkdown, 146 | lmtest 147 | VignetteBuilder: 148 | knitr 149 | -------------------------------------------------------------------------------- /NAMESPACE: -------------------------------------------------------------------------------- 1 | # Generated by roxygen2: do not edit by hand 2 | 3 | S3method(BF,bartlett_htest) 4 | S3method(BF,bergm) 5 | S3method(BF,coeftest) 6 | S3method(BF,cor_test) 7 | S3method(BF,coxph) 8 | S3method(BF,default) 9 | S3method(BF,ergm) 10 | S3method(BF,glm) 11 | S3method(BF,hetcor) 12 | S3method(BF,htest) 13 | S3method(BF,lm) 14 | S3method(BF,lmerMod) 15 | S3method(BF,mvt_test) 16 | S3method(BF,polr) 17 | S3method(BF,rma.uni) 18 | S3method(BF,survreg) 19 | S3method(BF,t_test) 20 | S3method(BF,zeroinfl) 21 | S3method(bartlett_test,default) 22 | S3method(get_estimates,bartlett_htest) 23 | S3method(get_estimates,bergm) 24 | S3method(get_estimates,cor_test) 25 | S3method(get_estimates,coxph) 26 | S3method(get_estimates,ergm) 27 | S3method(get_estimates,glm) 28 | S3method(get_estimates,hetcor) 29 | S3method(get_estimates,lm) 30 | S3method(get_estimates,mvt_test) 31 | S3method(get_estimates,polr) 32 | S3method(get_estimates,survreg) 33 | S3method(get_estimates,t_test) 34 | S3method(get_estimates,zeroinfl) 35 | S3method(plot,cor_test) 36 | S3method(print,BF) 37 | S3method(print,cor_test) 38 | S3method(summary,BF) 39 | S3method(summary,cor_test) 40 | export(BF) 41 | 
export(bartlett_test) 42 | export(cor_test) 43 | export(mvt_test) 44 | import(bain) 45 | importFrom(Bergm,bergm) 46 | importFrom(MASS,fitdistr) 47 | importFrom(MASS,ginv) 48 | importFrom(QRM,fit.st) 49 | importFrom(coda,mcmc) 50 | importFrom(ergm,ergmMPLE) 51 | importFrom(extraDistr,qinvgamma) 52 | importFrom(extraDistr,rtnorm) 53 | importFrom(lme4,VarCorr) 54 | importFrom(lme4,getME) 55 | importFrom(metaBMA,prior) 56 | importFrom(methods,is) 57 | importFrom(mvtnorm,dmvnorm) 58 | importFrom(mvtnorm,dmvt) 59 | importFrom(mvtnorm,pmvnorm) 60 | importFrom(mvtnorm,pmvt) 61 | importFrom(mvtnorm,rmvnorm) 62 | importFrom(mvtnorm,rmvt) 63 | importFrom(pracma,Rank) 64 | importFrom(pracma,rref) 65 | importFrom(sandwich,sandwich) 66 | importFrom(stats,approxfun) 67 | importFrom(stats,as.formula) 68 | importFrom(stats,bartlett.test) 69 | importFrom(stats,coef) 70 | importFrom(stats,complete.cases) 71 | importFrom(stats,cov) 72 | importFrom(stats,dbeta) 73 | importFrom(stats,density) 74 | importFrom(stats,dlnorm) 75 | importFrom(stats,dlogis) 76 | importFrom(stats,dnorm) 77 | importFrom(stats,dt) 78 | importFrom(stats,lm) 79 | importFrom(stats,median) 80 | importFrom(stats,model.matrix) 81 | importFrom(stats,nobs) 82 | importFrom(stats,pchisq) 83 | importFrom(stats,pnorm) 84 | importFrom(stats,pt) 85 | importFrom(stats,qnorm) 86 | importFrom(stats,qt) 87 | importFrom(stats,quantile) 88 | importFrom(stats,rWishart) 89 | importFrom(stats,rbeta) 90 | importFrom(stats,rchisq) 91 | importFrom(stats,rgamma) 92 | importFrom(stats,rlnorm) 93 | importFrom(stats,rnorm) 94 | importFrom(stats,rt) 95 | importFrom(stats,runif) 96 | importFrom(stats,sd) 97 | importFrom(stats,setNames) 98 | importFrom(stats,terms) 99 | importFrom(stats,var) 100 | importFrom(stats,vcov) 101 | importFrom(utils,getFromNamespace) 102 | importFrom(utils,globalVariables) 103 | useDynLib(BFpack, .registration = TRUE) 104 | -------------------------------------------------------------------------------- /NEWS.md: 
-------------------------------------------------------------------------------- 1 | # BFpack 1.4.1 2 | 3 | * Date: 2025-02-09 4 | * Bug fix of MCMC sampler for correlation test. 5 | 6 | # BFpack 1.4.0 7 | 8 | * Date: 2024-12-03 9 | * Change of default to FBF instead of adjusted FBF for normal linear models. 10 | * Change of default prior probs of standard (exploratory) tests to .5 for the null. 11 | * Minor changes, and bug fixes. 12 | 13 | # BFpack 1.3.0 14 | 15 | * Date: 2024-06-18 16 | * Package update for implementation step in JASP. 17 | * cor_test now supports ordinal measurement levels. 18 | * bug fixes in Fortran subroutine for cor_test 19 | 20 | 21 | # BFpack 1.2.4 22 | 23 | * Date: 2023-11-16 24 | * Changes to the identification of groups using factors 25 | * Updates for aov and manova objects 26 | * bug fixes 27 | 28 | 29 | # BFpack 1.2.3 30 | 31 | * Date: 2023-10-16 32 | * Faster computation of BFs for correlations. 33 | * Further updates of Fortran code according to CRAN guidelines. 34 | * Minor edits. 35 | 36 | 37 | # BFpack 1.2.2 38 | 39 | * Date: 2023-09-11 40 | * Extension of BF.ergm and BF.bergm for Bayesian hypothesis testing under exponential random graph models 41 | * Updates on the random number generation in Fortran subroutines. 42 | * Minor edits.
43 | 44 | -------------------------------------------------------------------------------- /R/BF.coeftest.R: -------------------------------------------------------------------------------- 1 | #BF method for coeftest class objects 2 | 3 | #' @method BF coeftest 4 | #' @export 5 | BF.coeftest <- function(x, 6 | hypothesis = NULL, 7 | prior.hyp.explo = NULL, 8 | prior.hyp.conf = NULL, 9 | prior.hyp = NULL, 10 | complement = TRUE, 11 | log = FALSE, 12 | cov.prob = 0.95, 13 | ...){ 14 | 15 | logIN <- log 16 | 17 | Sigma <- diag(x[, 2L]^2) 18 | n <- attr(x, "nobs") 19 | 20 | if(is.null(n)) stop("'BF.coeftest' only works if 'nobs.coeftest' gives the number of observations.") 21 | if(!is.null(hypothesis)) warning("constrained hypothesis testing is not supported for objects of class 'coeftest'") 22 | if(!is.null(prior.hyp)) warning("prior specification via 'prior.hyp' is not supported for objects of class 'coeftest'") 23 | #if(!exploratory) stop("only exploratory hypothesis testing is supported for objects of class 'coeftest'") 24 | 25 | out <- BF.default(x[, 1L], Sigma = Sigma, n = n, log = logIN, cov.prob = cov.prob, ...) 
26 | out$model <- x 27 | out$call <- match.call() 28 | out 29 | 30 | } 31 | 32 | 33 | 34 | 35 | -------------------------------------------------------------------------------- /R/BF.coxph.R: -------------------------------------------------------------------------------- 1 | #BF method for coxph class objects 2 | 3 | 4 | #' @method BF coxph 5 | #' @export 6 | BF.coxph <- function(x, 7 | hypothesis = NULL, 8 | prior.hyp.explo = NULL, 9 | prior.hyp.conf = NULL, 10 | prior.hyp = NULL, 11 | complement = TRUE, 12 | log = FALSE, 13 | cov.prob = .95, 14 | ...){ 15 | 16 | #Extract summary statistics 17 | Args <- as.list(match.call()[-1]) 18 | get_est <- get_estimates(x) 19 | Args$x <- get_est$estimate 20 | Args$Sigma <- get_est$Sigma[[1]] 21 | Args$n <- x$nevent 22 | Args$hypothesis <- hypothesis 23 | Args$prior.hyp <- prior.hyp 24 | Args$prior.hyp.explo <- prior.hyp.explo 25 | Args$prior.hyp.conf <- prior.hyp.conf 26 | Args$complement <- complement 27 | Args$log <- log 28 | Args$cov.prob <- cov.prob 29 | out <- do.call(BF, Args) 30 | out$model <- x 31 | out$call <- match.call() 32 | out 33 | } 34 | 35 | 36 | 37 | 38 | -------------------------------------------------------------------------------- /R/BF.ergm.R: -------------------------------------------------------------------------------- 1 | #BF method for ergm class 2 | 3 | #' @importFrom sandwich sandwich 4 | #' @importFrom ergm ergmMPLE 5 | #' @importFrom stats as.formula 6 | #' @importFrom Bergm bergm 7 | #' @method BF ergm 8 | #' @export 9 | BF.ergm <- function(x, 10 | hypothesis = NULL, 11 | prior.hyp.explo = NULL, 12 | prior.hyp.conf = NULL, 13 | prior.hyp = NULL, 14 | complement = TRUE, 15 | log = FALSE, 16 | cov.prob = .95, 17 | ...){ 18 | 19 | if(!(cov.prob>0 & cov.prob<1)){ 20 | stop("The argument 'cov.prob' is a coverage probability for the interval estimates that should lie between 0 and 1. 
The default is 0.95.") 21 | } 22 | 23 | logIN <- log 24 | 25 | # extract coefficients 26 | estimate <- coef(x) 27 | K1 <- length(estimate) 28 | # get design matrix of pseudo likelihood to construct prior covariance matrix 29 | nw <- x$network 30 | form.char <- paste(format(x$formula), collapse = '') 31 | location_tilde <- regexpr("~",form.char)[1] 32 | form.new <- as.formula(paste0("nw ~",substr(form.char,start=location_tilde+1,stop=nchar(form.char)))) 33 | x_MPLE <- ergmMPLE(form.new,output="dyadlist") 34 | design.X <- x_MPLE$predictor[,2+1:K1] 35 | which.edges <- which(colnames(design.X)=="edges") 36 | if(length(which.edges)==0){ #no intercept 'edges' 37 | Xdelta <- as.matrix(design.X) 38 | priorcov <- solve(t(Xdelta)%*%Xdelta) * nrow(Xdelta) 39 | }else{ 40 | Xdelta <- as.matrix(design.X[,-which.edges]) 41 | priorcov.Xdelta <- solve(t(Xdelta)%*%Xdelta) * nrow(Xdelta) 42 | priorcov <- matrix(0,ncol=K1,nrow=K1) 43 | priorcov[which.edges,which.edges] <- 100000 #flat prior for the 'edges' parameter 44 | if(which.edges==1){ 45 | priorcov[2:K1,2:K1] <- priorcov.Xdelta 46 | }else{ 47 | if(which.edges==K1){ 48 | priorcov[1:(K1-1),1:(K1-1)] <- priorcov.Xdelta 49 | }else{ 50 | priorcov[1:(which.edges-1),1:(which.edges-1)] <- priorcov.Xdelta[1:(which.edges-1),1:(which.edges-1)] 51 | priorcov[(which.edges+1):K1,(which.edges+1):K1] <- priorcov.Xdelta[which.edges:(K1-1),which.edges:(K1-1)] 52 | priorcov[1:(which.edges-1),(which.edges+1):K1] <- priorcov.Xdelta[1:(which.edges-1),which.edges:(K1-1)] 53 | priorcov[(which.edges+1):K1,1:(which.edges-1)] <- t(priorcov[1:(which.edges-1),(which.edges+1):K1]) 54 | } 55 | } 56 | } 57 | Bergm.out <- bergm(form.new,prior.mean=rep(0,K1),prior.sigma=priorcov,...) 
58 | #get robust estimates for the Gaussian mean and covariance matrix 59 | post.mean <- apply(Bergm.out$Theta,2,median) 60 | names(post.mean) <- names(estimate) 61 | #get robust estimate of posterior covariance matrix 62 | mlm1 <- lm(Bergm.out$Theta ~ 1) 63 | post.Sigma <- sandwich(mlm1) * nrow(Bergm.out$Theta) 64 | 65 | # use Savage-Dickey approximation of the BF 66 | if(length(which.edges)==0){ 67 | prior.mean = rep(0,K1) 68 | names(prior.mean) <- names(estimate) 69 | row.names(priorcov) <- colnames(priorcov) <- names(estimate) 70 | BFergm_out <- Savage.Dickey.Gaussian(prior.mean = prior.mean, 71 | prior.sigma = priorcov, 72 | post.mean = post.mean, 73 | post.sigma = post.Sigma, 74 | hypothesis = hypothesis, 75 | prior.hyp = prior.hyp, 76 | prior.hyp.explo = prior.hyp.explo, 77 | prior.hyp.conf = prior.hyp.conf, 78 | complement = complement, 79 | log = logIN) 80 | }else{ 81 | prior.mean = rep(0,K1) 82 | names(prior.mean) <- names(estimate) 83 | row.names(priorcov) <- colnames(priorcov) <- names(estimate) 84 | BFergm_out <- Savage.Dickey.Gaussian(prior.mean = prior.mean[-which.edges], 85 | prior.sigma = priorcov[-which.edges,-which.edges], 86 | post.mean = post.mean[-which.edges], 87 | post.sigma = post.Sigma[-which.edges,-which.edges], 88 | hypothesis = hypothesis, 89 | prior.hyp = prior.hyp, 90 | prior.hyp.explo = prior.hyp.explo, 91 | prior.hyp.conf = prior.hyp.conf, 92 | complement = complement, 93 | log = logIN) 94 | } 95 | 96 | CrI_LB <- (1 - cov.prob)/2 97 | CrI_UB <- 1 - (1 - cov.prob)/2 98 | postestimates <- cbind(apply(Bergm.out$Theta,2,mean), 99 | apply(Bergm.out$Theta,2,median), 100 | apply(Bergm.out$Theta,2,quantile,CrI_LB), 101 | apply(Bergm.out$Theta,2,quantile,CrI_UB)) 102 | rownames(postestimates) <- names(estimate) 103 | colnames(postestimates) <- c("mean","median","2.5%","97.5%") 104 | 105 | BFergm_out$estimates <- postestimates 106 | BFergm_out$model <- x 107 | BFergm_out$call <- match.call() 108 | BFergm_out$bayesfactor <- "Bayes factors 
based on unit-information priors and Gaussian approximations" 109 | BFergm_out$parameter <- "ERGM coefficients" 110 | BFergm_out$model_update <- Bergm.out 111 | BFergm_out$prior.parameters <- list(prior.mean=prior.mean,prior.cov=priorcov) 112 | 113 | return(BFergm_out) 114 | } 115 | 116 | #' @method get_estimates ergm 117 | #' @export 118 | get_estimates.ergm <- function(x, ...){ 119 | 120 | nw <- x$network 121 | form.char <- paste(format(x$formula), collapse = '') 122 | location_tilde <- regexpr("~",form.char)[1] 123 | form.new <- as.formula(paste0("nw ~",substr(form.char,start=location_tilde+1,stop=nchar(form.char)))) 124 | 125 | estimate <- coef(x) 126 | K1 <- length(estimate) 127 | x_MPLE <- ergmMPLE(form.new,output="dyadlist") 128 | design.X <- x_MPLE$predictor[,2+1:K1] 129 | which.edges <- which(colnames(design.X)=="edges") 130 | out <- list() 131 | if(length(which.edges)==0){ #no intercept 'edges' 132 | out$estimate <- coef(x) 133 | out$Sigma <- list(vcov(x)) 134 | }else{ 135 | out$estimate <- coef(x)[-which.edges] 136 | out$Sigma <- list(as.matrix(vcov(x)[-which.edges,-which.edges])) 137 | } 138 | class(out) <- "model_estimates" 139 | attr(out, "analysisType") <- "ergm" 140 | out 141 | } 142 | 143 | 144 | #' @method BF bergm 145 | #' @export 146 | BF.bergm <- function(x, 147 | hypothesis = NULL, 148 | prior.hyp.explo = NULL, 149 | prior.hyp.conf = NULL, 150 | prior.hyp = NULL, 151 | complement = TRUE, 152 | log = FALSE, 153 | ...){ 154 | 155 | logIN <- log 156 | 157 | form.char <- paste(format(x$formula), collapse = '') 158 | location_tilde <- regexpr("~",form.char)[1] 159 | name.nw <- substr(form.char,start=1,stop=location_tilde-2) 160 | if(!exists(name.nw)){ 161 | stop(paste0("For an object of class 'bergm', the function 'BF()' only runs if the network data object '",name.nw, 162 | "' is also present in the environment.")) 163 | } 164 | 165 | # first check if effect names in hypothesis argument correspond with names in x 166 | coef_names_hyp <- 
names(get_estimates(x)$estimate) 167 | if(!is.null(hypothesis)){ 168 | test0 <- parse_hypothesis(coef_names_hyp,hypothesis) 169 | } 170 | 171 | # extract coefficients 172 | estimate <- apply(x$Theta,2,median) 173 | K1 <- length(estimate) 174 | coef_names <- paste0("theta",1:K1) 175 | # get design matrix of pseudo likelihood to construct prior covariance matrix 176 | x_MPLE <- ergmMPLE(formula=x$formula,output="dyadlist") 177 | design.X <- x_MPLE$predictor[,2+1:K1] 178 | which.edges <- which(colnames(design.X)=="edges") 179 | if(length(which.edges)==0){ #no intercept 'edges' 180 | Xdelta <- as.matrix(design.X) 181 | priorcov <- solve(t(Xdelta)%*%Xdelta) * nrow(Xdelta) 182 | }else{ 183 | Xdelta <- as.matrix(design.X[,-which.edges]) 184 | priorcov.Xdelta <- solve(t(Xdelta)%*%Xdelta) * nrow(Xdelta) 185 | priorcov <- matrix(0,ncol=K1,nrow=K1) 186 | priorcov[which.edges,which.edges] <- 100000 #flat prior for the 'edges' parameter 187 | if(which.edges==1){ 188 | priorcov[2:K1,2:K1] <- priorcov.Xdelta 189 | }else{ 190 | if(which.edges==K1){ 191 | priorcov[1:(K1-1),1:(K1-1)] <- priorcov.Xdelta 192 | }else{ 193 | priorcov[1:(which.edges-1),1:(which.edges-1)] <- priorcov.Xdelta[1:(which.edges-1),1:(which.edges-1)] 194 | priorcov[(which.edges+1):K1,(which.edges+1):K1] <- priorcov.Xdelta[which.edges:(K1-1),which.edges:(K1-1)] 195 | priorcov[1:(which.edges-1),(which.edges+1):K1] <- priorcov.Xdelta[1:(which.edges-1),which.edges:(K1-1)] 196 | priorcov[(which.edges+1):K1,1:(which.edges-1)] <- t(priorcov[1:(which.edges-1),(which.edges+1):K1]) 197 | } 198 | } 199 | } 200 | Bergm.out <- bergm(x$formula,prior.mean=rep(0,K1),prior.sigma=priorcov,...) 
201 | #get robust estimates for the Gaussian mean and covariance matrix 202 | post.mean <- apply(Bergm.out$Theta,2,median) 203 | names(post.mean) <- paste0("theta",1:K1) 204 | #get robust estimate of posterior covariance matrix 205 | mlm1 <- lm(Bergm.out$Theta ~ 1) 206 | post.Sigma <- sandwich(mlm1) * nrow(Bergm.out$Theta) 207 | 208 | # use Savage-Dickey approximation of the BF 209 | if(length(which.edges)==0){ 210 | prior.mean = rep(0,K1) 211 | names(prior.mean) <- coef_names 212 | row.names(priorcov) <- colnames(priorcov) <- coef_names 213 | BFergm_out <- Savage.Dickey.Gaussian(prior.mean = prior.mean, 214 | prior.sigma = priorcov, 215 | post.mean = post.mean, 216 | post.sigma = post.Sigma, 217 | hypothesis = hypothesis, 218 | prior.hyp = prior.hyp, 219 | prior.hyp.explo = prior.hyp.explo, 220 | prior.hyp.conf = prior.hyp.conf, 221 | complement = complement, 222 | log = logIN) 223 | }else{ 224 | prior.mean = rep(0,K1) 225 | names(prior.mean) <- coef_names 226 | row.names(priorcov) <- colnames(priorcov) <- coef_names 227 | BFergm_out <- Savage.Dickey.Gaussian(prior.mean = prior.mean[-which.edges], 228 | prior.sigma = priorcov[-which.edges,-which.edges], 229 | post.mean = post.mean[-which.edges], 230 | post.sigma = post.Sigma[-which.edges,-which.edges], 231 | hypothesis = hypothesis, 232 | prior.hyp = prior.hyp, 233 | prior.hyp.explo = prior.hyp.explo, 234 | prior.hyp.conf = prior.hyp.conf, 235 | complement = complement, 236 | log = logIN) 237 | } 238 | 239 | postestimates <- cbind(apply(Bergm.out$Theta,2,mean), 240 | apply(Bergm.out$Theta,2,median), 241 | apply(Bergm.out$Theta,2,quantile,.025), 242 | apply(Bergm.out$Theta,2,quantile,.975)) 243 | rownames(postestimates) <- names(estimate) 244 | colnames(postestimates) <- c("mean","median","2.5%","97.5%") 245 | 246 | BFergm_out$estimates <- postestimates 247 | BFergm_out$model <- x 248 | BFergm_out$call <- match.call() 249 | BFergm_out$bayesfactor <- "Bayes factors based on unit-information priors and Gaussian 
approximations" 250 | BFergm_out$parameter <- "ERGM coefficients" 251 | BFergm_out$model_update <- Bergm.out 252 | BFergm_out$prior.parameters <- list(prior.mean=prior.mean,prior.cov=priorcov) 253 | 254 | return(BFergm_out) 255 | } 256 | 257 | #' @method get_estimates bergm 258 | #' @export 259 | get_estimates.bergm <- function(x, ...){ 260 | 261 | form.char <- paste(format(x$formula), collapse = '') 262 | location_tilde <- regexpr("~",form.char)[1] 263 | name.nw <- substr(form.char,start=1,stop=location_tilde-2) 264 | if(!exists(name.nw)){ 265 | stop(paste0("For an object of class 'bergm', the function 'BF()' only runs if the network data object '",name.nw, 266 | "' is also present in the environment.")) 267 | } 268 | 269 | K1 <- length(apply(x$Theta,2,median)) 270 | names.bergm.coef <- paste0("theta",1:K1) 271 | x_MPLE <- ergmMPLE(formula=x$formula,output="dyadlist") 272 | design.X <- x_MPLE$predictor[,2+1:K1] 273 | which.edges <- which(colnames(design.X)=="edges") 274 | out <- list() 275 | if(length(which.edges)==0){ #no intercept 'edges' 276 | out$estimate <- apply(x$Theta,2,median) 277 | names(out$estimate) <- names.bergm.coef 278 | mlm1 <- lm(x$Theta ~ 1) 279 | out$Sigma <- list(sandwich(mlm1) * nrow(x$Theta)) 280 | colnames(out$Sigma[[1]]) <- row.names(out$Sigma[[1]]) <- names(out$estimate) 281 | }else{ 282 | out$estimate <- apply(x$Theta,2,median) 283 | names(out$estimate) <- names.bergm.coef 284 | out$estimate <- out$estimate[-which.edges] 285 | mlm1 <- lm(x$Theta ~ 1) 286 | out$Sigma <- list(sandwich(mlm1)[-which.edges,-which.edges] * nrow(x$Theta)) 287 | colnames(out$Sigma[[1]]) <- row.names(out$Sigma[[1]]) <- names(out$estimate) 288 | } 289 | class(out) <- "model_estimates" 290 | attr(out, "analysisType") <- "ergm" 291 | out 292 | } 293 | 294 | 295 | 296 | 297 | -------------------------------------------------------------------------------- /R/BF.glm.R: -------------------------------------------------------------------------------- 1 | #BF method for 
#BF method for glm classes

#' @method BF glm
#' @export
BF.glm <- function(x,
                   hypothesis = NULL,
                   prior.hyp.explo = NULL,
                   prior.hyp.conf = NULL,
                   prior.hyp = NULL,
                   complement = TRUE,
                   log = FALSE,
                   cov.prob = .95,
                   ...){

  # Build the argument list for the downstream BF() dispatch once; only the
  # first argument (and, for non-Gaussian families, the summary statistics)
  # differs between the two branches below.
  Args <- as.list(match.call()[-1])

  # (fix) 'as.character(x$family)[1]' relied on the internal ordering of the
  # family list; x$family$family is the documented field holding the family
  # name for glm fits.
  if(x$family$family == "gaussian"){
    # a Gaussian GLM is an ordinary linear model: delegate to BF.lm
    class(x) <- "lm"
    Args$x <- x
  }else{
    # otherwise use a Gaussian approximation based on the estimates and
    # their error covariance matrix
    get_est <- get_estimates(x)
    Args$x <- get_est$estimate
    Args$Sigma <- get_est$Sigma[[1]]
    Args$n <- nobs(x)
  }
  Args$hypothesis <- hypothesis
  Args$prior.hyp <- prior.hyp
  Args$prior.hyp.conf <- prior.hyp.conf
  Args$prior.hyp.explo <- prior.hyp.explo
  Args$complement <- complement
  Args$log <- log
  Args$cov.prob <- cov.prob

  out <- do.call(BF, Args)
  out$model <- x
  out$call <- match.call()

  out
}
#' @method BF hetcor
#' @export
BF.hetcor <- function(x,
                      hypothesis = NULL,
                      prior.hyp.explo = NULL,
                      prior.hyp.conf = NULL,
                      prior.hyp = NULL,
                      complement = TRUE,
                      log = FALSE,
                      cov.prob = .95,
                      ...){

  if(!(cov.prob > 0 & cov.prob < 1)){
    stop("The argument 'cov.prob' is a coverage probability for the interval estimates that should lie between 0 and 1. The default is 0.95.")
  }
  CrI_LB <- (1 - cov.prob)/2
  CrI_UB <- 1 - (1 - cov.prob)/2

  logIN <- log

  # check proper usage of argument 'prior.hyp.conf' and 'prior.hyp.explo'
  if(!is.null(prior.hyp.conf)){
    prior.hyp <- prior.hyp.conf
  }
  prior.hyp.explo <- process.prior.hyp.explo(prior_hyp_explo = prior.hyp.explo, model = x)

  get_est <- get_estimates(x)
  P <- nrow(x$std.errors)
  numcorr <- P*(P-1)/2
  estimates <- get_est$estimate
  errcov <- get_est$Sigma[[1]]

  # Use Fisher transformed correlations for both exploratory and confirmatory
  # testing to get consistent results; skewness in the likelihood is ignored.
  # The median/variance of the transformed sampling draws give the Gaussian
  # approximation on the Fisher scale.
  est.var.F <- do.call(cbind, lapply(1:numcorr, function(c){
    draws.norm <- rnorm(1e5, mean = estimates[c], sd = sqrt(errcov[c,c]))
    draws.norm.F <- FisherZ(draws.norm[draws.norm < 1 & draws.norm > -1])
    return(c(median(draws.norm.F), var(draws.norm.F)))
  }))
  estimates.F <- est.var.F[1,]
  if(numcorr > 1){
    errcov.F <- diag(est.var.F[2,])
  }else{
    errcov.F <- as.matrix(est.var.F[2,])
  }
  corr_names <- names(get_est$estimate)
  names(estimates.F) <- colnames(errcov.F) <- row.names(errcov.F) <- corr_names

  # exploratory BF testing: log density / log tail probabilities at zero
  relfit <- matrix(c(dnorm(0, mean = estimates.F, sd = sqrt(diag(errcov.F)), log = TRUE),
                     pnorm(0, mean = estimates.F, sd = sqrt(diag(errcov.F)), log.p = TRUE),
                     pnorm(0, mean = estimates.F, sd = sqrt(diag(errcov.F)), log.p = TRUE,
                           lower.tail = FALSE)), ncol = 3)
  # get draws from the joint uniform prior to compute relative measures
  if(sum(P == Fcor$P) == 0){
    # number of draws needed to get 1e7 draws for the marginal of one
    # Fisher transformed correlation
    numdraws <- round(1e7/(P*(P-1)/2))
    drawsJU <- draw_ju_r(P, samsize = numdraws, Fisher = 1)
    approx_studt <- QRM::fit.st(c(drawsJU))$par.ests[c(1,3)]
  }else{
    # use the estimate of the scale from the Fcor object; the df estimates
    # show some numerical error for P > 20, so use a fitted line there
    approx_studt <- unlist(c(Fcor[which(P == Fcor$P), 1:2]))
    if(P > 20){
      # use fitted linear line rather than rough estimate of the df
      slpe1 <- 2.944494
      intcept1 <- 1.864901
      approx_studt[1] <- P * slpe1 + intcept1
    }
  }
  relcomp0 <- dt(0, df = approx_studt[1], log = TRUE) - log(approx_studt[2]) # all marginal priors are the same
  relcomp <- matrix(c(rep(relcomp0, numcorr), rep(log(.5), numcorr*2)), ncol = 3)
  row.names(relfit) <- row.names(relcomp) <- names(estimates.F)

  BFtu_exploratory <- relfit - relcomp
  # (fix) this assignment was duplicated in the original
  colnames(BFtu_exploratory) <- c("Pr(=0)","Pr(<0)","Pr(>0)")
  maxrow <- apply(BFtu_exploratory, 1, max)
  BFtu_explo_norm <- exp(BFtu_exploratory - maxrow %*% t(rep(1, ncol(BFtu_exploratory)))) *
    rep(1, nrow(BFtu_exploratory)) %*% t(prior.hyp.explo[[1]])
  PHP_exploratory <- BFtu_explo_norm / apply(BFtu_explo_norm, 1, sum)

  if(logIN == FALSE){
    BFtu_exploratory <- exp(BFtu_exploratory)
  }

  # confirmatory BF testing
  if(!is.null(hypothesis)){
    numG <- 1
    numcorrgroup <- numcorr
    varnames <- list(row.names(x$correlations))
    # get all name combinations for the correlations (similar to BF.cor_test)
    corrnames <- lapply(1:numG, function(g){
      matrix(unlist(lapply(1:P, function(p2){
        unlist(lapply(1:P, function(p1){
          if(numG == 1){
            paste0(varnames[[g]][p1], "_with_", varnames[[g]][p2])
          }else{
            paste0(varnames[[g]][p1], "_with_", varnames[[g]][p2], "_in_g", as.character(g))
          }
        }))
      })), nrow = P)
    })
    x$corrnames <- corrnames

    params_in_hyp1 <- params_in_hyp(hypothesis)

    corr_names <- unlist(lapply(1:length(x$corrnames), function(g){
      c(x$corrnames[[g]][lower.tri(x$corrnames[[g]])],
        t(x$corrnames[[g]])[lower.tri(x$corrnames[[g]])])
    })) # which includes both Y1_with_Y2 and Y2_with_Y1

    parse_hyp <- parse_hypothesis(corr_names, hypothesis)
    parse_hyp$hyp_mat <- do.call(rbind, parse_hyp$hyp_mat)
    # combine the coefficients of equivalent correlations,
    # e.g., cor(Y1,Y2) = cor(Y2,Y1)
    select1 <- rep(1:numcorrgroup, numG) + rep((0:(numG-1))*2*numcorrgroup, each = numcorrgroup)
    select2 <- rep(numcorrgroup + 1:numcorrgroup, numG) + rep((0:(numG-1))*2*numcorrgroup, each = numcorrgroup)
    if(nrow(parse_hyp$hyp_mat) == 1){
      parse_hyp$hyp_mat <-
        t(as.matrix(c(parse_hyp$hyp_mat[,select1] + parse_hyp$hyp_mat[,select2], parse_hyp$hyp_mat[,numcorrgroup*2*numG+1])))
    }else{
      parse_hyp$hyp_mat <-
        cbind(parse_hyp$hyp_mat[,select1] + parse_hyp$hyp_mat[,select2], parse_hyp$hyp_mat[,numcorrgroup*2*numG+1])
    }
    # create coefficients with equality and order constraints
    RrList <- make_RrList2(parse_hyp)
    RrE <- RrList[[1]]
    RrO <- RrList[[2]]

    numhyp <- length(RrE)
    # NOTE(review/fix): the relative fit was computed on the raw correlation
    # scale (estimates, errcov) while the prior measures below and the
    # complement use the Fisher transformed scale; the Fisher scale is used
    # here too, consistent with the comment above about Fisher transforming
    # both the exploratory and confirmatory tests — TODO confirm against the
    # upstream BFpack implementation.
    relfit <- t(matrix(unlist(lapply(1:numhyp, function(h){
      Gaussian_measures(estimates.F, errcov.F, RrE1 = RrE[[h]], RrO1 = RrO[[h]],
                        names1 = names(estimates.F),
                        constraints1 = parse_hyp$original_hypothesis[h])
    })), nrow = 2))
    # approximate the unconstrained Fisher transformed correlations with a
    # multivariate Student t
    mean0 <- rep(0, numcorr)
    if(numcorr == 1){
      Scale0 <- as.matrix(approx_studt[2]**2)
    }else{
      Scale0 <- diag(rep(approx_studt[2]**2, numcorr))
    }
    df0 <- round(approx_studt[1])
    relcomp <- t(matrix(unlist(lapply(1:numhyp, function(h){
      relcomp_h <- Student_measures(mean1 = mean0,
                                    Scale1 = Scale0,
                                    df1 = df0,
                                    RrE1 = RrE[[h]],
                                    RrO1 = RrO[[h]])
      return(relcomp_h)
    })), nrow = 2))

    row.names(relcomp) <- parse_hyp$original_hypothesis
    row.names(relfit) <- parse_hyp$original_hypothesis
    # evaluation of the complement hypothesis
    if(complement == TRUE){
      relfit <- Gaussian_prob_Hc(estimates.F, errcov.F, relfit, RrO)
      relcomp <- Student_prob_Hc(mean1 = mean0, scale1 = Scale0, df1 = df0, relmeas1 = relcomp,
                                 constraints = NULL, RrO1 = RrO)
    }
    # (fix) this assignment was duplicated ('row.names(relfit) <-' twice);
    # relcomp keeps the full hypothesis strings, used for 'hypotheses' below
    hypothesisshort <- unlist(lapply(1:nrow(relfit), function(h) paste0("H", as.character(h))))
    row.names(relfit) <- hypothesisshort

    colnames(relcomp) <- c("c_E","c_O")
    colnames(relfit) <- c("f_E","f_O")
    # computation of confirmatory BFs and PHPs; the complement hypothesis
    # vs Hu has been appended to relfit/relcomp above
    BFtu_confirmatory <- c(apply(relfit - relcomp, 1, sum))
    # check input of prior probabilities
    if(is.null(prior.hyp)){
      priorprobs <- rep(1/length(BFtu_confirmatory), length(BFtu_confirmatory))
    }else{
      if(!is.numeric(prior.hyp) || length(prior.hyp) != length(BFtu_confirmatory)){
        warning(paste0("Argument 'prior.hyp' should be numeric and of length ", as.character(length(BFtu_confirmatory)), ". Equal prior probabilities are used."))
        priorprobs <- rep(1/length(BFtu_confirmatory), length(BFtu_confirmatory))
      }else{
        priorprobs <- prior.hyp
      }
    }
    names(priorprobs) <- names(BFtu_confirmatory)
    # normalize on the log scale for numerical stability
    maxBFtu <- max(BFtu_confirmatory)
    PHP_confirmatory <- exp(BFtu_confirmatory - maxBFtu)*priorprobs /
      sum(exp(BFtu_confirmatory - maxBFtu)*priorprobs)
    BFtable <- cbind(relcomp, relfit, relfit[,1]-relcomp[,1], relfit[,2]-relcomp[,2],
                     apply(relfit, 1, sum) - apply(relcomp, 1, sum), PHP_confirmatory)
    BFtable[,1:7] <- exp(BFtable[,1:7])
    row.names(BFtable) <- names(BFtu_confirmatory)
    colnames(BFtable) <- c("complex=","complex>","fit=","fit>","BF=","BF>","BF","PHP")
    BFmatrix_confirmatory <- matrix(rep(BFtu_confirmatory, length(BFtu_confirmatory)), ncol = length(BFtu_confirmatory)) -
      t(matrix(rep(BFtu_confirmatory, length(BFtu_confirmatory)), ncol = length(BFtu_confirmatory)))
    diag(BFmatrix_confirmatory) <- 0
    row.names(BFmatrix_confirmatory) <- colnames(BFmatrix_confirmatory) <- names(BFtu_confirmatory)
    hypotheses <- row.names(relcomp)

    if(logIN == FALSE){
      BFtu_confirmatory <- exp(BFtu_confirmatory)
      BFmatrix_confirmatory <- exp(BFmatrix_confirmatory)
    }

  }else{
    BFtu_confirmatory <- PHP_confirmatory <- BFmatrix_confirmatory <- relfit <-
      relcomp <- hypotheses <- BFtable <- priorprobs <- NULL
  }

  # Store in output
  BF_out <- list(
    BFtu_exploratory = BFtu_exploratory,
    PHP_exploratory = PHP_exploratory,
    BFtu_confirmatory = BFtu_confirmatory,
    PHP_confirmatory = PHP_confirmatory,
    BFmatrix_confirmatory = BFmatrix_confirmatory,
    BFtable_confirmatory = BFtable,
    prior.hyp = priorprobs,
    hypotheses = hypotheses,
    estimates = estimates,
    model = x,
    bayesfactor = "Bayes factors based on joint uniform priors",
    parameter = "measures of association",
    log = logIN,
    call = match.call())

  class(BF_out) <- "BF"

  return(BF_out)

}
#' @title Multivariate Student t test
#' @description First step to perform a Bayesian multivariate one sample
#' Student t test using the (adjusted) fractional Bayes factor via the
#' \code{BF()} function.
#'
#' @details \code{X} must be a data matrix and \code{null} must be a vector
#' of the assumed null values of the variables.
#'
#' @param X a data matrix with the variables in the columns.
#' @param Y an optional data matrix with the variables in the columns.
#' @param null a vector of the null values of the variables.
#' @param paired a logical indicating whether you want a multivariate paired t-test.
#' @param ... further arguments to be passed to or from methods.
#'
#' @return An object that can be applied to the \code{BF()}.
#'
#' @references Mulder, J. and Gu, X. (2023). Bayesian Testing of Scientific
#' Expectations under Multivariate Normal Linear Models. Multivariate Behavioral
#' Research, 57, 767-783. DOI: 10.1080/00273171.2021.1904809.
#'
#' @examples
#'
#' mvt_fmri <- mvt_test(fmri[,1:2],null = c(0,0))
#' BF(mvt_fmri)
#'
#' # the same test can be executed via the lm() function
#' intercept <- rep(1,nrow(fmri))
#' lm1 <- lm(cbind(Face,Vehicle) ~ -1 + intercept, data=fmri)
#' BF(lm1,hypothesis="intercept_on_Face=intercept_on_Vehicle=0")
#'
#' @rdname mvt_test
#' @export
mvt_test <- function(X, Y, null = NULL, paired = FALSE, ...){

  # Returns 'null' completed to a zero vector of length p, or errors when the
  # user-supplied null vector has the wrong length.
  complete_null <- function(null, p){
    if(is.null(null)){
      return(rep(0, p))
    }
    if(length(null) != p){
      stop("'null' must be a vector of length equal to the number of variables.")
    }
    null
  }

  # Tags a multivariate lm fit as an 'mvt_test' object carrying the test
  # meta data used by BF.mvt_test / get_estimates.mvt_test.
  as_mvt_test <- function(fit, null, numpop, paired){
    fit$null <- null
    fit$numpop <- numpop
    fit$paired <- paired
    class(fit) <- "mvt_test"
    fit
  }

  if(missing(Y)){
    # one-sample multivariate t test
    X <- as.data.frame(as.matrix(X))
    p <- ncol(X)
    n <- nrow(X)
    null <- complete_null(null, p)
    if(p == 1){
      out <- t_test(x = X, mu = null)
    }else{
      intercept <- rep(1, n)
      varnames <- colnames(X)
      formu <- as.formula(paste0("cbind(", paste0(varnames, collapse = ","), ") ~ -1 + intercept"))
      out <- as_mvt_test(lm(formu, data = X), null, numpop = 1, paired = paired)
    }
  }else if(paired == TRUE){
    # paired multivariate t test, carried out on the within-pair differences
    p <- ncol(X)
    n <- nrow(X)
    null <- complete_null(null, p)
    if(dim(X)[1] != dim(Y)[1] || dim(X)[2] != dim(Y)[2]){
      stop("X and Y must have same dimension in case of paired test")
    }
    if(p == 1){
      # NOTE(review/fix): the original called t_test(x = X - Y, paired = TRUE)
      # without a 'y' argument, which t.test-style implementations reject; a
      # paired test is equivalent to a one-sample test on the differences.
      out <- t_test(x = X - Y, mu = null)
    }else{
      intercept <- rep(1, n)
      varnames <- colnames(X)
      formu <- as.formula(paste0("cbind(", paste0(varnames, collapse = ","), ") ~ -1 + intercept"))
      df.XY <- as.data.frame(X - Y)
      colnames(df.XY) <- varnames
      out <- as_mvt_test(lm(formu, data = df.XY), null, numpop = 1, paired = paired)
    }
  }else{
    # independent samples multivariate t test (equal covariances assumed)
    p <- ncol(X)
    n1 <- nrow(X)
    n2 <- nrow(Y)
    N <- n1 + n2
    null <- complete_null(null, p)
    if(dim(X)[2] != dim(Y)[2]){
      stop("X and Y must have same number of columns")
    }
    if(p == 1){
      out <- t_test(x = X, y = Y, mu = null, paired = paired)
    }else{
      intercept <- rep(1, N)
      # dummy: 1 for rows of X, 0 for rows of Y, so its coefficient is the
      # mean difference between the two populations
      difference <- c(rep(1, n1), rep(0, n2))
      varnames <- colnames(X)
      formu <- as.formula(paste0("cbind(", paste0(varnames, collapse = ","), ") ~ -1 + intercept + difference"))
      df.XY <- as.data.frame(rbind(X, Y))
      colnames(df.XY) <- varnames
      out <- as_mvt_test(lm(formu, data = df.XY), null, numpop = 2, paired = paired)
    }
  }

  return(out)
}

#' @method get_estimates mvt_test
#' @export
get_estimates.mvt_test <- function(x, ...){
  # Reuse the lm extractor, then relabel (and for two populations subset)
  # the coefficients so hypothesis names refer to the tested quantities.
  class(x) <- "lm"
  x_est <- get_estimates(x)
  if(x$numpop == 1){
    if(x$paired == FALSE){
      names(x_est$estimate) <- colnames(x_est$Sigma[[1]]) <-
        row.names(x_est$Sigma[[1]]) <- gsub("intercept_on_", "", names(x_est$estimate))
    }else{
      names(x_est$estimate) <- colnames(x_est$Sigma[[1]]) <-
        row.names(x_est$Sigma[[1]]) <- gsub("intercept_on_", "difference_", names(x_est$estimate))
    }
  }else{
    # two populations: keep only the 'difference' coefficients (every second
    # entry of the interleaved intercept/difference coefficient vector)
    P <- ncol(x$coefficients)
    select_diff <- 2*(1:P)
    names(x_est$estimate) <- colnames(x_est$Sigma[[1]]) <-
      row.names(x_est$Sigma[[1]]) <- gsub("difference_on_", "difference_", names(x_est$estimate))
    x_est$estimate <- x_est$estimate[select_diff]
    x_est$Sigma[[1]] <- x_est$Sigma[[1]][select_diff, select_diff]
  }
  x_est
}
#' @method BF mvt_test
#' @export
BF.mvt_test <- function(x,
                        hypothesis = NULL,
                        prior.hyp.explo = NULL,
                        prior.hyp.conf = NULL,
                        prior.hyp = NULL,
                        complement = TRUE,
                        log = FALSE,
                        cov.prob = .95,
                        BF.type = NULL,
                        ...) {

  if(!(cov.prob > 0 && cov.prob < 1)){
    stop("The argument 'cov.prob' is a coverage probability for the interval estimates that should lie between 0 and 1. The default is 0.95.")
  }

  # label for the tested quantities: plain means for a one-sample test,
  # differences for paired / two-sample tests
  parameters <- if(x$numpop == 1 && x$paired == FALSE) "means" else "differences"

  # names of the lm coefficients that carry the tested quantities
  var.labels <- colnames(x$coefficients)
  if(x$numpop == 1){
    n.vars <- length(x$coefficients)
    coef.labels <- paste0("intercept_on_", var.labels)
  }else{
    n.vars <- ncol(x$coefficients)
    coef.labels <- paste0("difference_on_", var.labels)
  }

  # exploratory test: joint equality of all coefficients to their null values
  hypothesis.explo <- paste0(paste0(coef.labels, "=", x$null), collapse = " & ")
  x1 <- x
  class(x1) <- "lm"
  BF.explo <- BF(x1,
                 hypothesis = hypothesis.explo,
                 prior.hyp.conf = prior.hyp.explo,
                 log = log,
                 cov.prob = cov.prob,
                 BF.type = BF.type)
  # reshape the confirmatory output of the joint test into exploratory form
  BF.explo$BFtu_confirmatory <- t(as.matrix(BF.explo$BFtu_confirmatory))
  BF.explo$PHP_confirmatory <- t(as.matrix(BF.explo$PHP_confirmatory))
  row.names(BF.explo$BFtu_confirmatory) <- row.names(BF.explo$PHP_confirmatory) <- parameters
  colnames(BF.explo$BFtu_confirmatory) <- c("BF0u", "BFuu")
  colnames(BF.explo$PHP_confirmatory) <- c("Pr(=null)", "Pr(not null)")

  # translate the user's hypothesis to the internal lm coefficient names
  if(is.null(hypothesis)){
    hypothesis.updated <- NULL
  }else{
    if(x$numpop == 1 || x$paired == TRUE){
      add.name <- "intercept_on_"
      rem.name <- if(x$numpop == 1 && x$paired == TRUE) "difference_" else ""
    }else{
      add.name <- "difference_on_"
      rem.name <- "difference_"
    }
    hypothesis.updated <- hypothesis
    for(varname in var.labels){
      hypothesis.updated <- gsub(paste0(rem.name, varname), paste0(add.name, varname), hypothesis.updated)
    }
  }

  BF.conf <- BF(x1,
                hypothesis = hypothesis.updated,
                prior.hyp.conf = prior.hyp.conf,
                log = log,
                complement = complement,
                BF.type = BF.type)
  if(!is.null(hypothesis)){
    # map the internal names back to the user-facing names
    BF.conf$hypotheses <- gsub(add.name, rem.name, BF.conf$hypotheses)
  }

  # estimates table: for two populations keep only the 'difference' rows
  explo.estimates <- BF.explo$estimates
  if(x$numpop != 1){
    explo.estimates <- explo.estimates[(1:n.vars)*2, ]
  }
  row.names(explo.estimates) <- names(get_estimates(x)$estimate)

  BFlm_out <- list(
    BFtu_exploratory = BF.explo$BFtu_confirmatory,
    PHP_exploratory = BF.explo$PHP_confirmatory,
    BFtu_confirmatory = BF.conf$BFtu_confirmatory,
    PHP_confirmatory = BF.conf$PHP_confirmatory,
    BFmatrix_confirmatory = BF.conf$BFmatrix_confirmatory,
    BFtable_confirmatory = BF.conf$BFtable_confirmatory,
    prior.hyp.explo = BF.explo$prior.hyp.conf,
    prior.hyp.conf = BF.conf$prior.hyp.conf,
    hypotheses = BF.conf$hypotheses,
    estimates = explo.estimates,
    model = x1,
    bayesfactor = BF.conf$bayesfactor,
    parameter = parameters,
    log = BF.conf$log,
    fraction_number_groupIDs = BF.conf$fraction_number_groupIDs,
    fraction_groupID_observations = BF.conf$fraction_groupID_observations,
    fraction_groupID = BF.conf$fraction_groupID,
    call = match.call())

  class(BFlm_out) <- "BF"

  return(BFlm_out)

}
#BF method for polr classes

#' @method BF polr
#' @export
BF.polr <- function(x,
                    hypothesis = NULL,
                    prior.hyp.explo = NULL,
                    prior.hyp.conf = NULL,
                    prior.hyp = NULL,
                    complement = TRUE,
                    log = FALSE,
                    cov.prob = .95,
                    ...){

  # Delegate to the default Gaussian-approximation BF method, using the
  # estimates and error covariance extracted from the fitted polr object.
  sumstats <- get_estimates(x)
  Args <- as.list(match.call()[-1])
  Args <- modifyList(Args, list(x = sumstats$estimate,
                                Sigma = sumstats$Sigma[[1]],
                                n = nrow(x$fitted.values),
                                hypothesis = hypothesis,
                                prior.hyp = prior.hyp,
                                prior.hyp.explo = prior.hyp.explo,
                                prior.hyp.conf = prior.hyp.conf,
                                complement = complement,
                                log = log,
                                cov.prob = cov.prob))
  result <- do.call(BF, Args)
  result$model <- x
  result$call <- match.call()
  result

}
#' @method print BF
#' @export
print.BF <- function(x,
                     digits = 3,
                     na.print = "", ...){

  cat("Call:")
  cat("\n")
  print(x$call)

  cat("\n")

  # (fix) the user-supplied 'digits' is now honored; previously it was
  # unconditionally overwritten with 'digits <- 3'.

  if(is.null(x$BFtu_confirmatory)){

    # exploratory output only
    cat("Bayesian hypothesis test","\n", sep = "")
    cat("Type: exploratory","\n", sep = "")
    cat("Object: ",class(x$model)[1],"\n", sep = "")
    cat("Parameter: ",x$parameter,"\n", sep = "")
    cat("Method: ",x$bayesfactor,"\n\n", sep = "")

    cat("Posterior probabilities of the hypotheses","\n", sep = "")
    cat("\n")
    cat("Model parameters:")
    cat("\n")
    print(round(x$PHP_exploratory,digits))

    cat("\n")

    # for ANOVA fits, also print main/interaction effect summaries if present
    if(sum(class(x$model)=="aov")>0){
      if(!is.null(x$PHP_main)){
        cat("main effects:")
        cat("\n")
        print(round(x$PHP_main,digits))

        cat("\n")
      }
      if(!is.null(x$PHP_interaction)){
        cat("interaction effects:")
        cat("\n")
        print(round(x$PHP_interaction,digits))

        cat("\n")
      }
    }

  }else{

    # confirmatory output
    cat("Bayesian hypothesis test","\n", sep = "")
    cat("Type: confirmatory","\n", sep = "")
    cat("Object: ",class(x$model)[1],"\n", sep = "")
    cat("Parameter: ",x$parameter,"\n", sep = "")
    cat("Method: ",x$bayesfactor,"\n\n", sep = "")

    cat("Posterior probabilities of the hypotheses")
    cat("\n")

    PHPmatrix <- as.matrix(round(x$PHP_confirmatory,digits))
    colnames(PHPmatrix) <- "Pr(hypothesis|data)"
    hypnumbers <- unlist(lapply(1:nrow(PHPmatrix),function(r){
      paste0("H",as.character(r))
    }))
    row.names(PHPmatrix) <- hypnumbers
    print(PHPmatrix)

    cat("\n")
    if(x$log==FALSE){
      cat("Evidence matrix (BFs):")
    }else{
      cat("Evidence matrix (log BFs):")
    }
    cat("\n")

    BFmat <- round(x$BFmatrix_confirmatory,digits)
    row.names(BFmat) <- colnames(BFmat) <- hypnumbers
    print(BFmat)

    cat("\n")
    cat("Hypotheses:")
    cat("\n")

    for(h in 1:length(x$hypotheses)){
      cat(paste0(hypnumbers[h],": ",x$hypotheses[h]))
      cat("\n")
    }

  }

}
#' @method summary BF
#' @export
summary.BF <- function(object, ...){

  # fixed display precision for all printed tables
  digits <- 3

  cat("Call:")
  cat("\n")
  print(object$call)

  cat("\n")

  # exploratory section is always printed
  cat("Bayesian hypothesis test","\n", sep = "")
  cat("Type: exploratory","\n", sep = "")
  cat("Object: ",class(object$model)[1],"\n", sep = "")
  cat("Parameter: ",object$parameter,"\n", sep = "")
  cat("Method: ",object$bayesfactor,"\n\n", sep = "")

  cat("Posterior probabilities of the hypotheses","\n", sep = "")
  cat("\n")
  cat("Model parameters:")
  cat("\n")
  print(round(object$PHP_exploratory, digits))

  cat("\n")

  # for ANOVA fits, also print main/interaction effect summaries if present
  if(inherits(object$model, "aov")){
    if(!is.null(object$PHP_main)){
      cat("main effects:")
      cat("\n")
      print(round(object$PHP_main, digits))

      cat("\n")
    }
    if(!is.null(object$PHP_interaction)){
      cat("interaction effects:")
      cat("\n")
      print(round(object$PHP_interaction, digits))

      cat("\n")
    }
  }

  # confirmatory section only when a confirmatory test was run
  if(!is.null(object$BFtu_confirmatory)){

    cat("Bayesian hypothesis test","\n", sep = "")
    cat("Type: confirmatory","\n", sep = "")
    cat("Object: ",class(object$model)[1],"\n", sep = "")
    cat("Parameter: ",object$parameter,"\n", sep = "")
    cat("Method: ",object$bayesfactor,"\n\n", sep = "")

    cat("Posterior probabilities of the hypotheses")
    cat("\n")

    php_mat <- as.matrix(round(object$PHP_confirmatory, digits))
    colnames(php_mat) <- "Pr(hypothesis|data)"
    hyp_ids <- paste0("H", seq_len(nrow(php_mat)))
    row.names(php_mat) <- hyp_ids
    print(php_mat)

    cat("\n")
    if(object$log == FALSE){
      cat("Evidence matrix (BFs):")
    }else{
      cat("Evidence matrix (log BFs):")
    }
    cat("\n")

    evidence_mat <- round(object$BFmatrix_confirmatory, digits)
    row.names(evidence_mat) <- colnames(evidence_mat) <- hyp_ids
    print(evidence_mat)

    cat("\n")
    cat("Specification table:")
    cat("\n")

    spec_tab <- round(object$BFtable_confirmatory, digits)
    row.names(spec_tab) <- hyp_ids
    print(spec_tab)

    cat("\n")
    cat("Hypotheses:")
    cat("\n")

    for(h in seq_along(object$hypotheses)){
      cat(paste0(hyp_ids[h], ": ", object$hypotheses[h]))
      cat("\n")
    }
  }
}
#BF method for survreg classes

#' @method BF survreg
#' @export
BF.survreg <- function(x,
                       hypothesis = NULL,
                       prior.hyp.explo = NULL,
                       prior.hyp.conf = NULL,
                       prior.hyp = NULL,
                       complement = TRUE,
                       log = FALSE,
                       cov.prob = .95,
                       ...){

  # Extract summary statistics once (the original called get_estimates()
  # twice) and delegate to the default Gaussian-approximation BF method.
  Args <- as.list(match.call()[-1])
  get_est <- get_estimates(x)
  Args$x <- get_est$estimate
  Args$Sigma <- get_est$Sigma[[1]]
  Args$n <- length(x$y)
  Args$hypothesis <- hypothesis
  Args$prior.hyp <- prior.hyp
  Args$prior.hyp.explo <- prior.hyp.explo
  Args$prior.hyp.conf <- prior.hyp.conf
  Args$complement <- complement
  Args$log <- log
  Args$cov.prob <- cov.prob
  out <- do.call(BF, Args)
  out$model <- x
  out$call <- match.call()
  out
}


#BF method for zeroinfl classes

#' @method BF zeroinfl
#' @export
BF.zeroinfl <- function(x,
                        hypothesis = NULL,
                        prior.hyp.explo = NULL,
                        prior.hyp.conf = NULL,
                        prior.hyp = NULL,
                        complement = TRUE,
                        log = FALSE,
                        cov.prob = .95,
                        ...){

  # Extract summary statistics and delegate to the default method. The
  # 'cov.prob' argument (same default as the sibling methods BF.glm,
  # BF.survreg, BF.polr) is added for consistency; callers that do not pass
  # it see unchanged behavior since .95 is also the downstream default.
  Args <- as.list(match.call()[-1])
  get_est <- get_estimates(x)
  Args$x <- get_est$estimate
  Args$Sigma <- get_est$Sigma[[1]]
  Args$n <- length(x$residuals)
  Args$hypothesis <- hypothesis
  Args$prior.hyp <- prior.hyp
  Args$prior.hyp.explo <- prior.hyp.explo
  Args$prior.hyp.conf <- prior.hyp.conf
  Args$complement <- complement
  Args$log <- log
  Args$cov.prob <- cov.prob
  out <- do.call(BF, Args)
  out$model <- x
  out$call <- match.call()
  out
}
#' BFpack: Flexible Bayes factor testing of scientific expectations 2 | #' 3 | #' 4 | #' @description The \code{R} package \strong{BFpack} provides tools for exploratory and 5 | #' confirmatory Bayesian hypothesis testing using Bayes factors and posterior probabilities 6 | #' under common statistical models. The main function `BF` needs a fitted model `x` as input 7 | #' argument. Depending on the class of the fitted model, a standard hypothesis test is 8 | #' executed by default. For example, if `x` is a 9 | #' fitted regression model of class `lm` then posterior probabilities are computed of whether 10 | #' each separate coefficient is zero, negative, or positive (assuming equal prior probabilities). 11 | #' If one has specific hypotheses with 12 | #' equality and/or order constraints on the parameters under the fitted model `x` then these 13 | #' can be formulated using the `hypothesis` argument (a character string), possibly together 14 | #' prior probabilities for the hypotheses via the `prior` argument (default all hypotheses are 15 | #' equally likely a priori), and the `complement` argument which is a logical stating whether 16 | #' the complement hypotheses should be included in the case (`TRUE` by default). 17 | #' 18 | #' Use compilation for Fortran functions 19 | #' 20 | #' @references 21 | #' Mulder, J., D.R. Williams, Gu, X., A. Tomarken, 22 | #' F. Böing-Messing, J.A.O.C. Olsson-Collentine, Marlyne Meyerink, J. Menke, 23 | #' J.-P. Fox, Y. Rosseel, E.J. Wagenmakers, H. Hoijtink., and van Lissa, C. 24 | #' (submitted). BFpack: Flexible Bayes Factor Testing of Scientific Theories 25 | #' in R. \url{https://arxiv.org/abs/1911.07728} 26 | #' 27 | #' Mulder, J., van Lissa, C., Gu, X., Olsson-Collentine, A., Boeing-Messing, F., Williams, 28 | #' D. R., Fox, J.-P., Menke, J., et al. (2019). BFpack: Flexible Bayes Factor Testing of 29 | #' Scientific Expectations. 
(Version 0.2.1) \url{https://CRAN.R-project.org/package=BFpack} 30 | #' 31 | #' 32 | #' @examples 33 | #' \dontrun{ 34 | #' # EXAMPLE 1. One-sample t test 35 | #' ttest1 <- t_test(therapeutic, mu = 5) 36 | #' print(ttest1) 37 | #' # confirmatory Bayesian one sample t test 38 | #' BF1 <- BF(ttest1, hypothesis = "mu = 5") 39 | #' summary(BF1) 40 | #' # exploratory Bayesian one sample t test 41 | #' BF(ttest1) 42 | #' 43 | #' # EXAMPLE 2. ANOVA 44 | #' aov1 <- aov(price ~ anchor * motivation,data = tvprices) 45 | #' BF1 <- BF(aov1, hypothesis = "anchorrounded = motivationlow; 46 | #' anchorrounded < motivationlow") 47 | #' summary(BF1) 48 | #' 49 | #' # EXAMPLE 3. Logistic regression 50 | #' fit <- glm(sent ~ ztrust + zfWHR + zAfro + glasses + attract + maturity + 51 | #' tattoos, family = binomial(), data = wilson) 52 | #' BF1 <- BF(fit, hypothesis = "ztrust > zfWHR > 0; 53 | #' ztrust > 0 & zfWHR = 0") 54 | #' summary(BF1) 55 | #' 56 | #' # EXAMPLE 4. Correlation analysis 57 | #' set.seed(123) 58 | #' cor1 <- cor_test(memory[1:20,1:3]) 59 | #' BF1 <- BF(cor1) 60 | #' summary(BF1) 61 | #' BF2 <- BF(cor1, hypothesis = "Wmn_with_Im > Wmn_with_Del > 0; 62 | #' Wmn_with_Im = Wmn_with_Del = 0") 63 | #' summary(BF2) 64 | #' } 65 | #' 66 | #' @docType package 67 | #' 68 | #' 69 | #' @name BFpack-package 70 | #' 71 | "_PACKAGE" 72 | -------------------------------------------------------------------------------- /R/Fcor.R: -------------------------------------------------------------------------------- 1 | #' Student t approximations of Fisher transformed correlations 2 | #' 3 | #' Approximated degrees of freedom and approximated scale of the Fisher transformed 4 | #' correlations depending on the dimension of the vector of dependent variables P 5 | #' based on a joint uniform prior. 
6 | #' 7 | #' \tabular{lll}{ 8 | #' \strong{nu} \tab \code{numeric} \tab Approximated degrees of freedom\cr 9 | #' \strong{sigma} \tab \code{numeric} \tab Approximated scale\cr 10 | #' \strong{P} \tab \code{integer} \tab Dimension of vector of dependent variables\cr 11 | #' } 12 | #' @docType data 13 | #' @keywords datasets 14 | #' @name Fcor 15 | #' @usage data(Fcor) 16 | #' @format A data.frame with 3 columns. 17 | NULL 18 | -------------------------------------------------------------------------------- /R/actors.R: -------------------------------------------------------------------------------- 1 | #' Actors from a small hypothetical network 2 | #' 3 | #' The related data files 'events', 'same_location', 'same_culture' contain 4 | #' information on the event sequence and the two event statistics respectively. 5 | #' 6 | #' 7 | #' @name actors 8 | #' @docType data 9 | #' @usage data(actors) 10 | #' @keywords datasets 11 | #' @format dataframe (25 rows, 4 columns) 12 | #' 13 | #' \tabular{lll}{ 14 | #' \strong{actors$id} \tab \code{integer} \tab ID of the employee, corresponding to 15 | #' the sender and receiver IDs in the events dataframe \cr 16 | #' \strong{actors$location} \tab \code{numeric} \tab Location of the actor, 17 | #' ranging from 1-4 \cr 18 | #' \strong{actors$culture} \tab \code{character} \tab Categorical variable, indicating the 19 | #' culture of the employee \cr 20 | #' } 21 | #' 22 | #' @keywords datasets 23 | NULL 24 | -------------------------------------------------------------------------------- /R/attention.R: -------------------------------------------------------------------------------- 1 | #' Multiple Sources of Attentional Dysfunction in Adults With Tourette's Syndrome 2 | #' 3 | #' Data from a psychological study comparing attentional performances of 4 | #' Tourette's syndrome (TS) patients, ADHD patients, and controls. 
5 | #' These data were simulated using the sufficient statistics from Silverstein, 6 | #' Como, Palumbo, West, and Osborn (1995). 7 | #' 8 | #' \tabular{lll}{ 9 | #' \strong{accuracy} \tab \code{numeric} \tab Participant's accuracy in the attentional task\cr 10 | #' \strong{group} \tab \code{factor} \tab Participant's group membership (TS patient, ADHD patient, or control)\cr 11 | #' } 12 | #' @docType data 13 | #' @keywords datasets 14 | #' @name attention 15 | #' @usage data(attention) 16 | #' @references Silverstein, S. M., Como, P. G., Palumbo, D. R., West, L. L., & Osborn, L. M. (1995). Multiple sources of attentional dysfunction in adults with Tourette's syndrome: Comparison with attention deficit-hyperactivity disorder. Neuropsychology, 9(2), 157-164. doi:10.1037/0894-4105.9.2.157 17 | #' @format A data.frame with 51 rows and 2 columns. 18 | NULL 19 | -------------------------------------------------------------------------------- /R/cor_test.print.R: -------------------------------------------------------------------------------- 1 | #' @method print cor_test 2 | #' @export 3 | print.cor_test <- function(x, 4 | digits = 3, ...){ 5 | 6 | estimates <- x$correstimates 7 | names <- x$corrnames 8 | groups <- length(names) 9 | P <- nrow(names[[1]]) 10 | numcorr <- P*(P-1)/2 11 | countg = 0 12 | corrlist <- lapply(1:groups,function(g){ 13 | lapply(1:3,function(b){ 14 | matje <- matrix(NA,P,P) 15 | row.names(matje) <- colnames(matje) <- x$variables[[1]] 16 | matje[lower.tri(diag(P))] <- estimates[numcorr*(g-1)+1:numcorr,1+b] 17 | matje 18 | }) 19 | }) 20 | 21 | cat("\n") 22 | if(x$prior.cor=="joint.unif"){ 23 | cat("Unconstrained Bayesian estimation (joint uniform prior)","\n", sep = "") 24 | }else{ 25 | cat("Unconstrained Bayesian estimation (marginally uniform prior)","\n", sep = "") 26 | } 27 | cat("\n") 28 | 29 | if(groups > 1){ 30 | for(g in 1:groups){ 31 | cat(paste0("Group g",as.character(g),":"),"\n", sep = "") 32 | cat("\n") 33 | cat("correlation 
types","\n") 34 | print(x$cor.type[[g]],na.print="",quote=FALSE) 35 | cat("\n") 36 | cat("Posterior 2.5% lower bounds:","\n", sep = "") 37 | print(round(corrlist[[g]][[2]],digits), na.print = "") 38 | cat("\n") 39 | cat("Posterior median:","\n", sep = "") 40 | print(round(corrlist[[g]][[1]],digits), na.print = "") 41 | cat("\n") 42 | cat("Posterior 97.5% upper bounds:","\n", sep = "") 43 | print(round(corrlist[[g]][[3]],digits), na.print = "") 44 | cat("\n") 45 | } 46 | }else{ 47 | cat("correlation types","\n") 48 | print(x$cor.type[[1]],na.print="",quote=FALSE) 49 | cat("\n") 50 | cat("Posterior 2.5% lower bounds:","\n", sep = "") 51 | print(round(corrlist[[1]][[2]],digits), na.print = "") 52 | cat("\n") 53 | cat("Posterior median:","\n", sep = "") 54 | print(round(corrlist[[1]][[1]],digits), na.print = "") 55 | cat("\n") 56 | cat("Posterior 97.5% upper bounds:","\n", sep = "") 57 | print(round(corrlist[[1]][[3]],digits), na.print = "") 58 | cat("\n") 59 | } 60 | 61 | } 62 | 63 | 64 | #' @method summary cor_test 65 | #' @export 66 | summary.cor_test <- function(object, digits = 3, ...){ 67 | 68 | cor.df <- round(as.data.frame(object$correstimates),digits) 69 | cor.df$cor.type <- unlist(lapply(1:length(object$cor.type),function(g){ 70 | object$cor.type[[g]][lower.tri(object$cor.type[[g]])] 71 | })) 72 | 73 | cor.df 74 | 75 | } 76 | 77 | 78 | #' @importFrom coda mcmc 79 | #' @method plot cor_test 80 | #' @export 81 | plot.cor_test <- function(x, ...){ 82 | 83 | numgroups <- length(x$corrdraws) 84 | P <- dim(x$corrdraws[[1]])[2] 85 | numcor <- P*(P-1)/2 86 | numcor.total <- numcor * numgroups 87 | 88 | cor.draws.matrix <- mcmc(data=do.call(cbind,lapply(1:numgroups,function(g){ 89 | do.call(cbind,lapply(1:(P-1),function(p){ 90 | draws_g_p <- as.matrix(x$corrdraws[[g]][,(p+1):P,p]) 91 | colnames(draws_g_p) <- x$corrnames[[g]][(p+1):P,p] 92 | draws_g_p 93 | })) 94 | })),start=1,end=length(x$corrdraws[[1]][,1,1]),thin=1) 95 | 96 | plot(cor.draws.matrix, ...) 
97 | 98 | } 99 | } 100 | 101 | 102 | -------------------------------------------------------------------------------- /R/fmri.R: -------------------------------------------------------------------------------- 1 | #' fMRI data 2 | #' 3 | #' fMRI data assessing relation between individual differences in the ability to recognize 4 | #' faces and cars and thickness of the superficial, middle, and deep layers of the 5 | #' fusiform face area, as assessed by high-resolution fMRI recognition (Williams et al, 2019, under review) 6 | #' 7 | #' \tabular{lll}{ 8 | #' \strong{Subject} \tab \code{numeric} \tab Participant ID number\cr 9 | #' \strong{Face} \tab \code{numeric} \tab Standardized score on face recognition battery\cr 10 | #' \strong{Vehicle} \tab \code{numeric} \tab Standardized score on vehicle recognition battery\cr 11 | #' \strong{Superficial} \tab \code{numeric} \tab Depth in mm of superficial layer of FFA\cr 12 | #' \strong{Middle} \tab \code{numeric} \tab Depth in mm of middle layer of FFA\cr 13 | #' \strong{Bform} \tab \code{numeric} \tab Depth in mm of deep layer of FFA\cr 14 | #' } 15 | #' @docType data 16 | #' @keywords datasets 17 | #' @name fmri 18 | #' @usage data(fmri) 19 | #' @references McGuigin, R.W., Newton, A.T., Tamber-Rosenau, B., Tomarken, A.J, & Gauthier, I. (under review). Thickness of deep layers in the fusiform face area predicts face recognition. 20 | #' @format A data.frame with 13 rows and 6 columns.
21 | NULL 22 | -------------------------------------------------------------------------------- /R/get_estimates_unique_to_BFpack.R: -------------------------------------------------------------------------------- 1 | #' @method get_estimates hetcor 2 | #' @import bain 3 | #' @export 4 | get_estimates.hetcor <- function(x, ...){ 5 | cl <- match.call() 6 | cl[[1]] <- as.name("get_estimates") 7 | cl[["x"]] <- x$correlations 8 | P <- nrow(x$std.errors) 9 | out <- eval.parent(cl) 10 | retain <- matrix(1:length(out$estimate), nrow = nrow(x$std.errors)) 11 | out$estimate <- out$estimate[retain[lower.tri(retain)]] 12 | errcov <- x$std.errors**2 13 | errcov <- errcov[lower.tri(retain)] 14 | if(length(errcov) == 1){ 15 | out$Sigma <- list(matrix(errcov)) 16 | } else { 17 | out$Sigma <- list(diag(errcov)) 18 | } 19 | class(out) <- "model_estimates" 20 | attr(out, "analysisType") <- "hetcor" 21 | out 22 | } 23 | 24 | 25 | #' @method get_estimates coxph 26 | #' @export 27 | get_estimates.coxph <- function(x, ...){ 28 | out <- list() 29 | out$estimate <- coef(x) 30 | out$Sigma <- list(vcov(x)) 31 | class(out) <- "model_estimates" 32 | attr(out, "analysisType") <- "cophx" 33 | out 34 | } 35 | 36 | 37 | #' @method get_estimates glm 38 | #' @export 39 | get_estimates.glm <- function(x, ...){ 40 | out <- list() 41 | out$estimate <- coef(x) 42 | out$Sigma <- list(vcov(x)) 43 | class(out) <- "model_estimates" 44 | attr(out, "analysisType") <- "glm" 45 | out 46 | } 47 | 48 | #' @method get_estimates polr 49 | #' @export 50 | get_estimates.polr <- function(x, ...){ 51 | out <- list() 52 | out$estimate <- c(coef(x),x$zeta) 53 | out$Sigma <- list(vcov(x)) 54 | class(out) <- "model_estimates" 55 | attr(out, "analysisType") <- "polr" 56 | out 57 | } 58 | 59 | 60 | #' @method get_estimates bartlett_htest 61 | #' @export 62 | get_estimates.bartlett_htest <- function(x, ...){ 63 | out <- list() 64 | out$estimate <- x$vars 65 | out$Sigma <- NULL 66 | class(out) <- "model_estimates" 67 | 
attr(out, "analysisType") <- "bartlett_htest" 68 | out 69 | } 70 | 71 | 72 | #' @method get_estimates survreg 73 | #' @export 74 | get_estimates.survreg <- function(x, ...){ 75 | out <- list() 76 | out$estimate <- x$coefficients 77 | out$Sigma <- list(x$var) 78 | class(out) <- "model_estimates" 79 | attr(out, "analysisType") <- "survreg" 80 | out 81 | } 82 | 83 | 84 | #' @method get_estimates zeroinfl 85 | #' @export 86 | get_estimates.zeroinfl <- function(x, ...){ 87 | out <- list() 88 | out$estimate <- c(coef(x),x$zeta) 89 | out$Sigma <- list(vcov(x)) 90 | class(out) <- "model_estimates" 91 | attr(out, "analysisType") <- "zeroinfl" 92 | out 93 | } 94 | 95 | 96 | #' @method get_estimates lm 97 | #' @export 98 | get_estimates.lm <- function(x, ...){ 99 | out <- list() 100 | P <- ncol(x$coefficients) 101 | K <- nrow(x$coefficients) 102 | N <- nrow(x$residuals) 103 | if(!is.matrix(x$coefficients)){ 104 | if(sum(names(x$coefficients) == "(Intercept)")>0){ 105 | names(x$coefficients)[which(names(x$coefficients) == "(Intercept)")] <- "Intercept" 106 | } 107 | out$estimate <- coef(x) 108 | out$Sigma <- list(vcov(x)) 109 | class(out) <- "model_estimates" 110 | attr(out, "analysisType") <- "lm" 111 | out 112 | }else{ 113 | names_coef1 <- row.names(x$coefficients) 114 | if(sum(names_coef1 == "(Intercept)")>0){ 115 | names_coef1[which(names_coef1 == "(Intercept)")] <- "Intercept" 116 | } 117 | names_coef2 <- colnames(x$coefficients) 118 | names_coef <- unlist(lapply(1:P,function(p){ 119 | lapply(1:K,function(k){ 120 | paste0(names_coef1[k],"_on_",names_coef2[p]) 121 | }) 122 | })) 123 | # estimates of regression coefficients 124 | estimatesBeta <- c(x$coefficients) 125 | names(estimatesBeta) <- names_coef 126 | Xmat <- model.matrix(x) 127 | Ymat <- model.matrix(x)%*%x$coefficients + x$residuals 128 | SigmaEst <- t(x$residuals)%*%x$residuals/N 129 | covmBeta <- kronecker(SigmaEst,solve(t(Xmat)%*%Xmat)) 130 | row.names(covmBeta) <- colnames(covmBeta) <- names_coef 131 | 132 | 
out$estimate <- estimatesBeta 133 | out$Sigma <- list(covmBeta) 134 | class(out) <- "model_estimates" 135 | attr(out, "analysisType") <- "mlm" 136 | out 137 | } 138 | } 139 | 140 | 141 | #' @method get_estimates cor_test 142 | #' @export 143 | get_estimates.cor_test <- function(x, ...){ 144 | out <- list() 145 | out$estimate <- x$meanF 146 | out$Sigma <- list(x$covmF) 147 | class(out) <- "model_estimates" 148 | attr(out, "analysisType") <- "corr_htest" 149 | out 150 | } 151 | 152 | 153 | #' @method get_estimates t_test 154 | #' @export 155 | get_estimates.t_test <- function(x, ...){ 156 | out <- list() 157 | if(length(x$estimate)>1){ 158 | difference <- x$estimate[1] - x$estimate[2] 159 | names(difference) <- "difference" 160 | out$estimate <- difference 161 | out$Sigma <- list((x$stderr)**2) 162 | }else if(names(x$estimate) == "mean difference"){ 163 | difference <- x$estimate 164 | names(difference) <- "difference" 165 | out$estimate <- difference 166 | out$Sigma <- list((x$stderr)**2) 167 | }else{ 168 | populationmean <- x$estimate 169 | names(populationmean) <- "mu" 170 | out$estimate <- populationmean 171 | out$Sigma <- list((x$stderr)**2) 172 | } 173 | class(out) <- "model_estimates" 174 | attr(out, "analysisType") <- "t_test" 175 | out 176 | } 177 | 178 | 179 | 180 | 181 | -------------------------------------------------------------------------------- /R/helper_functions.R: -------------------------------------------------------------------------------- 1 | # Validate that x is a covariance matrix (symmetric, non-negative diagonal); stop otherwise. 2 | check_vcov <- function(x){ 3 | if (!isTRUE(all.equal(x, t(x))) || any(diag(x) < 0)){ 4 | stop(sQuote("sigma"), " is not a covariance matrix") 5 | } 6 | } 7 | 8 | process.prior.hyp.explo <- function(prior_hyp_explo, model){ 9 | 10 | if(sum(class(model)=="bartlett_htest")>0){ 11 | if(is.null(prior_hyp_explo)){ 12 | prior_hyp_explo <- list(rep(.5,2)) 13 | }else{ 14 | if(!is.list(prior_hyp_explo)){ 15 | prior_hyp_explo <- list(prior_hyp_explo) 16 |
}else{ 17 | prior_hyp_explo <- list(prior_hyp_explo[[1]]) 18 | } 19 | if(length(prior_hyp_explo[[1]])!=2){ 20 | stop("For an object of class 'bartlett_htest', the argument 'prior_hyp_explo' should be a vector 21 | of length 2 of the prior probabilities for the hypotheses of homogeneity of variances and an 22 | unconstrained alternative. Or use the default ('NULL') which implies equal prior probabilities.") 23 | } 24 | } 25 | }else{ 26 | if(is.null(prior_hyp_explo)){ # then equal prior probabilities 27 | if(sum(class(model)=="aov")>0){ 28 | prior_hyp_explo <- list(c(.5,.25,.25),rep(1,2),rep(1,2)) 29 | }else{ 30 | prior_hyp_explo <- list(c(.5,.25,.25)) 31 | } 32 | }else{ 33 | if(!is.list(prior_hyp_explo)){#then no error if it is a vector of length 3 for testing individual parameters 34 | if(length(prior_hyp_explo)==3){ 35 | if(sum(class(model)=="aov")>0){ 36 | prior_hyp_explo <- list(prior_hyp_explo,rep(1,2),rep(1,2)) 37 | }else{ 38 | prior_hyp_explo <- list(prior_hyp_explo) 39 | } 40 | }else{ 41 | stop("The argument 'prior_hyp_explo' must be a vector of length three specifying the 42 | prior probabilities of a zero, negative, or positive parameter. 
In the case of an object of 43 | class 'aov' then 'prior_hyp_explo' can be a list of three elements containing the three prior 44 | probabilities of the exploratory test of the parameters, the two prior 45 | probabilities for testing the main effects, and the two prior probabilities for testing the 46 | interaction effects (check the documentation: ?BF).") 47 | } 48 | }else{ 49 | if(length(prior_hyp_explo)==1){ 50 | if(is.null(prior_hyp_explo[[1]])){ 51 | prior_hyp_explo <- list(c(.5,.25,.25),rep(1,2),rep(1,2)) 52 | }else{ 53 | if(length(prior_hyp_explo[[1]])!=3){ 54 | stop("Specify three prior probabilities for the exploratory test of a zero, negative, or positive 55 | effect or use the default 'NULL' implying equal prior probabilities (check the documentation: ?BF).") 56 | } 57 | prior_hyp_explo <- lapply(prior_hyp_explo,function(x){x/sum(x)}) 58 | } 59 | }else{ 60 | if(sum(class(model)=="aov")>0){ 61 | 62 | if(length(prior_hyp_explo)==2){ 63 | if(is.null(prior_hyp_explo[[1]])){ 64 | prior_hyp_explo[[1]] <- c(.5,.25,.25) 65 | }else{ 66 | if(length(prior_hyp_explo[[1]])!=3){ 67 | stop("Specify three prior probabilities for the exploratory test of a zero, negative, or positive 68 | effect or use the default 'NULL' implying equal prior probabilities (check the documentation: ?BF).") 69 | } 70 | } 71 | if(is.null(prior_hyp_explo[[2]])){ 72 | prior_hyp_explo[[2]] <- rep(1,2) 73 | }else{ 74 | if(length(prior_hyp_explo[[2]])!=2){ 75 | stop("Specify two prior probabilities for the exploratory testing of main effects 76 | or use the default 'NULL' implying equal prior probabilities 77 | (check the documentation: ?BF).") 78 | } 79 | } 80 | prior_hyp_explo[[3]] <- rep(1,2) 81 | } 82 | if(length(prior_hyp_explo)==3){ 83 | if(is.null(prior_hyp_explo[[1]])){ 84 | prior_hyp_explo[[1]] <- c(.5,.25,.25) 85 | }else{ 86 | if(length(prior_hyp_explo[[1]])!=3){ 87 | stop("Specify three prior probabilities for the exploratory test of a zero, negative, or positive 88 | effect or use the
default 'NULL' implying equal prior probabilities 89 | (check the documentation: ?BF).") 90 | } 91 | } 92 | if(is.null(prior_hyp_explo[[2]])){ 93 | prior_hyp_explo[[2]] <- rep(1,2) 94 | }else{ 95 | if(length(prior_hyp_explo[[2]])!=2){ 96 | stop("Specify two prior probabilities for the exploratory testing of main effects 97 | or use the default 'NULL' implying equal prior probabilities 98 | (check the documentation: ?BF).") 99 | } 100 | } 101 | if(is.null(prior_hyp_explo[[3]])){ 102 | prior_hyp_explo[[3]] <- rep(1,2) 103 | }else{ 104 | if(length(prior_hyp_explo[[3]])!=2){ 105 | stop("Specify two prior probabilities for the exploratory testing of interaction effects 106 | or use the default 'NULL' implying equal prior probabilities 107 | (check the documentation: ?BF).") 108 | } 109 | } 110 | } 111 | if(length(prior_hyp_explo)>3){ 112 | stop("Use a list of three vectors or use the default 'NULL' implying equal prior probabilities 113 | (check the documentation: ?BF).") 114 | } 115 | }else{ 116 | if(is.null(prior_hyp_explo[[1]])){ 117 | prior_hyp_explo <- list(c(.5,.25,.25)) 118 | }else{ 119 | if(length(prior_hyp_explo[[1]])!=3){ 120 | stop("Specify three prior probabilities for the exploratory test of a zero, negative, or positive 121 | effect or use the default 'NULL' implying equal prior probabilities (check the documentation: ?BF).") 122 | } 123 | prior_hyp_explo <- list(prior_hyp_explo[[1]]/sum(prior_hyp_explo[[1]])) 124 | } 125 | } 126 | } 127 | } 128 | } 129 | prior_hyp_explo <- lapply(prior_hyp_explo,function(x)x/sum(x)) 130 | } 131 | 132 | return(prior_hyp_explo) 133 | 134 | } 135 | 136 | # from the output of the constraints in 'parse_hypothesis' create lists for the equality and order matrices 137 | make_RrList <- function(parse_hyp){ 138 | numhyp <- length(parse_hyp$hyp_mat) 139 | RrE <- lapply(1:numhyp,function(h){ 140 | qE <- parse_hyp$n_constraints[h*2-1] 141 | if(qE==1){ 142 | RrE_h <- t(as.matrix(parse_hyp$hyp_mat[[h]][1:qE,])) 143 | }else if(qE>1){ 
144 | RrE_h <- parse_hyp$hyp_mat[[h]][1:qE,] 145 | }else {RrE_h=NULL} 146 | RrE_h 147 | }) 148 | RrO <- lapply(1:numhyp,function(h){ 149 | qE <- parse_hyp$n_constraints[h*2-1] 150 | qO <- parse_hyp$n_constraints[h*2] 151 | if(qO==1){ 152 | RrO_h <- t(as.matrix(parse_hyp$hyp_mat[[h]][qE+1:qO,])) 153 | }else if(qO>1){ 154 | RrO_h <- parse_hyp$hyp_mat[[h]][qE+1:qO,] 155 | }else {RrO_h=NULL} 156 | RrO_h 157 | }) 158 | return(list(RrE,RrO)) 159 | } 160 | 161 | # from the output of the constraints in 'parse_hypothesis' create lists for the equality and order matrices 162 | # different format parse_hyp object 163 | make_RrList2 <- function(parse_hyp2){ 164 | numhyp <- length(parse_hyp2$original_hypothesis) 165 | qE <- parse_hyp2$n_constraints[(0:(numhyp-1))*2+1] 166 | qO <- parse_hyp2$n_constraints[(1:numhyp)*2] 167 | RrE <- lapply(1:numhyp,function(h){ 168 | startcon <- sum(qE[1:h]+qO[1:h])-qE[h]-qO[h] 169 | if(qE[h]==1){ 170 | RrE_h <- t(as.matrix(parse_hyp2$hyp_mat[startcon+1:qE[h],])) 171 | }else if(qE[h]>1){ 172 | RrE_h <- parse_hyp2$hyp_mat[startcon+1:qE[h],] 173 | }else {RrE_h=NULL} 174 | RrE_h 175 | }) 176 | RrO <- lapply(1:numhyp,function(h){ 177 | startcon <- sum(qE[1:h]+qO[1:h])-qE[h]-qO[h] 178 | if(qO[h]==1){ 179 | RrO_h <- t(as.matrix(parse_hyp2$hyp_mat[startcon+qE[h]+1:qO[h],])) 180 | }else if(qO[h]>1){ 181 | RrO_h <- parse_hyp2$hyp_mat[startcon+qE[h]+1:qO[h],] 182 | }else {RrO_h=NULL} 183 | RrO_h 184 | }) 185 | return(list(RrE,RrO)) 186 | } 187 | 188 | #for checking whether constraints are conflicting replace interval constraints by equality constraints 189 | interval_RrStack <- function(RrStack){ 190 | q1 <- nrow(RrStack) 191 | q2 <- ncol(RrStack) 192 | RrStack_out <- RrStack 193 | if(q1 > 1){ 194 | row1 <- 1 195 | while(row1 < q1){ 196 | for(row2 in (row1+1):q1){ 197 | # print(row2) 198 | if(sum(abs(RrStack_out[row1,-q2] + RrStack_out[row2,-q2]))==0){ # && RrStack_out[row1,q2]!=RrStack_out[row2,q2] ){ 199 | #together row1 and row2 imply an interval 
constraint 200 | whichcol <- abs(RrStack_out[row1,-q2])!=0 201 | whichcol1 <- which(whichcol) 202 | if(sum(whichcol)==1){ 203 | welkpos <- ifelse(RrStack_out[row1,c(whichcol,F)]>0,row1,row2) 204 | welkneg <- ifelse(RrStack_out[row1,c(whichcol,F)]<0,row1,row2) 205 | lb <- RrStack_out[welkpos,q2] 206 | ub <- -RrStack_out[welkneg,q2] 207 | RrStack_out[row1,] <- RrStack_out[welkpos,] 208 | RrStack_out[row1,q2] <- (ub+lb)/2 209 | RrStack_out <- RrStack_out[-row2,] 210 | q1 <- q1 - 1 211 | }else{ 212 | RrStack_out[row1,q2] <- 0 213 | RrStack_out <- RrStack_out[-row2,] 214 | q1 <- q1 - 1 215 | } 216 | break 217 | } 218 | } 219 | row1 <- row1 + 1 220 | } 221 | } 222 | if(is.matrix(RrStack_out)==F){ 223 | RrStack_out <- t(RrStack_out) 224 | } 225 | return(RrStack_out) 226 | } 227 | 228 | params_in_hyp <- function(hyp){ 229 | params_in_hyp <- trimws(unique(strsplit(hyp, split = "[ =<>,\\(\\);&\\*+-]+", perl = TRUE)[[1]])) 230 | params_in_hyp <- params_in_hyp[!sapply(params_in_hyp, grepl, pattern = "^[0-9]*\\.?[0-9]+$")] 231 | params_in_hyp[grepl("^[a-zA-Z]", params_in_hyp)] 232 | } 233 | -------------------------------------------------------------------------------- /R/import_parser.R: -------------------------------------------------------------------------------- 1 | #' @importFrom utils getFromNamespace 2 | #' @importFrom stats approxfun coef complete.cases cov dbeta density dnorm dt lm median model.matrix 3 | #' @importFrom stats nobs pchisq pnorm pt quantile rWishart rbeta rgamma rnorm rt sd setNames var vcov 4 | parse_hypothesis <- getFromNamespace("parse_hypothesis", "bain") 5 | constraint_to_equation <- getFromNamespace("constraint_to_equation", "bain") 6 | constraint_to_row <- getFromNamespace("constraint_to_row", "bain") 7 | expand_compound_constraints <- getFromNamespace("expand_compound_constraints", "bain") 8 | expand_parentheses <- getFromNamespace("expand_parentheses", "bain") 9 | flip_inequality <- getFromNamespace("flip_inequality", "bain") 10 | order_terms 
<- getFromNamespace("order_terms", "bain") 11 | params_in_hyp <- getFromNamespace("params_in_hyp", "bain") 12 | 13 | -------------------------------------------------------------------------------- /R/memory.R: -------------------------------------------------------------------------------- 1 | #' Memory data on health and schizophrenic patients 2 | #' 3 | #' Data set from study assessing differences between schizophrenic patients and 4 | #' healthy control participants in patterns of correlations among 6 verbal memory 5 | #' tasks (Ichinose et al., 2019). 6 | #' \tabular{lll}{ 7 | #' \strong{Im} \tab \code{numeric} \tab Percent correct on immediate recall of 3 word lists\cr 8 | #' \strong{Del} \tab \code{numeric} \tab Percent correct on delayed recall of 3 word lists\cr 9 | #' \strong{Wmn} \tab \code{numeric} \tab Number correct on letter-number span test of auditory working memory\cr 10 | #' \strong{Cat} \tab \code{numeric} \tab Number correct on category fluency task\cr 11 | #' \strong{Fas} \tab \code{numeric} \tab Number correct on letter fluency task\cr 12 | #' \strong{Rat} \tab \code{numeric} \tab Number correct on remote associates task\cr 13 | #' \strong{Group} \tab \code{factor} \tab Participant Group (HC = Healthy Control; SZ = Schizophrenia) \cr 14 | #' } 15 | #' @docType data 16 | #' @keywords datasets 17 | #' @name memory 18 | #' @usage data(memory) 19 | #' @references Ichinose, M.C., Han, G., Polyn, S., Park, S., & Tomarken, A.J. (2019). Verbal memory performance discordance in schizophrenia: A reflection of cognitive dysconnectivity. 20 | #' Unpublished manuscript. 21 | #' @format A data.frame with 40 rows and 8 columns. 22 | NULL 23 | -------------------------------------------------------------------------------- /R/relevents.R: -------------------------------------------------------------------------------- 1 | #' A sequence of innovation-related e-mail messages 2 | #' 3 | #' A time-ordered sequence of 247 communication messages between 25 actors. 
4 | #' 5 | #' The related data files 'actors', 'same_location', 'same_culture' contain information 6 | #' on the actors and three event statistics respectively. 7 | #' 8 | #' 9 | #' @name relevents 10 | #' @docType data 11 | #' @usage data(relevents) 12 | #' @format dataframe (247 rows, 3 columns) 13 | #' 14 | #' \tabular{lll}{ 15 | #' \strong{relevents$time} \tab \code{numeric} \tab Time of the e-mail message, 16 | #' in seconds since onset of the observation \cr 17 | #' \strong{relevents$sender} \tab \code{integer} \tab ID of the sender, corresponding to 18 | #' the employee IDs in the actors dataframe \cr 19 | #' \strong{relevents$receiver} \tab \code{integer} \tab ID of the receiver \cr 20 | #' } 21 | #' 22 | #' @keywords datasets 23 | NULL 24 | -------------------------------------------------------------------------------- /R/same_culture.R: -------------------------------------------------------------------------------- 1 | #' Same culture event statistic 2 | #' 3 | #' A matrix coding whether senders of events (in the rows) and receivers of events 4 | #' (in the column) have the background culture. Related to the 'events' data object, 5 | #' that contains a relational event sequence, and the 'actors' object, that contains 6 | #' information on the 25 actors involved in the relational event sequence. 7 | #' 8 | #' 9 | #' @name same_culture 10 | #' @docType data 11 | #' @usage data(same_culture) 12 | #' @format dataframe (25 rows, 4 columns) 13 | #' 14 | #' \tabular{lll}{ 15 | #' \strong{same_culture} \tab \code{integer} \tab Event statistic. Matrix with senders in the 16 | #' rows and receivers in the columns. The event statistic is 1 if sender and receiver have 17 | #' the same culture and 0 otherwise. 
\cr 18 | #' } 19 | #' 20 | #' @keywords datasets 21 | NULL 22 | -------------------------------------------------------------------------------- /R/same_location.R: -------------------------------------------------------------------------------- 1 | #' Same location event statistic 2 | #' 3 | #' A matrix coding whether senders of events (in the rows) and receivers of events 4 | #' (in the column) have the same location. Related to the 'events' data object, 5 | #' that contains a relational event sequence, and the 'actors' object, that contains 6 | #' information on the 25 actors involved in the relational event sequence. 7 | #' 8 | #' 9 | #' @name same_location 10 | #' @docType data 11 | #' @usage data(same_location) 12 | #' @format dataframe (25 rows, 4 columns) 13 | #' 14 | #' \tabular{lll}{ 15 | #' \strong{same_location} \tab \code{integer} \tab Event statistic. Matrix with senders in the 16 | #' rows and receivers in the columns. The event statistic is 1 if sender and receiver have 17 | #' the same location and 0 otherwise. \cr 18 | #' } 19 | #' 20 | #' @keywords datasets 21 | NULL 22 | -------------------------------------------------------------------------------- /R/sivan.R: -------------------------------------------------------------------------------- 1 | #' Wason task performance and morality 2 | #' 3 | #' Data from an experimental study, using the Wason selection task (Wason 1968) 4 | #' to examine whether humans have cognitive adaptations for detecting violations 5 | #' of rules in multiple moral domains. Moral domains are operationalized in 6 | #' terms of the five domains of the Moral Foundations Questionnaire 7 | #' (Graham et al. 2011). 8 | #' These data were simulated using the 9 | #' R-package \code{synthpop}, based on the characteristics of the original data. 
10 | #' 11 | #' \tabular{lll}{ 12 | #' \strong{sex} \tab \code{factor} \tab Participant sex\cr 13 | #' \strong{age} \tab \code{integer} \tab Participant age\cr 14 | #' \strong{nationality} \tab \code{factor} \tab Participant nationality\cr 15 | #' \strong{politics} \tab \code{integer} \tab How would you define your political opinions? Likert type scale, from 1 (Liberal) to 6 (Conservative)\cr 16 | #' \strong{WasonOrder} \tab \code{factor} \tab Was the Wason task presented before, or after the MFQ? \cr 17 | #' \strong{Harm} \tab \code{numeric} \tab MFQ harm domain.\cr 18 | #' \strong{Fairness} \tab \code{numeric} \tab MFQ fairness domain.\cr 19 | #' \strong{Loyalty} \tab \code{numeric} \tab MFQ loyalty domain.\cr 20 | #' \strong{Purity} \tab \code{numeric} \tab MFQ purity domain.\cr 21 | #' \strong{Tasktype} \tab \code{ordered} \tab How was the Wason task framed?\cr 22 | #' \strong{GotRight} \tab \code{factor} \tab Did the participant give the correct answer to the Wason task? 23 | #' } 24 | #' @docType data 25 | #' @keywords datasets 26 | #' @name sivan 27 | #' @usage data(sivan) 28 | #' @references Sivan, J., Curry, O. S., & Van Lissa, C. J. (2018). Excavating the Foundations: Cognitive Adaptations for Multiple Moral Domains. Evolutionary Psychological Science, 4(4), 408–419. doi:10.1007/s40806-018-0154-8 29 | #' @format A data.frame with 887 rows and 12 columns. 30 | NULL 31 | -------------------------------------------------------------------------------- /R/therapeutic.R: -------------------------------------------------------------------------------- 1 | #' 2 | #' Data come from an experimental study (Rosa, Rosa, Sarner, and Barrett, 1998) 3 | #' that were also used in Howell (2012, p.196). 4 | #' An experiment was conducted to investigate if Therapeutic Touch practitioners 5 | #' who were blindfolded can effectively identify which of their hands is below the experimenter¡¯s. 
6 | #' Twenty-eight practitioners were involved and tested 10 times in the experiment. 7 | #' Researchers expected an average of 5 correct answers from each practitioner 8 | #' as it is the number by chance if they do not outperform others. 9 | #' 10 | #' \tabular{lll}{ 11 | #' \strong{correct} \tab \code{integer} \tab How many correct answers are from each practitioner)\cr 12 | #' } 13 | #' @docType data 14 | #' @keywords datasets 15 | #' @name therapeutic 16 | #' @usage data(therapeutic) 17 | #' @references Howell, D. (2012). Statistical methods for psychology (8th ed.). Belmont, CA: Cengage Learning. 18 | #' @format A data.frame with 22 rows and 1 column. 19 | NULL 20 | -------------------------------------------------------------------------------- /R/timssICC.r: -------------------------------------------------------------------------------- 1 | #' Trends in International Mathematics and Science Study (TIMSS) 2011-2015 2 | #' 3 | #' A stratified sample was drawn by country and school to obtain a balanced 4 | #' sample of p = 15 grade-4 students 5 | #' per school for each of four countries (The Netherlands (NL), Croatia (HR), 6 | #' Germany 7 | #' (DE), and Denmark (DK)) and two measurement occasions (2011, 2015). 8 | #' Achievement scores 9 | #' (first plausible value) of overall mathematics were considered. Performances 10 | #' of fourth 11 | #' and eight graders from more than 50 participating countries around the world 12 | #' can be found at (https://www.iea.nl/timss) 13 | #' The TIMSS achievement scale is centered at 500 and the standard deviation is 14 | #' equal to 100 scale score points. 15 | #' The TIMSS data set has a three-level structure, where students are nested 16 | #' within classrooms/schools, and 17 | #' the classrooms/schools are nested within countries. Only one classroom was 18 | #' sampled per school. 
19 | #' Changes in the mathematics achievement can be investigated by examining the 20 | #' grouping of 21 | #' students in schools across countries. Changes in country-specific intraclass 22 | #' correlation coefficient 23 | #' from 2011 to 2015, representing heterogeneity in mathematic achievements 24 | #' within and between schools across years, 25 | #' can be tested. When detecting a decrease in average performance together 26 | #' with an increase 27 | #' of the intraclass correlation, a subset of schools performed worse. For a 28 | #' constant 29 | #' intraclass correlation across years the drop in performance applied to the 30 | #' entire population 31 | #' of schools. For different countries, changes in the intraclass correlation 32 | #' across years 33 | #' can be tested concurrently to examine also differences across countries. 34 | #' 35 | #' \tabular{lll}{ 36 | #' \strong{math} \tab \code{numeric} \tab math score child\cr 37 | #' \strong{groupNL11} \tab \code{numeric} \tab 38 | #' Indicator for child from NL in 2011\cr 39 | #' \strong{groupNL15} \tab \code{numeric} \tab 40 | #' Indicator for child from NL in 2015\cr 41 | #' \strong{groupHR11} \tab \code{numeric} \tab 42 | #' Indicator for child from HR in 2011\cr 43 | #' \strong{groupHR15} \tab \code{numeric} \tab 44 | #' Indicator for child from HR in 2015\cr 45 | #' \strong{groupDE11} \tab \code{numeric} \tab 46 | #' Indicator for child from DE in 2011\cr 47 | #' \strong{groupDE15} \tab \code{numeric} \tab 48 | #' Indicator for child from DE in 2015\cr 49 | #' \strong{groupDR11} \tab \code{numeric} \tab 50 | #' Indicator for child from DK in 2011\cr 51 | #' \strong{groupDR15} \tab \code{numeric} \tab 52 | #' Indicator for child from DK in 2015\cr 53 | #' \strong{gender} \tab \code{numeric} \tab Female=0,Male=1 \cr 54 | #' \strong{weight} \tab \code{numeric} \tab Child sampling weight \cr 55 | #' \strong{yeargender} \tab \code{numeric} \tab 56 | #' Interaction for occassion and gender \cr 57 | #' 
\strong{lln} \tab \code{numeric} \tab 58 | #' total number of children in school-class \cr 59 | #' \strong{groupschool} \tab \code{factor} \tab 60 | #' Nested indicator for school in country\cr 61 | #' \strong{schoolID} \tab \code{factor} \tab 62 | #' Unique indicator for school 63 | #' } 64 | #' @docType data 65 | #' @keywords datasets 66 | #' @name timssICC 67 | #' @usage data(timssICC) 68 | #' @references Mulder, J. & Fox, J.-P. (2019). Bayes factor testing of multiple 69 | #' intraclass correlations. Bayesian Analysis. 14, 2, p. 521-552. 70 | #' @format A data.frame with 16770 rows and 15 columns. 71 | NULL 72 | -------------------------------------------------------------------------------- /R/tvprices.R: -------------------------------------------------------------------------------- 1 | #' Precision of the Anchor Influences the Amount of Adjustment 2 | #' 3 | #' Data from an experimental study where participants have to guess the price 4 | #' of a plasma tv. There were two experimental conditions. 5 | #' These data were simulated using the sufficient statistics from Janiszewski & 6 | #' Uy (2008). 7 | #' 8 | #' \tabular{lll}{ 9 | #' \strong{price} \tab \code{numeric} \tab Participant z-scores of price\cr 10 | #' \strong{anchor} \tab \code{factor} \tab Participant anchor\cr 11 | #' \strong{motivation} \tab \code{factor} \tab motivation to change\cr 12 | #' } 13 | #' @docType data 14 | #' @keywords datasets 15 | #' @name tvprices 16 | #' @usage data(tvprices) 17 | #' @references Janiszewski, C., & Uy, D. (2008). Precision of the anchor influences the amount of adjustment. Psychological Science, 19(2), 121–127. doi:10.1111/j.1467-9280.2008.02057.x 18 | #' @format A data.frame with 59 rows and 3 columns. 
19 | NULL 20 | -------------------------------------------------------------------------------- /R/wilson.r: -------------------------------------------------------------------------------- 1 | #' Facial trustworthiness and criminal sentencing 2 | #' 3 | #' Data from a correlational study in which the correlation between ratings of 4 | #' facial trustworthiness of inmates was correlated with whether they had 5 | #' received the death penalty or not (wilson and Rule, 2015). These data were 6 | #' simulated using the R-package \code{synthpop}, based on the characteristics 7 | #' of the original data. 8 | #' 9 | #' 10 | #' 11 | #' \tabular{lll}{ 12 | #' \strong{stim} \tab \code{integer} \tab Stimulus Number\cr 13 | #' \strong{sent} \tab \code{integer} \tab Sentence: 1 = Death, 0 = Life\cr 14 | #' \strong{race} \tab \code{integer} \tab Race: 1 = White, -1 = Black\cr 15 | #' \strong{glasses} \tab \code{integer} \tab Glasses: 1 = Yes, 0 = No\cr 16 | #' \strong{tattoos} \tab \code{integer} \tab Tattoos: 1 = Yes, 0 = No \cr 17 | #' \strong{ztrust} \tab \code{numeric} \tab Trustworthiness \cr 18 | #' \strong{trust_2nd} \tab \code{numeric} \tab Trustworthiness ratings with 2nd control group; Death targets are same as in primary analysis, Life targets are different.\cr 19 | #' \strong{afro} \tab \code{numeric} \tab raw Afrocentricity ratings.\cr 20 | #' \strong{zAfro} \tab \code{numeric} \tab Afrocentricity ratings normalized within target race. Analyses in paper were done with this variable.\cr 21 | #' \strong{attract} \tab \code{numeric} \tab Attractiveness\cr 22 | #' \strong{fWHR} \tab \code{numeric} \tab facial width-to-height \cr 23 | #' \strong{afWHR} \tab \code{numeric} \tab fWHR normalized within target race. Analyses in paper were done with this variable \cr 24 | #' \strong{maturity} \tab \code{numeric} \tab Maturity 25 | #' } 26 | #' @docType data 27 | #' @keywords datasets 28 | #' @name wilson 29 | #' @usage data(wilson) 30 | #' @references Wilson, J. 
P., & Rule, N. O. (2015). Facial Trustworthiness 31 | #' Predicts Extreme Criminal-Sentencing Outcomes. Psychological Science, 32 | #' 26(8), 1325–1331. doi: 10.1177/0956797615590992 33 | #' @format A data.frame with 742 rows and 13 columns. 34 | NULL 35 | -------------------------------------------------------------------------------- /R/zzz.R: -------------------------------------------------------------------------------- 1 | .onAttach <- function(libname, pkgname) { 2 | version <- read.dcf( 3 | file = system.file("DESCRIPTION", package = pkgname), 4 | fields = "Version" 5 | ) 6 | packageStartupMessage( 7 | "\n", 8 | "This is ", paste(pkgname, version),".", "\n", 9 | "Updates on default settings:","\n", 10 | "- For standard (exploratory) tests, the default prior probability for a zero, negative,", "\n", 11 | "and positive effect are 0.5, 0.25, and 0.25, respectively. The previous default was 1/3 for each","\n", 12 | "hypothesis. Changing these prior probabilities can be done using the argument 'prior.hyp.explo'.", "\n", 13 | "- For linear regression, ANOVA, t-tests, the fractional Bayes factor ('FBF') is the new default.","\n", 14 | "To change this to the adjusted fractional Bayes factor (the previous default), users can set","\n", 15 | "the argument: BF.type='AFBF'." 
16 | ) 17 | } 18 | 19 | 20 | # .onAttach <- function(libname, pkgname) { 21 | # packageStartupMessage("test") 22 | # } 23 | -------------------------------------------------------------------------------- /README.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | output: github_document 3 | --- 4 | 5 | ```{r, echo = FALSE, message=F} 6 | knitr::opts_chunk$set( 7 | collapse = TRUE, 8 | comment = "#>", 9 | fig.path = "man/figures/", 10 | dev = "png", 11 | dpi = 500, 12 | fig.align = "center", 13 | knitr::opts_chunk$set(comment = NA) 14 | ) 15 | library(bain) 16 | library(BFpack) 17 | ``` 18 | 19 | 20 | 21 | # 22 | 23 | [![CRAN Version](http://www.r-pkg.org/badges/version/BFpack)](https://cran.r-project.org/package=BFpack) 24 | [![Downloads](https://cranlogs.r-pkg.org/badges/BGGM)](https://cran.r-project.org/package=BFpack) 25 | 26 | 27 | 28 | [![Contributor 29 | Covenant](https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg)](https://www.contributor-covenant.org/version/2/0/code_of_conduct.html) 30 | 31 | The `R` package **BFpack** contains a set of functions for exploratory hypothesis testing (e.g., equal vs negative vs postive) and confirmatory hypothesis testing (with equality and/or order constraints) using Bayes factors and posterior probabilities under commonly used statistical models, including (but not limited to) Bayesian t testing, (M)AN(C)OVA, multivariate/univariate linear regression, correlation analysis, multilevel analysis, or generalized linear models (e.g., logistic regression). The main function `BF` needs a fitted model (e.g., an object of class `lm` for a linear regression model) and (optionally) the argument `hypothesis`, a string which specifies a set of equality/order constraints on the parameters. By applying the function `get_estimates`on a fitted model, the names of the parameters are returned on which constrained hypotheses can be formulated. 
Bayes factors and posterior probabilities are computed for the hypotheses of interest. 32 | 33 | 34 | ## Installation 35 | 36 | Install the latest release version of `BFpack` from CRAN: 37 | 38 | ```{r cran-installation, eval = FALSE} 39 | install.packages("BFpack") 40 | ``` 41 | 42 | The current developmental version can be installed with 43 | 44 | ```{r, eval = FALSE} 45 | if (!requireNamespace("remotes")) { 46 | install.packages("remotes") 47 | } 48 | remotes::install_github("jomulder/BFpack") 49 | ``` 50 | 51 | 52 | ## Example analyses 53 | 54 | Below several example analyses are provided using **BFpack**. 55 | 56 | 57 | ### Bayesian t testing 58 | 59 | #### Univariate t testing 60 | 61 | First a classical one sample t test is executed for the test value $\mu = 5$ on the therapeutic data 62 | 63 | ```{r, eval = FALSE} 64 | ttest1 <- bain::t_test(therapeutic, alternative = "greater", mu = 5) 65 | ``` 66 | The `t_test` function is part of the ***bain*** package. The function is equivalent to the standard `t.test` function with the addition that the returned object contains additional output than the standard `t.test` function. 67 | 68 | To perform a Bayesian t test plug the fitted object into the `BF` function. 69 | 70 | ```{r, eval = FALSE} 71 | library(BFpack) 72 | BF1 <- BF(ttest1) 73 | ``` 74 | 75 | This executes an exhaustive test around the null value: `H1: mu = 5` versus `H2: mu < 5` versus `H3: mu > 5` assuming equal prior probabilities for `H1`, `H2`, and `H3` of 1/3. The output presents the posterior probabilities for the three hypotheses. 76 | 77 | The same test would be executed when the same hypotheses are explicitly specified using the `hypothesis` argument. 78 | 79 | ```{r, eval = FALSE} 80 | hypothesis <- "mu = 5; mu < 5; mu > 5" 81 | BF(ttest1, hypothesis = hypothesis) 82 | ``` 83 | 84 | When testing hypotheses via the `hypothesis` argument, the output also presents an `Evidence matrix` containing the Bayes factors between the hypotheses. 
85 | 86 | 87 | #### Multivariate t testing 88 | 89 | Bayesian multivariate t tests can be executed by first fitting a multivariate (regression) model using the `lm` function, and subsequently, the means of the dependent variables (or other coefficients) in the model can be tested using the `BF()` function. Users have to be aware however that means are modeled using intercepts which are named `(Intercept)` by default by `lm` while the hypothesis argument in `BF()` does not allow effect names that include brackets (i.e., `(` or `)`). To circumvent this, one can create a vector of 1s, with name (say) `ones`, to replace the intercept. For example, let us consider a multivariate normal model for the dependent variables `Superficial`, `Middle`, and `Deep` in the `fmri` data set: 90 | 91 | ``` r 92 | fmri1 <- cbind(fmri,ones=1) 93 | mlm1 <- lm(cbind(Superficial,Middle,Deep) ~ -1 + ones, data = fmri1) 94 | ``` 95 | 96 | Next, we can (for instance) test whether all means equal 0 (`H1`), whether all means are positive (`H2`), or none of these two hypotheses (`complement`): 97 | 98 | ``` r 99 | BFmlm1 <- BF(mlm1, hypothesis="ones_on_Superficial=ones_on_Middle=ones_on_Deep=0; 100 | (ones_on_Superficial,ones_on_Middle,ones_on_Deep)>0") 101 | ``` 102 | 103 | ### Analysis of variance 104 | 105 | First an analysis of variance (ANOVA) model is fitted using the `aov` fuction in `R`. 106 | ```{r, eval = FALSE} 107 | aov1 <- aov(price ~ anchor * motivation, data = tvprices) 108 | ``` 109 | Next a Bayesian test can be performed on the fitted object. 110 | ```{r, eval = FALSE} 111 | BF(aov1) 112 | ``` 113 | By default posterior probabilities are computed of whether main effects and interaction effects are present. Alternative constrained hypotheses can be tested on the model parameters `get_estimates(aov1)`. 114 | 115 | 116 | ### Logistic regression 117 | 118 | An example hypothesis test is consdered under a logistic regression model. 
First a logistic regression model is fitted using the `glm` function 119 | ```{r, eval = FALSE} 120 | fit_glm <- glm(sent ~ ztrust + zfWHR + zAfro + glasses + attract + maturity + 121 | tattoos, family = binomial(), data = wilson) 122 | ``` 123 | The names of the regression coefficients on which constrained hypotheses can be formualted can be extracted using the `get_estimates` function. 124 | ```{r, eval = FALSE} 125 | get_estimates(fit_glm) 126 | ``` 127 | Two different hypotheses are formulated with competing equality and/or order constraints on the parameters of interest. These hypotheses are motivated in Mulder et al. (2019) 128 | ```{r, eval = FALSE} 129 | BF_glm <- BF(fit_glm, hypothesis = "ztrust > (zfWHR, zAfro) > 0; 130 | ztrust > zfWHR = zAfro = 0") 131 | summary(BF_glm) 132 | ``` 133 | By calling the `summary` function on the output object of class `BF`, the results of the exploratory tests are presented of whether each separate parameter is zero, negative, or positive, and the results of the confirmatory test of the hypotheses under the `hypothesis` argument are presented. When the hypotheses do not cover the complete parameter space, by default the complement hypothesis is added which covers the remaining parameter space that is not covered by the constraints under the hypotheses of interest. In the above example, the complement hypothesis covers the parameter space where neither `"ztrust > (zfWHR, zAfro) > 0"` holds, nor where `"ztrust > zfWHR = zAfro = 0"` holds. 134 | 135 | 136 | ### Correlation analysis 137 | 138 | By default `BF` performs exhaustice tests of whether the separate correlations are zero, negative, or positive. The name of the correlations is constructed using the names of the variables separated by `_with_`. 
139 | ```{r, eval = FALSE} 140 | set.seed(123) 141 | cor1 <- cor_test(memory[,1:3]) 142 | BF1 <- BF(cor1) 143 | print(BF1) 144 | ``` 145 | 146 | Constraints can also be tested between correlations, e.g., whether all correlations are equal and positive versus an unconstrained complement. 147 | ```{r, eval = FALSE} 148 | BF2 <- BF(cor1, hypothesis = "Del_with_Im = Wmn_with_Im = Wmn_with_Del > 0") 149 | print(BF2) 150 | ``` 151 | 152 | 153 | ### Univariate/Multivariate multiple regression 154 | 155 | For a univariate regression model, by default an exhaustive test is executed of whether an effect is zero, negative, or postive. 156 | ```{r, eval = FALSE} 157 | lm1 <- lm(Superficial ~ Face + Vehicle, data = fmri) 158 | BF1 <- BF(lm1) 159 | print(BF1) 160 | ``` 161 | 162 | Hypotheses can be tested with equality and/or order constraints on the effects of interest. If prefered the complement hypothesis can be omitted using the `complement` argument 163 | ```{r, eval = FALSE} 164 | BF2 <- BF(lm1, hypothesis = "Vehicle > 0 & Face < 0; Vehicle = Face = 0", 165 | complement = FALSE) 166 | print(BF2) 167 | ``` 168 | 169 | In a multivariate regression model hypotheses can be tested on the effects on the same dependent variable, and on effects across different dependent variables. The name of an effect is constructed as the name of the predictor variable and the dependent variable separated by `_on_`. Testing hypotheses with both constraints within a dependent variable and across dependent variables makes use of a Monte Carlo estimate which may take a few seconds. 
170 | ```{r, eval = FALSE} 171 | lm2 <- lm(cbind(Superficial, Middle, Deep) ~ Face + Vehicle, 172 | data = fmri) 173 | constraint2 <- "Face_on_Deep = Face_on_Superficial = Face_on_Middle < 0 < 174 | Vehicle_on_Deep = Vehicle_on_Superficial = Vehicle_on_Middle; 175 | Face_on_Deep < Face_on_Superficial = Face_on_Middle < 0 < Vehicle_on_Deep = 176 | Vehicle_on_Superficial = Vehicle_on_Middle" 177 | set.seed(123) 178 | BF3 <- BF(lm2, hypothesis = constraint2) 179 | summary(BF3) 180 | ``` 181 | 182 | 183 | ### Running `BF` on a named vector 184 | 185 | The input for the `BF` function can also be a named vector containing the estimates of the parameters of interest. In this case the error covariance matrix of the estimates is also needed via the `Sigma` argument, as well as the sample size that was used for obtaining the estimates via the `n` argument. Bayes factors are then computed using Gaussian approximations of the likelihood (and posterior), similar as in classical Wald test. 186 | 187 | We illustrate this for a Poisson regression model 188 | ```{r, eval = FALSE} 189 | poisson1 <- glm(formula = breaks ~ wool + tension, data = datasets::warpbreaks, 190 | family = poisson) 191 | ``` 192 | The estimates, the error covariance matrix, and the sample size are extracted from the fitted model 193 | ```{r, eval = FALSE} 194 | estimates <- poisson1$coefficients 195 | covmatrix <- vcov(poisson1) 196 | samplesize <- nobs(poisson1) 197 | ``` 198 | Constrained hypotheses on the parameters `names(estimates)` can then be tested as follows 199 | ```{r, eval = FALSE} 200 | BF1 <- BF(estimates, Sigma = covmatrix, n = samplesize, hypothesis = 201 | "woolB > tensionM > tensionH; woolB = tensionM = tensionH") 202 | ``` 203 | 204 | Note that the same hypothesis test would be executed when calling 205 | ```{r, eval = FALSE} 206 | BF2 <- BF(poisson1, hypothesis = "woolB > tensionM > tensionH; 207 | woolB = tensionM = tensionH") 208 | ``` 209 | because the same Bayes factor is used when 
running `BF` on an object of class `glm` (see `Method: Bayes factor using Gaussian approximations` when calling `print(BF1)` and `print(BF2)`).
6 | 7 | ## Test environments 8 | * Local OS X Somoma 13.5, R 4.3.2, clang-1400.0.29.202, GNU Fortran 12.2.0 9 | * rhub check: atlas, c23, gcc13, intel, linux (R-devel) 10 | 11 | ## R CMD check results 12 | There were no ERRORs, WARNINGs, or NOTEs 13 | 14 | -------------------------------------------------------------------------------- /data/Fcor.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jomulder/BFpack/36de2e04a0eff31a16f5c03ded748ec634a31277/data/Fcor.rda -------------------------------------------------------------------------------- /data/actors.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jomulder/BFpack/36de2e04a0eff31a16f5c03ded748ec634a31277/data/actors.rda -------------------------------------------------------------------------------- /data/attention.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jomulder/BFpack/36de2e04a0eff31a16f5c03ded748ec634a31277/data/attention.rda -------------------------------------------------------------------------------- /data/fmri.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jomulder/BFpack/36de2e04a0eff31a16f5c03ded748ec634a31277/data/fmri.rda -------------------------------------------------------------------------------- /data/memory.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jomulder/BFpack/36de2e04a0eff31a16f5c03ded748ec634a31277/data/memory.rda -------------------------------------------------------------------------------- /data/relevents.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jomulder/BFpack/36de2e04a0eff31a16f5c03ded748ec634a31277/data/relevents.rda 
-------------------------------------------------------------------------------- /data/same_culture.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jomulder/BFpack/36de2e04a0eff31a16f5c03ded748ec634a31277/data/same_culture.rda -------------------------------------------------------------------------------- /data/same_location.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jomulder/BFpack/36de2e04a0eff31a16f5c03ded748ec634a31277/data/same_location.rda -------------------------------------------------------------------------------- /data/sivan.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jomulder/BFpack/36de2e04a0eff31a16f5c03ded748ec634a31277/data/sivan.rda -------------------------------------------------------------------------------- /data/therapeutic.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jomulder/BFpack/36de2e04a0eff31a16f5c03ded748ec634a31277/data/therapeutic.rda -------------------------------------------------------------------------------- /data/timssICC.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jomulder/BFpack/36de2e04a0eff31a16f5c03ded748ec634a31277/data/timssICC.rda -------------------------------------------------------------------------------- /data/tvprices.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jomulder/BFpack/36de2e04a0eff31a16f5c03ded748ec634a31277/data/tvprices.rda -------------------------------------------------------------------------------- /data/wilson.rda: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/jomulder/BFpack/36de2e04a0eff31a16f5c03ded748ec634a31277/data/wilson.rda -------------------------------------------------------------------------------- /inst/CITATION: -------------------------------------------------------------------------------- 1 | bibentry(bibtype = "Article", 2 | title = "{BFpack}: Flexible Bayes Factor Testing of Scientific Theories in {R}", 3 | author = c(person(given = "Joris", 4 | family = "Mulder", 5 | email = "j.mulder3@tilburguniversity.edu"), 6 | person(given = c("Donald", "R."), 7 | family = "Williams"), 8 | person(given = "Xin", 9 | family = "Gu"), 10 | person(given = "Andrew", 11 | family = "Tomarken"), 12 | person(given = "Florian", 13 | family = "B{\\\"o}ing-Messing"), 14 | person(given = "Anton", 15 | family = "Olsson-Collentine"), 16 | person(given = "Marlyne", 17 | family = "Meijerink"), 18 | person(given = "Janosch", 19 | family = "Menke"), 20 | person(given = "Robbie", 21 | family = "van Aert"), 22 | person(given = "Jean-Paul", 23 | family = "Fox"), 24 | person(given = "Herbert", 25 | family = "Hoijtink"), 26 | person(given = "Yves", 27 | family = "Rosseel"), 28 | person(given = "Eric-Jan", 29 | family = "Wagenmakers"), 30 | person(given = "Caspar", 31 | family = "van Lissa")), 32 | journal = "Journal of Statistical Software", 33 | year = "2021", 34 | volume = "100", 35 | number = "18", 36 | pages = "1--63", 37 | doi = "10.18637/jss.v100.i18", 38 | 39 | header = "To cite BFpack in publications use:", 40 | textVersion = 41 | paste("Joris Mulder, Donald R. 
Williams, Xin Gu, Andrew Tomarken, Florian B{\"o}ing-Messing, Anton Olsson-Collentine, Marlyne Meijerink, Janosch Menke, Robbie van Aert, Jean-Paul Fox, Herbert Hoijtink, Yves Rosseel, Eric-Jan Wagenmakers, Caspar van Lissa (2021).", 42 | "BFpack: Flexible Bayes Factor Testing of Scientific Theories in R.", 43 | "Journal of Statistical Software, 100(18), 1-63.", 44 | "doi:10.18637/jss.v100.i18") 45 | ) 46 | 47 | -------------------------------------------------------------------------------- /man/BFpack-package.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/BFpack-package.R 3 | \docType{package} 4 | \name{BFpack-package} 5 | \alias{BFpack} 6 | \alias{BFpack-package} 7 | \title{BFpack: Flexible Bayes factor testing of scientific expectations} 8 | \description{ 9 | The \code{R} package \strong{BFpack} provides tools for exploratory and 10 | confirmatory Bayesian hypothesis testing using Bayes factors and posterior probabilities 11 | under common statistical models. The main function `BF` needs a fitted model `x` as input 12 | argument. Depending on the class of the fitted model, a standard hypothesis test is 13 | executed by default. For example, if `x` is a 14 | fitted regression model of class `lm` then posterior probabilities are computed of whether 15 | each separate coefficient is zero, negative, or positive (assuming equal prior probabilities). 
16 | If one has specific hypotheses with 17 | equality and/or order constraints on the parameters under the fitted model `x` then these 18 | can be formulated using the `hypothesis` argument (a character string), possibly together 19 | prior probabilities for the hypotheses via the `prior` argument (default all hypotheses are 20 | equally likely a priori), and the `complement` argument which is a logical stating whether 21 | the complement hypotheses should be included in the case (`TRUE` by default). 22 | 23 | Use compilation for Fortran functions 24 | } 25 | \examples{ 26 | \dontrun{ 27 | # EXAMPLE 1. One-sample t test 28 | ttest1 <- t_test(therapeutic, mu = 5) 29 | print(ttest1) 30 | # confirmatory Bayesian one sample t test 31 | BF1 <- BF(ttest1, hypothesis = "mu = 5") 32 | summary(BF1) 33 | # exploratory Bayesian one sample t test 34 | BF(ttest1) 35 | 36 | # EXAMPLE 2. ANOVA 37 | aov1 <- aov(price ~ anchor * motivation,data = tvprices) 38 | BF1 <- BF(aov1, hypothesis = "anchorrounded = motivationlow; 39 | anchorrounded < motivationlow") 40 | summary(BF1) 41 | 42 | # EXAMPLE 3. Logistic regression 43 | fit <- glm(sent ~ ztrust + zfWHR + zAfro + glasses + attract + maturity + 44 | tattoos, family = binomial(), data = wilson) 45 | BF1 <- BF(fit, hypothesis = "ztrust > zfWHR > 0; 46 | ztrust > 0 & zfWHR = 0") 47 | summary(BF1) 48 | 49 | # EXAMPLE 4. Correlation analysis 50 | set.seed(123) 51 | cor1 <- cor_test(memory[1:20,1:3]) 52 | BF1 <- BF(cor1) 53 | summary(BF1) 54 | BF2 <- BF(cor1, hypothesis = "Wmn_with_Im > Wmn_with_Del > 0; 55 | Wmn_with_Im = Wmn_with_Del = 0") 56 | summary(BF2) 57 | } 58 | 59 | } 60 | \references{ 61 | Mulder, J., D.R. Williams, Gu, X., A. Tomarken, 62 | F. Böing-Messing, J.A.O.C. Olsson-Collentine, Marlyne Meyerink, J. Menke, 63 | J.-P. Fox, Y. Rosseel, E.J. Wagenmakers, H. Hoijtink., and van Lissa, C. 64 | (submitted). BFpack: Flexible Bayes Factor Testing of Scientific Theories 65 | in R. 
\url{https://arxiv.org/abs/1911.07728} 66 | 67 | Mulder, J., van Lissa, C., Gu, X., Olsson-Collentine, A., Boeing-Messing, F., Williams, 68 | D. R., Fox, J.-P., Menke, J., et al. (2019). BFpack: Flexible Bayes Factor Testing of 69 | Scientific Expectations. (Version 0.2.1) \url{https://CRAN.R-project.org/package=BFpack} 70 | } 71 | \seealso{ 72 | Useful links: 73 | \itemize{ 74 | \item \url{https://github.com/jomulder/BFpack} 75 | \item Report bugs at \url{https://github.com/jomulder/BFpack/issues} 76 | } 77 | 78 | } 79 | \author{ 80 | \strong{Maintainer}: Joris Mulder \email{j.mulder3@tilburguniversity.edu} 81 | 82 | Authors: 83 | \itemize{ 84 | \item Caspar van Lissa \email{c.j.vanlissa@uu.nl} [contributor] 85 | \item Donald R. Williams \email{drwwilliams@ucdavis.edu} [contributor] 86 | \item Xin Gu \email{guxin57@hotmail.com} [contributor] 87 | \item Anton Olsson-Collentine \email{J.A.E.OlssonCollentine@tilburguniversity.edu} [contributor] 88 | \item Florian Boeing-Messing \email{F.Boeing-Messing@uvt.nl} [contributor] 89 | \item Jean-Paul Fox \email{g.j.a.fox@utwente.nl} [contributor] 90 | } 91 | 92 | Other contributors: 93 | \itemize{ 94 | \item Janosch Menke \email{janosch.menke@uni-muenster.de} [contributor] 95 | \item Robbie van Aert \email{R.C.M.vanAert@tilburguniversity.edu} [contributor] 96 | \item Barry Brown [contributor] 97 | \item James Lovato [contributor] 98 | \item Kathy Russell [contributor] 99 | \item Lapack 3.8 [contributor] 100 | \item Jack Dongarra [contributor] 101 | \item Jim Bunch [contributor] 102 | \item Cleve Moler [contributor] 103 | \item Gilbert Stewart [contributor] 104 | \item John Burkandt [contributor] 105 | \item Ashwith Rego [contributor] 106 | \item Alexander Godunov [contributor] 107 | \item Alan Miller [contributor] 108 | \item Jean-Pierre Moreau [contributor] 109 | \item The R Core Team [copyright holder] 110 | } 111 | 112 | } 113 | -------------------------------------------------------------------------------- 
/man/Fcor.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/Fcor.R 3 | \docType{data} 4 | \name{Fcor} 5 | \alias{Fcor} 6 | \title{Student t approximations of Fisher transformed correlations} 7 | \format{ 8 | A data.frame with 3 columns. 9 | } 10 | \usage{ 11 | data(Fcor) 12 | } 13 | \description{ 14 | Approximated degrees of freedom and approximated scale of the Fisher transformed 15 | correlations depending on the dimension of the vector of dependent variables P 16 | based on a joint uniform prior. 17 | } 18 | \details{ 19 | \tabular{lll}{ 20 | \strong{nu} \tab \code{numeric} \tab Approximated degrees of freedom\cr 21 | \strong{sigma} \tab \code{numeric} \tab Approximated scale\cr 22 | \strong{P} \tab \code{integer} \tab Dimension of vector of dependent variables\cr 23 | } 24 | } 25 | \keyword{datasets} 26 | -------------------------------------------------------------------------------- /man/actors.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/actors.R 3 | \docType{data} 4 | \name{actors} 5 | \alias{actors} 6 | \title{Actors from a small hypothetical network} 7 | \format{ 8 | dataframe (25 rows, 4 columns) 9 | 10 | \tabular{lll}{ 11 | \strong{actors$id} \tab \code{integer} \tab ID of the employee, corresponding to 12 | the sender and receiver IDs in the events dataframe \cr 13 | \strong{actors$location} \tab \code{numeric} \tab Location of the actor, 14 | ranging from 1-4 \cr 15 | \strong{actors$culture} \tab \code{character} \tab Categorical variable, indicating the 16 | culture of the employee \cr 17 | } 18 | } 19 | \usage{ 20 | data(actors) 21 | } 22 | \description{ 23 | The related data files 'events', 'same_location', 'same_culture' contain 24 | information on the event sequence and the two event statistics 
respectively. 25 | } 26 | \keyword{datasets} 27 | -------------------------------------------------------------------------------- /man/attention.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/attention.R 3 | \docType{data} 4 | \name{attention} 5 | \alias{attention} 6 | \title{Multiple Sources of Attentional Dysfunction in Adults With Tourette's Syndrome} 7 | \format{ 8 | A data.frame with 51 rows and 2 columns. 9 | } 10 | \usage{ 11 | data(attention) 12 | } 13 | \description{ 14 | Data from a psychological study comparing attentional performances of 15 | Tourette's syndrome (TS) patients, ADHD patients, and controls. 16 | These data were simulated using the sufficient statistics from Silverstein, 17 | Como, Palumbo, West, and Osborn (1995). 18 | } 19 | \details{ 20 | \tabular{lll}{ 21 | \strong{accuracy} \tab \code{numeric} \tab Participant's accuracy in the attentional task\cr 22 | \strong{group} \tab \code{factor} \tab Participant's group membership (TS patient, ADHD patient, or control)\cr 23 | } 24 | } 25 | \references{ 26 | Silverstein, S. M., Como, P. G., Palumbo, D. R., West, L. L., & Osborn, L. M. (1995). Multiple sources of attentional dysfunction in adults with Tourette's syndrome: Comparison with attention deficit-hyperactivity disorder. Neuropsychology, 9(2), 157-164. doi:10.1037/0894-4105.9.2.157 27 | } 28 | \keyword{datasets} 29 | -------------------------------------------------------------------------------- /man/bartlett_test.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/BF.var.R 3 | \name{bartlett_test} 4 | \alias{bartlett_test} 5 | \alias{bartlett_test.default} 6 | \title{Bartlett Test of Homogeneity of Variances} 7 | \usage{ 8 | bartlett_test(x, g, ...) 
9 | 10 | \method{bartlett_test}{default}(x, g, ...) 11 | } 12 | \arguments{ 13 | \item{x}{a numeric vector of data values, or a list of 14 | numeric data vectors representing the respective samples, 15 | or fitted linear model objects (inheriting from class "lm").} 16 | 17 | \item{g}{a vector or factor object giving the group for 18 | the corresponding elements of x. Ignored if x is a list.} 19 | 20 | \item{...}{further arguments to be passed to or from methods.} 21 | } 22 | \value{ 23 | A list with class \code{"bartlett_htest"} containing the following 24 | components: \item{statistic}{Bartlett's K-squared test statistic.} 25 | \item{parameter}{the degrees of freedom of the approximate chi-squared 26 | distribution of the test statistic.} 27 | \item{p.value}{the p-value of the test.} \item{conf.int}{a confidence 28 | interval for the mean appropriate to the specified alternative hypothesis.} 29 | \item{method}{the character string "Bartlett test of homogeneity of variances".} 30 | \item{data.name}{a character string giving the names of the data.} 31 | \item{vars}{the sample variances across groups (samples).} 32 | \item{n}{the number of observations per group (sample)} 33 | } 34 | \description{ 35 | Performs Bartlett's test of the null that the 36 | variances in each of the groups (samples) are the same. 37 | } 38 | \details{ 39 | \code{x} must be a numeric data vector, and \code{g} 40 | must be a vector or factor object of the same length as \code{x} 41 | giving the group for the corresponding elements of \code{x}. 42 | } 43 | \section{Bain t_test}{ 44 | 45 | In order to allow users to enjoy the functionality of bain with the familiar 46 | stats-function \code{bartlett.test}, we have had to make minor changes to the 47 | function \code{bartlett.test.default}. All rights to, and credit for, the 48 | function \code{bartlett.test.default} 49 | belong to the R Core Team, as indicated in the original license below. 
50 | We make no claims to copyright and incur no liability with regard to the 51 | changes implemented in \code{bartlett_test}. 52 | 53 | This the original copyright notice by the R core team: 54 | File src/library/stats/R/bartlett_test.R 55 | Part of the R package, https://www.R-project.org 56 | 57 | Copyright (C) 1995-2015 The R Core Team 58 | 59 | This program is free software; you can redistribute it and/or modify 60 | it under the terms of the GNU General Public License as published by 61 | the Free Software Foundation; either version 2 of the License, or 62 | (at your option) any later version. 63 | 64 | This program is distributed in the hope that it will be useful, 65 | but WITHOUT ANY WARRANTY; without even the implied warranty of 66 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 67 | GNU General Public License for more details. 68 | 69 | A copy of the GNU General Public License is available at 70 | https://www.R-project.org/Licenses/ 71 | } 72 | 73 | \examples{ 74 | require(graphics) 75 | 76 | plot(count ~ spray, data = InsectSprays) 77 | bartlett_test(InsectSprays$count, InsectSprays$spray) 78 | 79 | } 80 | \references{ 81 | Bartlett, M. S. (1937). Properties of sufficiency 82 | and statistical tests. Proceedings of the Royal Society of 83 | London Series A 160, 268–282. DOI: 10.1098/rspa.1937.0109. 
84 | } 85 | -------------------------------------------------------------------------------- /man/cor_test.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/BF.cortest.R 3 | \name{cor_test} 4 | \alias{cor_test} 5 | \title{Bayesian correlation analysis} 6 | \usage{ 7 | cor_test(..., formula = NULL, iter = 5000, burnin = 3000, nugget.scale = 0.999) 8 | } 9 | \arguments{ 10 | \item{...}{matrices (or data frames) of dimensions \emph{n} (observations) by \emph{p} (variables) 11 | for different groups (in case of multiple matrices or data frames).} 12 | 13 | \item{formula}{an object of class \code{\link[stats]{formula}}. This allows for including 14 | control variables in the model (e.g., \code{~ education}).} 15 | 16 | \item{iter}{number of iterations from posterior (default is 5000).} 17 | 18 | \item{burnin}{number of iterations for burnin (default is 3000).} 19 | 20 | \item{nugget.scale}{a scalar to avoid computational issues due to posterior draws for the corralations 21 | too close to 1 in absolute value. Posterior draws for the correlations are multiplied with this nugget.scale. 22 | So \code{nugget.scale} should be close to 1 (the default is .999). 
If the traceplots show that draws are stuck 23 | at 1 or -1 too long try a slightly smaller \code{nugget.scale}.} 24 | } 25 | \value{ 26 | list of class \code{cor_test}: 27 | \itemize{ 28 | \item \code{meanF} posterior means of Fisher transform correlations 29 | \item \code{covmF} posterior covariance matrix of Fisher transformed correlations 30 | \item \code{correstimates} posterior estimates of correlation coefficients 31 | \item \code{corrdraws} list of posterior draws of correlation matrices per group 32 | \item \code{corrnames} names of all correlations 33 | } 34 | } 35 | \description{ 36 | Estimate the unconstrained posterior for the correlations using a joint uniform prior (Mulder and Gelissen, 37 | 2023) or a marginally uniform prior (Barnard et al., 2000, Mulder, 2016). Correlation matrices are sampled from the posterior 38 | using the MCMC algorithm of Talhouk et al. (2012). 39 | } 40 | \examples{ 41 | \donttest{ 42 | # Bayesian correlation analysis of the 6 variables in 'memory' object 43 | # we consider a correlation analysis of the first three variable of the memory data. 44 | fit <- cor_test(BFpack::memory[,1:3]) 45 | 46 | # Bayesian correlation of variables in memory object in BFpack while controlling 47 | # for the Cat variable 48 | fit <- cor_test(BFpack::memory[,c(1:4)],formula = ~ Cat) 49 | 50 | # Example of Bayesian estimation of polyserial correlations 51 | memory_example <- memory[,c("Im","Rat")] 52 | memory_example$Rat <- as.ordered(memory_example$Rat) 53 | fit <- cor_test(memory_example) 54 | 55 | # Bayesian correlation analysis of first three variables in memory data 56 | # for two different groups 57 | HC <- subset(BFpack::memory[,c(1:3,7)], Group == "HC")[,-4] 58 | SZ <- subset(BFpack::memory[,c(1:3,7)], Group == "SZ")[,-4] 59 | fit <- cor_test(HC,SZ) 60 | 61 | } 62 | } 63 | \references{ 64 | Barnard, J., McCulloch, R., & Meng, X. L. (2000). 
Modeling covariance matrices in terms of standard deviations and 65 | correlations, with application to shrinkage. Statistica Sinica, 1281-1311. 66 | 67 | Joe, H. (2006). Generating random correlation matrices based on partial correlations. Journal of Multivariate 68 | Analysis, 97(10), 2177-2189. 69 | 70 | Mulder, J., & Gelissen, J. P. (2023). Bayes factor testing of equality and order constraints on measures of 71 | association in social research. Journal of Applied Statistics, 50(2), 315-351. 72 | 73 | Mulder, J. (2016). Bayes factors for testing order-constrained hypotheses on correlations. Journal of Mathematical 74 | Psychology, 72, 104-115. 75 | 76 | Talhouk, A., Doucet, A., & Murphy, K. (2012). Efficient Bayesian inference for multivariate probit models with 77 | sparse inverse correlation matrices. Journal of Computational and Graphical Statistics, 21(3), 739-757. 78 | 79 | } 80 | -------------------------------------------------------------------------------- /man/figures/logo_BFpack.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jomulder/BFpack/36de2e04a0eff31a16f5c03ded748ec634a31277/man/figures/logo_BFpack.png -------------------------------------------------------------------------------- /man/figures/logo_BFpack_small.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jomulder/BFpack/36de2e04a0eff31a16f5c03ded748ec634a31277/man/figures/logo_BFpack_small.png -------------------------------------------------------------------------------- /man/fmri.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/fmri.R 3 | \docType{data} 4 | \name{fmri} 5 | \alias{fmri} 6 | \title{fMRI data} 7 | \format{ 8 | A data.frame with 13 rows and 6 columns. 
9 | } 10 | \usage{ 11 | data(fmri) 12 | } 13 | \description{ 14 | fMRI data assessing relation between individual differences in the ability to recognize 15 | faces and cars and thickness of the superficial, middle, and deep layers of the 16 | fusiform face area, as assessed by high-resolution fMRI recognition (Williams et al, 2019, under review) 17 | } 18 | \details{ 19 | \tabular{lll}{ 20 | \strong{Subject}\tab\code{numeric}\tab Particicpant ID number\cr 21 | \strong{Face} \tab \code{numeric} \tab Standardized score on face recognition battery\cr 22 | \strong{Vehicle} \tab \code{numeric} \tab Standardized score on vehicle recognition battery\cr 23 | \strong{Superficial} \tab \code{numeric} \tab Depth in mm of superficial layer of FFA\cr 24 | \strong{Middle} \tab \code{numeric} \tab Depth in mm of middle layer of FFA\cr 25 | \strong{Bform} \tab \code{numeric} \tab Depth in mm of deep layer of FFA\cr 26 | } 27 | } 28 | \references{ 29 | McGuigin, R.W., Newton, A.T., Tamber-Rosenau, B., Tomarken, A.J, & Gauthier, I. (under review). Thickness of deep layers in the fusiform face area predicts face recognition. 30 | } 31 | \keyword{datasets} 32 | -------------------------------------------------------------------------------- /man/memory.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/memory.R 3 | \docType{data} 4 | \name{memory} 5 | \alias{memory} 6 | \title{Memory data on health and schizophrenic patients} 7 | \format{ 8 | A data.frame with 40 rows and 8 columns. 9 | } 10 | \usage{ 11 | data(memory) 12 | } 13 | \description{ 14 | Data set from study assessing differences between schizophrenic patients and 15 | healthy control participants in patterns of correlations among 6 verbal memory 16 | tasks (Ichinose et al., 2019). 
17 | \tabular{lll}{ 18 | \strong{Im} \tab \code{numeric} \tab Percent correct on immediate recall of 3 word lists\cr 19 | \strong{Del} \tab \code{numeric} \tab Percent correct on delayed recall of 3 word lists\cr 20 | \strong{Wmn} \tab \code{numeric} \tab Number correct on letter-number span test of auditory working memory\cr 21 | \strong{Cat} \tab \code{numeric} \tab Number correct on category fluency task\cr 22 | \strong{Fas} \tab \code{numeric} \tab Number correct on letter fluency task\cr 23 | \strong{Rat} \tab \code{numeric} \tab Number correct on remote associates task\cr 24 | \strong{Group} \tab \code{factor} \tab Participant Group (HC = Healthy Control; SZ = Schizophrenia) \cr 25 | } 26 | } 27 | \references{ 28 | Ichinose, M.C., Han, G., Polyn, S., Park, S., & Tomarken, A.J. (2019). Verbal memory performance discordance in schizophrenia: A reflection of cognitive dysconnectivity. 29 | Unpublished manuscript. 30 | } 31 | \keyword{datasets} 32 | -------------------------------------------------------------------------------- /man/mvt_test.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/BF.mvt_test.R 3 | \name{mvt_test} 4 | \alias{mvt_test} 5 | \title{Multivariate Student t test} 6 | \usage{ 7 | mvt_test(X, Y, null = NULL, paired = FALSE, ...) 8 | } 9 | \arguments{ 10 | \item{X}{a data matrix with the variables in the columns.} 11 | 12 | \item{Y}{an optional data matrix with the variables in the columns.} 13 | 14 | \item{null}{a vector of the null values of the variables.} 15 | 16 | \item{paired}{a logical indicating whether you want a multivariate paired t-test.} 17 | 18 | \item{...}{further arguments to be passed to or from methods.} 19 | } 20 | \value{ 21 | An object that can be applied to the \code{BF()}. 
22 | } 23 | \description{ 24 | First step to performs a Bayesian multivariate one sample Student t test using the 25 | (adjusted) fractional Bayes factor using the \code{BF()} function. 26 | } 27 | \details{ 28 | \code{X} must be a data matrix and \code{null} 29 | must be a vector of the assumed null values of the variables. 30 | } 31 | \examples{ 32 | 33 | mvt_fmri <- mvt_test(fmri[,1:2],null = c(0,0)) 34 | BF(mvt_fmri) 35 | 36 | # the same test can be executed via the lm() function 37 | intercept <- rep(1,nrow(fmri)) 38 | lm1 <- lm(cbind(Face,Vehicle) ~ -1 + intercept, data=fmri) 39 | BF(lm1,hypothesis="intercept_on_Face=intercept_on_Vehicle=0") 40 | 41 | } 42 | \references{ 43 | Mulder, J. and Gu, X. (2023). Bayesian Testing of Scientific 44 | Expectations under Multivariate Normal Linear Models. Multivariate Behavioral 45 | Research, 57, 767-783. DOI: 10.1080/00273171.2021.1904809. 46 | } 47 | -------------------------------------------------------------------------------- /man/relevents.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/relevents.R 3 | \docType{data} 4 | \name{relevents} 5 | \alias{relevents} 6 | \title{A sequence of innovation-related e-mail messages} 7 | \format{ 8 | dataframe (247 rows, 3 columns) 9 | 10 | \tabular{lll}{ 11 | \strong{relevents$time} \tab \code{numeric} \tab Time of the e-mail message, 12 | in seconds since onset of the observation \cr 13 | \strong{relevents$sender} \tab \code{integer} \tab ID of the sender, corresponding to 14 | the employee IDs in the actors dataframe \cr 15 | \strong{relevents$receiver} \tab \code{integer} \tab ID of the receiver \cr 16 | } 17 | } 18 | \usage{ 19 | data(relevents) 20 | } 21 | \description{ 22 | A time-ordered sequence of 247 communication messages between 25 actors. 
23 | } 24 | \details{ 25 | The related data files 'actors', 'same_location', 'same_culture' contain information 26 | on the actors and three event statistics respectively. 27 | } 28 | \keyword{datasets} 29 | -------------------------------------------------------------------------------- /man/same_culture.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/same_culture.R 3 | \docType{data} 4 | \name{same_culture} 5 | \alias{same_culture} 6 | \title{Same culture event statistic} 7 | \format{ 8 | dataframe (25 rows, 4 columns) 9 | 10 | \tabular{lll}{ 11 | \strong{same_culture} \tab \code{integer} \tab Event statistic. Matrix with senders in the 12 | rows and receivers in the columns. The event statistic is 1 if sender and receiver have 13 | the same culture and 0 otherwise. \cr 14 | } 15 | } 16 | \usage{ 17 | data(same_culture) 18 | } 19 | \description{ 20 | A matrix coding whether senders of events (in the rows) and receivers of events 21 | (in the column) have the background culture. Related to the 'events' data object, 22 | that contains a relational event sequence, and the 'actors' object, that contains 23 | information on the 25 actors involved in the relational event sequence. 24 | } 25 | \keyword{datasets} 26 | -------------------------------------------------------------------------------- /man/same_location.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/same_location.R 3 | \docType{data} 4 | \name{same_location} 5 | \alias{same_location} 6 | \title{Same location event statistic} 7 | \format{ 8 | dataframe (25 rows, 4 columns) 9 | 10 | \tabular{lll}{ 11 | \strong{same_location} \tab \code{integer} \tab Event statistic. Matrix with senders in the 12 | rows and receivers in the columns. 
The event statistic is 1 if sender and receiver have 13 | the same location and 0 otherwise. \cr 14 | } 15 | } 16 | \usage{ 17 | data(same_location) 18 | } 19 | \description{ 20 | A matrix coding whether senders of events (in the rows) and receivers of events 21 | (in the column) have the same location. Related to the 'events' data object, 22 | that contains a relational event sequence, and the 'actors' object, that contains 23 | information on the 25 actors involved in the relational event sequence. 24 | } 25 | \keyword{datasets} 26 | -------------------------------------------------------------------------------- /man/sivan.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/sivan.R 3 | \docType{data} 4 | \name{sivan} 5 | \alias{sivan} 6 | \title{Wason task performance and morality} 7 | \format{ 8 | A data.frame with 887 rows and 12 columns. 9 | } 10 | \usage{ 11 | data(sivan) 12 | } 13 | \description{ 14 | Data from an experimental study, using the Wason selection task (Wason 1968) 15 | to examine whether humans have cognitive adaptations for detecting violations 16 | of rules in multiple moral domains. Moral domains are operationalized in 17 | terms of the five domains of the Moral Foundations Questionnaire 18 | (Graham et al. 2011). 19 | These data were simulated using the 20 | R-package \code{synthpop}, based on the characteristics of the original data. 21 | } 22 | \details{ 23 | \tabular{lll}{ 24 | \strong{sex} \tab \code{factor} \tab Participant sex\cr 25 | \strong{age} \tab \code{integer} \tab Participant age\cr 26 | \strong{nationality} \tab \code{factor} \tab Participant nationality\cr 27 | \strong{politics} \tab \code{integer} \tab How would you define your political opinions? Likert type scale, from 1 (Liberal) to 6 (Conservative)\cr 28 | \strong{WasonOrder} \tab \code{factor} \tab Was the Wason task presented before, or after the MFQ? 
\cr 29 | \strong{Harm} \tab \code{numeric} \tab MFQ harm domain.\cr 30 | \strong{Fairness} \tab \code{numeric} \tab MFQ fairness domain.\cr 31 | \strong{Loyalty} \tab \code{numeric} \tab MFQ loyalty domain.\cr 32 | \strong{Purity} \tab \code{numeric} \tab MFQ purity domain.\cr 33 | \strong{Tasktype} \tab \code{ordered} \tab How was the Wason task framed?\cr 34 | \strong{GotRight} \tab \code{factor} \tab Did the participant give the correct answer to the Wason task? 35 | } 36 | } 37 | \references{ 38 | Sivan, J., Curry, O. S., & Van Lissa, C. J. (2018). Excavating the Foundations: Cognitive Adaptations for Multiple Moral Domains. Evolutionary Psychological Science, 4(4), 408–419. doi:10.1007/s40806-018-0154-8 39 | } 40 | \keyword{datasets} 41 | -------------------------------------------------------------------------------- /man/therapeutic.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/therapeutic.R 3 | \docType{data} 4 | \name{therapeutic} 5 | \alias{therapeutic} 6 | \title{Data come from an experimental study (Rosa, Rosa, Sarner, and Barrett, 1998) 7 | that were also used in Howell (2012, p.196). 8 | An experiment was conducted to investigate if Therapeutic Touch practitioners 9 | who were blindfolded can effectively identify which of their hands is below the experimenter¡¯s. 10 | Twenty-eight practitioners were involved and tested 10 times in the experiment. 11 | Researchers expected an average of 5 correct answers from each practitioner 12 | as it is the number by chance if they do not outperform others.} 13 | \format{ 14 | A data.frame with 22 rows and 1 column. 15 | } 16 | \usage{ 17 | data(therapeutic) 18 | } 19 | \description{ 20 | \tabular{lll}{ 21 | \strong{correct} \tab \code{integer} \tab How many correct answers are from each practitioner)\cr 22 | } 23 | } 24 | \references{ 25 | Howell, D. (2012). 
Statistical methods for psychology (8th ed.). Belmont, CA: Cengage Learning. 26 | } 27 | \keyword{datasets} 28 | -------------------------------------------------------------------------------- /man/timssICC.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/timssICC.r 3 | \docType{data} 4 | \name{timssICC} 5 | \alias{timssICC} 6 | \title{Trends in International Mathematics and Science Study (TIMSS) 2011-2015} 7 | \format{ 8 | A data.frame with 16770 rows and 15 columns. 9 | } 10 | \usage{ 11 | data(timssICC) 12 | } 13 | \description{ 14 | A stratified sample was drawn by country and school to obtain a balanced 15 | sample of p = 15 grade-4 students 16 | per school for each of four countries (The Netherlands (NL), Croatia (HR), 17 | Germany 18 | (DE), and Denmark (DK)) and two measurement occasions (2011, 2015). 19 | Achievement scores 20 | (first plausible value) of overall mathematics were considered. Performances 21 | of fourth 22 | and eight graders from more than 50 participating countries around the world 23 | can be found at (https://www.iea.nl/timss) 24 | The TIMSS achievement scale is centered at 500 and the standard deviation is 25 | equal to 100 scale score points. 26 | The TIMSS data set has a three-level structure, where students are nested 27 | within classrooms/schools, and 28 | the classrooms/schools are nested within countries. Only one classroom was 29 | sampled per school. 30 | Changes in the mathematics achievement can be investigated by examining the 31 | grouping of 32 | students in schools across countries. Changes in country-specific intraclass 33 | correlation coefficient 34 | from 2011 to 2015, representing heterogeneity in mathematic achievements 35 | within and between schools across years, 36 | can be tested. 
When detecting a decrease in average performance together 37 | with an increase 38 | of the intraclass correlation, a subset of schools performed worse. For a 39 | constant 40 | intraclass correlation across years the drop in performance applied to the 41 | entire population 42 | of schools. For different countries, changes in the intraclass correlation 43 | across years 44 | can be tested concurrently to examine also differences across countries. 45 | } 46 | \details{ 47 | \tabular{lll}{ 48 | \strong{math} \tab \code{numeric} \tab math score child\cr 49 | \strong{groupNL11} \tab \code{numeric} \tab 50 | Indicator for child from NL in 2011\cr 51 | \strong{groupNL15} \tab \code{numeric} \tab 52 | Indicator for child from NL in 2015\cr 53 | \strong{groupHR11} \tab \code{numeric} \tab 54 | Indicator for child from HR in 2011\cr 55 | \strong{groupHR15} \tab \code{numeric} \tab 56 | Indicator for child from HR in 2015\cr 57 | \strong{groupDE11} \tab \code{numeric} \tab 58 | Indicator for child from DE in 2011\cr 59 | \strong{groupDE15} \tab \code{numeric} \tab 60 | Indicator for child from DE in 2015\cr 61 | \strong{groupDR11} \tab \code{numeric} \tab 62 | Indicator for child from DK in 2011\cr 63 | \strong{groupDR15} \tab \code{numeric} \tab 64 | Indicator for child from DK in 2015\cr 65 | \strong{gender} \tab \code{numeric} \tab Female=0,Male=1 \cr 66 | \strong{weight} \tab \code{numeric} \tab Child sampling weight \cr 67 | \strong{yeargender} \tab \code{numeric} \tab 68 | Interaction for occassion and gender \cr 69 | \strong{lln} \tab \code{numeric} \tab 70 | total number of children in school-class \cr 71 | \strong{groupschool} \tab \code{factor} \tab 72 | Nested indicator for school in country\cr 73 | \strong{schoolID} \tab \code{factor} \tab 74 | Unique indicator for school 75 | } 76 | } 77 | \references{ 78 | Mulder, J. & Fox, J.-P. (2019). Bayes factor testing of multiple 79 | intraclass correlations. Bayesian Analysis. 14, 2, p. 521-552. 
80 | } 81 | \keyword{datasets} 82 | -------------------------------------------------------------------------------- /man/tvprices.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/tvprices.R 3 | \docType{data} 4 | \name{tvprices} 5 | \alias{tvprices} 6 | \title{Precision of the Anchor Influences the Amount of Adjustment} 7 | \format{ 8 | A data.frame with 59 rows and 3 columns. 9 | } 10 | \usage{ 11 | data(tvprices) 12 | } 13 | \description{ 14 | Data from an experimental study where participants have to guess the price 15 | of a plasma tv. There were two experimental conditions. 16 | These data were simulated using the sufficient statistics from Janiszewski & 17 | Uy (2008). 18 | } 19 | \details{ 20 | \tabular{lll}{ 21 | \strong{price} \tab \code{numeric} \tab Participant z-scores of price\cr 22 | \strong{anchor} \tab \code{factor} \tab Participant anchor\cr 23 | \strong{motivation} \tab \code{factor} \tab motivation to change\cr 24 | } 25 | } 26 | \references{ 27 | Janiszewski, C., & Uy, D. (2008). Precision of the anchor influences the amount of adjustment. Psychological Science, 19(2), 121–127. doi:10.1111/j.1467-9280.2008.02057.x 28 | } 29 | \keyword{datasets} 30 | -------------------------------------------------------------------------------- /man/wilson.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/wilson.r 3 | \docType{data} 4 | \name{wilson} 5 | \alias{wilson} 6 | \title{Facial trustworthiness and criminal sentencing} 7 | \format{ 8 | A data.frame with 742 rows and 13 columns. 
9 | } 10 | \usage{ 11 | data(wilson) 12 | } 13 | \description{ 14 | Data from a correlational study in which the correlation between ratings of 15 | facial trustworthiness of inmates was correlated with whether they had 16 | received the death penalty or not (wilson and Rule, 2015). These data were 17 | simulated using the R-package \code{synthpop}, based on the characteristics 18 | of the original data. 19 | } 20 | \details{ 21 | \tabular{lll}{ 22 | \strong{stim} \tab \code{integer} \tab Stimulus Number\cr 23 | \strong{sent} \tab \code{integer} \tab Sentence: 1 = Death, 0 = Life\cr 24 | \strong{race} \tab \code{integer} \tab Race: 1 = White, -1 = Black\cr 25 | \strong{glasses} \tab \code{integer} \tab Glasses: 1 = Yes, 0 = No\cr 26 | \strong{tattoos} \tab \code{integer} \tab Tattoos: 1 = Yes, 0 = No \cr 27 | \strong{ztrust} \tab \code{numeric} \tab Trustworthiness \cr 28 | \strong{trust_2nd} \tab \code{numeric} \tab Trustworthiness ratings with 2nd control group; Death targets are same as in primary analysis, Life targets are different.\cr 29 | \strong{afro} \tab \code{numeric} \tab raw Afrocentricity ratings.\cr 30 | \strong{zAfro} \tab \code{numeric} \tab Afrocentricity ratings normalized within target race. Analyses in paper were done with this variable.\cr 31 | \strong{attract} \tab \code{numeric} \tab Attractiveness\cr 32 | \strong{fWHR} \tab \code{numeric} \tab facial width-to-height \cr 33 | \strong{afWHR} \tab \code{numeric} \tab fWHR normalized within target race. Analyses in paper were done with this variable \cr 34 | \strong{maturity} \tab \code{numeric} \tab Maturity 35 | } 36 | } 37 | \references{ 38 | Wilson, J. P., & Rule, N. O. (2015). Facial Trustworthiness 39 | Predicts Extreme Criminal-Sentencing Outcomes. Psychological Science, 40 | 26(8), 1325–1331. 
doi: 10.1177/0956797615590992 41 | } 42 | \keyword{datasets} 43 | -------------------------------------------------------------------------------- /src/BFpack_init.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include // for NULL 3 | #include 4 | 5 | /* FIXME: 6 | Check these declarations against the C/Fortran source code. 7 | */ 8 | 9 | /* .Fortran calls */ 10 | extern void F77_NAME(draw_ju)(void *, void *, void *, void *, void *); 11 | extern void F77_NAME(estimate_bct_ordinal)(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); 12 | 13 | static const R_FortranMethodDef FortranEntries[] = { 14 | {"draw_ju", (DL_FUNC) &F77_NAME(draw_ju), 5}, 15 | {"estimate_bct_ordinal", (DL_FUNC) &F77_NAME(estimate_bct_ordinal), 28}, 16 | {NULL, NULL, 0} 17 | }; 18 | 19 | void R_init_BFpack(DllInfo *dll) 20 | { 21 | R_registerRoutines(dll, NULL, NULL, FortranEntries, NULL); 22 | R_useDynamicSymbols(dll, FALSE); 23 | } 24 | -------------------------------------------------------------------------------- /src/Makevars: -------------------------------------------------------------------------------- 1 | 2 | 3 | PKG_LIBS = $(LAPACK_LIBS) $(BLAS_LIBS) 4 | -------------------------------------------------------------------------------- /src/bct_prior.f90: -------------------------------------------------------------------------------- 1 | 2 | ! rngfuncs.f90 3 | module rngfuncs1 4 | 5 | implicit none 6 | 7 | public 8 | 9 | ! See the R header `R_ext/Random.h` for details 10 | interface 11 | ! double unif_rand(void); 12 | function unif_rand() bind(c,name="unif_rand") 13 | use, intrinsic :: iso_c_binding, only: c_double 14 | real(c_double) :: unif_rand 15 | end function 16 | 17 | ! 
void GetRNGstate(void); 18 | subroutine getrngstate() bind(c,name="GetRNGstate") 19 | end subroutine 20 | 21 | ! void PutRNGstate(void); 22 | subroutine putrngstate() bind(c,name="PutRNGstate") 23 | end subroutine 24 | 25 | end interface 26 | 27 | end module 28 | 29 | module rkinds1 30 | use, intrinsic :: iso_c_binding 31 | use, intrinsic :: iso_fortran_env 32 | private 33 | integer, parameter, public :: rint = int32 ! Using int32 from iso_fortran_env 34 | integer, parameter, public :: rdp = real64 ! Using real64 from iso_fortran_env 35 | ! Using real64 from iso_fortran_env 36 | end module 37 | 38 | 39 | subroutine draw_ju(P,drawscorr,samsize,numcorrgroup,Fisher) 40 | ! Fortran implementation of the algorithm proposed by Joe (2006) 41 | 42 | use rkinds1, only: rint, rdp 43 | use rngfuncs1 44 | 45 | implicit none 46 | 47 | integer(rint), intent(in) :: P, samsize, numcorrgroup, Fisher 48 | integer(rint) :: s1,r1, r2, i1, i2, k1, corrIndex(P,P), teldummy,& 49 | t1, t2, error1 50 | real (rdp) :: corrMat(P,P),draw1(1),& 51 | R2inv(P,P), vec1(P,1), vec2(P,1),& 52 | dummy11(1,1), dummy12(1,1), dummy22(1,1),& 53 | Di1i2, preinv(P,P) 54 | real(rdp), intent (out) :: drawscorr(samsize,numcorrgroup) 55 | real(rdp) :: alpha 56 | 57 | !========================================================================================! 58 | 59 | ! create corrIndex matrix 60 | teldummy = 1 61 | 62 | do r1=2,P 63 | do r2=1,r1-1 64 | corrIndex(r1,r2) = teldummy 65 | corrIndex(r2,r1) = teldummy 66 | teldummy = teldummy + 1 67 | end do 68 | end do 69 | 70 | do s1=1,samsize 71 | 72 | ! create identity matrix 73 | do t1=1,P 74 | do t2=1,P 75 | if (t1==t2) then 76 | corrMat(t1,t2)=1.0 77 | else 78 | corrMat(t1,t2)=0.0 79 | ENDIF 80 | end do 81 | end do 82 | do r1 = 1,P-1 83 | alpha=P/2.0 84 | draw1 = random_beta(alpha, alpha, .true.) 
85 | draw1 = draw1*2.0-1.0 86 | corrMat(r1,r1+1) = draw1(1) 87 | corrMat(r1+1,r1) = corrMat(r1,r1+1) 88 | drawscorr(s1,corrIndex(r1+1,r1)) = corrMat(r1,r1+1) 89 | end do 90 | R2inv(:,:) = 0 91 | preinv(:,:)= 0 92 | do r1 = 3,P 93 | do r2 = 1,P-r1+1 94 | i1 = r2 95 | i2 = r2+r1-1 96 | k1 = i2 - i1 97 | !draw partial correlations 98 | alpha = .5*(P+1-k1) 99 | draw1 = random_beta(alpha, alpha, .true.) 100 | draw1=draw1*2-1.0 101 | !rbeta(1,.5*(dim+1-k),.5*(dim+1-k))*2-1 102 | vec1(1:(k1-1),1) = corrMat(i1,(i1+1):(i1+k1-1)) 103 | vec2(1:(k1-1),1) = corrMat(i2,(i1+1):(i1+k1-1)) 104 | preinv(1:(i2-i1-1),1:(i2-i1-1)) = ((corrMat((i1+1):(i2-1),(i1+1):(i2-1)))) 105 | ! R2inv(1:(i2-i1-1),1:(i2-i1-1)) = inverse(preinv(1:(i2-i1-1),1:(i2-i1-1)),(i2-i1-1)) 106 | call FINDinv(preinv(1:(i2-i1-1),1:(i2-i1-1)),R2inv(1:(i2-i1-1),1:(i2-i1-1)),(i2-i1-1),error1) 107 | 108 | dummy11 = matmul(matmul(transpose(vec1(1:(k1-1),:)),R2inv(1:(i2-i1-1),1:(i2-i1-1))),vec1(1:(k1-1),:)) 109 | dummy22 = matmul(matmul(transpose(vec2(1:(k1-1),:)),R2inv(1:(i2-i1-1),1:(i2-i1-1))),vec2(1:(k1-1),:)) 110 | dummy12 = matmul(matmul(transpose(vec1(1:(k1-1),:)),R2inv(1:(i2-i1-1),1:(i2-i1-1))),vec2(1:(k1-1),:)) 111 | Di1i2 = sqrt((1-dummy11(1,1))*(1-dummy22(1,1))) 112 | 113 | corrMat(i1,i2) = dummy12(1,1) + Di1i2*draw1(1) 114 | corrMat(i2,i1) = corrMat(i1,i2) 115 | 116 | drawscorr(s1,corrIndex(i1,i2)) = corrMat(i1,i2) 117 | end do 118 | end do 119 | end do 120 | if(Fisher==1) then 121 | drawscorr(1:samsize,1:numcorrgroup) = .5*log((1.0+drawscorr(1:samsize,1:numcorrgroup)) & 122 | /(1.0-drawscorr(1:samsize,1:numcorrgroup))) 123 | end if 124 | 125 | 126 | contains 127 | 128 | 129 | ! Subroutine to find the inverse of a square matrix 130 | SUBROUTINE FINDinv(matrix, inverse, n, errorflag) 131 | 132 | implicit none 133 | 134 | !Declarations 135 | INTEGER(rint), INTENT(IN) :: n 136 | REAL(rdp), INTENT(IN) :: matrix(n,n) !Input matrix 137 | INTEGER(rint), INTENT(OUT) :: errorflag !Return error status. 
-1 for error, 0 for normal 138 | REAL(rdp), INTENT(OUT) :: inverse(n,n) !Inverted matrix 139 | 140 | integer :: ipiv(n), info, lwork 141 | real(rdp) :: work(n) 142 | 143 | external :: dgetrf, dgetri 144 | 145 | errorflag = 0 146 | 147 | inverse = matrix 148 | call dgetrf(n,n,inverse,n,ipiv,info) 149 | if (info > 0) then 150 | inverse = 0 151 | errorflag = -1 152 | return 153 | end if 154 | 155 | lwork = n 156 | call dgetri(n,inverse,n,ipiv,work,lwork,info) 157 | if (info > 0) then 158 | inverse = 0 159 | errorflag = -1 160 | return 161 | end if 162 | 163 | END SUBROUTINE FINDinv 164 | 165 | 166 | 167 | FUNCTION random_beta(aa, bb, first) RESULT(fn_val) 168 | 169 | ! Adapted from Fortran 77 code from the book: 170 | ! Dagpunar, J. 'Principles of random variate generation' 171 | ! Clarendon Press, Oxford, 1988. ISBN 0-19-852202-9 172 | 173 | ! Author: Alan Miller 174 | ! CSIRO Division of Mathematical & Information Sciences 175 | ! Private Bag 10, Clayton South MDC 176 | ! Clayton 3169, Victoria, Australia 177 | ! Phone: (+61) 3 9545-8016 Fax: (+61) 3 9545-8080 178 | ! e-mail: amiller @ bigpond.net.au 179 | 180 | ! FUNCTION GENERATES A RANDOM VARIATE IN [0,1] 181 | ! FROM A BETA DISTRIBUTION WITH DENSITY 182 | ! PROPORTIONAL TO BETA**(AA-1) * (1-BETA)**(BB-1). 183 | ! USING CHENG'S LOG LOGISTIC METHOD. 184 | 185 | ! AA = SHAPE PARAMETER FROM DISTRIBUTION (0 < REAL) 186 | ! BB = SHAPE PARAMETER FROM DISTRIBUTION (0 < REAL) 187 | 188 | implicit none 189 | 190 | REAL(rdp), INTENT(IN) :: aa, bb 191 | LOGICAL, INTENT(IN) :: first 192 | !INTEGER(rint), INTENT(IN) :: iseed 193 | REAL ( kind = rdp ) :: fn_val 194 | 195 | ! Local variables 196 | REAL(rdp), PARAMETER :: aln4 = 1.3862944, one=1.0, two=2.0, & 197 | vlarge = HUGE(1.0), vsmall = TINY(1.0), zero = 0.0 198 | REAL ( kind = rdp ) :: a, b, g, r, s, x, y, z 199 | REAL ( kind = rdp ), SAVE :: d, f, h, t, c 200 | LOGICAL, SAVE :: swap 201 | 202 | 203 | 204 | IF (first) THEN ! 
Initialization, if necessary 205 | a = aa 206 | b = bb 207 | swap = b > a 208 | IF (swap) THEN 209 | g = b 210 | b = a 211 | a = g 212 | END IF 213 | d = a/b 214 | f = a+b 215 | IF (b > one) THEN 216 | h = SQRT((two*a*b - f)/(f - two)) 217 | t = one 218 | ELSE 219 | h = b 220 | t = one/(one + (a/(vlarge*b))**b) 221 | END IF 222 | c = a+h 223 | END IF 224 | 225 | DO 226 | r = unif_rand() 227 | x = unif_rand() 228 | !print*, r,x 229 | 230 | s = r*r*x 231 | IF (r < vsmall .OR. s <= zero) CYCLE 232 | IF (r < t) THEN 233 | x = LOG(r/(one - r))/h 234 | y = d*EXP(x) 235 | z = c*x + f*LOG((one + d)/(one + y)) - aln4 236 | IF (s - one > z) THEN 237 | IF (s - s*z > one) CYCLE 238 | IF (LOG(s) > z) CYCLE 239 | END IF 240 | fn_val = y/(one + y) 241 | ELSE 242 | IF (4.0*s > (one + one/d)**f) CYCLE 243 | fn_val = one 244 | END IF 245 | EXIT 246 | END DO 247 | 248 | IF (swap) fn_val = one - fn_val 249 | RETURN 250 | END FUNCTION random_beta 251 | 252 | 253 | 254 | function eye(n) 255 | 256 | implicit none 257 | 258 | integer(rint):: i,n 259 | real(rdp):: eye(n,n) 260 | real(rdp):: check(n,n) 261 | 262 | check=0 263 | do i=1,n 264 | check(i,i)= 1 265 | enddo 266 | 267 | eye(:,:) = check(:,:) 268 | return 269 | 270 | end function eye 271 | 272 | 273 | end subroutine draw_ju 274 | 275 | 276 | 277 | 278 | 279 | 280 | -------------------------------------------------------------------------------- /src/rkinds0.mod: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jomulder/BFpack/36de2e04a0eff31a16f5c03ded748ec634a31277/src/rkinds0.mod -------------------------------------------------------------------------------- /src/rkinds1.mod: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jomulder/BFpack/36de2e04a0eff31a16f5c03ded748ec634a31277/src/rkinds1.mod -------------------------------------------------------------------------------- /src/rngfuncs.mod: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/jomulder/BFpack/36de2e04a0eff31a16f5c03ded748ec634a31277/src/rngfuncs.mod -------------------------------------------------------------------------------- /src/rngfuncs1.mod: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jomulder/BFpack/36de2e04a0eff31a16f5c03ded748ec634a31277/src/rngfuncs1.mod -------------------------------------------------------------------------------- /tests/testthat.R: -------------------------------------------------------------------------------- 1 | library(testthat) 2 | library(BFpack) 3 | 4 | test_check("BFpack") 5 | -------------------------------------------------------------------------------- /tests/testthat/test_BFcoeftest.R: -------------------------------------------------------------------------------- 1 | fit <- glm(sent ~ ztrust + zfWHR + zAfro + glasses + attract + maturity + 2 | tattoos, family = binomial(), data = wilson) 3 | BF_glm <- BF(fit) 4 | 5 | ct <- lmtest::coeftest(fit) 6 | BFdefault1 <- BF(ct[,1], Sigma = diag(ct[,2]^2), n = attr(ct,"nobs")) 7 | BFcoeftest1 <- BF(ct) 8 | 9 | test_that("BF.coeftest gives same exploratory results as BF.default", { 10 | expect_equivalent( 11 | BFdefault1$BFtu_exploratory,BFcoeftest1$BFtu_exploratory 12 | )}) 13 | 14 | 15 | -------------------------------------------------------------------------------- /tests/testthat/test_BFcortest.R: -------------------------------------------------------------------------------- 1 | 2 | # exploratory testing correlations in multivariate normal model 3 | set.seed(123) 4 | cor1 <- cor_test(mtcars[,4:6],iter = 1e3,burnin = 0) 5 | BF1 <- BF(cor1,prior.hyp.explo = c(1,1,1),cov.prob=.99) 6 | test_that("BF.cor_test use of cov.prob argument", { 7 | expect_equivalent( 8 | colnames(BF1$estimates)[3],"0.5%" 9 | ) 10 | expect_equivalent( 11 | 
BF1$estimates[1,3],-0.7098442,tol=.05 12 | ) 13 | }) 14 | BF1a <- BF(cor1,prior.hyp.explo = 3:5) 15 | PHPexplo <- matrix( 16 | c(0.14, 0.84, 0.01, 17 | 0.0, 0.0, 1.0, 18 | 0.0, 1.0, 0.0),nrow=3,byrow=T) 19 | test_that("BF.cor_test exploratory hypotheses on correlations correctly evaluated", { 20 | expect_equivalent( 21 | round(BF1$PHP_exploratory,2),PHPexplo, tolerance = .1 22 | ) 23 | expect_equivalent( 24 | BF1a$PHP_exploratory[1,], 25 | BF1$BFtu_exploratory[1,]*(3:5)/sum(BF1$BFtu_exploratory[1,]*(3:5)), 26 | tolerance = .02 27 | ) 28 | }) 29 | # confirmatory hypothesis test on the correlations 30 | BF2 <- BF(cor1,hypothesis="wt_with_drat .5") 11 | 12 | test_that("BF.coxph returns correct results,", { 13 | expect_true(out$PHP_confirmatory[1] > .5 & out$PHP_confirmatory[2] < .5) 14 | }) 15 | 16 | 17 | # Create a simple data set for a time-dependent model 18 | test2 <- list(start=c(1,2,5,2,1,7,3,4,8,8), 19 | stop=c(2,3,6,7,8,9,9,9,14,17), 20 | event=c(1,1,1,1,1,1,1,0,0,0), 21 | pred=c(1,0,0,1,0,1,1,1,0,0)) 22 | fit <- coxph(Surv(start, stop, event) ~ pred, test2) 23 | 24 | out <- BF(fit, hypothesis = "pred > .5; pred = .5") 25 | test_that("BF.coxph returns correct results,", { 26 | expect_true(out$PHP_confirmatory[1] < .5 & out$PHP_confirmatory[2] > .5) 27 | }) 28 | 29 | out <- BF(fit, hypothesis = "pred > .5; pred = .5",complement = FALSE) 30 | test_that("BF.coxph returns correct results,", { 31 | expect_equivalent( 32 | out$PHP_confirmatory, c(0.194,0.806), tol = .1 33 | ) 34 | }) 35 | -------------------------------------------------------------------------------- /tests/testthat/test_BFergm.R: -------------------------------------------------------------------------------- 1 | 2 | # test for BF on an ergm object 3 | 4 | #check if confirmatory test are the same for ergm object 5 | test_that("BF.ergm tests", { 6 | skip_on_cran() 7 | library(ergm) 8 | seed <- 123 9 | # florentine data 10 | data(florentine) 11 | ergm_fit <- ergm(flomarriage ~ edges + 12 | kstar(2) 
+ 13 | absdiff("wealth"), 14 | control = control.ergm(seed = seed)) 15 | get_estimates(ergm_fit) 16 | seed <- 123 17 | BFergm.test <- BF(ergm_fit, 18 | hypothesis = "0 = absdiff.wealth > kstar2", 19 | main.iters = 500, prior.hyp.explo = c(1,1,1)) 20 | expect_true( 21 | all.equal(c(0.185,0.815), 22 | unname(BFergm.test$PHP_confirmatory), tolerance = .2) 23 | ) 24 | expect_true( 25 | all.equal(c(0.1,0,0.9), 26 | unname(BFergm.test$PHP_exploratory[2,]), tolerance = .2) 27 | ) 28 | seed <- 123 29 | BFergm.test2 <- BF(ergm_fit, 30 | hypothesis = "0 = absdiff.wealth > kstar2", 31 | main.iters = 500, prior.hyp.explo = 1:3) 32 | expect_equivalent( 33 | unname(BFergm.test2$PHP_exploratory[1,]), 34 | unname(BFergm.test2$BFtu_exploratory[1,]*(1:3)/sum(BFergm.test2$BFtu_exploratory[1,]*(1:3))), 35 | tol=.05 36 | ) 37 | }) 38 | 39 | # same test with bergm 40 | #check if confirmatory test are the same 41 | test_that("BF.bergm one hypotheses correctly evaluated", { 42 | skip_on_cran() 43 | # example analysis 44 | library(Bergm) 45 | library(ergm) 46 | data(florentine) 47 | set.seed(222) 48 | bergm_fit <- bergm(flomarriage ~ kstar(2) + edges + absdiff("wealth"), 49 | seed = 1,main.iters = 500) 50 | BFbergm.test <- BF(bergm_fit,hypothesis = "0 = theta3 > theta1",main.iters = 500) 51 | expect_true( 52 | all.equal(0.17, 53 | unname(BFbergm.test$PHP_confirmatory)[1], tolerance = .2) 54 | ) 55 | }) 56 | 57 | 58 | -------------------------------------------------------------------------------- /tests/testthat/test_BFglm.R: -------------------------------------------------------------------------------- 1 | #Test glm class object 2 | counts <- c(18,17,15,20,10,20,25,13,12) 3 | outcome <- gl(3,1,9) 4 | treatment <- gl(3,3) 5 | glm.D93 <- glm(counts ~ treatment, family = poisson()) 6 | set.seed(432) 7 | BF1 <- BF(x=glm.D93, hypothesis = "treatment2 = 0; treatment2 < 0",prior.hyp.explo=c(1,1,1)) 8 | #check if confirmatory and exploratory test are the same for Gaussian estimator 9 | 
test_that("BF.glm two hypotheses correctly evaluated", { 10 | expect_true( 11 | all.equal(unname(BF1$PHP_exploratory[2,]), 12 | unname(BF1$PHP_confirmatory), tolerance = .005) 13 | )}) 14 | 15 | BF1.log <- BF(x=glm.D93, hypothesis = "treatment2 = 0; treatment2 < 0", log = TRUE) 16 | #check if confirmatory and exploratory test are the same for Gaussian estimator 17 | test_that("BF.glm two hypotheses correctly evaluated", { 18 | expect_true( 19 | all.equal(BF1$BFmatrix_confirmatory, 20 | exp(BF1.log$BFmatrix_confirmatory), tolerance = .005) 21 | )}) 22 | 23 | -------------------------------------------------------------------------------- /tests/testthat/test_BFmeta.R: -------------------------------------------------------------------------------- 1 | 2 | # test for BF on rma.uni object 3 | library(metafor) 4 | 5 | # ### Generate data 6 | tau2 <- 0.05 # True between-study variance 7 | 8 | set.seed(123) 9 | vi <- runif(50, min = 0.01, max = 0.2) # Observed sampling variances 10 | yi <- rnorm(50, mean = 0, sd = sqrt(vi+tau2)) # Observed effect sizes 11 | 12 | test_that("exploratory metafor random effects model", { 13 | skip_on_cran() 14 | ### Fit a random-effects model to the data 15 | res <- rma(yi = yi, vi = vi) 16 | set.seed(123) 17 | BFmeta1 <- BF(res,BF.type="stand.effect",cov.prob=.9) 18 | set.seed(123) 19 | BFmeta1a <- BF(res,BF.type="stand.effect",prior.hyp.explo = c(5:7)) 20 | set.seed(123) 21 | BFmeta1b <- BF(res,BF.type=prior("norm", c(mean = 0, sd = .5))) 22 | expect_equivalent( 23 | round(BFmeta1$PHP_exploratory[1,],3),c(.950,.020,.039), tolerance = .05 24 | ) 25 | expect_equivalent( 26 | round(BFmeta1b$PHP_exploratory[1,],3),c(.906,.055,.039), tolerance = .05 27 | ) 28 | expect_equivalent( 29 | unname(round(BFmeta1a$PHP_exploratory[2,],3)), 30 | unname(round(BFmeta1$BFtu_exploratory[2,]*(5:7)/sum(BFmeta1$BFtu_exploratory[2,]*(5:7)),3)), 31 | tolerance = .05 32 | ) 33 | }) 34 | 35 | # test fixed effects meta model 36 | test_that("exploratory metafor fixed 
effects model", { 37 | set.seed(123) 38 | res <- metafor::rma(yi = yi, vi = vi, method = "EE") 39 | BFmeta2 <- BF(res,BF.type="stand.effect") 40 | expect_equivalent( 41 | round(BFmeta2$PHP_exploratory,3),c(.964,0.019,0.017), tolerance = .05 42 | ) 43 | set.seed(123) 44 | res$ni <- rep(10,50) 45 | BFmeta2b <- BF(res,BF.type="unit.info") 46 | expect_equivalent( 47 | round(BFmeta2b$PHP_exploratory,3),c(.957,0.023,0.022), tolerance = .05 48 | ) 49 | }) 50 | 51 | 52 | 53 | -------------------------------------------------------------------------------- /tests/testthat/test_BFmlm.R: -------------------------------------------------------------------------------- 1 | #several tests if output is the same 2 | set.seed(36) 3 | # testing coefficients in multivariate normal model 4 | lm1 <- lm(cbind(mpg,cyl,hp) ~ disp + wt, data = mtcars) 5 | BF1 <- BF(lm1, BF.type = "AFBF",prior.hyp.explo=c(1,1,1)) 6 | PHPexplo <- matrix( 7 | c(0.000, 0.000, 1.000, 8 | 0.373, 0.604, 0.023, 9 | 0.087, 0.908, 0.004, 10 | 0.000, 0.000, 1.000, 11 | 0.000, 0.000, 1.000, 12 | 0.741, 0.178, 0.082, 13 | 0.284, 0.017, 0.700, 14 | 0.007, 0.000, 0.993, 15 | 0.697, 0.239, 0.064),nrow=9,byrow=T) 16 | test_that("BF.mlm exploratory correctly evaluated for given data sets", { 17 | expect_equivalent( 18 | round(BF1$PHP_exploratory,3),PHPexplo, tolerance = .03 19 | )}) 20 | 21 | # tests on same predictor on different DVs 22 | set.seed(123) 23 | BF2 <- BF(x=lm1,hypothesis="disp_on_mpg>disp_on_cyl>disp_on_hp>0; disp_on_mpg=disp_on_cyl=disp_on_hp=0") 24 | test_that("BF.mlm two hypotheses for same IVs correctly evaluated", { 25 | expect_equivalent( 26 | round(BF2$PHP_confirmatory,3),c(0.010,0.000,0.988), tolerance = .015 27 | )}) 28 | # tests on different predictors on same DVs 29 | set.seed(574) 30 | BF3 <- BF(lm1,hypothesis="disp_on_mpg>wt_on_mpg;disp_on_mpg=wt_on_mpg;disp_on_mpg Del_with_Im_in_g2 & 11 | Del_with_Wmn_in_g1 > Del_with_Wmn_in_g2 & 12 | Im_with_Wmn_in_g1 > Im_with_Wmn_in_g2") 13 | 
expect_equivalent( 14 | log(BF6_cor$BFmatrix_confirmatory[1,2]), 4.9, tolerance = .5 15 | )}) 16 | 17 | 18 | -------------------------------------------------------------------------------- /tests/testthat/test_application7_BF.lmerMod.R: -------------------------------------------------------------------------------- 1 | 2 | #check results confirmatory test 3 | test_that("lmerMod two hypotheses correctly evaluated", { 4 | skip_on_cran() 5 | library(lme4) 6 | timssICC_subset <- timssICC[(timssICC$groupNL11==1)+(timssICC$groupHR11==1)>0, 7 | ][c(1:150,1395+1:150),] 8 | outlme1 <- lme4::lmer(math ~ -1 + lln + 9 | groupNL11 + (0+groupNL11 | schoolID) + 10 | groupHR11 + (0+groupHR11 | schoolID), 11 | data=timssICC_subset) 12 | set.seed(123) 13 | BFicc <- BF(outlme1,hypothesis= 14 | "groupNL11 .5 & out$PHP_confirmatory[2] < .5) 17 | }) 18 | 19 | in_count <- c(10, 7, 20, 14, 14, 12, 10, 23, 17, 20, 14, 13, 11, 17, 21, 20 | 11, 16, 14, 17, 17, 19, 21, 7, 13, 0, 1, 7, 2, 3, 1, 2, 1, 3, 21 | 0, 1, 4, 3, 5, 12, 6, 4, 3, 5, 5, 5, 5, 2, 4, 3, 5, 3, 5, 3, 22 | 6, 1, 1, 3, 2, 6, 4, 11, 9, 15, 22, 15, 16, 13, 10, 26, 26, 24, 23 | 13) 24 | 25 | in_spray <- structure(c(1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 26 | 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 3L, 3L, 3L, 3L, 3L, 27 | 3L, 3L, 3L, 3L, 3L, 3L, 3L, 4L, 4L, 4L, 4L, 4L, 4L, 4L, 4L, 4L, 28 | 4L, 4L, 4L, 5L, 5L, 5L, 5L, 5L, 5L, 5L, 5L, 5L, 5L, 5L, 5L, 6L, 29 | 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L), .Label = c("A", 30 | "B", "C", "D", "E", "F"), class = "factor") 31 | 32 | res_standard <- bartlett.test(in_count, in_spray) 33 | res <- bartlett_test(in_count, in_spray) 34 | test_that("bartlett_test same as bartlett.test", { 35 | expect_equivalent(res_standard, res[1:length(res_standard)]) 36 | }) 37 | 38 | out <- BF(res, "F> A=B>D>E=C") 39 | test_that("BF.bartlett returns correct results,", { 40 | expect_equivalent(out$PHP_confirmatory, c(.9958, .00417), tolerance = .001) 41 | }) 42 | 43 | set.seed(57) 44 | out2 
<- BF(res, "F> A=B>D>E=C", log = TRUE) 45 | test_that("BF.bartlett returns correct results,", { 46 | expect_equivalent(out2$BFmatrix_confirmatory[2,1], -5.452534, tolerance = .01) 47 | }) 48 | 49 | set.seed(57) 50 | out2b <- BF(res, log = TRUE, prior.hyp.explo = c(1,2)) 51 | test_that("BF.bartlett returns correct results with prior.hyp.explo,", { 52 | expect_equivalent(out2b$PHP_exploratory, 53 | exp(out2$BFtu_exploratory) * c(1,2) / sum(exp(out2$BFtu_exploratory) * c(1,2)), 54 | tolerance = .01) 55 | }) 56 | 57 | 58 | 59 | 60 | -------------------------------------------------------------------------------- /tests/testthat/test_get_estimates_matrix.R: -------------------------------------------------------------------------------- 1 | cor_est <- get_estimates(cor(iris[,1:4])) 2 | 3 | test_that("get_estimates works for correlations", { 4 | expect_equal(as.vector(cor_est$estimate), 5 | as.vector(cor(iris[,1:4]))) 6 | }) 7 | -------------------------------------------------------------------------------- /tests/testthat/test_hetcor.R: -------------------------------------------------------------------------------- 1 | # test for correlation test on hetcor object 2 | set.seed(54) 3 | res <- polycor::hetcor(fmri[,3:5]) 4 | #BF1 <- BF(res, hypothesis = "Deep_with_Superficial > Middle_with_Superficial") 5 | 6 | BF2 <- BF(res,hypothesis="(Superficial_with_Middle,Deep_with_Superficial,Deep_with_Middle) > 0; 7 | Middle_with_Superficial=Deep_with_Superficial=Deep_with_Middle= 0",prior.hyp.explo=c(1,1,1)) 8 | PHPexplo <- matrix(c(0.51, 0.21, 0.27, 9 | 0.50, 0.17, 0.33, 10 | 0.51, 0.22, 0.26),nrow=3,byrow=T) 11 | test_that("Hetcor exploratory BF gives correct result", {expect_equivalent( 12 | BF2$PHP_exploratory, PHPexplo, tolerance = .01)}) 13 | set.seed(463) 14 | 15 | test_that("Hetcor two hypotheses correctly evaluated", { 16 | expect_equivalent( 17 | unname(BF2$BFtu_confirmatory), 18 | c(1.63,10.2,0.91), 19 | tolerance = .05 20 | )} 21 | ) 22 | 23 | set.seed(164) 24 | BF5 
<- BF(res,hypothesis="Middle_with_Superficial = Deep_with_Superficial > 0") 25 | test_that("Hetcor one hypothesis with equality and order constraint correctly evaluated", { 26 | expect_equivalent( 27 | unname(BF5$PHP_confirmatory),c(0.75,0.24), tolerance = .1 28 | )}) 29 | 30 | 31 | set.seed(564) 32 | res1 <- polycor::hetcor(fmri[,3:4]) 33 | BF3 <- BF(res1,hypothesis="Superficial_with_Middle > .4;Superficial_with_Middle = .4; Superficial_with_Middle < .4") 34 | test_that("Hetcor test BF3 (automatically omit complement)", { 35 | expect_equivalent( 36 | log(unname(BF3$BFtu_confirmatory)),c(-1.2,.38,.27), tolerance = .5 37 | )}) 38 | 39 | 40 | 41 | -------------------------------------------------------------------------------- /tests/testthat/test_metafor.R: -------------------------------------------------------------------------------- 1 | 2 | vi <- 1:10 #squared s.e.'s 3 | yi <- 1:10 #sample means 4 | 5 | ### Fit a random-effects model to the data 6 | res <- metafor::rma(yi = yi, vi = vi) 7 | 8 | #check results exploratory test 9 | test_that("metafor exploratory test", { 10 | set.seed(123) 11 | BF1 <- BF(res,BF.type="correlation",iter=1e3,cov.prob=.9) 12 | expect_equivalent( 13 | round(BF1$BFtu_exploratory[,3],3),c(1.918,1.954), tolerance = .05 14 | ) 15 | expect_equivalent( 16 | round(BF1$estimates[1:2,3],2),c(2.66,1.04), tolerance = .5 17 | ) 18 | }) 19 | -------------------------------------------------------------------------------- /tests/testthat/test_paper_fmri.R: -------------------------------------------------------------------------------- 1 | library(testthat) 2 | constraints.fmri2 <- "Face_on_Deep = Face_on_Superficial = Face_on_Middle < 0; Face_on_Deep < Face_on_Superficial = Face_on_Middle < 0" 3 | 4 | 5 | # Missing data ------------------------------------------------------------ 6 | 7 | fmri_missing <- fmri 8 | set.seed(123) 9 | for(i in 1:6){ 10 | fmri_missing[sample(1:nrow(fmri), 1), sample(1:ncol(fmri), 1)] <- NA 11 | } 12 | 13 | fmri_listdel 
<- fmri_missing[!is.na(apply(fmri_missing, 1, sum)),] 14 | fmri.lm2_listdel <- lm(cbind(Superficial, Middle, Deep) ~ Face + Vehicle, data = fmri_listdel) 15 | 16 | res <- BF(fmri.lm2_listdel, hypothesis = constraints.fmri2) 17 | test_that("BF.fmri with missing data runs", { 18 | expect_true(res$PHP_confirmatory[1]>res$PHP_confirmatory[3]& res$PHP_confirmatory[1]res$PHP_confirmatory[3]& res$PHP_confirmatory[1]4",complement=TRUE,log=FALSE, BF.type = "AFBF",prior.hyp.conf = c(2,1,1)) 36 | summary(BF1) 37 | BF1$BFtable_confirmatory 38 | 39 | # test if one-sided PMP is same as one-sided p-value 40 | ttest2 <- t_test(therapeutic,mu=5,alternative="less") 41 | BF2 <- BF(x=ttest2,hypothesis="mu>5", BF.type = "AFBF") 42 | test_that("1 sample t test of one-sided hypotheses correctly evaluated", { 43 | expect_equivalent( 44 | ttest2$p.value,c(BF2$PHP_confirmatory)[1] 45 | )}) 46 | 47 | ###################################### 48 | # TWO SAMPLES T TEST EQUAL VARIANCES # 49 | ###################################### 50 | 51 | # check if posterior model probabilities are correct 52 | ttest3 <- t_test(therapeutic,therapeutic*.9+.1,var.equal=TRUE) 53 | BF3 <- BF(ttest3,prior.hyp.explo=c(1,1,1), BF.type = "AFBF") 54 | test_that("2 samples t test of exploratory hypotheses correctly evaluated 55 | with equal variances", { 56 | expect_equivalent( 57 | c(unname(BF3$PHP_exploratory)),c(0.767913,0.04941605,0.1826709) 58 | ,tolerance = .00001) 59 | }) 60 | # check if posterior model probabilities are correct 61 | ttest3a <- t_test(x=therapeutic$correct,y=therapeutic$correct*.9+.1,paired=TRUE) 62 | BF3a <- BF(ttest3a,prior.hyp.explo=c(1,1,1), BF.type = "AFBF",hypothesis="difference=0.3;difference<0.3") 63 | test_that("2 samples t test of exploratory hypotheses correctly evaluated 64 | with equal variances", { 65 | expect_equivalent( 66 | round(c(unname(BF3a$PHP_confirmatory)),2),c(0.59,0.04,0.36) 67 | ,tolerance = .00001) 68 | }) 69 | 70 | ttest3a <- 
t_test(x=therapeutic$correct,y=therapeutic$correct*.9+.1,paired=TRUE) 71 | BF3a <- BF(ttest3a,prior.hyp.explo=c(1,1,1), BF.type = "FBF",hypothesis="difference < 1;difference > -1") 72 | BF3a$BFtable_confirmatory 73 | 74 | # t test check for testing interval hypotheses 75 | set.seed(123) 76 | ttest3 <- t_test(therapeutic,therapeutic*runif(length(therapeutic),min=.9,max=1.1)+.1,var.equal=TRUE) 77 | BF3 <- BF(ttest3,hypothesis="difference< 0.5 & difference > -0.5; difference > 0.5; difference < -0.5", 78 | BF.type = "FBF",log = TRUE) 79 | BF3b <- BF(ttest3,BF.type = "FBF",log = TRUE,prior.hyp.explo = 3:1) 80 | test_that("2 samples t test of exploratory hypotheses correctly evaluated 81 | with equal variances", { 82 | expect_equivalent( 83 | c(unname(BF3$BFmatrix_confirmatory[1,])),c(0,3.5,4.1) 84 | ,tolerance = 1) 85 | expect_equivalent( 86 | BF3b$PHP_exploratory, 87 | exp(BF3$BFtu_exploratory)*(3:1)/sum(exp(BF3$BFtu_exploratory) * (3:1)), 88 | tolerance = 1) 89 | }) 90 | 91 | # test if one-sided PMP is same as one-sided p-value 92 | ttest4 <- t_test(therapeutic,therapeutic*.9+.1,var.equal=TRUE,alternative="greater") 93 | BF4 <- BF(ttest4,"difference<0", BF.type = "AFBF") 94 | test_that("2 samples t test of one-sided hypotheses correctly evaluated 95 | with equal variances", { 96 | expect_equivalent( 97 | ttest4$p.value,c(BF4$PHP_confirmatory)[1] 98 | )}) 99 | 100 | 101 | ######################################## 102 | # TWO SAMPLES T TEST UNEQUAL VARIANCES # 103 | ######################################## 104 | 105 | # check posterior probabilities for a given data set 106 | test_that("2 samples t test of two-sided hypotheses correctly evaluated 107 | with unequal variances", { 108 | skip_on_cran() 109 | ttest5 <- t_test(therapeutic,therapeutic*.7+2.5,"two.sided",var.equal=FALSE) 110 | set.seed(123) 111 | BF5 <- BF(ttest5,hypothesis="difference=0;difference<0",prior.hyp.explo=c(1,1,1)) 112 | set.seed(123) 113 | BF5a <- BF(ttest5,prior.hyp.explo=0:2) 114 | 
expect_equivalent( 115 | c(unname(BF5$PHP_exploratory)),c(unname(BF5$PHP_confirmatory)), 116 | tolerance = .05) 117 | expect_equivalent( 118 | unname(BF5a$PHP_exploratory), 119 | unname(BF5$BFtu_exploratory * (0:2) / sum(BF5$BFtu_exploratory * (0:2))), 120 | tolerance = .05) 121 | 122 | BF5b <- BF(ttest5,hypothesis="difference=0; difference> -1 & difference<1; difference< -1; difference>1", 123 | BF.type="FBF",log=TRUE) 124 | expect_equivalent( 125 | length(unname(BF5b$PHP_confirmatory)),4 126 | ) 127 | expect_equivalent( 128 | round(unname(BF5b$PHP_confirmatory),4),c(0.0464, 0.5373, 0.4163, 0.0000), tol=.05 129 | ) 130 | BF5c <- BF(ttest5,hypothesis="difference=0; difference> -1 & difference<1; difference< -1",BF.type="FBF",log=TRUE) 131 | expect_equivalent( 132 | BF5c$BFtu_confirmatory[4],BF5b$BFtu_confirmatory[4], tol=.05 133 | ) 134 | }) 135 | 136 | 137 | 138 | 139 | -------------------------------------------------------------------------------- /tests/testthat/test_variances.R: -------------------------------------------------------------------------------- 1 | 2 | #test for variances 3 | vtest1 <- bartlett_test(InsectSprays$count, InsectSprays$spray) 4 | hypothesis <- "A=B=F>C=D=E" 5 | set.seed(123) 6 | BF1 <- BF(x=vtest1,hypothesis) 7 | BF11 <- BF(x=vtest1,prior.hyp.explo = 3:4) 8 | 9 | #check results exploratory test 10 | test_that("BF.bartlett_htest exploratory hypotheses correctly evaluated", { 11 | expect_equivalent( 12 | round(BF1$PHP_exploratory,7),c(0.0044175,0.9955825) 13 | ) 14 | expect_equivalent( 15 | unname(round(BF11$PHP_exploratory,6)), 16 | unname(round(BF1$BFtu_exploratory * (3:4) / sum(BF1$BFtu_exploratory * (3:4)),6)) 17 | ) 18 | }) 19 | #check results confirmatory test 20 | test_that("BF.bartlett_htest confirmatory hypotheses correctly evaluated", { 21 | expect_equivalent( 22 | round(BF1$PHP_confirmatory,7),c(0.9911905,0.0088095) 23 | )}) 24 | 25 | BF1a <- BF(x=vtest1,hypothesis,log=TRUE) 26 | test_that("BF.bartlett_htest exploratory 
hypotheses correctly evaluated on log scale", { 27 | expect_equivalent( 28 | round(exp(BF1a$BFtu_exploratory),5),round(BF1$BFtu_exploratory,5) 29 | )}) 30 | 31 | hypothesis <- "A=B=F>C=D=E; A=B=F>C>D>E" 32 | set.seed(123) 33 | BF1 <- BF(x=vtest1,hypothesis,complement = F, log = TRUE) 34 | #check results confirmatory test 35 | test_that("BF.bartlett_htest confirmatory hypotheses correctly evaluated log(BF)", { 36 | expect_equivalent( 37 | round(BF1$BFtu_confirmatory,3),c(4.723,4.245) 38 | )}) 39 | test_that("BF.bartlett_htest confirmatory hypotheses correctly evaluated log(BF)", { 40 | expect_equivalent( 41 | round(BF1$BFtu_exploratory[1],4),-5.4178 42 | )}) 43 | 44 | -------------------------------------------------------------------------------- /vignettes/rsconnect/documents/vignette_BFpack.Rmd/rpubs.com/rpubs/Document.dcf: -------------------------------------------------------------------------------- 1 | name: Document 2 | title: 3 | username: 4 | account: rpubs 5 | server: rpubs.com 6 | hostUrl: rpubs.com 7 | appId: https://api.rpubs.com/api/v1/document/1252279/f3305f44a80749fea8291a1f9c5dd267 8 | bundleId: https://api.rpubs.com/api/v1/document/1252279/f3305f44a80749fea8291a1f9c5dd267 9 | url: http://rpubs.com/publish/claim/1252279/09fffec4ae4143579533ba025b3a5a39 10 | version: 1 11 | -------------------------------------------------------------------------------- /vignettes/vignette_BFpack.R: -------------------------------------------------------------------------------- 1 | ## ----setup, include=FALSE----------------------------------------------------- 2 | knitr::opts_chunk$set(echo = TRUE) 3 | 4 | --------------------------------------------------------------------------------